Merge branch 'main' into save-load-nodes

blessedcoolant authored on 2023-07-13 13:37:36 +12:00, committed by GitHub
22 changed files with 527 additions and 315 deletions

View File

@@ -250,8 +250,8 @@ from .model_cache import ModelCache, ModelLocker
from .models import (
BaseModelType, ModelType, SubModelType,
ModelError, SchedulerPredictionType, MODEL_CLASSES,
ModelConfigBase, ModelNotFoundException,
)
ModelConfigBase, ModelNotFoundException, InvalidModelException,
)
# We are only starting to number the config file with release 3.
# The config file version doesn't have to start at release version, but it will help
@@ -275,10 +275,6 @@ class ModelInfo():
def __exit__(self,*args, **kwargs):
self.context.__exit__(*args, **kwargs)
class InvalidModelError(Exception):
"Raised when an invalid model is requested"
pass
class AddModelResult(BaseModel):
name: str = Field(description="The name of the model after installation")
model_type: ModelType = Field(description="The type of model")
@@ -817,6 +813,8 @@ class ModelManager(object):
model_config: ModelConfigBase = model_class.probe_config(str(model_path))
self.models[model_key] = model_config
new_models_found = True
except InvalidModelException:
self.logger.warning(f"Not a valid model: {model_path}")
except NotImplementedError as e:
self.logger.warning(e)
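
For orientation, here is a minimal self-contained sketch (illustrative names only, not the actual InvokeAI classes) of the scan behaviour this hunk introduces: a path whose layout cannot be identified now raises InvalidModelException, which the scan loop logs as a warning and skips instead of aborting the whole directory scan.

import logging
from pathlib import Path

logger = logging.getLogger("model_scan")  # stand-in for self.logger

class InvalidModelException(Exception):
    pass

def probe_config(model_path: Path) -> dict:
    # Stand-in for model_class.probe_config(); here only .safetensors files count as valid.
    if model_path.suffix != ".safetensors":
        raise InvalidModelException(f"Not a valid model: {model_path}")
    return {"path": str(model_path)}

def scan_directory(models_dir: Path) -> dict[str, dict]:
    models: dict[str, dict] = {}
    for model_path in models_dir.rglob("*"):
        if not model_path.is_file():
            continue
        try:
            models[model_path.stem] = probe_config(model_path)
        except InvalidModelException:
            # Mirrors the new behaviour above: warn and keep scanning.
            logger.warning(f"Not a valid model: {model_path}")
    return models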

View File

@@ -2,7 +2,7 @@ import inspect
from enum import Enum
from pydantic import BaseModel
from typing import Literal, get_origin
from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException
from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException, InvalidModelException
from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
from .vae import VaeModel
from .lora import LoRAModel

View File

@@ -15,6 +15,9 @@ from contextlib import suppress
from pydantic import BaseModel, Field
from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
class InvalidModelException(Exception):
pass
class ModelNotFoundException(Exception):
pass
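
For orientation, a minimal self-contained sketch (illustrative names, not copied from this diff) of the contract these two exceptions establish for the detect_format() implementations in the hunks below: a missing path raises ModelNotFoundException, a recognized layout returns a format value, and anything else raises InvalidModelException so the caller can skip it.

import os
from enum import Enum

class InvalidModelException(Exception):
    pass

class ModelNotFoundException(Exception):
    pass

class ExampleModelFormat(str, Enum):  # hypothetical format enum for illustration
    Diffusers = "diffusers"
    Checkpoint = "checkpoint"

def detect_format(path: str) -> ExampleModelFormat:
    if not os.path.exists(path):
        raise ModelNotFoundException()
    if os.path.isdir(path) and os.path.exists(os.path.join(path, "config.json")):
        return ExampleModelFormat.Diffusers  # diffusers-style folder
    if os.path.isfile(path) and any(
        path.endswith(f".{ext}") for ext in ("safetensors", "ckpt", "pt")
    ):
        return ExampleModelFormat.Checkpoint  # single-file checkpoint
    raise InvalidModelException(f"Not a valid model: {path}")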

View File

@@ -13,6 +13,7 @@ from .base import (
calc_model_size_by_fs,
calc_model_size_by_data,
classproperty,
InvalidModelException,
)
class ControlNetModelFormat(str, Enum):
@@ -73,10 +74,18 @@ class ControlNetModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
return ControlNetModelFormat.Diffusers
else:
return ControlNetModelFormat.Checkpoint
if os.path.exists(os.path.join(path, "config.json")):
return ControlNetModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
return ControlNetModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod
def convert_if_required(

View File

@@ -9,6 +9,7 @@ from .base import (
ModelType,
SubModelType,
classproperty,
InvalidModelException,
)
# TODO: naming
from ..lora import LoRAModel as LoRAModelRaw
@@ -56,10 +57,18 @@ class LoRAModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
return LoRAModelFormat.Diffusers
else:
return LoRAModelFormat.LyCORIS
if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
return LoRAModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return LoRAModelFormat.LyCORIS
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod
def convert_if_required(

View File

@@ -16,6 +16,7 @@ from .base import (
SilenceWarnings,
read_checkpoint_meta,
classproperty,
InvalidModelException,
)
from invokeai.app.services.config import InvokeAIAppConfig
from omegaconf import OmegaConf
@@ -98,10 +99,18 @@ class StableDiffusion1Model(DiffusersModel):
@classmethod
def detect_format(cls, model_path: str):
if not os.path.exists(model_path):
raise ModelNotFoundException()
if os.path.isdir(model_path):
return StableDiffusion1ModelFormat.Diffusers
else:
return StableDiffusion1ModelFormat.Checkpoint
if os.path.exists(os.path.join(model_path, "model_index.json")):
return StableDiffusion1ModelFormat.Diffusers
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion1ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@classmethod
def convert_if_required(
@@ -200,10 +209,18 @@ class StableDiffusion2Model(DiffusersModel):
@classmethod
def detect_format(cls, model_path: str):
if not os.path.exists(model_path):
raise ModelNotFoundException()
if os.path.isdir(model_path):
return StableDiffusion2ModelFormat.Diffusers
else:
return StableDiffusion2ModelFormat.Checkpoint
if os.path.exists(os.path.join(model_path, "model_index.json")):
return StableDiffusion2ModelFormat.Diffusers
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion2ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@classmethod
def convert_if_required(

View File

@@ -9,6 +9,7 @@ from .base import (
SubModelType,
classproperty,
ModelNotFoundException,
InvalidModelException,
)
# TODO: naming
from ..lora import TextualInversionModel as TextualInversionModelRaw
@@ -59,7 +60,18 @@ class TextualInversionModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
return None
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
if os.path.exists(os.path.join(path, "learned_embeds.bin")):
return None # diffusers-ti
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return None
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod
def convert_if_required(

View File

@@ -15,6 +15,7 @@ from .base import (
calc_model_size_by_fs,
calc_model_size_by_data,
classproperty,
InvalidModelException,
)
from invokeai.app.services.config import InvokeAIAppConfig
from diffusers.utils import is_safetensors_available
@@ -75,10 +76,18 @@ class VaeModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
return VaeModelFormat.Diffusers
else:
return VaeModelFormat.Checkpoint
if os.path.exists(os.path.join(path, "config.json")):
return VaeModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return VaeModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod
def convert_if_required(

View File

@@ -102,6 +102,8 @@ export type AppFeature =
export type SDFeature =
| 'controlNet'
| 'noise'
| 'perlinNoise'
| 'noiseThreshold'
| 'variation'
| 'symmetry'
| 'seamless'

View File

@@ -27,6 +27,9 @@ const ParamNoiseCollapse = () => {
const { t } = useTranslation();
const isNoiseEnabled = useFeatureStatus('noise').isFeatureEnabled;
const isPerlinNoiseEnabled = useFeatureStatus('perlinNoise').isFeatureEnabled;
const isNoiseThresholdEnabled =
useFeatureStatus('noiseThreshold').isFeatureEnabled;
const { activeLabel } = useAppSelector(selector);
@@ -42,8 +45,8 @@ const ParamNoiseCollapse = () => {
<Flex sx={{ gap: 2, flexDirection: 'column' }}>
<ParamNoiseToggle />
<ParamCpuNoiseToggle />
<ParamPerlinNoise />
<ParamNoiseThreshold />
{isPerlinNoiseEnabled && <ParamPerlinNoise />}
{isNoiseThresholdEnabled && <ParamNoiseThreshold />}
</Flex>
</IAICollapse>
);

View File

@@ -6,8 +6,15 @@ import { merge } from 'lodash-es';
export const initialConfigState: AppConfig = {
shouldUpdateImagesOnConnect: false,
disabledTabs: [],
disabledFeatures: [],
disabledSDFeatures: [],
disabledFeatures: ['lightbox', 'faceRestore'],
disabledSDFeatures: [
'variation',
'seamless',
'symmetry',
'hires',
'perlinNoise',
'noiseThreshold',
],
canRestoreDeletedImagesFromBin: true,
sd: {
disabledControlNetModels: [],