Fixes, first runnable version

Sergey Borisov 2023-06-11 16:42:40 +03:00
parent 738ba40f51
commit 694fd0c92f
4 changed files with 64 additions and 23 deletions

View File

@@ -16,13 +16,14 @@ class RestorationServices:
gfpgan, codeformer, esrgan = None, None, None
if args.restore or args.esrgan:
restoration = Restoration()
if args.restore:
# TODO: redo for new model structure
if False and args.restore:
gfpgan, codeformer = restoration.load_face_restore_models(
args.gfpgan_model_path
)
else:
logger.info("Face restoration disabled")
if args.esrgan:
if False and args.esrgan:
esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
else:
logger.info("Upscaling disabled")

View File

@@ -185,9 +185,10 @@ CONFIG_FILE_VERSION='3.0.0'
class SDModelInfo():
context: ModelLocker
name: str
type: SDModelType
base_model: BaseModelType
type: ModelType
hash: str
location: Union[Path,str]
location: Union[Path, str]
precision: torch.dtype
revision: str = None
_cache: ModelCache = None
@@ -233,6 +234,38 @@ class ModelManager(object):
logger: types.ModuleType = logger
# TODO:
def _convert_2_3_models(self, config: DictConfig):
for model_name, model_config in config.items():
if model_config["format"] == "diffusers":
pass
elif model_config["format"] == "ckpt":
if any(model_config["config"].endswith(file) for file in {
"v1-finetune.yaml",
"v1-finetune_style.yaml",
"v1-inference.yaml",
"v1-inpainting-inference.yaml",
"v1-m1-finetune.yaml",
}):
# copy as sd 1.5
pass
# should be ~99% accurate
elif model_config["config"].endswith("v2-inference-v.yaml"):
# copy as sd 2.x (768)
pass
# honestly not sure how accurate this is
elif model_config["config"].endswith("v2-inference.yaml"):
# copy as sd 2.x-base (512)
pass
else:
# TODO:
raise Exception("Unknown model")
def __init__(
self,
config: Union[Path, DictConfig, str],
@@ -257,7 +290,10 @@ class ModelManager(object):
elif not isinstance(config, DictConfig):
raise ValueError('config argument must be an OmegaConf object, a Path or a string')
config_meta = ConfigMeta(config.pop("__metadata__")) # TODO: naming
#if "__meta__" not in config:
# config = self._convert_2_3_models(config)
config_meta = ConfigMeta(**config.pop("__metadata__")) # TODO: naming
# TODO: metadata not found
self.models = dict()
@@ -268,7 +304,6 @@ class ModelManager(object):
# check config version number and update on disk/RAM if necessary
self.globals = InvokeAIAppConfig.get_config()
self._update_config_file_version()
self.logger = logger
self.cache = ModelCache(
max_cache_size=max_cache_size,
@@ -280,7 +315,7 @@ class ModelManager(object):
self.cache_keys = dict()
# add controlnet, lora and textual_inversion models from disk
self.scan_models_directory(include_diffusers=False)
self.scan_models_directory()
def model_exists(
self,
@@ -306,7 +341,7 @@ class ModelManager(object):
def parse_key(self, model_key: str) -> Tuple[str, BaseModelType, ModelType]:
base_model_str, model_type_str, model_name = model_key.split('/', 2)
try:
model_type = SDModelType(model_type_str)
model_type = ModelType(model_type_str)
except:
raise Exception(f"Unknown model type: {model_type_str}")
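For reference, the key format that parse_key consumes is "<base_model>/<model_type>/<model_name>", using the new enum string values. A small sketch, assuming a loaded ModelManager instance named mgr and a made-up model name:

# Sketch: how a model key splits under the new naming scheme.
# "my-lora" is a placeholder model name, not a real entry.
key = "sd-1.5/lora/my-lora"
base_model_str, model_type_str, model_name = key.split('/', 2)
# base_model_str == "sd-1.5", model_type_str == "lora", model_name == "my-lora"
# mgr.parse_key(key) would then return roughly
# ("my-lora", BaseModelType.StableDiffusion1_5, ModelType.Lora)
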
@@ -492,7 +527,7 @@ class ModelManager(object):
def list_models(
self,
base_model: Optional[BaseModelType] = None,
model_type: Optional[SDModelType] = None,
model_type: Optional[ModelType] = None,
) -> Dict[str, Dict[str, str]]:
"""
Return a dict of models, in format [base_model][model_type][model_name]
@@ -519,9 +554,9 @@ class ModelManager(object):
if cur_model_type not in models[cur_base_model]:
models[cur_base_model][cur_model_type] = dict()
models[m_base_model][stanza_type][model_name] = dict(
models[cur_base_model][cur_model_type][cur_model_name] = dict(
**model_config.dict(exclude_defaults=True),
name=model_name,
name=cur_model_name,
base_model=cur_base_model,
type=cur_model_type,
)
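list_models therefore returns a dict nested as [base_model][model_type][model_name], with name, base_model and type echoed into each entry. A rough sketch of one entry (all values are placeholders):

# Approximate shape of list_models() output; values are placeholders.
{
    "sd-1.5": {
        "lora": {
            "my-lora": {
                "name": "my-lora",
                "base_model": "sd-1.5",
                "type": "lora",
                # ...plus any non-default fields from the model's config
            },
        },
    },
}
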
@@ -631,6 +666,7 @@ class ModelManager(object):
path to the configuration file, then the new entry will be committed to the
models.yaml file.
"""
raise NotImplementedError("TODO: ")
model_name = model_name or Path(repo_or_path).stem
model_description = description or f"Imported diffusers model {model_name}"
new_config = dict(
@@ -658,6 +694,7 @@ class ModelManager(object):
Creates an entry for the indicated lora file. Call
mgr.commit() to write out the configuration to models.yaml
"""
raise NotImplementedError("TODO: ")
path = Path(path)
model_name = model_name or path.stem
model_description = description or f"LoRA model {model_name}"
@@ -682,6 +719,7 @@ class ModelManager(object):
Creates an entry for the indicated textual inversion (embedding) file. Call
mgr.commit() to write out the configuration to models.yaml
"""
raise NotImplementedError("TODO: ")
path = Path(path)
if path.is_dir() and (path / "learned_embeds.bin").exists():
weights = path / "learned_embeds.bin"
@@ -717,6 +755,7 @@ class ModelManager(object):
Convert a legacy ckpt weights file to diffuser model and import
into models.yaml.
"""
raise NotImplementedError("TODO: ")
ckpt_path = self._resolve_path(ckpt_path, "models/ldm/stable-diffusion-v1")
if original_config_file:
original_config_file = self._resolve_path(
@@ -883,15 +922,11 @@ class ModelManager(object):
resolved_path = self.globals.root_dir / source
return resolved_path
def _update_config_file_version(self):
# TODO:
raise Exception("TODO: ")
def scan_models_directory(self):
for model_key in list(self.models.keys()):
model_name, base_model, model_type = self.parse_key(model_key)
if not os.path.exists(model_config.path):
if not os.path.exists(self.models[model_key].path):
if model_class.save_to_config:
self.models[model_key].error = ModelError.NotFound
else:
@@ -904,6 +939,9 @@ class ModelManager(object):
model_class = MODEL_CLASSES[base_model][model_type]
models_dir = os.path.join(self.globals.models_path, base_model, model_type)
if not os.path.exists(models_dir):
continue # TODO: or create all folders?
for entry_name in os.listdir(models_dir):
model_path = os.path.join(models_dir, entry_name)
model_name = Path(model_path).stem
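The scan builds one directory per (base_model, model_type) pair under the models path, so the on-disk layout roughly follows the new enum string values. A small sketch of the path construction (root path and entry name are placeholders):

import os
from pathlib import Path

# Placeholder values; the real ones come from InvokeAIAppConfig and the enums.
models_path = "/path/to/invokeai/models"
base_model, model_type = "sd-1.5", "lora"

models_dir = os.path.join(models_path, base_model, model_type)
# -> /path/to/invokeai/models/sd-1.5/lora
model_path = os.path.join(models_dir, "my-lora.safetensors")
model_name = Path(model_path).stem   # -> "my-lora", as in the scan loop above
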

View File

@@ -1,19 +1,21 @@
import sys
import typing
import inspect
from enum import Enum
import torch
from diffusers import DiffusionPipeline, ConfigMixin
from pydantic import BaseModel, Field
from typing import List, Dict, Optional, Type
from typing import List, Dict, Optional, Type, Literal
class BaseModelType(str, Enum):
#StableDiffusion1_5 = "stable_diffusion_1_5"
#StableDiffusion2 = "stable_diffusion_2"
#StableDiffusion2Base = "stable_diffusion_2_base"
# TODO: maybe also add sample size (512/768)?
StableDiffusion1_5 = "SD-1"
StableDiffusion2Base = "SD-2-base" # 512 pixels; this will have epsilon parameterization
StableDiffusion2 = "SD-2" # 768 pixels; this will have v-prediction parameterization
StableDiffusion1_5 = "sd-1.5"
StableDiffusion2Base = "sd-2-base" # 512 pixels; this will have epsilon parameterization
StableDiffusion2 = "sd-2" # 768 pixels; this will have v-prediction parameterization
#Kandinsky2_1 = "kandinsky_2_1"
class ModelType(str, Enum):
@@ -21,7 +23,7 @@ class ModelType(str, Enum):
Vae = "vae"
Lora = "lora"
ControlNet = "controlnet"
#ControlNet = "controlnet"
TextualInversion = "embedding"
class SubModelType(str, Enum):
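Because both enums subclass str, the new lowercase values compare and serialize as plain strings, which is what lets them double as config keys and directory names. A tiny sketch using only members shown in this diff (the model name is made up):

from enum import Enum

# Re-declared here only to make the sketch self-contained; the real
# definitions are the ones in this file.
class BaseModelType(str, Enum):
    StableDiffusion1_5 = "sd-1.5"
    StableDiffusion2Base = "sd-2-base"
    StableDiffusion2 = "sd-2"

class ModelType(str, Enum):
    Vae = "vae"
    Lora = "lora"
    TextualInversion = "embedding"

assert BaseModelType.StableDiffusion1_5 == "sd-1.5"   # str mixin: equal to its value
key = "/".join([BaseModelType.StableDiffusion1_5.value,
                ModelType.Lora.value,
                "my-lora"])
# key == "sd-1.5/lora/my-lora", the format parse_key() splits apart
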

View File

@@ -14,7 +14,7 @@ export const receivedModels = createAppAsyncThunk(
const response = await ModelsService.listModels();
const deserializedModels = reduce(
response.models['diffusers'],
response.models['sd-1.5']['pipeline'],
(modelsAccumulator, model, modelName) => {
modelsAccumulator[modelName] = { ...model, name: modelName };
@@ -25,7 +25,7 @@ export const receivedModels = createAppAsyncThunk(
models.info(
{ response },
`Received ${size(response.models['diffusers'])} models`
`Received ${size(response.models['sd-1.5']['pipeline'])} models`
);
return deserializedModels;
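
The thunk now indexes the nested payload by base model and type instead of the flat 'diffusers' map. A plain-Python sketch of the same reduce, with a placeholder payload (only the "sd-1.5" / "pipeline" nesting is taken from this diff):

# Placeholder payload mimicking the assumed ModelsService.listModels() shape.
response_models = {
    "sd-1.5": {
        "pipeline": {
            "my-model": {"description": "placeholder"},
        },
    },
}

# Equivalent of the reduce() above: flatten to a name-keyed map and
# copy the name into each record.
deserialized_models = {
    name: {**model, "name": name}
    for name, model in response_models["sd-1.5"]["pipeline"].items()
}
# -> {"my-model": {"description": "placeholder", "name": "my-model"}}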