From 3546c41f4a361dfc4be0f7cd97005b41ba191e73 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Mon, 23 Oct 2023 18:48:14 -0400
Subject: [PATCH 1/3] close #4975

---
 invokeai/app/services/model_manager/__init__.py          | 1 +
 invokeai/backend/training/textual_inversion_training.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/invokeai/app/services/model_manager/__init__.py b/invokeai/app/services/model_manager/__init__.py
index e69de29bb2..da54cbf89f 100644
--- a/invokeai/app/services/model_manager/__init__.py
+++ b/invokeai/app/services/model_manager/__init__.py
@@ -0,0 +1 @@
+from .model_manager_default import ModelManagerService

diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py
index 153bd0fcc4..9bc1d188bc 100644
--- a/invokeai/backend/training/textual_inversion_training.py
+++ b/invokeai/backend/training/textual_inversion_training.py
@@ -41,7 +41,7 @@ from transformers import CLIPTextModel, CLIPTokenizer
 
 # invokeai stuff
 from invokeai.app.services.config import InvokeAIAppConfig, PagingArgumentParser
-from invokeai.app.services.model_manager_service import ModelManagerService
+from invokeai.app.services.model_manager import ModelManagerService
 from invokeai.backend.model_management.models import SubModelType
 
 if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
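
For context on PATCH 1/3 above: issue #4975 was evidently the textual inversion
script failing to import ModelManagerService from the old flat module
invokeai.app.services.model_manager_service, after the services were reorganized
into a model_manager package whose __init__.py was empty (e69de29bb2 is git's
hash for the empty blob). The one-line re-export makes the package expose the
service again. A minimal sketch of the idiom, with hypothetical module names:

    # mypkg/__init__.py
    # Re-export the implementation's public class at package level so callers
    # can write `from mypkg import Service` instead of importing the
    # implementation module directly.
    from .service_impl import Service  # noqa: F401
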
From c14aa30956c4255f49dce00d20c3e5a2b1df6a1e Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Mon, 23 Oct 2023 20:37:33 -0400
Subject: [PATCH 2/3] fix the merge script to correctly display models sorted by base

---
 invokeai/frontend/merge/merge_diffusers.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py
index 8fa02cb49c..f3672acbf2 100644
--- a/invokeai/frontend/merge/merge_diffusers.py
+++ b/invokeai/frontend/merge/merge_diffusers.py
@@ -131,6 +131,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
             values=[
                 "Models Built on SD-1.x",
                 "Models Built on SD-2.x",
+                "Models Built on SDXL",
             ],
             value=[self.current_base],
             columns=4,
@@ -309,7 +310,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         else:
             return True
 
-    def get_model_names(self, base_model: Optional[BaseModelType] = None) -> List[str]:
+    def get_model_names(self, base_model: BaseModelType = BaseModelType.StableDiffusion1) -> List[str]:
         model_names = [
             info["model_name"]
             for info in self.model_manager.list_models(model_type=ModelType.Main, base_model=base_model)
@@ -318,7 +319,8 @@
         return sorted(model_names)
 
     def _populate_models(self, value=None):
-        base_model = tuple(BaseModelType)[value[0]]
+        bases = ["sd-1", "sd-2", "sdxl"]
+        base_model = BaseModelType(bases[value[0]])
         self.model_names = self.get_model_names(base_model)
 
         models_plus_none = self.model_names.copy()
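
For context on PATCH 2/3 above: _populate_models used to pick the base with
tuple(BaseModelType)[value[0]], i.e. by the enum's declaration order, which
silently has to stay in lockstep with the radio-button rows; adding the SDXL
row is where that coupling breaks down, so the fix pins the row order in an
explicit list and looks the enum member up by value. The get_model_names
default also changes from None, which evidently listed models of every base,
to SD-1, so each view of the form shows a single base. A minimal sketch of the
two lookups, assuming a value-backed enum with the members shown (the values
match those used in the patch; the real enum lives in
invokeai.backend.model_management.models):

    from enum import Enum

    class BaseModelType(str, Enum):
        # Illustrative members for this sketch.
        StableDiffusion1 = "sd-1"
        StableDiffusion2 = "sd-2"
        StableDiffusionXL = "sdxl"

    row = 2  # index of the radio button the user selected

    # Fragile: correct only while enum declaration order tracks the widget rows.
    by_position = tuple(BaseModelType)[row]

    # Explicit: the row order is pinned here, and lookup is by enum value.
    bases = ["sd-1", "sd-2", "sdxl"]
    by_value = BaseModelType(bases[row])

    assert by_value is BaseModelType.StableDiffusionXL
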
From 6cbc69f3b7785531b49470ca93399d917fff832d Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Mon, 23 Oct 2023 22:06:10 -0400
Subject: [PATCH 3/3] support conversion of controlnets from safetensors to diffusers

---
 .../backend/install/model_install_backend.py |  6 ++
 .../model_management/models/controlnet.py    |  3 +-
 invokeai/configs/controlnet/cldm_v15.yaml    | 79 +++++++++++++++++
 invokeai/configs/controlnet/cldm_v21.yaml    | 85 +++++++++++++++++++
 4 files changed, 172 insertions(+), 1 deletion(-)
 create mode 100644 invokeai/configs/controlnet/cldm_v15.yaml
 create mode 100644 invokeai/configs/controlnet/cldm_v21.yaml

diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py
index 9224f5c8b2..9784aa0ac2 100644
--- a/invokeai/backend/install/model_install_backend.py
+++ b/invokeai/backend/install/model_install_backend.py
@@ -460,6 +460,12 @@ class ModelInstall(object):
             possible_conf = path.with_suffix(".yaml")
             if possible_conf.exists():
                 legacy_conf = str(self.relative_to_root(possible_conf))
+            else:
+                legacy_conf = Path(
+                    self.config.root_path,
+                    "configs/controlnet",
+                    ("cldm_v15.yaml" if info.base_type == BaseModelType("sd-1") else "cldm_v21.yaml"),
+                )
 
         if legacy_conf:
             attributes.update(dict(config=str(legacy_conf)))

diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py
index 359df91a82..6a42b59fe1 100644
--- a/invokeai/backend/model_management/models/controlnet.py
+++ b/invokeai/backend/model_management/models/controlnet.py
@@ -132,13 +132,14 @@
     model_path: str,
     output_path: str,
     base_model: BaseModelType,
-    model_config: ControlNetModel.CheckpointConfig,
+    model_config: str,
 ) -> str:
     """
    Convert the controlnet from checkpoint format to diffusers format,
    cache it to disk, and return Path to converted
    file. If already on disk then just returns Path.
     """
+    print(f"DEBUG: controlnet config = {model_config}")
     app_config = InvokeAIAppConfig.get_config()
     weights = app_config.root_path / model_path
     output_path = Path(output_path)

diff --git a/invokeai/configs/controlnet/cldm_v15.yaml b/invokeai/configs/controlnet/cldm_v15.yaml
new file mode 100644
index 0000000000..fde1825577
--- /dev/null
+++ b/invokeai/configs/controlnet/cldm_v15.yaml
@@ -0,0 +1,79 @@
+model:
+  target: cldm.cldm.ControlLDM
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    control_key: "hint"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+    only_mid_control: False
+
+    control_stage_config:
+      target: cldm.cldm.ControlNet
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        hint_channels: 3
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    unet_config:
+      target: cldm.cldm.ControlledUnetModel
+      params:
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_heads: 8
+        use_spatial_transformer: True
+        transformer_depth: 1
+        context_dim: 768
+        use_checkpoint: True
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

diff --git a/invokeai/configs/controlnet/cldm_v21.yaml b/invokeai/configs/controlnet/cldm_v21.yaml
new file mode 100644
index 0000000000..fc65193647
--- /dev/null
+++ b/invokeai/configs/controlnet/cldm_v21.yaml
@@ -0,0 +1,85 @@
+model:
+  target: cldm.cldm.ControlLDM
+  params:
+    linear_start: 0.00085
+    linear_end: 0.0120
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: "jpg"
+    cond_stage_key: "txt"
+    control_key: "hint"
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: false
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: False
+    only_mid_control: False
+
+    control_stage_config:
+      target: cldm.cldm.ControlNet
+      params:
+        use_checkpoint: True
+        image_size: 32 # unused
+        in_channels: 4
+        hint_channels: 3
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_head_channels: 64 # need to fix for flash-attn
+        use_spatial_transformer: True
+        use_linear_in_transformer: True
+        transformer_depth: 1
+        context_dim: 1024
+        legacy: False
+
+    unet_config:
+      target: cldm.cldm.ControlledUnetModel
+      params:
+        use_checkpoint: True
+        image_size: 32 # unused
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions: [ 4, 2, 1 ]
+        num_res_blocks: 2
+        channel_mult: [ 1, 2, 4, 4 ]
+        num_head_channels: 64 # need to fix for flash-attn
+        use_spatial_transformer: True
+        use_linear_in_transformer: True
+        transformer_depth: 1
+        context_dim: 1024
+        legacy: False
+
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          #attn_type: "vanilla-xformers"
+          double_z: true
+          z_channels: 4
+          resolution: 256
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+      params:
+        freeze: True
+        layer: "penultimate"
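
For context on PATCH 3/3 above: the installer previously attached a conversion
config to a checkpoint-format controlnet only when a sidecar .yaml sat next to
the weights; the new else branch falls back to the bundled cldm_v15.yaml or
cldm_v21.yaml under configs/controlnet, keyed on the detected base model, and
_convert_controlnet_ckpt_and_cache loosens model_config to a plain string to
match. A simplified sketch of the selection rule (the helper name is
illustrative; the paths and file names follow the hunks above):

    from pathlib import Path

    def pick_controlnet_config(weights: Path, root: Path, base_type: str) -> Path:
        # Prefer a sidecar config distributed alongside the checkpoint...
        sidecar = weights.with_suffix(".yaml")
        if sidecar.exists():
            return sidecar
        # ...otherwise fall back to the bundled ControlNet config:
        # cldm_v15.yaml for SD-1 bases, cldm_v21.yaml for everything else.
        name = "cldm_v15.yaml" if base_type == "sd-1" else "cldm_v21.yaml"
        return root / "configs" / "controlnet" / name
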