rename ModelType.Pipeline to ModelType.Main

Lincoln Stein 2023-06-24 11:45:49 -04:00
parent 539d1f3bde
commit ba1371a88f
10 changed files with 35 additions and 35 deletions


@@ -73,7 +73,7 @@ class PipelineModelLoaderInvocation(BaseInvocation):
         base_model = self.model.base_model
         model_name = self.model.model_name
-        model_type = ModelType.Pipeline
+        model_type = ModelType.Main
         # TODO: not found exceptions
         if not context.services.model_manager.model_exists(


@@ -618,7 +618,7 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
     dest = root / 'models'
     for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
-        for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora,
+        for model_type in [ModelType.Main, ModelType.Vae, ModelType.Lora,
                            ModelType.ControlNet,ModelType.TextualInversion]:
             path = dest / model_base.value / model_type.value
             path.mkdir(parents=True, exist_ok=True)


@@ -94,7 +94,7 @@ class MigrateTo3(object):
         Create the basic directory structure for the models folder.
         '''
         for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
-            for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora,
+            for model_type in [ModelType.Main, ModelType.Vae, ModelType.Lora,
                                ModelType.ControlNet,ModelType.TextualInversion]:
                 path = self.dest_models / model_base.value / model_type.value
                 path.mkdir(parents=True, exist_ok=True)


@@ -139,7 +139,7 @@ class ModelInstall(object):
         models = set()
         for key, value in self.datasets.items():
             name,base,model_type = ModelManager.parse_key(key)
-            if model_type==ModelType.Pipeline:
+            if model_type==ModelType.Main:
                 models.add(key)
         return models
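The filter above relies on dataset keys of the form <base>/<type>/<name>, the same keys that appear in the predefined-models file further down (e.g. sd-1/main/stable-diffusion-v1-5). ModelManager.parse_key itself is not part of this diff; the following is only a minimal sketch of what such a parser presumably does, matching the unpacking order shown above:

    def parse_key(key: str) -> tuple[str, str, str]:
        # "sd-1/main/stable-diffusion-v1-5" -> ("stable-diffusion-v1-5", "sd-1", "main")
        base, model_type, name = key.split('/', 2)
        return (name, base, model_type)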
@@ -224,7 +224,7 @@ class ModelInstall(object):
         try:
             logger.info(f'Probing {path}')
             info = info or ModelProbe().heuristic_probe(path,self.prediction_helper)
-            if info.model_type == ModelType.Pipeline:
+            if info.model_type == ModelType.Main:
                 model_name = path.stem if info.format=='checkpoint' else path.name
                 if self.mgr.model_exists(model_name, info.base_type, info.model_type):
                     raise Exception(f'A model named "{model_name}" is already installed.')
@@ -314,7 +314,7 @@ class ModelInstall(object):
             description = str(description),
             model_format = info.format,
         )
-        if info.model_type == ModelType.Pipeline:
+        if info.model_type == ModelType.Main:
             attributes.update(dict(variant = info.variant_type,))
         if info.format=="checkpoint":
             try:


@@ -33,7 +33,7 @@ class ModelProbe(object):
     }
     CLASS2TYPE = {
-        'StableDiffusionPipeline' : ModelType.Pipeline,
+        'StableDiffusionPipeline' : ModelType.Main,
         'AutoencoderKL' : ModelType.Vae,
         'ControlNetModel' : ModelType.ControlNet,
     }
@@ -116,7 +116,7 @@ class ModelProbe(object):
         if len(checkpoint) < 10 and all(isinstance(v, torch.Tensor) for v in checkpoint.values()):
             return ModelType.TextualInversion
         if any([x.startswith("model.diffusion_model") for x in state_dict.keys()]):
-            return ModelType.Pipeline
+            return ModelType.Main
         if any([x.startswith("encoder.conv_in") for x in state_dict.keys()]):
             return ModelType.Vae
         if "string_to_token" in state_dict or "emb_params" in state_dict:
@@ -207,7 +207,7 @@ class CheckpointProbeBase(ProbeBase):
     def get_variant_type(self)-> ModelVariantType:
         model_type = ModelProbe.get_model_type_from_checkpoint(self.checkpoint_path,self.checkpoint)
-        if model_type != ModelType.Pipeline:
+        if model_type != ModelType.Main:
             return ModelVariantType.Normal
         state_dict = self.checkpoint.get('state_dict') or self.checkpoint
         in_channels = state_dict[
@@ -409,12 +409,12 @@ class LoRAFolderProbe(FolderProbeBase):
         return LoRACheckpointProbe(model_file,None).get_base_type()

 ############## register probe classes ######
-ModelProbe.register_probe('diffusers', ModelType.Pipeline, PipelineFolderProbe)
+ModelProbe.register_probe('diffusers', ModelType.Main, PipelineFolderProbe)
 ModelProbe.register_probe('diffusers', ModelType.Vae, VaeFolderProbe)
 ModelProbe.register_probe('diffusers', ModelType.Lora, LoRAFolderProbe)
 ModelProbe.register_probe('diffusers', ModelType.TextualInversion, TextualInversionFolderProbe)
 ModelProbe.register_probe('diffusers', ModelType.ControlNet, ControlNetFolderProbe)
-ModelProbe.register_probe('checkpoint', ModelType.Pipeline, PipelineCheckpointProbe)
+ModelProbe.register_probe('checkpoint', ModelType.Main, PipelineCheckpointProbe)
 ModelProbe.register_probe('checkpoint', ModelType.Vae, VaeCheckpointProbe)
 ModelProbe.register_probe('checkpoint', ModelType.Lora, LoRACheckpointProbe)
 ModelProbe.register_probe('checkpoint', ModelType.TextualInversion, TextualInversionCheckpointProbe)
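The register_probe() calls above imply a two-level dispatch keyed on storage format ('diffusers' folder vs 'checkpoint' file) plus ModelType. The ModelProbe internals are not shown in this commit; the snippet below is only a sketch of a registry with that shape, with the helper name probe_class_for invented for illustration:

    from typing import Type

    PROBE_REGISTRY: dict[tuple[str, "ModelType"], Type] = {}

    def register_probe(model_format: str, model_type: "ModelType", probe_class: Type) -> None:
        # Remember which probe class handles this (format, type) combination.
        PROBE_REGISTRY[(model_format, model_type)] = probe_class

    def probe_class_for(model_format: str, model_type: "ModelType") -> Type:
        # e.g. probe_class_for('checkpoint', ModelType.Main) would return PipelineCheckpointProbe
        return PROBE_REGISTRY[(model_format, model_type)]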


@@ -11,21 +11,21 @@ from .textual_inversion import TextualInversionModel
 MODEL_CLASSES = {
     BaseModelType.StableDiffusion1: {
-        ModelType.Pipeline: StableDiffusion1Model,
+        ModelType.Main: StableDiffusion1Model,
         ModelType.Vae: VaeModel,
         ModelType.Lora: LoRAModel,
         ModelType.ControlNet: ControlNetModel,
         ModelType.TextualInversion: TextualInversionModel,
     },
     BaseModelType.StableDiffusion2: {
-        ModelType.Pipeline: StableDiffusion2Model,
+        ModelType.Main: StableDiffusion2Model,
         ModelType.Vae: VaeModel,
         ModelType.Lora: LoRAModel,
         ModelType.ControlNet: ControlNetModel,
         ModelType.TextualInversion: TextualInversionModel,
     },
     #BaseModelType.Kandinsky2_1: {
-    #    ModelType.Pipeline: Kandinsky2_1Model,
+    #    ModelType.Main: Kandinsky2_1Model,
     #    ModelType.MoVQ: MoVQModel,
     #    ModelType.Lora: LoRAModel,
     #    ModelType.ControlNet: ControlNetModel,


@@ -18,7 +18,7 @@ class BaseModelType(str, Enum):
     #Kandinsky2_1 = "kandinsky-2.1"

 class ModelType(str, Enum):
-    Pipeline = "pipeline"
+    Main = "main"
     Vae = "vae"
     Lora = "lora"
     ControlNet = "controlnet" # used by model_probe
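Because ModelType subclasses str, the rename changes the serialized value from "pipeline" to "main" along with the attribute name. That value is used verbatim in on-disk paths (models/<base>/<type>/...) and in model keys, which is why the directory-setup code above and the predefined-models file below change in the same commit. A minimal self-contained sketch of the cascade, with enum values taken from this diff and the models root from the initialize_rootdir hunk:

    from enum import Enum
    from pathlib import Path

    class BaseModelType(str, Enum):
        StableDiffusion1 = "sd-1"
        StableDiffusion2 = "sd-2"

    class ModelType(str, Enum):
        Main = "main"        # previously Pipeline = "pipeline"
        Vae = "vae"
        Lora = "lora"

    root = Path("models")
    for base in (BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2):
        for kind in (ModelType.Main, ModelType.Vae, ModelType.Lora):
            print(root / base.value / kind.value)   # e.g. models/sd-1/main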


@@ -40,11 +40,11 @@ class StableDiffusion1Model(DiffusersModel):
     def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
         assert base_model == BaseModelType.StableDiffusion1
-        assert model_type == ModelType.Pipeline
+        assert model_type == ModelType.Main
         super().__init__(
             model_path=model_path,
             base_model=BaseModelType.StableDiffusion1,
-            model_type=ModelType.Pipeline,
+            model_type=ModelType.Main,
         )

     @classmethod
@@ -140,11 +140,11 @@ class StableDiffusion2Model(DiffusersModel):
     def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
         assert base_model == BaseModelType.StableDiffusion2
-        assert model_type == ModelType.Pipeline
+        assert model_type == ModelType.Main
         super().__init__(
             model_path=model_path,
             base_model=BaseModelType.StableDiffusion2,
-            model_type=ModelType.Pipeline,
+            model_type=ModelType.Main,
         )

     @classmethod


@@ -1,58 +1,58 @@
 # This file predefines a few models that the user may want to install.
-sd-1/pipeline/stable-diffusion-v1-5:
+sd-1/main/stable-diffusion-v1-5:
   description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
   repo_id: runwayml/stable-diffusion-v1-5
   recommended: True
   default: True
-sd-1/pipeline/stable-diffusion-inpainting:
+sd-1/main/stable-diffusion-inpainting:
   description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
   repo_id: runwayml/stable-diffusion-inpainting
   recommended: True
-sd-2/pipeline/stable-diffusion-2-1:
+sd-2/main/stable-diffusion-2-1:
   description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-1
   recommended: True
-sd-2/pipeline/stable-diffusion-2-inpainting:
+sd-2/main/stable-diffusion-2-inpainting:
   description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-inpainting
   recommended: False
-sd-1/pipeline/Analog-Diffusion:
+sd-1/main/Analog-Diffusion:
   description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
   repo_id: wavymulder/Analog-Diffusion
   recommended: false
-sd-1/pipeline/Deliberate:
+sd-1/main/Deliberate:
   description: Versatile model that produces detailed images up to 768px (4.27 GB)
   repo_id: XpucT/Deliberate
   recommended: False
-sd-1/pipeline/Dungeons-and-Diffusion:
+sd-1/main/Dungeons-and-Diffusion:
   description: Dungeons & Dragons characters (2.13 GB)
   repo_id: 0xJustin/Dungeons-and-Diffusion
   recommended: False
-sd-1/pipeline/dreamlike-photoreal-2:
+sd-1/main/dreamlike-photoreal-2:
   description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
   repo_id: dreamlike-art/dreamlike-photoreal-2.0
   recommended: False
-sd-1/pipeline/Inkpunk-Diffusion:
+sd-1/main/Inkpunk-Diffusion:
   description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
   repo_id: Envvi/Inkpunk-Diffusion
   recommended: False
-sd-1/pipeline/openjourney:
+sd-1/main/openjourney:
   description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
   repo_id: prompthero/openjourney
   recommended: False
-sd-1/pipeline/portraitplus:
+sd-1/main/portraitplus:
   description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
   repo_id: wavymulder/portraitplus
   recommended: False
-sd-1/pipeline/seek.art_MEGA:
+sd-1/main/seek.art_MEGA:
   repo_id: coreco/seek.art_MEGA
   description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
   recommended: False
-sd-1/pipeline/trinart_stable_diffusion_v2:
+sd-1/main/trinart_stable_diffusion_v2:
   description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
   repo_id: naclbit/trinart_stable_diffusion_v2
   recommended: False
-sd-1/pipeline/waifu-diffusion:
+sd-1/main/waifu-diffusion:
   description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
   repo_id: hakurei/waifu-diffusion
   recommended: False


@@ -127,7 +127,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         self.nextrely = top_of_table
         self.pipeline_models = self.add_pipeline_widgets(
-            model_type=ModelType.Pipeline,
+            model_type=ModelType.Main,
             window_width=window_width,
             exclude = self.starter_models
         )
@@ -303,7 +303,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
     ### Tab for arbitrary diffusers widgets ###
     def add_pipeline_widgets(self,
-                             model_type: ModelType=ModelType.Pipeline,
+                             model_type: ModelType=ModelType.Main,
                              window_width: int=120,
                              **kwargs,
                              )->dict[str,npyscreen.widget]: