From 132790eebe0a49b95699367a170549d8b7a38201 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Wed, 6 Mar 2024 19:42:47 +1100 Subject: [PATCH] tidy(nodes): use canonical capitalizations --- invokeai/app/invocations/compel.py | 26 ++++---- invokeai/app/invocations/latent.py | 8 +-- invokeai/app/invocations/model.py | 63 +++++++++---------- invokeai/app/invocations/sdxl.py | 22 +++---- invokeai/backend/model_manager/config.py | 2 +- .../model_manager/load/model_loaders/lora.py | 2 +- .../model_manager/load/model_loaders/vae.py | 2 +- invokeai/invocation_api/__init__.py | 16 ++--- 8 files changed, 70 insertions(+), 71 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index a2e0bd06c4..c6b4e378f8 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -20,7 +20,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( from invokeai.backend.util.devices import torch_dtype from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output -from .model import ClipField +from .model import CLIPField # unconditioned: Optional[torch.Tensor] @@ -46,7 +46,7 @@ class CompelInvocation(BaseInvocation): description=FieldDescriptions.compel_prompt, ui_component=UIComponent.Textarea, ) - clip: ClipField = InputField( + clip: CLIPField = InputField( title="CLIP", description=FieldDescriptions.clip, input=Input.Connection, @@ -127,7 +127,7 @@ class SDXLPromptInvocationBase: def run_clip_compel( self, context: InvocationContext, - clip_field: ClipField, + clip_field: CLIPField, prompt: str, get_pooled: bool, lora_prefix: str, @@ -253,8 +253,8 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): crop_left: int = InputField(default=0, description="") target_width: int = InputField(default=1024, description="") target_height: int = InputField(default=1024, description="") - clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1") - clip2: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2") + clip: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1") + clip2: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2") @torch.no_grad() def invoke(self, context: InvocationContext) -> ConditioningOutput: @@ -340,7 +340,7 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase crop_top: int = InputField(default=0, description="") crop_left: int = InputField(default=0, description="") aesthetic_score: float = InputField(default=6.0, description=FieldDescriptions.sdxl_aesthetic) - clip2: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection) + clip2: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection) @torch.no_grad() def invoke(self, context: InvocationContext) -> ConditioningOutput: @@ -370,10 +370,10 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase @invocation_output("clip_skip_output") -class ClipSkipInvocationOutput(BaseInvocationOutput): - """Clip skip node output""" +class CLIPSkipInvocationOutput(BaseInvocationOutput): + """CLIP skip node output""" - clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") + clip: Optional[CLIPField] = 
OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") @invocation( @@ -383,15 +383,15 @@ class ClipSkipInvocationOutput(BaseInvocationOutput): category="conditioning", version="1.0.0", ) -class ClipSkipInvocation(BaseInvocation): +class CLIPSkipInvocation(BaseInvocation): """Skip layers in clip text_encoder model.""" - clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP") + clip: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP") skipped_layers: int = InputField(default=0, ge=0, description=FieldDescriptions.skipped_layers) - def invoke(self, context: InvocationContext) -> ClipSkipInvocationOutput: + def invoke(self, context: InvocationContext) -> CLIPSkipInvocationOutput: self.clip.skipped_layers += self.skipped_layers - return ClipSkipInvocationOutput( + return CLIPSkipInvocationOutput( clip=self.clip, ) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index c3704b2ed8..f21e28cfa4 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -76,7 +76,7 @@ from .baseinvocation import ( invocation_output, ) from .controlnet_image_processors import ControlField -from .model import ModelField, UNetField, VaeField +from .model import ModelField, UNetField, VAEField if choose_torch_device() == torch.device("mps"): from torch import mps @@ -119,7 +119,7 @@ class SchedulerInvocation(BaseInvocation): class CreateDenoiseMaskInvocation(BaseInvocation): """Creates mask for denoising model run.""" - vae: VaeField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0) + vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0) image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1) mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2) tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3) @@ -832,7 +832,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard): description=FieldDescriptions.latents, input=Input.Connection, ) - vae: VaeField = InputField( + vae: VAEField = InputField( description=FieldDescriptions.vae, input=Input.Connection, ) @@ -1010,7 +1010,7 @@ class ImageToLatentsInvocation(BaseInvocation): image: ImageField = InputField( description="The image to encode", ) - vae: VaeField = InputField( + vae: VAEField = InputField( description=FieldDescriptions.vae, input=Input.Connection, ) diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 648f34e749..98aacb793d 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -29,20 +29,19 @@ class LoRAField(BaseModel): class UNetField(BaseModel): unet: ModelField = Field(description="Info to load unet submodel") scheduler: ModelField = Field(description="Info to load scheduler submodel") - loras: List[LoRAField] = Field(description="Loras to apply on model loading") + loras: List[LoRAField] = Field(description="LoRAs to apply on model loading") seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') freeu_config: Optional[FreeUConfig] = Field(default=None, description="FreeU configuration") -class ClipField(BaseModel): +class CLIPField(BaseModel): tokenizer: ModelField = Field(description="Info to load tokenizer submodel") text_encoder: 
ModelField = Field(description="Info to load text_encoder submodel") skipped_layers: int = Field(description="Number of skipped layers in text_encoder") - loras: List[LoRAField] = Field(description="Loras to apply on model loading") + loras: List[LoRAField] = Field(description="LoRAs to apply on model loading") -class VaeField(BaseModel): - # TODO: better naming? +class VAEField(BaseModel): vae: ModelField = Field(description="Info to load vae submodel") seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') @@ -58,14 +57,14 @@ class UNetOutput(BaseInvocationOutput): class VAEOutput(BaseInvocationOutput): """Base class for invocations that output a VAE field""" - vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") + vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE") @invocation_output("clip_output") class CLIPOutput(BaseInvocationOutput): """Base class for invocations that output a CLIP field""" - clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP") + clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP") @invocation_output("model_loader_output") @@ -101,21 +100,21 @@ class MainModelLoaderInvocation(BaseInvocation): return ModelLoaderOutput( unet=UNetField(unet=unet, scheduler=scheduler, loras=[]), - clip=ClipField(tokenizer=tokenizer, text_encoder=text_encoder, loras=[], skipped_layers=0), - vae=VaeField(vae=vae), + clip=CLIPField(tokenizer=tokenizer, text_encoder=text_encoder, loras=[], skipped_layers=0), + vae=VAEField(vae=vae), ) @invocation_output("lora_loader_output") -class LoraLoaderOutput(BaseInvocationOutput): +class LoRALoaderOutput(BaseInvocationOutput): """Model loader output""" unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet") - clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") + clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") @invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.1") -class LoraLoaderInvocation(BaseInvocation): +class LoRALoaderInvocation(BaseInvocation): """Apply selected lora to unet and text_encoder.""" lora: ModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") @@ -126,26 +125,26 @@ class LoraLoaderInvocation(BaseInvocation): input=Input.Connection, title="UNet", ) - clip: Optional[ClipField] = InputField( + clip: Optional[CLIPField] = InputField( default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP", ) - def invoke(self, context: InvocationContext) -> LoraLoaderOutput: + def invoke(self, context: InvocationContext) -> LoRALoaderOutput: lora_key = self.lora.key if not context.models.exists(lora_key): raise Exception(f"Unkown lora: {lora_key}!") if self.unet is not None and any(lora.lora.key == lora_key for lora in self.unet.loras): - raise Exception(f'Lora "{lora_key}" already applied to unet') + raise Exception(f'LoRA "{lora_key}" already applied to unet') if self.clip is not None and any(lora.lora.key == lora_key for lora in self.clip.loras): - raise Exception(f'Lora "{lora_key}" already applied to clip') + raise Exception(f'LoRA "{lora_key}" already applied to clip') - output = LoraLoaderOutput() + output = LoRALoaderOutput() if self.unet is not None: output.unet = self.unet.model_copy(deep=True) @@ -169,12 +168,12 @@ class 
LoraLoaderInvocation(BaseInvocation): @invocation_output("sdxl_lora_loader_output") -class SDXLLoraLoaderOutput(BaseInvocationOutput): +class SDXLLoRALoaderOutput(BaseInvocationOutput): """SDXL LoRA Loader Output""" unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet") - clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 1") - clip2: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 2") + clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 1") + clip2: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 2") @invocation( @@ -184,7 +183,7 @@ class SDXLLoraLoaderOutput(BaseInvocationOutput): category="model", version="1.0.1", ) -class SDXLLoraLoaderInvocation(BaseInvocation): +class SDXLLoRALoaderInvocation(BaseInvocation): """Apply selected lora to unet and text_encoder.""" lora: ModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA") @@ -195,35 +194,35 @@ class SDXLLoraLoaderInvocation(BaseInvocation): input=Input.Connection, title="UNet", ) - clip: Optional[ClipField] = InputField( + clip: Optional[CLIPField] = InputField( default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1", ) - clip2: Optional[ClipField] = InputField( + clip2: Optional[CLIPField] = InputField( default=None, description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2", ) - def invoke(self, context: InvocationContext) -> SDXLLoraLoaderOutput: + def invoke(self, context: InvocationContext) -> SDXLLoRALoaderOutput: lora_key = self.lora.key if not context.models.exists(lora_key): raise Exception(f"Unknown lora: {lora_key}!") if self.unet is not None and any(lora.lora.key == lora_key for lora in self.unet.loras): - raise Exception(f'Lora "{lora_key}" already applied to unet') + raise Exception(f'LoRA "{lora_key}" already applied to unet') if self.clip is not None and any(lora.lora.key == lora_key for lora in self.clip.loras): - raise Exception(f'Lora "{lora_key}" already applied to clip') + raise Exception(f'LoRA "{lora_key}" already applied to clip') if self.clip2 is not None and any(lora.lora.key == lora_key for lora in self.clip2.loras): - raise Exception(f'Lora "{lora_key}" already applied to clip2') + raise Exception(f'LoRA "{lora_key}" already applied to clip2') - output = SDXLLoraLoaderOutput() + output = SDXLLoRALoaderOutput() if self.unet is not None: output.unet = self.unet.model_copy(deep=True) @@ -256,7 +255,7 @@ class SDXLLoraLoaderInvocation(BaseInvocation): @invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.1") -class VaeLoaderInvocation(BaseInvocation): +class VAELoaderInvocation(BaseInvocation): """Loads a VAE model, outputting a VaeLoaderOutput""" vae_model: ModelField = InputField( @@ -271,7 +270,7 @@ class VaeLoaderInvocation(BaseInvocation): if not context.models.exists(key): raise Exception(f"Unkown vae: {key}!") - return VAEOutput(vae=VaeField(vae=self.vae_model)) + return VAEOutput(vae=VAEField(vae=self.vae_model)) @invocation_output("seamless_output") @@ -279,7 +278,7 @@ class SeamlessModeOutput(BaseInvocationOutput): """Modified Seamless Model output""" unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet") - vae: Optional[VaeField] = OutputField(default=None, 
description=FieldDescriptions.vae, title="VAE") + vae: Optional[VAEField] = OutputField(default=None, description=FieldDescriptions.vae, title="VAE") @invocation( @@ -298,7 +297,7 @@ class SeamlessModeInvocation(BaseInvocation): input=Input.Connection, title="UNet", ) - vae: Optional[VaeField] = InputField( + vae: Optional[VAEField] = InputField( default=None, description=FieldDescriptions.vae_model, input=Input.Connection, diff --git a/invokeai/app/invocations/sdxl.py b/invokeai/app/invocations/sdxl.py index 77c825a3eb..17b6ef2053 100644 --- a/invokeai/app/invocations/sdxl.py +++ b/invokeai/app/invocations/sdxl.py @@ -8,7 +8,7 @@ from .baseinvocation import ( invocation, invocation_output, ) -from .model import ClipField, ModelField, UNetField, VaeField +from .model import CLIPField, ModelField, UNetField, VAEField @invocation_output("sdxl_model_loader_output") @@ -16,9 +16,9 @@ class SDXLModelLoaderOutput(BaseInvocationOutput): """SDXL base model loader output""" unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet") - clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP 1") - clip2: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP 2") - vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") + clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP 1") + clip2: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP 2") + vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE") @invocation_output("sdxl_refiner_model_loader_output") @@ -26,8 +26,8 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput): """SDXL refiner model loader output""" unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet") - clip2: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP 2") - vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") + clip2: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP 2") + vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE") @invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.1") @@ -56,9 +56,9 @@ class SDXLModelLoaderInvocation(BaseInvocation): return SDXLModelLoaderOutput( unet=UNetField(unet=unet, scheduler=scheduler, loras=[]), - clip=ClipField(tokenizer=tokenizer, text_encoder=text_encoder, loras=[], skipped_layers=0), - clip2=ClipField(tokenizer=tokenizer2, text_encoder=text_encoder2, loras=[], skipped_layers=0), - vae=VaeField(vae=vae), + clip=CLIPField(tokenizer=tokenizer, text_encoder=text_encoder, loras=[], skipped_layers=0), + clip2=CLIPField(tokenizer=tokenizer2, text_encoder=text_encoder2, loras=[], skipped_layers=0), + vae=VAEField(vae=vae), ) @@ -92,6 +92,6 @@ class SDXLRefinerModelLoaderInvocation(BaseInvocation): return SDXLRefinerModelLoaderOutput( unet=UNetField(unet=unet, scheduler=scheduler, loras=[]), - clip2=ClipField(tokenizer=tokenizer2, text_encoder=text_encoder2, loras=[], skipped_layers=0), - vae=VaeField(vae=vae), + clip2=CLIPField(tokenizer=tokenizer2, text_encoder=text_encoder2, loras=[], skipped_layers=0), + vae=VAEField(vae=vae), ) diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py index 8f0f437eb8..250e8c5f83 100644 --- a/invokeai/backend/model_manager/config.py +++ b/invokeai/backend/model_manager/config.py @@ -310,7 +310,7 @@ class IPAdapterConfig(ModelConfigBase): class 
CLIPVisionDiffusersConfig(ModelConfigBase): - """Model config for ClipVision.""" + """Model config for CLIPVision.""" type: Literal[ModelType.CLIPVision] = ModelType.CLIPVision format: Literal[ModelFormat.Diffusers] diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py index 436442a622..20a39e56c3 100644 --- a/invokeai/backend/model_manager/load/model_loaders/lora.py +++ b/invokeai/backend/model_manager/load/model_loaders/lora.py @@ -24,7 +24,7 @@ from .. import ModelLoader, ModelLoaderRegistry @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers) @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.LyCORIS) -class LoraLoader(ModelLoader): +class LoRALoader(ModelLoader): """Class to load LoRA models.""" # We cheat a little bit to get access to the model base diff --git a/invokeai/backend/model_manager/load/model_loaders/vae.py b/invokeai/backend/model_manager/load/model_loaders/vae.py index e18351138f..72e165d0f9 100644 --- a/invokeai/backend/model_manager/load/model_loaders/vae.py +++ b/invokeai/backend/model_manager/load/model_loaders/vae.py @@ -23,7 +23,7 @@ from .generic_diffusers import GenericDiffusersLoader @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.VAE, format=ModelFormat.Diffusers) @ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.VAE, format=ModelFormat.Checkpoint) @ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.VAE, format=ModelFormat.Checkpoint) -class VaeLoader(GenericDiffusersLoader): +class VAELoader(GenericDiffusersLoader): """Class to load VAE models.""" def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool: diff --git a/invokeai/invocation_api/__init__.py b/invokeai/invocation_api/__init__.py index 492b5c1f4c..c15beb446e 100644 --- a/invokeai/invocation_api/__init__.py +++ b/invokeai/invocation_api/__init__.py @@ -33,15 +33,15 @@ from invokeai.app.invocations.fields import ( from invokeai.app.invocations.latent import SchedulerOutput from invokeai.app.invocations.metadata import MetadataItemField, MetadataItemOutput, MetadataOutput from invokeai.app.invocations.model import ( - ClipField, + CLIPField, CLIPOutput, - LoraLoaderOutput, + LoRALoaderOutput, ModelField, ModelLoaderOutput, - SDXLLoraLoaderOutput, + SDXLLoRALoaderOutput, UNetField, UNetOutput, - VaeField, + VAEField, VAEOutput, ) from invokeai.app.invocations.primitives import ( @@ -116,14 +116,14 @@ __all__ = [ # invokeai.app.invocations.model "ModelField", "UNetField", - "ClipField", - "VaeField", + "CLIPField", + "VAEField", "UNetOutput", "VAEOutput", "CLIPOutput", "ModelLoaderOutput", - "LoraLoaderOutput", - "SDXLLoraLoaderOutput", + "LoRALoaderOutput", + "SDXLLoRALoaderOutput", # invokeai.app.invocations.primitives "BooleanCollectionOutput", "BooleanOutput",
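
Note on downstream impact (not part of the patch): invokeai/invocation_api/__init__.py re-exports the renamed classes to external nodes, so any node that imports ClipField, VaeField, LoraLoaderOutput, or SDXLLoraLoaderOutput will break on upgrade, and the patch ships no compatibility aliases. Below is a minimal sketch of a deprecation shim that could sit in invokeai/invocation_api/__init__.py, assuming nothing like it already exists; the _RENAMED table and the module-level __getattr__ are illustrative, not part of this patch.

    import warnings

    from invokeai.app.invocations.model import (
        CLIPField,
        LoRALoaderOutput,
        SDXLLoRALoaderOutput,
        VAEField,
    )

    # Hypothetical shim, not part of this patch: old name -> renamed class.
    # Module-level __getattr__ (PEP 562) runs only when normal attribute
    # lookup fails, so the canonical names imported above are unaffected.
    _RENAMED = {
        "ClipField": CLIPField,
        "VaeField": VAEField,
        "LoraLoaderOutput": LoRALoaderOutput,
        "SDXLLoraLoaderOutput": SDXLLoRALoaderOutput,
    }

    def __getattr__(name: str):
        if name in _RENAMED:
            warnings.warn(
                f"{name} is deprecated; use {_RENAMED[name].__name__} instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            return _RENAMED[name]
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

With the shim in place, "from invokeai.invocation_api import ClipField" keeps working and emits a DeprecationWarning naming CLIPField; each alias is the renamed class itself rather than a subclass, so isinstance() checks and pydantic schemas are unaffected.

Alternatively, node authors can migrate in one pass. A small sketch, assuming external nodes live in a local "nodes/" directory; the directory name and the subset of renames listed are assumptions, not part of this patch:

    import pathlib
    import re

    # Old -> canonical names introduced by this patch; extend as needed.
    RENAMES = {
        "ClipSkipInvocationOutput": "CLIPSkipInvocationOutput",
        "ClipSkipInvocation": "CLIPSkipInvocation",
        "ClipField": "CLIPField",
        "VaeField": "VAEField",
        "LoraLoaderOutput": "LoRALoaderOutput",
        "SDXLLoraLoaderOutput": "SDXLLoRALoaderOutput",
    }

    # \b anchors prevent partial matches inside longer identifiers.
    pattern = re.compile(r"\b(" + "|".join(RENAMES) + r")\b")

    for path in pathlib.Path("nodes").rglob("*.py"):
        text = path.read_text()
        new_text = pattern.sub(lambda m: RENAMES[m.group(1)], text)
        if new_text != text:
            path.write_text(new_text)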