Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Add model loader node; unet, clip, vae fields; change compel node to clip field
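In graph terms, the change replaces the compel node's bare "model" string input with a "clip" input that is meant to be fed from the new model_loader node's "clip" output, while unet and vae travel as their own field groups. A purely schematic wiring sketch follows (plain Python dicts for illustration only, not InvokeAI's actual graph schema; the node ids and the model name are made up):

# Schematic only: the node types and field names ("model_loader", "compel",
# "model_name", "prompt", "clip") come from this commit, but the dict layout,
# node ids and model name are illustrative, not InvokeAI's real graph format.
graph = {
    "nodes": {
        "loader": {"type": "model_loader", "model_name": "stable-diffusion-1.5"},
        "prompt_cond": {"type": "compel", "prompt": "a watercolor fox"},
    },
    "edges": [
        # model_loader.clip (tokenizer + text_encoder records) -> compel.clip
        {"source": ("loader", "clip"), "destination": ("prompt_cond", "clip")},
        # the loader's unet and vae outputs get wired to denoise/decode nodes the same way
    ],
}

The diff below first updates the compel invocation to accept that clip field, then adds the new invocations/model.py with the field types and the loader node.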
@@ -3,6 +3,8 @@ from pydantic import BaseModel, Field
 
 from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
 
+from .model import ClipField
+
 from ...backend.util.devices import choose_torch_device, torch_dtype
 from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
 from ...backend.stable_diffusion.textual_inversion_manager import TextualInversionManager

@@ -41,7 +43,7 @@ class CompelInvocation(BaseInvocation):
     type: Literal["compel"] = "compel"
 
     prompt: str = Field(default="", description="Prompt")
-    model: str = Field(default="", description="Model to use")
+    clip: ClipField = Field(None, description="Clip to use")
 
     # Schema customisation
     class Config(InvocationConfig):

@@ -58,12 +60,15 @@ class CompelInvocation(BaseInvocation):
     def invoke(self, context: InvocationContext) -> CompelOutput:
 
         # TODO: load without model
-        model = context.services.model_manager.get_model(self.model)
         text_encoder_info = context.services.model_manager.get_model(
-            self.model, SDModelType.diffusers, SDModelType.text_encoder
+            model_name=self.clip.text_encoder.model_name,
+            model_type=SDModelType[self.clip.text_encoder.model_type],
+            submodel=SDModelType[self.clip.text_encoder.submodel],
         )
         tokenizer_info = context.services.model_manager.get_model(
-            self.model, SDModelType.diffusers, SDModelType.tokenizer
+            model_name=self.clip.tokenizer.model_name,
+            model_type=SDModelType[self.clip.tokenizer.model_type],
+            submodel=SDModelType[self.clip.tokenizer.submodel],
        )
         with text_encoder_info.context as text_encoder,\
             tokenizer_info.context as tokenizer:
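To make the new call pattern concrete, here is a minimal, self-contained sketch of what the compel node's invoke() now relies on. Everything below (the toy SDModelType enum, ModelHandle, FakeModelManager, and the "sd-1.5" name) is a stand-in rather than InvokeAI's real model manager: the clip input carries plain strings, SDModelType[...] turns them back into enum members, and the heavy tokenizer / text encoder objects are only materialized inside the with-handle.context blocks.

from contextlib import contextmanager
from enum import Enum
from typing import Optional

from pydantic import BaseModel

class SDModelType(Enum):          # toy stand-in for invokeai's SDModelType
    diffusers = 1
    tokenizer = 2
    text_encoder = 3

class ModelInfo(BaseModel):       # same shape as the new invocations/model.py
    model_name: str
    model_type: str
    submodel: Optional[str] = None

class ClipField(BaseModel):
    tokenizer: ModelInfo
    text_encoder: ModelInfo

class ModelHandle:
    """Lazy handle: the heavy object only exists inside 'with handle.context'."""
    def __init__(self, model_name, model_type, submodel):
        self.model_name, self.model_type, self.submodel = model_name, model_type, submodel

    @property
    def context(self):
        @contextmanager
        def _ctx():
            model = f"<loaded {self.submodel.name} of {self.model_name}>"  # pretend to load
            try:
                yield model
            finally:
                pass  # a real manager would offload / release memory here
        return _ctx()

class FakeModelManager:           # stand-in for context.services.model_manager
    def get_model(self, model_name, model_type, submodel):
        return ModelHandle(model_name, model_type, submodel)

# What compel's invoke() now does with its ClipField input:
clip = ClipField(
    tokenizer=ModelInfo(model_name="sd-1.5", model_type="diffusers", submodel="tokenizer"),
    text_encoder=ModelInfo(model_name="sd-1.5", model_type="diffusers", submodel="text_encoder"),
)
manager = FakeModelManager()
text_encoder_info = manager.get_model(
    model_name=clip.text_encoder.model_name,
    model_type=SDModelType[clip.text_encoder.model_type],
    submodel=SDModelType[clip.text_encoder.submodel],
)
tokenizer_info = manager.get_model(
    model_name=clip.tokenizer.model_name,
    model_type=SDModelType[clip.tokenizer.model_type],
    submodel=SDModelType[clip.tokenizer.submodel],
)
with text_encoder_info.context as text_encoder, tokenizer_info.context as tokenizer:
    print(text_encoder, tokenizer)

The point of the indirection is that nodes pass around cheap (model_name, model_type, submodel) records and let the model manager decide when weights actually occupy memory.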
invokeai/app/invocations/model.py (new file, 131 lines)

@@ -0,0 +1,131 @@
from typing import Literal, Optional, Union
from pydantic import BaseModel, Field

from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig

from ...backend.util.devices import choose_torch_device, torch_dtype
from ...backend.model_management import SDModelType

class ModelInfo(BaseModel):
    model_name: str = Field(description="Info to load unet submodel")
    model_type: str = Field(description="Info to load unet submodel")
    submodel: Optional[str] = Field(description="Info to load unet submodel")

class UNetField(BaseModel):
    unet: ModelInfo = Field(description="Info to load unet submodel")
    scheduler: ModelInfo = Field(description="Info to load scheduler submodel")
    # loras: List[ModelInfo]

class ClipField(BaseModel):
    tokenizer: ModelInfo = Field(description="Info to load tokenizer submodel")
    text_encoder: ModelInfo = Field(description="Info to load text_encoder submodel")
    # loras: List[ModelInfo]

class VaeField(BaseModel):
    # TODO: better naming?
    vae: ModelInfo = Field(description="Info to load vae submodel")


class ModelLoaderOutput(BaseInvocationOutput):
    """Model loader output"""

    #fmt: off
    type: Literal["model_loader_output"] = "model_loader_output"

    unet: UNetField = Field(default=None, description="UNet submodel")
    clip: ClipField = Field(default=None, description="Tokenizer and text_encoder submodels")
    vae: VaeField = Field(default=None, description="Vae submodel")
    #fmt: on


class ModelLoaderInvocation(BaseInvocation):
    """Loading submodels of selected model."""

    type: Literal["model_loader"] = "model_loader"

    model_name: str = Field(default="", description="Model to load")
    # TODO: precision?

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["model", "loader"],
                "type_hints": {
                    "model_name": "model"  # TODO: rename to model_name?
                }
            },
        }

    def invoke(self, context: InvocationContext) -> ModelLoaderOutput:

        # TODO: not found exceptions
        if not context.services.model_manager.valid_model(
            model_name=self.model_name,
            model_type=SDModelType.diffusers,
        ):
            raise Exception(f"Unknown model name: {self.model_name}!")

        """
        if not context.services.model_manager.valid_model(
            model_name=self.model_name,
            model_type=SDModelType.diffusers,
            submodel=SDModelType.tokenizer,
        ):
            raise Exception(
                f"Failed to find tokenizer submodel in {self.model_name}! Check if model corrupted"
            )

        if not context.services.model_manager.valid_model(
            model_name=self.model_name,
            model_type=SDModelType.diffusers,
            submodel=SDModelType.text_encoder,
        ):
            raise Exception(
                f"Failed to find text_encoder submodel in {self.model_name}! Check if model corrupted"
            )

        if not context.services.model_manager.valid_model(
            model_name=self.model_name,
            model_type=SDModelType.diffusers,
            submodel=SDModelType.unet,
        ):
            raise Exception(
                f"Failed to find unet submodel from {self.model_name}! Check if model corrupted"
            )
        """


        return ModelLoaderOutput(
            unet=UNetField(
                unet=ModelInfo(
                    model_name=self.model_name,
                    model_type=SDModelType.diffusers.name,
                    submodel=SDModelType.unet.name,
                ),
                scheduler=ModelInfo(
                    model_name=self.model_name,
                    model_type=SDModelType.diffusers.name,
                    submodel=SDModelType.scheduler.name,
                ),
            ),
            clip=ClipField(
                tokenizer=ModelInfo(
                    model_name=self.model_name,
                    model_type=SDModelType.diffusers.name,
                    submodel=SDModelType.tokenizer.name,
                ),
                text_encoder=ModelInfo(
                    model_name=self.model_name,
                    model_type=SDModelType.diffusers.name,
                    submodel=SDModelType.text_encoder.name,
                ),
            ),
            vae=VaeField(
                vae=ModelInfo(
                    model_name=self.model_name,
                    model_type=SDModelType.diffusers.name,
                    submodel=SDModelType.vae.name,
                ),
            )
        )
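Worth noting: the loader itself loads nothing. Its invoke() only validates the model name and fans it out into (model_name, model_type, submodel) records, with the SDModelType members flattened to strings via .name so the fields stay serializable as they travel through the graph. Below is a small sketch of that round trip, using a toy SDModelType and a mirrored ModelInfo (the "stable-diffusion-1.5" name is made up); the by-name lookup at the end is what lets consumers do SDModelType[info.model_type] later.

from enum import Enum
from typing import Optional

from pydantic import BaseModel

class SDModelType(Enum):     # toy stand-in with the member names used above
    diffusers = 1
    unet = 2
    scheduler = 3

class ModelInfo(BaseModel):  # mirrors invocations/model.py
    model_name: str
    model_type: str
    submodel: Optional[str] = None

# What ModelLoaderInvocation.invoke() effectively emits for the unet slot:
info = ModelInfo(
    model_name="stable-diffusion-1.5",        # made-up model name
    model_type=SDModelType.diffusers.name,    # enum member -> "diffusers"
    submodel=SDModelType.unet.name,           # enum member -> "unet"
)
print(info.json())  # plain strings, so the record can move through the graph as JSON

# ...and what a downstream node does to get the enum members back:
assert SDModelType[info.model_type] is SDModelType.diffusers
assert SDModelType[info.submodel] is SDModelType.unet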