Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Rewrite latent nodes to new model manager
This commit is contained in:
parent 3b2a054f7a
commit bc96727cbe
@@ -7,6 +7,7 @@ import einops
 import torch
 from diffusers import DiffusionPipeline
 from diffusers.schedulers import SchedulerMixin as Scheduler
+from diffusers.image_processor import VaeImageProcessor
 from pydantic import BaseModel, Field
 
 from invokeai.app.util.misc import SEED_MAX, get_random_seed
@@ -26,6 +27,9 @@ from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
 from .compel import ConditioningField
 from .image import ImageField, ImageOutput, build_image_output
+from .model import ModelInfo, UNetField, VaeField
+
+from ...backend.model_management import SDModelType
 
 
 class LatentsField(BaseModel):
     """A latents field used for passing latents between invocations"""
@@ -70,9 +74,21 @@ SAMPLER_NAME_VALUES = Literal[
 ]
 
 
-def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
+def get_scheduler(
+    context: InvocationContext,
+    scheduler_info: ModelInfo,
+    scheduler_name: str,
+) -> Scheduler:
+    orig_scheduler_info = context.services.model_manager.get_model(
+        model_name=scheduler_info.model_name,
+        model_type=SDModelType[scheduler_info.model_type],
+        submodel=SDModelType[scheduler_info.submodel],
+    )
+    with orig_scheduler_info.context as orig_scheduler:
+        scheduler_config = orig_scheduler.config
+
     scheduler_class = scheduler_map.get(scheduler_name,'ddim')
-    scheduler = scheduler_class.from_config(model.scheduler.config)
+    scheduler = scheduler_class.from_config(scheduler_config)
     # hack copied over from generate.py
     if not hasattr(scheduler, 'uses_inpainting_model'):
         scheduler.uses_inpainting_model = lambda: False
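
Note: get_scheduler() no longer receives a loaded pipeline. It resolves the
checkpoint's original scheduler through the model manager, copies its config,
and instantiates the user-selected scheduler class from that config. A minimal
sketch of the intended call site, using only names visible in this diff:

    # Sketch of the new call pattern (it mirrors the invoke() bodies below);
    # `context` is the InvocationContext handed to every invocation.
    scheduler = get_scheduler(
        context=context,
        scheduler_info=self.unet.scheduler,  # ModelInfo: model_name / model_type / submodel
        scheduler_name=self.scheduler,       # e.g. "k_lms"; the map lookup defaults to 'ddim'
    )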
@@ -102,12 +118,6 @@ def get_noise(width:int, height:int, device:torch.device, seed:int = 0, latent_c
         # x = (1 - self.perlin) * x + self.perlin * perlin_noise
     return x
 
-class ModelGetter:
-    def get_model(self, context: InvocationContext) -> StableDiffusionGeneratorPipeline:
-        model_manager = context.services.model_manager
-        model_info = model_manager.get_model(self.model,node=self,context=context)
-        return model_info.context
-
 class NoiseInvocation(BaseInvocation):
     """Generates latent noise."""
 
@@ -139,7 +149,7 @@ class NoiseInvocation(BaseInvocation):
 
 
 # Text to image
-class TextToLatentsInvocation(BaseInvocation, ModelGetter):
+class TextToLatentsInvocation(BaseInvocation):
     """Generates latents from conditionings."""
 
     type: Literal["t2l"] = "t2l"
@@ -152,9 +162,10 @@ class TextToLatentsInvocation(BaseInvocation, ModelGetter):
     steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
     cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
     scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
-    model: str = Field(default="", description="The model to use (currently ignored)")
     seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
     seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
+
+    unet: UNetField = Field(default=None, description="UNet submodel")
     # fmt: on
 
     # Schema customisation
@@ -162,9 +173,6 @@ class TextToLatentsInvocation(BaseInvocation, ModelGetter):
         schema_extra = {
             "ui": {
                 "tags": ["latents", "image"],
-                "type_hints": {
-                    "model": "model"
-                }
             },
         }
 
@@ -179,7 +187,7 @@ class TextToLatentsInvocation(BaseInvocation, ModelGetter):
             source_node_id=source_node_id,
        )
 
-    def get_conditioning_data(self, context: InvocationContext, model: StableDiffusionGeneratorPipeline) -> ConditioningData:
+    def get_conditioning_data(self, context: InvocationContext, scheduler) -> ConditioningData:
         c, extra_conditioning_info = context.services.latents.get(self.positive_conditioning.conditioning_name)
         uc, _ = context.services.latents.get(self.negative_conditioning.conditioning_name)
 
@@ -194,9 +202,36 @@ class TextToLatentsInvocation(BaseInvocation, ModelGetter):
                 h_symmetry_time_pct=None,#h_symmetry_time_pct,
                 v_symmetry_time_pct=None#v_symmetry_time_pct,
             ),
-        ).add_scheduler_args_if_applicable(model.scheduler, eta=None)#ddim_eta)
+        ).add_scheduler_args_if_applicable(scheduler, eta=None)#ddim_eta)
         return conditioning_data
 
+    def create_pipeline(self, unet, scheduler) -> StableDiffusionGeneratorPipeline:
+        configure_model_padding(
+            unet,
+            self.seamless,
+            self.seamless_axes,
+        )
+
+        class FakeVae:
+            class FakeVaeConfig:
+                def __init__(self):
+                    self.block_out_channels = [0]
+
+            def __init__(self):
+                self.config = FakeVae.FakeVaeConfig()
+
+        return StableDiffusionGeneratorPipeline(
+            vae=FakeVae(), # TODO: oh...
+            text_encoder=None,
+            tokenizer=None,
+            unet=unet,
+            scheduler=scheduler,
+            safety_checker=None,
+            feature_extractor=None,
+            requires_safety_checker=False,
+            precision="float16" if unet.dtype == torch.float16 else "float32",
+            #precision="float16", # TODO:
+        )
+
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         noise = context.services.latents.get(self.noise.latents_name)
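
Note: create_pipeline() builds a StableDiffusionGeneratorPipeline around only
the UNet and the scheduler. The FakeVae stub exists because the diffusers
pipeline constructor derives its VAE scale factor from
vae.config.block_out_channels; a sketch of that interaction (the scale-factor
formula is from diffusers of this era, not from this diff):

    # The stub only has to satisfy len(vae.config.block_out_channels):
    fake = FakeVae()
    vae_scale_factor = 2 ** (len(fake.config.block_out_channels) - 1)  # == 1 here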
@@ -208,12 +243,26 @@ class TextToLatentsInvocation(BaseInvocation, ModelGetter):
         def step_callback(state: PipelineIntermediateState):
             self.dispatch_progress(context, source_node_id, state)
 
-        with self.get_model(context) as model:
-            conditioning_data = self.get_conditioning_data(context, model)
+        #unet_info = context.services.model_manager.get_model(**self.unet.unet.dict())
+        unet_info = context.services.model_manager.get_model(
+            model_name=self.unet.unet.model_name,
+            model_type=SDModelType[self.unet.unet.model_type],
+            submodel=SDModelType[self.unet.unet.submodel] if self.unet.unet.submodel else None,
+        )
+
+        with unet_info.context as unet:
+            scheduler = get_scheduler(
+                context=context,
+                scheduler_info=self.unet.scheduler,
+                scheduler_name=self.scheduler,
+            )
+
+            pipeline = self.create_pipeline(unet, scheduler)
+            conditioning_data = self.get_conditioning_data(context, scheduler)
 
             # TODO: Verify the noise is the right size
-            result_latents, result_attention_map_saver = model.latents_from_embeddings(
-                latents=torch.zeros_like(noise, dtype=torch_dtype(model.device)),
+            result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
+                latents=torch.zeros_like(noise, dtype=torch_dtype(unet.device)),
                 noise=noise,
                 num_inference_steps=self.steps,
                 conditioning_data=conditioning_data,
@@ -229,30 +278,8 @@ class TextToLatentsInvocation(BaseInvocation, ModelGetter):
             latents=LatentsField(latents_name=name)
         )
 
-    def get_model(self, context: InvocationContext) -> StableDiffusionGeneratorPipeline:
-        model_ctx = super().get_model(context)
-
-        with model_ctx as model:
-            model.scheduler = get_scheduler(
-                model=model,
-                scheduler_name=self.scheduler
-            )
-
-            if isinstance(model, DiffusionPipeline):
-                for component in [model.unet, model.vae]:
-                    configure_model_padding(component,
-                        self.seamless,
-                        self.seamless_axes
-                    )
-            else:
-                configure_model_padding(model,
-                    self.seamless,
-                    self.seamless_axes
-                )
-            return model_ctx
-
-
-class LatentsToLatentsInvocation(TextToLatentsInvocation, ModelGetter):
+
+class LatentsToLatentsInvocation(TextToLatentsInvocation):
     """Generates latents using latents as base image."""
 
     type: Literal["l2l"] = "l2l"
@@ -266,9 +293,6 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation, ModelGetter):
         schema_extra = {
             "ui": {
                 "tags": ["latents"],
-                "type_hints": {
-                    "model": "model"
-                }
             },
         }
 
@@ -283,22 +307,35 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation, ModelGetter):
         def step_callback(state: PipelineIntermediateState):
             self.dispatch_progress(context, source_node_id, state)
 
-        with self.get_model(context) as model:
-            conditioning_data = self.get_conditioning_data(model)
+        #unet_info = context.services.model_manager.get_model(**self.unet.unet.dict())
+        unet_info = context.services.model_manager.get_model(
+            model_name=self.unet.unet.model_name,
+            model_type=SDModelType[self.unet.unet.model_type],
+            submodel=SDModelType[self.unet.unet.submodel] if self.unet.unet.submodel else None,
+        )
+
+        with unet_info.context as unet:
+            scheduler = get_scheduler(
+                context=context,
+                scheduler_info=self.unet.scheduler,
+                scheduler_name=self.scheduler,
+            )
+
+            pipeline = self.create_pipeline(unet, scheduler)
+            conditioning_data = self.get_conditioning_data(context, scheduler)
 
             # TODO: Verify the noise is the right size
 
             initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
-                latent, device=model.device, dtype=latent.dtype
+                latent, device=unet.device, dtype=latent.dtype
             )
 
-            timesteps, _ = model.get_img2img_timesteps(
+            timesteps, _ = pipeline.get_img2img_timesteps(
                 self.steps,
                 self.strength,
-                device=model.device,
+                device=unet.device,
             )
 
-            result_latents, result_attention_map_saver = model.latents_from_embeddings(
+            result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
                 latents=initial_latents,
                 timesteps=timesteps,
                 noise=noise,
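
Note: in the l2l path, strength decides how much of the noise schedule is
actually run; get_img2img_timesteps() keeps only the tail of it. A hedged
arithmetic sketch of the usual img2img truncation (the exact rounding lives in
StableDiffusionGeneratorPipeline, not in this diff):

    # Illustrative only: with steps=30 and strength=0.6, roughly the last
    # 18 of 30 denoising steps run, starting from partially noised latents.
    steps, strength = 30, 0.6
    init_timestep = min(int(steps * strength), steps)  # 18
    t_start = max(steps - init_timestep, 0)            # first 12 steps skipped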
@@ -318,23 +355,21 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation, ModelGetter):
 
 
 # Latent to image
-class LatentsToImageInvocation(BaseInvocation, ModelGetter):
+class LatentsToImageInvocation(BaseInvocation):
     """Generates an image from latents."""
 
     type: Literal["l2i"] = "l2i"
 
     # Inputs
     latents: Optional[LatentsField] = Field(description="The latents to generate an image from")
-    model: str = Field(default="", description="The model to use")
+    vae: VaeField = Field(default=None, description="Vae submodel")
+    tiled: bool = Field(default=False, description="Decode latents by overlaping tiles(less memory consumption)")
 
     # Schema customisation
     class Config(InvocationConfig):
         schema_extra = {
             "ui": {
                 "tags": ["latents", "image"],
-                "type_hints": {
-                    "model": "model"
-                }
             },
         }
 
@@ -342,11 +377,29 @@ class LatentsToImageInvocation(BaseInvocation, ModelGetter):
     def invoke(self, context: InvocationContext) -> ImageOutput:
         latents = context.services.latents.get(self.latents.latents_name)
 
-        # TODO: this only really needs the vae
-        with self.get_model(context) as model:
+        #vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
+        vae_info = context.services.model_manager.get_model(
+            model_name=self.vae.vae.model_name,
+            model_type=SDModelType[self.vae.vae.model_type],
+            submodel=SDModelType[self.vae.vae.submodel] if self.vae.vae.submodel else None,
+        )
+
+        with vae_info.context as vae:
+            # TODO: check if it works
+            if self.tiled:
+                vae.enable_tiling()
+            else:
+                vae.disable_tiling()
+
             with torch.inference_mode():
-                np_image = model.decode_latents(latents)
-                image = model.numpy_to_pil(np_image)[0]
+                # copied from diffusers pipeline
+                latents = latents / vae.config.scaling_factor
+                image = vae.decode(latents, return_dict=False)[0]
+                image = (image / 2 + 0.5).clamp(0, 1) # denormalize
+                # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+                np_image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+
+                image = VaeImageProcessor.numpy_to_pil(np_image)[0]
 
         image_type = ImageType.RESULT
         image_name = context.services.images.create_name(
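
Note: the inlined decode above reproduces what the pipeline's decode_latents()
helper used to do: undo the latent scaling (vae.config.scaling_factor, 0.18215
for SD 1.x), decode, and map the output from [-1, 1] to [0, 1]. A
self-contained sketch under those assumptions:

    import torch
    from diffusers.image_processor import VaeImageProcessor

    def decode_to_pil(vae, latents):
        # Assumes an AutoencoderKL-style vae and latents of shape (1, 4, H/8, W/8).
        latents = latents / vae.config.scaling_factor      # 0.18215 for SD 1.x
        image = vae.decode(latents, return_dict=False)[0]  # roughly in [-1, 1]
        image = (image / 2 + 0.5).clamp(0, 1)              # denormalize to [0, 1]
        np_image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return VaeImageProcessor.numpy_to_pil(np_image)[0]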
@@ -430,21 +483,21 @@ class ScaleLatentsInvocation(BaseInvocation):
         return LatentsOutput(latents=LatentsField(latents_name=name))
 
 
-class ImageToLatentsInvocation(BaseInvocation, ModelGetter):
+class ImageToLatentsInvocation(BaseInvocation):
     """Encodes an image into latents."""
 
     type: Literal["i2l"] = "i2l"
 
     # Inputs
     image: Union[ImageField, None] = Field(description="The image to encode")
-    model: str = Field(default="", description="The model to use")
+    vae: VaeField = Field(default=None, description="Vae submodel")
+    tiled: bool = Field(default=False, description="Encode latents by overlaping tiles(less memory consumption)")
 
     # Schema customisation
     class Config(InvocationConfig):
         schema_extra = {
             "ui": {
                 "tags": ["latents", "image"],
-                "type_hints": {"model": "model"},
             },
         }
 
@@ -454,22 +507,38 @@ class ImageToLatentsInvocation(BaseInvocation, ModelGetter):
             self.image.image_type, self.image.image_name
         )
 
-        # TODO: this only really needs the vae
-        model_info = self.get_model(context)
-        model: StableDiffusionGeneratorPipeline = model_info["model"]
+        #vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
+        vae_info = context.services.model_manager.get_model(
+            model_name=self.vae.vae.model_name,
+            model_type=SDModelType[self.vae.vae.model_type],
+            submodel=SDModelType[self.vae.vae.submodel] if self.vae.vae.submodel else None,
+        )
 
         image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
 
         if image_tensor.dim() == 3:
             image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
 
-        latents = model.non_noised_latents_from_image(
-            image_tensor,
-            device=model._model_group.device_for(model.unet),
-            dtype=model.unet.dtype,
-        )
+        with vae_info.context as vae:
+            # TODO: check if it works
+            if self.tiled:
+                vae.enable_tiling()
+            else:
+                vae.disable_tiling()
+
+            latents = self.non_noised_latents_from_image(vae, image_tensor)
 
         name = f"{context.graph_execution_state_id}__{self.id}"
         context.services.latents.set(name, latents)
         return LatentsOutput(latents=LatentsField(latents_name=name))
+
+
+    def non_noised_latents_from_image(self, vae, init_image):
+        init_image = init_image.to(device=vae.device, dtype=vae.dtype)
+        with torch.inference_mode():
+            init_latent_dist = vae.encode(init_image).latent_dist
+            init_latents = init_latent_dist.sample().to(
+                dtype=vae.dtype
+            ) # FIXME: uses torch.randn. make reproducible!
+
+            init_latents = 0.18215 * init_latents
+            return init_latents
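
Note: the encode path is the mirror image of the decode above: sample from the
VAE posterior and multiply by the SD 1.x scaling factor, hard-coded here as
0.18215 rather than read from vae.config.scaling_factor. One way to address the
FIXME about reproducibility, sketched as an assumption rather than part of the
commit:

    import torch

    def encode_reproducibly(vae, init_image, seed=0):
        torch.manual_seed(seed)  # crude, version-agnostic way to pin latent_dist.sample()
        init_image = init_image.to(device=vae.device, dtype=vae.dtype)
        with torch.inference_mode():
            latents = vae.encode(init_image).latent_dist.sample().to(dtype=vae.dtype)
        return 0.18215 * latents  # equals vae.config.scaling_factor for SD 1.x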