diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 11ea7134bb..339f1333d8 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -6,6 +6,8 @@ import einops
 from pydantic import BaseModel, Field, validator
 import torch
 
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
+
 from invokeai.app.invocations.util.choose_model import choose_model
 from invokeai.app.models.image import ImageCategory
 from invokeai.app.util.misc import SEED_MAX, get_random_seed
@@ -28,7 +30,7 @@ from .compel import ConditioningField
 from ...backend.stable_diffusion import PipelineIntermediateState
 from diffusers.schedulers import SchedulerMixin as Scheduler
 import diffusers
-from diffusers import DiffusionPipeline
+from diffusers import DiffusionPipeline, ControlNetModel
 
 
 class LatentsField(BaseModel):
@@ -84,13 +86,13 @@ SAMPLER_NAME_VALUES = Literal[
 
 def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
     scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])
-    
+
     scheduler_config = model.scheduler.config
     if "_backup" in scheduler_config:
         scheduler_config = scheduler_config["_backup"]
     scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
     scheduler = scheduler_class.from_config(scheduler_config)
-    
+
     # hack copied over from generate.py
     if not hasattr(scheduler, 'uses_inpainting_model'):
         scheduler.uses_inpainting_model = lambda: False
@@ -171,6 +173,9 @@ class TextToLatentsInvocation(BaseInvocation):
     model: str = Field(default="", description="The model to use (currently ignored)")
     # seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
     # seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
+    progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
+    control_model: Optional[str] = Field(default=None, description="The control model to use")
+    control_image: Optional[ImageField] = Field(default=None, description="The processed control image")
     # fmt: on
 
     # Schema customisation
@@ -252,6 +257,63 @@ class TextToLatentsInvocation(BaseInvocation):
         model = self.get_model(context.services.model_manager)
         conditioning_data = self.get_conditioning_data(context, model)
 
+        # loading controlnet model
+        if (self.control_model is None or self.control_model==''):
+            control_model = None
+        else:
+            # FIXME: change this to dropdown menu?
+            # FIXME: generalize so don't have to hardcode torch_dtype and device
+            control_model = ControlNetModel.from_pretrained(self.control_model,
+                                                            torch_dtype=torch.float16).to("cuda")
+        model.control_model = control_model
+
+        # loading controlnet image (currently requires pre-processed image)
+        control_image = (
+            None if self.control_image is None
+            else context.services.images.get(
+                self.control_image.image_type, self.control_image.image_name
+            )
+        )
+
+        # copied from old backend/txt2img.py
+        # FIXME: still need to test with different widths, heights, devices, dtypes
+        #        and add in batch_size, num_images_per_prompt?
+
+        if control_image is not None:
+            if isinstance(control_model, ControlNetModel):
+                control_image = model.prepare_control_image(
+                    image=control_image,
+                    # do_classifier_free_guidance=do_classifier_free_guidance,
+                    do_classifier_free_guidance=True,
+                    # width=width,
+                    # height=height,
+                    width=512,
+                    height=512,
+                    # batch_size=batch_size * num_images_per_prompt,
+                    # num_images_per_prompt=num_images_per_prompt,
+                    device=control_model.device,
+                    dtype=control_model.dtype,
+                )
+            elif isinstance(control_model, MultiControlNetModel):
+                images = []
+                for image_ in control_image:
+                    image_ = model.prepare_control_image(
+                        image=image_,
+                        # do_classifier_free_guidance=do_classifier_free_guidance,
+                        do_classifier_free_guidance=True,
+                        # width=width,
+                        # height=height,
+                        width=512,
+                        height=512,
+                        # batch_size=batch_size * num_images_per_prompt,
+                        # num_images_per_prompt=num_images_per_prompt,
+                        device=control_model.device,
+                        dtype=control_model.dtype,
+                    )
+                    images.append(image_)
+                control_image = images
+
+
 
         # TODO: Verify the noise is the right size
         result_latents, result_attention_map_saver = model.latents_from_embeddings(
@@ -259,7 +321,8 @@
             noise=noise,
             num_inference_steps=self.steps,
             conditioning_data=conditioning_data,
-            callback=step_callback
+            callback=step_callback,
+            control_image=control_image,
         )
 
         # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
@@ -490,4 +553,4 @@ class ImageToLatentsInvocation(BaseInvocation):
         name = f"{context.graph_execution_state_id}__{self.id}"
         context.services.latents.save(name, latents)
         return build_latents_output(latents_name=name, latents=latents)
-
+
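
For reviewers unfamiliar with the diffusers ControlNet API the new code leans on, below is a minimal standalone sketch of the same single-vs-multi pattern. The model IDs and the pipeline class are illustrative assumptions for experimenting outside InvokeAI, not anything this PR pins; the PR itself only calls ControlNetModel.from_pretrained() on the node's control_model string and attaches the result to the generator pipeline.

# Hypothetical standalone sketch of the diffusers pattern used above;
# the repo ids below are examples only.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Single ControlNet: the same from_pretrained() call the invocation makes,
# with the hardcoded dtype/device that the FIXME comments flag.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny",
    torch_dtype=torch.float16,
).to("cuda")

# Passing a list of ControlNets instead makes diffusers wrap them in a
# MultiControlNetModel, which is why the invocation handles both
# isinstance() branches when preparing control images.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")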