From 0c3b1fe3c43f56e789b44e94225edaffcf102cfa Mon Sep 17 00:00:00 2001
From: Kyle Schouviller
Date: Sun, 12 Mar 2023 22:12:42 -0700
Subject: [PATCH 1/3] [nodes] Fixes calls into image to image and inpaint from nodes

---
 invokeai/app/invocations/generate.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py
index c1a0028293..70892ecde9 100644
--- a/invokeai/app/invocations/generate.py
+++ b/invokeai/app/invocations/generate.py
@@ -118,7 +118,7 @@ class ImageToImageInvocation(TextToImageInvocation):
         generator_output = next(
             Img2Img(model).generate(
                 prompt=self.prompt,
-                init_img=image,
+                init_image=image,
                 init_mask=mask,
                 step_callback=step_callback,
                 **self.dict(
@@ -179,8 +179,8 @@ class InpaintInvocation(ImageToImageInvocation):
         generator_output = next(
             Inpaint(model).generate(
                 prompt=self.prompt,
-                init_img=image,
-                init_mask=mask,
+                init_image=image,
+                mask_image=mask,
                 step_callback=step_callback,
                 **self.dict(
                     exclude={"prompt", "image", "mask"}

From b980e563b9845572e2cc383695a5dd65a60dd3a9 Mon Sep 17 00:00:00 2001
From: JPPhoto
Date: Mon, 13 Mar 2023 08:11:09 -0500
Subject: [PATCH 2/3] Fix bug #2931

---
 invokeai/backend/generator/base.py         | 16 ++++++++--------
 invokeai/backend/generator/img2img.py      |  9 +++------
 invokeai/backend/generator/inpaint.py      |  4 ++--
 invokeai/backend/generator/txt2img.py      |  2 +-
 invokeai/backend/generator/txt2img2img.py  |  2 +-
 .../stable_diffusion/diffusers_pipeline.py |  9 +++++++--
 6 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index 4ec0f9d54f..db1a387002 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -58,7 +58,7 @@ class InvokeAIGeneratorOutput:
     '''
    InvokeAIGeneratorOutput is a dataclass that contains the outputs of a generation
    operation, including the image, its seed, the model name used to generate the image
-   and the model hash, as well as all the generate() parameters that went into 
+   and the model hash, as well as all the generate() parameters that went into
    generating the image (in .params, also available as attributes)
    '''
    image: Image
@@ -116,7 +116,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
           outputs = txt2img.generate(prompt='banana sushi', iterations=None)
           for o in outputs:
               print(o.image, o.seed)
-    
+
        '''
        generator_args = dataclasses.asdict(self.params)
        generator_args.update(keyword_args)
@@ -167,7 +167,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
            if callback:
                callback(output)
            yield output
-    
+
    @classmethod
    def schedulers(self)->List[str]:
        '''
@@ -177,7 +177,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
 
    def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]):
        return generator_class(model, self.params.precision)
-    
+
    def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
        scheduler_class = self.scheduler_map.get(scheduler_name,'ddim')
        scheduler = scheduler_class.from_config(model.scheduler.config)
@@ -267,12 +267,12 @@ class Embiggen(Txt2Img):
                               embiggen_tiles=embiggen_tiles,
                               strength=strength,
                               **kwargs)
-    
+
    @classmethod
    def _generator_class(cls):
        from .embiggen import Embiggen
        return Embiggen
-    
+
 
 class Generator:
     downsampling_factor: int
@@ -347,7 +347,6 @@ class Generator:
             h_symmetry_time_pct=h_symmetry_time_pct,
             v_symmetry_time_pct=v_symmetry_time_pct,
             attention_maps_callback=attention_maps_callback,
-            seed=seed,
             **kwargs,
         )
         results = []
@@ -375,7 +374,8 @@ class Generator:
                     print("** An error occurred while getting initial noise **")
                     print(traceback.format_exc())
 
-            image = make_image(x_T)
+            # Pass on the seed in case a layer beneath us needs to generate noise on its own.
+            image = make_image(x_T, seed)
 
             if self.safety_checker is not None:
                 image = self.safety_checker.check(image)
diff --git a/invokeai/backend/generator/img2img.py b/invokeai/backend/generator/img2img.py
index b7f90361a0..2c62bec4d6 100644
--- a/invokeai/backend/generator/img2img.py
+++ b/invokeai/backend/generator/img2img.py
@@ -37,7 +37,6 @@ class Img2Img(Generator):
         h_symmetry_time_pct=None,
         v_symmetry_time_pct=None,
         attention_maps_callback=None,
-        seed=None,
         **kwargs,
     ):
         """
@@ -64,7 +63,7 @@ class Img2Img(Generator):
             ),
         ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
 
-        def make_image(x_T):
+        def make_image(x_T: torch.Tensor, seed: int):
             # FIXME: use x_T for initial seeded noise
             # We're not at the moment because the pipeline automatically resizes init_image if
             # necessary, which the x_T input might not match.
@@ -77,7 +76,7 @@ class Img2Img(Generator):
                 conditioning_data,
                 noise_func=self.get_noise_like,
                 callback=step_callback,
-                seed=seed
+                seed=seed,
             )
             if (
                 pipeline_output.attention_map_saver is not None
@@ -88,9 +87,7 @@ class Img2Img(Generator):
 
         return make_image
 
-    def get_noise_like(self, like: torch.Tensor, seed: Optional[int]):
-        if seed is not None:
-            set_seed(seed)
+    def get_noise_like(self, like: torch.Tensor):
         device = like.device
         if device.type == "mps":
             x = torch.randn_like(like, device="cpu").to(device)
diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py
index fa7560d43e..9b46ec9a20 100644
--- a/invokeai/backend/generator/inpaint.py
+++ b/invokeai/backend/generator/inpaint.py
@@ -311,7 +311,7 @@ class Inpaint(Img2Img):
             uc, c, cfg_scale
         ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
 
-        def make_image(x_T):
+        def make_image(x_T: torch.Tensor, seed: int):
             pipeline_output = pipeline.inpaint_from_embeddings(
                 init_image=init_image,
                 mask=1 - mask,  # expects white means "paint here."
@@ -320,7 +320,7 @@ class Inpaint(Img2Img):
                 conditioning_data=conditioning_data,
                 noise_func=self.get_noise_like,
                 callback=step_callback,
-                seed=seed
+                seed=seed,
             )
 
             if (
diff --git a/invokeai/backend/generator/txt2img.py b/invokeai/backend/generator/txt2img.py
index a9918f81ce..e5a96212f0 100644
--- a/invokeai/backend/generator/txt2img.py
+++ b/invokeai/backend/generator/txt2img.py
@@ -61,7 +61,7 @@ class Txt2Img(Generator):
             ),
         ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
 
-        def make_image(x_T) -> PIL.Image.Image:
+        def make_image(x_T: torch.Tensor, _: int) -> PIL.Image.Image:
             pipeline_output = pipeline.image_from_embeddings(
                 latents=torch.zeros_like(x_T, dtype=self.torch_dtype()),
                 noise=x_T,
diff --git a/invokeai/backend/generator/txt2img2img.py b/invokeai/backend/generator/txt2img2img.py
index e0ec937b39..1e24a8b729 100644
--- a/invokeai/backend/generator/txt2img2img.py
+++ b/invokeai/backend/generator/txt2img2img.py
@@ -64,7 +64,7 @@ class Txt2Img2Img(Generator):
             ),
         ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
 
-        def make_image(x_T):
+        def make_image(x_T: torch.Tensor, _: int):
             first_pass_latent_output, _ = pipeline.latents_from_embeddings(
                 latents=torch.zeros_like(x_T),
                 num_inference_steps=steps,
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 51e7b1ee1d..152e079693 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -9,6 +9,7 @@ from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union
 
 import einops
 import PIL.Image
+from accelerate.utils import set_seed
 import psutil
 import torch
 import torchvision.transforms as T
@@ -694,7 +695,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             device=self._model_group.device_for(self.unet),
             dtype=self.unet.dtype,
         )
-        noise = noise_func(initial_latents, seed)
+        if seed is not None:
+            set_seed(seed)
+        noise = noise_func(initial_latents)
 
         return self.img2img_from_latents_and_embeddings(
             initial_latents,
@@ -796,7 +799,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         init_image_latents = self.non_noised_latents_from_image(
             init_image, device=device, dtype=latents_dtype
         )
-        noise = noise_func(init_image_latents, seed)
+        if seed is not None:
+            set_seed(seed)
+        noise = noise_func(init_image_latents)
 
         if mask.dim() == 3:
             mask = mask.unsqueeze(0)

From 596ba754b19a9d9d426a793d566b3663aff72b70 Mon Sep 17 00:00:00 2001
From: JPPhoto
Date: Mon, 13 Mar 2023 08:15:46 -0500
Subject: [PATCH 3/3] Removed seed from get_make_image.

---
 invokeai/backend/generator/inpaint.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py
index 9b46ec9a20..f95951a955 100644
--- a/invokeai/backend/generator/inpaint.py
+++ b/invokeai/backend/generator/inpaint.py
@@ -223,7 +223,6 @@ class Inpaint(Img2Img):
         inpaint_height=None,
         inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF),
         attention_maps_callback=None,
-        seed=None,
         **kwargs,
     ):
         """
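
Note on the seeding change (not part of the patches above): after PATCH 2/3, get_noise_like() no longer takes a seed. Instead, the pipeline seeds the global RNG once via accelerate's set_seed() and then asks noise_func for unseeded noise, while make_image() receives the seed so a lower layer can generate its own noise reproducibly. The sketch below illustrates that flow as a simplified, standalone example; the helper name noise_for_latents is illustrative and not an actual InvokeAI call site.

    from typing import Optional

    import torch
    from accelerate.utils import set_seed


    def get_noise_like(like: torch.Tensor) -> torch.Tensor:
        # Seed handling has moved to the caller; this helper only draws noise.
        device = like.device
        if device.type == "mps":
            # MPS lacks some RNG support, so draw on CPU and move the tensor over.
            x = torch.randn_like(like, device="cpu").to(device)
        else:
            x = torch.randn_like(like, device=device)
        return x


    def noise_for_latents(latents: torch.Tensor, seed: Optional[int]) -> torch.Tensor:
        # Mirrors the img2img/inpaint change in diffusers_pipeline.py:
        # seed the global RNG once, then call the now seed-free noise_func.
        if seed is not None:
            set_seed(seed)
        return get_noise_like(latents)

Under this split, reproducibility comes from the single set_seed() call in the pipeline rather than from threading a seed through every noise helper, which is why PATCH 3/3 can drop the now-unused seed parameter from Inpaint's get_make_image().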