From 9aaf67c5b41458aba815d9ce58ce796fb4d941be Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 6 Aug 2023 05:05:25 +0300 Subject: [PATCH 01/67] wip --- invokeai/app/invocations/compel.py | 9 + invokeai/app/invocations/latent.py | 6 +- .../stable_diffusion/diffusers_pipeline.py | 209 +++----------- .../diffusion/shared_invokeai_diffusion.py | 265 ++++++++++++------ 4 files changed, 232 insertions(+), 257 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 7c3ce7a819..d0b55cd185 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -37,6 +37,10 @@ class BasicConditioningInfo: # weight: float # mode: ConditioningAlgo + def to(self, device, dtype=None): + self.embeds = self.embeds.to(device=device, dtype=dtype) + return self + @dataclass class SDXLConditioningInfo(BasicConditioningInfo): @@ -44,6 +48,11 @@ class SDXLConditioningInfo(BasicConditioningInfo): pooled_embeds: torch.Tensor add_time_ids: torch.Tensor + def to(self, device, dtype=None): + self.pooled_embeds = self.pooled_embeds.to(device=device, dtype=dtype) + self.add_time_ids = self.add_time_ids.to(device=device, dtype=dtype) + return super().to(device=device, dtype=dtype) + ConditioningInfoType = Annotated[Union[BasicConditioningInfo, SDXLConditioningInfo], Field(discriminator="type")] diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 6e2e0838bc..a63f98de24 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -174,11 +174,11 @@ class TextToLatentsInvocation(BaseInvocation): unet, ) -> ConditioningData: positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name) - c = positive_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype) - extra_conditioning_info = positive_cond_data.conditionings[0].extra_conditioning + c = positive_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype) + extra_conditioning_info = c.extra_conditioning negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name) - uc = negative_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype) + uc = negative_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype) conditioning_data = ConditioningData( unconditioned_embeddings=uc, diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 624d47ff64..8a7616f1f1 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -212,8 +212,8 @@ class ControlNetData: @dataclass class ConditioningData: - unconditioned_embeddings: torch.Tensor - text_embeddings: torch.Tensor + unconditioned_embeddings: Any # TODO: type + text_embeddings: Any # TODO: type guidance_scale: Union[float, List[float]] """ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). @@ -392,48 +392,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): submodels.append(value) return submodels - def image_from_embeddings( - self, - latents: torch.Tensor, - num_inference_steps: int, - conditioning_data: ConditioningData, - *, - noise: torch.Tensor, - callback: Callable[[PipelineIntermediateState], None] = None, - run_id=None, - ) -> InvokeAIStableDiffusionPipelineOutput: - r""" - Function invoked when calling the pipeline for generation. 
- - :param conditioning_data: - :param latents: Pre-generated un-noised latents, to be used as inputs for - image generation. Can be used to tweak the same generation with different prompts. - :param num_inference_steps: The number of denoising steps. More denoising steps usually lead to a higher quality - image at the expense of slower inference. - :param noise: Noise to add to the latents, sampled from a Gaussian distribution. - :param callback: - :param run_id: - """ - result_latents, result_attention_map_saver = self.latents_from_embeddings( - latents, - num_inference_steps, - conditioning_data, - noise=noise, - run_id=run_id, - callback=callback, - ) - # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 - torch.cuda.empty_cache() - - with torch.inference_mode(): - image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput( - images=image, - nsfw_content_detected=[], - attention_map_saver=result_attention_map_saver, - ) - return self.check_for_safety(output, dtype=conditioning_data.dtype) - def latents_from_embeddings( self, latents: torch.Tensor, @@ -492,13 +450,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): extra_conditioning_info=extra_conditioning_info, step_count=len(self.scheduler.timesteps), ): - yield PipelineIntermediateState( - run_id=run_id, - step=-1, - timestep=self.scheduler.config.num_train_timesteps, - latents=latents, - ) - batch_size = latents.shape[0] batched_t = torch.full( (batch_size,), @@ -506,8 +457,16 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): dtype=timesteps.dtype, device=self._model_group.device_for(self.unet), ) + #latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers latents = self.scheduler.add_noise(latents, noise, batched_t) + yield PipelineIntermediateState( + run_id=run_id, + step=-1, + timestep=self.scheduler.config.num_train_timesteps, + latents=latents, + ) + attention_map_saver: Optional[AttentionMapSaver] = None # print("timesteps:", timesteps) for i, t in enumerate(self.progress_bar(timesteps)): @@ -569,95 +528,40 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # TODO: should this scaling happen here or inside self._unet_forward? # i.e. 
before or after passing it to InvokeAIDiffuserComponent - unet_latent_input = self.scheduler.scale_model_input(latents, timestep) + latent_model_input = self.scheduler.scale_model_input(latents, timestep) # default is no controlnet, so set controlnet processing output to None - down_block_res_samples, mid_block_res_sample = None, None - + controlnet_down_block_samples, controlnet_mid_block_sample = None, None if control_data is not None: - # control_data should be type List[ControlNetData] - # this loop covers both ControlNet (one ControlNetData in list) - # and MultiControlNet (multiple ControlNetData in list) - for i, control_datum in enumerate(control_data): - control_mode = control_datum.control_mode - # soft_injection and cfg_injection are the two ControlNet control_mode booleans - # that are combined at higher level to make control_mode enum - # soft_injection determines whether to do per-layer re-weighting adjustment (if True) - # or default weighting (if False) - soft_injection = control_mode == "more_prompt" or control_mode == "more_control" - # cfg_injection = determines whether to apply ControlNet to only the conditional (if True) - # or the default both conditional and unconditional (if False) - cfg_injection = control_mode == "more_control" or control_mode == "unbalanced" + controlnet_down_block_samples, controlnet_mid_block_sample = self.invokeai_diffuser.do_controlnet_step( + control_data=control_data, + sample=latent_model_input, + timestep=timestep, + step_index=step_index, + total_step_count=total_step_count, + conditioning_data=conditioning_data, + ) - first_control_step = math.floor(control_datum.begin_step_percent * total_step_count) - last_control_step = math.ceil(control_datum.end_step_percent * total_step_count) - # only apply controlnet if current step is within the controlnet's begin/end step range - if step_index >= first_control_step and step_index <= last_control_step: - if cfg_injection: - control_latent_input = unet_latent_input - else: - # expand the latents input to control model if doing classifier free guidance - # (which I think for now is always true, there is conditional elsewhere that stops execution if - # classifier_free_guidance is <= 1.0 ?) - control_latent_input = torch.cat([unet_latent_input] * 2) - - if cfg_injection: # only applying ControlNet to conditional instead of in unconditioned - encoder_hidden_states = conditioning_data.text_embeddings - encoder_attention_mask = None - else: - ( - encoder_hidden_states, - encoder_attention_mask, - ) = self.invokeai_diffuser._concat_conditionings_for_batch( - conditioning_data.unconditioned_embeddings, - conditioning_data.text_embeddings, - ) - if isinstance(control_datum.weight, list): - # if controlnet has multiple weights, use the weight for the current step - controlnet_weight = control_datum.weight[step_index] - else: - # if controlnet has a single weight, use it for all steps - controlnet_weight = control_datum.weight - - # controlnet(s) inference - down_samples, mid_sample = control_datum.model( - sample=control_latent_input, - timestep=timestep, - encoder_hidden_states=encoder_hidden_states, - controlnet_cond=control_datum.image_tensor, - conditioning_scale=controlnet_weight, # controlnet specific, NOT the guidance scale - encoder_attention_mask=encoder_attention_mask, - guess_mode=soft_injection, # this is still called guess_mode in diffusers ControlNetModel - return_dict=False, - ) - if cfg_injection: - # Inferred ControlNet only for the conditional batch. 
- # To apply the output of ControlNet to both the unconditional and conditional batches, - # prepend zeros for unconditional batch - down_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_samples] - mid_sample = torch.cat([torch.zeros_like(mid_sample), mid_sample]) - - if down_block_res_samples is None and mid_block_res_sample is None: - down_block_res_samples, mid_block_res_sample = down_samples, mid_sample - else: - # add controlnet outputs together if have multiple controlnets - down_block_res_samples = [ - samples_prev + samples_curr - for samples_prev, samples_curr in zip(down_block_res_samples, down_samples) - ] - mid_block_res_sample += mid_sample - - # predict the noise residual - noise_pred = self.invokeai_diffuser.do_diffusion_step( - x=unet_latent_input, - sigma=t, - unconditioning=conditioning_data.unconditioned_embeddings, - conditioning=conditioning_data.text_embeddings, - unconditional_guidance_scale=conditioning_data.guidance_scale, + uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( + sample=latent_model_input, + timestep=t, # TODO: debug how handled batched and non batched timesteps step_index=step_index, total_step_count=total_step_count, - down_block_additional_residuals=down_block_res_samples, # from controlnet(s) - mid_block_additional_residual=mid_block_res_sample, # from controlnet(s) + conditioning_data=conditioning_data, + + # extra: + down_block_additional_residuals=controlnet_down_block_samples, # from controlnet(s) + mid_block_additional_residual=controlnet_mid_block_sample, # from controlnet(s) + ) + + guidance_scale = conditioning_data.guidance_scale + if isinstance(guidance_scale, list): + guidance_scale = guidance_scale[step_index] + + noise_pred = self.invokeai_diffuser._combine( + uc_noise_pred, + c_noise_pred, + guidance_scale, ) # compute the previous noisy sample x_t -> x_t-1 @@ -738,41 +642,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): callback, ) - def img2img_from_latents_and_embeddings( - self, - initial_latents, - num_inference_steps, - conditioning_data: ConditioningData, - strength, - noise: torch.Tensor, - run_id=None, - callback=None, - ) -> InvokeAIStableDiffusionPipelineOutput: - timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength) - result_latents, result_attention_maps = self.latents_from_embeddings( - latents=initial_latents - if strength < 1.0 - else torch.zeros_like(initial_latents, device=initial_latents.device, dtype=initial_latents.dtype), - num_inference_steps=num_inference_steps, - conditioning_data=conditioning_data, - timesteps=timesteps, - noise=noise, - run_id=run_id, - callback=callback, - ) - - # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 - torch.cuda.empty_cache() - - with torch.inference_mode(): - image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput( - images=image, - nsfw_content_detected=[], - attention_map_saver=result_attention_maps, - ) - return self.check_for_safety(output, dtype=conditioning_data.dtype) - def get_img2img_timesteps(self, num_inference_steps: int, strength: float, device=None) -> (torch.Tensor, int): img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components) assert img2img_pipeline.scheduler is self.scheduler @@ -877,7 +746,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): nsfw_content_detected=[], attention_map_saver=result_attention_maps, ) - return self.check_for_safety(output, dtype=conditioning_data.dtype) + return 
self.check_for_safety(output, dtype=self.unet.dtype) def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype): init_image = init_image.to(device=device, dtype=dtype) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index c01cf82c57..b906719923 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -1,6 +1,6 @@ from contextlib import contextmanager from dataclasses import dataclass -from math import ceil +import math from typing import Any, Callable, Dict, Optional, Union, List import numpy as np @@ -127,33 +127,119 @@ class InvokeAIDiffuserComponent: for _, module in tokens_cross_attention_modules: module.set_attention_slice_calculated_callback(None) - def do_diffusion_step( + def do_controlnet_step( self, - x: torch.Tensor, - sigma: torch.Tensor, - unconditioning: Union[torch.Tensor, dict], - conditioning: Union[torch.Tensor, dict], - # unconditional_guidance_scale: float, - unconditional_guidance_scale: Union[float, List[float]], - step_index: Optional[int] = None, - total_step_count: Optional[int] = None, + control_data, + sample: torch.Tensor, + timestep: torch.Tensor, + step_index: int, + total_step_count: int, + conditioning_data, + ): + down_block_res_samples, mid_block_res_sample = None, None + + # control_data should be type List[ControlNetData] + # this loop covers both ControlNet (one ControlNetData in list) + # and MultiControlNet (multiple ControlNetData in list) + for i, control_datum in enumerate(control_data): + control_mode = control_datum.control_mode + # soft_injection and cfg_injection are the two ControlNet control_mode booleans + # that are combined at higher level to make control_mode enum + # soft_injection determines whether to do per-layer re-weighting adjustment (if True) + # or default weighting (if False) + soft_injection = control_mode == "more_prompt" or control_mode == "more_control" + # cfg_injection = determines whether to apply ControlNet to only the conditional (if True) + # or the default both conditional and unconditional (if False) + cfg_injection = control_mode == "more_control" or control_mode == "unbalanced" + + first_control_step = math.floor(control_datum.begin_step_percent * total_step_count) + last_control_step = math.ceil(control_datum.end_step_percent * total_step_count) + # only apply controlnet if current step is within the controlnet's begin/end step range + if step_index >= first_control_step and step_index <= last_control_step: + if cfg_injection: + sample_model_input = sample + else: + # expand the latents input to control model if doing classifier free guidance + # (which I think for now is always true, there is conditional elsewhere that stops execution if + # classifier_free_guidance is <= 1.0 ?) 
+ sample_model_input = torch.cat([sample] * 2) + + added_cond_kwargs = None + + if cfg_injection: # only applying ControlNet to conditional instead of in unconditioned + if type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo": + added_cond_kwargs = { + "text_embeds": conditioning_data.text_embeddings.pooled_embeds, + "time_ids": conditioning_data.text_embeddings.add_time_ids, + } + encoder_hidden_states = conditioning_data.text_embeddings.embeds + encoder_attention_mask = None + else: + if type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo": + added_cond_kwargs = { + "text_embeds": torch.cat([ + # TODO: how to pad? just by zeros? or even truncate? + conditioning_data.unconditioned_embeddings.pooled_embeds, + conditioning_data.text_embeddings.pooled_embeds, + ], dim=0), + "time_ids": torch.cat([ + conditioning_data.unconditioned_embeddings.add_time_ids, + conditioning_data.text_embeddings.add_time_ids, + ], dim=0), + } + ( + encoder_hidden_states, + encoder_attention_mask, + ) = self._concat_conditionings_for_batch( + conditioning_data.unconditioned_embeddings.embeds, + conditioning_data.text_embeddings.embeds, + ) + if isinstance(control_datum.weight, list): + # if controlnet has multiple weights, use the weight for the current step + controlnet_weight = control_datum.weight[step_index] + else: + # if controlnet has a single weight, use it for all steps + controlnet_weight = control_datum.weight + + # controlnet(s) inference + down_samples, mid_sample = control_datum.model( + sample=sample_model_input, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + controlnet_cond=control_datum.image_tensor, + conditioning_scale=controlnet_weight, # controlnet specific, NOT the guidance scale + encoder_attention_mask=encoder_attention_mask, + guess_mode=soft_injection, # this is still called guess_mode in diffusers ControlNetModel + return_dict=False, + ) + if cfg_injection: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # prepend zeros for unconditional batch + down_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_samples] + mid_sample = torch.cat([torch.zeros_like(mid_sample), mid_sample]) + + if down_block_res_samples is None and mid_block_res_sample is None: + down_block_res_samples, mid_block_res_sample = down_samples, mid_sample + else: + # add controlnet outputs together if have multiple controlnets + down_block_res_samples = [ + samples_prev + samples_curr + for samples_prev, samples_curr in zip(down_block_res_samples, down_samples) + ] + mid_block_res_sample += mid_sample + + return down_block_res_samples, mid_block_res_sample + + def do_unet_step( + self, + sample: torch.Tensor, + timestep: torch.Tensor, + conditioning_data, # TODO: type + step_index: int, + total_step_count: int, **kwargs, ): - """ - :param x: current latents - :param sigma: aka t, passed to the internal model to control how much denoising will occur - :param unconditioning: embeddings for unconditioned output. for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768] - :param conditioning: embeddings for conditioned output. 
for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768] - :param unconditional_guidance_scale: aka CFG scale, controls how much effect the conditioning tensor has - :param step_index: counts upwards from 0 to (step_count-1) (as passed to setup_cross_attention_control, if using). May be called multiple times for a single step, therefore do not assume that its value will monotically increase. If None, will be estimated by comparing sigma against self.model.sigmas . - :return: the new latents after applying the model to x using unscaled unconditioning and CFG-scaled conditioning. - """ - - if isinstance(unconditional_guidance_scale, list): - guidance_scale = unconditional_guidance_scale[step_index] - else: - guidance_scale = unconditional_guidance_scale - cross_attention_control_types_to_do = [] context: Context = self.cross_attention_control_context if self.cross_attention_control_context is not None: @@ -163,25 +249,15 @@ class InvokeAIDiffuserComponent: ) wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0 - wants_hybrid_conditioning = isinstance(conditioning, dict) - if wants_hybrid_conditioning: - unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning( - x, - sigma, - unconditioning, - conditioning, - **kwargs, - ) - elif wants_cross_attention_control: + if wants_cross_attention_control: ( unconditioned_next_x, conditioned_next_x, ) = self._apply_cross_attention_controlled_conditioning( - x, - sigma, - unconditioning, - conditioning, + sample, + timestep, + conditioning_data, cross_attention_control_types_to_do, **kwargs, ) @@ -190,10 +266,9 @@ class InvokeAIDiffuserComponent: unconditioned_next_x, conditioned_next_x, ) = self._apply_standard_conditioning_sequentially( - x, - sigma, - unconditioning, - conditioning, + sample, + timestep, + conditioning_data, **kwargs, ) @@ -202,21 +277,13 @@ class InvokeAIDiffuserComponent: unconditioned_next_x, conditioned_next_x, ) = self._apply_standard_conditioning( - x, - sigma, - unconditioning, - conditioning, + sample, + timestep, + conditioning_data, **kwargs, ) - combined_next_x = self._combine( - # unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale - unconditioned_next_x, - conditioned_next_x, - guidance_scale, - ) - - return combined_next_x + return unconditioned_next_x, conditioned_next_x def do_latent_postprocessing( self, @@ -281,17 +348,35 @@ class InvokeAIDiffuserComponent: # methods below are called from do_diffusion_step and should be considered private to this class. - def _apply_standard_conditioning(self, x, sigma, unconditioning, conditioning, **kwargs): + def _apply_standard_conditioning(self, x, sigma, conditioning_data, **kwargs): # fast batched path x_twice = torch.cat([x] * 2) sigma_twice = torch.cat([sigma] * 2) - both_conditionings, encoder_attention_mask = self._concat_conditionings_for_batch(unconditioning, conditioning) + added_cond_kwargs = None + if type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo": + added_cond_kwargs = { + "text_embeds": torch.cat([ + # TODO: how to pad? just by zeros? or even truncate? 
+ conditioning_data.unconditioned_embeddings.pooled_embeds, + conditioning_data.text_embeddings.pooled_embeds, + ], dim=0), + "time_ids": torch.cat([ + conditioning_data.unconditioned_embeddings.add_time_ids, + conditioning_data.text_embeddings.add_time_ids, + ], dim=0), + } + + both_conditionings, encoder_attention_mask = self._concat_conditionings_for_batch( + conditioning_data.unconditioned_embeddings.embeds, + conditioning_data.text_embeddings.embeds + ) both_results = self.model_forward_callback( x_twice, sigma_twice, both_conditionings, encoder_attention_mask=encoder_attention_mask, + added_cond_kwargs=added_cond_kwargs, **kwargs, ) unconditioned_next_x, conditioned_next_x = both_results.chunk(2) @@ -320,46 +405,41 @@ class InvokeAIDiffuserComponent: if mid_block_additional_residual is not None: uncond_mid_block, cond_mid_block = mid_block_additional_residual.chunk(2) + added_cond_kwargs = None + is_sdxl = type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo" + if is_sdxl: + added_cond_kwargs = { + "text_embeds": conditioning_data.unconditioned_embeddings.pooled_embeds, + "time_ids": conditioning_data.unconditioned_embeddings.add_time_ids, + } + unconditioned_next_x = self.model_forward_callback( x, sigma, - unconditioning, + conditioning_data.unconditioned_embeddings.embeds, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + added_cond_kwargs=added_cond_kwargs, **kwargs, ) + + if is_sdxl: + added_cond_kwargs = { + "text_embeds": conditioning_data.text_embeddings.pooled_embeds, + "time_ids": conditioning_data.text_embeddings.add_time_ids, + } + conditioned_next_x = self.model_forward_callback( x, sigma, - conditioning, + conditioning_data.text_embeddings.embeds, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + added_cond_kwargs=added_cond_kwargs, **kwargs, ) return unconditioned_next_x, conditioned_next_x - # TODO: looks unused - def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning, **kwargs): - assert isinstance(conditioning, dict) - assert isinstance(unconditioning, dict) - x_twice = torch.cat([x] * 2) - sigma_twice = torch.cat([sigma] * 2) - both_conditionings = dict() - for k in conditioning: - if isinstance(conditioning[k], list): - both_conditionings[k] = [ - torch.cat([unconditioning[k][i], conditioning[k][i]]) for i in range(len(conditioning[k])) - ] - else: - both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]]) - unconditioned_next_x, conditioned_next_x = self.model_forward_callback( - x_twice, - sigma_twice, - both_conditionings, - **kwargs, - ).chunk(2) - return unconditioned_next_x, conditioned_next_x - def _apply_cross_attention_controlled_conditioning( self, x: torch.Tensor, @@ -391,26 +471,43 @@ class InvokeAIDiffuserComponent: mask=context.cross_attention_mask, cross_attention_types_to_do=[], ) + + added_cond_kwargs = None + is_sdxl = type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo" + if is_sdxl: + added_cond_kwargs = { + "text_embeds": conditioning_data.unconditioned_embeddings.pooled_embeds, + "time_ids": conditioning_data.unconditioned_embeddings.add_time_ids, + } + # no cross attention for unconditioning (negative prompt) unconditioned_next_x = self.model_forward_callback( x, sigma, - unconditioning, + conditioning_data.unconditioned_embeddings.embeds, {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=uncond_down_block, 
mid_block_additional_residual=uncond_mid_block, + added_cond_kwargs=added_cond_kwargs, **kwargs, ) + if is_sdxl: + added_cond_kwargs = { + "text_embeds": conditioning_data.text_embeddings.pooled_embeds, + "time_ids": conditioning_data.text_embeddings.add_time_ids, + } + # do requested cross attention types for conditioning (positive prompt) cross_attn_processor_context.cross_attention_types_to_do = cross_attention_control_types_to_do conditioned_next_x = self.model_forward_callback( x, sigma, - conditioning, + conditioning_data.text_embeddings.embeds, {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + added_cond_kwargs=added_cond_kwargs, **kwargs, ) return unconditioned_next_x, conditioned_next_x @@ -564,7 +661,7 @@ class InvokeAIDiffuserComponent: # below is fugly omg conditionings = [uc] + [c for c, weight in weighted_cond_list] weights = [1] + [weight for c, weight in weighted_cond_list] - chunk_count = ceil(len(conditionings) / 2) + chunk_count = math.ceil(len(conditionings) / 2) deltas = None for chunk_index in range(chunk_count): offset = chunk_index * 2 From b0738b7f701bf001a99b769a959bcf88eda0c62c Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 7 Aug 2023 18:37:06 +0300 Subject: [PATCH 02/67] Fixes, zero tensor for empty negative prompt, remove raw prompt node --- invokeai/app/invocations/compel.py | 206 ++---------------- .../diffusion/shared_invokeai_diffusion.py | 6 +- 2 files changed, 20 insertions(+), 192 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index d0b55cd185..7fd101a3a0 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -185,7 +185,7 @@ class CompelInvocation(BaseInvocation): class SDXLPromptInvocationBase: - def run_clip_raw(self, context, clip_field, prompt, get_pooled, lora_prefix): + def run_clip_compel(self, context, clip_field, prompt, get_pooled, lora_prefix, zero_on_empty): tokenizer_info = context.services.model_manager.get_model( **clip_field.tokenizer.dict(), context=context, @@ -195,82 +195,21 @@ class SDXLPromptInvocationBase: context=context, ) - def _lora_loader(): - for lora in clip_field.loras: - lora_info = context.services.model_manager.get_model(**lora.dict(exclude={"weight"}), context=context) - yield (lora_info.context.model, lora.weight) - del lora_info - return - - # loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras] - - ti_list = [] - for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt): - name = trigger[1:-1] - try: - ti_list.append( - ( - name, - context.services.model_manager.get_model( - model_name=name, - base_model=clip_field.text_encoder.base_model, - model_type=ModelType.TextualInversion, - context=context, - ).context.model, - ) - ) - except ModelNotFoundException: - # print(e) - # import traceback - # print(traceback.format_exc()) - print(f'Warn: trigger: "{trigger}" not found') - - with ModelPatcher.apply_lora( - text_encoder_info.context.model, _lora_loader(), lora_prefix - ), ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as ( - tokenizer, - ti_manager, - ), ModelPatcher.apply_clip_skip( - text_encoder_info.context.model, clip_field.skipped_layers - ), text_encoder_info as text_encoder: - text_inputs = tokenizer( - prompt, - padding="max_length", - max_length=tokenizer.model_max_length, - 
truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - prompt_embeds = text_encoder( - text_input_ids.to(text_encoder.device), - output_hidden_states=True, + # return zero on empty + if prompt == "" and zero_on_empty: + cpu_text_encoder = text_encoder_info.context.model + c = torch.zeros( + (1, cpu_text_encoder.config.max_position_embeddings, cpu_text_encoder.config.hidden_size), + dtype=text_encoder_info.context.cache.precision, ) if get_pooled: - c_pooled = prompt_embeds[0] + c_pooled = torch.zeros( + (1, cpu_text_encoder.config.hidden_size), + dtype=c.dtype, + ) else: c_pooled = None - c = prompt_embeds.hidden_states[-2] - - del tokenizer - del text_encoder - del tokenizer_info - del text_encoder_info - - c = c.detach().to("cpu") - if c_pooled is not None: - c_pooled = c_pooled.detach().to("cpu") - - return c, c_pooled, None - - def run_clip_compel(self, context, clip_field, prompt, get_pooled, lora_prefix): - tokenizer_info = context.services.model_manager.get_model( - **clip_field.tokenizer.dict(), - context=context, - ) - text_encoder_info = context.services.model_manager.get_model( - **clip_field.text_encoder.dict(), - context=context, - ) + return c, c_pooled, None def _lora_loader(): for lora in clip_field.loras: @@ -375,11 +314,13 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): @torch.no_grad() def invoke(self, context: InvocationContext) -> CompelOutput: - c1, c1_pooled, ec1 = self.run_clip_compel(context, self.clip, self.prompt, False, "lora_te1_") + c1, c1_pooled, ec1 = self.run_clip_compel(context, self.clip, self.prompt, False, "lora_te1_", zero_on_empty=False) if self.style.strip() == "": - c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.prompt, True, "lora_te2_") + c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.prompt, True, "lora_te2_", zero_on_empty=True) else: - c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True, "lora_te2_") + c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True, "lora_te2_", zero_on_empty=True) + + print(f"{c1.shape=} {c2.shape=} {c2_pooled.shape=} {self.prompt=}") original_size = (self.original_height, self.original_width) crop_coords = (self.crop_top, self.crop_left) @@ -434,118 +375,7 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase @torch.no_grad() def invoke(self, context: InvocationContext) -> CompelOutput: # TODO: if there will appear lora for refiner - write proper prefix - c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True, "") - - original_size = (self.original_height, self.original_width) - crop_coords = (self.crop_top, self.crop_left) - - add_time_ids = torch.tensor([original_size + crop_coords + (self.aesthetic_score,)]) - - conditioning_data = ConditioningFieldData( - conditionings=[ - SDXLConditioningInfo( - embeds=c2, - pooled_embeds=c2_pooled, - add_time_ids=add_time_ids, - extra_conditioning=ec2, # or None - ) - ] - ) - - conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning" - context.services.latents.save(conditioning_name, conditioning_data) - - return CompelOutput( - conditioning=ConditioningField( - conditioning_name=conditioning_name, - ), - ) - - -class SDXLRawPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): - """Pass unmodified prompt to conditioning without compel processing.""" - - type: Literal["sdxl_raw_prompt"] = "sdxl_raw_prompt" - - prompt: str = 
Field(default="", description="Prompt") - style: str = Field(default="", description="Style prompt") - original_width: int = Field(1024, description="") - original_height: int = Field(1024, description="") - crop_top: int = Field(0, description="") - crop_left: int = Field(0, description="") - target_width: int = Field(1024, description="") - target_height: int = Field(1024, description="") - clip: ClipField = Field(None, description="Clip to use") - clip2: ClipField = Field(None, description="Clip2 to use") - - # Schema customisation - class Config(InvocationConfig): - schema_extra = { - "ui": {"title": "SDXL Prompt (Raw)", "tags": ["prompt", "compel"], "type_hints": {"model": "model"}}, - } - - @torch.no_grad() - def invoke(self, context: InvocationContext) -> CompelOutput: - c1, c1_pooled, ec1 = self.run_clip_raw(context, self.clip, self.prompt, False, "lora_te1_") - if self.style.strip() == "": - c2, c2_pooled, ec2 = self.run_clip_raw(context, self.clip2, self.prompt, True, "lora_te2_") - else: - c2, c2_pooled, ec2 = self.run_clip_raw(context, self.clip2, self.style, True, "lora_te2_") - - original_size = (self.original_height, self.original_width) - crop_coords = (self.crop_top, self.crop_left) - target_size = (self.target_height, self.target_width) - - add_time_ids = torch.tensor([original_size + crop_coords + target_size]) - - conditioning_data = ConditioningFieldData( - conditionings=[ - SDXLConditioningInfo( - embeds=torch.cat([c1, c2], dim=-1), - pooled_embeds=c2_pooled, - add_time_ids=add_time_ids, - extra_conditioning=ec1, - ) - ] - ) - - conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning" - context.services.latents.save(conditioning_name, conditioning_data) - - return CompelOutput( - conditioning=ConditioningField( - conditioning_name=conditioning_name, - ), - ) - - -class SDXLRefinerRawPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): - """Parse prompt using compel package to conditioning.""" - - type: Literal["sdxl_refiner_raw_prompt"] = "sdxl_refiner_raw_prompt" - - style: str = Field(default="", description="Style prompt") # TODO: ? 
-    original_width: int = Field(1024, description="")
-    original_height: int = Field(1024, description="")
-    crop_top: int = Field(0, description="")
-    crop_left: int = Field(0, description="")
-    aesthetic_score: float = Field(6.0, description="")
-    clip2: ClipField = Field(None, description="Clip to use")
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "SDXL Refiner Prompt (Raw)",
-                "tags": ["prompt", "compel"],
-                "type_hints": {"model": "model"},
-            },
-        }
-
-    @torch.no_grad()
-    def invoke(self, context: InvocationContext) -> CompelOutput:
-        # TODO: if there will appear lora for refiner - write proper prefix
-        c2, c2_pooled, ec2 = self.run_clip_raw(context, self.clip2, self.style, True, "")
+        c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True, "", zero_on_empty=False)
 
         original_size = (self.original_height, self.original_width)
         crop_coords = (self.crop_top, self.crop_left)
diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
index b906719923..4ff8c5abc7 100644
--- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
+++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
@@ -386,8 +386,7 @@ class InvokeAIDiffuserComponent:
         self,
         x: torch.Tensor,
         sigma,
-        unconditioning: torch.Tensor,
-        conditioning: torch.Tensor,
+        conditioning_data,
         **kwargs,
     ):
         # low-memory sequential path
@@ -444,8 +443,7 @@ class InvokeAIDiffuserComponent:
         self,
         x: torch.Tensor,
         sigma,
-        unconditioning,
-        conditioning,
+        conditioning_data,
         cross_attention_control_types_to_do,
         **kwargs,
     ):

From 2539e26c18e8ff156e5f6722be3d71f5c00812de Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Mon, 7 Aug 2023 19:57:11 +0300
Subject: [PATCH 03/67] Apply denoising_start/end, add torch-sdp to memory-efficient attention func

---
 invokeai/app/invocations/compel.py | 2 -
 invokeai/app/invocations/latent.py | 64 ++++++++++++++----
 .../stable_diffusion/diffusers_pipeline.py | 67 ++++++++++---------
 3 files changed, 88 insertions(+), 45 deletions(-)

diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index 7fd101a3a0..41be7f7138 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -320,8 +320,6 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
         else:
             c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True, "lora_te2_", zero_on_empty=True)
 
-        print(f"{c1.shape=} {c2.shape=} {c2_pooled.shape=} {self.prompt=}")
-
         original_size = (self.original_height, self.original_width)
         crop_coords = (self.crop_top, self.crop_left)
         target_size = (self.target_height, self.target_width)
diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index a63f98de24..fef3bcbf6f 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -122,6 +122,7 @@ class TextToLatentsInvocation(BaseInvocation):
     scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" )
     unet: UNetField = Field(default=None, description="UNet submodel")
     control: Union[ControlField, list[ControlField]] = Field(default=None, description="The control to use")
+    denoising_end: float = Field(default=1.0, ge=0, le=1, description="")
     # seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
     # 
seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'") # fmt: on @@ -310,6 +311,25 @@ class TextToLatentsInvocation(BaseInvocation): # MultiControlNetModel has been refactored out, just need list[ControlNetData] return control_data + def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end): + # apply denoising_start + num_inference_steps = steps + scheduler.set_timesteps(num_inference_steps, device=device) + + t_start = int(round(denoising_start * num_inference_steps)) + timesteps = scheduler.timesteps[t_start * scheduler.order :] + num_inference_steps = num_inference_steps - t_start + + # apply denoising_end + num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0) + + skipped_final_steps = int(round((1 - denoising_end) * steps)) + num_inference_steps = num_inference_steps - skipped_final_steps + timesteps = timesteps[: num_warmup_steps + scheduler.order * num_inference_steps] + + return num_inference_steps, timesteps + + @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: with SilenceWarnings(): @@ -359,12 +379,21 @@ class TextToLatentsInvocation(BaseInvocation): do_classifier_free_guidance=True, exit_stack=exit_stack, ) + + num_inference_steps, timesteps = self.init_scheduler( + scheduler, + device=unet.device, + steps=self.steps, + denoising_start=0.0, + denoising_end=self.denoising_end, + ) # TODO: Verify the noise is the right size result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( latents=torch.zeros_like(noise, dtype=torch_dtype(unet.device)), noise=noise, - num_inference_steps=self.steps, + timesteps=timesteps, + num_inference_steps=num_inference_steps, conditioning_data=conditioning_data, control_data=control_data, # list[ControlNetData] callback=step_callback, @@ -385,8 +414,12 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): type: Literal["l2l"] = "l2l" # Inputs - latents: Optional[LatentsField] = Field(description="The latents to use as a base image") - strength: float = Field(default=0.7, ge=0, le=1, description="The strength of the latents to use") + noise: Optional[LatentsField] = Field(description="The noise to use (test override for future optional)") + + denoising_start: float = Field(default=0.0, ge=0, le=1, description="") + #denoising_end: float = Field(default=1.0, ge=0, le=1, description="") + + latents: Optional[LatentsField] = Field(description="The latents to use as a base image") # Schema customisation class Config(InvocationConfig): @@ -405,7 +438,9 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: with SilenceWarnings(): # this quenches NSFW nag from diffusers - noise = context.services.latents.get(self.noise.latents_name) + noise = None + if self.noise is not None: + noise = context.services.latents.get(self.noise.latents_name) latent = context.services.latents.get(self.latents.latents_name) # Get the source node id (we are invoking the prepared node) @@ -432,7 +467,8 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet( unet_info.context.model, _lora_loader() ), unet_info as unet: - noise = noise.to(device=unet.device, dtype=unet.dtype) + if noise is not None: + noise = noise.to(device=unet.device, dtype=unet.dtype) latent = latent.to(device=unet.device, dtype=unet.dtype) scheduler = get_scheduler( @@ -448,28 +484,30 @@ class 
LatentsToLatentsInvocation(TextToLatentsInvocation): model=pipeline, context=context, control_input=self.control, - latents_shape=noise.shape, + latents_shape=latent.shape, # do_classifier_free_guidance=(self.cfg_scale >= 1.0)) do_classifier_free_guidance=True, exit_stack=exit_stack, ) # TODO: Verify the noise is the right size - initial_latents = ( - latent if self.strength < 1.0 else torch.zeros_like(latent, device=unet.device, dtype=latent.dtype) - ) + initial_latents = latent + if self.denoising_start <= 0.0: + initial_latents = torch.zeros_like(latent, device=unet.device, dtype=latent.dtype) - timesteps, _ = pipeline.get_img2img_timesteps( - self.steps, - self.strength, + num_inference_steps, timesteps = self.init_scheduler( + scheduler, device=unet.device, + steps=self.steps, + denoising_start=self.denoising_start, + denoising_end=self.denoising_end, ) result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( latents=initial_latents, timesteps=timesteps, noise=noise, - num_inference_steps=self.steps, + num_inference_steps=num_inference_steps, conditioning_data=conditioning_data, control_data=control_data, # list[ControlNetData] callback=step_callback, diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 8a7616f1f1..ed1c8deeb5 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -340,33 +340,39 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): if xformers is available, use it, otherwise use sliced attention. """ config = InvokeAIAppConfig.get_config() - if torch.cuda.is_available() and is_xformers_available() and not config.disable_xformers: - self.enable_xformers_memory_efficient_attention() + if self.unet.device.type == "cuda": + if is_xformers_available() and not config.disable_xformers: + self.enable_xformers_memory_efficient_attention() + return + elif hasattr(torch.nn.functional, "scaled_dot_product_attention"): + # diffusers enable sdp automatically + return + + + if self.device.type == "cpu" or self.device.type == "mps": + mem_free = psutil.virtual_memory().free + elif self.device.type == "cuda": + mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.device)) else: - if self.device.type == "cpu" or self.device.type == "mps": - mem_free = psutil.virtual_memory().free - elif self.device.type == "cuda": - mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.device)) - else: - raise ValueError(f"unrecognized device {self.device}") - # input tensor of [1, 4, h/8, w/8] - # output tensor of [16, (h/8 * w/8), (h/8 * w/8)] - bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4 - max_size_required_for_baddbmm = ( - 16 - * latents.size(dim=2) - * latents.size(dim=3) - * latents.size(dim=2) - * latents.size(dim=3) - * bytes_per_element_needed_for_baddbmm_duplication - ) - if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code - self.enable_attention_slicing(slice_size="max") - elif torch.backends.mps.is_available(): - # diffusers recommends always enabling for mps - self.enable_attention_slicing(slice_size="max") - else: - self.disable_attention_slicing() + raise ValueError(f"unrecognized device {self.device}") + # input tensor of [1, 4, h/8, w/8] + # output tensor of [16, (h/8 * w/8), (h/8 * w/8)] + bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4 + max_size_required_for_baddbmm = ( + 
16 + * latents.size(dim=2) + * latents.size(dim=3) + * latents.size(dim=2) + * latents.size(dim=3) + * bytes_per_element_needed_for_baddbmm_duplication + ) + if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code + self.enable_attention_slicing(slice_size="max") + elif torch.backends.mps.is_available(): + # diffusers recommends always enabling for mps + self.enable_attention_slicing(slice_size="max") + else: + self.disable_attention_slicing() def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False): # overridden method; types match the superclass. @@ -398,7 +404,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): num_inference_steps: int, conditioning_data: ConditioningData, *, - noise: torch.Tensor, + noise: Optional[torch.Tensor], timesteps=None, additional_guidance: List[Callable] = None, run_id=None, @@ -434,7 +440,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): timesteps, conditioning_data: ConditioningData, *, - noise: torch.Tensor, + noise: Optional[torch.Tensor], run_id: str = None, additional_guidance: List[Callable] = None, control_data: List[ControlNetData] = None, @@ -457,8 +463,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): dtype=timesteps.dtype, device=self._model_group.device_for(self.unet), ) - #latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers - latents = self.scheduler.add_noise(latents, noise, batched_t) + if noise is not None: + #latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers + latents = self.scheduler.add_noise(latents, noise, batched_t) yield PipelineIntermediateState( run_id=run_id, From 1db2c93f754ca4506e6cd0a5d1eaed0c6d059095 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 7 Aug 2023 21:27:32 +0300 Subject: [PATCH 04/67] Fix preview, inpaint --- invokeai/app/invocations/generate.py | 12 ++-- invokeai/app/invocations/latent.py | 10 ++-- invokeai/app/util/step_callback.py | 60 ++++++++++++++----- .../stable_diffusion/diffusers_pipeline.py | 12 ---- 4 files changed, 58 insertions(+), 36 deletions(-) diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py index d48c9f922e..1239d578d9 100644 --- a/invokeai/app/invocations/generate.py +++ b/invokeai/app/invocations/generate.py @@ -16,7 +16,7 @@ from ..util.step_callback import stable_diffusion_step_callback from .baseinvocation import BaseInvocation, InvocationConfig, InvocationContext from .image import ImageOutput -from ...backend.model_management.lora import ModelPatcher +from ...backend.model_management import ModelPatcher, BaseModelType from ...backend.stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline from .model import UNetField, VaeField from .compel import ConditioningField @@ -140,6 +140,7 @@ class InpaintInvocation(BaseInvocation): self, context: InvocationContext, source_node_id: str, + base_model: BaseModelType, intermediate_state: PipelineIntermediateState, ) -> None: stable_diffusion_step_callback( @@ -147,15 +148,16 @@ class InpaintInvocation(BaseInvocation): intermediate_state=intermediate_state, node=self.dict(), source_node_id=source_node_id, + base_model=base_model, ) def get_conditioning(self, context, unet): positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name) - c = positive_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype) - 
extra_conditioning_info = positive_cond_data.conditionings[0].extra_conditioning + c = positive_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype) + extra_conditioning_info = c.extra_conditioning negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name) - uc = negative_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype) + uc = negative_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype) return (uc, c, extra_conditioning_info) @@ -225,7 +227,7 @@ class InpaintInvocation(BaseInvocation): scheduler=scheduler, init_image=image, mask_image=mask, - step_callback=partial(self.dispatch_progress, context, source_node_id), + step_callback=partial(self.dispatch_progress, context, source_node_id, self.unet.unet.base_model), **self.dict( exclude={"positive_conditioning", "negative_conditioning", "scheduler", "image", "mask"} ), # Shorthand for passing all of the parameters above manually diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index fef3bcbf6f..25e411074a 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -24,7 +24,7 @@ from ...backend.stable_diffusion.diffusers_pipeline import ( ) from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP -from ...backend.model_management import ModelPatcher +from ...backend.model_management import ModelPatcher, BaseModelType from ...backend.util.devices import choose_torch_device, torch_dtype, choose_precision from ..models.image import ImageCategory, ImageField, ResourceOrigin from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext @@ -160,12 +160,14 @@ class TextToLatentsInvocation(BaseInvocation): context: InvocationContext, source_node_id: str, intermediate_state: PipelineIntermediateState, + base_model: BaseModelType, ) -> None: stable_diffusion_step_callback( context=context, intermediate_state=intermediate_state, node=self.dict(), source_node_id=source_node_id, + base_model=base_model, ) def get_conditioning_data( @@ -340,7 +342,7 @@ class TextToLatentsInvocation(BaseInvocation): source_node_id = graph_execution_state.prepared_source_mapping[self.id] def step_callback(state: PipelineIntermediateState): - self.dispatch_progress(context, source_node_id, state) + self.dispatch_progress(context, source_node_id, state, self.unet.unet.base_model) def _lora_loader(): for lora in self.unet.loras: @@ -379,7 +381,7 @@ class TextToLatentsInvocation(BaseInvocation): do_classifier_free_guidance=True, exit_stack=exit_stack, ) - + num_inference_steps, timesteps = self.init_scheduler( scheduler, device=unet.device, @@ -448,7 +450,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): source_node_id = graph_execution_state.prepared_source_mapping[self.id] def step_callback(state: PipelineIntermediateState): - self.dispatch_progress(context, source_node_id, state) + self.dispatch_progress(context, source_node_id, state, self.unet.unet.base_model) def _lora_loader(): for lora in self.unet.loras: diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index 994d83e705..aae06913fd 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -7,6 +7,7 @@ from ...backend.util.util import image_to_dataURL from ...backend.generator.base import Generator from ...backend.stable_diffusion import 
PipelineIntermediateState from invokeai.app.services.config import InvokeAIAppConfig +from ...backend.model_management.models import BaseModelType def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix=None): @@ -29,6 +30,7 @@ def stable_diffusion_step_callback( intermediate_state: PipelineIntermediateState, node: dict, source_node_id: str, + base_model: BaseModelType, ): if context.services.queue.is_canceled(context.graph_execution_state_id): raise CanceledException @@ -56,23 +58,51 @@ def stable_diffusion_step_callback( # TODO: only output a preview image when requested - # origingally adapted from code by @erucipe and @keturn here: - # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 + if base_model in [BaseModelType.StableDiffusionXL, BaseModelType.StableDiffusionXLRefiner]: + sdxl_latent_rgb_factors = torch.tensor( + [ + # R G B + [0.3816, 0.4930, 0.5320], + [-0.3753, 0.1631, 0.1739], + [0.1770, 0.3588, -0.2048], + [-0.4350, -0.2644, -0.4289], + ], + dtype=sample.dtype, + device=sample.device, + ) - # these updated numbers for v1.5 are from @torridgristle - v1_5_latent_rgb_factors = torch.tensor( - [ - # R G B - [0.3444, 0.1385, 0.0670], # L1 - [0.1247, 0.4027, 0.1494], # L2 - [-0.3192, 0.2513, 0.2103], # L3 - [-0.1307, -0.1874, -0.7445], # L4 - ], - dtype=sample.dtype, - device=sample.device, - ) + sdxl_smooth_matrix = torch.tensor( + [ + # [ 0.0478, 0.1285, 0.0478], + # [ 0.1285, 0.2948, 0.1285], + # [ 0.0478, 0.1285, 0.0478], + [0.0358, 0.0964, 0.0358], + [0.0964, 0.4711, 0.0964], + [0.0358, 0.0964, 0.0358], + ], + dtype=sample.dtype, + device=sample.device, + ) - image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors) + image = sample_to_lowres_estimated_image(sample, sdxl_latent_rgb_factors, sdxl_smooth_matrix) + else: + # origingally adapted from code by @erucipe and @keturn here: + # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 + + # these updated numbers for v1.5 are from @torridgristle + v1_5_latent_rgb_factors = torch.tensor( + [ + # R G B + [0.3444, 0.1385, 0.0670], # L1 + [0.1247, 0.4027, 0.1494], # L2 + [-0.3192, 0.2513, 0.2103], # L3 + [-0.1307, -0.1874, -0.7445], # L4 + ], + dtype=sample.dtype, + device=sample.device, + ) + + image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors) (width, height) = image.size width *= 8 diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index ed1c8deeb5..9d080e648d 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -50,7 +50,6 @@ from .offloading import FullyLoadedModelGroup, ModelGroup @dataclass class PipelineIntermediateState: - run_id: str step: int timestep: int latents: torch.Tensor @@ -407,7 +406,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): noise: Optional[torch.Tensor], timesteps=None, additional_guidance: List[Callable] = None, - run_id=None, callback: Callable[[PipelineIntermediateState], None] = None, control_data: List[ControlNetData] = None, ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: @@ -427,7 +425,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): timesteps, conditioning_data, noise=noise, - run_id=run_id, additional_guidance=additional_guidance, control_data=control_data, callback=callback, @@ -441,13 +438,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): 
conditioning_data: ConditioningData, *, noise: Optional[torch.Tensor], - run_id: str = None, additional_guidance: List[Callable] = None, control_data: List[ControlNetData] = None, ): self._adjust_memory_efficient_attention(latents) - if run_id is None: - run_id = secrets.token_urlsafe(self.ID_LENGTH) if additional_guidance is None: additional_guidance = [] extra_conditioning_info = conditioning_data.extra @@ -468,7 +462,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): latents = self.scheduler.add_noise(latents, noise, batched_t) yield PipelineIntermediateState( - run_id=run_id, step=-1, timestep=self.scheduler.config.num_train_timesteps, latents=latents, @@ -507,7 +500,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # self.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) yield PipelineIntermediateState( - run_id=run_id, step=i, timestep=int(t), latents=latents, @@ -619,7 +611,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data: ConditioningData, *, callback: Callable[[PipelineIntermediateState], None] = None, - run_id=None, noise_func=None, seed=None, ) -> InvokeAIStableDiffusionPipelineOutput: @@ -645,7 +636,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data, strength, noise, - run_id, callback, ) @@ -678,7 +668,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data: ConditioningData, *, callback: Callable[[PipelineIntermediateState], None] = None, - run_id=None, noise_func=None, seed=None, ) -> InvokeAIStableDiffusionPipelineOutput: @@ -737,7 +726,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): noise=noise, timesteps=timesteps, additional_guidance=guidance, - run_id=run_id, callback=callback, ) finally: From 492bfe002a092e7ec3a5c892ddbb5e84213cdf81 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 8 Aug 2023 03:38:42 +0300 Subject: [PATCH 05/67] Remove sdxl t2l/l2l nodes --- invokeai/app/invocations/sdxl.py | 536 +------------------------------ 1 file changed, 3 insertions(+), 533 deletions(-) diff --git a/invokeai/app/invocations/sdxl.py b/invokeai/app/invocations/sdxl.py index 5bcd85db28..a5a1c2c641 100644 --- a/invokeai/app/invocations/sdxl.py +++ b/invokeai/app/invocations/sdxl.py @@ -1,17 +1,10 @@ import torch -import inspect -from tqdm import tqdm -from typing import List, Literal, Optional, Union +from typing import Literal +from pydantic import Field -from pydantic import Field, validator - -from ...backend.model_management import ModelType, SubModelType, ModelPatcher -from invokeai.app.util.step_callback import stable_diffusion_xl_step_callback +from ...backend.model_management import ModelType, SubModelType from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext - from .model import UNetField, ClipField, VaeField, MainModelField, ModelInfo -from .compel import ConditioningField -from .latent import LatentsField, SAMPLER_NAME_VALUES, LatentsOutput, get_scheduler, build_latents_output class SDXLModelLoaderOutput(BaseInvocationOutput): @@ -201,526 +194,3 @@ class SDXLRefinerModelLoaderInvocation(BaseInvocation): ), ), ) - - -# Text to image -class SDXLTextToLatentsInvocation(BaseInvocation): - """Generates latents from conditionings.""" - - type: Literal["t2l_sdxl"] = "t2l_sdxl" - - # Inputs - # fmt: off - positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation") - negative_conditioning: 
Optional[ConditioningField] = Field(description="Negative conditioning for generation") - noise: Optional[LatentsField] = Field(description="The noise to use") - steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image") - cfg_scale: Union[float, List[float]] = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", ) - scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" ) - unet: UNetField = Field(default=None, description="UNet submodel") - denoising_end: float = Field(default=1.0, gt=0, le=1, description="") - # control: Union[ControlField, list[ControlField]] = Field(default=None, description="The control to use") - # seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", ) - # seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'") - # fmt: on - - @validator("cfg_scale") - def ge_one(cls, v): - """validate that all cfg_scale values are >= 1""" - if isinstance(v, list): - for i in v: - if i < 1: - raise ValueError("cfg_scale must be greater than 1") - else: - if v < 1: - raise ValueError("cfg_scale must be greater than 1") - return v - - # Schema customisation - class Config(InvocationConfig): - schema_extra = { - "ui": { - "title": "SDXL Text To Latents", - "tags": ["latents"], - "type_hints": { - "model": "model", - # "cfg_scale": "float", - "cfg_scale": "number", - }, - }, - } - - def dispatch_progress( - self, - context: InvocationContext, - source_node_id: str, - sample, - step, - total_steps, - ) -> None: - stable_diffusion_xl_step_callback( - context=context, - node=self.dict(), - source_node_id=source_node_id, - sample=sample, - step=step, - total_steps=total_steps, - ) - - # based on - # https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375 - @torch.no_grad() - def invoke(self, context: InvocationContext) -> LatentsOutput: - graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) - source_node_id = graph_execution_state.prepared_source_mapping[self.id] - latents = context.services.latents.get(self.noise.latents_name) - - positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name) - prompt_embeds = positive_cond_data.conditionings[0].embeds - pooled_prompt_embeds = positive_cond_data.conditionings[0].pooled_embeds - add_time_ids = positive_cond_data.conditionings[0].add_time_ids - - negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name) - negative_prompt_embeds = negative_cond_data.conditionings[0].embeds - negative_pooled_prompt_embeds = negative_cond_data.conditionings[0].pooled_embeds - add_neg_time_ids = negative_cond_data.conditionings[0].add_time_ids - - scheduler = get_scheduler( - context=context, - scheduler_info=self.unet.scheduler, - scheduler_name=self.scheduler, - ) - - num_inference_steps = self.steps - - def _lora_loader(): - for lora in self.unet.loras: - lora_info = context.services.model_manager.get_model( - **lora.dict(exclude={"weight"}), - context=context, - ) - yield (lora_info.context.model, lora.weight) - del lora_info - return - - unet_info = context.services.model_manager.get_model(**self.unet.unet.dict(), context=context) - do_classifier_free_guidance = True - 
cross_attention_kwargs = None - with ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), unet_info as unet: - scheduler.set_timesteps(num_inference_steps, device=unet.device) - timesteps = scheduler.timesteps - - latents = latents.to(device=unet.device, dtype=unet.dtype) * scheduler.init_noise_sigma - - extra_step_kwargs = dict() - if "eta" in set(inspect.signature(scheduler.step).parameters.keys()): - extra_step_kwargs.update( - eta=0.0, - ) - if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): - extra_step_kwargs.update( - generator=torch.Generator(device=unet.device).manual_seed(0), - ) - - num_warmup_steps = len(timesteps) - self.steps * scheduler.order - - # apply denoising_end - skipped_final_steps = int(round((1 - self.denoising_end) * self.steps)) - num_inference_steps = num_inference_steps - skipped_final_steps - timesteps = timesteps[: num_warmup_steps + scheduler.order * num_inference_steps] - - if not context.services.configuration.sequential_guidance: - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) - add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) - add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) - - prompt_embeds = prompt_embeds.to(device=unet.device, dtype=unet.dtype) - add_text_embeds = add_text_embeds.to(device=unet.device, dtype=unet.dtype) - add_time_ids = add_time_ids.to(device=unet.device, dtype=unet.dtype) - latents = latents.to(device=unet.device, dtype=unet.dtype) - - with tqdm(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - - latent_model_input = scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} - noise_pred = unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self.cfg_scale * (noise_pred_text - noise_pred_uncond) - # del noise_pred_uncond - # del noise_pred_text - - # if do_classifier_free_guidance and guidance_rescale > 0.0: - # # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf - # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) - - # compute the previous noisy sample x_t -> x_t-1 - latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0): - progress_bar.update() - self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps) - # if callback is not None and i % callback_steps == 0: - # callback(i, t, latents) - else: - negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.to(device=unet.device, dtype=unet.dtype) - negative_prompt_embeds = negative_prompt_embeds.to(device=unet.device, dtype=unet.dtype) - add_neg_time_ids = add_neg_time_ids.to(device=unet.device, dtype=unet.dtype) - pooled_prompt_embeds = pooled_prompt_embeds.to(device=unet.device, dtype=unet.dtype) - prompt_embeds = prompt_embeds.to(device=unet.device, dtype=unet.dtype) - add_time_ids = add_time_ids.to(device=unet.device, dtype=unet.dtype) - latents = latents.to(device=unet.device, dtype=unet.dtype) - - with tqdm(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - # latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - - latent_model_input = scheduler.scale_model_input(latents, t) - - # import gc - # gc.collect() - # torch.cuda.empty_cache() - - # predict the noise residual - - added_cond_kwargs = {"text_embeds": negative_pooled_prompt_embeds, "time_ids": add_neg_time_ids} - noise_pred_uncond = unet( - latent_model_input, - t, - encoder_hidden_states=negative_prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - added_cond_kwargs = {"text_embeds": pooled_prompt_embeds, "time_ids": add_time_ids} - noise_pred_text = unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - # perform guidance - noise_pred = noise_pred_uncond + self.cfg_scale * (noise_pred_text - noise_pred_uncond) - - # del noise_pred_text - # del noise_pred_uncond - # import gc - # gc.collect() - # torch.cuda.empty_cache() - - # if do_classifier_free_guidance and guidance_rescale > 0.0: - # # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf - # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) - - # compute the previous noisy sample x_t -> x_t-1 - latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # del noise_pred - # import gc - # gc.collect() - # torch.cuda.empty_cache() - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0): - progress_bar.update() - self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps) - # if callback is not None and i % callback_steps == 0: - # callback(i, t, latents) - - ################# - - latents = latents.to("cpu") - torch.cuda.empty_cache() - - name = f"{context.graph_execution_state_id}__{self.id}" - context.services.latents.save(name, latents) - return build_latents_output(latents_name=name, latents=latents) - - -class SDXLLatentsToLatentsInvocation(BaseInvocation): - """Generates latents from conditionings.""" - - type: Literal["l2l_sdxl"] = "l2l_sdxl" - - # Inputs - # fmt: off - positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation") - negative_conditioning: Optional[ConditioningField] = Field(description="Negative conditioning for generation") - noise: Optional[LatentsField] = Field(description="The noise to use") - steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image") - cfg_scale: Union[float, List[float]] = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", ) - scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" ) - unet: UNetField = Field(default=None, description="UNet submodel") - latents: Optional[LatentsField] = Field(description="Initial latents") - - denoising_start: float = Field(default=0.0, ge=0, le=1, description="") - denoising_end: float = Field(default=1.0, ge=0, le=1, description="") - - # control: Union[ControlField, list[ControlField]] = Field(default=None, description="The control to use") - # seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", ) - # seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'") - # fmt: on - - @validator("cfg_scale") - def ge_one(cls, v): - """validate that all cfg_scale values are >= 1""" - if isinstance(v, list): - for i in v: - if i < 1: - raise ValueError("cfg_scale must be greater than 1") - else: - if v < 1: - raise ValueError("cfg_scale must be greater than 1") - return v - - # Schema customisation - class Config(InvocationConfig): - schema_extra = { - "ui": { - "title": "SDXL Latents to Latents", - "tags": ["latents"], - "type_hints": { - "model": "model", - # "cfg_scale": "float", - "cfg_scale": "number", - }, - }, - } - - def dispatch_progress( - self, - context: InvocationContext, - source_node_id: str, - sample, - step, - total_steps, - ) -> None: - stable_diffusion_xl_step_callback( - context=context, - node=self.dict(), - source_node_id=source_node_id, - sample=sample, - step=step, - total_steps=total_steps, - ) - - # based on - # https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375 - @torch.no_grad() - def invoke(self, context: InvocationContext) -> LatentsOutput: - 
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) - source_node_id = graph_execution_state.prepared_source_mapping[self.id] - latents = context.services.latents.get(self.latents.latents_name) - - positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name) - prompt_embeds = positive_cond_data.conditionings[0].embeds - pooled_prompt_embeds = positive_cond_data.conditionings[0].pooled_embeds - add_time_ids = positive_cond_data.conditionings[0].add_time_ids - - negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name) - negative_prompt_embeds = negative_cond_data.conditionings[0].embeds - negative_pooled_prompt_embeds = negative_cond_data.conditionings[0].pooled_embeds - add_neg_time_ids = negative_cond_data.conditionings[0].add_time_ids - - scheduler = get_scheduler( - context=context, - scheduler_info=self.unet.scheduler, - scheduler_name=self.scheduler, - ) - - unet_info = context.services.model_manager.get_model( - **self.unet.unet.dict(), - context=context, - ) - - def _lora_loader(): - for lora in self.unet.loras: - lora_info = context.services.model_manager.get_model( - **lora.dict(exclude={"weight"}), - context=context, - ) - yield (lora_info.context.model, lora.weight) - del lora_info - return - - do_classifier_free_guidance = True - cross_attention_kwargs = None - with ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), unet_info as unet: - # apply denoising_start - num_inference_steps = self.steps - scheduler.set_timesteps(num_inference_steps, device=unet.device) - - t_start = int(round(self.denoising_start * num_inference_steps)) - timesteps = scheduler.timesteps[t_start * scheduler.order :] - num_inference_steps = num_inference_steps - t_start - - # apply noise(if provided) - if self.noise is not None and timesteps.shape[0] > 0: - noise = context.services.latents.get(self.noise.latents_name) - latents = scheduler.add_noise(latents, noise, timesteps[:1]) - del noise - - # apply scheduler extra args - extra_step_kwargs = dict() - if "eta" in set(inspect.signature(scheduler.step).parameters.keys()): - extra_step_kwargs.update( - eta=0.0, - ) - if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): - extra_step_kwargs.update( - generator=torch.Generator(device=unet.device).manual_seed(0), - ) - - num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0) - - # apply denoising_end - skipped_final_steps = int(round((1 - self.denoising_end) * self.steps)) - num_inference_steps = num_inference_steps - skipped_final_steps - timesteps = timesteps[: num_warmup_steps + scheduler.order * num_inference_steps] - - if not context.services.configuration.sequential_guidance: - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) - add_text_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) - add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) - - prompt_embeds = prompt_embeds.to(device=unet.device, dtype=unet.dtype) - add_text_embeds = add_text_embeds.to(device=unet.device, dtype=unet.dtype) - add_time_ids = add_time_ids.to(device=unet.device, dtype=unet.dtype) - latents = latents.to(device=unet.device, dtype=unet.dtype) - - with tqdm(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if 
do_classifier_free_guidance else latents - - latent_model_input = scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} - noise_pred = unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self.cfg_scale * (noise_pred_text - noise_pred_uncond) - # del noise_pred_uncond - # del noise_pred_text - - # if do_classifier_free_guidance and guidance_rescale > 0.0: - # # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf - # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) - - # compute the previous noisy sample x_t -> x_t-1 - latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0): - progress_bar.update() - self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps) - # if callback is not None and i % callback_steps == 0: - # callback(i, t, latents) - else: - negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.to(device=unet.device, dtype=unet.dtype) - negative_prompt_embeds = negative_prompt_embeds.to(device=unet.device, dtype=unet.dtype) - add_neg_time_ids = add_neg_time_ids.to(device=unet.device, dtype=unet.dtype) - pooled_prompt_embeds = pooled_prompt_embeds.to(device=unet.device, dtype=unet.dtype) - prompt_embeds = prompt_embeds.to(device=unet.device, dtype=unet.dtype) - add_time_ids = add_time_ids.to(device=unet.device, dtype=unet.dtype) - latents = latents.to(device=unet.device, dtype=unet.dtype) - - with tqdm(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - # latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - - latent_model_input = scheduler.scale_model_input(latents, t) - - # import gc - # gc.collect() - # torch.cuda.empty_cache() - - # predict the noise residual - - added_cond_kwargs = {"text_embeds": negative_pooled_prompt_embeds, "time_ids": add_time_ids} - noise_pred_uncond = unet( - latent_model_input, - t, - encoder_hidden_states=negative_prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - added_cond_kwargs = {"text_embeds": pooled_prompt_embeds, "time_ids": add_time_ids} - noise_pred_text = unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - # perform guidance - noise_pred = noise_pred_uncond + self.cfg_scale * (noise_pred_text - noise_pred_uncond) - - # del noise_pred_text - # del noise_pred_uncond - # import gc - # gc.collect() - # torch.cuda.empty_cache() - - # if do_classifier_free_guidance and guidance_rescale > 0.0: - # # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf - # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) - - # compute the previous noisy sample x_t -> x_t-1 - latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # del noise_pred - # import gc - # gc.collect() - # torch.cuda.empty_cache() - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0): - progress_bar.update() - self.dispatch_progress(context, source_node_id, latents, i, num_inference_steps) - # if callback is not None and i % callback_steps == 0: - # callback(i, t, latents) - - ################# - - latents = latents.to("cpu") - torch.cuda.empty_cache() - - name = f"{context.graph_execution_state_id}__{self.id}" - context.services.latents.save(name, latents) - return build_latents_output(latents_name=name, latents=latents) From 5f29526a8e7bfa99ad45f6c3845cff08e47f4fa0 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 8 Aug 2023 04:00:33 +0300 Subject: [PATCH 06/67] Add seed to latents field --- invokeai/app/invocations/latent.py | 29 ++++++++++++++++++----------- invokeai/app/invocations/noise.py | 6 +++--- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 25e411074a..1fbcb2941a 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -49,6 +49,7 @@ class LatentsField(BaseModel): """A latents field used for passing latents between invocations""" latents_name: Optional[str] = Field(default=None, description="The name of the latents") + seed: Optional[int] = Field(description="Seed used to generate this latents") class Config: schema_extra = {"required": ["latents_name"]} @@ -67,9 +68,9 @@ class LatentsOutput(BaseInvocationOutput): # fmt: on -def build_latents_output(latents_name: str, latents: torch.Tensor): +def build_latents_output(latents_name: str, latents: torch.Tensor, seed: Optional[int]): return LatentsOutput( - latents=LatentsField(latents_name=latents_name), + latents=LatentsField(latents_name=latents_name, seed=seed), width=latents.size()[3] * 8, height=latents.size()[2] * 8, ) @@ -175,6 +176,7 @@ class TextToLatentsInvocation(BaseInvocation): context: InvocationContext, scheduler, unet, + seed, ) -> ConditioningData: positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name) c = positive_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype) @@ -201,7 +203,7 @@ class TextToLatentsInvocation(BaseInvocation): # for ddim scheduler eta=0.0, # ddim_eta # for ancestral and sde schedulers - generator=torch.Generator(device=unet.device).manual_seed(0), + generator=torch.Generator(device=unet.device).manual_seed(seed), ) return conditioning_data @@ -336,6 +338,7 @@ class TextToLatentsInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> LatentsOutput: with SilenceWarnings(): noise = context.services.latents.get(self.noise.latents_name) + seed = self.noise.seed or 0 # Get the source node id (we are invoking the prepared node) graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) @@ -370,7 +373,7 @@ class TextToLatentsInvocation(BaseInvocation): ) pipeline = self.create_pipeline(unet, scheduler) - conditioning_data = self.get_conditioning_data(context, scheduler, unet) + conditioning_data = self.get_conditioning_data(context, 
scheduler, unet, seed) control_data = self.prep_control_data( model=pipeline, @@ -407,7 +410,7 @@ class TextToLatentsInvocation(BaseInvocation): name = f"{context.graph_execution_state_id}__{self.id}" context.services.latents.save(name, result_latents) - return build_latents_output(latents_name=name, latents=result_latents) + return build_latents_output(latents_name=name, latents=result_latents, seed=seed) class LatentsToLatentsInvocation(TextToLatentsInvocation): @@ -440,10 +443,14 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: with SilenceWarnings(): # this quenches NSFW nag from diffusers + latent = context.services.latents.get(self.latents.latents_name) + seed = self.latents.seed or 0 + noise = None if self.noise is not None: noise = context.services.latents.get(self.noise.latents_name) - latent = context.services.latents.get(self.latents.latents_name) + if self.noise.seed is not None: + seed = self.noise.seed # Get the source node id (we are invoking the prepared node) graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) @@ -480,7 +487,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): ) pipeline = self.create_pipeline(unet, scheduler) - conditioning_data = self.get_conditioning_data(context, scheduler, unet) + conditioning_data = self.get_conditioning_data(context, scheduler, unet, seed) control_data = self.prep_control_data( model=pipeline, @@ -521,7 +528,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): name = f"{context.graph_execution_state_id}__{self.id}" context.services.latents.save(name, result_latents) - return build_latents_output(latents_name=name, latents=result_latents) + return build_latents_output(latents_name=name, latents=result_latents, seed=seed) # Latent to image @@ -663,7 +670,7 @@ class ResizeLatentsInvocation(BaseInvocation): name = f"{context.graph_execution_state_id}__{self.id}" # context.services.latents.set(name, resized_latents) context.services.latents.save(name, resized_latents) - return build_latents_output(latents_name=name, latents=resized_latents) + return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed) class ScaleLatentsInvocation(BaseInvocation): @@ -705,7 +712,7 @@ class ScaleLatentsInvocation(BaseInvocation): name = f"{context.graph_execution_state_id}__{self.id}" # context.services.latents.set(name, resized_latents) context.services.latents.save(name, resized_latents) - return build_latents_output(latents_name=name, latents=resized_latents) + return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed) class ImageToLatentsInvocation(BaseInvocation): @@ -786,4 +793,4 @@ class ImageToLatentsInvocation(BaseInvocation): name = f"{context.graph_execution_state_id}__{self.id}" latents = latents.to("cpu") context.services.latents.save(name, latents) - return build_latents_output(latents_name=name, latents=latents) + return build_latents_output(latents_name=name, latents=latents, seed=None) diff --git a/invokeai/app/invocations/noise.py b/invokeai/app/invocations/noise.py index fff0f29f14..db64e5b6e5 100644 --- a/invokeai/app/invocations/noise.py +++ b/invokeai/app/invocations/noise.py @@ -71,9 +71,9 @@ class NoiseOutput(BaseInvocationOutput): # fmt: on -def build_noise_output(latents_name: str, latents: torch.Tensor): +def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int): return 
NoiseOutput(
-        noise=LatentsField(latents_name=latents_name),
+        noise=LatentsField(latents_name=latents_name, seed=seed),
         width=latents.size()[3] * 8,
         height=latents.size()[2] * 8,
     )
@@ -132,4 +132,4 @@ class NoiseInvocation(BaseInvocation):
         )
         name = f"{context.graph_execution_state_id}__{self.id}"
         context.services.latents.save(name, noise)
-        return build_noise_output(latents_name=name, latents=noise)
+        return build_noise_output(latents_name=name, latents=noise, seed=self.seed)

From 96b7248051744b8c3b9a077a4e3fddd619892800 Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Tue, 8 Aug 2023 18:50:36 +0300
Subject: [PATCH 07/67] Add mask to l2l

---
 invokeai/app/invocations/latent.py            | 31 ++++++-
 .../stable_diffusion/diffusers_pipeline.py    | 91 +++++++++++++++----
 2 files changed, 102 insertions(+), 20 deletions(-)

diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 1fbcb2941a..3872a04f7e 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -41,6 +41,9 @@ from diffusers.models.attention_processor import (
     XFormersAttnProcessor,
 )

+import torchvision.transforms as T
+from torchvision.transforms.functional import resize as tv_resize
+
 DEFAULT_PRECISION = choose_precision(choose_torch_device())


@@ -397,6 +400,7 @@ class TextToLatentsInvocation(BaseInvocation):
         result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
             latents=torch.zeros_like(noise, dtype=torch_dtype(unet.device)),
             noise=noise,
+            seed=seed,
             timesteps=timesteps,
             num_inference_steps=num_inference_steps,
             conditioning_data=conditioning_data,
@@ -424,7 +428,11 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):

     denoising_start: float = Field(default=0.0, ge=0, le=1, description="")
     #denoising_end: float = Field(default=1.0, ge=0, le=1, description="")
-    latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
+    latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
+
+    mask: Optional[ImageField] = Field(
+        None, description="Mask",
+    )

     # Schema customisation
     class Config(InvocationConfig):
         schema_extra = {
@@ -440,6 +448,22 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
             },
         }

+    def prep_mask_tensor(self, mask, context, latents):
+        if mask is None:
+            return None
+
+        mask_image = context.services.images.get_pil_image(mask.image_name)
+        if mask_image.mode != "L":
+            # FIXME: why do we get passed an RGB image here? We can only use single-channel.
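# [editor's note] prep_mask_tensor (continued below) reduces the mask to a single
# channel, adds a batch dimension, and bilinearly resizes it to the latent grid,
# which is 1/8 of image resolution. A standalone sketch using plain torchvision;
# the 512px input size and variable names are assumptions for illustration:
import torch
import torchvision.transforms as T
from torchvision.transforms.functional import resize as tv_resize
from PIL import Image

mask_image = Image.new("L", (512, 512), 0)   # hypothetical single-channel input mask
mask_tensor = T.ToTensor()(mask_image)       # [1, 512, 512], values in [0, 1]
mask_tensor = mask_tensor.unsqueeze(0)       # [1, 1, 512, 512]
mask_tensor = tv_resize(mask_tensor, [64, 64], T.InterpolationMode.BILINEAR)  # latent HxW = 512 / 8
# [end editor's note]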
+            mask_image = mask_image.convert("L")
+        mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
+        if mask_tensor.dim() == 3:
+            mask_tensor = mask_tensor.unsqueeze(0)
+        mask_tensor = tv_resize(
+            mask_tensor, latents.shape[-2:], T.InterpolationMode.BILINEAR
+        )
+        return mask_tensor
+
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         with SilenceWarnings():  # this quenches NSFW nag from diffusers
@@ -452,6 +476,8 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
                 if self.noise.seed is not None:
                     seed = self.noise.seed

+            mask = self.prep_mask_tensor(self.mask, context, latent)
+
             # Get the source node id (we are invoking the prepared node)
             graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
             source_node_id = graph_execution_state.prepared_source_mapping[self.id]
@@ -479,6 +505,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
             if noise is not None:
                 noise = noise.to(device=unet.device, dtype=unet.dtype)
             latent = latent.to(device=unet.device, dtype=unet.dtype)
+            mask = mask.to(device=unet.device, dtype=unet.dtype)

             scheduler = get_scheduler(
                 context=context,
@@ -516,6 +543,8 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
                 latents=initial_latents,
                 timesteps=timesteps,
                 noise=noise,
+                seed=seed,
+                mask=mask,
                 num_inference_steps=num_inference_steps,
                 conditioning_data=conditioning_data,
                 control_data=control_data,  # list[ControlNetData]
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 9d080e648d..8623ef9bfb 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -100,7 +100,7 @@ class AddsMaskGuidance:
     mask: torch.FloatTensor
     mask_latents: torch.FloatTensor
     scheduler: SchedulerMixin
-    noise: torch.Tensor
+    noise: Optional[torch.Tensor] = None
     _debug: Optional[Callable] = None

     def __call__(self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning) -> BaseOutput:
@@ -131,11 +131,10 @@ class AddsMaskGuidance:
         # some schedulers expect t to be one-dimensional.
         # TODO: file diffusers bug about inconsistency?
         t = einops.repeat(t, "-> batch", batch=batch_size)
-        # Noise shouldn't be re-randomized between steps here. The multistep schedulers
-        # get very confused about what is happening from step to step when we do that.
-        mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t)
-        # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already?
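# [editor's note] The guidance below re-noises the original (masked) latents to
# the current timestep with scheduler.add_noise, reusing one fixed noise tensor
# so multistep schedulers see a consistent trajectory rather than fresh noise
# each step. A toy sketch with a diffusers scheduler; DDIMScheduler and the
# tensor shapes are assumptions for illustration:
import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler()
scheduler.set_timesteps(20)
mask_latents = torch.randn(1, 4, 8, 8)
noise = torch.randn_like(mask_latents)  # sampled once, reused at every step
for t in scheduler.timesteps[:3]:
    noisy = scheduler.add_noise(mask_latents, noise, t.unsqueeze(0))
# [end editor's note]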
- # mask_latents = self.scheduler.scale_model_input(mask_latents, t) + + if self.noise is not None: + mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) + mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) if self._debug: @@ -408,7 +407,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): additional_guidance: List[Callable] = None, callback: Callable[[PipelineIntermediateState], None] = None, control_data: List[ControlNetData] = None, + mask: Optional[torch.Tensor] = None, + seed: Optional[int] = None, ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: + # TODO: if self.scheduler.config.get("cpu_only", False): scheduler_device = torch.device("cpu") else: @@ -417,19 +419,74 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): if timesteps is None: self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device) timesteps = self.scheduler.timesteps + infer_latents_from_embeddings = GeneratorToCallbackinator( self.generate_latents_from_embeddings, PipelineIntermediateState ) - result: PipelineIntermediateState = infer_latents_from_embeddings( - latents, - timesteps, - conditioning_data, - noise=noise, - additional_guidance=additional_guidance, - control_data=control_data, - callback=callback, + + if additional_guidance is None: + additional_guidance = [] + + orig_latents = latents.clone() + + batch_size = latents.shape[0] + batched_t = torch.full( + (batch_size,), + timesteps[0], + dtype=timesteps.dtype, + device=self.unet.device, ) - return result.latents, result.attention_map_saver + + if noise is not None: + #latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers + latents = self.scheduler.add_noise(latents, noise, batched_t) + + else: + # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) + if mask is not None: + noise = torch.randn( + orig_latents.shape, + dtype=torch.float32, + device="cpu", + generator=torch.Generator(device="cpu").manual_seed(seed or 0), + ).to(device=orig_latents.device, dtype=orig_latents.dtype) + + latents = self.scheduler.add_noise(latents, noise, batched_t) + latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)) + + + if mask is not None: + if is_inpainting_model(self.unet): + # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint + # (that's why there's a mask!) but it seems to really want that blanked out. + #masked_latents = latents * torch.where(mask < 0.5, 1, 0) TODO: inpaint/outpaint/infill + + # TODO: we should probably pass this in so we don't have to try/finally around setting it. 
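# [editor's note] Whichever branch runs below, the unmasked region is ultimately
# restored with torch.lerp(orig, denoised, mask): where mask == 0 the original
# latents survive, where mask == 1 the newly denoised sample wins. A tiny sketch
# of that blend (all shapes hypothetical):
import torch

orig = torch.randn(1, 4, 8, 8)        # latents of the source image
denoised = torch.randn(1, 4, 8, 8)    # latents after denoising
mask = torch.zeros(1, 1, 8, 8)
mask[..., 2:6, 2:6] = 1.0             # 1 marks the region to repaint
blended = torch.lerp(orig, denoised, mask)  # broadcasts the mask over channels
# [end editor's note]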
+                self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(
+                    self._unet_forward, mask, orig_latents
+                )
+            else:
+                additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise))
+
+        try:
+            result: PipelineIntermediateState = infer_latents_from_embeddings(
+                latents,
+                timesteps,
+                conditioning_data,
+                additional_guidance=additional_guidance,
+                control_data=control_data,
+                callback=callback,
+            )
+        finally:
+            self.invokeai_diffuser.model_forward_callback = self._unet_forward
+
+        latents = result.latents
+
+        # restore unmasked part
+        if mask is not None:
+            latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype))
+
+        return latents, result.attention_map_saver

     def generate_latents_from_embeddings(
         self,
@@ -437,7 +494,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         timesteps,
         conditioning_data: ConditioningData,
         *,
-        noise: Optional[torch.Tensor],
         additional_guidance: List[Callable] = None,
         control_data: List[ControlNetData] = None,
     ):
@@ -457,9 +513,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             dtype=timesteps.dtype,
             device=self._model_group.device_for(self.unet),
         )
-        if noise is not None:
-            #latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers
-            latents = self.scheduler.add_noise(latents, noise, batched_t)

         yield PipelineIntermediateState(
             step=-1,

From da0184a786177fe0e281f4c4b1a671ac5839b38d Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Tue, 8 Aug 2023 20:01:49 +0300
Subject: [PATCH 08/67] Invert mask, fix l2l on no mask connected, remove zeroing latents on zero start

---
 invokeai/app/invocations/latent.py | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 3872a04f7e..d658637aad 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -462,7 +462,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
         mask_tensor = tv_resize(
             mask_tensor, latents.shape[-2:], T.InterpolationMode.BILINEAR
         )
-        return mask_tensor
+        return 1 - mask_tensor

     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> LatentsOutput:
@@ -502,10 +502,11 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
         with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
             unet_info.context.model, _lora_loader()
         ), unet_info as unet:
+            latent = latent.to(device=unet.device, dtype=unet.dtype)
             if noise is not None:
                 noise = noise.to(device=unet.device, dtype=unet.dtype)
-            latent = latent.to(device=unet.device, dtype=unet.dtype)
-            mask = mask.to(device=unet.device, dtype=unet.dtype)
+            if mask is not None:
+                mask = mask.to(device=unet.device, dtype=unet.dtype)

             scheduler = get_scheduler(
                 context=context,
@@ -526,11 +527,6 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
                 exit_stack=exit_stack,
             )

-            # TODO: Verify the noise is the right size
-            initial_latents = latent
-            if self.denoising_start <= 0.0:
-                initial_latents = torch.zeros_like(latent, device=unet.device, dtype=latent.dtype)
-
             num_inference_steps, timesteps = self.init_scheduler(
                 scheduler,
                 device=unet.device,
@@ -540,7 +536,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
             )

             result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
-                latents=initial_latents,
+                latents=latent,
                 timesteps=timesteps,
                 noise=noise,
                 seed=seed,

From a7e44678fb9f4242916045d730a711d265c8c57f Mon Sep 17
From: Sergey Borisov Date: Tue, 8 Aug 2023 20:49:01 +0300 Subject: [PATCH 09/67] Remove legacy/unused code --- invokeai/app/invocations/generate.py | 253 -------- invokeai/app/util/step_callback.py | 55 -- invokeai/backend/__init__.py | 1 - invokeai/backend/generator/__init__.py | 12 - invokeai/backend/generator/base.py | 559 ------------------ invokeai/backend/generator/img2img.py | 92 --- invokeai/backend/generator/inpaint.py | 379 ------------ .../stable_diffusion/diffusers_pipeline.py | 224 +------ .../diffusion/shared_invokeai_diffusion.py | 70 --- .../backend/stable_diffusion/offloading.py | 253 -------- 10 files changed, 5 insertions(+), 1893 deletions(-) delete mode 100644 invokeai/app/invocations/generate.py delete mode 100644 invokeai/backend/generator/__init__.py delete mode 100644 invokeai/backend/generator/base.py delete mode 100644 invokeai/backend/generator/img2img.py delete mode 100644 invokeai/backend/generator/inpaint.py delete mode 100644 invokeai/backend/stable_diffusion/offloading.py diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py deleted file mode 100644 index 1239d578d9..0000000000 --- a/invokeai/app/invocations/generate.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) - -from functools import partial -from typing import Literal, Optional, get_args - -import torch -from pydantic import Field - -from invokeai.app.models.image import ColorField, ImageCategory, ImageField, ResourceOrigin -from invokeai.app.util.misc import SEED_MAX, get_random_seed -from invokeai.backend.generator.inpaint import infill_methods - -from ...backend.generator import Inpaint, InvokeAIGenerator -from ...backend.stable_diffusion import PipelineIntermediateState -from ..util.step_callback import stable_diffusion_step_callback -from .baseinvocation import BaseInvocation, InvocationConfig, InvocationContext -from .image import ImageOutput - -from ...backend.model_management import ModelPatcher, BaseModelType -from ...backend.stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline -from .model import UNetField, VaeField -from .compel import ConditioningField -from contextlib import contextmanager, ExitStack, ContextDecorator - -SAMPLER_NAME_VALUES = Literal[tuple(InvokeAIGenerator.schedulers())] -INFILL_METHODS = Literal[tuple(infill_methods())] -DEFAULT_INFILL_METHOD = "patchmatch" if "patchmatch" in get_args(INFILL_METHODS) else "tile" - - -from .latent import get_scheduler - - -class OldModelContext(ContextDecorator): - model: StableDiffusionGeneratorPipeline - - def __init__(self, model): - self.model = model - - def __enter__(self): - return self.model - - def __exit__(self, *exc): - return False - - -class OldModelInfo: - name: str - hash: str - context: OldModelContext - - def __init__(self, name: str, hash: str, model: StableDiffusionGeneratorPipeline): - self.name = name - self.hash = hash - self.context = OldModelContext( - model=model, - ) - - -class InpaintInvocation(BaseInvocation): - """Generates an image using inpaint.""" - - type: Literal["inpaint"] = "inpaint" - - positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation") - negative_conditioning: Optional[ConditioningField] = Field(description="Negative conditioning for generation") - seed: int = Field( - ge=0, le=SEED_MAX, description="The seed to use (omit for random)", default_factory=get_random_seed - ) - steps: int = Field(default=30, gt=0, description="The 
number of steps to use to generate the image") - width: int = Field( - default=512, - multiple_of=8, - gt=0, - description="The width of the resulting image", - ) - height: int = Field( - default=512, - multiple_of=8, - gt=0, - description="The height of the resulting image", - ) - cfg_scale: float = Field( - default=7.5, - ge=1, - description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", - ) - scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use") - unet: UNetField = Field(default=None, description="UNet model") - vae: VaeField = Field(default=None, description="Vae model") - - # Inputs - image: Optional[ImageField] = Field(description="The input image") - strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the original image") - fit: bool = Field( - default=True, - description="Whether or not the result should be fit to the aspect ratio of the input image", - ) - - # Inputs - mask: Optional[ImageField] = Field(description="The mask") - seam_size: int = Field(default=96, ge=1, description="The seam inpaint size (px)") - seam_blur: int = Field(default=16, ge=0, description="The seam inpaint blur radius (px)") - seam_strength: float = Field(default=0.75, gt=0, le=1, description="The seam inpaint strength") - seam_steps: int = Field(default=30, ge=1, description="The number of steps to use for seam inpaint") - tile_size: int = Field(default=32, ge=1, description="The tile infill method size (px)") - infill_method: INFILL_METHODS = Field( - default=DEFAULT_INFILL_METHOD, - description="The method used to infill empty regions (px)", - ) - inpaint_width: Optional[int] = Field( - default=None, - multiple_of=8, - gt=0, - description="The width of the inpaint region (px)", - ) - inpaint_height: Optional[int] = Field( - default=None, - multiple_of=8, - gt=0, - description="The height of the inpaint region (px)", - ) - inpaint_fill: Optional[ColorField] = Field( - default=ColorField(r=127, g=127, b=127, a=255), - description="The solid infill method color", - ) - inpaint_replace: float = Field( - default=0.0, - ge=0.0, - le=1.0, - description="The amount by which to replace masked areas with latent noise", - ) - - # Schema customisation - class Config(InvocationConfig): - schema_extra = { - "ui": {"tags": ["stable-diffusion", "image"], "title": "Inpaint"}, - } - - def dispatch_progress( - self, - context: InvocationContext, - source_node_id: str, - base_model: BaseModelType, - intermediate_state: PipelineIntermediateState, - ) -> None: - stable_diffusion_step_callback( - context=context, - intermediate_state=intermediate_state, - node=self.dict(), - source_node_id=source_node_id, - base_model=base_model, - ) - - def get_conditioning(self, context, unet): - positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name) - c = positive_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype) - extra_conditioning_info = c.extra_conditioning - - negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name) - uc = negative_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype) - - return (uc, c, extra_conditioning_info) - - @contextmanager - def load_model_old_way(self, context, scheduler): - def _lora_loader(): - for lora in self.unet.loras: - lora_info = context.services.model_manager.get_model( - **lora.dict(exclude={"weight"}), - context=context, - ) - yield (lora_info.context.model, lora.weight) - del 
lora_info - return - - unet_info = context.services.model_manager.get_model( - **self.unet.unet.dict(), - context=context, - ) - vae_info = context.services.model_manager.get_model( - **self.vae.vae.dict(), - context=context, - ) - - with vae_info as vae, ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), unet_info as unet: - device = context.services.model_manager.mgr.cache.execution_device - dtype = context.services.model_manager.mgr.cache.precision - - pipeline = StableDiffusionGeneratorPipeline( - vae=vae, - text_encoder=None, - tokenizer=None, - unet=unet, - scheduler=scheduler, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - precision="float16" if dtype == torch.float16 else "float32", - execution_device=device, - ) - - yield OldModelInfo( - name=self.unet.unet.model_name, - hash="", - model=pipeline, - ) - - def invoke(self, context: InvocationContext) -> ImageOutput: - image = None if self.image is None else context.services.images.get_pil_image(self.image.image_name) - mask = None if self.mask is None else context.services.images.get_pil_image(self.mask.image_name) - - # Get the source node id (we are invoking the prepared node) - graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) - source_node_id = graph_execution_state.prepared_source_mapping[self.id] - - scheduler = get_scheduler( - context=context, - scheduler_info=self.unet.scheduler, - scheduler_name=self.scheduler, - ) - - with self.load_model_old_way(context, scheduler) as model: - conditioning = self.get_conditioning(context, model.context.model.unet) - - outputs = Inpaint(model).generate( - conditioning=conditioning, - scheduler=scheduler, - init_image=image, - mask_image=mask, - step_callback=partial(self.dispatch_progress, context, source_node_id, self.unet.unet.base_model), - **self.dict( - exclude={"positive_conditioning", "negative_conditioning", "scheduler", "image", "mask"} - ), # Shorthand for passing all of the parameters above manually - ) - - # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object - # each time it is called. We only need the first one. 
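# [editor's note] The legacy API being deleted here modeled generation as a
# possibly-infinite iterator, so callers pulled results with next(). A minimal
# illustration of that pattern; the names are illustrative, not InvokeAI API:
import itertools
from typing import Iterator, Optional

def generate(iterations: Optional[int] = 1) -> Iterator[int]:
    counter = range(iterations) if iterations else itertools.count()
    for i in counter:
        yield i  # stand-in for one generated image

first_result = next(generate(iterations=None))  # take one result from an endless stream
# [end editor's note]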
- generator_output = next(outputs) - - image_dto = context.services.images.create( - image=generator_output.image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - session_id=context.graph_execution_state_id, - node_id=self.id, - is_intermediate=self.is_intermediate, - ) - - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index aae06913fd..5f802c796a 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -4,7 +4,6 @@ from invokeai.app.models.exceptions import CanceledException from invokeai.app.models.image import ProgressImage from ..invocations.baseinvocation import InvocationContext from ...backend.util.util import image_to_dataURL -from ...backend.generator.base import Generator from ...backend.stable_diffusion import PipelineIntermediateState from invokeai.app.services.config import InvokeAIAppConfig from ...backend.model_management.models import BaseModelType @@ -118,57 +117,3 @@ def stable_diffusion_step_callback( step=intermediate_state.step, total_steps=node["steps"], ) - - -def stable_diffusion_xl_step_callback( - context: InvocationContext, - node: dict, - source_node_id: str, - sample, - step, - total_steps, -): - if context.services.queue.is_canceled(context.graph_execution_state_id): - raise CanceledException - - sdxl_latent_rgb_factors = torch.tensor( - [ - # R G B - [0.3816, 0.4930, 0.5320], - [-0.3753, 0.1631, 0.1739], - [0.1770, 0.3588, -0.2048], - [-0.4350, -0.2644, -0.4289], - ], - dtype=sample.dtype, - device=sample.device, - ) - - sdxl_smooth_matrix = torch.tensor( - [ - # [ 0.0478, 0.1285, 0.0478], - # [ 0.1285, 0.2948, 0.1285], - # [ 0.0478, 0.1285, 0.0478], - [0.0358, 0.0964, 0.0358], - [0.0964, 0.4711, 0.0964], - [0.0358, 0.0964, 0.0358], - ], - dtype=sample.dtype, - device=sample.device, - ) - - image = sample_to_lowres_estimated_image(sample, sdxl_latent_rgb_factors, sdxl_smooth_matrix) - - (width, height) = image.size - width *= 8 - height *= 8 - - dataURL = image_to_dataURL(image, image_format="JPEG") - - context.services.events.emit_generator_progress( - graph_execution_state_id=context.graph_execution_state_id, - node=node, - source_node_id=source_node_id, - progress_image=ProgressImage(width=width, height=height, dataURL=dataURL), - step=step, - total_steps=total_steps, - ) diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index aa2a1f1ca6..a4e6f180ae 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -1,6 +1,5 @@ """ Initialization file for invokeai.backend """ -from .generator import InvokeAIGeneratorBasicParams, InvokeAIGenerator, InvokeAIGeneratorOutput, Img2Img, Inpaint from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo from .model_management.models import SilenceWarnings diff --git a/invokeai/backend/generator/__init__.py b/invokeai/backend/generator/__init__.py deleted file mode 100644 index 8a7f1c9167..0000000000 --- a/invokeai/backend/generator/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -Initialization file for the invokeai.generator package -""" -from .base import ( - InvokeAIGenerator, - InvokeAIGeneratorBasicParams, - InvokeAIGeneratorOutput, - Img2Img, - Inpaint, - Generator, -) -from .inpaint import infill_methods diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py 
deleted file mode 100644 index af3231a7d1..0000000000 --- a/invokeai/backend/generator/base.py +++ /dev/null @@ -1,559 +0,0 @@ -""" -Base class for invokeai.backend.generator.* -including img2img, txt2img, and inpaint -""" -from __future__ import annotations - -import itertools -import dataclasses -import diffusers -import os -import random -import traceback -from abc import ABCMeta -from argparse import Namespace -from contextlib import nullcontext - -import cv2 -import numpy as np -import torch -from PIL import Image, ImageChops, ImageFilter -from accelerate.utils import set_seed -from diffusers import DiffusionPipeline -from tqdm import trange -from typing import Callable, List, Iterator, Optional, Type, Union -from dataclasses import dataclass, field -from diffusers.schedulers import SchedulerMixin as Scheduler - -import invokeai.backend.util.logging as logger -from ..image_util import configure_model_padding -from ..util.util import rand_perlin_2d -from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline -from ..stable_diffusion.schedulers import SCHEDULER_MAP - -downsampling = 8 - - -@dataclass -class InvokeAIGeneratorBasicParams: - seed: Optional[int] = None - width: int = 512 - height: int = 512 - cfg_scale: float = 7.5 - steps: int = 20 - ddim_eta: float = 0.0 - scheduler: str = "ddim" - precision: str = "float16" - perlin: float = 0.0 - threshold: float = 0.0 - seamless: bool = False - seamless_axes: List[str] = field(default_factory=lambda: ["x", "y"]) - h_symmetry_time_pct: Optional[float] = None - v_symmetry_time_pct: Optional[float] = None - variation_amount: float = 0.0 - with_variations: list = field(default_factory=list) - - -@dataclass -class InvokeAIGeneratorOutput: - """ - InvokeAIGeneratorOutput is a dataclass that contains the outputs of a generation - operation, including the image, its seed, the model name used to generate the image - and the model hash, as well as all the generate() parameters that went into - generating the image (in .params, also available as attributes) - """ - - image: Image.Image - seed: int - model_hash: str - attention_maps_images: List[Image.Image] - params: Namespace - - -# we are interposing a wrapper around the original Generator classes so that -# old code that calls Generate will continue to work. -class InvokeAIGenerator(metaclass=ABCMeta): - def __init__( - self, - model_info: dict, - params: InvokeAIGeneratorBasicParams = InvokeAIGeneratorBasicParams(), - **kwargs, - ): - self.model_info = model_info - self.params = params - self.kwargs = kwargs - - def generate( - self, - conditioning: tuple, - scheduler, - callback: Optional[Callable] = None, - step_callback: Optional[Callable] = None, - iterations: int = 1, - **keyword_args, - ) -> Iterator[InvokeAIGeneratorOutput]: - """ - Return an iterator across the indicated number of generations. - Each time the iterator is called it will return an InvokeAIGeneratorOutput - object. Use like this: - - outputs = txt2img.generate(prompt='banana sushi', iterations=5) - for result in outputs: - print(result.image, result.seed) - - In the typical case of wanting to get just a single image, iterations - defaults to 1 and do: - - output = next(txt2img.generate(prompt='banana sushi') - - Pass None to get an infinite iterator. 
- - outputs = txt2img.generate(prompt='banana sushi', iterations=None) - for o in outputs: - print(o.image, o.seed) - - """ - generator_args = dataclasses.asdict(self.params) - generator_args.update(keyword_args) - - model_info = self.model_info - model_name = model_info.name - model_hash = model_info.hash - with model_info.context as model: - gen_class = self._generator_class() - generator = gen_class(model, self.params.precision, **self.kwargs) - if self.params.variation_amount > 0: - generator.set_variation( - generator_args.get("seed"), - generator_args.get("variation_amount"), - generator_args.get("with_variations"), - ) - - if isinstance(model, DiffusionPipeline): - for component in [model.unet, model.vae]: - configure_model_padding( - component, generator_args.get("seamless", False), generator_args.get("seamless_axes") - ) - else: - configure_model_padding( - model, generator_args.get("seamless", False), generator_args.get("seamless_axes") - ) - - iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1) - for i in iteration_count: - results = generator.generate( - conditioning=conditioning, - step_callback=step_callback, - sampler=scheduler, - **generator_args, - ) - output = InvokeAIGeneratorOutput( - image=results[0][0], - seed=results[0][1], - attention_maps_images=results[0][2], - model_hash=model_hash, - params=Namespace(model_name=model_name, **generator_args), - ) - if callback: - callback(output) - yield output - - @classmethod - def schedulers(self) -> List[str]: - """ - Return list of all the schedulers that we currently handle. - """ - return list(SCHEDULER_MAP.keys()) - - def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]): - return generator_class(model, self.params.precision) - - @classmethod - def _generator_class(cls) -> Type[Generator]: - """ - In derived classes return the name of the generator to apply. - If you don't override will return the name of the derived - class, which nicely parallels the generator class names. 
- """ - return Generator - - -# ------------------------------------ -class Img2Img(InvokeAIGenerator): - def generate( - self, init_image: Union[Image.Image, torch.FloatTensor], strength: float = 0.75, **keyword_args - ) -> Iterator[InvokeAIGeneratorOutput]: - return super().generate(init_image=init_image, strength=strength, **keyword_args) - - @classmethod - def _generator_class(cls): - from .img2img import Img2Img - - return Img2Img - - -# ------------------------------------ -# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff -class Inpaint(Img2Img): - def generate( - self, - mask_image: Union[Image.Image, torch.FloatTensor], - # Seam settings - when 0, doesn't fill seam - seam_size: int = 96, - seam_blur: int = 16, - seam_strength: float = 0.7, - seam_steps: int = 30, - tile_size: int = 32, - inpaint_replace=False, - infill_method=None, - inpaint_width=None, - inpaint_height=None, - inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF), - **keyword_args, - ) -> Iterator[InvokeAIGeneratorOutput]: - return super().generate( - mask_image=mask_image, - seam_size=seam_size, - seam_blur=seam_blur, - seam_strength=seam_strength, - seam_steps=seam_steps, - tile_size=tile_size, - inpaint_replace=inpaint_replace, - infill_method=infill_method, - inpaint_width=inpaint_width, - inpaint_height=inpaint_height, - inpaint_fill=inpaint_fill, - **keyword_args, - ) - - @classmethod - def _generator_class(cls): - from .inpaint import Inpaint - - return Inpaint - - -class Generator: - downsampling_factor: int - latent_channels: int - precision: str - model: DiffusionPipeline - - def __init__(self, model: DiffusionPipeline, precision: str, **kwargs): - self.model = model - self.precision = precision - self.seed = None - self.latent_channels = model.unet.config.in_channels - self.downsampling_factor = downsampling # BUG: should come from model or config - self.perlin = 0.0 - self.threshold = 0 - self.variation_amount = 0 - self.with_variations = [] - self.use_mps_noise = False - self.free_gpu_mem = None - - # this is going to be overridden in img2img.py, txt2img.py and inpaint.py - def get_make_image(self, **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - """ - raise NotImplementedError("image_iterator() must be implemented in a descendent class") - - def set_variation(self, seed, variation_amount, with_variations): - self.seed = seed - self.variation_amount = variation_amount - self.with_variations = with_variations - - def generate( - self, - width, - height, - sampler, - init_image=None, - iterations=1, - seed=None, - image_callback=None, - step_callback=None, - threshold=0.0, - perlin=0.0, - h_symmetry_time_pct=None, - v_symmetry_time_pct=None, - free_gpu_mem: bool = False, - **kwargs, - ): - scope = nullcontext - self.free_gpu_mem = free_gpu_mem - attention_maps_images = [] - attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image()) - make_image = self.get_make_image( - sampler=sampler, - init_image=init_image, - width=width, - height=height, - step_callback=step_callback, - threshold=threshold, - perlin=perlin, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct, - attention_maps_callback=attention_maps_callback, - **kwargs, - ) - results = [] - seed = seed if seed is not None and seed >= 0 else self.new_seed() - first_seed = seed - seed, initial_noise = 
self.generate_initial_noise(seed, width, height) - - # There used to be an additional self.model.ema_scope() here, but it breaks - # the inpaint-1.5 model. Not sure what it did.... ? - with scope(self.model.device.type): - for n in trange(iterations, desc="Generating"): - x_T = None - if self.variation_amount > 0: - set_seed(seed) - target_noise = self.get_noise(width, height) - x_T = self.slerp(self.variation_amount, initial_noise, target_noise) - elif initial_noise is not None: - # i.e. we specified particular variations - x_T = initial_noise - else: - set_seed(seed) - try: - x_T = self.get_noise(width, height) - except: - logger.error("An error occurred while getting initial noise") - print(traceback.format_exc()) - - # Pass on the seed in case a layer beneath us needs to generate noise on its own. - image = make_image(x_T, seed) - - results.append([image, seed, attention_maps_images]) - - if image_callback is not None: - attention_maps_image = None if len(attention_maps_images) == 0 else attention_maps_images[-1] - image_callback( - image, - seed, - first_seed=first_seed, - attention_maps_image=attention_maps_image, - ) - - seed = self.new_seed() - - # Free up memory from the last generation. - clear_cuda_cache = kwargs["clear_cuda_cache"] if "clear_cuda_cache" in kwargs else None - if clear_cuda_cache is not None: - clear_cuda_cache() - - return results - - def sample_to_image(self, samples) -> Image.Image: - """ - Given samples returned from a sampler, converts - it into a PIL Image - """ - with torch.inference_mode(): - image = self.model.decode_latents(samples) - return self.model.numpy_to_pil(image)[0] - - def repaste_and_color_correct( - self, - result: Image.Image, - init_image: Image.Image, - init_mask: Image.Image, - mask_blur_radius: int = 8, - ) -> Image.Image: - if init_image is None or init_mask is None: - return result - - # Get the original alpha channel of the mask if there is one. - # Otherwise it is some other black/white image format ('1', 'L' or 'RGB') - pil_init_mask = init_mask.getchannel("A") if init_mask.mode == "RGBA" else init_mask.convert("L") - pil_init_image = init_image.convert("RGBA") # Add an alpha channel if one doesn't exist - - # Build an image with only visible pixels from source to use as reference for color-matching. 
- init_rgb_pixels = np.asarray(init_image.convert("RGB"), dtype=np.uint8) - init_a_pixels = np.asarray(pil_init_image.getchannel("A"), dtype=np.uint8) - init_mask_pixels = np.asarray(pil_init_mask, dtype=np.uint8) - - # Get numpy version of result - np_image = np.asarray(result, dtype=np.uint8) - - # Mask and calculate mean and standard deviation - mask_pixels = init_a_pixels * init_mask_pixels > 0 - np_init_rgb_pixels_masked = init_rgb_pixels[mask_pixels, :] - np_image_masked = np_image[mask_pixels, :] - - if np_init_rgb_pixels_masked.size > 0: - init_means = np_init_rgb_pixels_masked.mean(axis=0) - init_std = np_init_rgb_pixels_masked.std(axis=0) - gen_means = np_image_masked.mean(axis=0) - gen_std = np_image_masked.std(axis=0) - - # Color correct - np_matched_result = np_image.copy() - np_matched_result[:, :, :] = ( - ( - ( - (np_matched_result[:, :, :].astype(np.float32) - gen_means[None, None, :]) - / gen_std[None, None, :] - ) - * init_std[None, None, :] - + init_means[None, None, :] - ) - .clip(0, 255) - .astype(np.uint8) - ) - matched_result = Image.fromarray(np_matched_result, mode="RGB") - else: - matched_result = Image.fromarray(np_image, mode="RGB") - - # Blur the mask out (into init image) by specified amount - if mask_blur_radius > 0: - nm = np.asarray(pil_init_mask, dtype=np.uint8) - nmd = cv2.erode( - nm, - kernel=np.ones((3, 3), dtype=np.uint8), - iterations=int(mask_blur_radius / 2), - ) - pmd = Image.fromarray(nmd, mode="L") - blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius)) - else: - blurred_init_mask = pil_init_mask - - multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1]) - - # Paste original on color-corrected generation (using blurred mask) - matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask) - return matched_result - - @staticmethod - def sample_to_lowres_estimated_image(samples): - # origingally adapted from code by @erucipe and @keturn here: - # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 - - # these updated numbers for v1.5 are from @torridgristle - v1_5_latent_rgb_factors = torch.tensor( - [ - # R G B - [0.3444, 0.1385, 0.0670], # L1 - [0.1247, 0.4027, 0.1494], # L2 - [-0.3192, 0.2513, 0.2103], # L3 - [-0.1307, -0.1874, -0.7445], # L4 - ], - dtype=samples.dtype, - device=samples.device, - ) - - latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors - latents_ubyte = ( - ((latent_image + 1) / 2).clamp(0, 1).mul(0xFF).byte() # change scale from -1..1 to 0..1 # to 0..255 - ).cpu() - - return Image.fromarray(latents_ubyte.numpy()) - - def generate_initial_noise(self, seed, width, height): - initial_noise = None - if self.variation_amount > 0 or len(self.with_variations) > 0: - # use fixed initial noise plus random noise per iteration - set_seed(seed) - initial_noise = self.get_noise(width, height) - for v_seed, v_weight in self.with_variations: - seed = v_seed - set_seed(seed) - next_noise = self.get_noise(width, height) - initial_noise = self.slerp(v_weight, initial_noise, next_noise) - if self.variation_amount > 0: - random.seed() # reset RNG to an actually random state, so we can get a random seed for variations - seed = random.randrange(0, np.iinfo(np.uint32).max) - return (seed, initial_noise) - - def get_perlin_noise(self, width, height): - fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device - # limit noise to only the diffusion image channels, not the mask channels - input_channels = 
min(self.latent_channels, 4)
-        # round up to the nearest block of 8
-        temp_width = int((width + 7) / 8) * 8
-        temp_height = int((height + 7) / 8) * 8
-        noise = torch.stack(
-            [
-                rand_perlin_2d((temp_height, temp_width), (8, 8), device=self.model.device).to(fixdevice)
-                for _ in range(input_channels)
-            ],
-            dim=0,
-        ).to(self.model.device)
-        return noise[0:4, 0:height, 0:width]
-
-    def new_seed(self):
-        self.seed = random.randrange(0, np.iinfo(np.uint32).max)
-        return self.seed
-
-    def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
-        """
-        Spherical linear interpolation
-        Args:
-            t (float/np.ndarray): Float value between 0.0 and 1.0
-            v0 (np.ndarray): Starting vector
-            v1 (np.ndarray): Final vector
-            DOT_THRESHOLD (float): Threshold for considering the two vectors as
-                                   collinear. Not recommended to alter this.
-        Returns:
-            v2 (np.ndarray): Interpolation vector between v0 and v1
-        """
-        inputs_are_torch = False
-        if not isinstance(v0, np.ndarray):
-            inputs_are_torch = True
-            v0 = v0.detach().cpu().numpy()
-        if not isinstance(v1, np.ndarray):
-            inputs_are_torch = True
-            v1 = v1.detach().cpu().numpy()
-
-        dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
-        if np.abs(dot) > DOT_THRESHOLD:
-            v2 = (1 - t) * v0 + t * v1
-        else:
-            theta_0 = np.arccos(dot)
-            sin_theta_0 = np.sin(theta_0)
-            theta_t = theta_0 * t
-            sin_theta_t = np.sin(theta_t)
-            s0 = np.sin(theta_0 - theta_t) / sin_theta_0
-            s1 = sin_theta_t / sin_theta_0
-            v2 = s0 * v0 + s1 * v1
-
-        if inputs_are_torch:
-            v2 = torch.from_numpy(v2).to(self.model.device)
-
-        return v2
-
-    # This is a handy routine for debugging: given a generated sample,
-    # convert it into a PNG image and store it at the indicated path
-    def save_sample(self, sample, filepath):
-        image = self.sample_to_image(sample)
-        dirname = os.path.dirname(filepath) or "."
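[Aside, not part of the patch: the slerp() deleted above is what the seed-variation feature depends on, so a condensed, runnable sketch of its behavior may help when the feature is reimplemented elsewhere. Unlike plain lerp, spherical interpolation keeps blended unit-norm noise on the unit sphere, so mixed noise still has sampler-like statistics.]

import numpy as np

def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Same math as the deleted method, minus the torch round-tripping.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:  # nearly collinear: fall back to lerp
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

rng = np.random.default_rng(0)
a = rng.standard_normal(512)
a /= np.linalg.norm(a)
b = rng.standard_normal(512)
b /= np.linalg.norm(b)
print(np.linalg.norm(slerp(0.5, a, b)))  # ~1.0: stays on the unit sphere
print(np.linalg.norm(0.5 * a + 0.5 * b))  # ~0.71: lerp pulls toward the origin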
- if not os.path.exists(dirname): - logger.info(f"creating directory {dirname}") - os.makedirs(dirname, exist_ok=True) - image.save(filepath, "PNG") - - def torch_dtype(self) -> torch.dtype: - return torch.float16 if self.precision == "float16" else torch.float32 - - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self, width, height): - device = self.model.device - # limit noise to only the diffusion image channels, not the mask channels - input_channels = min(self.latent_channels, 4) - x = torch.randn( - [ - 1, - input_channels, - height // self.downsampling_factor, - width // self.downsampling_factor, - ], - dtype=self.torch_dtype(), - device=device, - ) - if self.perlin > 0.0: - perlin_noise = self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor) - x = (1 - self.perlin) * x + self.perlin * perlin_noise - return x diff --git a/invokeai/backend/generator/img2img.py b/invokeai/backend/generator/img2img.py deleted file mode 100644 index 5490b2325c..0000000000 --- a/invokeai/backend/generator/img2img.py +++ /dev/null @@ -1,92 +0,0 @@ -""" -invokeai.backend.generator.img2img descends from .generator -""" -from typing import Optional - -import torch -from accelerate.utils import set_seed -from diffusers import logging - -from ..stable_diffusion import ( - ConditioningData, - PostprocessingSettings, - StableDiffusionGeneratorPipeline, -) -from .base import Generator - - -class Img2Img(Generator): - def __init__(self, model, precision): - super().__init__(model, precision) - self.init_latent = None # by get_noise() - - def get_make_image( - self, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - init_image, - strength, - step_callback=None, - threshold=0.0, - warmup=0.2, - perlin=0.0, - h_symmetry_time_pct=None, - v_symmetry_time_pct=None, - attention_maps_callback=None, - **kwargs, - ): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it. - """ - self.perlin = perlin - - # noinspection PyTypeChecker - pipeline: StableDiffusionGeneratorPipeline = self.model - pipeline.scheduler = sampler - - uc, c, extra_conditioning_info = conditioning - conditioning_data = ConditioningData( - uc, - c, - cfg_scale, - extra_conditioning_info, - postprocessing_settings=PostprocessingSettings( - threshold=threshold, - warmup=warmup, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct, - ), - ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) - - def make_image(x_T: torch.Tensor, seed: int): - # FIXME: use x_T for initial seeded noise - # We're not at the moment because the pipeline automatically resizes init_image if - # necessary, which the x_T input might not match. - # In the meantime, reset the seed prior to generating pipeline output so we at least get the same result. 
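[Aside on get_noise() above, illustrative and not from the patch: the Perlin mix is a plain convex combination, which is not variance-preserving — the larger the perlin fraction, the lower the standard deviation of the starting noise the sampler receives.]

import torch

p = 0.3                            # self.perlin in the deleted code
gaussian = torch.randn(100_000)
structured = torch.randn(100_000)  # stand-in for rand_perlin_2d() output
blended = (1 - p) * gaussian + p * structured
print(blended.std())               # ~sqrt((1-p)**2 + p**2) = ~0.76, not 1.0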
- logging.set_verbosity_error() # quench safety check warnings - pipeline_output = pipeline.img2img_from_embeddings( - init_image, - strength, - steps, - conditioning_data, - noise_func=self.get_noise_like, - callback=step_callback, - seed=seed, - ) - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: - attention_maps_callback(pipeline_output.attention_map_saver) - return pipeline.numpy_to_pil(pipeline_output.images)[0] - - return make_image - - def get_noise_like(self, like: torch.Tensor): - device = like.device - x = torch.randn_like(like, device=device) - if self.perlin > 0.0: - shape = like.shape - x = (1 - self.perlin) * x + self.perlin * self.get_perlin_noise(shape[3], shape[2]) - return x diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py deleted file mode 100644 index 7aeb3d4809..0000000000 --- a/invokeai/backend/generator/inpaint.py +++ /dev/null @@ -1,379 +0,0 @@ -""" -invokeai.backend.generator.inpaint descends from .generator -""" -from __future__ import annotations - -import math -from typing import Tuple, Union, Optional - -import cv2 -import numpy as np -import torch -from PIL import Image, ImageChops, ImageFilter, ImageOps - -from ..image_util import PatchMatch, debug_image -from ..stable_diffusion.diffusers_pipeline import ( - ConditioningData, - StableDiffusionGeneratorPipeline, - image_resized_to_grid_as_tensor, -) -from .img2img import Img2Img - - -def infill_methods() -> list[str]: - methods = [ - "tile", - "solid", - ] - if PatchMatch.patchmatch_available(): - methods.insert(0, "patchmatch") - return methods - - -class Inpaint(Img2Img): - def __init__(self, model, precision): - self.inpaint_height = 0 - self.inpaint_width = 0 - self.enable_image_debugging = False - self.init_latent = None - self.pil_image = None - self.pil_mask = None - self.mask_blur_radius = 0 - self.infill_method = None - super().__init__(model, precision) - - # Outpaint support code - def get_tile_images(self, image: np.ndarray, width=8, height=8): - _nrows, _ncols, depth = image.shape - _strides = image.strides - - nrows, _m = divmod(_nrows, height) - ncols, _n = divmod(_ncols, width) - if _m != 0 or _n != 0: - return None - - return np.lib.stride_tricks.as_strided( - np.ravel(image), - shape=(nrows, ncols, height, width, depth), - strides=(height * _strides[0], width * _strides[1], *_strides), - writeable=False, - ) - - def infill_patchmatch(self, im: Image.Image) -> Image.Image: - if im.mode != "RGBA": - return im - - # Skip patchmatch if patchmatch isn't available - if not PatchMatch.patchmatch_available(): - return im - - # Patchmatch (note, we may want to expose patch_size? 
Increasing it significantly impacts performance though) - im_patched_np = PatchMatch.inpaint(im.convert("RGB"), ImageOps.invert(im.split()[-1]), patch_size=3) - im_patched = Image.fromarray(im_patched_np, mode="RGB") - return im_patched - - def tile_fill_missing(self, im: Image.Image, tile_size: int = 16, seed: Optional[int] = None) -> Image.Image: - # Only fill if there's an alpha layer - if im.mode != "RGBA": - return im - - a = np.asarray(im, dtype=np.uint8) - - tile_size_tuple = (tile_size, tile_size) - - # Get the image as tiles of a specified size - tiles = self.get_tile_images(a, *tile_size_tuple).copy() - - # Get the mask as tiles - tiles_mask = tiles[:, :, :, :, 3] - - # Find any mask tiles with any fully transparent pixels (we will be replacing these later) - tmask_shape = tiles_mask.shape - tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape)) - n, ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:]) - tiles_mask = tiles_mask > 0 - tiles_mask = tiles_mask.reshape((n, ny)).all(axis=1) - - # Get RGB tiles in single array and filter by the mask - tshape = tiles.shape - tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), *tiles.shape[2:])) - filtered_tiles = tiles_all[tiles_mask] - - if len(filtered_tiles) == 0: - return im - - # Find all invalid tiles and replace with a random valid tile - replace_count = (tiles_mask == False).sum() - rng = np.random.default_rng(seed=seed) - tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[ - rng.choice(filtered_tiles.shape[0], replace_count), :, :, : - ] - - # Convert back to an image - tiles_all = tiles_all.reshape(tshape) - tiles_all = tiles_all.swapaxes(1, 2) - st = tiles_all.reshape( - ( - math.prod(tiles_all.shape[0:2]), - math.prod(tiles_all.shape[2:4]), - tiles_all.shape[4], - ) - ) - si = Image.fromarray(st, mode="RGBA") - - return si - - def mask_edge(self, mask: Image.Image, edge_size: int, edge_blur: int) -> Image.Image: - npimg = np.asarray(mask, dtype=np.uint8) - - # Detect any partially transparent regions - npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0))) - - # Detect hard edges - npedge = cv2.Canny(npimg, threshold1=100, threshold2=200) - - # Combine - npmask = npgradient + npedge - - # Expand - npmask = cv2.dilate(npmask, np.ones((3, 3), np.uint8), iterations=int(edge_size / 2)) - - new_mask = Image.fromarray(npmask) - - if edge_blur > 0: - new_mask = new_mask.filter(ImageFilter.BoxBlur(edge_blur)) - - return ImageOps.invert(new_mask) - - def seam_paint( - self, - im: Image.Image, - seam_size: int, - seam_blur: int, - seed, - steps, - cfg_scale, - ddim_eta, - conditioning, - strength, - noise, - infill_method, - step_callback, - ) -> Image.Image: - hard_mask = self.pil_image.split()[-1].copy() - mask = self.mask_edge(hard_mask, seam_size, seam_blur) - - make_image = self.get_make_image( - steps, - cfg_scale, - ddim_eta, - conditioning, - init_image=im.copy().convert("RGBA"), - mask_image=mask, - strength=strength, - mask_blur_radius=0, - seam_size=0, - step_callback=step_callback, - inpaint_width=im.width, - inpaint_height=im.height, - infill_method=infill_method, - ) - - seam_noise = self.get_noise(im.width, im.height) - - result = make_image(seam_noise, seed=None) - - return result - - @torch.no_grad() - def get_make_image( - self, - steps, - cfg_scale, - ddim_eta, - conditioning, - init_image: Union[Image.Image, torch.FloatTensor], - mask_image: Union[Image.Image, torch.FloatTensor], - strength: float, - mask_blur_radius: int = 8, - # Seam settings - when 0, 
doesn't fill seam - seam_size: int = 96, - seam_blur: int = 16, - seam_strength: float = 0.7, - seam_steps: int = 30, - tile_size: int = 32, - step_callback=None, - inpaint_replace=False, - enable_image_debugging=False, - infill_method=None, - inpaint_width=None, - inpaint_height=None, - inpaint_fill: Tuple[int, int, int, int] = (0x7F, 0x7F, 0x7F, 0xFF), - attention_maps_callback=None, - **kwargs, - ): - """ - Returns a function returning an image derived from the prompt and - the initial image + mask. Return value depends on the seed at - the time you call it. kwargs are 'init_latent' and 'strength' - """ - - self.enable_image_debugging = enable_image_debugging - infill_method = infill_method or infill_methods()[0] - self.infill_method = infill_method - - self.inpaint_width = inpaint_width - self.inpaint_height = inpaint_height - - if isinstance(init_image, Image.Image): - self.pil_image = init_image.copy() - - # Do infill - if infill_method == "patchmatch" and PatchMatch.patchmatch_available(): - init_filled = self.infill_patchmatch(self.pil_image.copy()) - elif infill_method == "tile": - init_filled = self.tile_fill_missing(self.pil_image.copy(), seed=self.seed, tile_size=tile_size) - elif infill_method == "solid": - solid_bg = Image.new("RGBA", init_image.size, inpaint_fill) - init_filled = Image.alpha_composite(solid_bg, init_image) - else: - raise ValueError(f"Non-supported infill type {infill_method}", infill_method) - init_filled.paste(init_image, (0, 0), init_image.split()[-1]) - - # Resize if requested for inpainting - if inpaint_width and inpaint_height: - init_filled = init_filled.resize((inpaint_width, inpaint_height)) - - debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging) - - # Create init tensor - init_image = image_resized_to_grid_as_tensor(init_filled.convert("RGB")) - - if isinstance(mask_image, Image.Image): - self.pil_mask = mask_image.copy() - debug_image( - mask_image, - "mask_image BEFORE multiply with pil_image", - debug_status=self.enable_image_debugging, - ) - - init_alpha = self.pil_image.getchannel("A") - if mask_image.mode != "L": - # FIXME: why do we get passed an RGB image here? We can only use single-channel. - mask_image = mask_image.convert("L") - mask_image = ImageChops.multiply(mask_image, init_alpha) - self.pil_mask = mask_image - - # Resize if requested for inpainting - if inpaint_width and inpaint_height: - mask_image = mask_image.resize((inpaint_width, inpaint_height)) - - debug_image( - mask_image, - "mask_image AFTER multiply with pil_image", - debug_status=self.enable_image_debugging, - ) - mask: torch.FloatTensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) - else: - mask: torch.FloatTensor = mask_image - - self.mask_blur_radius = mask_blur_radius - - # noinspection PyTypeChecker - pipeline: StableDiffusionGeneratorPipeline = self.model - - # todo: support cross-attention control - uc, c, _ = conditioning - conditioning_data = ConditioningData(uc, c, cfg_scale).add_scheduler_args_if_applicable( - pipeline.scheduler, eta=ddim_eta - ) - - def make_image(x_T: torch.Tensor, seed: int): - pipeline_output = pipeline.inpaint_from_embeddings( - init_image=init_image, - mask=1 - mask, # expects white means "paint here." 
- strength=strength, - num_inference_steps=steps, - conditioning_data=conditioning_data, - noise_func=self.get_noise_like, - callback=step_callback, - seed=seed, - ) - - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: - attention_maps_callback(pipeline_output.attention_map_saver) - - result = self.postprocess_size_and_mask(pipeline.numpy_to_pil(pipeline_output.images)[0]) - - # Seam paint if this is our first pass (seam_size set to 0 during seam painting) - if seam_size > 0: - old_image = self.pil_image or init_image - old_mask = self.pil_mask or mask_image - - result = self.seam_paint( - result, - seam_size, - seam_blur, - seed, - seam_steps, - cfg_scale, - ddim_eta, - conditioning, - seam_strength, - x_T, - infill_method, - step_callback, - ) - - # Restore original settings - self.get_make_image( - steps, - cfg_scale, - ddim_eta, - conditioning, - old_image, - old_mask, - strength, - mask_blur_radius, - seam_size, - seam_blur, - seam_strength, - seam_steps, - tile_size, - step_callback, - inpaint_replace, - enable_image_debugging, - inpaint_width=inpaint_width, - inpaint_height=inpaint_height, - infill_method=infill_method, - **kwargs, - ) - - return result - - return make_image - - def sample_to_image(self, samples) -> Image.Image: - gen_result = super().sample_to_image(samples).convert("RGB") - return self.postprocess_size_and_mask(gen_result) - - def postprocess_size_and_mask(self, gen_result: Image.Image) -> Image.Image: - debug_image(gen_result, "gen_result", debug_status=self.enable_image_debugging) - - # Resize if necessary - if self.inpaint_width and self.inpaint_height: - gen_result = gen_result.resize(self.pil_image.size) - - if self.pil_image is None or self.pil_mask is None: - return gen_result - - corrected_result = self.repaste_and_color_correct( - gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius - ) - debug_image( - corrected_result, - "corrected_result", - debug_status=self.enable_image_debugging, - ) - - return corrected_result diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 8623ef9bfb..f452dc1037 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -1,18 +1,14 @@ from __future__ import annotations import dataclasses -import inspect -import math -import secrets -from collections.abc import Sequence from dataclasses import dataclass, field +import inspect from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union from pydantic import Field import einops import PIL.Image import numpy as np -from accelerate.utils import set_seed import psutil import torch import torchvision.transforms as T @@ -23,15 +19,11 @@ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import ( StableDiffusionPipeline, ) -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import ( - StableDiffusionImg2ImgPipeline, -) from diffusers.pipelines.stable_diffusion.safety_checker import ( StableDiffusionSafetyChecker, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput -from diffusers.utils import PIL_INTERPOLATION from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.outputs import BaseOutput from torchvision.transforms.functional import resize as tv_resize @@ -45,7 +37,6 @@ from .diffusion import ( 
InvokeAIDiffuserComponent, PostprocessingSettings, ) -from .offloading import FullyLoadedModelGroup, ModelGroup @dataclass @@ -287,9 +278,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): feature_extractor ([`CLIPFeatureExtractor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ - _model_group: ModelGroup - - ID_LENGTH = 8 def __init__( self, @@ -328,9 +316,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # control_model=control_model, ) self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward) - - self._model_group = FullyLoadedModelGroup(execution_device or self.unet.device) - self._model_group.install(*self._submodels) self.control_model = control_model def _adjust_memory_efficient_attention(self, latents: torch.Tensor): @@ -373,28 +358,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): self.disable_attention_slicing() def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False): - # overridden method; types match the superclass. - if torch_device is None: - return self - self._model_group.set_device(torch.device(torch_device)) - self._model_group.ready() + raise Exception("Should not be called") @property def device(self) -> torch.device: - return self._model_group.execution_device - - @property - def _submodels(self) -> Sequence[torch.nn.Module]: - module_names, _, _ = self.extract_init_dict(dict(self.config)) - submodels = [] - for name in module_names.keys(): - if hasattr(self, name): - value = getattr(self, name) - else: - value = getattr(self.config, name) - if isinstance(value, torch.nn.Module): - submodels.append(value) - return submodels + return self.unet.device def latents_from_embeddings( self, @@ -414,7 +382,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): if self.scheduler.config.get("cpu_only", False): scheduler_device = torch.device("cpu") else: - scheduler_device = self._model_group.device_for(self.unet) + scheduler_device = self.unet.device if timesteps is None: self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device) @@ -511,7 +479,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): (batch_size,), timesteps[0], dtype=timesteps.dtype, - device=self._model_group.device_for(self.unet), + device=self.unet.device, ) yield PipelineIntermediateState( @@ -655,185 +623,3 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): cross_attention_kwargs=cross_attention_kwargs, **kwargs, ).sample - - def img2img_from_embeddings( - self, - init_image: Union[torch.FloatTensor, PIL.Image.Image], - strength: float, - num_inference_steps: int, - conditioning_data: ConditioningData, - *, - callback: Callable[[PipelineIntermediateState], None] = None, - noise_func=None, - seed=None, - ) -> InvokeAIStableDiffusionPipelineOutput: - if isinstance(init_image, PIL.Image.Image): - init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB")) - - if init_image.dim() == 3: - init_image = einops.rearrange(init_image, "c h w -> 1 c h w") - - # 6. 
Prepare latent variables - initial_latents = self.non_noised_latents_from_image( - init_image, - device=self._model_group.device_for(self.unet), - dtype=self.unet.dtype, - ) - if seed is not None: - set_seed(seed) - noise = noise_func(initial_latents) - - return self.img2img_from_latents_and_embeddings( - initial_latents, - num_inference_steps, - conditioning_data, - strength, - noise, - callback, - ) - - def get_img2img_timesteps(self, num_inference_steps: int, strength: float, device=None) -> (torch.Tensor, int): - img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components) - assert img2img_pipeline.scheduler is self.scheduler - - if self.scheduler.config.get("cpu_only", False): - scheduler_device = torch.device("cpu") - else: - scheduler_device = self._model_group.device_for(self.unet) - - img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=scheduler_device) - timesteps, adjusted_steps = img2img_pipeline.get_timesteps( - num_inference_steps, strength, device=scheduler_device - ) - # Workaround for low strength resulting in zero timesteps. - # TODO: submit upstream fix for zero-step img2img - if timesteps.numel() == 0: - timesteps = self.scheduler.timesteps[-1:] - adjusted_steps = timesteps.numel() - return timesteps, adjusted_steps - - def inpaint_from_embeddings( - self, - init_image: torch.FloatTensor, - mask: torch.FloatTensor, - strength: float, - num_inference_steps: int, - conditioning_data: ConditioningData, - *, - callback: Callable[[PipelineIntermediateState], None] = None, - noise_func=None, - seed=None, - ) -> InvokeAIStableDiffusionPipelineOutput: - device = self._model_group.device_for(self.unet) - latents_dtype = self.unet.dtype - - if isinstance(init_image, PIL.Image.Image): - init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB")) - - init_image = init_image.to(device=device, dtype=latents_dtype) - mask = mask.to(device=device, dtype=latents_dtype) - - if init_image.dim() == 3: - init_image = init_image.unsqueeze(0) - - timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength) - - # 6. Prepare latent variables - # can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents - # because we have our own noise function - init_image_latents = self.non_noised_latents_from_image(init_image, device=device, dtype=latents_dtype) - if seed is not None: - set_seed(seed) - noise = noise_func(init_image_latents) - - if mask.dim() == 3: - mask = mask.unsqueeze(0) - latent_mask = tv_resize(mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR).to( - device=device, dtype=latents_dtype - ) - - guidance: List[Callable] = [] - - if is_inpainting_model(self.unet): - # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint - # (that's why there's a mask!) but it seems to really want that blanked out. - masked_init_image = init_image * torch.where(mask < 0.5, 1, 0) - masked_latents = self.non_noised_latents_from_image(masked_init_image, device=device, dtype=latents_dtype) - - # TODO: we should probably pass this in so we don't have to try/finally around setting it. 
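[Context for the zero-timestep workaround in get_img2img_timesteps() above; this is a paraphrase of the upstream logic as of diffusers ~0.19, not part of the patch. Diffusers picks the img2img window by truncating the schedule, so a small strength can round down to an empty slice:]

num_inference_steps = 30
strength = 0.02
# Paraphrased from StableDiffusionImg2ImgPipeline.get_timesteps():
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# timesteps = scheduler.timesteps[t_start * scheduler.order :]
print(init_timestep)  # 0 -> an empty timestep slice without the fallback above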
- self.invokeai_diffuser.model_forward_callback = AddsMaskLatents( - self._unet_forward, latent_mask, masked_latents - ) - else: - guidance.append(AddsMaskGuidance(latent_mask, init_image_latents, self.scheduler, noise)) - - try: - result_latents, result_attention_maps = self.latents_from_embeddings( - latents=init_image_latents - if strength < 1.0 - else torch.zeros_like( - init_image_latents, device=init_image_latents.device, dtype=init_image_latents.dtype - ), - num_inference_steps=num_inference_steps, - conditioning_data=conditioning_data, - noise=noise, - timesteps=timesteps, - additional_guidance=guidance, - callback=callback, - ) - finally: - self.invokeai_diffuser.model_forward_callback = self._unet_forward - - # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 - torch.cuda.empty_cache() - - with torch.inference_mode(): - image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput( - images=image, - nsfw_content_detected=[], - attention_map_saver=result_attention_maps, - ) - return self.check_for_safety(output, dtype=self.unet.dtype) - - def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype): - init_image = init_image.to(device=device, dtype=dtype) - with torch.inference_mode(): - self._model_group.load(self.vae) - init_latent_dist = self.vae.encode(init_image).latent_dist - init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible! - - init_latents = 0.18215 * init_latents - return init_latents - - def check_for_safety(self, output, dtype): - with torch.inference_mode(): - screened_images, has_nsfw_concept = self.run_safety_checker(output.images, dtype=dtype) - screened_attention_map_saver = None - if has_nsfw_concept is None or not has_nsfw_concept: - screened_attention_map_saver = output.attention_map_saver - return InvokeAIStableDiffusionPipelineOutput( - screened_images, - has_nsfw_concept, - # block the attention maps if NSFW content is detected - attention_map_saver=screened_attention_map_saver, - ) - - def run_safety_checker(self, image, device=None, dtype=None): - # overriding to use the model group for device info instead of requiring the caller to know. - if self.safety_checker is not None: - device = self._model_group.device_for(self.safety_checker) - return super().run_safety_checker(image, device, dtype) - - def decode_latents(self, latents): - # Explicit call to get the vae loaded, since `decode` isn't the forward method. 
- self._model_group.load(self.vae) - return super().decode_latents(latents) - - def debug_latents(self, latents, msg): - from invokeai.backend.image_util import debug_image - - with torch.inference_mode(): - decoded = self.numpy_to_pil(self.decode_latents(latents)) - for i, img in enumerate(decoded): - debug_image(img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 4ff8c5abc7..1dc6c359a0 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -295,7 +295,6 @@ class InvokeAIDiffuserComponent: ) -> torch.Tensor: if postprocessing_settings is not None: percent_through = step_index / total_step_count - latents = self.apply_threshold(postprocessing_settings, latents, percent_through) latents = self.apply_symmetry(postprocessing_settings, latents, percent_through) return latents @@ -516,63 +515,6 @@ class InvokeAIDiffuserComponent: combined_next_x = unconditioned_next_x + scaled_delta return combined_next_x - def apply_threshold( - self, - postprocessing_settings: PostprocessingSettings, - latents: torch.Tensor, - percent_through: float, - ) -> torch.Tensor: - if postprocessing_settings.threshold is None or postprocessing_settings.threshold == 0.0: - return latents - - threshold = postprocessing_settings.threshold - warmup = postprocessing_settings.warmup - - if percent_through < warmup: - current_threshold = threshold + threshold * 5 * (1 - (percent_through / warmup)) - else: - current_threshold = threshold - - if current_threshold <= 0: - return latents - - maxval = latents.max().item() - minval = latents.min().item() - - scale = 0.7 # default value from #395 - - if self.debug_thresholding: - std, mean = [i.item() for i in torch.std_mean(latents)] - outside = torch.count_nonzero((latents < -current_threshold) | (latents > current_threshold)) - logger.info(f"Threshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})") - logger.debug(f"min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}") - logger.debug(f"{outside / latents.numel() * 100:.2f}% values outside threshold") - - if maxval < current_threshold and minval > -current_threshold: - return latents - - num_altered = 0 - - # MPS torch.rand_like is fine because torch.rand_like is wrapped in generate.py! 
- - if maxval > current_threshold: - latents = torch.clone(latents) - maxval = np.clip(maxval * scale, 1, current_threshold) - num_altered += torch.count_nonzero(latents > maxval) - latents[latents > maxval] = torch.rand_like(latents[latents > maxval]) * maxval - - if minval < -current_threshold: - latents = torch.clone(latents) - minval = np.clip(minval * scale, -current_threshold, -1) - num_altered += torch.count_nonzero(latents < minval) - latents[latents < minval] = torch.rand_like(latents[latents < minval]) * minval - - if self.debug_thresholding: - logger.debug(f"min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})") - logger.debug(f"{num_altered / latents.numel() * 100:.2f}% values altered") - - return latents - def apply_symmetry( self, postprocessing_settings: PostprocessingSettings, @@ -634,18 +576,6 @@ class InvokeAIDiffuserComponent: self.last_percent_through = percent_through return latents.to(device=dev) - def estimate_percent_through(self, step_index, sigma): - if step_index is not None and self.cross_attention_control_context is not None: - # percent_through will never reach 1.0 (but this is intended) - return float(step_index) / float(self.cross_attention_control_context.step_count) - # find the best possible index of the current sigma in the sigma sequence - smaller_sigmas = torch.nonzero(self.model.sigmas <= sigma) - sigma_index = smaller_sigmas[-1].item() if smaller_sigmas.shape[0] > 0 else 0 - # flip because sigmas[0] is for the fully denoised image - # percent_through must be <1 - return 1.0 - float(sigma_index + 1) / float(self.model.sigmas.shape[0]) - # print('estimated percent_through', percent_through, 'from sigma', sigma.item()) - # todo: make this work @classmethod def apply_conjunction(cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale): diff --git a/invokeai/backend/stable_diffusion/offloading.py b/invokeai/backend/stable_diffusion/offloading.py deleted file mode 100644 index aa2426d514..0000000000 --- a/invokeai/backend/stable_diffusion/offloading.py +++ /dev/null @@ -1,253 +0,0 @@ -from __future__ import annotations - -import warnings -import weakref -from abc import ABCMeta, abstractmethod -from collections.abc import MutableMapping -from typing import Callable, Union - -import torch -from accelerate.utils import send_to_device -from torch.utils.hooks import RemovableHandle - -OFFLOAD_DEVICE = torch.device("cpu") - - -class _NoModel: - """Symbol that indicates no model is loaded. - - (We can't weakref.ref(None), so this was my best idea at the time to come up with something - type-checkable.) - """ - - def __bool__(self): - return False - - def to(self, device: torch.device): - pass - - def __repr__(self): - return "" - - -NO_MODEL = _NoModel() - - -class ModelGroup(metaclass=ABCMeta): - """ - A group of models. - - The use case I had in mind when writing this is the sub-models used by a DiffusionPipeline, - e.g. its text encoder, U-net, VAE, etc. - - Those models are :py:class:`diffusers.ModelMixin`, but "model" is interchangeable with - :py:class:`torch.nn.Module` here. 
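[One detail worth noting while this file is still visible; an aside, not from the patch. The _NoModel sentinel exists because None cannot be weakly referenced, and the lazy group below tracks its current model through weakref:]

import weakref

class _NoModel:
    def __bool__(self):
        return False

sentinel = _NoModel()
ref = weakref.ref(sentinel)  # fine: ordinary class instances support weakrefs
try:
    weakref.ref(None)
except TypeError as err:
    print(err)  # cannot create weak reference to 'NoneType' object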
- """ - - def __init__(self, execution_device: torch.device): - self.execution_device = execution_device - - @abstractmethod - def install(self, *models: torch.nn.Module): - """Add models to this group.""" - pass - - @abstractmethod - def uninstall(self, models: torch.nn.Module): - """Remove models from this group.""" - pass - - @abstractmethod - def uninstall_all(self): - """Remove all models from this group.""" - - @abstractmethod - def load(self, model: torch.nn.Module): - """Load this model to the execution device.""" - pass - - @abstractmethod - def offload_current(self): - """Offload the current model(s) from the execution device.""" - pass - - @abstractmethod - def ready(self): - """Ready this group for use.""" - pass - - @abstractmethod - def set_device(self, device: torch.device): - """Change which device models from this group will execute on.""" - pass - - @abstractmethod - def device_for(self, model) -> torch.device: - """Get the device the given model will execute on. - - The model should already be a member of this group. - """ - pass - - @abstractmethod - def __contains__(self, model): - """Check if the model is a member of this group.""" - pass - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} object at {id(self):x}: " f"device={self.execution_device} >" - - -class LazilyLoadedModelGroup(ModelGroup): - """ - Only one model from this group is loaded on the GPU at a time. - - Running the forward method of a model will displace the previously-loaded model, - offloading it to CPU. - - If you call other methods on the model, e.g. ``model.encode(x)`` instead of ``model(x)``, - you will need to explicitly load it with :py:method:`.load(model)`. - - This implementation relies on pytorch forward-pre-hooks, and it will copy forward arguments - to the appropriate execution device, as long as they are positional arguments and not keyword - arguments. (I didn't make the rules; that's the way the pytorch 1.13 API works for hooks.) - """ - - _hooks: MutableMapping[torch.nn.Module, RemovableHandle] - _current_model_ref: Callable[[], Union[torch.nn.Module, _NoModel]] - - def __init__(self, execution_device: torch.device): - super().__init__(execution_device) - self._hooks = weakref.WeakKeyDictionary() - self._current_model_ref = weakref.ref(NO_MODEL) - - def install(self, *models: torch.nn.Module): - for model in models: - self._hooks[model] = model.register_forward_pre_hook(self._pre_hook) - - def uninstall(self, *models: torch.nn.Module): - for model in models: - hook = self._hooks.pop(model) - hook.remove() - if self.is_current_model(model): - # no longer hooked by this object, so don't claim to manage it - self.clear_current_model() - - def uninstall_all(self): - self.uninstall(*self._hooks.keys()) - - def _pre_hook(self, module: torch.nn.Module, forward_input): - self.load(module) - if len(forward_input) == 0: - warnings.warn( - f"Hook for {module.__class__.__name__} got no input. 
" f"Inputs must be positional, not keywords.", - stacklevel=3, - ) - return send_to_device(forward_input, self.execution_device) - - def load(self, module): - if not self.is_current_model(module): - self.offload_current() - self._load(module) - - def offload_current(self): - module = self._current_model_ref() - if module is not NO_MODEL: - module.to(OFFLOAD_DEVICE) - self.clear_current_model() - - def _load(self, module: torch.nn.Module) -> torch.nn.Module: - assert self.is_empty(), f"A model is already loaded: {self._current_model_ref()}" - module = module.to(self.execution_device) - self.set_current_model(module) - return module - - def is_current_model(self, model: torch.nn.Module) -> bool: - """Is the given model the one currently loaded on the execution device?""" - return self._current_model_ref() is model - - def is_empty(self): - """Are none of this group's models loaded on the execution device?""" - return self._current_model_ref() is NO_MODEL - - def set_current_model(self, value): - self._current_model_ref = weakref.ref(value) - - def clear_current_model(self): - self._current_model_ref = weakref.ref(NO_MODEL) - - def set_device(self, device: torch.device): - if device == self.execution_device: - return - self.execution_device = device - current = self._current_model_ref() - if current is not NO_MODEL: - current.to(device) - - def device_for(self, model): - if model not in self: - raise KeyError(f"This does not manage this model {type(model).__name__}", model) - return self.execution_device # this implementation only dispatches to one device - - def ready(self): - pass # always ready to load on-demand - - def __contains__(self, model): - return model in self._hooks - - def __repr__(self) -> str: - return ( - f"<{self.__class__.__name__} object at {id(self):x}: " - f"current_model={type(self._current_model_ref()).__name__} >" - ) - - -class FullyLoadedModelGroup(ModelGroup): - """ - A group of models without any implicit loading or unloading. - - :py:meth:`.ready` loads _all_ the models to the execution device at once. 
- """ - - _models: weakref.WeakSet - - def __init__(self, execution_device: torch.device): - super().__init__(execution_device) - self._models = weakref.WeakSet() - - def install(self, *models: torch.nn.Module): - for model in models: - self._models.add(model) - model.to(self.execution_device) - - def uninstall(self, *models: torch.nn.Module): - for model in models: - self._models.remove(model) - - def uninstall_all(self): - self.uninstall(*self._models) - - def load(self, model): - model.to(self.execution_device) - - def offload_current(self): - for model in self._models: - model.to(OFFLOAD_DEVICE) - - def ready(self): - for model in self._models: - self.load(model) - - def set_device(self, device: torch.device): - self.execution_device = device - for model in self._models: - if model.device != OFFLOAD_DEVICE: - model.to(device) - - def device_for(self, model): - if model not in self: - raise KeyError("This does not manage this model f{type(model).__name__}", model) - return self.execution_device # this implementation only dispatches to one device - - def __contains__(self, model): - return model in self._models From f7aec3b9345e742bf701e0455fcf9c0318599b87 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 8 Aug 2023 23:33:52 +0300 Subject: [PATCH 10/67] Move conditioning class to backend --- invokeai/app/invocations/compel.py | 32 ++--------------- invokeai/backend/stable_diffusion/__init__.py | 2 +- .../diffusion/shared_invokeai_diffusion.py | 35 ++++++++++++++++--- 3 files changed, 33 insertions(+), 36 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 41be7f7138..8ecd22e2d7 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -16,7 +16,7 @@ from ...backend.util.devices import torch_dtype from ...backend.model_management import ModelType from ...backend.model_management.models import ModelNotFoundException from ...backend.model_management.lora import ModelPatcher -from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent +from ...backend.stable_diffusion import InvokeAIDiffuserComponent, BasicConditioningInfo, SDXLConditioningInfo from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext from .model import ClipField from dataclasses import dataclass @@ -29,37 +29,9 @@ class ConditioningField(BaseModel): schema_extra = {"required": ["conditioning_name"]} -@dataclass -class BasicConditioningInfo: - # type: Literal["basic_conditioning"] = "basic_conditioning" - embeds: torch.Tensor - extra_conditioning: Optional[InvokeAIDiffuserComponent.ExtraConditioningInfo] - # weight: float - # mode: ConditioningAlgo - - def to(self, device, dtype=None): - self.embeds = self.embeds.to(device=device, dtype=dtype) - return self - - -@dataclass -class SDXLConditioningInfo(BasicConditioningInfo): - # type: Literal["sdxl_conditioning"] = "sdxl_conditioning" - pooled_embeds: torch.Tensor - add_time_ids: torch.Tensor - - def to(self, device, dtype=None): - self.pooled_embeds = self.pooled_embeds.to(device=device, dtype=dtype) - self.add_time_ids = self.add_time_ids.to(device=device, dtype=dtype) - return super().to(device=device, dtype=dtype) - - -ConditioningInfoType = Annotated[Union[BasicConditioningInfo, SDXLConditioningInfo], Field(discriminator="type")] - - @dataclass class ConditioningFieldData: - conditionings: List[Union[BasicConditioningInfo, SDXLConditioningInfo]] + conditionings: List[BasicConditioningInfo] # unconditioned: 
Optional[torch.Tensor] diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py index 37024ccace..21273c6201 100644 --- a/invokeai/backend/stable_diffusion/__init__.py +++ b/invokeai/backend/stable_diffusion/__init__.py @@ -8,4 +8,4 @@ from .diffusers_pipeline import ( ) from .diffusion import InvokeAIDiffuserComponent from .diffusion.cross_attention_map_saving import AttentionMapSaver -from .diffusion.shared_invokeai_diffusion import PostprocessingSettings +from .diffusion.shared_invokeai_diffusion import PostprocessingSettings, BasicConditioningInfo, SDXLConditioningInfo diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 1dc6c359a0..9b1630dc3a 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from contextlib import contextmanager from dataclasses import dataclass import math @@ -32,6 +34,29 @@ ModelForwardCallback: TypeAlias = Union[ ] +@dataclass +class BasicConditioningInfo: + embeds: torch.Tensor + extra_conditioning: Optional[InvokeAIDiffuserComponent.ExtraConditioningInfo] + # weight: float + # mode: ConditioningAlgo + + def to(self, device, dtype=None): + self.embeds = self.embeds.to(device=device, dtype=dtype) + return self + + +@dataclass +class SDXLConditioningInfo(BasicConditioningInfo): + pooled_embeds: torch.Tensor + add_time_ids: torch.Tensor + + def to(self, device, dtype=None): + self.pooled_embeds = self.pooled_embeds.to(device=device, dtype=dtype) + self.add_time_ids = self.add_time_ids.to(device=device, dtype=dtype) + return super().to(device=device, dtype=dtype) + + @dataclass(frozen=True) class PostprocessingSettings: threshold: float @@ -167,7 +192,7 @@ class InvokeAIDiffuserComponent: added_cond_kwargs = None if cfg_injection: # only applying ControlNet to conditional instead of in unconditioned - if type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo": + if type(conditioning_data.text_embeddings) is SDXLConditioningInfo: added_cond_kwargs = { "text_embeds": conditioning_data.text_embeddings.pooled_embeds, "time_ids": conditioning_data.text_embeddings.add_time_ids, @@ -175,7 +200,7 @@ class InvokeAIDiffuserComponent: encoder_hidden_states = conditioning_data.text_embeddings.embeds encoder_attention_mask = None else: - if type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo": + if type(conditioning_data.text_embeddings) is SDXLConditioningInfo: added_cond_kwargs = { "text_embeds": torch.cat([ # TODO: how to pad? just by zeros? or even truncate? @@ -353,7 +378,7 @@ class InvokeAIDiffuserComponent: sigma_twice = torch.cat([sigma] * 2) added_cond_kwargs = None - if type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo": + if type(conditioning_data.text_embeddings) is SDXLConditioningInfo: added_cond_kwargs = { "text_embeds": torch.cat([ # TODO: how to pad? just by zeros? or even truncate? 
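[Aside, not part of the patch. The change running through these hunks — type(x).__name__ == "SDXLConditioningInfo" becoming type(x) is SDXLConditioningInfo — trades a string comparison for an identity check, which refactoring tools can track and which cannot collide with an unrelated class that happens to share the name. Note that isinstance() would be wrong here, because the SDXL info subclasses the basic one:]

from dataclasses import dataclass

@dataclass
class BasicConditioningInfo:
    pass

@dataclass
class SDXLConditioningInfo(BasicConditioningInfo):
    pass

info = SDXLConditioningInfo()
print(type(info).__name__ == "SDXLConditioningInfo")  # True, but stringly typed
print(type(info) is SDXLConditioningInfo)             # True, and rename-safe
print(isinstance(info, BasicConditioningInfo))        # True: would misroute SDXL
                                                      # conditioning as basic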
@@ -404,7 +429,7 @@ class InvokeAIDiffuserComponent: uncond_mid_block, cond_mid_block = mid_block_additional_residual.chunk(2) added_cond_kwargs = None - is_sdxl = type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo" + is_sdxl = type(conditioning_data.text_embeddings) is SDXLConditioningInfo if is_sdxl: added_cond_kwargs = { "text_embeds": conditioning_data.unconditioned_embeddings.pooled_embeds, @@ -470,7 +495,7 @@ class InvokeAIDiffuserComponent: ) added_cond_kwargs = None - is_sdxl = type(conditioning_data.text_embeddings).__name__ == "SDXLConditioningInfo" + is_sdxl = type(conditioning_data.text_embeddings) is SDXLConditioningInfo if is_sdxl: added_cond_kwargs = { "text_embeds": conditioning_data.unconditioned_embeddings.pooled_embeds, From b4a74f65233d1e8dc38266cc7a0571873e10a76e Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Tue, 8 Aug 2023 23:57:02 +0300 Subject: [PATCH 11/67] Add MaskEdge and ColorCorrect nodes Co-Authored-By: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> --- invokeai/app/invocations/image.py | 173 +++++++++++++++++++++++++++++- 1 file changed, 170 insertions(+), 3 deletions(-) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 3f40ea3cbe..b6250a1fdc 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -2,6 +2,7 @@ from typing import Literal, Optional +import cv2 import numpy from PIL import Image, ImageFilter, ImageOps, ImageChops from pydantic import Field @@ -142,9 +143,10 @@ class ImagePasteInvocation(BaseInvocation, PILInvocationConfig): def invoke(self, context: InvocationContext) -> ImageOutput: base_image = context.services.images.get_pil_image(self.base_image.image_name) image = context.services.images.get_pil_image(self.image.image_name) - mask = ( - None if self.mask is None else ImageOps.invert(context.services.images.get_pil_image(self.mask.image_name)) - ) + mask = None + if self.mask is not None: + mask = context.services.images.get_pil_image(self.mask.image_name) + mask = ImageOps.invert(mask.convert("L")) # TODO: probably shouldn't invert mask here... should user be required to do it? 
min_x = min(0, self.x) @@ -650,3 +652,168 @@ class ImageWatermarkInvocation(BaseInvocation, PILInvocationConfig): width=image_dto.width, height=image_dto.height, ) + +class MaskEdgeInvocation(BaseInvocation, PILInvocationConfig): + """Applies an edge mask to an image""" + + # fmt: off + type: Literal["mask_edge"] = "mask_edge" + + # Inputs + image: Optional[ImageField] = Field(default=None, description="The image to apply the mask to") + edge_size: int = Field(description="The size of the edge") + edge_blur: int = Field(description="The amount of blur on the edge") + low_threshold: int = Field(description="First threshold for the hysteresis procedure in Canny edge detection") + high_threshold: int = Field(description="Second threshold for the hysteresis procedure in Canny edge detection") + # fmt: on + + def invoke(self, context: InvocationContext) -> MaskOutput: + mask = context.services.images.get_pil_image(self.image.image_name) + + npimg = numpy.asarray(mask, dtype=numpy.uint8) + npgradient = numpy.uint8( + 255 * (1.0 - numpy.floor(numpy.abs(0.5 - numpy.float32(npimg) / 255.0) * 2.0)) + ) + npedge = cv2.Canny(npimg, threshold1=self.low_threshold, threshold2=self.high_threshold) + npmask = npgradient + npedge + npmask = cv2.dilate( + npmask, numpy.ones((3, 3), numpy.uint8), iterations=int(self.edge_size / 2) + ) + + new_mask = Image.fromarray(npmask) + + if self.edge_blur > 0: + new_mask = new_mask.filter(ImageFilter.BoxBlur(self.edge_blur)) + + new_mask = ImageOps.invert(new_mask) + + image_dto = context.services.images.create( + image=new_mask, + image_origin=ResourceOrigin.INTERNAL, + image_category=ImageCategory.MASK, + node_id=self.id, + session_id=context.graph_execution_state_id, + is_intermediate=self.is_intermediate, + ) + + return MaskOutput( + mask=ImageField(image_name=image_dto.image_name), + width=image_dto.width, + height=image_dto.height, + ) + +class ColorCorrectInvocation(BaseInvocation, PILInvocationConfig): + + type: Literal["color_correct"] = "color_correct" + + init: Optional[ImageField] = Field(default=None, description="Initial image") + result: Optional[ImageField] = Field(default=None, description="Resulted image") + mask: Optional[ImageField] = Field(default=None, description="Mask image") + mask_blur_radius: float = Field(default=8, description="Mask blur radius") + + def invoke(self, context: InvocationContext) -> ImageOutput: + pil_init_mask = None + if self.mask is not None: + pil_init_mask = context.services.images.get_pil_image( + self.mask.image_name + ).convert("L") + + init_image = context.services.images.get_pil_image( + self.init.image_name + ) + + result = context.services.images.get_pil_image( + self.result.image_name + ).convert("RGBA") + + + #if init_image is None or init_mask is None: + # return result + + # Get the original alpha channel of the mask if there is one. + # Otherwise it is some other black/white image format ('1', 'L' or 'RGB') + #pil_init_mask = ( + # init_mask.getchannel("A") + # if init_mask.mode == "RGBA" + # else init_mask.convert("L") + #) + pil_init_image = init_image.convert( + "RGBA" + ) # Add an alpha channel if one doesn't exist + + # Build an image with only visible pixels from source to use as reference for color-matching. 
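[Stepping back to MaskEdgeInvocation above — a condensed, runnable sketch, not part of the patch. The recipe is: a band where the mask is soft, plus Canny edges, dilated and feathered around the boundary, then inverted:]

import cv2
import numpy as np
from PIL import Image, ImageFilter, ImageOps

def edge_mask(mask_l: Image.Image, edge_size: int = 16, edge_blur: int = 8) -> Image.Image:
    npimg = np.asarray(mask_l, dtype=np.uint8)
    # Pixels that are partially transparent (soft mask edges)...
    npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0)))
    # ...plus hard edges from Canny, thickened and blurred into a band.
    npedge = cv2.Canny(npimg, threshold1=100, threshold2=200)
    npmask = cv2.dilate(npgradient + npedge, np.ones((3, 3), np.uint8), iterations=edge_size // 2)
    out = Image.fromarray(npmask)
    if edge_blur > 0:
        out = out.filter(ImageFilter.BoxBlur(edge_blur))
    return ImageOps.invert(out)

demo = Image.new("L", (64, 64), 0)
demo.paste(255, (16, 16, 48, 48))
print(edge_mask(demo).size)  # (64, 64)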
+ init_rgb_pixels = numpy.asarray(init_image.convert("RGB"), dtype=numpy.uint8) + init_a_pixels = numpy.asarray(pil_init_image.getchannel("A"), dtype=numpy.uint8) + init_mask_pixels = numpy.asarray(pil_init_mask, dtype=numpy.uint8) + + # Get numpy version of result + np_image = numpy.asarray(result.convert("RGB"), dtype=numpy.uint8) + + # Mask and calculate mean and standard deviation + mask_pixels = init_a_pixels * init_mask_pixels > 0 + np_init_rgb_pixels_masked = init_rgb_pixels[mask_pixels, :] + np_image_masked = np_image[mask_pixels, :] + + if np_init_rgb_pixels_masked.size > 0: + init_means = np_init_rgb_pixels_masked.mean(axis=0) + init_std = np_init_rgb_pixels_masked.std(axis=0) + gen_means = np_image_masked.mean(axis=0) + gen_std = np_image_masked.std(axis=0) + + # Color correct + np_matched_result = np_image.copy() + np_matched_result[:, :, :] = ( + ( + ( + ( + np_matched_result[:, :, :].astype(numpy.float32) + - gen_means[None, None, :] + ) + / gen_std[None, None, :] + ) + * init_std[None, None, :] + + init_means[None, None, :] + ) + .clip(0, 255) + .astype(numpy.uint8) + ) + matched_result = Image.fromarray(np_matched_result, mode="RGB") + else: + matched_result = Image.fromarray(np_image, mode="RGB") + + # Blur the mask out (into init image) by specified amount + if self.mask_blur_radius > 0: + nm = numpy.asarray(pil_init_mask, dtype=numpy.uint8) + nmd = cv2.erode( + nm, + kernel=numpy.ones((3, 3), dtype=numpy.uint8), + iterations=int(self.mask_blur_radius / 2), + ) + pmd = Image.fromarray(nmd, mode="L") + blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(self.mask_blur_radius)) + else: + blurred_init_mask = pil_init_mask + + + multiplied_blurred_init_mask = ImageChops.multiply( + blurred_init_mask, result.split()[-1] + ) + + # Paste original on color-corrected generation (using blurred mask) + matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask) + + image_dto = context.services.images.create( + image=matched_result, + image_origin=ResourceOrigin.INTERNAL, + image_category=ImageCategory.GENERAL, + node_id=self.id, + session_id=context.graph_execution_state_id, + is_intermediate=self.is_intermediate, + ) + + return ImageOutput( + image=ImageField(image_name=image_dto.image_name), + width=image_dto.width, + height=image_dto.height, + ) + From e98f7eda2ef031f04dc836b131ccf62c48c8f0d4 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Wed, 9 Aug 2023 03:34:25 +0300 Subject: [PATCH 12/67] Fix total_steps in generation event, order field added --- invokeai/app/services/events.py | 2 ++ invokeai/app/util/step_callback.py | 3 ++- invokeai/backend/stable_diffusion/diffusers_pipeline.py | 7 +++++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/invokeai/app/services/events.py b/invokeai/app/services/events.py index 30fa89bd29..a266fe4f18 100644 --- a/invokeai/app/services/events.py +++ b/invokeai/app/services/events.py @@ -35,6 +35,7 @@ class EventServiceBase: source_node_id: str, progress_image: Optional[ProgressImage], step: int, + order: int, total_steps: int, ) -> None: """Emitted when there is generation progress""" @@ -46,6 +47,7 @@ class EventServiceBase: source_node_id=source_node_id, progress_image=progress_image.dict() if progress_image is not None else None, step=step, + order=order, total_steps=total_steps, ), ) diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index 5f802c796a..5770b61ec9 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -115,5 +115,6 @@ def 
stable_diffusion_step_callback( source_node_id=source_node_id, progress_image=ProgressImage(width=width, height=height, dataURL=dataURL), step=intermediate_state.step, - total_steps=node["steps"], + order=intermediate_state.order, + total_steps=intermediate_state.total_steps, ) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index f452dc1037..8b12101d69 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -6,6 +6,7 @@ import inspect from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union from pydantic import Field +import math import einops import PIL.Image import numpy as np @@ -42,6 +43,8 @@ from .diffusion import ( @dataclass class PipelineIntermediateState: step: int + order: int + total_steps: int timestep: int latents: torch.Tensor predicted_original: Optional[torch.Tensor] = None @@ -484,6 +487,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): yield PipelineIntermediateState( step=-1, + order=self.scheduler.order, + total_steps=len(timesteps), timestep=self.scheduler.config.num_train_timesteps, latents=latents, ) @@ -522,6 +527,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): yield PipelineIntermediateState( step=i, + order=self.scheduler.order, + total_steps=len(timesteps), timestep=int(t), latents=latents, predicted_original=predicted_original, From 17fed1c870e35469de1b7461975aa3bcf8ec6387 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Thu, 10 Aug 2023 05:03:33 +0300 Subject: [PATCH 13/67] Fix merge conflict errors --- invokeai/app/invocations/image.py | 1 - invokeai/app/invocations/latent.py | 17 +---------------- .../stable_diffusion/diffusers_pipeline.py | 5 ----- 3 files changed, 1 insertion(+), 22 deletions(-) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index ae09a0abfd..697df5c689 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -2,7 +2,6 @@ from typing import Literal, Optional -import cv2 import numpy import cv2 from PIL import Image, ImageFilter, ImageOps, ImageChops diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 02cee4cfa0..6d746b387d 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -25,7 +25,7 @@ from .controlnet_image_processors import ControlField from .image import ImageOutput from .model import ModelInfo, UNetField, VaeField from ..models.image import ImageCategory, ImageField, ResourceOrigin -from ...backend.model_management import ModelPatcher +from ...backend.model_management import ModelPatcher, BaseModelType from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion.diffusers_pipeline import ( ConditioningData, @@ -35,22 +35,7 @@ from ...backend.stable_diffusion.diffusers_pipeline import ( ) from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP -from ...backend.model_management import ModelPatcher, BaseModelType from ...backend.util.devices import choose_torch_device, torch_dtype, choose_precision -from ..models.image import ImageCategory, ImageField, ResourceOrigin -from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext -from .compel import ConditioningField -from 
.controlnet_image_processors import ControlField -from .image import ImageOutput -from .model import ModelInfo, UNetField, VaeField -from invokeai.app.util.controlnet_utils import prepare_control_image - -from diffusers.models.attention_processor import ( - AttnProcessor2_0, - LoRAAttnProcessor2_0, - LoRAXFormersAttnProcessor, - XFormersAttnProcessor, -) import torchvision.transforms as T from torchvision.transforms.functional import resize as tv_resize diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index f6b0b9a6ae..e5bb1f0f3d 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -5,7 +5,6 @@ import inspect import math import secrets from dataclasses import dataclass, field -import inspect from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union import PIL.Image @@ -13,14 +12,12 @@ import einops import psutil import torch import torchvision.transforms as T -from accelerate.utils import set_seed from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.controlnet import ControlNetModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import ( StableDiffusionPipeline, ) - from diffusers.pipelines.stable_diffusion.safety_checker import ( StableDiffusionSafetyChecker, ) @@ -29,7 +26,6 @@ from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutpu from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.outputs import BaseOutput from pydantic import Field -from torchvision.transforms.functional import resize as tv_resize from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from typing_extensions import ParamSpec @@ -334,7 +330,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # diffusers enable sdp automatically return - if self.unet.device.type == "cpu" or self.unet.device.type == "mps": mem_free = psutil.virtual_memory().free elif self.unet.device.type == "cuda": From e9ec5ab85cb233fe6e776519d6ba2b7cae153231 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Thu, 10 Aug 2023 06:19:22 +0300 Subject: [PATCH 14/67] Apply requested changes Co-Authored-By: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- invokeai/app/invocations/compel.py | 10 +++++++++- invokeai/app/invocations/image.py | 14 +++++++++----- invokeai/app/invocations/latent.py | 30 ++++++++++++++++++++---------- invokeai/app/util/step_callback.py | 5 ++--- 4 files changed, 40 insertions(+), 19 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 8ecd22e2d7..484d813dea 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -157,7 +157,15 @@ class CompelInvocation(BaseInvocation): class SDXLPromptInvocationBase: - def run_clip_compel(self, context, clip_field, prompt, get_pooled, lora_prefix, zero_on_empty): + def run_clip_compel( + self, + context: InvocationContext, + clip_field: ClipField, + prompt: str, + get_pooled: bool, + lora_prefix: str, + zero_on_empty: bool, + ): tokenizer_info = context.services.model_manager.get_model( **clip_field.tokenizer.dict(), context=context, diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 697df5c689..9e5cb9ecef 100644 --- 
a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -705,12 +705,16 @@ class MaskEdgeInvocation(BaseInvocation, PILInvocationConfig): class ColorCorrectInvocation(BaseInvocation, PILInvocationConfig): + """ + Shifts the colors of a target image to match the reference image, optionally + using a mask to only color-correct certain regions of the target image. + """ type: Literal["color_correct"] = "color_correct" - init: Optional[ImageField] = Field(default=None, description="Initial image") - result: Optional[ImageField] = Field(default=None, description="Resulted image") - mask: Optional[ImageField] = Field(default=None, description="Mask image") + image: Optional[ImageField] = Field(default=None, description="The image to color-correct") + reference: Optional[ImageField] = Field(default=None, description="Reference image for color-correction") + mask: Optional[ImageField] = Field(default=None, description="Mask to use when applying color-correction") mask_blur_radius: float = Field(default=8, description="Mask blur radius") def invoke(self, context: InvocationContext) -> ImageOutput: @@ -721,11 +725,11 @@ class ColorCorrectInvocation(BaseInvocation, PILInvocationConfig): ).convert("L") init_image = context.services.images.get_pil_image( - self.init.image_name + self.reference.image_name ) result = context.services.images.get_pil_image( - self.result.image_name + self.image.image_name ).convert("RGBA") diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 6d746b387d..c24e9aeba1 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -336,7 +336,9 @@ class TextToLatentsInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> LatentsOutput: with SilenceWarnings(): noise = context.services.latents.get(self.noise.latents_name) - seed = self.noise.seed or 0 + seed = self.noise.seed + if seed is None: + seed = 0 # Get the source node id (we are invoking the prepared node) graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) @@ -420,6 +422,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): # Inputs noise: Optional[LatentsField] = Field(description="The noise to use (test override for future optional)") + # denoising_start = 1 - strength denoising_start: float = Field(default=0.0, ge=0, le=1, description="") #denoising_end: float = Field(default=1.0, ge=0, le=1, description="") @@ -462,16 +465,23 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: with SilenceWarnings(): # this quenches NSFW nag from diffusers - latent = context.services.latents.get(self.latents.latents_name) - seed = self.latents.seed or 0 - + seed = None noise = None if self.noise is not None: noise = context.services.latents.get(self.noise.latents_name) - if self.noise.seed is not None: - seed = self.noise.seed + seed = self.noise.seed - mask = self.prep_mask_tensor(self.mask, context, latent) + if self.latents is not None: + latents = context.services.latents.get(self.latents.latents_name) + if seed is None: + seed = self.latents.seed + else: + latents = torch.zeros_like(noise) + + if seed is None: + seed = 0 + + mask = self.prep_mask_tensor(self.mask, context, latents) # Get the source node id (we are invoking the prepared node) graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) @@ -497,7 +507,7 @@ class 
LatentsToLatentsInvocation(TextToLatentsInvocation): with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet( unet_info.context.model, _lora_loader() ), unet_info as unet: - latent = latent.to(device=unet.device, dtype=unet.dtype) + latents = latents.to(device=unet.device, dtype=unet.dtype) if noise is not None: noise = noise.to(device=unet.device, dtype=unet.dtype) if mask is not None: @@ -516,7 +526,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): model=pipeline, context=context, control_input=self.control, - latents_shape=latent.shape, + latents_shape=latents.shape, # do_classifier_free_guidance=(self.cfg_scale >= 1.0)) do_classifier_free_guidance=True, exit_stack=exit_stack, @@ -531,7 +541,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation): ) result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( - latents=latent, + latents=latents, timesteps=timesteps, noise=noise, seed=seed, diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index 5770b61ec9..3cadfa1c12 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -58,6 +58,8 @@ def stable_diffusion_step_callback( # TODO: only output a preview image when requested if base_model in [BaseModelType.StableDiffusionXL, BaseModelType.StableDiffusionXLRefiner]: + # fast latents preview matrix for sdxl + # generated by @StAlKeR7779 sdxl_latent_rgb_factors = torch.tensor( [ # R G B @@ -72,9 +74,6 @@ def stable_diffusion_step_callback( sdxl_smooth_matrix = torch.tensor( [ - # [ 0.0478, 0.1285, 0.0478], - # [ 0.1285, 0.2948, 0.1285], - # [ 0.0478, 0.1285, 0.0478], [0.0358, 0.0964, 0.0358], [0.0964, 0.4711, 0.0964], [0.0358, 0.0964, 0.0358], From 7c0023ad9e6b04cf9c47dbd2113349d699b14821 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Fri, 11 Aug 2023 22:20:37 +1200 Subject: [PATCH 15/67] feat: Remove TextToLatents / Rename Latents To Latents -> DenoiseLatents --- invokeai/app/invocations/latent.py | 178 ++++++----------------------- 1 file changed, 32 insertions(+), 146 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index c24e9aeba1..743e3f54b2 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -5,6 +5,7 @@ from typing import List, Literal, Optional, Union import einops import torch +import torchvision.transforms as T from diffusers.image_processor import VaeImageProcessor from diffusers.models.attention_processor import ( AttnProcessor2_0, @@ -14,18 +15,14 @@ from diffusers.models.attention_processor import ( ) from diffusers.schedulers import SchedulerMixin as Scheduler from pydantic import BaseModel, Field, validator +from torchvision.transforms.functional import resize as tv_resize from invokeai.app.invocations.metadata import CoreMetadata from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.backend.model_management.models import ModelType, SilenceWarnings -from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext -from .compel import ConditioningField -from .controlnet_image_processors import ControlField -from .image import ImageOutput -from .model import ModelInfo, UNetField, VaeField -from ..models.image import ImageCategory, ImageField, ResourceOrigin -from ...backend.model_management import ModelPatcher, BaseModelType + +from 
...backend.model_management import BaseModelType, ModelPatcher from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion.diffusers_pipeline import ( ConditioningData, @@ -35,11 +32,13 @@ from ...backend.stable_diffusion.diffusers_pipeline import ( ) from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP -from ...backend.util.devices import choose_torch_device, torch_dtype, choose_precision - -import torchvision.transforms as T -from torchvision.transforms.functional import resize as tv_resize - +from ...backend.util.devices import choose_precision, choose_torch_device, torch_dtype +from ..models.image import ImageCategory, ImageField, ResourceOrigin +from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext +from .compel import ConditioningField +from .controlnet_image_processors import ControlField +from .image import ImageOutput +from .model import ModelInfo, UNetField, VaeField DEFAULT_PRECISION = choose_precision(choose_torch_device()) @@ -106,26 +105,31 @@ def get_scheduler( return scheduler -# Text to image -class TextToLatentsInvocation(BaseInvocation): - """Generates latents from conditionings.""" +class DenoiseLatentsInvocation(BaseInvocation): + """Denoises noisy latents to decodable images""" - type: Literal["t2l"] = "t2l" + type: Literal["denoise_latents"] = "denoise_latents" # Inputs - # fmt: off positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation") negative_conditioning: Optional[ConditioningField] = Field(description="Negative conditioning for generation") noise: Optional[LatentsField] = Field(description="The noise to use") - steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image") - cfg_scale: Union[float, List[float]] = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", ) - scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" ) + steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image") + cfg_scale: Union[float, List[float]] = Field( + default=7.5, + ge=1, + description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", + ) + denoising_start: float = Field(default=0.0, ge=0, le=1, description="") + denoising_end: float = Field(default=1.0, ge=0, le=1, description="") + scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use") unet: UNetField = Field(default=None, description="UNet submodel") control: Union[ControlField, list[ControlField]] = Field(default=None, description="The control to use") - denoising_end: float = Field(default=1.0, ge=0, le=1, description="") - # seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", ) - # seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'") - # fmt: on + latents: Optional[LatentsField] = Field(description="The latents to use as a base image") + mask: Optional[ImageField] = Field( + None, + description="Mask", + ) @validator("cfg_scale") def ge_one(cls, v): @@ -143,12 +147,11 @@ class TextToLatentsInvocation(BaseInvocation): class Config(InvocationConfig): schema_extra = { "ui": { - "title": "Text To 
Latents", - "tags": ["latents"], + "title": "Denoise Latents", + "tags": ["denoise", "latents"], "type_hints": { "model": "model", "control": "control", - # "cfg_scale": "float", "cfg_scale": "number", }, }, @@ -320,7 +323,7 @@ class TextToLatentsInvocation(BaseInvocation): t_start = int(round(denoising_start * num_inference_steps)) timesteps = scheduler.timesteps[t_start * scheduler.order :] - num_inference_steps = num_inference_steps - t_start + num_inference_steps = num_inference_steps - t_start # apply denoising_end num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0) @@ -331,121 +334,6 @@ class TextToLatentsInvocation(BaseInvocation): return num_inference_steps, timesteps - - @torch.no_grad() - def invoke(self, context: InvocationContext) -> LatentsOutput: - with SilenceWarnings(): - noise = context.services.latents.get(self.noise.latents_name) - seed = self.noise.seed - if seed is None: - seed = 0 - - # Get the source node id (we are invoking the prepared node) - graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) - source_node_id = graph_execution_state.prepared_source_mapping[self.id] - - def step_callback(state: PipelineIntermediateState): - self.dispatch_progress(context, source_node_id, state, self.unet.unet.base_model) - - def _lora_loader(): - for lora in self.unet.loras: - lora_info = context.services.model_manager.get_model( - **lora.dict(exclude={"weight"}), - context=context, - ) - yield (lora_info.context.model, lora.weight) - del lora_info - return - - unet_info = context.services.model_manager.get_model( - **self.unet.unet.dict(), - context=context, - ) - with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet( - unet_info.context.model, _lora_loader() - ), unet_info as unet: - noise = noise.to(device=unet.device, dtype=unet.dtype) - - scheduler = get_scheduler( - context=context, - scheduler_info=self.unet.scheduler, - scheduler_name=self.scheduler, - ) - - pipeline = self.create_pipeline(unet, scheduler) - conditioning_data = self.get_conditioning_data(context, scheduler, unet, seed) - - control_data = self.prep_control_data( - model=pipeline, - context=context, - control_input=self.control, - latents_shape=noise.shape, - # do_classifier_free_guidance=(self.cfg_scale >= 1.0)) - do_classifier_free_guidance=True, - exit_stack=exit_stack, - ) - - num_inference_steps, timesteps = self.init_scheduler( - scheduler, - device=unet.device, - steps=self.steps, - denoising_start=0.0, - denoising_end=self.denoising_end, - ) - - # TODO: Verify the noise is the right size - result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( - latents=torch.zeros_like(noise, dtype=torch_dtype(unet.device)), - noise=noise, - seed=seed, - timesteps=timesteps, - num_inference_steps=num_inference_steps, - conditioning_data=conditioning_data, - control_data=control_data, # list[ControlNetData] - callback=step_callback, - ) - - # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 - result_latents = result_latents.to("cpu") - torch.cuda.empty_cache() - - name = f"{context.graph_execution_state_id}__{self.id}" - context.services.latents.save(name, result_latents) - return build_latents_output(latents_name=name, latents=result_latents, seed=seed) - - -class LatentsToLatentsInvocation(TextToLatentsInvocation): - """Generates latents using latents as base image.""" - - type: Literal["l2l"] = "l2l" - - # Inputs - noise: Optional[LatentsField] = Field(description="The noise 
to use (test override for future optional)")
-
-    # denoising_start = 1 - strength
-    denoising_start: float = Field(default=0.0, ge=0, le=1, description="")
-    #denoising_end: float = Field(default=1.0, ge=0, le=1, description="")
-
-    latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
-
-    mask: Optional[ImageField] = Field(
-        None, description="Mask",
-    )
-
-    # Schema customisation
-    class Config(InvocationConfig):
-        schema_extra = {
-            "ui": {
-                "title": "Latent To Latents",
-                "tags": ["latents"],
-                "type_hints": {
-                    "model": "model",
-                    "control": "control",
-                    "cfg_scale": "number",
-                },
-            },
-        }
-
     def prep_mask_tensor(self, mask, context, latents):
         if mask is None:
             return None
@@ -457,9 +345,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
         mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
         if mask_tensor.dim() == 3:
             mask_tensor = mask_tensor.unsqueeze(0)
-        mask_tensor = tv_resize(
-            mask_tensor, latents.shape[-2:], T.InterpolationMode.BILINEAR
-        )
+        mask_tensor = tv_resize(mask_tensor, latents.shape[-2:], T.InterpolationMode.BILINEAR)
         return 1 - mask_tensor
 
     @torch.no_grad()

From 87ce4ab27c111c82c195da9f3c1be4cf03c655eb Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Fri, 11 Aug 2023 22:21:13 +1200
Subject: [PATCH 16/67] fix: Update default_graph to use new DenoiseLatents

---
 invokeai/app/services/default_graphs.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/invokeai/app/services/default_graphs.py b/invokeai/app/services/default_graphs.py
index cafb6f0339..caee5b631e 100644
--- a/invokeai/app/services/default_graphs.py
+++ b/invokeai/app/services/default_graphs.py
@@ -1,4 +1,4 @@
-from ..invocations.latent import LatentsToImageInvocation, TextToLatentsInvocation
+from ..invocations.latent import LatentsToImageInvocation, DenoiseLatentsInvocation
 from ..invocations.image import ImageNSFWBlurInvocation
 from ..invocations.noise import NoiseInvocation
 from ..invocations.compel import CompelInvocation
@@ -23,7 +23,7 @@ def create_text_to_image() -> LibraryGraph:
             "3": NoiseInvocation(id="3"),
             "4": CompelInvocation(id="4"),
             "5": CompelInvocation(id="5"),
-            "6": TextToLatentsInvocation(id="6"),
+            "6": DenoiseLatentsInvocation(id="6"),
             "7": LatentsToImageInvocation(id="7"),
             "8": ImageNSFWBlurInvocation(id="8"),
         },

From 7479f9cc02d51857cee652c4a576a9c4915f87eb Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Fri, 11 Aug 2023 22:22:01 +1200
Subject: [PATCH 17/67] feat: Update LinearUI to use new backend (except Inpaint)

---
 invokeai/frontend/web/scripts/typegen.js      |   2 +-
 .../graphBuilders/addSDXLRefinerToGraph.ts    |  18 +-
 .../buildCanvasImageToImageGraph.ts           |  27 +-
 .../buildCanvasTextToImageGraph.ts            |  36 +-
 .../buildLinearImageToImageGraph.ts           |  27 +-
 .../buildLinearSDXLImageToImageGraph.ts       |  26 +-
 .../buildLinearSDXLTextToImageGraph.ts        |  23 +-
 .../buildLinearTextToImageGraph.ts            |  36 +-
 .../nodes/util/graphBuilders/constants.ts     |   2 +-
 .../src/features/system/store/configSlice.ts  |   2 +-
 .../frontend/web/src/services/api/schema.d.ts | 816 +++++------------
 .../frontend/web/src/services/api/types.ts    |  10 +-
 12 files changed, 317 insertions(+), 708 deletions(-)

diff --git a/invokeai/frontend/web/scripts/typegen.js b/invokeai/frontend/web/scripts/typegen.js
index 015ae918ab..ec67c48f2d 100644
--- a/invokeai/frontend/web/scripts/typegen.js
+++ b/invokeai/frontend/web/scripts/typegen.js
@@ -1,7 
+1,7 @@ import fs from 'node:fs'; import openapiTS from 'openapi-typescript'; -const OPENAPI_URL = 'http://localhost:9090/openapi.json'; +const OPENAPI_URL = 'http://127.0.0.1:9090/openapi.json'; const OUTPUT_FILE = 'src/services/api/schema.d.ts'; async function main() { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index c47c7be8b4..3faf72fb2e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -2,12 +2,12 @@ import { RootState } from 'app/store/store'; import { MetadataAccumulatorInvocation } from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; import { + DENOISE_LATENTS, IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, SDXL_LATENTS_TO_LATENTS, SDXL_MODEL_LOADER, - SDXL_REFINER_LATENTS_TO_LATENTS, SDXL_REFINER_MODEL_LOADER, SDXL_REFINER_NEGATIVE_CONDITIONING, SDXL_REFINER_POSITIVE_CONDITIONING, @@ -91,9 +91,9 @@ export const addSDXLRefinerToGraph = ( style: `${negativePrompt} ${negativeStylePrompt}`, aesthetic_score: refinerAestheticScore, }; - graph.nodes[SDXL_REFINER_LATENTS_TO_LATENTS] = { - type: 'l2l_sdxl', - id: SDXL_REFINER_LATENTS_TO_LATENTS, + graph.nodes[DENOISE_LATENTS] = { + type: 'denoise_latents', + id: DENOISE_LATENTS, cfg_scale: refinerCFGScale, steps: refinerSteps / (1 - Math.min(refinerStart, 0.99)), scheduler: refinerScheduler, @@ -108,7 +108,7 @@ export const addSDXLRefinerToGraph = ( field: 'unet', }, destination: { - node_id: SDXL_REFINER_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -148,7 +148,7 @@ export const addSDXLRefinerToGraph = ( field: 'conditioning', }, destination: { - node_id: SDXL_REFINER_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -158,7 +158,7 @@ export const addSDXLRefinerToGraph = ( field: 'conditioning', }, destination: { - node_id: SDXL_REFINER_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -168,13 +168,13 @@ export const addSDXLRefinerToGraph = ( field: 'latents', }, destination: { - node_id: SDXL_REFINER_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, }, { source: { - node_id: SDXL_REFINER_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index 42f768c107..181a0d9294 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -15,10 +15,10 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CLIP_SKIP, + DENOISE_LATENTS, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, - LATENTS_TO_LATENTS, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -105,14 +105,15 @@ export const buildCanvasImageToImageGraph = ( is_intermediate: true, skipped_layers: clipSkip, }, - [LATENTS_TO_LATENTS]: { - type: 'l2l', - id: LATENTS_TO_LATENTS, + [DENOISE_LATENTS]: { + type: 'denoise_latents', + id: DENOISE_LATENTS, is_intermediate: true, cfg_scale, 
scheduler, steps, - strength, + denoising_start: 1 - strength, + denoising_end: 1, }, [IMAGE_TO_LATENTS]: { type: 'i2l', @@ -162,7 +163,7 @@ export const buildCanvasImageToImageGraph = ( }, { source: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -176,7 +177,7 @@ export const buildCanvasImageToImageGraph = ( field: 'latents', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, }, @@ -186,7 +187,7 @@ export const buildCanvasImageToImageGraph = ( field: 'noise', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'noise', }, }, @@ -196,7 +197,7 @@ export const buildCanvasImageToImageGraph = ( field: 'unet', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -206,7 +207,7 @@ export const buildCanvasImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -216,7 +217,7 @@ export const buildCanvasImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -324,7 +325,7 @@ export const buildCanvasImageToImageGraph = ( }); // add LoRA support - addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS); + addLoRAsToGraph(state, graph, DENOISE_LATENTS); // optionally add custom VAE addVAEToGraph(state, graph); @@ -333,7 +334,7 @@ export const buildCanvasImageToImageGraph = ( addDynamicPromptsToGraph(state, graph); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, LATENTS_TO_LATENTS); + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 5b636b482a..9e25f97586 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -2,6 +2,10 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { initialGenerationState } from 'features/parameters/store/generationSlice'; +import { + DenoiseLatentsInvocation, + ONNXTextToLatentsInvocation, +} from 'services/api/types'; import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; @@ -10,20 +14,16 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CLIP_SKIP, + DENOISE_LATENTS, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, - ONNX_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, + ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, TEXT_TO_IMAGE_GRAPH, - TEXT_TO_LATENTS, } from './constants'; -import { - ONNXTextToLatentsInvocation, - TextToLatentsInvocation, -} from 'services/api/types'; /** * Builds the Canvas tab's Text to Image graph. @@ -64,23 +64,25 @@ export const buildCanvasTextToImageGraph = ( const modelLoaderNodeType = isUsingOnnxModel ? 
'onnx_model_loader' : 'main_model_loader'; - const t2lNode: TextToLatentsInvocation | ONNXTextToLatentsInvocation = + const t2lNode: DenoiseLatentsInvocation | ONNXTextToLatentsInvocation = isUsingOnnxModel ? { type: 't2l_onnx', - id: TEXT_TO_LATENTS, + id: DENOISE_LATENTS, is_intermediate: true, cfg_scale, scheduler, steps, } : { - type: 't2l', - id: TEXT_TO_LATENTS, + type: 'denoise_latents', + id: DENOISE_LATENTS, is_intermediate: true, cfg_scale, scheduler, steps, + denoising_start: 0, + denoising_end: 1, }; /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the @@ -142,7 +144,7 @@ export const buildCanvasTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -152,7 +154,7 @@ export const buildCanvasTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -192,13 +194,13 @@ export const buildCanvasTextToImageGraph = ( field: 'unet', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'unet', }, }, { source: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -212,7 +214,7 @@ export const buildCanvasTextToImageGraph = ( field: 'noise', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'noise', }, }, @@ -252,7 +254,7 @@ export const buildCanvasTextToImageGraph = ( }); // add LoRA support - addLoRAsToGraph(state, graph, TEXT_TO_LATENTS, modelLoaderNodeId); + addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); // optionally add custom VAE addVAEToGraph(state, graph, modelLoaderNodeId); @@ -261,7 +263,7 @@ export const buildCanvasTextToImageGraph = ( addDynamicPromptsToGraph(state, graph); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, TEXT_TO_LATENTS); + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts index f264edc6be..69d5227ea2 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts @@ -14,10 +14,10 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CLIP_SKIP, + DENOISE_LATENTS, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, - LATENTS_TO_LATENTS, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -118,13 +118,14 @@ export const buildLinearImageToImageGraph = ( id: LATENTS_TO_IMAGE, fp32: vaePrecision === 'fp32' ? 
true : false, }, - [LATENTS_TO_LATENTS]: { - type: 'l2l', - id: LATENTS_TO_LATENTS, + [DENOISE_LATENTS]: { + type: 'denoise_latents', + id: DENOISE_LATENTS, cfg_scale, scheduler, steps, - strength, + denoising_start: 1 - strength, + denoising_end: 1, }, [IMAGE_TO_LATENTS]: { type: 'i2l', @@ -143,7 +144,7 @@ export const buildLinearImageToImageGraph = ( field: 'unet', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -179,7 +180,7 @@ export const buildLinearImageToImageGraph = ( }, { source: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -193,7 +194,7 @@ export const buildLinearImageToImageGraph = ( field: 'latents', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, }, @@ -203,7 +204,7 @@ export const buildLinearImageToImageGraph = ( field: 'noise', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'noise', }, }, @@ -213,7 +214,7 @@ export const buildLinearImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -223,7 +224,7 @@ export const buildLinearImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -334,7 +335,7 @@ export const buildLinearImageToImageGraph = ( }); // add LoRA support - addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS); + addLoRAsToGraph(state, graph, DENOISE_LATENTS); // optionally add custom VAE addVAEToGraph(state, graph); @@ -343,7 +344,7 @@ export const buildLinearImageToImageGraph = ( addDynamicPromptsToGraph(state, graph); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, LATENTS_TO_LATENTS); + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts index 0ec4e096d9..98f9458ae0 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts @@ -8,9 +8,11 @@ import { } from 'services/api/types'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + DENOISE_LATENTS, IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, @@ -19,10 +21,8 @@ import { POSITIVE_CONDITIONING, RESIZE, SDXL_IMAGE_TO_IMAGE_GRAPH, - SDXL_LATENTS_TO_LATENTS, SDXL_MODEL_LOADER, } from './constants'; -import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; /** * Builds the Image to Image tab graph. @@ -115,9 +115,9 @@ export const buildLinearSDXLImageToImageGraph = ( id: LATENTS_TO_IMAGE, fp32: vaePrecision === 'fp32' ? 
true : false, }, - [SDXL_LATENTS_TO_LATENTS]: { - type: 'l2l_sdxl', - id: SDXL_LATENTS_TO_LATENTS, + [DENOISE_LATENTS]: { + type: 'denoise_latents', + id: DENOISE_LATENTS, cfg_scale, scheduler, steps, @@ -143,7 +143,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'unet', }, destination: { - node_id: SDXL_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -209,7 +209,7 @@ export const buildLinearSDXLImageToImageGraph = ( }, { source: { - node_id: SDXL_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -223,7 +223,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'latents', }, destination: { - node_id: SDXL_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, }, @@ -233,7 +233,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'noise', }, destination: { - node_id: SDXL_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'noise', }, }, @@ -243,7 +243,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: SDXL_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -253,7 +253,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: SDXL_LATENTS_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -365,11 +365,11 @@ export const buildLinearSDXLImageToImageGraph = ( }, }); - addSDXLLoRAsToGraph(state, graph, SDXL_LATENTS_TO_LATENTS, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, DENOISE_LATENTS, SDXL_MODEL_LOADER); // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, SDXL_LATENTS_TO_LATENTS); + addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); } // add dynamic prompts - also sets up core iteration and seed diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts index 21b7c1e0ac..566eb6536e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts @@ -8,6 +8,7 @@ import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + DENOISE_LATENTS, LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -15,7 +16,6 @@ import { POSITIVE_CONDITIONING, SDXL_MODEL_LOADER, SDXL_TEXT_TO_IMAGE_GRAPH, - SDXL_TEXT_TO_LATENTS, } from './constants'; export const buildLinearSDXLTextToImageGraph = ( @@ -95,12 +95,13 @@ export const buildLinearSDXLTextToImageGraph = ( height, use_cpu, }, - [SDXL_TEXT_TO_LATENTS]: { - type: 't2l_sdxl', - id: SDXL_TEXT_TO_LATENTS, + [DENOISE_LATENTS]: { + type: 'denoise_latents', + id: DENOISE_LATENTS, cfg_scale, scheduler, steps, + denoising_start: 0, denoising_end: shouldUseSDXLRefiner ? 
refinerStart : 1, }, [LATENTS_TO_IMAGE]: { @@ -116,7 +117,7 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'unet', }, destination: { - node_id: SDXL_TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -176,7 +177,7 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: SDXL_TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -186,7 +187,7 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: SDXL_TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -196,13 +197,13 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'noise', }, destination: { - node_id: SDXL_TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'noise', }, }, { source: { - node_id: SDXL_TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -247,11 +248,11 @@ export const buildLinearSDXLTextToImageGraph = ( }, }); - addSDXLLoRAsToGraph(state, graph, SDXL_TEXT_TO_LATENTS, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, DENOISE_LATENTS, SDXL_MODEL_LOADER); // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, SDXL_TEXT_TO_LATENTS); + addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); } // add dynamic prompts - also sets up core iteration and seed diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts index 9dcc502d14..dcbbe69290 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts @@ -2,6 +2,10 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { initialGenerationState } from 'features/parameters/store/generationSlice'; +import { + DenoiseLatentsInvocation, + ONNXTextToLatentsInvocation, +} from 'services/api/types'; import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; @@ -10,20 +14,16 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CLIP_SKIP, + DENOISE_LATENTS, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, - ONNX_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, + ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, TEXT_TO_IMAGE_GRAPH, - TEXT_TO_LATENTS, } from './constants'; -import { - ONNXTextToLatentsInvocation, - TextToLatentsInvocation, -} from 'services/api/types'; export const buildLinearTextToImageGraph = ( state: RootState @@ -60,23 +60,25 @@ export const buildLinearTextToImageGraph = ( const modelLoaderNodeType = isUsingOnnxModel ? 'onnx_model_loader' : 'main_model_loader'; - const t2lNode: TextToLatentsInvocation | ONNXTextToLatentsInvocation = + const t2lNode: DenoiseLatentsInvocation | ONNXTextToLatentsInvocation = isUsingOnnxModel ? 
{ type: 't2l_onnx', - id: TEXT_TO_LATENTS, + id: DENOISE_LATENTS, is_intermediate: true, cfg_scale, scheduler, steps, } : { - type: 't2l', - id: TEXT_TO_LATENTS, + type: 'denoise_latents', + id: DENOISE_LATENTS, is_intermediate: true, cfg_scale, scheduler, steps, + denoising_start: 0, + denoising_end: 1, }; /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the @@ -149,7 +151,7 @@ export const buildLinearTextToImageGraph = ( field: 'unet', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -179,7 +181,7 @@ export const buildLinearTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -189,13 +191,13 @@ export const buildLinearTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, { source: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -209,7 +211,7 @@ export const buildLinearTextToImageGraph = ( field: 'noise', }, destination: { - node_id: TEXT_TO_LATENTS, + node_id: DENOISE_LATENTS, field: 'noise', }, }, @@ -249,7 +251,7 @@ export const buildLinearTextToImageGraph = ( }); // add LoRA support - addLoRAsToGraph(state, graph, TEXT_TO_LATENTS, modelLoaderNodeId); + addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); // optionally add custom VAE addVAEToGraph(state, graph, modelLoaderNodeId); @@ -258,7 +260,7 @@ export const buildLinearTextToImageGraph = ( addDynamicPromptsToGraph(state, graph); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, TEXT_TO_LATENTS); + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 7fa87c7f20..2282184521 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -1,7 +1,7 @@ // friendly node ids export const POSITIVE_CONDITIONING = 'positive_conditioning'; export const NEGATIVE_CONDITIONING = 'negative_conditioning'; -export const TEXT_TO_LATENTS = 'text_to_latents'; +export const DENOISE_LATENTS = 'denoise_latents'; export const LATENTS_TO_IMAGE = 'latents_to_image'; export const NSFW_CHECKER = 'nsfw_checker'; export const WATERMARKER = 'invisible_watermark'; diff --git a/invokeai/frontend/web/src/features/system/store/configSlice.ts b/invokeai/frontend/web/src/features/system/store/configSlice.ts index 6cff92a136..ff7a0c0700 100644 --- a/invokeai/frontend/web/src/features/system/store/configSlice.ts +++ b/invokeai/frontend/web/src/features/system/store/configSlice.ts @@ -61,7 +61,7 @@ export const initialConfigState: AppConfig = { }, img2imgStrength: { initial: 0.7, - min: 0, + min: 0.01, sliderMax: 1, inputMax: 1, fineStep: 0.01, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index fc3397820e..9755af9e32 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -179,6 +179,11 @@ export type paths = { * @description Gets a full-resolution image file */ get: 
operations["get_image_full"]; + /** + * Get Image Full + * @description Gets a full-resolution image file + */ + head: operations["get_image_full"]; }; "/api/v1/images/i/{image_name}/thumbnail": { /** @@ -707,6 +712,51 @@ export type components = { */ collection: (unknown)[]; }; + /** + * ColorCorrectInvocation + * @description Shifts the colors of a target image to match the reference image, optionally + * using a mask to only color-correct certain regions of the target image. + */ + ColorCorrectInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. + * @default false + */ + is_intermediate?: boolean; + /** + * Type + * @default color_correct + * @enum {string} + */ + type?: "color_correct"; + /** + * Image + * @description The image to color-correct + */ + image?: components["schemas"]["ImageField"]; + /** + * Reference + * @description Reference image for color-correction + */ + reference?: components["schemas"]["ImageField"]; + /** + * Mask + * @description Mask to use when applying color-correction + */ + mask?: components["schemas"]["ImageField"]; + /** + * Mask Blur Radius + * @description Mask blur radius + * @default 8 + */ + mask_blur_radius?: number; + }; /** ColorField */ ColorField: { /** @@ -1037,6 +1087,12 @@ export type components = { * @description Core generation metadata for an image generated in InvokeAI. */ CoreMetadata: { + /** + * App Version + * @description The version of InvokeAI used to generate this image + * @default 3.0.2 + */ + app_version?: string; /** * Generation Mode * @description The generation mode that output this image @@ -1219,6 +1275,93 @@ export type components = { /** Deleted Images */ deleted_images: (string)[]; }; + /** + * DenoiseLatentsInvocation + * @description Denoises noisy latents to decodable images + */ + DenoiseLatentsInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Type + * @default denoise_latents + * @enum {string} + */ + type?: "denoise_latents"; + /** + * Positive Conditioning + * @description Positive conditioning for generation + */ + positive_conditioning?: components["schemas"]["ConditioningField"]; + /** + * Negative Conditioning + * @description Negative conditioning for generation + */ + negative_conditioning?: components["schemas"]["ConditioningField"]; + /** + * Noise + * @description The noise to use + */ + noise?: components["schemas"]["LatentsField"]; + /** + * Steps + * @description The number of steps to use to generate the image + * @default 10 + */ + steps?: number; + /** + * Cfg Scale + * @description The Classifier-Free Guidance, higher values may result in a result closer to the prompt + * @default 7.5 + */ + cfg_scale?: number | (number)[]; + /** + * Denoising Start + * @default 0 + */ + denoising_start?: number; + /** + * Denoising End + * @default 1 + */ + denoising_end?: number; + /** + * Scheduler + * @description The scheduler to use + * @default euler + * @enum {string} + */ + scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; + /** + * Unet + * @description UNet submodel + */ + unet?: components["schemas"]["UNetField"]; + /** + * Control + * @description The control to use + */ + control?: components["schemas"]["ControlField"] | (components["schemas"]["ControlField"])[]; + /** + * Latents + * @description The latents to use as a base image + */ + latents?: components["schemas"]["LatentsField"]; + /** + * Mask + * @description Mask + */ + mask?: components["schemas"]["ImageField"]; + }; /** * DivideInvocation * @description Divides two numbers @@ -1443,7 +1586,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | 
components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined; + [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | 
components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | 
components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; }; /** * Edges @@ -1486,7 +1629,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined; + [key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined; }; /** * Errors @@ -2593,171 +2736,6 @@ export type components = { */ seed?: number; }; - /** - * InpaintInvocation - * @description Generates an image using inpaint. - */ - InpaintInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default inpaint - * @enum {string} - */ - type?: "inpaint"; - /** - * Positive Conditioning - * @description Positive conditioning for generation - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning for generation - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Seed - * @description The seed to use (omit for random) - */ - seed?: number; - /** - * Steps - * @description The number of steps to use to generate the image - * @default 30 - */ - steps?: number; - /** - * Width - * @description The width of the resulting image - * @default 512 - */ - width?: number; - /** - * Height - * @description The height of the resulting image - * @default 512 - */ - height?: number; - /** - * Cfg Scale - * @description The Classifier-Free Guidance, higher values may result in a result closer to the prompt - * @default 7.5 - */ - cfg_scale?: number; - /** - * Scheduler - * @description The scheduler to use - * @default euler - * @enum {string} - */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** - * Unet - * @description UNet model - */ - unet?: components["schemas"]["UNetField"]; - /** - * Vae - * @description Vae model - */ - vae?: components["schemas"]["VaeField"]; - /** - * Image - * @description The input image - */ - image?: components["schemas"]["ImageField"]; - /** - * Strength - * @description The strength of the original image - * @default 0.75 - */ - strength?: number; - /** - * Fit - * @description Whether or not the result should be fit to the aspect ratio of the input image - * @default true - */ - fit?: boolean; - /** - * Mask - * @description The mask - */ - mask?: components["schemas"]["ImageField"]; - /** - * Seam Size - * @description The seam inpaint size (px) - * @default 96 - */ - seam_size?: number; - /** - * Seam Blur - * @description The seam inpaint blur radius (px) - * @default 16 - */ - seam_blur?: number; - /** - * Seam Strength - * @description The seam inpaint strength - * @default 0.75 - */ - seam_strength?: number; - /** - * Seam Steps - * @description The number of steps to use for seam inpaint - * @default 30 - */ - seam_steps?: number; - /** - * Tile Size - * @description The tile infill method size (px) - * @default 32 - */ - tile_size?: number; - /** - * Infill Method - * @description The method used to infill empty regions (px) - * @default patchmatch - * @enum {string} - */ - infill_method?: "patchmatch" | "tile" | "solid"; - /** - * Inpaint Width - * @description The width of the inpaint region (px) - */ - inpaint_width?: number; - /** - * Inpaint Height - * @description The height of the inpaint region (px) - */ - inpaint_height?: number; - /** - * Inpaint Fill - * @description The solid infill method color - * @default { - * "r": 127, - * "g": 127, - * "b": 127, - * "a": 255 - * } - */ - inpaint_fill?: components["schemas"]["ColorField"]; - /** - * Inpaint Replace - * @description The amount by which to replace masked areas with latent noise - * @default 0 - */ - inpaint_replace?: number; - }; /** * IntCollectionOutput * @description A collection of integers @@ -2854,6 +2832,11 @@ export type components = { * @description The name of 
the latents */ latents_name: string; + /** + * Seed + * @description Seed used to generate this latents + */ + seed?: number; }; /** * LatentsOutput @@ -2932,84 +2915,6 @@ export type components = { */ metadata?: components["schemas"]["CoreMetadata"]; }; - /** - * LatentsToLatentsInvocation - * @description Generates latents using latents as base image. - */ - LatentsToLatentsInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. - * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default l2l - * @enum {string} - */ - type?: "l2l"; - /** - * Positive Conditioning - * @description Positive conditioning for generation - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning for generation - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Noise - * @description The noise to use - */ - noise?: components["schemas"]["LatentsField"]; - /** - * Steps - * @description The number of steps to use to generate the image - * @default 10 - */ - steps?: number; - /** - * Cfg Scale - * @description The Classifier-Free Guidance, higher values may result in a result closer to the prompt - * @default 7.5 - */ - cfg_scale?: number | (number)[]; - /** - * Scheduler - * @description The scheduler to use - * @default euler - * @enum {string} - */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** - * Unet - * @description UNet submodel - */ - unet?: components["schemas"]["UNetField"]; - /** - * Control - * @description The control to use - */ - control?: components["schemas"]["ControlField"] | (components["schemas"]["ControlField"])[]; - /** - * Latents - * @description The latents to use as a base image - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Strength - * @description The strength of the latents to use - * @default 0.7 - */ - strength?: number; - }; /** * LeresImageProcessorInvocation * @description Applies leres processing to image @@ -3368,6 +3273,54 @@ export type components = { */ model: components["schemas"]["MainModelField"]; }; + /** + * MaskEdgeInvocation + * @description Applies an edge mask to an image + */ + MaskEdgeInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. 
+ * @default false + */ + is_intermediate?: boolean; + /** + * Type + * @default mask_edge + * @enum {string} + */ + type?: "mask_edge"; + /** + * Image + * @description The image to apply the mask to + */ + image?: components["schemas"]["ImageField"]; + /** + * Edge Size + * @description The size of the edge + */ + edge_size: number; + /** + * Edge Blur + * @description The amount of blur on the edge + */ + edge_blur: number; + /** + * Low Threshold + * @description First threshold for the hysteresis procedure in Canny edge detection + */ + low_threshold: number; + /** + * High Threshold + * @description Second threshold for the hysteresis procedure in Canny edge detection + */ + high_threshold: number; + }; /** * MaskFromAlphaInvocation * @description Extracts the alpha channel of an image as a mask. @@ -4937,83 +4890,6 @@ export type components = { */ clip2?: components["schemas"]["ClipField"]; }; - /** - * SDXLLatentsToLatentsInvocation - * @description Generates latents from conditionings. - */ - SDXLLatentsToLatentsInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. - * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default l2l_sdxl - * @enum {string} - */ - type?: "l2l_sdxl"; - /** - * Positive Conditioning - * @description Positive conditioning for generation - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning for generation - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Noise - * @description The noise to use - */ - noise?: components["schemas"]["LatentsField"]; - /** - * Steps - * @description The number of steps to use to generate the image - * @default 10 - */ - steps?: number; - /** - * Cfg Scale - * @description The Classifier-Free Guidance, higher values may result in a result closer to the prompt - * @default 7.5 - */ - cfg_scale?: number | (number)[]; - /** - * Scheduler - * @description The scheduler to use - * @default euler - * @enum {string} - */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** - * Unet - * @description UNet submodel - */ - unet?: components["schemas"]["UNetField"]; - /** - * Latents - * @description Initial latents - */ - latents?: components["schemas"]["LatentsField"]; - /** - * Denoising Start - * @default 0 - */ - denoising_start?: number; - /** - * Denoising End - * @default 1 - */ - denoising_end?: number; - }; /** * SDXLLoraLoaderInvocation * @description Apply selected lora to unet and text_encoder. @@ -5150,81 +5026,6 @@ export type components = { */ vae?: components["schemas"]["VaeField"]; }; - /** - * SDXLRawPromptInvocation - * @description Pass unmodified prompt to conditioning without compel processing. - */ - SDXLRawPromptInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default sdxl_raw_prompt - * @enum {string} - */ - type?: "sdxl_raw_prompt"; - /** - * Prompt - * @description Prompt - * @default - */ - prompt?: string; - /** - * Style - * @description Style prompt - * @default - */ - style?: string; - /** - * Original Width - * @default 1024 - */ - original_width?: number; - /** - * Original Height - * @default 1024 - */ - original_height?: number; - /** - * Crop Top - * @default 0 - */ - crop_top?: number; - /** - * Crop Left - * @default 0 - */ - crop_left?: number; - /** - * Target Width - * @default 1024 - */ - target_width?: number; - /** - * Target Height - * @default 1024 - */ - target_height?: number; - /** - * Clip - * @description Clip to use - */ - clip?: components["schemas"]["ClipField"]; - /** - * Clip2 - * @description Clip2 to use - */ - clip2?: components["schemas"]["ClipField"]; - }; /** * SDXLRefinerCompelPromptInvocation * @description Parse prompt using compel package to conditioning. @@ -5339,132 +5140,6 @@ export type components = { */ vae?: components["schemas"]["VaeField"]; }; - /** - * SDXLRefinerRawPromptInvocation - * @description Parse prompt using compel package to conditioning. - */ - SDXLRefinerRawPromptInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. - * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default sdxl_refiner_raw_prompt - * @enum {string} - */ - type?: "sdxl_refiner_raw_prompt"; - /** - * Style - * @description Style prompt - * @default - */ - style?: string; - /** - * Original Width - * @default 1024 - */ - original_width?: number; - /** - * Original Height - * @default 1024 - */ - original_height?: number; - /** - * Crop Top - * @default 0 - */ - crop_top?: number; - /** - * Crop Left - * @default 0 - */ - crop_left?: number; - /** - * Aesthetic Score - * @default 6 - */ - aesthetic_score?: number; - /** - * Clip2 - * @description Clip to use - */ - clip2?: components["schemas"]["ClipField"]; - }; - /** - * SDXLTextToLatentsInvocation - * @description Generates latents from conditionings. - */ - SDXLTextToLatentsInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default t2l_sdxl - * @enum {string} - */ - type?: "t2l_sdxl"; - /** - * Positive Conditioning - * @description Positive conditioning for generation - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning for generation - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Noise - * @description The noise to use - */ - noise?: components["schemas"]["LatentsField"]; - /** - * Steps - * @description The number of steps to use to generate the image - * @default 10 - */ - steps?: number; - /** - * Cfg Scale - * @description The Classifier-Free Guidance, higher values may result in a result closer to the prompt - * @default 7.5 - */ - cfg_scale?: number | (number)[]; - /** - * Scheduler - * @description The scheduler to use - * @default euler - * @enum {string} - */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** - * Unet - * @description UNet submodel - */ - unet?: components["schemas"]["UNetField"]; - /** - * Denoising End - * @default 1 - */ - denoising_end?: number; - }; /** * ScaleLatentsInvocation * @description Scales latents by a given factor. @@ -5863,73 +5538,6 @@ export type components = { */ b?: number; }; - /** - * TextToLatentsInvocation - * @description Generates latents from conditionings. - */ - TextToLatentsInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. 
- * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default t2l - * @enum {string} - */ - type?: "t2l"; - /** - * Positive Conditioning - * @description Positive conditioning for generation - */ - positive_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Negative Conditioning - * @description Negative conditioning for generation - */ - negative_conditioning?: components["schemas"]["ConditioningField"]; - /** - * Noise - * @description The noise to use - */ - noise?: components["schemas"]["LatentsField"]; - /** - * Steps - * @description The number of steps to use to generate the image - * @default 10 - */ - steps?: number; - /** - * Cfg Scale - * @description The Classifier-Free Guidance, higher values may result in a result closer to the prompt - * @default 7.5 - */ - cfg_scale?: number | (number)[]; - /** - * Scheduler - * @description The scheduler to use - * @default euler - * @enum {string} - */ - scheduler?: "ddim" | "ddpm" | "deis" | "lms" | "lms_k" | "pndm" | "heun" | "heun_k" | "euler" | "euler_k" | "euler_a" | "kdpm_2" | "kdpm_2_a" | "dpmpp_2s" | "dpmpp_2s_k" | "dpmpp_2m" | "dpmpp_2m_k" | "dpmpp_2m_sde" | "dpmpp_2m_sde_k" | "dpmpp_sde" | "dpmpp_sde_k" | "unipc"; - /** - * Unet - * @description UNet submodel - */ - unet?: components["schemas"]["UNetField"]; - /** - * Control - * @description The control to use - */ - control?: components["schemas"]["ControlField"] | (components["schemas"]["ControlField"])[]; - }; /** TextualInversionModelConfig */ TextualInversionModelConfig: { /** Model Name */ @@ -6139,36 +5747,36 @@ export type components = { */ image?: components["schemas"]["ImageField"]; }; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusion2ModelFormat * @description An enumeration. * @enum {string} */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. 
+ * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -6279,7 +5887,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | 
components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | 
components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { @@ -6316,7 +5924,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | 
components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | 
components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | 
components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index e7e3accdad..7c8f0908b0 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -120,9 +120,6 @@ export type RandomRangeInvocation = TypeReq< export type RangeOfSizeInvocation = TypeReq< components['schemas']['RangeOfSizeInvocation'] >; -export type InpaintInvocation = TypeReq< - components['schemas']['InpaintInvocation'] ->; export type ImageResizeInvocation = TypeReq< components['schemas']['ImageResizeInvocation'] >; @@ -139,14 +136,11 @@ export type DynamicPromptInvocation = TypeReq< components['schemas']['DynamicPromptInvocation'] >; export type NoiseInvocation = TypeReq; -export type TextToLatentsInvocation = TypeReq< - components['schemas']['TextToLatentsInvocation'] ->; export type ONNXTextToLatentsInvocation = TypeReq< components['schemas']['ONNXTextToLatentsInvocation'] >; -export type LatentsToLatentsInvocation = TypeReq< - components['schemas']['LatentsToLatentsInvocation'] +export type DenoiseLatentsInvocation = TypeReq< + components['schemas']['DenoiseLatentsInvocation'] >; export type ImageToLatentsInvocation = TypeReq< components['schemas']['ImageToLatentsInvocation'] From f3ae52ff97a2c4983ec07c38ceb354792589acfd Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Fri, 11 Aug 2023 15:46:16 +0300 Subject: [PATCH 18/67] Fix error at high denoising_start, fix unipc(cpu_only) --- invokeai/app/invocations/latent.py | 10 +++- .../stable_diffusion/diffusers_pipeline.py | 47 +++++++------------ 2 files changed, 25 insertions(+), 32 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 743e3f54b2..15e7c15802 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -317,6 +317,9 @@ class DenoiseLatentsInvocation(BaseInvocation): return control_data def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end): + if scheduler.config.get("cpu_only", False): + device = torch.device("cpu") + # apply denoising_start num_inference_steps = steps scheduler.set_timesteps(num_inference_steps, device=device) @@ -325,6 +328,8 @@ class DenoiseLatentsInvocation(BaseInvocation): timesteps = scheduler.timesteps[t_start * scheduler.order :] num_inference_steps = num_inference_steps - t_start + init_timestep = timesteps[:1] + # apply denoising_end num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0) @@ -332,7 +337,7 @@ class DenoiseLatentsInvocation(BaseInvocation): num_inference_steps = num_inference_steps - skipped_final_steps timesteps = timesteps[: num_warmup_steps + scheduler.order * num_inference_steps] - return num_inference_steps, timesteps + return num_inference_steps, timesteps, init_timestep def prep_mask_tensor(self, mask, context, lantents): if mask is None: @@ -418,7 +423,7 
@@ class DenoiseLatentsInvocation(BaseInvocation): exit_stack=exit_stack, ) - num_inference_steps, timesteps = self.init_scheduler( + num_inference_steps, timesteps, init_timestep = self.init_scheduler( scheduler, device=unet.device, steps=self.steps, @@ -429,6 +434,7 @@ class DenoiseLatentsInvocation(BaseInvocation): result_latents, result_attention_map_saver = pipeline.latents_from_embeddings( latents=latents, timesteps=timesteps, + init_timestep=init_timestep, noise=noise, seed=seed, mask=mask, diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index e5bb1f0f3d..8e0edb3c30 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -365,22 +365,16 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): conditioning_data: ConditioningData, *, noise: Optional[torch.Tensor], - timesteps=None, + timesteps: torch.Tensor, + init_timestep: torch.Tensor, additional_guidance: List[Callable] = None, callback: Callable[[PipelineIntermediateState], None] = None, control_data: List[ControlNetData] = None, mask: Optional[torch.Tensor] = None, seed: Optional[int] = None, ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: - # TODO: - if self.scheduler.config.get("cpu_only", False): - scheduler_device = torch.device("cpu") - else: - scheduler_device = self.unet.device - - if timesteps is None: - self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device) - timesteps = self.scheduler.timesteps + if init_timestep.shape[0] == 0: + return latents, None infer_latents_from_embeddings = GeneratorToCallbackinator( self.generate_latents_from_embeddings, PipelineIntermediateState @@ -392,31 +386,12 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): orig_latents = latents.clone() batch_size = latents.shape[0] - batched_t = torch.full( - (batch_size,), - timesteps[0], - dtype=timesteps.dtype, - device=self.unet.device, - ) + batched_t = init_timestep.repeat(batch_size) if noise is not None: #latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers latents = self.scheduler.add_noise(latents, noise, batched_t) - else: - # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) - if mask is not None: - noise = torch.randn( - orig_latents.shape, - dtype=torch.float32, - device="cpu", - generator=torch.Generator(device="cpu").manual_seed(seed or 0), - ).to(device=orig_latents.device, dtype=orig_latents.dtype) - - latents = self.scheduler.add_noise(latents, noise, batched_t) - latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)) - - if mask is not None: if is_inpainting_model(self.unet): # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint @@ -428,6 +403,18 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): self._unet_forward, mask, orig_latents ) else: + # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) + if noise is None: + noise = torch.randn( + orig_latents.shape, + dtype=torch.float32, + device="cpu", + generator=torch.Generator(device="cpu").manual_seed(seed or 0), + ).to(device=orig_latents.device, dtype=orig_latents.dtype) + + latents = self.scheduler.add_noise(latents, noise, batched_t) + latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)) + 
additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise)) try: From 69a9dc7b36091a0fc74355479b13aaa566f69ced Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 02:42:13 +1200 Subject: [PATCH 19/67] wip: Add initial Inpaint Graph --- .../nodes/util/graphBuilders/addVAEToGraph.ts | 10 +- .../graphBuilders/buildCanvasInpaintGraph.ts | 109 +++++++++++++----- .../nodes/util/graphBuilders/constants.ts | 9 ++ 3 files changed, 93 insertions(+), 35 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index 1472b3ea3f..85e8fed572 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -4,8 +4,8 @@ import { MetadataAccumulatorInvocation } from 'services/api/types'; import { IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, - INPAINT, INPAINT_GRAPH, + INPAINT_IMAGE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, @@ -35,7 +35,11 @@ export const addVAEToGraph = ( }; } const isOnnxModel = modelLoaderNodeId == ONNX_MODEL_LOADER; - if (graph.id === TEXT_TO_IMAGE_GRAPH || graph.id === IMAGE_TO_IMAGE_GRAPH) { + if ( + graph.id === TEXT_TO_IMAGE_GRAPH || + graph.id === IMAGE_TO_IMAGE_GRAPH || + graph.id === INPAINT_GRAPH + ) { graph.edges.push({ source: { node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, @@ -68,7 +72,7 @@ export const addVAEToGraph = ( field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', }, destination: { - node_id: INPAINT, + node_id: INPAINT_IMAGE, field: 'vae', }, }); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 3cec76757f..892cdc2d7c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -3,10 +3,10 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { ImageDTO, - InpaintInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; @@ -15,9 +15,12 @@ import { CLIP_SKIP, INPAINT, INPAINT_GRAPH, + INPAINT_IMAGE, ITERATE, + LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, NEGATIVE_CONDITIONING, + NOISE, POSITIVE_CONDITIONING, RANDOM_INT, RANGE_OF_SIZE, @@ -44,6 +47,9 @@ export const buildCanvasInpaintGraph = ( iterations, seed, shouldRandomizeSeed, + vaePrecision, + shouldUseNoiseSettings, + shouldUseCpuNoise, seamSize, seamBlur, seamSteps, @@ -68,40 +74,38 @@ export const buildCanvasInpaintGraph = ( shouldAutoSave, } = state.canvas; + const use_cpu = shouldUseNoiseSettings + ? 
shouldUseCpuNoise + : shouldUseCpuNoise; + const graph: NonNullableGraph = { id: INPAINT_GRAPH, nodes: { [INPAINT]: { - is_intermediate: !shouldAutoSave, - type: 'inpaint', + type: 'denoise_latents', id: INPAINT, - steps, + is_intermediate: true, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: 1, + mask: canvasMaskImage, + }, + [INPAINT_IMAGE]: { + type: 'i2l', + id: INPAINT_IMAGE, + is_intermediate: true, + image: canvasInitImage, + fp32: vaePrecision === 'fp32' ? true : false, + }, + [NOISE]: { + type: 'noise', + id: NOISE, width, height, - cfg_scale, - scheduler, - image: { - image_name: canvasInitImage.image_name, - }, - strength, - fit: shouldFitToWidthHeight, - mask: { - image_name: canvasMaskImage.image_name, - }, - seam_size: seamSize, - seam_blur: seamBlur, - seam_strength: seamStrength, - seam_steps: seamSteps, - tile_size: infillMethod === 'tile' ? tileSize : undefined, - infill_method: infillMethod as InpaintInvocation['infill_method'], - inpaint_width: - boundingBoxScaleMethod !== 'none' - ? scaledBoundingBoxDimensions.width - : undefined, - inpaint_height: - boundingBoxScaleMethod !== 'none' - ? scaledBoundingBoxDimensions.height - : undefined, + use_cpu, + is_intermediate: true, }, [POSITIVE_CONDITIONING]: { type: 'compel', @@ -121,12 +125,19 @@ export const buildCanvasInpaintGraph = ( is_intermediate: true, model, }, + [LATENTS_TO_IMAGE]: { + type: 'l2i', + id: LATENTS_TO_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, [CLIP_SKIP]: { type: 'clip_skip', id: CLIP_SKIP, is_intermediate: true, skipped_layers: clipSkip, }, + [RANGE_OF_SIZE]: { type: 'range_of_size', id: RANGE_OF_SIZE, @@ -203,6 +214,26 @@ export const buildCanvasInpaintGraph = ( field: 'positive_conditioning', }, }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: INPAINT, + field: 'noise', + }, + }, + { + source: { + node_id: INPAINT_IMAGE, + field: 'latents', + }, + destination: { + node_id: INPAINT, + field: 'latents', + }, + }, { source: { node_id: RANGE_OF_SIZE, @@ -219,17 +250,25 @@ export const buildCanvasInpaintGraph = ( field: 'item', }, destination: { - node_id: INPAINT, + node_id: NOISE, field: 'seed', }, }, + { + source: { + node_id: INPAINT, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, ], }; - addLoRAsToGraph(state, graph, INPAINT); - // Add VAE - addVAEToGraph(state, graph); + addVAEToGraph(state, graph, MAIN_MODEL_LOADER); // handle seed if (shouldRandomizeSeed) { @@ -251,6 +290,12 @@ export const buildCanvasInpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // add LoRA support + addLoRAsToGraph(state, graph, INPAINT, MAIN_MODEL_LOADER); + + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, INPAINT); + // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! 
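
The graph above expresses img2img strength through the new denoise_latents node: denoising_start is set to 1 - strength, and PATCH 18's init_scheduler() maps that fraction onto a slice of the scheduler's timestep list, returning init_timestep separately so latents_from_embeddings() can return the latents untouched when a high denoising_start leaves no steps to run. The following standalone sketch traces that arithmetic; it is an illustration rather than patch code, and the exact t_start and skipped_final_steps formulas (context lines elided from the hunks above) are assumed here to be rounded fractions of the step count.

    # Illustration only -- mirrors init_scheduler() from PATCH 18 under the
    # assumptions stated above; any diffusers scheduler works, DDIM used here.
    import torch
    from diffusers import DDIMScheduler

    scheduler = DDIMScheduler()
    steps = 30
    strength = 0.75                  # canvas strength slider
    denoising_start = 1 - strength   # as wired by buildCanvasInpaintGraph
    denoising_end = 1.0

    scheduler.set_timesteps(steps, device=torch.device("cpu"))

    # apply denoising_start: drop the earliest (noisiest) part of the schedule
    t_start = int(round(steps * denoising_start))        # assumed definition
    timesteps = scheduler.timesteps[t_start * scheduler.order :]
    num_inference_steps = steps - t_start

    # the guard added to latents_from_embeddings(): empty when denoising_start == 1
    init_timestep = timesteps[:1]

    # apply denoising_end (a no-op here since denoising_end == 1.0)
    num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0)
    skipped_final_steps = int(round(steps * (1 - denoising_end)))  # assumed definition
    num_inference_steps -= skipped_final_steps
    timesteps = timesteps[: num_warmup_steps + scheduler.order * num_inference_steps]

With strength = 0.75 this leaves roughly the last three quarters of the 30-step schedule; with strength = 0 (denoising_start = 1) init_timestep comes back empty, which previously crashed on the removed timesteps[0] lookup and now short-circuits via the init_timestep.shape[0] == 0 guard.
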
diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts
index 2282184521..6a66093da0 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts
@@ -18,6 +18,15 @@ export const IMAGE_TO_LATENTS = 'image_to_latents';
 export const LATENTS_TO_LATENTS = 'latents_to_latents';
 export const RESIZE = 'resize_image';
 export const INPAINT = 'inpaint';
+export const INPAINT_SEAM_FIX = 'inpaint_seam_fix';
+export const INPAINT_IMAGE = 'inpaint_image';
+export const INFILL_TILE = 'infill_tile';
+export const INPAINT_FINAL_IMAGE = 'inpaint_final_image';
+export const MASK_FROM_ALPHA = 'tomask';
+export const MASK_EDGE = 'mask_edge';
+export const MASK_BLUR = 'mask_blur';
+export const COLOR_CORRECT = 'color_correct';
+export const PASTE_IMAGE = 'img_paste';
 export const CONTROL_NET_COLLECT = 'control_net_collect';
 export const DYNAMIC_PROMPT = 'dynamic_prompt';
 export const IMAGE_COLLECTION = 'image_collection';

From 1affb7f64790b15c3a4b8e3a97e77aeeae6ac756 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sat, 12 Aug 2023 03:28:19 +1200
Subject: [PATCH 20/67] feat: Add Paste / Mask Blur / Color Correction to
 Inpainting

Seam options are now removed. They are replaced by two options, Mask Blur
and Mask Blur Method, which control the softness of the mask that is
being painted.
---
 invokeai/frontend/web/public/locales/en.json  |  3 +
 .../graphBuilders/buildCanvasInpaintGraph.ts  | 72 +++++++++++++++++--
 .../ParamMaskAdjustmentCollapse.tsx           | 21 ++++++
 .../ParamMaskBlur.tsx}                        | 16 ++---
 .../MaskAdjustment/ParamMaskBlurMethod.tsx    | 36 ++++++++++
 .../ParamSeamCorrectionCollapse.tsx           | 22 ------
 .../Canvas/SeamCorrection/ParamSeamSize.tsx   | 31 --------
 .../Canvas/SeamCorrection/ParamSeamSteps.tsx  | 32 ---------
 .../SeamCorrection/ParamSeamStrength.tsx      | 32 ---------
 .../parameters/store/generationSlice.ts       | 33 +++------
 .../UnifiedCanvas/UnifiedCanvasParameters.tsx |  4 +-
 11 files changed, 147 insertions(+), 155 deletions(-)
 create mode 100644 invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskAdjustmentCollapse.tsx
 rename invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/{SeamCorrection/ParamSeamBlur.tsx => MaskAdjustment/ParamMaskBlur.tsx} (62%)
 create mode 100644 invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx
 delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamCorrectionCollapse.tsx
 delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSize.tsx
 delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSteps.tsx
 delete mode 100644 invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamStrength.tsx
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index 63380a19fa..fbae5b4a30 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -503,6 +503,9 @@
     "hiresStrength": "High Res Strength",
     "imageFit": "Fit Initial Image To Output Size",
    "codeformerFidelity": "Fidelity",
"maskAdjustmentsHeader": "Mask Adjustments", + "maskBlur": "Mask Blur", + "maskBlurMethod": "Mask Blur Method", "seamSize": "Seam Size", "seamBlur": "Seam Blur", "seamStrength": "Seam Strength", diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 892cdc2d7c..817e8d5176 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -13,12 +13,15 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CLIP_SKIP, + COLOR_CORRECT, INPAINT, + INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, ITERATE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, + MASK_BLUR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -50,10 +53,8 @@ export const buildCanvasInpaintGraph = ( vaePrecision, shouldUseNoiseSettings, shouldUseCpuNoise, - seamSize, - seamBlur, - seamSteps, - seamStrength, + maskBlur, + maskBlurMethod, tileSize, infillMethod, clipSkip, @@ -90,7 +91,6 @@ export const buildCanvasInpaintGraph = ( scheduler: scheduler, denoising_start: 1 - strength, denoising_end: 1, - mask: canvasMaskImage, }, [INPAINT_IMAGE]: { type: 'i2l', @@ -137,7 +137,27 @@ export const buildCanvasInpaintGraph = ( is_intermediate: true, skipped_layers: clipSkip, }, - + [COLOR_CORRECT]: { + type: 'color_correct', + id: COLOR_CORRECT, + is_intermediate: true, + reference: canvasInitImage, + mask: canvasMaskImage, + }, + [MASK_BLUR]: { + type: 'img_blur', + id: MASK_BLUR, + is_intermediate: true, + image: canvasMaskImage, + radius: maskBlur, + blur_type: maskBlurMethod, + }, + [INPAINT_FINAL_IMAGE]: { + type: 'img_paste', + id: INPAINT_FINAL_IMAGE, + is_intermediate: true, + base_image: canvasInitImage, + }, [RANGE_OF_SIZE]: { type: 'range_of_size', id: RANGE_OF_SIZE, @@ -234,6 +254,16 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: INPAINT, + field: 'mask', + }, + }, { source: { node_id: RANGE_OF_SIZE, @@ -264,6 +294,36 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'mask', + }, + }, + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'image', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskAdjustmentCollapse.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskAdjustmentCollapse.tsx new file mode 100644 index 0000000000..9ca6503d3d --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskAdjustmentCollapse.tsx @@ -0,0 +1,21 @@ +import { Flex } from '@chakra-ui/react'; +import IAICollapse from 'common/components/IAICollapse'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; +import ParamMaskBlur from './ParamMaskBlur'; +import ParamMaskBlurMethod from './ParamMaskBlurMethod'; + +const ParamMaskAdjustmentCollapse = () => { 
+ const { t } = useTranslation(); + + return ( + + + + + + + ); +}; + +export default memo(ParamMaskAdjustmentCollapse); diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamBlur.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlur.tsx similarity index 62% rename from invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamBlur.tsx rename to invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlur.tsx index 5c20ba7a13..82b82228e2 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamBlur.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlur.tsx @@ -1,31 +1,31 @@ import type { RootState } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import IAISlider from 'common/components/IAISlider'; -import { setSeamBlur } from 'features/parameters/store/generationSlice'; +import { setMaskBlur } from 'features/parameters/store/generationSlice'; import { useTranslation } from 'react-i18next'; -export default function ParamSeamBlur() { +export default function ParamMaskBlur() { const dispatch = useAppDispatch(); - const seamBlur = useAppSelector( - (state: RootState) => state.generation.seamBlur + const maskBlur = useAppSelector( + (state: RootState) => state.generation.maskBlur ); const { t } = useTranslation(); return ( { - dispatch(setSeamBlur(v)); + dispatch(setMaskBlur(v)); }} withInput withSliderMarks withReset handleReset={() => { - dispatch(setSeamBlur(16)); + dispatch(setMaskBlur(16)); }} /> ); diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx new file mode 100644 index 0000000000..b45dc8b884 --- /dev/null +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx @@ -0,0 +1,36 @@ +import { SelectItem } from '@mantine/core'; +import { RootState } from 'app/store/store'; + +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import IAIMantineSelect from 'common/components/IAIMantineSelect'; +import { setMaskBlurMethod } from 'features/parameters/store/generationSlice'; +import { useTranslation } from 'react-i18next'; + +export type MaskBlurMethods = 'box' | 'gaussian'; + +const maskBlurMethods: SelectItem[] = [ + { label: 'Box Blur', value: 'box' }, + { label: 'Gaussian Blur', value: 'gaussian' }, +]; + +export default function ParamMaskBlurMethod() { + const maskBlurMethod = useAppSelector( + (state: RootState) => state.generation.maskBlurMethod + ); + const dispatch = useAppDispatch(); + const { t } = useTranslation(); + + const handleMaskBlurMethodChange = (v: string | null) => { + if (!v) return; + dispatch(setMaskBlurMethod(v as MaskBlurMethods)); + }; + + return ( + + ); +} diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamCorrectionCollapse.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamCorrectionCollapse.tsx deleted file mode 100644 index 88d839fa15..0000000000 --- 
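The render bodies of the two components above also lost their JSX to extraction. Plausible reconstructions follow, assuming the usual IAISlider and IAIMantineSelect props; the label keys and slider range are assumptions, while the handlers and the reset value of 16 are visible in the hunks:

    // ParamMaskBlur.tsx (reconstructed return markup)
    return (
      <IAISlider
        label={t('parameters.maskBlur')} // assumed key
        min={0}                          // assumed range
        max={64}
        value={maskBlur}
        onChange={(v) => {
          dispatch(setMaskBlur(v));
        }}
        withInput
        withSliderMarks
        withReset
        handleReset={() => {
          dispatch(setMaskBlur(16));
        }}
      />
    );

    // ParamMaskBlurMethod.tsx (reconstructed return markup)
    return (
      <IAIMantineSelect
        label={t('parameters.maskBlurMethod')} // assumed key
        value={maskBlurMethod}
        data={maskBlurMethods}
        onChange={handleMaskBlurMethodChange}
      />
    );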
a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamCorrectionCollapse.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import IAICollapse from 'common/components/IAICollapse'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import ParamSeamBlur from './ParamSeamBlur'; -import ParamSeamSize from './ParamSeamSize'; -import ParamSeamSteps from './ParamSeamSteps'; -import ParamSeamStrength from './ParamSeamStrength'; - -const ParamSeamCorrectionCollapse = () => { - const { t } = useTranslation(); - - return ( - - - - - - - ); -}; - -export default memo(ParamSeamCorrectionCollapse); diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSize.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSize.tsx deleted file mode 100644 index 8e56cded7b..0000000000 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSize.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import type { RootState } from 'app/store/store'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAISlider from 'common/components/IAISlider'; -import { setSeamSize } from 'features/parameters/store/generationSlice'; -import { useTranslation } from 'react-i18next'; - -export default function ParamSeamSize() { - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const seamSize = useAppSelector( - (state: RootState) => state.generation.seamSize - ); - - return ( - { - dispatch(setSeamSize(v)); - }} - withInput - withSliderMarks - withReset - handleReset={() => dispatch(setSeamSize(96))} - /> - ); -} diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSteps.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSteps.tsx deleted file mode 100644 index 8ca5226621..0000000000 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamSteps.tsx +++ /dev/null @@ -1,32 +0,0 @@ -import type { RootState } from 'app/store/store'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAISlider from 'common/components/IAISlider'; -import { setSeamSteps } from 'features/parameters/store/generationSlice'; -import { useTranslation } from 'react-i18next'; - -export default function ParamSeamSteps() { - const { t } = useTranslation(); - const seamSteps = useAppSelector( - (state: RootState) => state.generation.seamSteps - ); - const dispatch = useAppDispatch(); - - return ( - { - dispatch(setSeamSteps(v)); - }} - withInput - withSliderMarks - withReset - handleReset={() => { - dispatch(setSeamSteps(30)); - }} - /> - ); -} diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamStrength.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamStrength.tsx deleted file mode 100644 index de74156cd3..0000000000 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamStrength.tsx +++ /dev/null @@ -1,32 +0,0 @@ -import { RootState } from 'app/store/store'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import IAISlider from 'common/components/IAISlider'; -import { setSeamStrength } from 'features/parameters/store/generationSlice'; -import { 
useTranslation } from 'react-i18next'; - -export default function ParamSeamStrength() { - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - const seamStrength = useAppSelector( - (state: RootState) => state.generation.seamStrength - ); - - return ( - { - dispatch(setSeamStrength(v)); - }} - withInput - withSliderMarks - withReset - handleReset={() => { - dispatch(setSeamStrength(0.7)); - }} - /> - ); -} diff --git a/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts b/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts index 12f2c3eabf..33a76da4e6 100644 --- a/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts +++ b/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts @@ -4,6 +4,7 @@ import { roundToMultiple } from 'common/util/roundDownToMultiple'; import { configChanged } from 'features/system/store/configSlice'; import { clamp } from 'lodash-es'; import { ImageDTO } from 'services/api/types'; +import { MaskBlurMethods } from '../components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod'; import { clipSkipMap } from '../types/constants'; import { CfgScaleParam, @@ -33,10 +34,8 @@ export interface GenerationState { positivePrompt: PositivePromptParam; negativePrompt: NegativePromptParam; scheduler: SchedulerParam; - seamBlur: number; - seamSize: number; - seamSteps: number; - seamStrength: number; + maskBlur: number; + maskBlurMethod: MaskBlurMethods; seed: SeedParam; seedWeights: string; shouldFitToWidthHeight: boolean; @@ -72,10 +71,8 @@ export const initialGenerationState: GenerationState = { positivePrompt: '', negativePrompt: '', scheduler: 'euler', - seamBlur: 16, - seamSize: 96, - seamSteps: 30, - seamStrength: 0.7, + maskBlur: 16, + maskBlurMethod: 'box', seed: 0, seedWeights: '', shouldFitToWidthHeight: true, @@ -196,17 +193,11 @@ export const generationSlice = createSlice({ clearInitialImage: (state) => { state.initialImage = undefined; }, - setSeamSize: (state, action: PayloadAction) => { - state.seamSize = action.payload; + setMaskBlur: (state, action: PayloadAction) => { + state.maskBlur = action.payload; }, - setSeamBlur: (state, action: PayloadAction) => { - state.seamBlur = action.payload; - }, - setSeamStrength: (state, action: PayloadAction) => { - state.seamStrength = action.payload; - }, - setSeamSteps: (state, action: PayloadAction) => { - state.seamSteps = action.payload; + setMaskBlurMethod: (state, action: PayloadAction) => { + state.maskBlurMethod = action.payload; }, setTileSize: (state, action: PayloadAction) => { state.tileSize = action.payload; @@ -312,10 +303,8 @@ export const { setPositivePrompt, setNegativePrompt, setScheduler, - setSeamBlur, - setSeamSize, - setSeamSteps, - setSeamStrength, + setMaskBlur, + setMaskBlurMethod, setSeed, setSeedWeights, setShouldFitToWidthHeight, diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx index de7ce3b084..fcfffee48b 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasParameters.tsx @@ -2,10 +2,10 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/Para import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; import ParamAdvancedCollapse from 
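The slice changes above collapse four seam reducers into two. A usage sketch (not part of the patch):

    const dispatch = useAppDispatch();
    dispatch(setMaskBlur(24));               // payload: blur radius in pixels
    dispatch(setMaskBlurMethod('gaussian')); // payload: MaskBlurMethods, 'box' | 'gaussian'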
'features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse'; import ParamInfillAndScalingCollapse from 'features/parameters/components/Parameters/Canvas/InfillAndScaling/ParamInfillAndScalingCollapse'; -import ParamSeamCorrectionCollapse from 'features/parameters/components/Parameters/Canvas/SeamCorrection/ParamSeamCorrectionCollapse'; import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamSymmetryCollapse from 'features/parameters/components/Parameters/Symmetry/ParamSymmetryCollapse'; // import ParamVariationCollapse from 'features/parameters/components/Parameters/Variations/ParamVariationCollapse'; +import ParamMaskAdjustmentCollapse from 'features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskAdjustmentCollapse'; import ParamPromptArea from 'features/parameters/components/Parameters/Prompt/ParamPromptArea'; import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons'; import UnifiedCanvasCoreParameters from './UnifiedCanvasCoreParameters'; @@ -21,7 +21,7 @@ const UnifiedCanvasParameters = () => { {/* */} - + From 5629d8fa372a170dfba945c251775a147e4a61ee Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 04:43:40 +1200 Subject: [PATCH 21/67] fix; Key issue in Lora List --- .../web/src/features/lora/components/ParamLoraList.tsx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/invokeai/frontend/web/src/features/lora/components/ParamLoraList.tsx b/invokeai/frontend/web/src/features/lora/components/ParamLoraList.tsx index f10084e585..5ba4e711ef 100644 --- a/invokeai/frontend/web/src/features/lora/components/ParamLoraList.tsx +++ b/invokeai/frontend/web/src/features/lora/components/ParamLoraList.tsx @@ -1,4 +1,4 @@ -import { Divider } from '@chakra-ui/react'; +import { Divider, Flex } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { stateSelector } from 'app/store/store'; import { useAppSelector } from 'app/store/storeHooks'; @@ -20,10 +20,10 @@ const ParamLoraList = () => { return ( <> {lorasArray.map((lora, i) => ( - <> - {i > 0 && } - - + + {i > 0 && } + + ))} ); From 58a48bf19717d76904e21299cb6b5f32a41a4b33 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 04:47:15 +1200 Subject: [PATCH 22/67] fix: LoRA list name sorting --- .../web/src/features/lora/components/ParamLoraSelect.tsx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx b/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx index 951037f9e3..2046d36ab2 100644 --- a/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx +++ b/invokeai/frontend/web/src/features/lora/components/ParamLoraSelect.tsx @@ -54,6 +54,8 @@ const ParamLoRASelect = () => { }); }); + data.sort((a, b) => (a.label && !b.label ? 1 : -1)); + return data.sort((a, b) => (a.disabled && !b.disabled ? 
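PATCH 21 above wraps each LoRA row in a keyed Flex because the shorthand fragment <>...</> cannot carry a key, so React was reconciling rows by position. A sketch of the fixed list (the stripped JSX reconstructed; the key expression and child component name are assumptions):

    {lorasArray.map((lora, i) => (
      <Flex key={lora.model_name} sx={{ flexDirection: 'column', gap: 2 }}>
        {i > 0 && <Divider />}
        <ParamLora lora={lora} />
      </Flex>
    ))}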
1 : -1)); }, [loras, loraModels, currentMainModel?.base_model]); From d7d6298ec0538f539df255c6dc3f589b537a61f0 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 05:32:11 +1200 Subject: [PATCH 23/67] feat: Add Infill Method support --- .../graphBuilders/buildCanvasInpaintGraph.ts | 34 ++++++++++++++++++- .../nodes/util/graphBuilders/constants.ts | 2 +- .../frontend/web/src/services/api/types.ts | 6 ++++ 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 817e8d5176..968e32efe9 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -3,6 +3,8 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { ImageDTO, + InfillPatchmatchInvocation, + InfillTileInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; @@ -18,6 +20,7 @@ import { INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, + INPAINT_INFILL, ITERATE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, @@ -60,6 +63,8 @@ export const buildCanvasInpaintGraph = ( clipSkip, } = state.generation; + const { generationMode } = state.canvas; + if (!model) { log.error('No model found in state'); throw new Error('No model found in state'); @@ -79,6 +84,23 @@ export const buildCanvasInpaintGraph = ( ? shouldUseCpuNoise : shouldUseCpuNoise; + let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { + type: 'infill_tile', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + tile_size: tileSize, + }; + + if (infillMethod === 'patchmatch') { + infillNode = { + type: 'infill_patchmatch', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + }; + } + const graph: NonNullableGraph = { id: INPAINT_GRAPH, nodes: { @@ -92,11 +114,11 @@ export const buildCanvasInpaintGraph = ( denoising_start: 1 - strength, denoising_end: 1, }, + [infillNode.id]: infillNode, [INPAINT_IMAGE]: { type: 'i2l', id: INPAINT_IMAGE, is_intermediate: true, - image: canvasInitImage, fp32: vaePrecision === 'fp32' ? 
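The infill selection above defaults to infill_tile and swaps in infill_patchmatch when chosen. A hypothetical third branch shows how the pattern extends; InfillColorInvocation does exist in the generated schema, but this branch and the color field shape are assumptions, and the union type on infillNode would need widening:

    if (infillMethod === 'color') {
      infillNode = {
        type: 'infill_color',
        id: INPAINT_INFILL,
        is_intermediate: true,
        image: canvasInitImage,
        color: { r: 127, g: 127, b: 127, a: 255 }, // assumed field shape
      };
    }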
true : false, }, [NOISE]: { @@ -244,6 +266,16 @@ export const buildCanvasInpaintGraph = ( field: 'noise', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE, + field: 'image', + }, + }, { source: { node_id: INPAINT_IMAGE, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 6a66093da0..b75dfbf4c6 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -20,7 +20,7 @@ export const RESIZE = 'resize_image'; export const INPAINT = 'inpaint'; export const INPAINT_SEAM_FIX = 'inpaint_seam_fix'; export const INPAINT_IMAGE = 'inpaint_image'; -export const INFILL_TILE = 'infill_tile'; +export const INPAINT_INFILL = 'inpaint_infill'; export const INPAINT_FINAL_IMAGE = 'inpaint_final_image'; export const MASK_FROM_ALPHA = 'tomask'; export const MASK_EDGE = 'mask_edge'; diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 7c8f0908b0..471c995f4d 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -172,6 +172,12 @@ export type ESRGANInvocation = TypeReq< export type DivideInvocation = TypeReq< components['schemas']['DivideInvocation'] >; +export type InfillTileInvocation = TypeReq< + components['schemas']['InfillTileInvocation'] +>; +export type InfillPatchmatchInvocation = TypeReq< + components['schemas']['InfillPatchMatchInvocation'] +>; export type ImageNSFWBlurInvocation = TypeReq< components['schemas']['ImageNSFWBlurInvocation'] >; From f343ab03022ff61498900d22b637439a95de5f6b Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 06:15:59 +1200 Subject: [PATCH 24/67] wip: Port Outpainting to new backend --- invokeai/app/invocations/image.py | 113 ++++++++++-------- .../graphBuilders/buildCanvasInpaintGraph.ts | 84 +++++++++++-- .../nodes/util/graphBuilders/constants.ts | 1 + .../frontend/web/src/services/api/schema.d.ts | 53 ++++++-- 4 files changed, 176 insertions(+), 75 deletions(-) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 9e5cb9ecef..cc05b529b5 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -1,29 +1,19 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from typing import Literal, Optional - -import numpy -import cv2 -from PIL import Image, ImageFilter, ImageOps, ImageChops -from pydantic import Field from pathlib import Path -from typing import Union +from typing import Literal, Optional, Union + +import cv2 +import numpy +from PIL import Image, ImageChops, ImageFilter, ImageOps +from pydantic import Field + from invokeai.app.invocations.metadata import CoreMetadata -from ..models.image import ( - ImageCategory, - ImageField, - ResourceOrigin, - PILInvocationConfig, - ImageOutput, - MaskOutput, -) -from .baseinvocation import ( - BaseInvocation, - InvocationContext, - InvocationConfig, -) -from invokeai.backend.image_util.safety_checker import SafetyChecker from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark +from invokeai.backend.image_util.safety_checker import SafetyChecker + +from ..models.image import ImageCategory, ImageField, ImageOutput, MaskOutput, PILInvocationConfig, 
ResourceOrigin +from .baseinvocation import BaseInvocation, InvocationConfig, InvocationContext class LoadImageInvocation(BaseInvocation): @@ -672,14 +662,10 @@ class MaskEdgeInvocation(BaseInvocation, PILInvocationConfig): mask = context.services.images.get_pil_image(self.image.image_name) npimg = numpy.asarray(mask, dtype=numpy.uint8) - npgradient = numpy.uint8( - 255 * (1.0 - numpy.floor(numpy.abs(0.5 - numpy.float32(npimg) / 255.0) * 2.0)) - ) + npgradient = numpy.uint8(255 * (1.0 - numpy.floor(numpy.abs(0.5 - numpy.float32(npimg) / 255.0) * 2.0))) npedge = cv2.Canny(npimg, threshold1=self.low_threshold, threshold2=self.high_threshold) npmask = npgradient + npedge - npmask = cv2.dilate( - npmask, numpy.ones((3, 3), numpy.uint8), iterations=int(self.edge_size / 2) - ) + npmask = cv2.dilate(npmask, numpy.ones((3, 3), numpy.uint8), iterations=int(self.edge_size / 2)) new_mask = Image.fromarray(npmask) @@ -704,9 +690,47 @@ class MaskEdgeInvocation(BaseInvocation, PILInvocationConfig): ) +class MaskCombineInvocation(BaseInvocation, PILInvocationConfig): + """Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.""" + + # fmt: off + type: Literal["mask_combine"] = "mask_combine" + + # Inputs + mask1: Optional[ImageField] = Field(default=None, description="The first mask to combine") + mask2: Optional[ImageField] = Field(default=None, description="The second image to combine") + # fmt: on + + class Config(InvocationConfig): + schema_extra = { + "ui": {"title": "Mask Combine", "tags": ["mask", "combine"]}, + } + + def invoke(self, context: InvocationContext) -> ImageOutput: + mask1 = context.services.images.get_pil_image(self.mask1.image_name).convert("L") + mask2 = context.services.images.get_pil_image(self.mask2.image_name).convert("L") + + combined_mask = ImageChops.multiply(mask1, mask2) + + image_dto = context.services.images.create( + image=combined_mask, + image_origin=ResourceOrigin.INTERNAL, + image_category=ImageCategory.GENERAL, + node_id=self.id, + session_id=context.graph_execution_state_id, + is_intermediate=self.is_intermediate, + ) + + return ImageOutput( + image=ImageField(image_name=image_dto.image_name), + width=image_dto.width, + height=image_dto.height, + ) + + class ColorCorrectInvocation(BaseInvocation, PILInvocationConfig): """ - Shifts the colors of a target image to match the reference image, optionally + Shifts the colors of a target image to match the reference image, optionally using a mask to only color-correct certain regions of the target image. """ @@ -720,32 +744,23 @@ class ColorCorrectInvocation(BaseInvocation, PILInvocationConfig): def invoke(self, context: InvocationContext) -> ImageOutput: pil_init_mask = None if self.mask is not None: - pil_init_mask = context.services.images.get_pil_image( - self.mask.image_name - ).convert("L") + pil_init_mask = context.services.images.get_pil_image(self.mask.image_name).convert("L") - init_image = context.services.images.get_pil_image( - self.reference.image_name - ) + init_image = context.services.images.get_pil_image(self.reference.image_name) - result = context.services.images.get_pil_image( - self.image.image_name - ).convert("RGBA") + result = context.services.images.get_pil_image(self.image.image_name).convert("RGBA") - - #if init_image is None or init_mask is None: + # if init_image is None or init_mask is None: # return result # Get the original alpha channel of the mask if there is one. 
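mask_combine above is a straight ImageChops.multiply: a pixel remains paintable only where both masks allow it. The same per-pixel rule in TypeScript, for illustration only:

    // Multiply two 8-bit single-channel masks: 255 * 255 -> 255, anything
    // times 0 -> 0, matching PIL.ImageChops.multiply semantics.
    const combineMasks = (a: Uint8Array, b: Uint8Array): Uint8Array =>
      a.map((v, i) => Math.round((v * b[i]) / 255));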
# Otherwise it is some other black/white image format ('1', 'L' or 'RGB') - #pil_init_mask = ( + # pil_init_mask = ( # init_mask.getchannel("A") # if init_mask.mode == "RGBA" # else init_mask.convert("L") - #) - pil_init_image = init_image.convert( - "RGBA" - ) # Add an alpha channel if one doesn't exist + # ) + pil_init_image = init_image.convert("RGBA") # Add an alpha channel if one doesn't exist # Build an image with only visible pixels from source to use as reference for color-matching. init_rgb_pixels = numpy.asarray(init_image.convert("RGB"), dtype=numpy.uint8) @@ -771,10 +786,7 @@ class ColorCorrectInvocation(BaseInvocation, PILInvocationConfig): np_matched_result[:, :, :] = ( ( ( - ( - np_matched_result[:, :, :].astype(numpy.float32) - - gen_means[None, None, :] - ) + (np_matched_result[:, :, :].astype(numpy.float32) - gen_means[None, None, :]) / gen_std[None, None, :] ) * init_std[None, None, :] @@ -800,10 +812,7 @@ class ColorCorrectInvocation(BaseInvocation, PILInvocationConfig): else: blurred_init_mask = pil_init_mask - - multiplied_blurred_init_mask = ImageChops.multiply( - blurred_init_mask, result.split()[-1] - ) + multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, result.split()[-1]) # Paste original on color-corrected generation (using blurred mask) matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 968e32efe9..1e2b9d76d8 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -25,6 +25,8 @@ import { LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, MASK_BLUR, + MASK_COMBINE, + MASK_FROM_ALPHA, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -63,8 +65,6 @@ export const buildCanvasInpaintGraph = ( clipSkip, } = state.generation; - const { generationMode } = state.canvas; - if (!model) { log.error('No model found in state'); throw new Error('No model found in state'); @@ -115,6 +115,25 @@ export const buildCanvasInpaintGraph = ( denoising_end: 1, }, [infillNode.id]: infillNode, + [MASK_FROM_ALPHA]: { + type: 'tomask', + id: MASK_FROM_ALPHA, + is_intermediate: true, + image: canvasInitImage, + }, + [MASK_COMBINE]: { + type: 'mask_combine', + id: MASK_COMBINE, + is_intermediate: true, + mask2: canvasMaskImage, + }, + [MASK_BLUR]: { + type: 'img_blur', + id: MASK_BLUR, + is_intermediate: true, + radius: maskBlur, + blur_type: maskBlurMethod, + }, [INPAINT_IMAGE]: { type: 'i2l', id: INPAINT_IMAGE, @@ -163,22 +182,11 @@ export const buildCanvasInpaintGraph = ( type: 'color_correct', id: COLOR_CORRECT, is_intermediate: true, - reference: canvasInitImage, - mask: canvasMaskImage, - }, - [MASK_BLUR]: { - type: 'img_blur', - id: MASK_BLUR, - is_intermediate: true, - image: canvasMaskImage, - radius: maskBlur, - blur_type: maskBlurMethod, }, [INPAINT_FINAL_IMAGE]: { type: 'img_paste', id: INPAINT_FINAL_IMAGE, is_intermediate: true, - base_image: canvasInitImage, }, [RANGE_OF_SIZE]: { type: 'range_of_size', @@ -286,6 +294,26 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + { + source: { + node_id: MASK_FROM_ALPHA, + field: 'mask', + }, + destination: { + node_id: MASK_COMBINE, + field: 'mask1', + }, + }, + { + source: { + node_id: MASK_COMBINE, + field: 'image', + }, + destination: { + node_id: 
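The color correction above is per-channel moment matching between the generated pixels and the reference. In scalar form, for one channel value g (a restatement of the NumPy expression, not new behavior):

    // matched = (g - mean_gen) / std_gen * std_init + mean_init
    const matchChannel = (
      g: number,
      genMean: number,
      genStd: number,
      initMean: number,
      initStd: number
    ) => ((g - genMean) / genStd) * initStd + initMean;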
MASK_BLUR, + field: 'image', + }, + }, { source: { node_id: MASK_BLUR, @@ -326,6 +354,26 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'reference', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, { source: { node_id: LATENTS_TO_IMAGE, @@ -336,6 +384,16 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'base_image', + }, + }, { source: { node_id: MASK_BLUR, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index b75dfbf4c6..076c92eb76 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -25,6 +25,7 @@ export const INPAINT_FINAL_IMAGE = 'inpaint_final_image'; export const MASK_FROM_ALPHA = 'tomask'; export const MASK_EDGE = 'mask_edge'; export const MASK_BLUR = 'mask_blur'; +export const MASK_COMBINE = 'mask_combine'; export const COLOR_CORRECT = 'color_correct'; export const PASTE_IMAGE = 'img_paste'; export const CONTROL_NET_COLLECT = 'control_net_collect'; diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 9755af9e32..b4fcf7f75c 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -714,7 +714,7 @@ export type components = { }; /** * ColorCorrectInvocation - * @description Shifts the colors of a target image to match the reference image, optionally + * @description Shifts the colors of a target image to match the reference image, optionally * using a mask to only color-correct certain regions of the target image. 
*/ ColorCorrectInvocation: { @@ -1586,7 +1586,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | 
components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; + [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; }; /** * Edges @@ -3273,6 +3273,39 @@ export type components = { */ model: components["schemas"]["MainModelField"]; }; + /** + * MaskCombineInvocation + * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. + */ + MaskCombineInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. + * @default false + */ + is_intermediate?: boolean; + /** + * Type + * @default mask_combine + * @enum {string} + */ + type?: "mask_combine"; + /** + * Mask1 + * @description The first mask to combine + */ + mask1?: components["schemas"]["ImageField"]; + /** + * Mask2 + * @description The second image to combine + */ + mask2?: components["schemas"]["ImageField"]; + }; /** * MaskEdgeInvocation * @description Applies an edge mask to an image @@ -5753,12 +5786,6 @@ export type components = { * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * ControlNetModelFormat * @description An enumeration. @@ -5771,6 +5798,12 @@ export type components = { * @enum {string} */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusion1ModelFormat * @description An enumeration. 
@@ -5887,7 +5920,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] 
| components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | 
components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { @@ -5924,7 +5957,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["ColorCorrectInvocation"] | 
components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | 
components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | 
components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { From 7293a6036ad27e400188a9b774156f3224784500 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 08:16:05 +1200 Subject: [PATCH 25/67] feat(wip): Add SDXL To Canvas --- .../listeners/tabChanged.ts | 9 +- .../util/graphBuilders/buildCanvasGraph.ts | 35 +- .../buildCanvasSDXLImageToImageGraph.ts | 373 ++++++++++++++ .../buildCanvasSDXLInpaintGraph.ts | 480 ++++++++++++++++++ .../buildCanvasSDXLTextToImageGraph.ts | 304 +++++++++++ .../MainModel/ParamMainModelSelect.tsx | 7 +- .../SDXLUnifiedCanvasTabParameters.tsx | 29 ++ .../tabs/UnifiedCanvas/UnifiedCanvasTab.tsx | 10 +- 8 files changed, 1236 insertions(+), 11 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts create mode 100644 invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts create mode 100644 invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts create mode 100644 invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/tabChanged.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/tabChanged.ts index 6d3e599ae2..6791324fdd 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/tabChanged.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/tabChanged.ts @@ -12,7 +12,10 @@ export const addTabChangedListener = () => { if (activeTabName === 'unifiedCanvas') { const currentBaseModel = getState().generation.model?.base_model; - if (currentBaseModel && ['sd-1', 'sd-2'].includes(currentBaseModel)) { + if ( + currentBaseModel && + ['sd-1', 'sd-2', 'sdxl'].includes(currentBaseModel) + ) { // if we're already on a valid model, no change needed return; } @@ -36,7 +39,9 @@ export const addTabChangedListener = () => { const validCanvasModels = mainModelsAdapter .getSelectors() .selectAll(models) - .filter((model) => ['sd-1', 'sd-2'].includes(model.base_model)); + .filter((model) => + ['sd-1', 'sd-2', 'sxdl'].includes(model.base_model) + ); const firstValidCanvasModel = validCanvasModels[0]; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts index 8a7716071f..dd0a5e6619 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts @@ -3,6 +3,9 @@ import { NonNullableGraph } from 'features/nodes/types/types'; import { ImageDTO } from 'services/api/types'; import { buildCanvasImageToImageGraph } from './buildCanvasImageToImageGraph'; import { buildCanvasInpaintGraph } from './buildCanvasInpaintGraph'; +import { buildCanvasSDXLImageToImageGraph } from './buildCanvasSDXLImageToImageGraph'; +import { buildCanvasSDXLInpaintGraph } from './buildCanvasSDXLInpaintGraph'; +import { buildCanvasSDXLTextToImageGraph } from './buildCanvasSDXLTextToImageGraph'; import { buildCanvasTextToImageGraph } from './buildCanvasTextToImageGraph'; export const buildCanvasGraph = ( @@ -14,17 +17,43 @@ export const buildCanvasGraph = ( let graph: NonNullableGraph; if 
(generationMode === 'txt2img') { - graph = buildCanvasTextToImageGraph(state); + if ( + state.generation.model && + state.generation.model.base_model === 'sdxl' + ) { + graph = buildCanvasSDXLTextToImageGraph(state); + } else { + graph = buildCanvasTextToImageGraph(state); + } } else if (generationMode === 'img2img') { if (!canvasInitImage) { throw new Error('Missing canvas init image'); } - graph = buildCanvasImageToImageGraph(state, canvasInitImage); + if ( + state.generation.model && + state.generation.model.base_model === 'sdxl' + ) { + graph = buildCanvasSDXLImageToImageGraph(state, canvasInitImage); + } else { + graph = buildCanvasImageToImageGraph(state, canvasInitImage); + } } else { if (!canvasInitImage || !canvasMaskImage) { throw new Error('Missing canvas init and mask images'); } - graph = buildCanvasInpaintGraph(state, canvasInitImage, canvasMaskImage); + + if ( + state.generation.model && + state.generation.model.base_model === 'sdxl' + ) { + graph = buildCanvasSDXLInpaintGraph( + state, + canvasInitImage, + canvasMaskImage + ); + } else { + graph = buildCanvasInpaintGraph(state, canvasInitImage, canvasMaskImage); + } } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts new file mode 100644 index 0000000000..b8322fd612 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -0,0 +1,373 @@ +import { logger } from 'app/logging/logger'; +import { RootState } from 'app/store/store'; +import { NonNullableGraph } from 'features/nodes/types/types'; +import { initialGenerationState } from 'features/parameters/store/generationSlice'; +import { + ImageDTO, + ImageResizeInvocation, + ImageToLatentsInvocation, +} from 'services/api/types'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; +import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; +import { addLoRAsToGraph } from './addLoRAsToGraph'; +import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addVAEToGraph } from './addVAEToGraph'; +import { addWatermarkerToGraph } from './addWatermarkerToGraph'; +import { + DENOISE_LATENTS, + IMAGE_TO_IMAGE_GRAPH, + IMAGE_TO_LATENTS, + LATENTS_TO_IMAGE, + METADATA_ACCUMULATOR, + NEGATIVE_CONDITIONING, + NOISE, + POSITIVE_CONDITIONING, + RESIZE, + SDXL_MODEL_LOADER, +} from './constants'; + +/** + * Builds the Canvas tab's Image to Image graph. + */ +export const buildCanvasSDXLImageToImageGraph = ( + state: RootState, + initialImage: ImageDTO +): NonNullableGraph => { + const log = logger('nodes'); + const { + positivePrompt, + negativePrompt, + model, + cfgScale: cfg_scale, + scheduler, + steps, + clipSkip, + shouldUseCpuNoise, + shouldUseNoiseSettings, + } = state.generation; + + const { + positiveStylePrompt, + negativeStylePrompt, + shouldConcatSDXLStylePrompt, + shouldUseSDXLRefiner, + refinerStart, + sdxlImg2ImgDenoisingStrength: strength, + } = state.sdxl; + + // The bounding box determines width and height, not the width and height params + const { width, height } = state.canvas.boundingBoxDimensions; + + const { shouldAutoSave } = state.canvas; + + if (!model) { + log.error('No model found in state'); + throw new Error('No model found in state'); + } + + const use_cpu = shouldUseNoiseSettings + ? 
shouldUseCpuNoise + : initialGenerationState.shouldUseCpuNoise; + + /** + * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the + * full graph here as a template. Then use the parameters from app state and set friendlier node + * ids. + * + * The only thing we need extra logic for is handling randomized seed, control net, and for img2img, + * the `fit` param. These are added to the graph at the end. + */ + + // copy-pasted graph from node editor, filled in with state values & friendly node ids + const graph: NonNullableGraph = { + id: IMAGE_TO_IMAGE_GRAPH, + nodes: { + [SDXL_MODEL_LOADER]: { + type: 'sdxl_model_loader', + id: SDXL_MODEL_LOADER, + model, + }, + [POSITIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: POSITIVE_CONDITIONING, + prompt: positivePrompt, + style: shouldConcatSDXLStylePrompt + ? `${positivePrompt} ${positiveStylePrompt}` + : positiveStylePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: NEGATIVE_CONDITIONING, + prompt: negativePrompt, + style: shouldConcatSDXLStylePrompt + ? `${negativePrompt} ${negativeStylePrompt}` + : negativeStylePrompt, + }, + [NOISE]: { + type: 'noise', + id: NOISE, + is_intermediate: true, + use_cpu, + }, + [DENOISE_LATENTS]: { + type: 'denoise_latents', + id: DENOISE_LATENTS, + is_intermediate: true, + cfg_scale, + scheduler, + steps, + denoising_start: shouldUseSDXLRefiner + ? Math.min(refinerStart, 1 - strength) + : 1 - strength, + denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, + }, + [IMAGE_TO_LATENTS]: { + type: 'i2l', + id: IMAGE_TO_LATENTS, + is_intermediate: true, + // must be set manually later, bc `fit` parameter may require a resize node inserted + // image: { + // image_name: initialImage.image_name, + // }, + }, + [LATENTS_TO_IMAGE]: { + type: 'l2i', + id: LATENTS_TO_IMAGE, + is_intermediate: !shouldAutoSave, + }, + }, + edges: [ + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, + { + source: { + node_id: IMAGE_TO_LATENTS, + field: 'latents', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'noise', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'unet', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'unet', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip2', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip2', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'negative_conditioning', + }, + }, + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'positive_conditioning', + }, + }, + ], + }; + + // handle `fit` + if (initialImage.width !== width || initialImage.height !== height) { + // The init image needs to be resized to the specified 
width and height before being passed to `IMAGE_TO_LATENTS` + + // Create a resize node, explicitly setting its image + const resizeNode: ImageResizeInvocation = { + id: RESIZE, + type: 'img_resize', + image: { + image_name: initialImage.image_name, + }, + is_intermediate: true, + width, + height, + }; + + graph.nodes[RESIZE] = resizeNode; + + // The `RESIZE` node then passes its image to `IMAGE_TO_LATENTS` + graph.edges.push({ + source: { node_id: RESIZE, field: 'image' }, + destination: { + node_id: IMAGE_TO_LATENTS, + field: 'image', + }, + }); + + // The `RESIZE` node also passes its width and height to `NOISE` + graph.edges.push({ + source: { node_id: RESIZE, field: 'width' }, + destination: { + node_id: NOISE, + field: 'width', + }, + }); + + graph.edges.push({ + source: { node_id: RESIZE, field: 'height' }, + destination: { + node_id: NOISE, + field: 'height', + }, + }); + } else { + // We are not resizing, so we need to set the image on the `IMAGE_TO_LATENTS` node explicitly + (graph.nodes[IMAGE_TO_LATENTS] as ImageToLatentsInvocation).image = { + image_name: initialImage.image_name, + }; + + // Pass the image's dimensions to the `NOISE` node + graph.edges.push({ + source: { node_id: IMAGE_TO_LATENTS, field: 'width' }, + destination: { + node_id: NOISE, + field: 'width', + }, + }); + graph.edges.push({ + source: { node_id: IMAGE_TO_LATENTS, field: 'height' }, + destination: { + node_id: NOISE, + field: 'height', + }, + }); + } + + // add metadata accumulator, which is only mostly populated - some fields are added later + graph.nodes[METADATA_ACCUMULATOR] = { + id: METADATA_ACCUMULATOR, + type: 'metadata_accumulator', + generation_mode: 'img2img', + cfg_scale, + height, + width, + positive_prompt: '', // set in addDynamicPromptsToGraph + negative_prompt: negativePrompt, + model, + seed: 0, // set in addDynamicPromptsToGraph + steps, + rand_device: use_cpu ? 'cpu' : 'cuda', + scheduler, + vae: undefined, // option; set in addVAEToGraph + controlnets: [], // populated in addControlNetToLinearGraph + loras: [], // populated in addLoRAsToGraph + clip_skip: clipSkip, + strength, + init_image: initialImage.image_name, + }; + + graph.edges.push({ + source: { + node_id: METADATA_ACCUMULATOR, + field: 'metadata', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'metadata', + }, + }); + + // add LoRA support + addLoRAsToGraph(state, graph, DENOISE_LATENTS); + + // Add Refiner if enabled + if (shouldUseSDXLRefiner) { + addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); + } + + // optionally add custom VAE + addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + + // add dynamic prompts - also sets up core iteration and seed + addDynamicPromptsToGraph(state, graph); + + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); + + // NSFW & watermark - must be last thing added to graph + if (state.system.shouldUseNSFWChecker) { + // must add before watermarker! + addNSFWCheckerToGraph(state, graph); + } + + if (state.system.shouldUseWatermarker) { + // must add after nsfw checker! 
+ addWatermarkerToGraph(state, graph); + } + + return graph; +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts new file mode 100644 index 0000000000..04cc120cbe --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -0,0 +1,480 @@ +import { logger } from 'app/logging/logger'; +import { RootState } from 'app/store/store'; +import { NonNullableGraph } from 'features/nodes/types/types'; +import { + ImageDTO, + InfillPatchmatchInvocation, + InfillTileInvocation, + RandomIntInvocation, + RangeOfSizeInvocation, +} from 'services/api/types'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; +import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; +import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addVAEToGraph } from './addVAEToGraph'; +import { addWatermarkerToGraph } from './addWatermarkerToGraph'; +import { + COLOR_CORRECT, + INPAINT, + INPAINT_FINAL_IMAGE, + INPAINT_GRAPH, + INPAINT_IMAGE, + INPAINT_INFILL, + ITERATE, + LATENTS_TO_IMAGE, + MASK_BLUR, + MASK_COMBINE, + MASK_FROM_ALPHA, + NEGATIVE_CONDITIONING, + NOISE, + POSITIVE_CONDITIONING, + RANDOM_INT, + RANGE_OF_SIZE, + SDXL_MODEL_LOADER, +} from './constants'; + +/** + * Builds the Canvas tab's Inpaint graph. + */ +export const buildCanvasSDXLInpaintGraph = ( + state: RootState, + canvasInitImage: ImageDTO, + canvasMaskImage: ImageDTO +): NonNullableGraph => { + const log = logger('nodes'); + const { + positivePrompt, + negativePrompt, + model, + cfgScale: cfg_scale, + scheduler, + steps, + img2imgStrength: strength, + shouldFitToWidthHeight, + iterations, + seed, + shouldRandomizeSeed, + vaePrecision, + shouldUseNoiseSettings, + shouldUseCpuNoise, + maskBlur, + maskBlurMethod, + tileSize, + infillMethod, + } = state.generation; + + const { + positiveStylePrompt, + negativeStylePrompt, + shouldConcatSDXLStylePrompt, + shouldUseSDXLRefiner, + refinerStart, + } = state.sdxl; + + if (!model) { + log.error('No model found in state'); + throw new Error('No model found in state'); + } + + // The bounding box determines width and height, not the width and height params + const { width, height } = state.canvas.boundingBoxDimensions; + + // We may need to set the inpaint width and height to scale the image + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const use_cpu = shouldUseNoiseSettings + ? shouldUseCpuNoise + : shouldUseCpuNoise; + + let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { + type: 'infill_tile', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + tile_size: tileSize, + }; + + if (infillMethod === 'patchmatch') { + infillNode = { + type: 'infill_patchmatch', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + }; + } + + const graph: NonNullableGraph = { + id: INPAINT_GRAPH, + nodes: { + [INPAINT]: { + type: 'denoise_latents', + id: INPAINT, + is_intermediate: true, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: shouldUseSDXLRefiner ? 
refinerStart : 1, + }, + [infillNode.id]: infillNode, + [MASK_FROM_ALPHA]: { + type: 'tomask', + id: MASK_FROM_ALPHA, + is_intermediate: true, + image: canvasInitImage, + }, + [MASK_COMBINE]: { + type: 'mask_combine', + id: MASK_COMBINE, + is_intermediate: true, + mask2: canvasMaskImage, + }, + [MASK_BLUR]: { + type: 'img_blur', + id: MASK_BLUR, + is_intermediate: true, + radius: maskBlur, + blur_type: maskBlurMethod, + }, + [INPAINT_IMAGE]: { + type: 'i2l', + id: INPAINT_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, + [NOISE]: { + type: 'noise', + id: NOISE, + width, + height, + use_cpu, + is_intermediate: true, + }, + [POSITIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: POSITIVE_CONDITIONING, + prompt: positivePrompt, + style: shouldConcatSDXLStylePrompt + ? `${positivePrompt} ${positiveStylePrompt}` + : positiveStylePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: NEGATIVE_CONDITIONING, + prompt: negativePrompt, + style: shouldConcatSDXLStylePrompt + ? `${negativePrompt} ${negativeStylePrompt}` + : negativeStylePrompt, + }, + [SDXL_MODEL_LOADER]: { + type: 'sdxl_model_loader', + id: SDXL_MODEL_LOADER, + model, + }, + [LATENTS_TO_IMAGE]: { + type: 'l2i', + id: LATENTS_TO_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, + [COLOR_CORRECT]: { + type: 'color_correct', + id: COLOR_CORRECT, + is_intermediate: true, + }, + [INPAINT_FINAL_IMAGE]: { + type: 'img_paste', + id: INPAINT_FINAL_IMAGE, + is_intermediate: true, + }, + [RANGE_OF_SIZE]: { + type: 'range_of_size', + id: RANGE_OF_SIZE, + is_intermediate: true, + // seed - must be connected manually + // start: 0, + size: iterations, + step: 1, + }, + [ITERATE]: { + type: 'iterate', + id: ITERATE, + is_intermediate: true, + }, + }, + edges: [ + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'unet', + }, + destination: { + node_id: INPAINT, + field: 'unet', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip2', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip2', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'negative_conditioning', + }, + }, + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'positive_conditioning', + }, + }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: INPAINT, + field: 'noise', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_IMAGE, + field: 'latents', + }, + destination: { + node_id: INPAINT, + field: 'latents', + }, + }, + { + source: { + node_id: MASK_FROM_ALPHA, + field: 'mask', + }, + destination: { + node_id: MASK_COMBINE, + field: 'mask1', + }, + }, + { + source: { + node_id: MASK_COMBINE, + field: 'image', + }, + destination: { + node_id: MASK_BLUR, + field: 'image', + }, + }, + { + source: { + 
node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: INPAINT, + field: 'mask', + }, + }, + { + source: { + node_id: RANGE_OF_SIZE, + field: 'collection', + }, + destination: { + node_id: ITERATE, + field: 'collection', + }, + }, + { + source: { + node_id: ITERATE, + field: 'item', + }, + destination: { + node_id: NOISE, + field: 'seed', + }, + }, + { + source: { + node_id: INPAINT, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'reference', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'base_image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'mask', + }, + }, + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'image', + }, + }, + ], + }; + + // Add Refiner if enabled + if (shouldUseSDXLRefiner) { + addSDXLRefinerToGraph(state, graph, INPAINT); + } + + // Add VAE + addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + + // handle seed + if (shouldRandomizeSeed) { + // Random int node to generate the starting seed + const randomIntNode: RandomIntInvocation = { + id: RANDOM_INT, + type: 'rand_int', + }; + + graph.nodes[RANDOM_INT] = randomIntNode; + + // Connect random int to the start of the range of size so the range starts on the random first seed + graph.edges.push({ + source: { node_id: RANDOM_INT, field: 'a' }, + destination: { node_id: RANGE_OF_SIZE, field: 'start' }, + }); + } else { + // User specified seed, so set the start of the range of size to the seed + (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; + } + + // add LoRA support + addSDXLLoRAsToGraph(state, graph, INPAINT, SDXL_MODEL_LOADER); + + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, INPAINT); + + // NSFW & watermark - must be last thing added to graph + if (state.system.shouldUseNSFWChecker) { + // must add before watermarker! + addNSFWCheckerToGraph(state, graph, INPAINT); + } + + if (state.system.shouldUseWatermarker) { + // must add after nsfw checker! 
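// Seed plumbing in this graph, for reference: RANDOM_INT feeds
// RANGE_OF_SIZE.start when the seed is randomized, RANGE_OF_SIZE emits a
// seed collection sized to `iterations`, ITERATE walks that collection,
// and each item lands on NOISE.seed, giving one noise tensor per iteration.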
+ addWatermarkerToGraph(state, graph, INPAINT); + } + + return graph; +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts new file mode 100644 index 0000000000..ed0fb74165 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -0,0 +1,304 @@ +import { logger } from 'app/logging/logger'; +import { RootState } from 'app/store/store'; +import { NonNullableGraph } from 'features/nodes/types/types'; +import { initialGenerationState } from 'features/parameters/store/generationSlice'; +import { + DenoiseLatentsInvocation, + ONNXTextToLatentsInvocation, +} from 'services/api/types'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; +import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; +import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; +import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addVAEToGraph } from './addVAEToGraph'; +import { addWatermarkerToGraph } from './addWatermarkerToGraph'; +import { + DENOISE_LATENTS, + LATENTS_TO_IMAGE, + METADATA_ACCUMULATOR, + NEGATIVE_CONDITIONING, + NOISE, + ONNX_MODEL_LOADER, + POSITIVE_CONDITIONING, + SDXL_MODEL_LOADER, + TEXT_TO_IMAGE_GRAPH, +} from './constants'; + +/** + * Builds the Canvas tab's Text to Image graph. + */ +export const buildCanvasSDXLTextToImageGraph = ( + state: RootState +): NonNullableGraph => { + const log = logger('nodes'); + const { + positivePrompt, + negativePrompt, + model, + cfgScale: cfg_scale, + scheduler, + steps, + clipSkip, + shouldUseCpuNoise, + shouldUseNoiseSettings, + } = state.generation; + + // The bounding box determines width and height, not the width and height params + const { width, height } = state.canvas.boundingBoxDimensions; + + const { shouldAutoSave } = state.canvas; + + const { + positiveStylePrompt, + negativeStylePrompt, + shouldConcatSDXLStylePrompt, + shouldUseSDXLRefiner, + refinerStart, + } = state.sdxl; + + if (!model) { + log.error('No model found in state'); + throw new Error('No model found in state'); + } + + const use_cpu = shouldUseNoiseSettings + ? shouldUseCpuNoise + : initialGenerationState.shouldUseCpuNoise; + const isUsingOnnxModel = model.model_type === 'onnx'; + const modelLoaderNodeId = isUsingOnnxModel + ? ONNX_MODEL_LOADER + : SDXL_MODEL_LOADER; + const modelLoaderNodeType = isUsingOnnxModel + ? 'onnx_model_loader' + : 'sdxl_model_loader'; + const t2lNode: DenoiseLatentsInvocation | ONNXTextToLatentsInvocation = + isUsingOnnxModel + ? { + type: 't2l_onnx', + id: DENOISE_LATENTS, + is_intermediate: true, + cfg_scale, + scheduler, + steps, + } + : { + type: 'denoise_latents', + id: DENOISE_LATENTS, + is_intermediate: true, + cfg_scale, + scheduler, + steps, + denoising_start: 0, + denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, + }; + /** + * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the + * full graph here as a template. Then use the parameters from app state and set friendlier node + * ids. + * + * The only thing we need extra logic for is handling randomized seed, control net, and for img2img, + * the `fit` param. These are added to the graph at the end. 
+ */ + + // copy-pasted graph from node editor, filled in with state values & friendly node ids + // TODO: Actually create the graph correctly for ONNX + const graph: NonNullableGraph = { + id: TEXT_TO_IMAGE_GRAPH, + nodes: { + [POSITIVE_CONDITIONING]: { + type: isUsingOnnxModel ? 'prompt_onnx' : 'sdxl_compel_prompt', + id: POSITIVE_CONDITIONING, + is_intermediate: true, + prompt: positivePrompt, + style: shouldConcatSDXLStylePrompt + ? `${positivePrompt} ${positiveStylePrompt}` + : positiveStylePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: isUsingOnnxModel ? 'prompt_onnx' : 'sdxl_compel_prompt', + id: NEGATIVE_CONDITIONING, + is_intermediate: true, + prompt: negativePrompt, + style: shouldConcatSDXLStylePrompt + ? `${negativePrompt} ${negativeStylePrompt}` + : negativeStylePrompt, + }, + [NOISE]: { + type: 'noise', + id: NOISE, + is_intermediate: true, + width, + height, + use_cpu, + }, + [t2lNode.id]: t2lNode, + [modelLoaderNodeId]: { + type: modelLoaderNodeType, + id: modelLoaderNodeId, + is_intermediate: true, + model, + }, + + [LATENTS_TO_IMAGE]: { + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', + id: LATENTS_TO_IMAGE, + is_intermediate: !shouldAutoSave, + }, + }, + edges: [ + { + source: { + node_id: modelLoaderNodeId, + field: 'unet', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'unet', + }, + }, + { + source: { + node_id: modelLoaderNodeId, + field: 'clip', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: modelLoaderNodeId, + field: 'clip2', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: modelLoaderNodeId, + field: 'clip', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: modelLoaderNodeId, + field: 'clip2', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'negative_conditioning', + }, + }, + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'positive_conditioning', + }, + }, + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'noise', + }, + }, + ], + }; + + // add metadata accumulator, which is only mostly populated - some fields are added later + graph.nodes[METADATA_ACCUMULATOR] = { + id: METADATA_ACCUMULATOR, + type: 'metadata_accumulator', + generation_mode: 'txt2img', + cfg_scale, + height, + width, + positive_prompt: '', // set in addDynamicPromptsToGraph + negative_prompt: negativePrompt, + model, + seed: 0, // set in addDynamicPromptsToGraph + steps, + rand_device: use_cpu ? 
'cpu' : 'cuda', + scheduler, + vae: undefined, // option; set in addVAEToGraph + controlnets: [], // populated in addControlNetToLinearGraph + loras: [], // populated in addLoRAsToGraph + clip_skip: clipSkip, + }; + + graph.edges.push({ + source: { + node_id: METADATA_ACCUMULATOR, + field: 'metadata', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'metadata', + }, + }); + + // Add Refiner if enabled + if (shouldUseSDXLRefiner) { + addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); + } + + // add LoRA support + addSDXLLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); + + // optionally add custom VAE + addVAEToGraph(state, graph, modelLoaderNodeId); + + // add dynamic prompts - also sets up core iteration and seed + addDynamicPromptsToGraph(state, graph); + + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); + + // NSFW & watermark - must be last thing added to graph + if (state.system.shouldUseNSFWChecker) { + // must add before watermarker! + addNSFWCheckerToGraph(state, graph); + } + + if (state.system.shouldUseWatermarker) { + // must add after nsfw checker! + addWatermarkerToGraph(state, graph); + } + + return graph; +}; diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/MainModel/ParamMainModelSelect.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/MainModel/ParamMainModelSelect.tsx index 0a18d4f556..05b5b6468a 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/MainModel/ParamMainModelSelect.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/MainModel/ParamMainModelSelect.tsx @@ -15,11 +15,11 @@ import { modelIdToMainModelParam } from 'features/parameters/util/modelIdToMainM import SyncModelsButton from 'features/ui/components/tabs/ModelManager/subpanels/ModelManagerSettingsPanel/SyncModelsButton'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; import { forEach } from 'lodash-es'; +import { NON_REFINER_BASE_MODELS } from 'services/api/constants'; import { useGetMainModelsQuery, useGetOnnxModelsQuery, } from 'services/api/endpoints/models'; -import { NON_REFINER_BASE_MODELS } from 'services/api/constants'; import { useFeatureStatus } from '../../../../system/hooks/useFeatureStatus'; const selector = createSelector( @@ -52,10 +52,7 @@ const ParamMainModelSelect = () => { const data: SelectItem[] = []; forEach(mainModels.entities, (model, id) => { - if ( - !model || - (activeTabName === 'unifiedCanvas' && model.base_model === 'sdxl') - ) { + if (!model) { return; } diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx new file mode 100644 index 0000000000..270e839894 --- /dev/null +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx @@ -0,0 +1,29 @@ +import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/ParamDynamicPromptsCollapse'; +import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; +import ParamAdvancedCollapse from 'features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse'; +import ParamInfillAndScalingCollapse from 'features/parameters/components/Parameters/Canvas/InfillAndScaling/ParamInfillAndScalingCollapse'; +import ParamMaskAdjustmentCollapse from 'features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskAdjustmentCollapse'; 
+import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; +import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; +import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons'; +import UnifiedCanvasCoreParameters from 'features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasCoreParameters'; +import ParamSDXLPromptArea from './ParamSDXLPromptArea'; +import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; + +export default function SDXLUnifiedCanvasTabParameters() { + return ( + <> + + + + + + + + + + + + + ); +} diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasTab.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasTab.tsx index 4c36c45e13..0a5b872e4b 100644 --- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasTab.tsx +++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasTab.tsx @@ -1,14 +1,22 @@ import { Flex } from '@chakra-ui/react'; +import { RootState } from 'app/store/store'; +import { useAppSelector } from 'app/store/storeHooks'; +import SDXLUnifiedCanvasTabParameters from 'features/sdxl/components/SDXLUnifiedCanvasTabParameters'; import { memo } from 'react'; import ParametersPinnedWrapper from '../../ParametersPinnedWrapper'; import UnifiedCanvasContent from './UnifiedCanvasContent'; import UnifiedCanvasParameters from './UnifiedCanvasParameters'; const UnifiedCanvasTab = () => { + const model = useAppSelector((state: RootState) => state.generation.model); return ( - + {model && model.base_model === 'sdxl' ? ( + + ) : ( + + )} From 8acd7eeca5a29120715503d3ccdd7a8c5b00e9e8 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 08:18:30 +1200 Subject: [PATCH 26/67] feat: Disable clip skip for SDXL Canvas --- .../features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx | 2 -- 1 file changed, 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx index 270e839894..6faafc6891 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx @@ -1,6 +1,5 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/ParamDynamicPromptsCollapse'; import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; -import ParamAdvancedCollapse from 'features/parameters/components/Parameters/Advanced/ParamAdvancedCollapse'; import ParamInfillAndScalingCollapse from 'features/parameters/components/Parameters/Canvas/InfillAndScaling/ParamInfillAndScalingCollapse'; import ParamMaskAdjustmentCollapse from 'features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskAdjustmentCollapse'; import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; @@ -23,7 +22,6 @@ export default function SDXLUnifiedCanvasTabParameters() { - ); } From ce3675fc1499af9d88d68d37d011a981b9a14942 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sat, 12 Aug 2023 03:19:49 +0300 Subject: [PATCH 27/67] Apply denoising_start/end according on timestep value --- invokeai/app/invocations/latent.py | 28 
+++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 15e7c15802..fc934618cf 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -316,26 +316,36 @@ class DenoiseLatentsInvocation(BaseInvocation): # MultiControlNetModel has been refactored out, just need list[ControlNetData] return control_data + # original idea by https://github.com/AmericanPresidentJimmyCarter def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end): if scheduler.config.get("cpu_only", False): device = torch.device("cpu") - - # apply denoising_start + num_inference_steps = steps scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = scheduler.timesteps - t_start = int(round(denoising_start * num_inference_steps)) - timesteps = scheduler.timesteps[t_start * scheduler.order :] - num_inference_steps = num_inference_steps - t_start + # apply denoising_start + t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start))) + t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, timesteps))) + timesteps = timesteps[t_start_idx:] + if scheduler.order == 2: + # TODO: research for second order schedulers timesteps + timesteps = timesteps[1:] + # save start timestep to apply noise init_timestep = timesteps[:1] # apply denoising_end - num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0) + t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end))) + t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, timesteps))) + timesteps = timesteps[:t_end_idx] - skipped_final_steps = int(round((1 - denoising_end) * steps)) - num_inference_steps = num_inference_steps - skipped_final_steps - timesteps = timesteps[: num_warmup_steps + scheduler.order * num_inference_steps] + # calculate step count based on scheduler order + num_inference_steps = len(timesteps) + if scheduler.order == 2: + num_inference_steps += (num_inference_steps % 2) + num_inference_steps = num_inference_steps // 2 return num_inference_steps, timesteps, init_timestep From 6034fa12debd5dd95a2088a4d75e999e0431654d Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 16:20:58 +1200 Subject: [PATCH 28/67] feat: Add Mask Blur node --- invokeai/app/invocations/image.py | 45 +++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index cc05b529b5..846812435d 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -706,7 +706,7 @@ class MaskCombineInvocation(BaseInvocation, PILInvocationConfig): "ui": {"title": "Mask Combine", "tags": ["mask", "combine"]}, } - def invoke(self, context: InvocationContext) -> ImageOutput: + def invoke(self, context: InvocationContext) -> MaskOutput: mask1 = context.services.images.get_pil_image(self.mask1.image_name).convert("L") mask2 = context.services.images.get_pil_image(self.mask2.image_name).convert("L") @@ -721,7 +721,48 @@ class MaskCombineInvocation(BaseInvocation, PILInvocationConfig): is_intermediate=self.is_intermediate, ) - return ImageOutput( + return MaskOutput( + image=ImageField(image_name=image_dto.image_name), + width=image_dto.width, + height=image_dto.height, + ) + + +class MaskBlurInvocation(BaseInvocation, PILInvocationConfig): + """Blurs a 
mask""" + + # fmt: off + type: Literal["mask_blur"] = "mask_blur" + + # Inputs + mask: Optional[ImageField] = Field(default=None, description="The mask image to blur") + radius: float = Field(default=8.0, ge=0, description="The blur radius") + blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur") + # fmt: on + + class Config(InvocationConfig): + schema_extra = { + "ui": {"title": "Mask Blur", "tags": ["mask", "blur"]}, + } + + def invoke(self, context: InvocationContext) -> MaskOutput: + mask = context.services.images.get_pil_image(self.mask.image_name) + + blur = ( + ImageFilter.GaussianBlur(self.radius) if self.blur_type == "gaussian" else ImageFilter.BoxBlur(self.radius) + ) + blur_mask = mask.filter(blur) + + image_dto = context.services.images.create( + image=blur_mask, + image_origin=ResourceOrigin.INTERNAL, + image_category=ImageCategory.GENERAL, + node_id=self.id, + session_id=context.graph_execution_state_id, + is_intermediate=self.is_intermediate, + ) + + return MaskOutput( image=ImageField(image_name=image_dto.image_name), width=image_dto.width, height=image_dto.height, From 7254ffc3e7e06163a133aa38ca8b6fdbd4e70e2d Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 16:30:20 +1200 Subject: [PATCH 29/67] chore: Split Inpaint and Outpaint Graphs --- .../util/graphBuilders/buildCanvasGraph.ts | 21 +- .../graphBuilders/buildCanvasInpaintGraph.ts | 108 +--- .../graphBuilders/buildCanvasOutpaintGraph.ts | 461 +++++++++++++++++ .../buildCanvasSDXLInpaintGraph.ts | 212 +++----- .../buildCanvasSDXLOutpaintGraph.ts | 480 ++++++++++++++++++ .../frontend/web/src/services/api/schema.d.ts | 71 ++- 6 files changed, 1084 insertions(+), 269 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts create mode 100644 invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts index dd0a5e6619..d268a3990d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasGraph.ts @@ -3,8 +3,10 @@ import { NonNullableGraph } from 'features/nodes/types/types'; import { ImageDTO } from 'services/api/types'; import { buildCanvasImageToImageGraph } from './buildCanvasImageToImageGraph'; import { buildCanvasInpaintGraph } from './buildCanvasInpaintGraph'; +import { buildCanvasOutpaintGraph } from './buildCanvasOutpaintGraph'; import { buildCanvasSDXLImageToImageGraph } from './buildCanvasSDXLImageToImageGraph'; import { buildCanvasSDXLInpaintGraph } from './buildCanvasSDXLInpaintGraph'; +import { buildCanvasSDXLOutpaintGraph } from './buildCanvasSDXLOutpaintGraph'; import { buildCanvasSDXLTextToImageGraph } from './buildCanvasSDXLTextToImageGraph'; import { buildCanvasTextToImageGraph } from './buildCanvasTextToImageGraph'; @@ -37,11 +39,10 @@ export const buildCanvasGraph = ( } else { graph = buildCanvasImageToImageGraph(state, canvasInitImage); } - } else { + } else if (generationMode === 'inpaint') { if (!canvasInitImage || !canvasMaskImage) { throw new Error('Missing canvas init and mask images'); } - if ( state.generation.model && state.generation.model.base_model === 'sdxl' @@ -54,6 +55,22 @@ export const buildCanvasGraph 
= ( } else { graph = buildCanvasInpaintGraph(state, canvasInitImage, canvasMaskImage); } + } else { + if (!canvasInitImage) { + throw new Error('Missing canvas init image'); + } + if ( + state.generation.model && + state.generation.model.base_model === 'sdxl' + ) { + graph = buildCanvasSDXLOutpaintGraph( + state, + canvasInitImage, + canvasMaskImage + ); + } else { + graph = buildCanvasOutpaintGraph(state, canvasInitImage, canvasMaskImage); + } } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 1e2b9d76d8..5afcd80d85 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -3,8 +3,6 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { ImageDTO, - InfillPatchmatchInvocation, - InfillTileInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; @@ -20,13 +18,10 @@ import { INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, - INPAINT_INFILL, ITERATE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, MASK_BLUR, - MASK_COMBINE, - MASK_FROM_ALPHA, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -60,8 +55,6 @@ export const buildCanvasInpaintGraph = ( shouldUseCpuNoise, maskBlur, maskBlurMethod, - tileSize, - infillMethod, clipSkip, } = state.generation; @@ -84,23 +77,6 @@ export const buildCanvasInpaintGraph = ( ? shouldUseCpuNoise : shouldUseCpuNoise; - let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { - type: 'infill_tile', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - tile_size: tileSize, - }; - - if (infillMethod === 'patchmatch') { - infillNode = { - type: 'infill_patchmatch', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - }; - } - const graph: NonNullableGraph = { id: INPAINT_GRAPH, nodes: { @@ -114,31 +90,20 @@ export const buildCanvasInpaintGraph = ( denoising_start: 1 - strength, denoising_end: 1, }, - [infillNode.id]: infillNode, - [MASK_FROM_ALPHA]: { - type: 'tomask', - id: MASK_FROM_ALPHA, - is_intermediate: true, - image: canvasInitImage, - }, - [MASK_COMBINE]: { - type: 'mask_combine', - id: MASK_COMBINE, - is_intermediate: true, - mask2: canvasMaskImage, - }, [MASK_BLUR]: { - type: 'img_blur', + type: 'mask_blur', id: MASK_BLUR, is_intermediate: true, radius: maskBlur, blur_type: maskBlurMethod, + mask: canvasMaskImage, }, [INPAINT_IMAGE]: { type: 'i2l', id: INPAINT_IMAGE, is_intermediate: true, fp32: vaePrecision === 'fp32' ? 
true : false, + image: canvasInitImage, }, [NOISE]: { type: 'noise', @@ -182,11 +147,14 @@ export const buildCanvasInpaintGraph = ( type: 'color_correct', id: COLOR_CORRECT, is_intermediate: true, + reference: canvasInitImage, + mask: canvasMaskImage, }, [INPAINT_FINAL_IMAGE]: { type: 'img_paste', id: INPAINT_FINAL_IMAGE, is_intermediate: true, + base_image: canvasInitImage, }, [RANGE_OF_SIZE]: { type: 'range_of_size', @@ -274,16 +242,6 @@ export const buildCanvasInpaintGraph = ( field: 'noise', }, }, - { - source: { - node_id: INPAINT_INFILL, - field: 'image', - }, - destination: { - node_id: INPAINT_IMAGE, - field: 'image', - }, - }, { source: { node_id: INPAINT_IMAGE, @@ -296,29 +254,9 @@ export const buildCanvasInpaintGraph = ( }, { source: { - node_id: MASK_FROM_ALPHA, + node_id: MASK_BLUR, field: 'mask', }, - destination: { - node_id: MASK_COMBINE, - field: 'mask1', - }, - }, - { - source: { - node_id: MASK_COMBINE, - field: 'image', - }, - destination: { - node_id: MASK_BLUR, - field: 'image', - }, - }, - { - source: { - node_id: MASK_BLUR, - field: 'image', - }, destination: { node_id: INPAINT, field: 'mask', @@ -354,26 +292,6 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, - { - source: { - node_id: INPAINT_INFILL, - field: 'image', - }, - destination: { - node_id: COLOR_CORRECT, - field: 'reference', - }, - }, - { - source: { - node_id: MASK_BLUR, - field: 'image', - }, - destination: { - node_id: COLOR_CORRECT, - field: 'mask', - }, - }, { source: { node_id: LATENTS_TO_IMAGE, @@ -384,20 +302,10 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, }, - { - source: { - node_id: INPAINT_INFILL, - field: 'image', - }, - destination: { - node_id: INPAINT_FINAL_IMAGE, - field: 'base_image', - }, - }, { source: { node_id: MASK_BLUR, - field: 'image', + field: 'mask', }, destination: { node_id: INPAINT_FINAL_IMAGE, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts new file mode 100644 index 0000000000..a11473314d --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -0,0 +1,461 @@ +import { logger } from 'app/logging/logger'; +import { RootState } from 'app/store/store'; +import { NonNullableGraph } from 'features/nodes/types/types'; +import { + ImageDTO, + InfillPatchmatchInvocation, + InfillTileInvocation, + RandomIntInvocation, + RangeOfSizeInvocation, +} from 'services/api/types'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; +import { addLoRAsToGraph } from './addLoRAsToGraph'; +import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addVAEToGraph } from './addVAEToGraph'; +import { addWatermarkerToGraph } from './addWatermarkerToGraph'; +import { + CLIP_SKIP, + COLOR_CORRECT, + INPAINT, + INPAINT_FINAL_IMAGE, + INPAINT_GRAPH, + INPAINT_IMAGE, + INPAINT_INFILL, + ITERATE, + LATENTS_TO_IMAGE, + MAIN_MODEL_LOADER, + MASK_BLUR, + MASK_COMBINE, + MASK_FROM_ALPHA, + NEGATIVE_CONDITIONING, + NOISE, + POSITIVE_CONDITIONING, + RANDOM_INT, + RANGE_OF_SIZE, +} from './constants'; + +/** + * Builds the Canvas tab's Outpaint graph. 
+ */ +export const buildCanvasOutpaintGraph = ( + state: RootState, + canvasInitImage: ImageDTO, + canvasMaskImage?: ImageDTO +): NonNullableGraph => { + const log = logger('nodes'); + const { + positivePrompt, + negativePrompt, + model, + cfgScale: cfg_scale, + scheduler, + steps, + img2imgStrength: strength, + shouldFitToWidthHeight, + iterations, + seed, + shouldRandomizeSeed, + vaePrecision, + shouldUseNoiseSettings, + shouldUseCpuNoise, + maskBlur, + maskBlurMethod, + tileSize, + infillMethod, + clipSkip, + } = state.generation; + + if (!model) { + log.error('No model found in state'); + throw new Error('No model found in state'); + } + + // The bounding box determines width and height, not the width and height params + const { width, height } = state.canvas.boundingBoxDimensions; + + // We may need to set the inpaint width and height to scale the image + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const use_cpu = shouldUseNoiseSettings + ? shouldUseCpuNoise + : shouldUseCpuNoise; + + let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { + type: 'infill_tile', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + tile_size: tileSize, + }; + + if (infillMethod === 'patchmatch') { + infillNode = { + type: 'infill_patchmatch', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + }; + } + + const graph: NonNullableGraph = { + id: INPAINT_GRAPH, + nodes: { + [INPAINT]: { + type: 'denoise_latents', + id: INPAINT, + is_intermediate: true, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: 1, + }, + [infillNode.id]: infillNode, + [MASK_FROM_ALPHA]: { + type: 'tomask', + id: MASK_FROM_ALPHA, + is_intermediate: true, + image: canvasInitImage, + }, + [MASK_COMBINE]: { + type: 'mask_combine', + id: MASK_COMBINE, + is_intermediate: true, + mask2: canvasMaskImage, + }, + [MASK_BLUR]: { + type: 'mask_blur', + id: MASK_BLUR, + is_intermediate: true, + radius: maskBlur, + blur_type: maskBlurMethod, + }, + [INPAINT_IMAGE]: { + type: 'i2l', + id: INPAINT_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, + [NOISE]: { + type: 'noise', + id: NOISE, + width, + height, + use_cpu, + is_intermediate: true, + }, + [POSITIVE_CONDITIONING]: { + type: 'compel', + id: POSITIVE_CONDITIONING, + is_intermediate: true, + prompt: positivePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: 'compel', + id: NEGATIVE_CONDITIONING, + is_intermediate: true, + prompt: negativePrompt, + }, + [MAIN_MODEL_LOADER]: { + type: 'main_model_loader', + id: MAIN_MODEL_LOADER, + is_intermediate: true, + model, + }, + [LATENTS_TO_IMAGE]: { + type: 'l2i', + id: LATENTS_TO_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, + [CLIP_SKIP]: { + type: 'clip_skip', + id: CLIP_SKIP, + is_intermediate: true, + skipped_layers: clipSkip, + }, + [COLOR_CORRECT]: { + type: 'color_correct', + id: COLOR_CORRECT, + is_intermediate: true, + }, + [INPAINT_FINAL_IMAGE]: { + type: 'img_paste', + id: INPAINT_FINAL_IMAGE, + is_intermediate: true, + }, + [RANGE_OF_SIZE]: { + type: 'range_of_size', + id: RANGE_OF_SIZE, + is_intermediate: true, + // seed - must be connected manually + // start: 0, + size: iterations, + step: 1, + }, + [ITERATE]: { + type: 'iterate', + id: ITERATE, + is_intermediate: true, + }, + }, + edges: [ + { + source: { + node_id: MAIN_MODEL_LOADER, + field: 'unet', + }, + destination: { + node_id: INPAINT, + field: 'unet', + }, + }, + { + source: { + node_id: MAIN_MODEL_LOADER, + field: 'clip', + }, + destination: { + node_id: CLIP_SKIP, + field: 'clip', + }, + }, + { + source: { + node_id: CLIP_SKIP, + field: 'clip', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: CLIP_SKIP, + field: 'clip', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'negative_conditioning', + }, + }, + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'positive_conditioning', + }, + }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: INPAINT, + field: 'noise', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_IMAGE, + field: 'latents', + }, + destination: { + node_id: INPAINT, + field: 'latents', + }, + }, + { + source: { + node_id: MASK_FROM_ALPHA, + field: 'mask', + }, + destination: { + node_id: MASK_COMBINE, + field: 'mask1', + }, + }, + { + source: { + node_id: MASK_COMBINE, + field: 'mask', + }, + destination: { + node_id: MASK_BLUR, + field: 'mask', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: INPAINT, + field: 'mask', + }, + }, + { + source: { + node_id: RANGE_OF_SIZE, + field: 'collection', + }, + destination: { + node_id: ITERATE, + field: 'collection', + }, + }, + { + source: { + node_id: ITERATE, + field: 'item', + }, + destination: { + node_id: NOISE, + field: 'seed', + }, + }, + { + source: { + node_id: INPAINT, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'reference', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'base_image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'mask', + }, + }, + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'image', + }, + }, + ], + }; + + // Add VAE + addVAEToGraph(state, graph, 
MAIN_MODEL_LOADER); + + // handle seed + if (shouldRandomizeSeed) { + // Random int node to generate the starting seed + const randomIntNode: RandomIntInvocation = { + id: RANDOM_INT, + type: 'rand_int', + }; + + graph.nodes[RANDOM_INT] = randomIntNode; + + // Connect random int to the start of the range of size so the range starts on the random first seed + graph.edges.push({ + source: { node_id: RANDOM_INT, field: 'a' }, + destination: { node_id: RANGE_OF_SIZE, field: 'start' }, + }); + } else { + // User specified seed, so set the start of the range of size to the seed + (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; + } + + // add LoRA support + addLoRAsToGraph(state, graph, INPAINT, MAIN_MODEL_LOADER); + + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, INPAINT); + + // NSFW & watermark - must be last thing added to graph + if (state.system.shouldUseNSFWChecker) { + // must add before watermarker! + addNSFWCheckerToGraph(state, graph, INPAINT); + } + + if (state.system.shouldUseWatermarker) { + // must add after nsfw checker! + addWatermarkerToGraph(state, graph, INPAINT); + } + + return graph; +}; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 04cc120cbe..1d05d05073 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -3,8 +3,6 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { ImageDTO, - InfillPatchmatchInvocation, - InfillTileInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; @@ -20,12 +18,9 @@ import { INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, - INPAINT_INFILL, ITERATE, LATENTS_TO_IMAGE, MASK_BLUR, - MASK_COMBINE, - MASK_FROM_ALPHA, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -60,8 +55,6 @@ export const buildCanvasSDXLInpaintGraph = ( shouldUseCpuNoise, maskBlur, maskBlurMethod, - tileSize, - infillMethod, } = state.generation; const { @@ -91,69 +84,13 @@ export const buildCanvasSDXLInpaintGraph = ( ? shouldUseCpuNoise : shouldUseCpuNoise; - let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { - type: 'infill_tile', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - tile_size: tileSize, - }; - - if (infillMethod === 'patchmatch') { - infillNode = { - type: 'infill_patchmatch', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - }; - } - const graph: NonNullableGraph = { id: INPAINT_GRAPH, nodes: { - [INPAINT]: { - type: 'denoise_latents', - id: INPAINT, - is_intermediate: true, - steps: steps, - cfg_scale: cfg_scale, - scheduler: scheduler, - denoising_start: 1 - strength, - denoising_end: shouldUseSDXLRefiner ? 
refinerStart : 1, - }, - [infillNode.id]: infillNode, - [MASK_FROM_ALPHA]: { - type: 'tomask', - id: MASK_FROM_ALPHA, - is_intermediate: true, - image: canvasInitImage, - }, - [MASK_COMBINE]: { - type: 'mask_combine', - id: MASK_COMBINE, - is_intermediate: true, - mask2: canvasMaskImage, - }, - [MASK_BLUR]: { - type: 'img_blur', - id: MASK_BLUR, - is_intermediate: true, - radius: maskBlur, - blur_type: maskBlurMethod, - }, - [INPAINT_IMAGE]: { - type: 'i2l', - id: INPAINT_IMAGE, - is_intermediate: true, - fp32: vaePrecision === 'fp32' ? true : false, - }, - [NOISE]: { - type: 'noise', - id: NOISE, - width, - height, - use_cpu, - is_intermediate: true, + [SDXL_MODEL_LOADER]: { + type: 'sdxl_model_loader', + id: SDXL_MODEL_LOADER, + model, }, [POSITIVE_CONDITIONING]: { type: 'sdxl_compel_prompt', @@ -171,10 +108,38 @@ export const buildCanvasSDXLInpaintGraph = ( ? `${negativePrompt} ${negativeStylePrompt}` : negativeStylePrompt, }, - [SDXL_MODEL_LOADER]: { - type: 'sdxl_model_loader', - id: SDXL_MODEL_LOADER, - model, + [MASK_BLUR]: { + type: 'mask_blur', + id: MASK_BLUR, + is_intermediate: true, + radius: maskBlur, + blur_type: maskBlurMethod, + mask: canvasMaskImage, + }, + [INPAINT_IMAGE]: { + type: 'i2l', + id: INPAINT_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + image: canvasInitImage, + }, + [NOISE]: { + type: 'noise', + id: NOISE, + width, + height, + use_cpu, + is_intermediate: true, + }, + [INPAINT]: { + type: 'denoise_latents', + id: INPAINT, + is_intermediate: true, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, }, [LATENTS_TO_IMAGE]: { type: 'l2i', @@ -186,11 +151,14 @@ export const buildCanvasSDXLInpaintGraph = ( type: 'color_correct', id: COLOR_CORRECT, is_intermediate: true, + reference: canvasInitImage, + mask: canvasMaskImage, }, [INPAINT_FINAL_IMAGE]: { type: 'img_paste', id: INPAINT_FINAL_IMAGE, is_intermediate: true, + base_image: canvasInitImage, }, [RANGE_OF_SIZE]: { type: 'range_of_size', @@ -258,16 +226,6 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'clip2', }, }, - { - source: { - node_id: NEGATIVE_CONDITIONING, - field: 'conditioning', - }, - destination: { - node_id: INPAINT, - field: 'negative_conditioning', - }, - }, { source: { node_id: POSITIVE_CONDITIONING, @@ -278,6 +236,16 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'positive_conditioning', }, }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'negative_conditioning', + }, + }, { source: { node_id: NOISE, @@ -288,16 +256,6 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'noise', }, }, - { - source: { - node_id: INPAINT_INFILL, - field: 'image', - }, - destination: { - node_id: INPAINT_IMAGE, - field: 'image', - }, - }, { source: { node_id: INPAINT_IMAGE, @@ -310,29 +268,9 @@ export const buildCanvasSDXLInpaintGraph = ( }, { source: { - node_id: MASK_FROM_ALPHA, + node_id: MASK_BLUR, field: 'mask', }, - destination: { - node_id: MASK_COMBINE, - field: 'mask1', - }, - }, - { - source: { - node_id: MASK_COMBINE, - field: 'image', - }, - destination: { - node_id: MASK_BLUR, - field: 'image', - }, - }, - { - source: { - node_id: MASK_BLUR, - field: 'image', - }, destination: { node_id: INPAINT, field: 'mask', @@ -368,26 +306,6 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'latents', }, }, - { - source: { - node_id: INPAINT_INFILL, - field: 'image', - }, - 
destination: { - node_id: COLOR_CORRECT, - field: 'reference', - }, - }, - { - source: { - node_id: MASK_BLUR, - field: 'image', - }, - destination: { - node_id: COLOR_CORRECT, - field: 'mask', - }, - }, { source: { node_id: LATENTS_TO_IMAGE, @@ -398,26 +316,6 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, }, - { - source: { - node_id: INPAINT_INFILL, - field: 'image', - }, - destination: { - node_id: INPAINT_FINAL_IMAGE, - field: 'base_image', - }, - }, - { - source: { - node_id: MASK_BLUR, - field: 'image', - }, - destination: { - node_id: INPAINT_FINAL_IMAGE, - field: 'mask', - }, - }, { source: { node_id: COLOR_CORRECT, @@ -428,6 +326,16 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'mask', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts new file mode 100644 index 0000000000..2b60019003 --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -0,0 +1,480 @@ +import { logger } from 'app/logging/logger'; +import { RootState } from 'app/store/store'; +import { NonNullableGraph } from 'features/nodes/types/types'; +import { + ImageDTO, + InfillPatchmatchInvocation, + InfillTileInvocation, + RandomIntInvocation, + RangeOfSizeInvocation, +} from 'services/api/types'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; +import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; +import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addVAEToGraph } from './addVAEToGraph'; +import { addWatermarkerToGraph } from './addWatermarkerToGraph'; +import { + COLOR_CORRECT, + INPAINT, + INPAINT_FINAL_IMAGE, + INPAINT_GRAPH, + INPAINT_IMAGE, + INPAINT_INFILL, + ITERATE, + LATENTS_TO_IMAGE, + MASK_BLUR, + MASK_COMBINE, + MASK_FROM_ALPHA, + NEGATIVE_CONDITIONING, + NOISE, + POSITIVE_CONDITIONING, + RANDOM_INT, + RANGE_OF_SIZE, + SDXL_MODEL_LOADER, +} from './constants'; + +/** + * Builds the Canvas tab's Outpaint graph. + */ +export const buildCanvasSDXLOutpaintGraph = ( + state: RootState, + canvasInitImage: ImageDTO, + canvasMaskImage?: ImageDTO +): NonNullableGraph => { + const log = logger('nodes'); + const { + positivePrompt, + negativePrompt, + model, + cfgScale: cfg_scale, + scheduler, + steps, + img2imgStrength: strength, + shouldFitToWidthHeight, + iterations, + seed, + shouldRandomizeSeed, + vaePrecision, + shouldUseNoiseSettings, + shouldUseCpuNoise, + maskBlur, + maskBlurMethod, + tileSize, + infillMethod, + } = state.generation; + + const { + positiveStylePrompt, + negativeStylePrompt, + shouldConcatSDXLStylePrompt, + shouldUseSDXLRefiner, + refinerStart, + } = state.sdxl; + + if (!model) { + log.error('No model found in state'); + throw new Error('No model found in state'); + } + + // The bounding box determines width and height, not the width and height params + const { width, height } = state.canvas.boundingBoxDimensions; + + // We may need to set the inpaint width and height to scale the image + const { + scaledBoundingBoxDimensions, + boundingBoxScaleMethod, + shouldAutoSave, + } = state.canvas; + + const use_cpu = shouldUseNoiseSettings + ? 
shouldUseCpuNoise + : shouldUseCpuNoise; + + let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { + type: 'infill_tile', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + tile_size: tileSize, + }; + + if (infillMethod === 'patchmatch') { + infillNode = { + type: 'infill_patchmatch', + id: INPAINT_INFILL, + is_intermediate: true, + image: canvasInitImage, + }; + } + + const graph: NonNullableGraph = { + id: INPAINT_GRAPH, + nodes: { + [INPAINT]: { + type: 'denoise_latents', + id: INPAINT, + is_intermediate: true, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, + }, + [infillNode.id]: infillNode, + [MASK_FROM_ALPHA]: { + type: 'tomask', + id: MASK_FROM_ALPHA, + is_intermediate: true, + image: canvasInitImage, + }, + [MASK_COMBINE]: { + type: 'mask_combine', + id: MASK_COMBINE, + is_intermediate: true, + mask2: canvasMaskImage, + }, + [MASK_BLUR]: { + type: 'mask_blur', + id: MASK_BLUR, + is_intermediate: true, + radius: maskBlur, + blur_type: maskBlurMethod, + }, + [INPAINT_IMAGE]: { + type: 'i2l', + id: INPAINT_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, + [NOISE]: { + type: 'noise', + id: NOISE, + width, + height, + use_cpu, + is_intermediate: true, + }, + [POSITIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: POSITIVE_CONDITIONING, + prompt: positivePrompt, + style: shouldConcatSDXLStylePrompt + ? `${positivePrompt} ${positiveStylePrompt}` + : positiveStylePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: NEGATIVE_CONDITIONING, + prompt: negativePrompt, + style: shouldConcatSDXLStylePrompt + ? `${negativePrompt} ${negativeStylePrompt}` + : negativeStylePrompt, + }, + [SDXL_MODEL_LOADER]: { + type: 'sdxl_model_loader', + id: SDXL_MODEL_LOADER, + model, + }, + [LATENTS_TO_IMAGE]: { + type: 'l2i', + id: LATENTS_TO_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, + [COLOR_CORRECT]: { + type: 'color_correct', + id: COLOR_CORRECT, + is_intermediate: true, + }, + [INPAINT_FINAL_IMAGE]: { + type: 'img_paste', + id: INPAINT_FINAL_IMAGE, + is_intermediate: true, + }, + [RANGE_OF_SIZE]: { + type: 'range_of_size', + id: RANGE_OF_SIZE, + is_intermediate: true, + // seed - must be connected manually + // start: 0, + size: iterations, + step: 1, + }, + [ITERATE]: { + type: 'iterate', + id: ITERATE, + is_intermediate: true, + }, + }, + edges: [ + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'unet', + }, + destination: { + node_id: INPAINT, + field: 'unet', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip2', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: SDXL_MODEL_LOADER, + field: 'clip2', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip2', + }, + }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'negative_conditioning', + }, + }, + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'positive_conditioning', + }, + }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: INPAINT, + field: 'noise', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_IMAGE, + field: 'latents', + }, + destination: { + node_id: INPAINT, + field: 'latents', + }, + }, + { + source: { + node_id: MASK_FROM_ALPHA, + field: 'mask', + }, + destination: { + node_id: MASK_COMBINE, + field: 'mask1', + }, + }, + { + source: { + node_id: MASK_COMBINE, + field: 'mask', + }, + destination: { + node_id: MASK_BLUR, + field: 'mask', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: INPAINT, + field: 'mask', + }, + }, + { + source: { + node_id: RANGE_OF_SIZE, + field: 'collection', + }, + destination: { + node_id: ITERATE, + field: 'collection', + }, + }, + { + source: { + node_id: ITERATE, + field: 'item', + }, + destination: { + node_id: NOISE, + field: 'seed', + }, + }, + { + source: { + node_id: INPAINT, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'reference', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'base_image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'mask', + }, + }, + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'image', + }, + }, 
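
The edge lists in these builders run long because every connection is the same four-field record: a `{ node_id, field }` source and a `{ node_id, field }` destination pushed onto `graph.edges`. A minimal sketch of a wiring helper that captures the pattern follows; `connect`, `GraphLike`, and `Edge` are hypothetical names for illustration only, not part of the InvokeAI codebase, and the usage comments reuse the node-id constants imported from './constants' above.

// Stand-in types; the real builders use NonNullableGraph from
// 'features/nodes/types/types'.
type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};
type GraphLike = { edges: Edge[] };

// Push one source -> destination connection onto the graph.
const connect = (
  graph: GraphLike,
  from: [nodeId: string, field: string],
  to: [nodeId: string, field: string]
): void => {
  graph.edges.push({
    source: { node_id: from[0], field: from[1] },
    destination: { node_id: to[0], field: to[1] },
  });
};

// The compositing tail of the outpaint graph above, expressed with the helper:
// connect(graph, [INPAINT_INFILL, 'image'], [COLOR_CORRECT, 'reference']);
// connect(graph, [LATENTS_TO_IMAGE, 'image'], [COLOR_CORRECT, 'image']);
// connect(graph, [COLOR_CORRECT, 'image'], [INPAINT_FINAL_IMAGE, 'image']);
// connect(graph, [MASK_BLUR, 'mask'], [INPAINT_FINAL_IMAGE, 'mask']);
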
+ ], + }; + + // Add Refiner if enabled + if (shouldUseSDXLRefiner) { + addSDXLRefinerToGraph(state, graph, INPAINT); + } + + // Add VAE + addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + + // handle seed + if (shouldRandomizeSeed) { + // Random int node to generate the starting seed + const randomIntNode: RandomIntInvocation = { + id: RANDOM_INT, + type: 'rand_int', + }; + + graph.nodes[RANDOM_INT] = randomIntNode; + + // Connect random int to the start of the range of size so the range starts on the random first seed + graph.edges.push({ + source: { node_id: RANDOM_INT, field: 'a' }, + destination: { node_id: RANGE_OF_SIZE, field: 'start' }, + }); + } else { + // User specified seed, so set the start of the range of size to the seed + (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; + } + + // add LoRA support + addSDXLLoRAsToGraph(state, graph, INPAINT, SDXL_MODEL_LOADER); + + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, INPAINT); + + // NSFW & watermark - must be last thing added to graph + if (state.system.shouldUseNSFWChecker) { + // must add before watermarker! + addNSFWCheckerToGraph(state, graph, INPAINT); + } + + if (state.system.shouldUseWatermarker) { + // must add after nsfw checker! + addWatermarkerToGraph(state, graph, INPAINT); + } + + return graph; +}; diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index b4fcf7f75c..84053db29c 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1586,7 +1586,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | 
components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; + [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | 
components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskBlurInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; }; /** * Edges @@ -3273,6 +3273,47 @@ 
export type components = { */ model: components["schemas"]["MainModelField"]; }; + /** + * MaskBlurInvocation + * @description Blurs a mask + */ + MaskBlurInvocation: { + /** + * Id + * @description The id of this node. Must be unique among all nodes. + */ + id: string; + /** + * Is Intermediate + * @description Whether or not this node is an intermediate node. + * @default false + */ + is_intermediate?: boolean; + /** + * Type + * @default mask_blur + * @enum {string} + */ + type?: "mask_blur"; + /** + * Mask + * @description The mask image to blur + */ + mask?: components["schemas"]["ImageField"]; + /** + * Radius + * @description The blur radius + * @default 8 + */ + radius?: number; + /** + * Blur Type + * @description The type of blur + * @default gaussian + * @enum {string} + */ + blur_type?: "gaussian" | "box"; + }; /** * MaskCombineInvocation * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. @@ -5780,12 +5821,24 @@ export type components = { */ image?: components["schemas"]["ImageField"]; }; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * ControlNetModelFormat * @description An enumeration. @@ -5798,18 +5851,6 @@ export type components = { * @enum {string} */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. 
- * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -5920,7 +5961,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | 
components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskBlurInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | 
components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { @@ -5957,7 +5998,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | 
components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | 
components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskBlurInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | 
components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { From 7587b54787e022dec863f88c3b163f77b14fa133 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 17:17:46 +1200 Subject: [PATCH 30/67] chore: Cleanup, comment and organize Node Graphs Before it gets too chaotic --- .../buildCanvasImageToImageGraph.ts | 116 +++++----- .../graphBuilders/buildCanvasInpaintGraph.ts | 113 +++++----- .../graphBuilders/buildCanvasOutpaintGraph.ts | 197 ++++++++--------- .../buildCanvasSDXLImageToImageGraph.ts | 87 ++++---- .../buildCanvasSDXLInpaintGraph.ts | 6 + .../buildCanvasSDXLOutpaintGraph.ts | 198 +++++++++--------- .../buildCanvasSDXLTextToImageGraph.ts | 46 ++-- .../buildCanvasTextToImageGraph.ts | 129 ++++++------ .../buildLinearImageToImageGraph.ts | 62 +++--- .../buildLinearSDXLImageToImageGraph.ts | 63 +++--- .../buildLinearSDXLTextToImageGraph.ts | 3 + .../buildLinearTextToImageGraph.ts | 78 +++---- 12 files changed, 584 insertions(+), 514 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index 181a0d9294..a4cd3b2add 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -75,6 +75,18 @@ export const buildCanvasImageToImageGraph = ( const graph: NonNullableGraph = { id: IMAGE_TO_IMAGE_GRAPH, nodes: { + [MAIN_MODEL_LOADER]: { + type: 'main_model_loader', + id: MAIN_MODEL_LOADER, + is_intermediate: true, + model, + }, + [CLIP_SKIP]: { + type: 'clip_skip', + id: CLIP_SKIP, + is_intermediate: true, + skipped_layers: clipSkip, + }, [POSITIVE_CONDITIONING]: { type: 'compel', id: POSITIVE_CONDITIONING, @@ -93,17 +105,14 @@ export const buildCanvasImageToImageGraph = ( is_intermediate: true, use_cpu, }, - [MAIN_MODEL_LOADER]: { - type: 'main_model_loader', - id: MAIN_MODEL_LOADER, + [IMAGE_TO_LATENTS]: { + type: 'i2l', + id: IMAGE_TO_LATENTS, is_intermediate: true, - model, - }, - [CLIP_SKIP]: { - type: 'clip_skip', - id: CLIP_SKIP, - is_intermediate: true, - skipped_layers: clipSkip, + // must be set manually later, bc `fit` parameter may require a resize node inserted + // image: { + // image_name: initialImage.image_name, + // }, }, [DENOISE_LATENTS]: { type: 'denoise_latents', @@ -115,15 +124,6 @@ export const buildCanvasImageToImageGraph = ( denoising_start: 1 - strength, denoising_end: 1, }, - [IMAGE_TO_LATENTS]: { - type: 'i2l', - id: IMAGE_TO_LATENTS, - is_intermediate: true, - // must be set manually later, bc `fit` parameter may require a resize node inserted - // image: { - // image_name: initialImage.image_name, - // }, - }, [LATENTS_TO_IMAGE]: { type: 'l2i', id: LATENTS_TO_IMAGE, @@ -131,6 +131,17 @@ export const buildCanvasImageToImageGraph = ( }, }, edges: [ + // Connect Model Loader to CLIP Skip and UNet + { + source: { + node_id: MAIN_MODEL_LOADER, + field: 'unet', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'unet', + }, + }, { source: { node_id: MAIN_MODEL_LOADER, @@ -141,6 
+152,7 @@ export const buildCanvasImageToImageGraph = ( field: 'clip', }, }, + // Connect CLIP Skip To Conditioning { source: { node_id: CLIP_SKIP, @@ -161,44 +173,15 @@ export const buildCanvasImageToImageGraph = ( field: 'clip', }, }, + // Connect Everything To Denoise Latents { source: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, - { - source: { - node_id: IMAGE_TO_LATENTS, - field: 'latents', + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', }, destination: { node_id: DENOISE_LATENTS, - field: 'latents', - }, - }, - { - source: { - node_id: NOISE, - field: 'noise', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 'noise', - }, - }, - { - source: { - node_id: MAIN_MODEL_LOADER, - field: 'unet', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 'unet', + field: 'positive_conditioning', }, }, { @@ -213,12 +196,33 @@ export const buildCanvasImageToImageGraph = ( }, { source: { - node_id: POSITIVE_CONDITIONING, - field: 'conditioning', + node_id: NOISE, + field: 'noise', }, destination: { node_id: DENOISE_LATENTS, - field: 'positive_conditioning', + field: 'noise', + }, + }, + { + source: { + node_id: IMAGE_TO_LATENTS, + field: 'latents', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + }, + // Decode the denoised latents to an image + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', }, }, ], @@ -328,7 +332,7 @@ export const buildCanvasImageToImageGraph = ( addLoRAsToGraph(state, graph, DENOISE_LATENTS); // optionally add custom VAE - addVAEToGraph(state, graph); + addVAEToGraph(state, graph, MAIN_MODEL_LOADER); // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 5afcd80d85..2ecc858aa4 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -80,15 +80,29 @@ export const buildCanvasInpaintGraph = ( const graph: NonNullableGraph = { id: INPAINT_GRAPH, nodes: { - [INPAINT]: { - type: 'denoise_latents', - id: INPAINT, + [MAIN_MODEL_LOADER]: { + type: 'main_model_loader', + id: MAIN_MODEL_LOADER, is_intermediate: true, - steps: steps, - cfg_scale: cfg_scale, - scheduler: scheduler, - denoising_start: 1 - strength, - denoising_end: 1, + model, + }, + [CLIP_SKIP]: { + type: 'clip_skip', + id: CLIP_SKIP, + is_intermediate: true, + skipped_layers: clipSkip, + }, + [POSITIVE_CONDITIONING]: { + type: 'compel', + id: POSITIVE_CONDITIONING, + is_intermediate: true, + prompt: positivePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: 'compel', + id: NEGATIVE_CONDITIONING, + is_intermediate: true, + prompt: negativePrompt, }, [MASK_BLUR]: { type: 'mask_blur', @@ -113,23 +127,15 @@ export const buildCanvasInpaintGraph = ( use_cpu, is_intermediate: true, }, - [POSITIVE_CONDITIONING]: { - type: 'compel', - id: POSITIVE_CONDITIONING, + [INPAINT]: { + type: 'denoise_latents', + id: INPAINT, is_intermediate: true, - prompt: positivePrompt, - }, - [NEGATIVE_CONDITIONING]: { - type: 'compel', - id: NEGATIVE_CONDITIONING, - is_intermediate: true, - prompt: negativePrompt, - }, - [MAIN_MODEL_LOADER]: { - type: 
'main_model_loader', - id: MAIN_MODEL_LOADER, - is_intermediate: true, - model, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: 1, }, [LATENTS_TO_IMAGE]: { type: 'l2i', @@ -137,12 +143,6 @@ export const buildCanvasInpaintGraph = ( is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, }, - [CLIP_SKIP]: { - type: 'clip_skip', - id: CLIP_SKIP, - is_intermediate: true, - skipped_layers: clipSkip, - }, [COLOR_CORRECT]: { type: 'color_correct', id: COLOR_CORRECT, @@ -172,6 +172,7 @@ export const buildCanvasInpaintGraph = ( }, }, edges: [ + // Connect Model Loader to CLIP Skip and UNet { source: { node_id: MAIN_MODEL_LOADER, @@ -192,6 +193,7 @@ export const buildCanvasInpaintGraph = ( field: 'clip', }, }, + // Connect CLIP Skip to Conditioning { source: { node_id: CLIP_SKIP, @@ -212,6 +214,17 @@ export const buildCanvasInpaintGraph = ( field: 'clip', }, }, + // Connect Everything To Inpaint Node + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'positive_conditioning', + }, + }, { source: { node_id: NEGATIVE_CONDITIONING, @@ -222,16 +235,6 @@ export const buildCanvasInpaintGraph = ( field: 'negative_conditioning', }, }, - { - source: { - node_id: POSITIVE_CONDITIONING, - field: 'conditioning', - }, - destination: { - node_id: INPAINT, - field: 'positive_conditioning', - }, - }, { source: { node_id: NOISE, @@ -262,6 +265,7 @@ export const buildCanvasInpaintGraph = ( field: 'mask', }, }, + // Iterate { source: { node_id: RANGE_OF_SIZE, @@ -282,6 +286,7 @@ export const buildCanvasInpaintGraph = ( field: 'seed', }, }, + // Decode Inpainted Latents To Image { source: { node_id: INPAINT, @@ -292,6 +297,7 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + // Color Correct The Inpainted Result { source: { node_id: LATENTS_TO_IMAGE, @@ -302,6 +308,17 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, }, + // Paste Back Onto Original Image + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'image', + }, + }, { source: { node_id: MASK_BLUR, @@ -312,22 +329,9 @@ export const buildCanvasInpaintGraph = ( field: 'mask', }, }, - { - source: { - node_id: COLOR_CORRECT, - field: 'image', - }, - destination: { - node_id: INPAINT_FINAL_IMAGE, - field: 'image', - }, - }, ], }; - // Add VAE - addVAEToGraph(state, graph, MAIN_MODEL_LOADER); - // handle seed if (shouldRandomizeSeed) { // Random int node to generate the starting seed @@ -348,6 +352,9 @@ export const buildCanvasInpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add VAE + addVAEToGraph(state, graph, MAIN_MODEL_LOADER); + // add LoRA support addLoRAsToGraph(state, graph, INPAINT, MAIN_MODEL_LOADER); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index a11473314d..7949adfe39 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -104,17 +104,30 @@ export const buildCanvasOutpaintGraph = ( const graph: NonNullableGraph = { id: INPAINT_GRAPH, nodes: { - [INPAINT]: { - type: 'denoise_latents', - id: INPAINT, + [MAIN_MODEL_LOADER]: { + type: 'main_model_loader', + id: 
MAIN_MODEL_LOADER, is_intermediate: true, - steps: steps, - cfg_scale: cfg_scale, - scheduler: scheduler, - denoising_start: 1 - strength, - denoising_end: 1, + model, + }, + [CLIP_SKIP]: { + type: 'clip_skip', + id: CLIP_SKIP, + is_intermediate: true, + skipped_layers: clipSkip, + }, + [POSITIVE_CONDITIONING]: { + type: 'compel', + id: POSITIVE_CONDITIONING, + is_intermediate: true, + prompt: positivePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: 'compel', + id: NEGATIVE_CONDITIONING, + is_intermediate: true, + prompt: negativePrompt, }, - [infillNode.id]: infillNode, [MASK_FROM_ALPHA]: { type: 'tomask', id: MASK_FROM_ALPHA, @@ -134,6 +147,7 @@ export const buildCanvasOutpaintGraph = ( radius: maskBlur, blur_type: maskBlurMethod, }, + [infillNode.id]: infillNode, [INPAINT_IMAGE]: { type: 'i2l', id: INPAINT_IMAGE, @@ -148,23 +162,15 @@ export const buildCanvasOutpaintGraph = ( use_cpu, is_intermediate: true, }, - [POSITIVE_CONDITIONING]: { - type: 'compel', - id: POSITIVE_CONDITIONING, + [INPAINT]: { + type: 'denoise_latents', + id: INPAINT, is_intermediate: true, - prompt: positivePrompt, - }, - [NEGATIVE_CONDITIONING]: { - type: 'compel', - id: NEGATIVE_CONDITIONING, - is_intermediate: true, - prompt: negativePrompt, - }, - [MAIN_MODEL_LOADER]: { - type: 'main_model_loader', - id: MAIN_MODEL_LOADER, - is_intermediate: true, - model, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: 1, }, [LATENTS_TO_IMAGE]: { type: 'l2i', @@ -172,12 +178,6 @@ export const buildCanvasOutpaintGraph = ( is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, }, - [CLIP_SKIP]: { - type: 'clip_skip', - id: CLIP_SKIP, - is_intermediate: true, - skipped_layers: clipSkip, - }, [COLOR_CORRECT]: { type: 'color_correct', id: COLOR_CORRECT, @@ -204,6 +204,7 @@ export const buildCanvasOutpaintGraph = ( }, }, edges: [ + // Connect Model Loader To UNet & Clip Skip { source: { node_id: MAIN_MODEL_LOADER, @@ -224,6 +225,7 @@ export const buildCanvasOutpaintGraph = ( field: 'clip', }, }, + // Connect CLIP Skip to Conditioning { source: { node_id: CLIP_SKIP, @@ -244,36 +246,7 @@ export const buildCanvasOutpaintGraph = ( field: 'clip', }, }, - { - source: { - node_id: NEGATIVE_CONDITIONING, - field: 'conditioning', - }, - destination: { - node_id: INPAINT, - field: 'negative_conditioning', - }, - }, - { - source: { - node_id: POSITIVE_CONDITIONING, - field: 'conditioning', - }, - destination: { - node_id: INPAINT, - field: 'positive_conditioning', - }, - }, - { - source: { - node_id: NOISE, - field: 'noise', - }, - destination: { - node_id: INPAINT, - field: 'noise', - }, - }, + // Connect Infill Result To Inpaint Image { source: { node_id: INPAINT_INFILL, @@ -284,16 +257,7 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, }, - { - source: { - node_id: INPAINT_IMAGE, - field: 'latents', - }, - destination: { - node_id: INPAINT, - field: 'latents', - }, - }, + // Combine Mask from Init Image with User Painted Mask { source: { node_id: MASK_FROM_ALPHA, @@ -314,6 +278,47 @@ export const buildCanvasOutpaintGraph = ( field: 'mask', }, }, + // Plug Everything Into Inpaint Node + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'positive_conditioning', + }, + }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'negative_conditioning', + }, + }, + { + source: { + node_id: NOISE, + field: 
'noise', + }, + destination: { + node_id: INPAINT, + field: 'noise', + }, + }, + { + source: { + node_id: INPAINT_IMAGE, + field: 'latents', + }, + destination: { + node_id: INPAINT, + field: 'latents', + }, + }, { source: { node_id: MASK_BLUR, @@ -324,6 +329,7 @@ export const buildCanvasOutpaintGraph = ( field: 'mask', }, }, + // Iterate { source: { node_id: RANGE_OF_SIZE, @@ -344,6 +350,7 @@ export const buildCanvasOutpaintGraph = ( field: 'seed', }, }, + // Decode the result from Inpaint { source: { node_id: INPAINT, @@ -354,6 +361,7 @@ export const buildCanvasOutpaintGraph = ( field: 'latents', }, }, + // Color Correct The Inpainted Result { source: { node_id: INPAINT_INFILL, @@ -364,16 +372,6 @@ export const buildCanvasOutpaintGraph = ( field: 'reference', }, }, - { - source: { - node_id: MASK_BLUR, - field: 'mask', - }, - destination: { - node_id: COLOR_CORRECT, - field: 'mask', - }, - }, { source: { node_id: LATENTS_TO_IMAGE, @@ -384,6 +382,17 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + // Paste Everything Back { source: { node_id: INPAINT_INFILL, @@ -394,16 +403,6 @@ export const buildCanvasOutpaintGraph = ( field: 'base_image', }, }, - { - source: { - node_id: MASK_BLUR, - field: 'mask', - }, - destination: { - node_id: INPAINT_FINAL_IMAGE, - field: 'mask', - }, - }, { source: { node_id: COLOR_CORRECT, @@ -414,12 +413,19 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'mask', + }, + }, ], }; - // Add VAE - addVAEToGraph(state, graph, MAIN_MODEL_LOADER); - // handle seed if (shouldRandomizeSeed) { // Random int node to generate the starting seed @@ -440,6 +446,9 @@ export const buildCanvasOutpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add VAE + addVAEToGraph(state, graph, MAIN_MODEL_LOADER); + // add LoRA support addLoRAsToGraph(state, graph, INPAINT, MAIN_MODEL_LOADER); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index b8322fd612..80f3d671c9 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -110,6 +110,15 @@ export const buildCanvasSDXLImageToImageGraph = ( is_intermediate: true, use_cpu, }, + [IMAGE_TO_LATENTS]: { + type: 'i2l', + id: IMAGE_TO_LATENTS, + is_intermediate: true, + // must be set manually later, bc `fit` parameter may require a resize node inserted + // image: { + // image_name: initialImage.image_name, + // }, + }, [DENOISE_LATENTS]: { type: 'denoise_latents', id: DENOISE_LATENTS, @@ -122,15 +131,6 @@ export const buildCanvasSDXLImageToImageGraph = ( : 1 - strength, denoising_end: shouldUseSDXLRefiner ? 
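// For image-to-image, the denoise node runs only the tail of the schedule:
// `denoising_start` is 1 - strength, and with the SDXL refiner enabled the base
// model stops at `refinerStart` so the refiner can finish the remaining steps.
// A worked sketch of the window (simplified: the real builder may additionally
// clamp the start value when the refiner is active):
//
//   function denoisingWindow(strength: number, refinerStart?: number) {
//     const denoising_start = 1 - strength; // strength 0.75 -> start at 0.25
//     const denoising_end = refinerStart ?? 1; // hand off to the refiner, else run to 1.0
//     return { denoising_start, denoising_end };
//   }
//
//   // strength 0.75 with a refiner handoff at 0.8 denoises t in [0.25, 0.8).
//   const windowWithRefiner = denoisingWindow(0.75, 0.8);
//   const windowWithout = denoisingWindow(0.75); // -> { denoising_start: 0.25, denoising_end: 1 }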
refinerStart : 1, }, - [IMAGE_TO_LATENTS]: { - type: 'i2l', - id: IMAGE_TO_LATENTS, - is_intermediate: true, - // must be set manually later, bc `fit` parameter may require a resize node inserted - // image: { - // image_name: initialImage.image_name, - // }, - }, [LATENTS_TO_IMAGE]: { type: 'l2i', id: LATENTS_TO_IMAGE, @@ -138,36 +138,7 @@ export const buildCanvasSDXLImageToImageGraph = ( }, }, edges: [ - { - source: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, - { - source: { - node_id: IMAGE_TO_LATENTS, - field: 'latents', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - }, - { - source: { - node_id: NOISE, - field: 'noise', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 'noise', - }, - }, + // Connect Model Loader To UNet & CLIP { source: { node_id: SDXL_MODEL_LOADER, @@ -218,6 +189,17 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'clip2', }, }, + // Connect Everything to Denoise Latents + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'positive_conditioning', + }, + }, { source: { node_id: NEGATIVE_CONDITIONING, @@ -230,12 +212,33 @@ export const buildCanvasSDXLImageToImageGraph = ( }, { source: { - node_id: POSITIVE_CONDITIONING, - field: 'conditioning', + node_id: NOISE, + field: 'noise', }, destination: { node_id: DENOISE_LATENTS, - field: 'positive_conditioning', + field: 'noise', + }, + }, + { + source: { + node_id: IMAGE_TO_LATENTS, + field: 'latents', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + }, + // Decode denoised latents to an image + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', }, }, ], diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 1d05d05073..52a159fb3a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -176,6 +176,7 @@ export const buildCanvasSDXLInpaintGraph = ( }, }, edges: [ + // Connect Model Loader to UNet and CLIP { source: { node_id: SDXL_MODEL_LOADER, @@ -226,6 +227,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'clip2', }, }, + // Connect everything to Inpaint { source: { node_id: POSITIVE_CONDITIONING, @@ -276,6 +278,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'mask', }, }, + // Iterate { source: { node_id: RANGE_OF_SIZE, @@ -296,6 +299,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'seed', }, }, + // Decode inpainted latents to image { source: { node_id: INPAINT, @@ -306,6 +310,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'latents', }, }, + // Color Correct Inpainted Result { source: { node_id: LATENTS_TO_IMAGE, @@ -316,6 +321,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, }, + // Paste them back on original image { source: { node_id: COLOR_CORRECT, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 2b60019003..3ed12cd935 100644 --- 
a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -111,17 +111,34 @@ export const buildCanvasSDXLOutpaintGraph = ( const graph: NonNullableGraph = { id: INPAINT_GRAPH, nodes: { - [INPAINT]: { - type: 'denoise_latents', - id: INPAINT, - is_intermediate: true, - steps: steps, - cfg_scale: cfg_scale, - scheduler: scheduler, - denoising_start: 1 - strength, - denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, + [SDXL_MODEL_LOADER]: { + type: 'sdxl_model_loader', + id: SDXL_MODEL_LOADER, + model, + }, + [POSITIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: POSITIVE_CONDITIONING, + prompt: positivePrompt, + style: shouldConcatSDXLStylePrompt + ? `${positivePrompt} ${positiveStylePrompt}` + : positiveStylePrompt, + }, + [NEGATIVE_CONDITIONING]: { + type: 'sdxl_compel_prompt', + id: NEGATIVE_CONDITIONING, + prompt: negativePrompt, + style: shouldConcatSDXLStylePrompt + ? `${negativePrompt} ${negativeStylePrompt}` + : negativeStylePrompt, }, [infillNode.id]: infillNode, + [INPAINT_IMAGE]: { + type: 'i2l', + id: INPAINT_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? true : false, + }, [MASK_FROM_ALPHA]: { type: 'tomask', id: MASK_FROM_ALPHA, @@ -141,12 +158,6 @@ export const buildCanvasSDXLOutpaintGraph = ( radius: maskBlur, blur_type: maskBlurMethod, }, - [INPAINT_IMAGE]: { - type: 'i2l', - id: INPAINT_IMAGE, - is_intermediate: true, - fp32: vaePrecision === 'fp32' ? true : false, - }, [NOISE]: { type: 'noise', id: NOISE, @@ -155,26 +166,15 @@ export const buildCanvasSDXLOutpaintGraph = ( use_cpu, is_intermediate: true, }, - [POSITIVE_CONDITIONING]: { - type: 'sdxl_compel_prompt', - id: POSITIVE_CONDITIONING, - prompt: positivePrompt, - style: shouldConcatSDXLStylePrompt - ? `${positivePrompt} ${positiveStylePrompt}` - : positiveStylePrompt, - }, - [NEGATIVE_CONDITIONING]: { - type: 'sdxl_compel_prompt', - id: NEGATIVE_CONDITIONING, - prompt: negativePrompt, - style: shouldConcatSDXLStylePrompt - ? `${negativePrompt} ${negativeStylePrompt}` - : negativeStylePrompt, - }, - [SDXL_MODEL_LOADER]: { - type: 'sdxl_model_loader', - id: SDXL_MODEL_LOADER, - model, + [INPAINT]: { + type: 'denoise_latents', + id: INPAINT, + is_intermediate: true, + steps: steps, + cfg_scale: cfg_scale, + scheduler: scheduler, + denoising_start: 1 - strength, + denoising_end: shouldUseSDXLRefiner ? 
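// The SDXL compel nodes above take a second `style` prompt; when
// `shouldConcatSDXLStylePrompt` is set, the main prompt is prepended so both
// text encoders see the full text, otherwise the style prompt is used alone.
// A standalone sketch of that choice:
//
//   function sdxlStylePrompt(prompt: string, stylePrompt: string, shouldConcat: boolean): string {
//     return shouldConcat ? `${prompt} ${stylePrompt}` : stylePrompt;
//   }
//
//   const style = sdxlStylePrompt('a lighthouse at dusk', 'oil painting, impasto', true);
//   // -> "a lighthouse at dusk oil painting, impasto"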
refinerStart : 1, }, [LATENTS_TO_IMAGE]: { type: 'l2i', @@ -208,6 +208,7 @@ export const buildCanvasSDXLOutpaintGraph = ( }, }, edges: [ + // Connect Model Loader To UNet and CLIP { source: { node_id: SDXL_MODEL_LOADER, @@ -258,36 +259,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'clip2', }, }, - { - source: { - node_id: NEGATIVE_CONDITIONING, - field: 'conditioning', - }, - destination: { - node_id: INPAINT, - field: 'negative_conditioning', - }, - }, - { - source: { - node_id: POSITIVE_CONDITIONING, - field: 'conditioning', - }, - destination: { - node_id: INPAINT, - field: 'positive_conditioning', - }, - }, - { - source: { - node_id: NOISE, - field: 'noise', - }, - destination: { - node_id: INPAINT, - field: 'noise', - }, - }, + // Infill The Image { source: { node_id: INPAINT_INFILL, @@ -298,16 +270,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, - { - source: { - node_id: INPAINT_IMAGE, - field: 'latents', - }, - destination: { - node_id: INPAINT, - field: 'latents', - }, - }, + // Create mask from image alpha & merge with user painted mask { source: { node_id: MASK_FROM_ALPHA, @@ -328,6 +291,47 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'mask', }, }, + // Connect Everything To Inpaint + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'positive_conditioning', + }, + }, + { + source: { + node_id: NEGATIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: INPAINT, + field: 'negative_conditioning', + }, + }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: INPAINT, + field: 'noise', + }, + }, + { + source: { + node_id: INPAINT_IMAGE, + field: 'latents', + }, + destination: { + node_id: INPAINT, + field: 'latents', + }, + }, { source: { node_id: MASK_BLUR, @@ -338,6 +342,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'mask', }, }, + // Iterate { source: { node_id: RANGE_OF_SIZE, @@ -358,6 +363,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'seed', }, }, + // Decode inpainted latents to image { source: { node_id: INPAINT, @@ -368,6 +374,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'latents', }, }, + // Color Correct The Inpainted Result { source: { node_id: INPAINT_INFILL, @@ -378,16 +385,6 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'reference', }, }, - { - source: { - node_id: MASK_BLUR, - field: 'mask', - }, - destination: { - node_id: COLOR_CORRECT, - field: 'mask', - }, - }, { source: { node_id: LATENTS_TO_IMAGE, @@ -398,6 +395,17 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + // Paste Back Outpainted Image on Original { source: { node_id: INPAINT_INFILL, @@ -408,16 +416,6 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'base_image', }, }, - { - source: { - node_id: MASK_BLUR, - field: 'mask', - }, - destination: { - node_id: INPAINT_FINAL_IMAGE, - field: 'mask', - }, - }, { source: { node_id: COLOR_CORRECT, @@ -428,6 +426,16 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'mask', + }, + destination: { + node_id: INPAINT_FINAL_IMAGE, + field: 'mask', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index ed0fb74165..70375c1423 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -65,13 +65,17 @@ export const buildCanvasSDXLTextToImageGraph = ( const use_cpu = shouldUseNoiseSettings ? shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; + const isUsingOnnxModel = model.model_type === 'onnx'; + const modelLoaderNodeId = isUsingOnnxModel ? ONNX_MODEL_LOADER : SDXL_MODEL_LOADER; + const modelLoaderNodeType = isUsingOnnxModel ? 'onnx_model_loader' : 'sdxl_model_loader'; + const t2lNode: DenoiseLatentsInvocation | ONNXTextToLatentsInvocation = isUsingOnnxModel ? { @@ -106,6 +110,12 @@ export const buildCanvasSDXLTextToImageGraph = ( const graph: NonNullableGraph = { id: TEXT_TO_IMAGE_GRAPH, nodes: { + [modelLoaderNodeId]: { + type: modelLoaderNodeType, + id: modelLoaderNodeId, + is_intermediate: true, + model, + }, [POSITIVE_CONDITIONING]: { type: isUsingOnnxModel ? 'prompt_onnx' : 'sdxl_compel_prompt', id: POSITIVE_CONDITIONING, @@ -133,13 +143,6 @@ export const buildCanvasSDXLTextToImageGraph = ( use_cpu, }, [t2lNode.id]: t2lNode, - [modelLoaderNodeId]: { - type: modelLoaderNodeType, - id: modelLoaderNodeId, - is_intermediate: true, - model, - }, - [LATENTS_TO_IMAGE]: { type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', id: LATENTS_TO_IMAGE, @@ -147,6 +150,7 @@ export const buildCanvasSDXLTextToImageGraph = ( }, }, edges: [ + // Connect Model Loader to UNet and CLIP { source: { node_id: modelLoaderNodeId, @@ -197,6 +201,17 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'clip2', }, }, + // Connect everything to Denoise Latents + { + source: { + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'positive_conditioning', + }, + }, { source: { node_id: NEGATIVE_CONDITIONING, @@ -209,14 +224,15 @@ export const buildCanvasSDXLTextToImageGraph = ( }, { source: { - node_id: POSITIVE_CONDITIONING, - field: 'conditioning', + node_id: NOISE, + field: 'noise', }, destination: { node_id: DENOISE_LATENTS, - field: 'positive_conditioning', + field: 'noise', }, }, + // Decode Denoised Latents To Image { source: { node_id: DENOISE_LATENTS, @@ -227,16 +243,6 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'latents', }, }, - { - source: { - node_id: NOISE, - field: 'noise', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 'noise', - }, - }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 9e25f97586..519be4e498 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -57,13 +57,17 @@ export const buildCanvasTextToImageGraph = ( const use_cpu = shouldUseNoiseSettings ? shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; + const isUsingOnnxModel = model.model_type === 'onnx'; + const modelLoaderNodeId = isUsingOnnxModel ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER; + const modelLoaderNodeType = isUsingOnnxModel ? 'onnx_model_loader' : 'main_model_loader'; + const t2lNode: DenoiseLatentsInvocation | ONNXTextToLatentsInvocation = isUsingOnnxModel ? 
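// Both canvas text-to-image builders now branch on the model type, swapping in
// ONNX-specific loader (and prompt/decode) node types. A sketch of the
// selection; the local `ModelField` type and the literal id values are
// assumptions for illustration:
//
//   type ModelField = { model_name: string; base_model: string; model_type: 'main' | 'onnx' };
//
//   function pickModelLoader(model: ModelField) {
//     const isUsingOnnxModel = model.model_type === 'onnx';
//     const modelLoaderNodeId = isUsingOnnxModel ? 'onnx_model_loader' : 'main_model_loader';
//     const modelLoaderNodeType = isUsingOnnxModel ? 'onnx_model_loader' : 'main_model_loader';
//     return { modelLoaderNodeId, modelLoaderNodeType };
//   }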
{ @@ -84,6 +88,7 @@ export const buildCanvasTextToImageGraph = ( denoising_start: 0, denoising_end: 1, }; + /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the * full graph here as a template. Then use the parameters from app state and set friendlier node @@ -98,6 +103,18 @@ export const buildCanvasTextToImageGraph = ( const graph: NonNullableGraph = { id: TEXT_TO_IMAGE_GRAPH, nodes: { + [modelLoaderNodeId]: { + type: modelLoaderNodeType, + id: modelLoaderNodeId, + is_intermediate: true, + model, + }, + [CLIP_SKIP]: { + type: 'clip_skip', + id: CLIP_SKIP, + is_intermediate: true, + skipped_layers: clipSkip, + }, [POSITIVE_CONDITIONING]: { type: isUsingOnnxModel ? 'prompt_onnx' : 'compel', id: POSITIVE_CONDITIONING, @@ -119,18 +136,6 @@ export const buildCanvasTextToImageGraph = ( use_cpu, }, [t2lNode.id]: t2lNode, - [modelLoaderNodeId]: { - type: modelLoaderNodeType, - id: modelLoaderNodeId, - is_intermediate: true, - model, - }, - [CLIP_SKIP]: { - type: 'clip_skip', - id: CLIP_SKIP, - is_intermediate: true, - skipped_layers: clipSkip, - }, [LATENTS_TO_IMAGE]: { type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', id: LATENTS_TO_IMAGE, @@ -138,16 +143,49 @@ export const buildCanvasTextToImageGraph = ( }, }, edges: [ + // Connect Model Loader to UNet & CLIP Skip { source: { - node_id: NEGATIVE_CONDITIONING, - field: 'conditioning', + node_id: modelLoaderNodeId, + field: 'unet', }, destination: { node_id: DENOISE_LATENTS, - field: 'negative_conditioning', + field: 'unet', }, }, + { + source: { + node_id: modelLoaderNodeId, + field: 'clip', + }, + destination: { + node_id: CLIP_SKIP, + field: 'clip', + }, + }, + // Connect CLIP Skip to Conditioning + { + source: { + node_id: CLIP_SKIP, + field: 'clip', + }, + destination: { + node_id: POSITIVE_CONDITIONING, + field: 'clip', + }, + }, + { + source: { + node_id: CLIP_SKIP, + field: 'clip', + }, + destination: { + node_id: NEGATIVE_CONDITIONING, + field: 'clip', + }, + }, + // Connect everything to Denoise Latents { source: { node_id: POSITIVE_CONDITIONING, @@ -160,52 +198,12 @@ export const buildCanvasTextToImageGraph = ( }, { source: { - node_id: modelLoaderNodeId, - field: 'clip', - }, - destination: { - node_id: CLIP_SKIP, - field: 'clip', - }, - }, - { - source: { - node_id: CLIP_SKIP, - field: 'clip', - }, - destination: { - node_id: POSITIVE_CONDITIONING, - field: 'clip', - }, - }, - { - source: { - node_id: CLIP_SKIP, - field: 'clip', - }, - destination: { node_id: NEGATIVE_CONDITIONING, - field: 'clip', - }, - }, - { - source: { - node_id: modelLoaderNodeId, - field: 'unet', + field: 'conditioning', }, destination: { node_id: DENOISE_LATENTS, - field: 'unet', - }, - }, - { - source: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', + field: 'negative_conditioning', }, }, { @@ -218,6 +216,17 @@ export const buildCanvasTextToImageGraph = ( field: 'noise', }, }, + // Decode denoised latents to image + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, ], }; @@ -253,12 +262,12 @@ export const buildCanvasTextToImageGraph = ( }, }); - // add LoRA support - addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); - // optionally add custom VAE addVAEToGraph(state, graph, modelLoaderNodeId); + // add LoRA support + addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); + // add dynamic prompts - also sets up core iteration 
and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts index 69d5227ea2..982a09357f 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearImageToImageGraph.ts @@ -138,6 +138,7 @@ export const buildLinearImageToImageGraph = ( }, }, edges: [ + // Connect Model Loader to UNet and CLIP Skip { source: { node_id: MAIN_MODEL_LOADER, @@ -158,6 +159,7 @@ export const buildLinearImageToImageGraph = ( field: 'clip', }, }, + // Connect CLIP Skip to Conditioning { source: { node_id: CLIP_SKIP, @@ -178,34 +180,15 @@ export const buildLinearImageToImageGraph = ( field: 'clip', }, }, + // Connect everything to Denoise Latents { source: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, - { - source: { - node_id: IMAGE_TO_LATENTS, - field: 'latents', + node_id: POSITIVE_CONDITIONING, + field: 'conditioning', }, destination: { node_id: DENOISE_LATENTS, - field: 'latents', - }, - }, - { - source: { - node_id: NOISE, - field: 'noise', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 'noise', + field: 'positive_conditioning', }, }, { @@ -220,12 +203,33 @@ export const buildLinearImageToImageGraph = ( }, { source: { - node_id: POSITIVE_CONDITIONING, - field: 'conditioning', + node_id: NOISE, + field: 'noise', }, destination: { node_id: DENOISE_LATENTS, - field: 'positive_conditioning', + field: 'noise', + }, + }, + { + source: { + node_id: IMAGE_TO_LATENTS, + field: 'latents', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + }, + // Decode denoised latents to image + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', }, }, ], @@ -334,12 +338,12 @@ export const buildLinearImageToImageGraph = ( }, }); + // optionally add custom VAE + addVAEToGraph(state, graph, MAIN_MODEL_LOADER); + // add LoRA support addLoRAsToGraph(state, graph, DENOISE_LATENTS); - // optionally add custom VAE - addVAEToGraph(state, graph); - // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts index 98f9458ae0..901ca8c09f 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts @@ -137,6 +137,7 @@ export const buildLinearSDXLImageToImageGraph = ( }, }, edges: [ + // Connect Model Loader to UNet, CLIP & VAE { source: { node_id: SDXL_MODEL_LOADER, @@ -207,36 +208,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'clip2', }, }, - { - source: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, - { - source: { - node_id: IMAGE_TO_LATENTS, - field: 'latents', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - }, - { - source: { - node_id: NOISE, - field: 'noise', - }, - destination: { - node_id: DENOISE_LATENTS, - field: 
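// Every edge in these graphs is a { source, destination } pair of node_id and
// field names; this patch mostly reorders them into a consistent sequence
// (model loader -> CLIP skip -> conditioning -> denoise -> decode) under
// grouping comments. A small helper in that shape (a sketch, not the app's API):
//
//   type Edge = {
//     source: { node_id: string; field: string };
//     destination: { node_id: string; field: string };
//   };
//
//   function connect(fromNode: string, fromField: string, toNode: string, toField: string): Edge {
//     return {
//       source: { node_id: fromNode, field: fromField },
//       destination: { node_id: toNode, field: toField },
//     };
//   }
//
//   // e.g. the final decode edge: connect('denoise_latents', 'latents', 'l2i', 'latents')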
'noise', - }, - }, + // Connect everything to Denoise Latents { source: { node_id: POSITIVE_CONDITIONING, @@ -257,6 +229,37 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'negative_conditioning', }, }, + { + source: { + node_id: NOISE, + field: 'noise', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'noise', + }, + }, + { + source: { + node_id: IMAGE_TO_LATENTS, + field: 'latents', + }, + destination: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + }, + // Decode Denoised Latents To Image + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts index 566eb6536e..ac859597b3 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts @@ -111,6 +111,7 @@ export const buildLinearSDXLTextToImageGraph = ( }, }, edges: [ + // Connect Model Loader to UNet, VAE & CLIP { source: { node_id: SDXL_MODEL_LOADER, @@ -171,6 +172,7 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'clip2', }, }, + // Connect everything to Denoise Latents { source: { node_id: POSITIVE_CONDITIONING, @@ -201,6 +203,7 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'noise', }, }, + // Decode Denoised Latents To Image { source: { node_id: DENOISE_LATENTS, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts index dcbbe69290..99a1ec7420 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearTextToImageGraph.ts @@ -54,12 +54,15 @@ export const buildLinearTextToImageGraph = ( } const isUsingOnnxModel = model.model_type === 'onnx'; + const modelLoaderNodeId = isUsingOnnxModel ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER; + const modelLoaderNodeType = isUsingOnnxModel ? 'onnx_model_loader' : 'main_model_loader'; + const t2lNode: DenoiseLatentsInvocation | ONNXTextToLatentsInvocation = isUsingOnnxModel ? { @@ -80,6 +83,7 @@ export const buildLinearTextToImageGraph = ( denoising_start: 0, denoising_end: 1, }; + /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the * full graph here as a template. Then use the parameters from app state and set friendlier node @@ -95,6 +99,18 @@ export const buildLinearTextToImageGraph = ( const graph: NonNullableGraph = { id: TEXT_TO_IMAGE_GRAPH, nodes: { + [modelLoaderNodeId]: { + type: modelLoaderNodeType, + id: modelLoaderNodeId, + is_intermediate: true, + model, + }, + [CLIP_SKIP]: { + type: 'clip_skip', + id: CLIP_SKIP, + skipped_layers: clipSkip, + is_intermediate: true, + }, [POSITIVE_CONDITIONING]: { type: isUsingOnnxModel ? 
'prompt_onnx' : 'compel', id: POSITIVE_CONDITIONING, @@ -116,18 +132,6 @@ export const buildLinearTextToImageGraph = ( is_intermediate: true, }, [t2lNode.id]: t2lNode, - [modelLoaderNodeId]: { - type: modelLoaderNodeType, - id: modelLoaderNodeId, - is_intermediate: true, - model, - }, - [CLIP_SKIP]: { - type: 'clip_skip', - id: CLIP_SKIP, - skipped_layers: clipSkip, - is_intermediate: true, - }, [LATENTS_TO_IMAGE]: { type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', id: LATENTS_TO_IMAGE, @@ -135,16 +139,7 @@ export const buildLinearTextToImageGraph = ( }, }, edges: [ - { - source: { - node_id: modelLoaderNodeId, - field: 'clip', - }, - destination: { - node_id: CLIP_SKIP, - field: 'clip', - }, - }, + // Connect Model Loader to UNet and CLIP Skip { source: { node_id: modelLoaderNodeId, @@ -155,6 +150,17 @@ export const buildLinearTextToImageGraph = ( field: 'unet', }, }, + { + source: { + node_id: modelLoaderNodeId, + field: 'clip', + }, + destination: { + node_id: CLIP_SKIP, + field: 'clip', + }, + }, + // Connect CLIP Skip to Conditioning { source: { node_id: CLIP_SKIP, @@ -175,6 +181,7 @@ export const buildLinearTextToImageGraph = ( field: 'clip', }, }, + // Connect everything to Denoise Latents { source: { node_id: POSITIVE_CONDITIONING, @@ -195,16 +202,6 @@ export const buildLinearTextToImageGraph = ( field: 'negative_conditioning', }, }, - { - source: { - node_id: DENOISE_LATENTS, - field: 'latents', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, { source: { node_id: NOISE, @@ -215,6 +212,17 @@ export const buildLinearTextToImageGraph = ( field: 'noise', }, }, + // Decode Denoised Latents To Image + { + source: { + node_id: DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'latents', + }, + }, ], }; @@ -250,12 +258,12 @@ export const buildLinearTextToImageGraph = ( }, }); - // add LoRA support - addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); - // optionally add custom VAE addVAEToGraph(state, graph, modelLoaderNodeId); + // add LoRA support + addLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); + // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); From f296e5c41e67a566176ff155a99f84d150e61ab5 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 20:54:30 +1200 Subject: [PATCH 31/67] wip: Remove MaskBlur / Adjust color correction --- invokeai/app/invocations/image.py | 49 +---------- .../listeners/userInvokedCanvas.ts | 3 +- .../graphBuilders/buildCanvasInpaintGraph.ts | 19 +++-- .../graphBuilders/buildCanvasOutpaintGraph.ts | 12 +-- .../buildCanvasSDXLInpaintGraph.ts | 19 +++-- .../buildCanvasSDXLOutpaintGraph.ts | 12 +-- .../frontend/web/src/services/api/schema.d.ts | 83 +++++-------------- 7 files changed, 67 insertions(+), 130 deletions(-) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 846812435d..2c47020207 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -697,8 +697,8 @@ class MaskCombineInvocation(BaseInvocation, PILInvocationConfig): type: Literal["mask_combine"] = "mask_combine" # Inputs - mask1: Optional[ImageField] = Field(default=None, description="The first mask to combine") - mask2: Optional[ImageField] = Field(default=None, description="The second image to combine") + mask1: ImageField = Field(default=None, description="The first mask to combine") + mask2: ImageField 
= Field(default=None, description="The second image to combine") # fmt: on class Config(InvocationConfig): @@ -706,7 +706,7 @@ class MaskCombineInvocation(BaseInvocation, PILInvocationConfig): "ui": {"title": "Mask Combine", "tags": ["mask", "combine"]}, } - def invoke(self, context: InvocationContext) -> MaskOutput: + def invoke(self, context: InvocationContext) -> ImageOutput: mask1 = context.services.images.get_pil_image(self.mask1.image_name).convert("L") mask2 = context.services.images.get_pil_image(self.mask2.image_name).convert("L") @@ -721,48 +721,7 @@ class MaskCombineInvocation(BaseInvocation, PILInvocationConfig): is_intermediate=self.is_intermediate, ) - return MaskOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) - - -class MaskBlurInvocation(BaseInvocation, PILInvocationConfig): - """Blurs a mask""" - - # fmt: off - type: Literal["mask_blur"] = "mask_blur" - - # Inputs - mask: Optional[ImageField] = Field(default=None, description="The mask image to blur") - radius: float = Field(default=8.0, ge=0, description="The blur radius") - blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur") - # fmt: on - - class Config(InvocationConfig): - schema_extra = { - "ui": {"title": "Mask Blur", "tags": ["mask", "blur"]}, - } - - def invoke(self, context: InvocationContext) -> MaskOutput: - mask = context.services.images.get_pil_image(self.mask.image_name) - - blur = ( - ImageFilter.GaussianBlur(self.radius) if self.blur_type == "gaussian" else ImageFilter.BoxBlur(self.radius) - ) - blur_mask = mask.filter(blur) - - image_dto = context.services.images.create( - image=blur_mask, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - ) - - return MaskOutput( + return ImageOutput( image=ImageField(image_name=image_dto.image_name), width=image_dto.width, height=image_dto.height, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts index 39bd742d7d..dbcb87f3cf 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts @@ -1,6 +1,7 @@ import { logger } from 'app/logging/logger'; import { userInvoked } from 'app/store/actions'; import openBase64ImageInTab from 'common/util/openBase64ImageInTab'; +import { parseify } from 'common/util/serialize'; import { canvasSessionIdChanged, stagingAreaInitialized, @@ -15,7 +16,6 @@ import { imagesApi } from 'services/api/endpoints/images'; import { sessionCreated } from 'services/api/thunks/session'; import { ImageDTO } from 'services/api/types'; import { startAppListening } from '..'; -import { parseify } from 'common/util/serialize'; /** * This listener is responsible invoking the canvas. 
This involves a number of steps: @@ -123,6 +123,7 @@ export const addUserInvokedCanvasListener = () => { log.debug({ graph: parseify(graph) }, `Canvas graph built`); // currently this action is just listened to for logging + console.log(canvasGraphBuilt(graph)); dispatch(canvasGraphBuilt(graph)); // Create the session, store the request id diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 2ecc858aa4..7e6ae2f474 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -105,12 +105,12 @@ export const buildCanvasInpaintGraph = ( prompt: negativePrompt, }, [MASK_BLUR]: { - type: 'mask_blur', + type: 'img_blur', id: MASK_BLUR, is_intermediate: true, + image: canvasMaskImage, radius: maskBlur, blur_type: maskBlurMethod, - mask: canvasMaskImage, }, [INPAINT_IMAGE]: { type: 'i2l', @@ -148,7 +148,6 @@ export const buildCanvasInpaintGraph = ( id: COLOR_CORRECT, is_intermediate: true, reference: canvasInitImage, - mask: canvasMaskImage, }, [INPAINT_FINAL_IMAGE]: { type: 'img_paste', @@ -258,7 +257,7 @@ export const buildCanvasInpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT, @@ -308,6 +307,16 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, // Paste Back Onto Original Image { source: { @@ -322,7 +331,7 @@ export const buildCanvasInpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT_FINAL_IMAGE, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index 7949adfe39..2513144871 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -141,7 +141,7 @@ export const buildCanvasOutpaintGraph = ( mask2: canvasMaskImage, }, [MASK_BLUR]: { - type: 'mask_blur', + type: 'img_blur', id: MASK_BLUR, is_intermediate: true, radius: maskBlur, @@ -271,11 +271,11 @@ export const buildCanvasOutpaintGraph = ( { source: { node_id: MASK_COMBINE, - field: 'mask', + field: 'image', }, destination: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, }, // Plug Everything Into Inpaint Node @@ -322,7 +322,7 @@ export const buildCanvasOutpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT, @@ -385,7 +385,7 @@ export const buildCanvasOutpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: COLOR_CORRECT, @@ -416,7 +416,7 @@ export const buildCanvasOutpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT_FINAL_IMAGE, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 52a159fb3a..066f049e57 100644 --- 
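// With MaskBlurInvocation removed on the backend, the builders reuse the
// generic image blur node for masks: the node type changes from 'mask_blur' to
// 'img_blur', its input from `mask` to `image`, and downstream edges read the
// node's `image` output. A before/after sketch; the ImageField shape here is
// an assumption for illustration:
//
//   type ImageField = { image_name: string };
//
//   const canvasMaskImage: ImageField = { image_name: 'canvas-mask.png' };
//
//   // before: { type: 'mask_blur', mask: canvasMaskImage, radius, blur_type }
//   const maskBlurNode = {
//     type: 'img_blur',
//     id: 'mask_blur', // the MASK_BLUR node id itself is unchanged
//     is_intermediate: true,
//     image: canvasMaskImage, // was `mask`
//     radius: 8, // maskBlur
//     blur_type: 'gaussian', // maskBlurMethod
//   };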
a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -109,12 +109,12 @@ export const buildCanvasSDXLInpaintGraph = ( : negativeStylePrompt, }, [MASK_BLUR]: { - type: 'mask_blur', + type: 'img_blur', id: MASK_BLUR, is_intermediate: true, radius: maskBlur, blur_type: maskBlurMethod, - mask: canvasMaskImage, + image: canvasMaskImage, }, [INPAINT_IMAGE]: { type: 'i2l', @@ -152,7 +152,6 @@ export const buildCanvasSDXLInpaintGraph = ( id: COLOR_CORRECT, is_intermediate: true, reference: canvasInitImage, - mask: canvasMaskImage, }, [INPAINT_FINAL_IMAGE]: { type: 'img_paste', @@ -271,7 +270,7 @@ export const buildCanvasSDXLInpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT, @@ -321,6 +320,16 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, // Paste them back on original image { source: { @@ -335,7 +344,7 @@ export const buildCanvasSDXLInpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT_FINAL_IMAGE, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 3ed12cd935..4d0a6feb55 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -152,7 +152,7 @@ export const buildCanvasSDXLOutpaintGraph = ( mask2: canvasMaskImage, }, [MASK_BLUR]: { - type: 'mask_blur', + type: 'img_blur', id: MASK_BLUR, is_intermediate: true, radius: maskBlur, @@ -284,11 +284,11 @@ export const buildCanvasSDXLOutpaintGraph = ( { source: { node_id: MASK_COMBINE, - field: 'mask', + field: 'image', }, destination: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, }, // Connect Everything To Inpaint @@ -335,7 +335,7 @@ export const buildCanvasSDXLOutpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT, @@ -398,7 +398,7 @@ export const buildCanvasSDXLOutpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: COLOR_CORRECT, @@ -429,7 +429,7 @@ export const buildCanvasSDXLOutpaintGraph = ( { source: { node_id: MASK_BLUR, - field: 'mask', + field: 'image', }, destination: { node_id: INPAINT_FINAL_IMAGE, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 84053db29c..a2076557b8 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1586,7 +1586,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | 
components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskBlurInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | 
components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; + [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | 
components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]) | undefined; }; /** * Edges @@ -3273,47 +3273,6 @@ export type components = { */ model: components["schemas"]["MainModelField"]; }; - /** - * MaskBlurInvocation - * @description Blurs a mask - */ - MaskBlurInvocation: { - /** - * Id - * @description The id of this node. Must be unique among all nodes. - */ - id: string; - /** - * Is Intermediate - * @description Whether or not this node is an intermediate node. - * @default false - */ - is_intermediate?: boolean; - /** - * Type - * @default mask_blur - * @enum {string} - */ - type?: "mask_blur"; - /** - * Mask - * @description The mask image to blur - */ - mask?: components["schemas"]["ImageField"]; - /** - * Radius - * @description The blur radius - * @default 8 - */ - radius?: number; - /** - * Blur Type - * @description The type of blur - * @default gaussian - * @enum {string} - */ - blur_type?: "gaussian" | "box"; - }; /** * MaskCombineInvocation * @description Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`. @@ -5821,6 +5780,24 @@ export type components = { */ image?: components["schemas"]["ImageField"]; }; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; + /** + * StableDiffusion2ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * ControlNetModelFormat + * @description An enumeration. + * @enum {string} + */ + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusion1ModelFormat * @description An enumeration. @@ -5833,24 +5810,6 @@ export type components = { * @enum {string} */ StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionOnnxModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; - /** - * ControlNetModelFormat - * @description An enumeration. - * @enum {string} - */ - ControlNetModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusion2ModelFormat - * @description An enumeration. 
- * @enum {string} - */ - StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -5961,7 +5920,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskBlurInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | 
components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | 
components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { @@ -5998,7 +5957,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | 
components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["MaskBlurInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; + "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | 
components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | 
components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"]; }; }; responses: { From 27bd127fb0ec793a6ca8b81ad38060d4901976cf Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 21:10:30 +1200 Subject: [PATCH 32/67] fix: Do not add anything but final output to staging area --- .../listeners/socketio/socketInvocationComplete.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts index 30e0bedb54..a5e22de6e4 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts @@ -7,6 +7,7 @@ import { imageSelected, } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; +import { INPAINT_FINAL_IMAGE } from 'features/nodes/util/graphBuilders/constants'; import { progressImageSet } from 'features/system/store/systemSlice'; import { imagesApi } from 'services/api/endpoints/images'; import { isImageOutput } from 'services/api/guards'; @@ -52,7 +53,9 @@ export const addInvocationCompleteEventListener = () => { // Add canvas images to the staging area if ( - graph_execution_state_id === canvas.layerState.stagingArea.sessionId + graph_execution_state_id === + canvas.layerState.stagingArea.sessionId && + data.source_node_id === INPAINT_FINAL_IMAGE ) { dispatch(addImageToStagingArea(imageDTO)); } From ad96c4115673fcb88de17a779ac73b18887fc558 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 22:04:43 +1200 Subject: [PATCH 33/67] feat: Add Canvas Output node to all Canvas Graphs --- .../socketio/socketInvocationComplete.ts | 4 ++-- .../buildCanvasImageToImageGraph.ts | 16 ++++++++++++++++ .../graphBuilders/buildCanvasInpaintGraph.ts | 10 +++++----- .../graphBuilders/buildCanvasOutpaintGraph.ts | 12 ++++++------ .../buildCanvasSDXLImageToImageGraph.ts | 16 ++++++++++++++++ .../graphBuilders/buildCanvasSDXLInpaintGraph.ts | 10 +++++----- .../buildCanvasSDXLOutpaintGraph.ts | 12 ++++++------ .../buildCanvasSDXLTextToImageGraph.ts | 16 ++++++++++++++++ .../graphBuilders/buildCanvasTextToImageGraph.ts | 16 ++++++++++++++++ .../nodes/util/graphBuilders/constants.ts | 1 + 10 files changed, 89 insertions(+), 24 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts index a5e22de6e4..5b3b9424b6 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts @@ -7,7 +7,7 @@ import { 
imageSelected, } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; -import { INPAINT_FINAL_IMAGE } from 'features/nodes/util/graphBuilders/constants'; +import { CANVAS_OUTPUT } from 'features/nodes/util/graphBuilders/constants'; import { progressImageSet } from 'features/system/store/systemSlice'; import { imagesApi } from 'services/api/endpoints/images'; import { isImageOutput } from 'services/api/guards'; @@ -55,7 +55,7 @@ export const addInvocationCompleteEventListener = () => { if ( graph_execution_state_id === canvas.layerState.stagingArea.sessionId && - data.source_node_id === INPAINT_FINAL_IMAGE + [CANVAS_OUTPUT].includes(data.source_node_id) ) { dispatch(addImageToStagingArea(imageDTO)); } diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index a4cd3b2add..e8a5b46639 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -14,6 +14,7 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, IMAGE_TO_IMAGE_GRAPH, @@ -129,6 +130,10 @@ export const buildCanvasImageToImageGraph = ( id: LATENTS_TO_IMAGE, is_intermediate: !shouldAutoSave, }, + [CANVAS_OUTPUT]: { + type: 'load_image', + id: CANVAS_OUTPUT, + }, }, edges: [ // Connect Model Loader to CLIP Skip and UNet @@ -225,6 +230,17 @@ export const buildCanvasImageToImageGraph = ( field: 'latents', }, }, + // Canvas Output + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 7e6ae2f474..6ecb73b992 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -12,10 +12,10 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, CLIP_SKIP, COLOR_CORRECT, INPAINT, - INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, ITERATE, @@ -149,9 +149,9 @@ export const buildCanvasInpaintGraph = ( is_intermediate: true, reference: canvasInitImage, }, - [INPAINT_FINAL_IMAGE]: { + [CANVAS_OUTPUT]: { type: 'img_paste', - id: INPAINT_FINAL_IMAGE, + id: CANVAS_OUTPUT, is_intermediate: true, base_image: canvasInitImage, }, @@ -324,7 +324,7 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'image', }, }, @@ -334,7 +334,7 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'mask', }, }, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index 
2513144871..2ed0cc952a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -14,10 +14,10 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, CLIP_SKIP, COLOR_CORRECT, INPAINT, - INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, INPAINT_INFILL, @@ -183,9 +183,9 @@ export const buildCanvasOutpaintGraph = ( id: COLOR_CORRECT, is_intermediate: true, }, - [INPAINT_FINAL_IMAGE]: { + [CANVAS_OUTPUT]: { type: 'img_paste', - id: INPAINT_FINAL_IMAGE, + id: CANVAS_OUTPUT, is_intermediate: true, }, [RANGE_OF_SIZE]: { @@ -399,7 +399,7 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'base_image', }, }, @@ -409,7 +409,7 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'image', }, }, @@ -419,7 +419,7 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'mask', }, }, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index 80f3d671c9..5b198e0bc3 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -15,6 +15,7 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, DENOISE_LATENTS, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, @@ -136,6 +137,10 @@ export const buildCanvasSDXLImageToImageGraph = ( id: LATENTS_TO_IMAGE, is_intermediate: !shouldAutoSave, }, + [CANVAS_OUTPUT]: { + type: 'load_image', + id: CANVAS_OUTPUT, + }, }, edges: [ // Connect Model Loader To UNet & CLIP @@ -241,6 +246,17 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'latents', }, }, + // Canvas Output + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 066f049e57..a8e190bcd1 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -13,9 +13,9 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, COLOR_CORRECT, INPAINT, - INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, ITERATE, @@ -153,9 +153,9 @@ export const buildCanvasSDXLInpaintGraph = ( is_intermediate: true, reference: canvasInitImage, }, - [INPAINT_FINAL_IMAGE]: { + [CANVAS_OUTPUT]: { type: 'img_paste', - id: INPAINT_FINAL_IMAGE, + id: CANVAS_OUTPUT, 
is_intermediate: true, base_image: canvasInitImage, }, @@ -337,7 +337,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'image', }, }, @@ -347,7 +347,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'mask', }, }, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 4d0a6feb55..da7eb6b6d3 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -15,9 +15,9 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, COLOR_CORRECT, INPAINT, - INPAINT_FINAL_IMAGE, INPAINT_GRAPH, INPAINT_IMAGE, INPAINT_INFILL, @@ -187,9 +187,9 @@ export const buildCanvasSDXLOutpaintGraph = ( id: COLOR_CORRECT, is_intermediate: true, }, - [INPAINT_FINAL_IMAGE]: { + [CANVAS_OUTPUT]: { type: 'img_paste', - id: INPAINT_FINAL_IMAGE, + id: CANVAS_OUTPUT, is_intermediate: true, }, [RANGE_OF_SIZE]: { @@ -412,7 +412,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'base_image', }, }, @@ -422,7 +422,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'image', }, }, @@ -432,7 +432,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT_FINAL_IMAGE, + node_id: CANVAS_OUTPUT, field: 'mask', }, }, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index 70375c1423..391cccd7a9 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -14,6 +14,7 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, DENOISE_LATENTS, LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, @@ -148,6 +149,10 @@ export const buildCanvasSDXLTextToImageGraph = ( id: LATENTS_TO_IMAGE, is_intermediate: !shouldAutoSave, }, + [CANVAS_OUTPUT]: { + type: 'load_image', + id: CANVAS_OUTPUT, + }, }, edges: [ // Connect Model Loader to UNet and CLIP @@ -243,6 +248,17 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'latents', }, }, + // Canvas Output + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 519be4e498..6301ad6c9f 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -13,6 +13,7 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, LATENTS_TO_IMAGE, @@ -141,6 +142,10 @@ export const buildCanvasTextToImageGraph = ( id: LATENTS_TO_IMAGE, is_intermediate: !shouldAutoSave, }, + [CANVAS_OUTPUT]: { + type: 'load_image', + id: CANVAS_OUTPUT, + }, }, edges: [ // Connect Model Loader to UNet & CLIP Skip @@ -227,6 +232,17 @@ export const buildCanvasTextToImageGraph = ( field: 'latents', }, }, + // Canvas Output + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, ], }; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 076c92eb76..24bf1b404d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -17,6 +17,7 @@ export const CLIP_SKIP = 'clip_skip'; export const IMAGE_TO_LATENTS = 'image_to_latents'; export const LATENTS_TO_LATENTS = 'latents_to_latents'; export const RESIZE = 'resize_image'; +export const CANVAS_OUTPUT = 'canvas_output'; export const INPAINT = 'inpaint'; export const INPAINT_SEAM_FIX = 'inpaint_seam_fix'; export const INPAINT_IMAGE = 'inpaint_image'; From 746c7c59ffd943f2dffc8a12da20388e3c6213cf Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 12 Aug 2023 22:39:30 +1200 Subject: [PATCH 34/67] fix: remove extra node for canvas output catch --- .../nodes/util/graphBuilders/addVAEToGraph.ts | 21 ++++++++++---- .../buildCanvasImageToImageGraph.ts | 28 ++++--------------- .../graphBuilders/buildCanvasInpaintGraph.ts | 4 +-- .../graphBuilders/buildCanvasOutpaintGraph.ts | 4 +-- .../buildCanvasSDXLImageToImageGraph.ts | 19 +++++-------- .../buildCanvasSDXLInpaintGraph.ts | 4 +-- .../buildCanvasSDXLOutpaintGraph.ts | 4 +-- .../buildCanvasSDXLTextToImageGraph.ts | 28 ++++--------------- .../buildCanvasTextToImageGraph.ts | 28 ++++--------------- 9 files changed, 49 insertions(+), 91 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index 85e8fed572..384eae8c38 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -2,6 +2,7 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { MetadataAccumulatorInvocation } from 'services/api/types'; import { + CANVAS_OUTPUT, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, INPAINT_GRAPH, @@ -35,11 +36,21 @@ export const addVAEToGraph = ( }; } const isOnnxModel = modelLoaderNodeId == ONNX_MODEL_LOADER; - if ( - graph.id === TEXT_TO_IMAGE_GRAPH || - graph.id === IMAGE_TO_IMAGE_GRAPH || - graph.id === INPAINT_GRAPH - ) { + + if (graph.id === TEXT_TO_IMAGE_GRAPH || graph.id === IMAGE_TO_IMAGE_GRAPH) { + graph.edges.push({ + source: { + node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, + field: isAutoVae && isOnnxModel ? 
'vae_decoder' : 'vae', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'vae', + }, + }); + } + + if (graph.id === INPAINT_GRAPH) { graph.edges.push({ source: { node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index e8a5b46639..04636a19b7 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -19,7 +19,6 @@ import { DENOISE_LATENTS, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, - LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -125,14 +124,10 @@ export const buildCanvasImageToImageGraph = ( denoising_start: 1 - strength, denoising_end: 1, }, - [LATENTS_TO_IMAGE]: { - type: 'l2i', - id: LATENTS_TO_IMAGE, - is_intermediate: !shouldAutoSave, - }, [CANVAS_OUTPUT]: { - type: 'load_image', + type: 'l2i', id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, }, }, edges: [ @@ -225,20 +220,9 @@ export const buildCanvasImageToImageGraph = ( node_id: DENOISE_LATENTS, field: 'latents', }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, - // Canvas Output - { - source: { - node_id: LATENTS_TO_IMAGE, - field: 'image', - }, destination: { node_id: CANVAS_OUTPUT, - field: 'image', + field: 'latents', }, }, ], @@ -339,7 +323,7 @@ export const buildCanvasImageToImageGraph = ( field: 'metadata', }, destination: { - node_id: LATENTS_TO_IMAGE, + node_id: CANVAS_OUTPUT, field: 'metadata', }, }); @@ -359,12 +343,12 @@ export const buildCanvasImageToImageGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! - addWatermarkerToGraph(state, graph); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 6ecb73b992..a1e5c27083 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -373,12 +373,12 @@ export const buildCanvasInpaintGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph, INPAINT); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! 
- addWatermarkerToGraph(state, graph, INPAINT); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index 2ed0cc952a..3f4737134a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -458,12 +458,12 @@ export const buildCanvasOutpaintGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph, INPAINT); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! - addWatermarkerToGraph(state, graph, INPAINT); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index 5b198e0bc3..f81bd8376b 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -19,7 +19,6 @@ import { DENOISE_LATENTS, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, - LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, @@ -132,14 +131,10 @@ export const buildCanvasSDXLImageToImageGraph = ( : 1 - strength, denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, }, - [LATENTS_TO_IMAGE]: { - type: 'l2i', - id: LATENTS_TO_IMAGE, - is_intermediate: !shouldAutoSave, - }, [CANVAS_OUTPUT]: { - type: 'load_image', + type: 'l2i', id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, }, }, edges: [ @@ -242,14 +237,14 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'latents', }, destination: { - node_id: LATENTS_TO_IMAGE, + node_id: CANVAS_OUTPUT, field: 'latents', }, }, // Canvas Output { source: { - node_id: LATENTS_TO_IMAGE, + node_id: CANVAS_OUTPUT, field: 'image', }, destination: { @@ -355,7 +350,7 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'metadata', }, destination: { - node_id: LATENTS_TO_IMAGE, + node_id: CANVAS_OUTPUT, field: 'metadata', }, }); @@ -380,12 +375,12 @@ export const buildCanvasSDXLImageToImageGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! 
- addWatermarkerToGraph(state, graph); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index a8e190bcd1..091778a569 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -391,12 +391,12 @@ export const buildCanvasSDXLInpaintGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph, INPAINT); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! - addWatermarkerToGraph(state, graph, INPAINT); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index da7eb6b6d3..5db564ba6c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -476,12 +476,12 @@ export const buildCanvasSDXLOutpaintGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph, INPAINT); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! - addWatermarkerToGraph(state, graph, INPAINT); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index 391cccd7a9..f1559ace5c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -16,7 +16,6 @@ import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, DENOISE_LATENTS, - LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, @@ -144,14 +143,10 @@ export const buildCanvasSDXLTextToImageGraph = ( use_cpu, }, [t2lNode.id]: t2lNode, - [LATENTS_TO_IMAGE]: { - type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', - id: LATENTS_TO_IMAGE, - is_intermediate: !shouldAutoSave, - }, [CANVAS_OUTPUT]: { - type: 'load_image', + type: isUsingOnnxModel ? 
'l2i_onnx' : 'l2i', id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, }, }, edges: [ @@ -243,20 +238,9 @@ export const buildCanvasSDXLTextToImageGraph = ( node_id: DENOISE_LATENTS, field: 'latents', }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, - // Canvas Output - { - source: { - node_id: LATENTS_TO_IMAGE, - field: 'image', - }, destination: { node_id: CANVAS_OUTPUT, - field: 'image', + field: 'latents', }, }, ], @@ -289,7 +273,7 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'metadata', }, destination: { - node_id: LATENTS_TO_IMAGE, + node_id: CANVAS_OUTPUT, field: 'metadata', }, }); @@ -314,12 +298,12 @@ export const buildCanvasSDXLTextToImageGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! - addWatermarkerToGraph(state, graph); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 6301ad6c9f..8d25f13a47 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -16,7 +16,6 @@ import { CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, - LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -137,14 +136,10 @@ export const buildCanvasTextToImageGraph = ( use_cpu, }, [t2lNode.id]: t2lNode, - [LATENTS_TO_IMAGE]: { - type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', - id: LATENTS_TO_IMAGE, - is_intermediate: !shouldAutoSave, - }, [CANVAS_OUTPUT]: { - type: 'load_image', + type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', id: CANVAS_OUTPUT, + is_intermediate: !shouldAutoSave, }, }, edges: [ @@ -227,20 +222,9 @@ export const buildCanvasTextToImageGraph = ( node_id: DENOISE_LATENTS, field: 'latents', }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'latents', - }, - }, - // Canvas Output - { - source: { - node_id: LATENTS_TO_IMAGE, - field: 'image', - }, destination: { node_id: CANVAS_OUTPUT, - field: 'image', + field: 'latents', }, }, ], @@ -273,7 +257,7 @@ export const buildCanvasTextToImageGraph = ( field: 'metadata', }, destination: { - node_id: LATENTS_TO_IMAGE, + node_id: CANVAS_OUTPUT, field: 'metadata', }, }); @@ -293,12 +277,12 @@ export const buildCanvasTextToImageGraph = ( // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { // must add before watermarker! - addNSFWCheckerToGraph(state, graph); + addNSFWCheckerToGraph(state, graph, CANVAS_OUTPUT); } if (state.system.shouldUseWatermarker) { // must add after nsfw checker! 
- addWatermarkerToGraph(state, graph); + addWatermarkerToGraph(state, graph, CANVAS_OUTPUT); } return graph; From 55d27f71a376bbc31a04706883ccbe4cbed6c568 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 00:51:10 +1200 Subject: [PATCH 35/67] feat: Give each graph its own unique id --- .../nodes/util/graphBuilders/addVAEToGraph.ts | 71 ++++++++++++------- .../buildCanvasImageToImageGraph.ts | 4 +- .../graphBuilders/buildCanvasInpaintGraph.ts | 4 +- .../graphBuilders/buildCanvasOutpaintGraph.ts | 4 +- .../buildCanvasSDXLImageToImageGraph.ts | 4 +- .../buildCanvasSDXLInpaintGraph.ts | 4 +- .../buildCanvasSDXLOutpaintGraph.ts | 4 +- .../buildCanvasSDXLTextToImageGraph.ts | 4 +- .../buildCanvasTextToImageGraph.ts | 4 +- .../nodes/util/graphBuilders/constants.ts | 7 +- 10 files changed, 67 insertions(+), 43 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index 384eae8c38..7cc9cead42 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -2,10 +2,13 @@ import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { MetadataAccumulatorInvocation } from 'services/api/types'; import { + CANVAS_IMAGE_TO_IMAGE_GRAPH, + CANVAS_INPAINT_GRAPH, + CANVAS_OUTPAINT_GRAPH, CANVAS_OUTPUT, + CANVAS_TEXT_TO_IMAGE_GRAPH, IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, - INPAINT_GRAPH, INPAINT_IMAGE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, @@ -38,19 +41,6 @@ export const addVAEToGraph = ( const isOnnxModel = modelLoaderNodeId == ONNX_MODEL_LOADER; if (graph.id === TEXT_TO_IMAGE_GRAPH || graph.id === IMAGE_TO_IMAGE_GRAPH) { - graph.edges.push({ - source: { - node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, - field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', - }, - destination: { - node_id: CANVAS_OUTPUT, - field: 'vae', - }, - }); - } - - if (graph.id === INPAINT_GRAPH) { graph.edges.push({ source: { node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, @@ -63,7 +53,26 @@ export const addVAEToGraph = ( }); } - if (graph.id === IMAGE_TO_IMAGE_GRAPH) { + if ( + graph.id === CANVAS_TEXT_TO_IMAGE_GRAPH || + graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH + ) { + graph.edges.push({ + source: { + node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, + field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'vae', + }, + }); + } + + if ( + graph.id === IMAGE_TO_IMAGE_GRAPH || + graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH + ) { graph.edges.push({ source: { node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, @@ -76,17 +85,29 @@ export const addVAEToGraph = ( }); } - if (graph.id === INPAINT_GRAPH) { - graph.edges.push({ - source: { - node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, - field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', + if (graph.id === CANVAS_INPAINT_GRAPH || graph.id == CANVAS_OUTPAINT_GRAPH) { + graph.edges.push( + { + source: { + node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, + field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', + }, + destination: { + node_id: INPAINT_IMAGE, + field: 'vae', + }, }, - destination: { - node_id: INPAINT_IMAGE, - field: 'vae', - }, - }); + { + source: { + node_id: isAutoVae ? 
modelLoaderNodeId : VAE_LOADER, + field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae', + }, + destination: { + node_id: LATENTS_TO_IMAGE, + field: 'vae', + }, + } + ); } if (vae && metadataAccumulator) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts index 04636a19b7..a68aeef392 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasImageToImageGraph.ts @@ -14,10 +14,10 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_IMAGE_TO_IMAGE_GRAPH, CANVAS_OUTPUT, CLIP_SKIP, DENOISE_LATENTS, - IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, @@ -73,7 +73,7 @@ export const buildCanvasImageToImageGraph = ( // copy-pasted graph from node editor, filled in with state values & friendly node ids const graph: NonNullableGraph = { - id: IMAGE_TO_IMAGE_GRAPH, + id: CANVAS_IMAGE_TO_IMAGE_GRAPH, nodes: { [MAIN_MODEL_LOADER]: { type: 'main_model_loader', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index a1e5c27083..c2b8b62e42 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -12,11 +12,11 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_INPAINT_GRAPH, CANVAS_OUTPUT, CLIP_SKIP, COLOR_CORRECT, INPAINT, - INPAINT_GRAPH, INPAINT_IMAGE, ITERATE, LATENTS_TO_IMAGE, @@ -78,7 +78,7 @@ export const buildCanvasInpaintGraph = ( : shouldUseCpuNoise; const graph: NonNullableGraph = { - id: INPAINT_GRAPH, + id: CANVAS_INPAINT_GRAPH, nodes: { [MAIN_MODEL_LOADER]: { type: 'main_model_loader', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index 3f4737134a..d434f3d7cd 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -14,11 +14,11 @@ import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPAINT_GRAPH, CANVAS_OUTPUT, CLIP_SKIP, COLOR_CORRECT, INPAINT, - INPAINT_GRAPH, INPAINT_IMAGE, INPAINT_INFILL, ITERATE, @@ -102,7 +102,7 @@ export const buildCanvasOutpaintGraph = ( } const graph: NonNullableGraph = { - id: INPAINT_GRAPH, + id: CANVAS_OUTPAINT_GRAPH, nodes: { [MAIN_MODEL_LOADER]: { type: 'main_model_loader', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index f81bd8376b..f495bef983 100644 --- 
a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -15,9 +15,9 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_IMAGE_TO_IMAGE_GRAPH, CANVAS_OUTPUT, DENOISE_LATENTS, - IMAGE_TO_IMAGE_GRAPH, IMAGE_TO_LATENTS, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -81,7 +81,7 @@ export const buildCanvasSDXLImageToImageGraph = ( // copy-pasted graph from node editor, filled in with state values & friendly node ids const graph: NonNullableGraph = { - id: IMAGE_TO_IMAGE_GRAPH, + id: CANVAS_IMAGE_TO_IMAGE_GRAPH, nodes: { [SDXL_MODEL_LOADER]: { type: 'sdxl_model_loader', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 091778a569..dbb34649cc 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -13,10 +13,10 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_INPAINT_GRAPH, CANVAS_OUTPUT, COLOR_CORRECT, INPAINT, - INPAINT_GRAPH, INPAINT_IMAGE, ITERATE, LATENTS_TO_IMAGE, @@ -85,7 +85,7 @@ export const buildCanvasSDXLInpaintGraph = ( : shouldUseCpuNoise; const graph: NonNullableGraph = { - id: INPAINT_GRAPH, + id: CANVAS_INPAINT_GRAPH, nodes: { [SDXL_MODEL_LOADER]: { type: 'sdxl_model_loader', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 5db564ba6c..2abefd910e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -15,10 +15,10 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { + CANVAS_OUTPAINT_GRAPH, CANVAS_OUTPUT, COLOR_CORRECT, INPAINT, - INPAINT_GRAPH, INPAINT_IMAGE, INPAINT_INFILL, ITERATE, @@ -109,7 +109,7 @@ export const buildCanvasSDXLOutpaintGraph = ( } const graph: NonNullableGraph = { - id: INPAINT_GRAPH, + id: CANVAS_OUTPAINT_GRAPH, nodes: { [SDXL_MODEL_LOADER]: { type: 'sdxl_model_loader', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index f1559ace5c..e4c4f52160 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -15,6 +15,7 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, + CANVAS_TEXT_TO_IMAGE_GRAPH, DENOISE_LATENTS, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, @@ -22,7 +23,6 @@ import { 
ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, SDXL_MODEL_LOADER, - TEXT_TO_IMAGE_GRAPH, } from './constants'; /** @@ -108,7 +108,7 @@ export const buildCanvasSDXLTextToImageGraph = ( // copy-pasted graph from node editor, filled in with state values & friendly node ids // TODO: Actually create the graph correctly for ONNX const graph: NonNullableGraph = { - id: TEXT_TO_IMAGE_GRAPH, + id: CANVAS_TEXT_TO_IMAGE_GRAPH, nodes: { [modelLoaderNodeId]: { type: modelLoaderNodeType, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts index 8d25f13a47..4548a7e099 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasTextToImageGraph.ts @@ -14,6 +14,7 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, + CANVAS_TEXT_TO_IMAGE_GRAPH, CLIP_SKIP, DENOISE_LATENTS, MAIN_MODEL_LOADER, @@ -22,7 +23,6 @@ import { NOISE, ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, - TEXT_TO_IMAGE_GRAPH, } from './constants'; /** @@ -101,7 +101,7 @@ export const buildCanvasTextToImageGraph = ( // copy-pasted graph from node editor, filled in with state values & friendly node ids // TODO: Actually create the graph correctly for ONNX const graph: NonNullableGraph = { - id: TEXT_TO_IMAGE_GRAPH, + id: CANVAS_TEXT_TO_IMAGE_GRAPH, nodes: { [modelLoaderNodeId]: { type: modelLoaderNodeType, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index 24bf1b404d..af7b88d023 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -49,7 +49,10 @@ export const SDXL_REFINER_LATENTS_TO_LATENTS = 'l2l_sdxl_refiner'; // friendly graph ids export const TEXT_TO_IMAGE_GRAPH = 'text_to_image_graph'; +export const IMAGE_TO_IMAGE_GRAPH = 'image_to_image_graph'; +export const CANVAS_TEXT_TO_IMAGE_GRAPH = 'canvas_text_to_image_graph'; +export const CANVAS_IMAGE_TO_IMAGE_GRAPH = 'canvas_image_to_image_graph'; +export const CANVAS_INPAINT_GRAPH = 'canvas_inpaint_graph'; +export const CANVAS_OUTPAINT_GRAPH = 'canvas_outpaint_graph'; export const SDXL_TEXT_TO_IMAGE_GRAPH = 'sdxl_text_to_image_graph'; export const SDXL_IMAGE_TO_IMAGE_GRAPH = 'sxdl_image_to_image_graph'; -export const IMAGE_TO_IMAGE_GRAPH = 'image_to_image_graph'; -export const INPAINT_GRAPH = 'inpaint_graph'; From 500cd552bc8a32b847bc451fa9516e48e07fd6ea Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 01:45:03 +1200 Subject: [PATCH 36/67] feat: Make SDXL work across the board + Custom VAE Support Also a major cleanup pass to the SDXL graphs to ensure there's no ID overlap --- .../graphBuilders/addSDXLRefinerToGraph.ts | 20 ++++---- .../nodes/util/graphBuilders/addVAEToGraph.ts | 28 +++++++++-- .../buildCanvasSDXLImageToImageGraph.ts | 39 ++++++---------- .../buildCanvasSDXLInpaintGraph.ts | 32 ++++++------- .../buildCanvasSDXLOutpaintGraph.ts | 32 ++++++------- .../buildCanvasSDXLTextToImageGraph.ts | 26 +++++------ .../buildLinearSDXLImageToImageGraph.ts | 46 ++++++------------- .../buildLinearSDXLTextToImageGraph.ts | 37 +++++++-------- 
.../nodes/util/graphBuilders/constants.ts | 11 +++-- 9 files changed, 132 insertions(+), 139 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index 3faf72fb2e..ca0d574d61 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -2,12 +2,12 @@ import { RootState } from 'app/store/store'; import { MetadataAccumulatorInvocation } from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; import { - DENOISE_LATENTS, IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, - SDXL_LATENTS_TO_LATENTS, + SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, + SDXL_REFINER_DENOISE_LATENTS, SDXL_REFINER_MODEL_LOADER, SDXL_REFINER_NEGATIVE_CONDITIONING, SDXL_REFINER_POSITIVE_CONDITIONING, @@ -61,7 +61,7 @@ export const addSDXLRefinerToGraph = ( // connect the VAE back to the i2l, which we just removed in the filter // but only if we are doing l2l - if (baseNodeId === SDXL_LATENTS_TO_LATENTS) { + if (baseNodeId === SDXL_DENOISE_LATENTS) { graph.edges.push({ source: { node_id: SDXL_MODEL_LOADER, @@ -91,9 +91,9 @@ export const addSDXLRefinerToGraph = ( style: `${negativePrompt} ${negativeStylePrompt}`, aesthetic_score: refinerAestheticScore, }; - graph.nodes[DENOISE_LATENTS] = { + graph.nodes[SDXL_REFINER_DENOISE_LATENTS] = { type: 'denoise_latents', - id: DENOISE_LATENTS, + id: SDXL_REFINER_DENOISE_LATENTS, cfg_scale: refinerCFGScale, steps: refinerSteps / (1 - Math.min(refinerStart, 0.99)), scheduler: refinerScheduler, @@ -108,7 +108,7 @@ export const addSDXLRefinerToGraph = ( field: 'unet', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'unet', }, }, @@ -148,7 +148,7 @@ export const addSDXLRefinerToGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -158,7 +158,7 @@ export const addSDXLRefinerToGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -168,13 +168,13 @@ export const addSDXLRefinerToGraph = ( field: 'latents', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'latents', }, }, { source: { - node_id: DENOISE_LATENTS, + node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'latents', }, destination: { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts index 7cc9cead42..360e07062a 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addVAEToGraph.ts @@ -14,6 +14,12 @@ import { MAIN_MODEL_LOADER, METADATA_ACCUMULATOR, ONNX_MODEL_LOADER, + SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, + SDXL_CANVAS_INPAINT_GRAPH, + SDXL_CANVAS_OUTPAINT_GRAPH, + SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH, + SDXL_IMAGE_TO_IMAGE_GRAPH, + SDXL_TEXT_TO_IMAGE_GRAPH, TEXT_TO_IMAGE_GRAPH, VAE_LOADER, } from './constants'; @@ -40,7 +46,12 @@ export const addVAEToGraph = ( } const isOnnxModel = modelLoaderNodeId == ONNX_MODEL_LOADER; - if (graph.id === TEXT_TO_IMAGE_GRAPH || graph.id === IMAGE_TO_IMAGE_GRAPH) { + if ( + graph.id === 
TEXT_TO_IMAGE_GRAPH || + graph.id === IMAGE_TO_IMAGE_GRAPH || + graph.id === SDXL_TEXT_TO_IMAGE_GRAPH || + graph.id === SDXL_IMAGE_TO_IMAGE_GRAPH + ) { graph.edges.push({ source: { node_id: isAutoVae ? modelLoaderNodeId : VAE_LOADER, @@ -55,7 +66,9 @@ export const addVAEToGraph = ( if ( graph.id === CANVAS_TEXT_TO_IMAGE_GRAPH || - graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH + graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH || + graph.id === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH || + graph.id == SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH ) { graph.edges.push({ source: { @@ -71,7 +84,9 @@ export const addVAEToGraph = ( if ( graph.id === IMAGE_TO_IMAGE_GRAPH || - graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH + graph.id === SDXL_IMAGE_TO_IMAGE_GRAPH || + graph.id === CANVAS_IMAGE_TO_IMAGE_GRAPH || + graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH ) { graph.edges.push({ source: { @@ -85,7 +100,12 @@ export const addVAEToGraph = ( }); } - if (graph.id === CANVAS_INPAINT_GRAPH || graph.id == CANVAS_OUTPAINT_GRAPH) { + if ( + graph.id === CANVAS_INPAINT_GRAPH || + graph.id === CANVAS_OUTPAINT_GRAPH || + graph.id === SDXL_CANVAS_INPAINT_GRAPH || + graph.id === SDXL_CANVAS_OUTPAINT_GRAPH + ) { graph.edges.push( { source: { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index f495bef983..8e96b43fea 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -15,15 +15,15 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_IMAGE_TO_IMAGE_GRAPH, CANVAS_OUTPUT, - DENOISE_LATENTS, IMAGE_TO_LATENTS, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, RESIZE, + SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, + SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; @@ -81,7 +81,7 @@ export const buildCanvasSDXLImageToImageGraph = ( // copy-pasted graph from node editor, filled in with state values & friendly node ids const graph: NonNullableGraph = { - id: CANVAS_IMAGE_TO_IMAGE_GRAPH, + id: SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, nodes: { [SDXL_MODEL_LOADER]: { type: 'sdxl_model_loader', @@ -119,9 +119,9 @@ export const buildCanvasSDXLImageToImageGraph = ( // image_name: initialImage.image_name, // }, }, - [DENOISE_LATENTS]: { + [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', - id: DENOISE_LATENTS, + id: SDXL_DENOISE_LATENTS, is_intermediate: true, cfg_scale, scheduler, @@ -145,7 +145,7 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'unet', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'unet', }, }, @@ -196,7 +196,7 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -206,7 +206,7 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -216,7 +216,7 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'noise', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'noise', }, }, @@ -226,14 +226,14 @@ 
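The addVAEToGraph change above always selects a single VAE source per graph (the main model loader's built-in VAE when isAutoVae, otherwise the standalone VAE_LOADER) and fans its 'vae' output out to every consumer that graph contains. A hypothetical helper showing the fan-out; connectVAE and its parameters are illustrative, not the repository's API, and the Edge/Graph shapes are the same simplified stand-ins as in the earlier sketch:

type Edge = {
  source: { node_id: string; field: string };
  destination: { node_id: string; field: string };
};
type Graph = { id: string; edges: Edge[] };

// Illustrative only: route one node's 'vae' output to several consumers.
const connectVAE = (
  graph: Graph,
  vaeSourceNodeId: string,
  consumerNodeIds: string[]
): void => {
  for (const node_id of consumerNodeIds) {
    graph.edges.push({
      source: { node_id: vaeSourceNodeId, field: 'vae' },
      destination: { node_id, field: 'vae' },
    });
  }
};

// e.g. an image-to-image graph feeds both its decode and encode nodes:
// connectVAE(graph, isAutoVae ? modelLoaderNodeId : VAE_LOADER, ['latents_to_image', 'image_to_latents']);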
export const buildCanvasSDXLImageToImageGraph = ( field: 'latents', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, }, // Decode denoised latents to an image { source: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { @@ -241,17 +241,6 @@ export const buildCanvasSDXLImageToImageGraph = ( field: 'latents', }, }, - // Canvas Output - { - source: { - node_id: CANVAS_OUTPUT, - field: 'image', - }, - destination: { - node_id: CANVAS_OUTPUT, - field: 'image', - }, - }, ], }; @@ -356,11 +345,11 @@ export const buildCanvasSDXLImageToImageGraph = ( }); // add LoRA support - addLoRAsToGraph(state, graph, DENOISE_LATENTS); + addLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS); // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); } // optionally add custom VAE @@ -370,7 +359,7 @@ export const buildCanvasSDXLImageToImageGraph = ( addDynamicPromptsToGraph(state, graph); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); + addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index dbb34649cc..7b43be497c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -13,10 +13,8 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_INPAINT_GRAPH, CANVAS_OUTPUT, COLOR_CORRECT, - INPAINT, INPAINT_IMAGE, ITERATE, LATENTS_TO_IMAGE, @@ -26,6 +24,8 @@ import { POSITIVE_CONDITIONING, RANDOM_INT, RANGE_OF_SIZE, + SDXL_CANVAS_INPAINT_GRAPH, + SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; @@ -85,7 +85,7 @@ export const buildCanvasSDXLInpaintGraph = ( : shouldUseCpuNoise; const graph: NonNullableGraph = { - id: CANVAS_INPAINT_GRAPH, + id: SDXL_CANVAS_INPAINT_GRAPH, nodes: { [SDXL_MODEL_LOADER]: { type: 'sdxl_model_loader', @@ -131,9 +131,9 @@ export const buildCanvasSDXLInpaintGraph = ( use_cpu, is_intermediate: true, }, - [INPAINT]: { + [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', - id: INPAINT, + id: SDXL_DENOISE_LATENTS, is_intermediate: true, steps: steps, cfg_scale: cfg_scale, @@ -182,7 +182,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'unet', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'unet', }, }, @@ -233,7 +233,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -243,7 +243,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -253,7 +253,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'noise', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'noise', }, }, @@ -263,7 +263,7 @@ export const 
buildCanvasSDXLInpaintGraph = ( field: 'latents', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, }, @@ -273,7 +273,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'mask', }, }, @@ -301,7 +301,7 @@ export const buildCanvasSDXLInpaintGraph = ( // Decode inpainted latents to image { source: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { @@ -356,10 +356,10 @@ export const buildCanvasSDXLInpaintGraph = ( // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, INPAINT); + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); } - // Add VAE + // optionally add custom VAE addVAEToGraph(state, graph, SDXL_MODEL_LOADER); // handle seed @@ -383,10 +383,10 @@ export const buildCanvasSDXLInpaintGraph = ( } // add LoRA support - addSDXLLoRAsToGraph(state, graph, INPAINT, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, INPAINT); + addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 2abefd910e..c75ef0f800 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -15,10 +15,8 @@ import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - CANVAS_OUTPAINT_GRAPH, CANVAS_OUTPUT, COLOR_CORRECT, - INPAINT, INPAINT_IMAGE, INPAINT_INFILL, ITERATE, @@ -31,6 +29,8 @@ import { POSITIVE_CONDITIONING, RANDOM_INT, RANGE_OF_SIZE, + SDXL_CANVAS_OUTPAINT_GRAPH, + SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; @@ -109,7 +109,7 @@ export const buildCanvasSDXLOutpaintGraph = ( } const graph: NonNullableGraph = { - id: CANVAS_OUTPAINT_GRAPH, + id: SDXL_CANVAS_OUTPAINT_GRAPH, nodes: { [SDXL_MODEL_LOADER]: { type: 'sdxl_model_loader', @@ -166,9 +166,9 @@ export const buildCanvasSDXLOutpaintGraph = ( use_cpu, is_intermediate: true, }, - [INPAINT]: { + [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', - id: INPAINT, + id: SDXL_DENOISE_LATENTS, is_intermediate: true, steps: steps, cfg_scale: cfg_scale, @@ -215,7 +215,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'unet', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'unet', }, }, @@ -298,7 +298,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -308,7 +308,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -318,7 +318,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'noise', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'noise', }, }, @@ -328,7 +328,7 @@ export const 
buildCanvasSDXLOutpaintGraph = ( field: 'latents', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, }, @@ -338,7 +338,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'mask', }, }, @@ -366,7 +366,7 @@ export const buildCanvasSDXLOutpaintGraph = ( // Decode inpainted latents to image { source: { - node_id: INPAINT, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { @@ -441,10 +441,10 @@ export const buildCanvasSDXLOutpaintGraph = ( // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, INPAINT); + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); } - // Add VAE + // optionally add custom VAE addVAEToGraph(state, graph, SDXL_MODEL_LOADER); // handle seed @@ -468,10 +468,10 @@ export const buildCanvasSDXLOutpaintGraph = ( } // add LoRA support - addSDXLLoRAsToGraph(state, graph, INPAINT, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, INPAINT); + addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index e4c4f52160..c75cfd205e 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -15,13 +15,13 @@ import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { CANVAS_OUTPUT, - CANVAS_TEXT_TO_IMAGE_GRAPH, - DENOISE_LATENTS, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, ONNX_MODEL_LOADER, POSITIVE_CONDITIONING, + SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH, + SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; @@ -80,7 +80,7 @@ export const buildCanvasSDXLTextToImageGraph = ( isUsingOnnxModel ? 
{ type: 't2l_onnx', - id: DENOISE_LATENTS, + id: SDXL_DENOISE_LATENTS, is_intermediate: true, cfg_scale, scheduler, @@ -88,7 +88,7 @@ export const buildCanvasSDXLTextToImageGraph = ( } : { type: 'denoise_latents', - id: DENOISE_LATENTS, + id: SDXL_DENOISE_LATENTS, is_intermediate: true, cfg_scale, scheduler, @@ -108,7 +108,7 @@ export const buildCanvasSDXLTextToImageGraph = ( // copy-pasted graph from node editor, filled in with state values & friendly node ids // TODO: Actually create the graph correctly for ONNX const graph: NonNullableGraph = { - id: CANVAS_TEXT_TO_IMAGE_GRAPH, + id: SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH, nodes: { [modelLoaderNodeId]: { type: modelLoaderNodeType, @@ -157,7 +157,7 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'unet', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'unet', }, }, @@ -208,7 +208,7 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -218,7 +218,7 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -228,14 +228,14 @@ export const buildCanvasSDXLTextToImageGraph = ( field: 'noise', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'noise', }, }, // Decode Denoised Latents To Image { source: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { @@ -280,11 +280,11 @@ export const buildCanvasSDXLTextToImageGraph = ( // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); } // add LoRA support - addSDXLLoRAsToGraph(state, graph, DENOISE_LATENTS, modelLoaderNodeId); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, modelLoaderNodeId); // optionally add custom VAE addVAEToGraph(state, graph, modelLoaderNodeId); @@ -293,7 +293,7 @@ export const buildCanvasSDXLTextToImageGraph = ( addDynamicPromptsToGraph(state, graph); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); + addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts index 901ca8c09f..a2a5055394 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts @@ -10,9 +10,9 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - DENOISE_LATENTS, IMAGE_TO_LATENTS, LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, @@ -20,6 +20,7 @@ import { NOISE, POSITIVE_CONDITIONING, RESIZE, + SDXL_DENOISE_LATENTS, SDXL_IMAGE_TO_IMAGE_GRAPH, SDXL_MODEL_LOADER, } from 
'./constants'; @@ -115,9 +116,9 @@ export const buildLinearSDXLImageToImageGraph = ( id: LATENTS_TO_IMAGE, fp32: vaePrecision === 'fp32' ? true : false, }, - [DENOISE_LATENTS]: { + [SDXL_DENOISE_LATENTS]: { type: 'denoise_latents', - id: DENOISE_LATENTS, + id: SDXL_DENOISE_LATENTS, cfg_scale, scheduler, steps, @@ -144,30 +145,10 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'unet', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'unet', }, }, - { - source: { - node_id: SDXL_MODEL_LOADER, - field: 'vae', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'vae', - }, - }, - { - source: { - node_id: SDXL_MODEL_LOADER, - field: 'vae', - }, - destination: { - node_id: IMAGE_TO_LATENTS, - field: 'vae', - }, - }, { source: { node_id: SDXL_MODEL_LOADER, @@ -215,7 +196,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -225,7 +206,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -235,7 +216,7 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'noise', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'noise', }, }, @@ -245,14 +226,14 @@ export const buildLinearSDXLImageToImageGraph = ( field: 'latents', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, }, // Decode Denoised Latents To Image { source: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { @@ -368,13 +349,16 @@ export const buildLinearSDXLImageToImageGraph = ( }, }); - addSDXLLoRAsToGraph(state, graph, DENOISE_LATENTS, SDXL_MODEL_LOADER); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); } + // optionally add custom VAE + addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts index ac859597b3..84d27a673d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts @@ -6,14 +6,15 @@ import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; +import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { - DENOISE_LATENTS, LATENTS_TO_IMAGE, METADATA_ACCUMULATOR, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, + SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, SDXL_TEXT_TO_IMAGE_GRAPH, } from './constants'; @@ -95,9 +96,9 @@ export const buildLinearSDXLTextToImageGraph = ( height, use_cpu, }, - [DENOISE_LATENTS]: { + [SDXL_DENOISE_LATENTS]: { type: 
'denoise_latents', - id: DENOISE_LATENTS, + id: SDXL_DENOISE_LATENTS, cfg_scale, scheduler, steps, @@ -118,20 +119,10 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'unet', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'unet', }, }, - { - source: { - node_id: SDXL_MODEL_LOADER, - field: 'vae', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'vae', - }, - }, { source: { node_id: SDXL_MODEL_LOADER, @@ -179,7 +170,7 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -189,7 +180,7 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'conditioning', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -199,14 +190,14 @@ export const buildLinearSDXLTextToImageGraph = ( field: 'noise', }, destination: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'noise', }, }, // Decode Denoised Latents To Image { source: { - node_id: DENOISE_LATENTS, + node_id: SDXL_DENOISE_LATENTS, field: 'latents', }, destination: { @@ -251,13 +242,17 @@ export const buildLinearSDXLTextToImageGraph = ( }, }); - addSDXLLoRAsToGraph(state, graph, DENOISE_LATENTS, SDXL_MODEL_LOADER); - // Add Refiner if enabled if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, DENOISE_LATENTS); + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); } + // optionally add custom VAE + addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + + // add LoRA support + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); + // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index af7b88d023..f1654383dd 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -38,14 +38,13 @@ export const REALESRGAN = 'esrgan'; export const DIVIDE = 'divide'; export const SCALE = 'scale_image'; export const SDXL_MODEL_LOADER = 'sdxl_model_loader'; -export const SDXL_TEXT_TO_LATENTS = 't2l_sdxl'; -export const SDXL_LATENTS_TO_LATENTS = 'l2l_sdxl'; +export const SDXL_DENOISE_LATENTS = 'sdxl_denoise_latents'; export const SDXL_REFINER_MODEL_LOADER = 'sdxl_refiner_model_loader'; export const SDXL_REFINER_POSITIVE_CONDITIONING = 'sdxl_refiner_positive_conditioning'; export const SDXL_REFINER_NEGATIVE_CONDITIONING = 'sdxl_refiner_negative_conditioning'; -export const SDXL_REFINER_LATENTS_TO_LATENTS = 'l2l_sdxl_refiner'; +export const SDXL_REFINER_DENOISE_LATENTS = 'sdxl_refiner_denoise_latents'; // friendly graph ids export const TEXT_TO_IMAGE_GRAPH = 'text_to_image_graph'; @@ -56,3 +55,9 @@ export const CANVAS_INPAINT_GRAPH = 'canvas_inpaint_graph'; export const CANVAS_OUTPAINT_GRAPH = 'canvas_outpaint_graph'; export const SDXL_TEXT_TO_IMAGE_GRAPH = 'sdxl_text_to_image_graph'; export const SDXL_IMAGE_TO_IMAGE_GRAPH = 'sxdl_image_to_image_graph'; +export const SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH = + 'sdxl_canvas_text_to_image_graph'; +export const SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH = + 'sdxl_canvas_image_to_image_graph'; +export const SDXL_CANVAS_INPAINT_GRAPH = 'sdxl_canvas_inpaint_graph'; +export const 
SDXL_CANVAS_OUTPAINT_GRAPH = 'sdxl_canvas_outpaint_graph'; From c33acf951ee87d5495291e376c4261d41a9ffb72 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 03:53:40 +1200 Subject: [PATCH 37/67] feat: Make Refiner work with Canvas --- .../graphBuilders/addSDXLRefinerToGraph.ts | 74 +++++++++++-------- 1 file changed, 43 insertions(+), 31 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index ca0d574d61..65664c9f2d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -2,10 +2,14 @@ import { RootState } from 'app/store/store'; import { MetadataAccumulatorInvocation } from 'services/api/types'; import { NonNullableGraph } from '../../types/types'; import { - IMAGE_TO_LATENTS, + CANVAS_OUTPUT, LATENTS_TO_IMAGE, + MASK_BLUR, METADATA_ACCUMULATOR, - SDXL_DENOISE_LATENTS, + SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH, + SDXL_CANVAS_INPAINT_GRAPH, + SDXL_CANVAS_OUTPAINT_GRAPH, + SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH, SDXL_MODEL_LOADER, SDXL_REFINER_DENOISE_LATENTS, SDXL_REFINER_MODEL_LOADER, @@ -59,21 +63,6 @@ export const addSDXLRefinerToGraph = ( ) ); - // connect the VAE back to the i2l, which we just removed in the filter - // but only if we are doing l2l - if (baseNodeId === SDXL_DENOISE_LATENTS) { - graph.edges.push({ - source: { - node_id: SDXL_MODEL_LOADER, - field: 'vae', - }, - destination: { - node_id: IMAGE_TO_LATENTS, - field: 'vae', - }, - }); - } - graph.nodes[SDXL_REFINER_MODEL_LOADER] = { type: 'sdxl_refiner_model_loader', id: SDXL_REFINER_MODEL_LOADER, @@ -112,16 +101,6 @@ export const addSDXLRefinerToGraph = ( field: 'unet', }, }, - { - source: { - node_id: SDXL_REFINER_MODEL_LOADER, - field: 'vae', - }, - destination: { - node_id: LATENTS_TO_IMAGE, - field: 'vae', - }, - }, { source: { node_id: SDXL_REFINER_MODEL_LOADER, @@ -171,8 +150,25 @@ export const addSDXLRefinerToGraph = ( node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'latents', }, - }, - { + } + ); + + if ( + graph.id === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH || + graph.id === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH + ) { + graph.edges.push({ + source: { + node_id: SDXL_REFINER_DENOISE_LATENTS, + field: 'latents', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'latents', + }, + }); + } else { + graph.edges.push({ source: { node_id: SDXL_REFINER_DENOISE_LATENTS, field: 'latents', @@ -181,6 +177,22 @@ export const addSDXLRefinerToGraph = ( node_id: LATENTS_TO_IMAGE, field: 'latents', }, - } - ); + }); + } + + if ( + graph.id === SDXL_CANVAS_INPAINT_GRAPH || + graph.id === SDXL_CANVAS_OUTPAINT_GRAPH + ) { + graph.edges.push({ + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: SDXL_REFINER_DENOISE_LATENTS, + field: 'mask', + }, + }); + } }; From 28208e6f491adc02437d24e148bf430b4f185394 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 04:09:51 +1200 Subject: [PATCH 38/67] fix: Fix VAE Precision not working for SDXL Canvas Modes --- .../util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts | 2 ++ .../nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts | 2 ++ 2 files changed, 4 insertions(+) diff --git 
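The refiner patch above stops hard-wiring the refiner's output to LATENTS_TO_IMAGE and instead picks the decode node by graph id: SDXL canvas text-to-image and image-to-image graphs decode through CANVAS_OUTPUT, every other graph keeps LATENTS_TO_IMAGE, and the two canvas masking graphs additionally route MASK_BLUR's image into the refiner's mask input. A reduced sketch of that dispatch; the function names are illustrative, and the CANVAS_OUTPUT / LATENTS_TO_IMAGE id values are assumed for the sketch:

const SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH = 'sdxl_canvas_text_to_image_graph';
const SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH = 'sdxl_canvas_image_to_image_graph';
const SDXL_CANVAS_INPAINT_GRAPH = 'sdxl_canvas_inpaint_graph';
const SDXL_CANVAS_OUTPAINT_GRAPH = 'sdxl_canvas_outpaint_graph';
const CANVAS_OUTPUT = 'canvas_output'; // id value assumed for this sketch
const LATENTS_TO_IMAGE = 'latents_to_image'; // id value assumed for this sketch

// Illustrative only: which node should receive the refiner's denoised latents?
const refinerLatentsDestination = (graphId: string): string =>
  graphId === SDXL_CANVAS_TEXT_TO_IMAGE_GRAPH ||
  graphId === SDXL_CANVAS_IMAGE_TO_IMAGE_GRAPH
    ? CANVAS_OUTPUT // canvas t2i/i2i decode via their CANVAS_OUTPUT l2i node
    : LATENTS_TO_IMAGE; // all other graphs keep the LATENTS_TO_IMAGE decode node

// Illustrative only: inpaint/outpaint graphs also need a mask on the refiner.
const refinerNeedsMask = (graphId: string): boolean =>
  graphId === SDXL_CANVAS_INPAINT_GRAPH ||
  graphId === SDXL_CANVAS_OUTPAINT_GRAPH;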
a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index 8e96b43fea..0342d3e65c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -42,6 +42,7 @@ export const buildCanvasSDXLImageToImageGraph = ( cfgScale: cfg_scale, scheduler, steps, + vaePrecision, clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, @@ -135,6 +136,7 @@ export const buildCanvasSDXLImageToImageGraph = ( type: 'l2i', id: CANVAS_OUTPUT, is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, }, }, edges: [ diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index c75cfd205e..da27ace3b0 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -39,6 +39,7 @@ export const buildCanvasSDXLTextToImageGraph = ( cfgScale: cfg_scale, scheduler, steps, + vaePrecision, clipSkip, shouldUseCpuNoise, shouldUseNoiseSettings, @@ -147,6 +148,7 @@ export const buildCanvasSDXLTextToImageGraph = ( type: isUsingOnnxModel ? 'l2i_onnx' : 'l2i', id: CANVAS_OUTPUT, is_intermediate: !shouldAutoSave, + fp32: vaePrecision === 'fp32' ? true : false, }, }, edges: [ From 29f1c6dc82e508e9a56fc957e52d344148cc9c75 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 04:23:52 +1200 Subject: [PATCH 39/67] fix: Image To Image FP32 Fix for Canvas SDXL --- .../nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index 0342d3e65c..dac605580d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -115,6 +115,7 @@ export const buildCanvasSDXLImageToImageGraph = ( type: 'i2l', id: IMAGE_TO_LATENTS, is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, // must be set manually later, bc `fit` parameter may require a resize node inserted // image: { // image_name: initialImage.image_name, From fcf7f4ac77f7bc3760f28c32855f5187880535e0 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 04:27:38 +1200 Subject: [PATCH 40/67] feat: Add SDXL ControlNet To Linear UI --- .../util/graphBuilders/buildLinearSDXLImageToImageGraph.ts | 4 ++++ .../util/graphBuilders/buildLinearSDXLTextToImageGraph.ts | 4 ++++ .../sdxl/components/SDXLImageToImageTabParameters.tsx | 4 +++- .../features/sdxl/components/SDXLTextToImageTabParameters.tsx | 4 +++- 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts index a2a5055394..0d30fe1c63 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts @@ -6,6 +6,7 @@ import { ImageResizeInvocation, ImageToLatentsInvocation, } from 'services/api/types'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; @@ -359,6 +360,9 @@ export const buildLinearSDXLImageToImageGraph = ( // optionally add custom VAE addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); + // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts index 84d27a673d..595b6f47cd 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts @@ -2,6 +2,7 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { initialGenerationState } from 'features/parameters/store/generationSlice'; +import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph'; @@ -253,6 +254,9 @@ export const buildLinearSDXLTextToImageGraph = ( // add LoRA support addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); + // add controlnet, mutating `graph` + addControlNetToLinearGraph(state, graph, SDXL_DENOISE_LATENTS); + // add dynamic prompts - also sets up core iteration and seed addDynamicPromptsToGraph(state, graph); diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx index edc92a56c8..a6ee21ab68 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx +++ 
b/invokeai/frontend/web/src/features/sdxl/components/SDXLImageToImageTabParameters.tsx @@ -1,10 +1,11 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/ParamDynamicPromptsCollapse'; +import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; +import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons'; import ParamSDXLPromptArea from './ParamSDXLPromptArea'; import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; import SDXLImageToImageTabCoreParameters from './SDXLImageToImageTabCoreParameters'; -import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; const SDXLImageToImageTabParameters = () => { return ( @@ -13,6 +14,7 @@ const SDXLImageToImageTabParameters = () => { + <ParamControlNetCollapse /> diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx index 325fd7d881..c562951c4d 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLTextToImageTabParameters.tsx @@ -1,10 +1,11 @@ import ParamDynamicPromptsCollapse from 'features/dynamicPrompts/components/ParamDynamicPromptsCollapse'; +import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; +import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons'; import TextToImageTabCoreParameters from 'features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters'; import ParamSDXLPromptArea from './ParamSDXLPromptArea'; import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; -import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse'; const SDXLTextToImageTabParameters = () => { return ( @@ -13,6 +14,7 @@ const SDXLTextToImageTabParameters = () => { + <ParamControlNetCollapse /> From c8864e475bb9ade26a7c0f0bc5ef96cb3d2ead2f Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 04:34:15 +1200 Subject: [PATCH 41/67] fix: SDXL LoRAs not working on Canvas Image To Image --- .../util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index dac605580d..a45ffe7df5 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -9,8 +9,8 @@ import { } from 'services/api/types'; import { addControlNetToLinearGraph } from './addControlNetToLinearGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; -import { addLoRAsToGraph } from './addLoRAsToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; +import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
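The VAE-precision fixes in patches 38 and 39 above reduce to one mapping, applied to every l2i and i2l node the builders create: the committed ternary vaePrecision === 'fp32' ? true : false is equivalent to the bare comparison vaePrecision === 'fp32'. A sketch, with the VAEPrecision union assumed to match the UI state's setting:

// Assumed shape of the UI setting; illustrative only.
type VAEPrecision = 'fp16' | 'fp32';

const toFP32Flag = (vaePrecision: VAEPrecision): boolean =>
  vaePrecision === 'fp32';

// e.g. when building a decode node (field names as in the diffs above):
// graph.nodes[CANVAS_OUTPUT] = { type: 'l2i', id: CANVAS_OUTPUT, fp32: toFP32Flag(vaePrecision) };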
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addVAEToGraph } from './addVAEToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph'; @@ -348,7 +348,7 @@ export const buildCanvasSDXLImageToImageGraph = ( }); // add LoRA support - addLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS); + addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); // Add Refiner if enabled if (shouldUseSDXLRefiner) { From b35cdc05a5699d76147f5c4b0f446d86499cee80 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 20:17:23 +1200 Subject: [PATCH 42/67] feat: Scaled Processing to Inpainting & Outpainting / 1.x & SDXL --- .../listeners/userInvokedCanvas.ts | 1 - .../src/features/canvas/store/canvasSlice.ts | 2 +- .../src/features/canvas/store/canvasTypes.ts | 2 +- .../graphBuilders/buildCanvasInpaintGraph.ts | 194 +++++++++-- .../graphBuilders/buildCanvasOutpaintGraph.ts | 302 ++++++++++++++--- .../buildCanvasSDXLInpaintGraph.ts | 188 ++++++++++- .../buildCanvasSDXLOutpaintGraph.ts | 310 +++++++++++++++--- .../nodes/util/graphBuilders/constants.ts | 6 + .../frontend/web/src/services/api/types.ts | 9 + 9 files changed, 874 insertions(+), 140 deletions(-) diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts index dbcb87f3cf..cd6791cc0b 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/userInvokedCanvas.ts @@ -123,7 +123,6 @@ export const addUserInvokedCanvasListener = () => { log.debug({ graph: parseify(graph) }, `Canvas graph built`); // currently this action is just listened to for logging - console.log(canvasGraphBuilt(graph)); dispatch(canvasGraphBuilt(graph)); // Create the session, store the request id diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts index f63ab2fd67..11f829221a 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts @@ -47,7 +47,7 @@ export const initialCanvasState: CanvasState = { boundingBoxCoordinates: { x: 0, y: 0 }, boundingBoxDimensions: { width: 512, height: 512 }, boundingBoxPreviewFill: { r: 0, g: 0, b: 0, a: 0.5 }, - boundingBoxScaleMethod: 'auto', + boundingBoxScaleMethod: 'none', brushColor: { r: 90, g: 90, b: 255, a: 1 }, brushSize: 50, canvasContainerDimensions: { width: 0, height: 0 }, diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts b/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts index ba85a7e132..f2ba90b050 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts @@ -11,9 +11,9 @@ export const LAYER_NAMES = ['base', 'mask'] as const; export type CanvasLayer = (typeof LAYER_NAMES)[number]; export const BOUNDING_BOX_SCALES_DICT = [ + { label: 'None', value: 'none' }, { label: 'Auto', value: 'auto' }, { label: 'Manual', value: 'manual' }, - { label: 'None', value: 'none' }, ]; export const BOUNDING_BOX_SCALES = ['none', 'auto', 'manual'] as const; diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index c2b8b62e42..00af964350 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -2,7 +2,10 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + ImageBlurInvocation, ImageDTO, + ImageToLatentsInvocation, + NoiseInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; @@ -16,12 +19,16 @@ import { CANVAS_OUTPUT, CLIP_SKIP, COLOR_CORRECT, - INPAINT, + DENOISE_LATENTS, INPAINT_IMAGE, + INPAINT_IMAGE_RESIZE_DOWN, + INPAINT_IMAGE_RESIZE_UP, ITERATE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, MASK_BLUR, + MASK_RESIZE_DOWN, + MASK_RESIZE_UP, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -108,7 +115,6 @@ export const buildCanvasInpaintGraph = ( type: 'img_blur', id: MASK_BLUR, is_intermediate: true, - image: canvasMaskImage, radius: maskBlur, blur_type: maskBlurMethod, }, @@ -117,19 +123,16 @@ export const buildCanvasInpaintGraph = ( id: INPAINT_IMAGE, is_intermediate: true, fp32: vaePrecision === 'fp32' ? true : false, - image: canvasInitImage, }, [NOISE]: { type: 'noise', id: NOISE, - width, - height, use_cpu, is_intermediate: true, }, - [INPAINT]: { + [DENOISE_LATENTS]: { type: 'denoise_latents', - id: INPAINT, + id: DENOISE_LATENTS, is_intermediate: true, steps: steps, cfg_scale: cfg_scale, @@ -152,7 +155,7 @@ export const buildCanvasInpaintGraph = ( [CANVAS_OUTPUT]: { type: 'img_paste', id: CANVAS_OUTPUT, - is_intermediate: true, + is_intermediate: !shouldAutoSave, base_image: canvasInitImage, }, [RANGE_OF_SIZE]: { @@ -178,7 +181,7 @@ export const buildCanvasInpaintGraph = ( field: 'unet', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -220,7 +223,7 @@ export const buildCanvasInpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -230,7 +233,7 @@ export const buildCanvasInpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -240,7 +243,7 @@ export const buildCanvasInpaintGraph = ( field: 'noise', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'noise', }, }, @@ -250,7 +253,7 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'latents', }, }, @@ -260,7 +263,7 @@ export const buildCanvasInpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'mask', }, }, @@ -288,7 +291,7 @@ export const buildCanvasInpaintGraph = ( // Decode Inpainted Latents To Image { source: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -296,6 +299,155 @@ export const buildCanvasInpaintGraph = ( field: 'latents', }, }, + ], + }; + + // Handle Scale Before Processing + if (['auto', 'manual'].includes(boundingBoxScaleMethod)) { + const scaledWidth: number = scaledBoundingBoxDimensions.width; + const scaledHeight: number = scaledBoundingBoxDimensions.height; + + // Add Scaling Nodes + graph.nodes[INPAINT_IMAGE_RESIZE_UP] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: 
scaledHeight, + image: canvasInitImage, + }; + graph.nodes[MASK_RESIZE_UP] = { + type: 'img_resize', + id: MASK_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: scaledHeight, + image: canvasMaskImage, + }; + graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + graph.nodes[MASK_RESIZE_DOWN] = { + type: 'img_resize', + id: MASK_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: scaledWidth, + height: scaledHeight, + }; + + // Connect Nodes + graph.edges.push( + // Scale Inpaint Image and Mask + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: MASK_BLUR, + field: 'image', + }, + }, + // Color Correct The Inpainted Result + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + // Paste Back Onto Original Image + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'mask', + }, + } + ); + } else { + // Add Images To Nodes + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: width, + height: height, + }; + graph.nodes[INPAINT_IMAGE] = { + ...(graph.nodes[INPAINT_IMAGE] as ImageToLatentsInvocation), + image: canvasInitImage, + }; + graph.nodes[MASK_BLUR] = { + ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), + image: canvasMaskImage, + }; + + graph.edges.push( // Color Correct The Inpainted Result { source: { @@ -337,11 +489,11 @@ export const buildCanvasInpaintGraph = ( node_id: CANVAS_OUTPUT, field: 'mask', }, - }, - ], - }; + } + ); + } - // handle seed + // Handle Seed if (shouldRandomizeSeed) { // Random int node to generate the starting seed const randomIntNode: RandomIntInvocation = { @@ -365,10 +517,10 @@ export const buildCanvasInpaintGraph = ( addVAEToGraph(state, graph, MAIN_MODEL_LOADER); // add LoRA support - addLoRAsToGraph(state, graph, INPAINT, MAIN_MODEL_LOADER); + addLoRAsToGraph(state, graph, DENOISE_LATENTS, MAIN_MODEL_LOADER); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, INPAINT); + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index d434f3d7cd..2eb70884cf 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -2,9 +2,12 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + ImageBlurInvocation, ImageDTO, + ImageToLatentsInvocation, InfillPatchmatchInvocation, InfillTileInvocation, + NoiseInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; @@ -18,15 +21,20 @@ import { CANVAS_OUTPUT, CLIP_SKIP, COLOR_CORRECT, - INPAINT, + DENOISE_LATENTS, INPAINT_IMAGE, + INPAINT_IMAGE_RESIZE_DOWN, + INPAINT_IMAGE_RESIZE_UP, INPAINT_INFILL, + INPAINT_INFILL_RESIZE_DOWN, ITERATE, LATENTS_TO_IMAGE, MAIN_MODEL_LOADER, MASK_BLUR, MASK_COMBINE, MASK_FROM_ALPHA, + MASK_RESIZE_DOWN, + MASK_RESIZE_UP, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -84,23 +92,6 @@ export const buildCanvasOutpaintGraph = ( ? shouldUseCpuNoise : shouldUseCpuNoise; - let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { - type: 'infill_tile', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - tile_size: tileSize, - }; - - if (infillMethod === 'patchmatch') { - infillNode = { - type: 'infill_patchmatch', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - }; - } - const graph: NonNullableGraph = { id: CANVAS_OUTPAINT_GRAPH, nodes: { @@ -147,7 +138,12 @@ export const buildCanvasOutpaintGraph = ( radius: maskBlur, blur_type: maskBlurMethod, }, - [infillNode.id]: infillNode, + [INPAINT_INFILL]: { + type: 'infill_tile', + id: INPAINT_INFILL, + is_intermediate: true, + tile_size: tileSize, + }, [INPAINT_IMAGE]: { type: 'i2l', id: INPAINT_IMAGE, @@ -157,14 +153,12 @@ export const buildCanvasOutpaintGraph = ( [NOISE]: { type: 'noise', id: NOISE, - width, - height, use_cpu, is_intermediate: true, }, - [INPAINT]: { + [DENOISE_LATENTS]: { type: 'denoise_latents', - id: INPAINT, + id: DENOISE_LATENTS, is_intermediate: true, steps: steps, cfg_scale: cfg_scale, @@ -186,7 +180,7 @@ export const buildCanvasOutpaintGraph = ( [CANVAS_OUTPUT]: { type: 'img_paste', id: CANVAS_OUTPUT, - is_intermediate: true, + is_intermediate: !shouldAutoSave, }, [RANGE_OF_SIZE]: { type: 'range_of_size', @@ -211,7 +205,7 @@ export const buildCanvasOutpaintGraph = ( field: 'unet', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'unet', }, }, @@ -268,16 +262,6 @@ export const buildCanvasOutpaintGraph = ( field: 'mask1', }, }, - { - source: { - node_id: MASK_COMBINE, - field: 'image', - }, - destination: { - node_id: MASK_BLUR, - field: 'image', - }, - }, // Plug Everything Into Inpaint Node { source: { @@ -285,7 +269,7 @@ export const buildCanvasOutpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'positive_conditioning', }, }, @@ -295,7 +279,7 @@ export const buildCanvasOutpaintGraph = ( field: 'conditioning', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'negative_conditioning', }, }, @@ -305,7 +289,7 @@ export const buildCanvasOutpaintGraph = ( field: 'noise', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'noise', }, }, @@ -315,7 +299,7 @@ export const buildCanvasOutpaintGraph = ( field: 'latents', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'latents', }, }, @@ -325,7 +309,7 @@ export const buildCanvasOutpaintGraph = ( field: 'image', }, destination: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 
'mask', }, }, @@ -353,7 +337,7 @@ export const buildCanvasOutpaintGraph = ( // Decode the result from Inpaint { source: { - node_id: INPAINT, + node_id: DENOISE_LATENTS, field: 'latents', }, destination: { @@ -361,6 +345,230 @@ export const buildCanvasOutpaintGraph = ( field: 'latents', }, }, + ], + }; + + // Add Infill Nodes + + if (infillMethod === 'patchmatch') { + graph.nodes[INPAINT_INFILL] = { + type: 'infill_patchmatch', + id: INPAINT_INFILL, + is_intermediate: true, + }; + } + + // Handle Scale Before Processing + if (['auto', 'manual'].includes(boundingBoxScaleMethod)) { + const scaledWidth: number = scaledBoundingBoxDimensions.width; + const scaledHeight: number = scaledBoundingBoxDimensions.height; + + // Add Scaling Nodes + graph.nodes[INPAINT_IMAGE_RESIZE_UP] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: scaledHeight, + image: canvasInitImage, + }; + graph.nodes[MASK_RESIZE_UP] = { + type: 'img_resize', + id: MASK_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: scaledHeight, + }; + graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + graph.nodes[INPAINT_INFILL_RESIZE_DOWN] = { + type: 'img_resize', + id: INPAINT_INFILL_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + graph.nodes[MASK_RESIZE_DOWN] = { + type: 'img_resize', + id: MASK_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: scaledWidth, + height: scaledHeight, + }; + + // Connect Nodes + graph.edges.push( + // Scale Inpaint Image + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_INFILL, + field: 'image', + }, + }, + // Take combined mask and resize and then blur + { + source: { + node_id: MASK_COMBINE, + field: 'image', + }, + destination: { + node_id: MASK_RESIZE_UP, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: MASK_BLUR, + field: 'image', + }, + }, + // Resize Results Down + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_INFILL_RESIZE_DOWN, + field: 'image', + }, + }, + // Color Correct The Inpainted Result + { + source: { + node_id: INPAINT_INFILL_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'reference', + }, + }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + // Paste Everything Back + { + source: { + node_id: INPAINT_INFILL_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'base_image', + }, + }, + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + 
destination: { + node_id: CANVAS_OUTPUT, + field: 'mask', + }, + } + ); + } else { + // Add Images To Nodes + graph.nodes[INPAINT_INFILL] = { + ...(graph.nodes[INPAINT_INFILL] as + | InfillTileInvocation + | InfillPatchmatchInvocation), + image: canvasInitImage, + }; + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: width, + height: height, + }; + graph.nodes[INPAINT_IMAGE] = { + ...(graph.nodes[INPAINT_IMAGE] as ImageToLatentsInvocation), + image: canvasInitImage, + }; + graph.nodes[MASK_BLUR] = { + ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), + image: canvasMaskImage, + }; + + graph.edges.push( + // Take combined mask and plug it to blur + { + source: { + node_id: MASK_COMBINE, + field: 'image', + }, + destination: { + node_id: MASK_BLUR, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -422,11 +630,11 @@ export const buildCanvasOutpaintGraph = ( node_id: CANVAS_OUTPUT, field: 'mask', }, - }, - ], - }; + } + ); + } - // handle seed + // Handle Seed if (shouldRandomizeSeed) { // Random int node to generate the starting seed const randomIntNode: RandomIntInvocation = { @@ -450,10 +658,10 @@ export const buildCanvasOutpaintGraph = ( addVAEToGraph(state, graph, MAIN_MODEL_LOADER); // add LoRA support - addLoRAsToGraph(state, graph, INPAINT, MAIN_MODEL_LOADER); + addLoRAsToGraph(state, graph, DENOISE_LATENTS, MAIN_MODEL_LOADER); // add controlnet, mutating `graph` - addControlNetToLinearGraph(state, graph, INPAINT); + addControlNetToLinearGraph(state, graph, DENOISE_LATENTS); // NSFW & watermark - must be last thing added to graph if (state.system.shouldUseNSFWChecker) { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 7b43be497c..13ba385a66 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -2,7 +2,10 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + ImageBlurInvocation, ImageDTO, + ImageToLatentsInvocation, + NoiseInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; @@ -16,9 +19,13 @@ import { CANVAS_OUTPUT, COLOR_CORRECT, INPAINT_IMAGE, + INPAINT_IMAGE_RESIZE_DOWN, + INPAINT_IMAGE_RESIZE_UP, ITERATE, LATENTS_TO_IMAGE, MASK_BLUR, + MASK_RESIZE_DOWN, + MASK_RESIZE_UP, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -114,20 +121,16 @@ export const buildCanvasSDXLInpaintGraph = ( is_intermediate: true, radius: maskBlur, blur_type: maskBlurMethod, - image: canvasMaskImage, }, [INPAINT_IMAGE]: { type: 'i2l', id: INPAINT_IMAGE, is_intermediate: true, fp32: vaePrecision === 'fp32' ? 
true : false, - image: canvasInitImage, }, [NOISE]: { type: 'noise', id: NOISE, - width, - height, use_cpu, is_intermediate: true, }, @@ -156,7 +159,7 @@ export const buildCanvasSDXLInpaintGraph = ( [CANVAS_OUTPUT]: { type: 'img_paste', id: CANVAS_OUTPUT, - is_intermediate: true, + is_intermediate: !shouldAutoSave, base_image: canvasInitImage, }, [RANGE_OF_SIZE]: { @@ -309,7 +312,156 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'latents', }, }, - // Color Correct Inpainted Result + ], + }; + + // Handle Scale Before Processing + if (['auto', 'manual'].includes(boundingBoxScaleMethod)) { + const scaledWidth: number = scaledBoundingBoxDimensions.width; + const scaledHeight: number = scaledBoundingBoxDimensions.height; + + // Add Scaling Nodes + graph.nodes[INPAINT_IMAGE_RESIZE_UP] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: scaledHeight, + image: canvasInitImage, + }; + graph.nodes[MASK_RESIZE_UP] = { + type: 'img_resize', + id: MASK_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: scaledHeight, + image: canvasMaskImage, + }; + graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + graph.nodes[MASK_RESIZE_DOWN] = { + type: 'img_resize', + id: MASK_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: scaledWidth, + height: scaledHeight, + }; + + // Connect Nodes + graph.edges.push( + // Scale Inpaint Image and Mask + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: MASK_BLUR, + field: 'image', + }, + }, + // Color Correct The Inpainted Result + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + // Paste Back Onto Original Image + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'mask', + }, + } + ); + } else { + // Add Images To Nodes + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: width, + height: height, + }; + graph.nodes[INPAINT_IMAGE] = { + ...(graph.nodes[INPAINT_IMAGE] as ImageToLatentsInvocation), + image: canvasInitImage, + }; + graph.nodes[MASK_BLUR] = { + ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), + image: canvasMaskImage, + }; + + graph.edges.push( + // Color Correct The Inpainted Result { source: { node_id: LATENTS_TO_IMAGE, @@ -330,7 +482,7 @@ export const buildCanvasSDXLInpaintGraph = ( field: 'mask', }, }, - // Paste them back on original image + // Paste Back Onto Original Image { source: { node_id: COLOR_CORRECT, @@ -350,19 
+502,11 @@ export const buildCanvasSDXLInpaintGraph = ( node_id: CANVAS_OUTPUT, field: 'mask', }, - }, - ], - }; - - // Add Refiner if enabled - if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + } + ); } - // optionally add custom VAE - addVAEToGraph(state, graph, SDXL_MODEL_LOADER); - - // handle seed + // Handle Seed if (shouldRandomizeSeed) { // Random int node to generate the starting seed const randomIntNode: RandomIntInvocation = { @@ -382,6 +526,14 @@ export const buildCanvasSDXLInpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add Refiner if enabled + if (shouldUseSDXLRefiner) { + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + } + + // optionally add custom VAE + addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + // add LoRA support addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index c75ef0f800..7dc46ababf 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -2,9 +2,12 @@ import { logger } from 'app/logging/logger'; import { RootState } from 'app/store/store'; import { NonNullableGraph } from 'features/nodes/types/types'; import { + ImageBlurInvocation, ImageDTO, + ImageToLatentsInvocation, InfillPatchmatchInvocation, InfillTileInvocation, + NoiseInvocation, RandomIntInvocation, RangeOfSizeInvocation, } from 'services/api/types'; @@ -18,12 +21,17 @@ import { CANVAS_OUTPUT, COLOR_CORRECT, INPAINT_IMAGE, + INPAINT_IMAGE_RESIZE_DOWN, + INPAINT_IMAGE_RESIZE_UP, INPAINT_INFILL, + INPAINT_INFILL_RESIZE_DOWN, ITERATE, LATENTS_TO_IMAGE, MASK_BLUR, MASK_COMBINE, MASK_FROM_ALPHA, + MASK_RESIZE_DOWN, + MASK_RESIZE_UP, NEGATIVE_CONDITIONING, NOISE, POSITIVE_CONDITIONING, @@ -91,23 +99,6 @@ export const buildCanvasSDXLOutpaintGraph = ( ? shouldUseCpuNoise : shouldUseCpuNoise; - let infillNode: InfillTileInvocation | InfillPatchmatchInvocation = { - type: 'infill_tile', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - tile_size: tileSize, - }; - - if (infillMethod === 'patchmatch') { - infillNode = { - type: 'infill_patchmatch', - id: INPAINT_INFILL, - is_intermediate: true, - image: canvasInitImage, - }; - } - const graph: NonNullableGraph = { id: SDXL_CANVAS_OUTPAINT_GRAPH, nodes: { @@ -132,13 +123,6 @@ export const buildCanvasSDXLOutpaintGraph = ( ? `${negativePrompt} ${negativeStylePrompt}` : negativeStylePrompt, }, - [infillNode.id]: infillNode, - [INPAINT_IMAGE]: { - type: 'i2l', - id: INPAINT_IMAGE, - is_intermediate: true, - fp32: vaePrecision === 'fp32' ? true : false, - }, [MASK_FROM_ALPHA]: { type: 'tomask', id: MASK_FROM_ALPHA, @@ -158,11 +142,21 @@ export const buildCanvasSDXLOutpaintGraph = ( radius: maskBlur, blur_type: maskBlurMethod, }, + [INPAINT_INFILL]: { + type: 'infill_tile', + id: INPAINT_INFILL, + is_intermediate: true, + tile_size: tileSize, + }, + [INPAINT_IMAGE]: { + type: 'i2l', + id: INPAINT_IMAGE, + is_intermediate: true, + fp32: vaePrecision === 'fp32' ? 
true : false, + }, [NOISE]: { type: 'noise', id: NOISE, - width, - height, use_cpu, is_intermediate: true, }, @@ -190,7 +184,7 @@ export const buildCanvasSDXLOutpaintGraph = ( [CANVAS_OUTPUT]: { type: 'img_paste', id: CANVAS_OUTPUT, - is_intermediate: true, + is_intermediate: !shouldAutoSave, }, [RANGE_OF_SIZE]: { type: 'range_of_size', @@ -259,7 +253,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'clip2', }, }, - // Infill The Image + // Connect Infill Result To Inpaint Image { source: { node_id: INPAINT_INFILL, @@ -270,7 +264,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'image', }, }, - // Create mask from image alpha & merge with user painted mask + // Combine Mask from Init Image with User Painted Mask { source: { node_id: MASK_FROM_ALPHA, @@ -281,16 +275,6 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'mask1', }, }, - { - source: { - node_id: MASK_COMBINE, - field: 'image', - }, - destination: { - node_id: MASK_BLUR, - field: 'image', - }, - }, // Connect Everything To Inpaint { source: { @@ -374,6 +358,230 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'latents', }, }, + ], + }; + + // Add Infill Nodes + + if (infillMethod === 'patchmatch') { + graph.nodes[INPAINT_INFILL] = { + type: 'infill_patchmatch', + id: INPAINT_INFILL, + is_intermediate: true, + }; + } + + // Handle Scale Before Processing + if (['auto', 'manual'].includes(boundingBoxScaleMethod)) { + const scaledWidth: number = scaledBoundingBoxDimensions.width; + const scaledHeight: number = scaledBoundingBoxDimensions.height; + + // Add Scaling Nodes + graph.nodes[INPAINT_IMAGE_RESIZE_UP] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: scaledHeight, + image: canvasInitImage, + }; + graph.nodes[MASK_RESIZE_UP] = { + type: 'img_resize', + id: MASK_RESIZE_UP, + is_intermediate: true, + width: scaledWidth, + height: scaledHeight, + }; + graph.nodes[INPAINT_IMAGE_RESIZE_DOWN] = { + type: 'img_resize', + id: INPAINT_IMAGE_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + graph.nodes[INPAINT_INFILL_RESIZE_DOWN] = { + type: 'img_resize', + id: INPAINT_INFILL_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + graph.nodes[MASK_RESIZE_DOWN] = { + type: 'img_resize', + id: MASK_RESIZE_DOWN, + is_intermediate: true, + width: width, + height: height, + }; + + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: scaledWidth, + height: scaledHeight, + }; + + // Connect Nodes + graph.edges.push( + // Scale Inpaint Image + { + source: { + node_id: INPAINT_IMAGE_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: INPAINT_INFILL, + field: 'image', + }, + }, + // Take combined mask and resize and then blur + { + source: { + node_id: MASK_COMBINE, + field: 'image', + }, + destination: { + node_id: MASK_RESIZE_UP, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_UP, + field: 'image', + }, + destination: { + node_id: MASK_BLUR, + field: 'image', + }, + }, + // Resize Results Down + { + source: { + node_id: LATENTS_TO_IMAGE, + field: 'image', + }, + destination: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: MASK_BLUR, + field: 'image', + }, + destination: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + }, + { + source: { + node_id: INPAINT_INFILL, + field: 'image', + }, + destination: { + node_id: INPAINT_INFILL_RESIZE_DOWN, + field: 'image', + }, + }, + // Color Correct The 
Inpainted Result + { + source: { + node_id: INPAINT_INFILL_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'reference', + }, + }, + { + source: { + node_id: INPAINT_IMAGE_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: COLOR_CORRECT, + field: 'mask', + }, + }, + // Paste Everything Back + { + source: { + node_id: INPAINT_INFILL_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'base_image', + }, + }, + { + source: { + node_id: COLOR_CORRECT, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'image', + }, + }, + { + source: { + node_id: MASK_RESIZE_DOWN, + field: 'image', + }, + destination: { + node_id: CANVAS_OUTPUT, + field: 'mask', + }, + } + ); + } else { + // Add Images To Nodes + graph.nodes[INPAINT_INFILL] = { + ...(graph.nodes[INPAINT_INFILL] as + | InfillTileInvocation + | InfillPatchmatchInvocation), + image: canvasInitImage, + }; + graph.nodes[NOISE] = { + ...(graph.nodes[NOISE] as NoiseInvocation), + width: width, + height: height, + }; + graph.nodes[INPAINT_IMAGE] = { + ...(graph.nodes[INPAINT_IMAGE] as ImageToLatentsInvocation), + image: canvasInitImage, + }; + graph.nodes[MASK_BLUR] = { + ...(graph.nodes[MASK_BLUR] as ImageBlurInvocation), + image: canvasMaskImage, + }; + + graph.edges.push( + // Take combined mask and plug it to blur + { + source: { + node_id: MASK_COMBINE, + field: 'image', + }, + destination: { + node_id: MASK_BLUR, + field: 'image', + }, + }, // Color Correct The Inpainted Result { source: { @@ -405,7 +613,7 @@ export const buildCanvasSDXLOutpaintGraph = ( field: 'mask', }, }, - // Paste Back Outpainted Image on Original + // Paste Everything Back { source: { node_id: INPAINT_INFILL, @@ -435,19 +643,11 @@ export const buildCanvasSDXLOutpaintGraph = ( node_id: CANVAS_OUTPUT, field: 'mask', }, - }, - ], - }; - - // Add Refiner if enabled - if (shouldUseSDXLRefiner) { - addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + } + ); } - // optionally add custom VAE - addVAEToGraph(state, graph, SDXL_MODEL_LOADER); - - // handle seed + // Handle seed if (shouldRandomizeSeed) { // Random int node to generate the starting seed const randomIntNode: RandomIntInvocation = { @@ -467,6 +667,14 @@ export const buildCanvasSDXLOutpaintGraph = ( (graph.nodes[RANGE_OF_SIZE] as RangeOfSizeInvocation).start = seed; } + // Add Refiner if enabled + if (shouldUseSDXLRefiner) { + addSDXLRefinerToGraph(state, graph, SDXL_DENOISE_LATENTS); + } + + // optionally add custom VAE + addVAEToGraph(state, graph, SDXL_MODEL_LOADER); + // add LoRA support addSDXLLoRAsToGraph(state, graph, SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts index f1654383dd..3e213120b3 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/constants.ts @@ -21,12 +21,18 @@ export const CANVAS_OUTPUT = 'canvas_output'; export const INPAINT = 'inpaint'; export const INPAINT_SEAM_FIX = 'inpaint_seam_fix'; export const INPAINT_IMAGE = 'inpaint_image'; +export const SCALED_INPAINT_IMAGE = 'scaled_inpaint_image'; +export const INPAINT_IMAGE_RESIZE_UP = 'inpaint_image_resize_up'; +export const 
INPAINT_IMAGE_RESIZE_DOWN = 'inpaint_image_resize_down'; export const INPAINT_INFILL = 'inpaint_infill'; +export const INPAINT_INFILL_RESIZE_DOWN = 'inpaint_infill_resize_down'; export const INPAINT_FINAL_IMAGE = 'inpaint_final_image'; export const MASK_FROM_ALPHA = 'tomask'; export const MASK_EDGE = 'mask_edge'; export const MASK_BLUR = 'mask_blur'; export const MASK_COMBINE = 'mask_combine'; +export const MASK_RESIZE_UP = 'mask_resize_up'; +export const MASK_RESIZE_DOWN = 'mask_resize_down'; export const COLOR_CORRECT = 'color_correct'; export const PASTE_IMAGE = 'img_paste'; export const CONTROL_NET_COLLECT = 'control_net_collect'; diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index 471c995f4d..435b605489 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -184,6 +184,15 @@ export type ImageNSFWBlurInvocation = TypeReq< export type ImageWatermarkInvocation = TypeReq< components['schemas']['ImageWatermarkInvocation'] >; +export type ImageBlurInvocation = TypeReq< + components['schemas']['ImageBlurInvocation'] +>; +export type ColorCorrectInvocation = TypeReq< + components['schemas']['ColorCorrectInvocation'] +>; +export type ImagePasteInvocation = TypeReq< + components['schemas']['ImagePasteInvocation'] +>; // ControlNet Nodes export type ControlNetInvocation = TypeReq< From 33779b6339a471f87f8f068479d1061e97612da8 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 21:16:37 +1200 Subject: [PATCH 43/67] chore: Remove shouldFitToWidthHeight from Inpaint Graphs Was never used for inpainting but was fed to the node anyway. --- .../features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts | 1 - .../nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts | 1 - .../nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts | 1 - .../nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts | 1 - 4 files changed, 4 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts index 00af964350..6b0da8e197 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasInpaintGraph.ts @@ -53,7 +53,6 @@ export const buildCanvasInpaintGraph = ( scheduler, steps, img2imgStrength: strength, - shouldFitToWidthHeight, iterations, seed, shouldRandomizeSeed, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts index 2eb70884cf..23f6acb539 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasOutpaintGraph.ts @@ -59,7 +59,6 @@ export const buildCanvasOutpaintGraph = ( scheduler, steps, img2imgStrength: strength, - shouldFitToWidthHeight, iterations, seed, shouldRandomizeSeed, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 13ba385a66..570439f9e6 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -53,7 +53,6 @@ export const buildCanvasSDXLInpaintGraph = ( scheduler, steps, img2imgStrength: strength, - shouldFitToWidthHeight, iterations, seed, shouldRandomizeSeed, diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 7dc46ababf..404f3501dd 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -59,7 +59,6 @@ export const buildCanvasSDXLOutpaintGraph = ( scheduler, steps, img2imgStrength: strength, - shouldFitToWidthHeight, iterations, seed, shouldRandomizeSeed, From 3ff9961bda9d1739afca6c0446779133694a3d67 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 21:26:20 +1200 Subject: [PATCH 44/67] fix: Circular dependency in Mask Blur Method --- .../Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx | 2 +- .../features/parameters/store/generationSlice.ts | 7 ++++--- .../features/parameters/types/parameterSchemas.ts | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx index b45dc8b884..fa20dcdbcc 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx +++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod.tsx @@ -6,7 +6,7 @@ import IAIMantineSelect from 'common/components/IAIMantineSelect'; import { setMaskBlurMethod } from 'features/parameters/store/generationSlice'; import { useTranslation } from 'react-i18next'; -export type MaskBlurMethods = 'box' | 'gaussian'; +type MaskBlurMethods = 'box' | 'gaussian'; const maskBlurMethods: SelectItem[] = [ { label: 'Box Blur', value: 'box' }, diff --git a/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts b/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts index 33a76da4e6..0173391833 100644 --- a/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts +++ b/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts @@ -4,12 +4,13 @@ import { roundToMultiple } from 'common/util/roundDownToMultiple'; import { configChanged } from 'features/system/store/configSlice'; import { clamp } from 'lodash-es'; import { ImageDTO } from 'services/api/types'; -import { MaskBlurMethods } from '../components/Parameters/Canvas/MaskAdjustment/ParamMaskBlurMethod'; + import { clipSkipMap } from '../types/constants'; import { CfgScaleParam, HeightParam, MainModelParam, + MaskBlurMethodParam, NegativePromptParam, OnnxModelParam, PositivePromptParam, @@ -35,7 +36,7 @@ export interface GenerationState { negativePrompt: NegativePromptParam; scheduler: SchedulerParam; maskBlur: number; - maskBlurMethod: MaskBlurMethods; + maskBlurMethod: MaskBlurMethodParam; seed: SeedParam; seedWeights: string; shouldFitToWidthHeight: boolean; @@ -196,7 +197,7 @@ export const generationSlice = createSlice({ setMaskBlur: (state, action: PayloadAction) => { state.maskBlur = 
action.payload; }, - setMaskBlurMethod: (state, action: PayloadAction) => { + setMaskBlurMethod: (state, action: PayloadAction) => { state.maskBlurMethod = action.payload; }, setTileSize: (state, action: PayloadAction) => { diff --git a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts index ac799ac600..5221bf64a9 100644 --- a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts +++ b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts @@ -385,6 +385,21 @@ export const isValidSDXLRefinerStart = ( val: unknown ): val is SDXLRefinerStartParam => zSDXLRefinerstart.safeParse(val).success; +/** + * Zod schema for a mask blur method parameter + */ +export const zMaskBlurMethod = z.enum(['box', 'gaussian']); +/** + * Type alias for mask blur method parameter, inferred from its zod schema + */ +export type MaskBlurMethodParam = z.infer; +/** + * Validates/type-guards a value as a mask blur method parameter + */ +export const isValidMaskBlurMethod = ( + val: unknown +): val is MaskBlurMethodParam => zMaskBlurMethod.safeParse(val).success; + // /** // * Zod schema for BaseModelType // */ From 561951ad988c53ffdfc3871a850f68096f69e1ed Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sun, 13 Aug 2023 21:28:39 +1200 Subject: [PATCH 45/67] chore: Black linting --- invokeai/app/invocations/compel.py | 12 +++-- invokeai/app/invocations/latent.py | 4 +- .../stable_diffusion/diffusers_pipeline.py | 19 ++++--- .../diffusion/shared_invokeai_diffusion.py | 53 +++++++++++-------- 4 files changed, 52 insertions(+), 36 deletions(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 484d813dea..2a56bd04ab 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -294,11 +294,17 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): @torch.no_grad() def invoke(self, context: InvocationContext) -> CompelOutput: - c1, c1_pooled, ec1 = self.run_clip_compel(context, self.clip, self.prompt, False, "lora_te1_", zero_on_empty=False) + c1, c1_pooled, ec1 = self.run_clip_compel( + context, self.clip, self.prompt, False, "lora_te1_", zero_on_empty=False + ) if self.style.strip() == "": - c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.prompt, True, "lora_te2_", zero_on_empty=True) + c2, c2_pooled, ec2 = self.run_clip_compel( + context, self.clip2, self.prompt, True, "lora_te2_", zero_on_empty=True + ) else: - c2, c2_pooled, ec2 = self.run_clip_compel(context, self.clip2, self.style, True, "lora_te2_", zero_on_empty=True) + c2, c2_pooled, ec2 = self.run_clip_compel( + context, self.clip2, self.style, True, "lora_te2_", zero_on_empty=True + ) original_size = (self.original_height, self.original_width) crop_coords = (self.crop_top, self.crop_left) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index fc934618cf..8e8d2b2fc9 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -320,7 +320,7 @@ class DenoiseLatentsInvocation(BaseInvocation): def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end): if scheduler.config.get("cpu_only", False): device = torch.device("cpu") - + num_inference_steps = steps scheduler.set_timesteps(num_inference_steps, device=device) timesteps = scheduler.timesteps @@ -344,7 +344,7 @@ class 
DenoiseLatentsInvocation(BaseInvocation): # calculate step count based on scheduler order num_inference_steps = len(timesteps) if scheduler.order == 2: - num_inference_steps += (num_inference_steps % 2) + num_inference_steps += num_inference_steps % 2 num_inference_steps = num_inference_steps // 2 return num_inference_steps, timesteps, init_timestep diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 8e0edb3c30..516e901971 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -202,8 +202,8 @@ class ControlNetData: @dataclass class ConditioningData: - unconditioned_embeddings: Any # TODO: type - text_embeddings: Any # TODO: type + unconditioned_embeddings: Any # TODO: type + text_embeddings: Any # TODO: type guidance_scale: Union[float, List[float]] """ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). @@ -389,19 +389,17 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): batched_t = init_timestep.repeat(batch_size) if noise is not None: - #latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers + # latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers latents = self.scheduler.add_noise(latents, noise, batched_t) if mask is not None: if is_inpainting_model(self.unet): # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint # (that's why there's a mask!) but it seems to really want that blanked out. - #masked_latents = latents * torch.where(mask < 0.5, 1, 0) TODO: inpaint/outpaint/infill + # masked_latents = latents * torch.where(mask < 0.5, 1, 0) TODO: inpaint/outpaint/infill # TODO: we should probably pass this in so we don't have to try/finally around setting it. 
- self.invokeai_diffuser.model_forward_callback = AddsMaskLatents( - self._unet_forward, mask, orig_latents - ) + self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(self._unet_forward, mask, orig_latents) else: # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) if noise is None: @@ -413,7 +411,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): ).to(device=orig_latents.device, dtype=orig_latents.dtype) latents = self.scheduler.add_noise(latents, noise, batched_t) - latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)) + latents = torch.lerp( + orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype) + ) additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise)) @@ -549,11 +549,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( sample=latent_model_input, - timestep=t, # TODO: debug how handled batched and non batched timesteps + timestep=t, # TODO: debug how handled batched and non batched timesteps step_index=step_index, total_step_count=total_step_count, conditioning_data=conditioning_data, - # extra: down_block_additional_residuals=controlnet_down_block_samples, # from controlnet(s) mid_block_additional_residual=controlnet_mid_block_sample, # from controlnet(s) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 9b1630dc3a..e739855b9e 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -202,15 +202,21 @@ class InvokeAIDiffuserComponent: else: if type(conditioning_data.text_embeddings) is SDXLConditioningInfo: added_cond_kwargs = { - "text_embeds": torch.cat([ - # TODO: how to pad? just by zeros? or even truncate? - conditioning_data.unconditioned_embeddings.pooled_embeds, - conditioning_data.text_embeddings.pooled_embeds, - ], dim=0), - "time_ids": torch.cat([ - conditioning_data.unconditioned_embeddings.add_time_ids, - conditioning_data.text_embeddings.add_time_ids, - ], dim=0), + "text_embeds": torch.cat( + [ + # TODO: how to pad? just by zeros? or even truncate? + conditioning_data.unconditioned_embeddings.pooled_embeds, + conditioning_data.text_embeddings.pooled_embeds, + ], + dim=0, + ), + "time_ids": torch.cat( + [ + conditioning_data.unconditioned_embeddings.add_time_ids, + conditioning_data.text_embeddings.add_time_ids, + ], + dim=0, + ), } ( encoder_hidden_states, @@ -260,7 +266,7 @@ class InvokeAIDiffuserComponent: self, sample: torch.Tensor, timestep: torch.Tensor, - conditioning_data, # TODO: type + conditioning_data, # TODO: type step_index: int, total_step_count: int, **kwargs, @@ -380,20 +386,25 @@ class InvokeAIDiffuserComponent: added_cond_kwargs = None if type(conditioning_data.text_embeddings) is SDXLConditioningInfo: added_cond_kwargs = { - "text_embeds": torch.cat([ - # TODO: how to pad? just by zeros? or even truncate? - conditioning_data.unconditioned_embeddings.pooled_embeds, - conditioning_data.text_embeddings.pooled_embeds, - ], dim=0), - "time_ids": torch.cat([ - conditioning_data.unconditioned_embeddings.add_time_ids, - conditioning_data.text_embeddings.add_time_ids, - ], dim=0), + "text_embeds": torch.cat( + [ + # TODO: how to pad? just by zeros? or even truncate? 
+ conditioning_data.unconditioned_embeddings.pooled_embeds, + conditioning_data.text_embeddings.pooled_embeds, + ], + dim=0, + ), + "time_ids": torch.cat( + [ + conditioning_data.unconditioned_embeddings.add_time_ids, + conditioning_data.text_embeddings.add_time_ids, + ], + dim=0, + ), } both_conditionings, encoder_attention_mask = self._concat_conditionings_for_batch( - conditioning_data.unconditioned_embeddings.embeds, - conditioning_data.text_embeddings.embeds + conditioning_data.unconditioned_embeddings.embeds, conditioning_data.text_embeddings.embeds ) both_results = self.model_forward_callback( x_twice, From 90fa3eebb3d0e10e40435b0c49670893b72a67e2 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 02:25:39 +1200 Subject: [PATCH 46/67] feat: Make SDXL Style Prompt not take spaces --- .../graphBuilders/addSDXLRefinerToGraph.ts | 12 ++++---- .../buildCanvasSDXLImageToImageGraph.ts | 17 ++++++----- .../buildCanvasSDXLInpaintGraph.ts | 22 ++++++--------- .../buildCanvasSDXLOutpaintGraph.ts | 22 ++++++--------- .../buildCanvasSDXLTextToImageGraph.ts | 23 +++++++-------- .../buildLinearSDXLImageToImageGraph.ts | 13 +++++---- .../buildLinearSDXLTextToImageGraph.ts | 15 +++++----- .../helpers/craftSDXLStylePrompt.ts | 28 +++++++++++++++++++ 8 files changed, 86 insertions(+), 66 deletions(-) create mode 100644 invokeai/frontend/web/src/features/nodes/util/graphBuilders/helpers/craftSDXLStylePrompt.ts diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index 65664c9f2d..53f068c91b 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -16,18 +16,16 @@ import { SDXL_REFINER_NEGATIVE_CONDITIONING, SDXL_REFINER_POSITIVE_CONDITIONING, } from './constants'; +import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; export const addSDXLRefinerToGraph = ( state: RootState, graph: NonNullableGraph, baseNodeId: string ): void => { - const { positivePrompt, negativePrompt } = state.generation; const { refinerModel, refinerAestheticScore, - positiveStylePrompt, - negativeStylePrompt, refinerSteps, refinerScheduler, refinerCFGScale, @@ -49,6 +47,10 @@ export const addSDXLRefinerToGraph = ( metadataAccumulator.refiner_steps = refinerSteps; } + // Construct Style Prompt + const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = + craftSDXLStylePrompt(state, true); + // Unplug SDXL Latents Generation To Latents To Image graph.edges = graph.edges.filter( (e) => @@ -71,13 +73,13 @@ export const addSDXLRefinerToGraph = ( graph.nodes[SDXL_REFINER_POSITIVE_CONDITIONING] = { type: 'sdxl_refiner_compel_prompt', id: SDXL_REFINER_POSITIVE_CONDITIONING, - style: `${positivePrompt} ${positiveStylePrompt}`, + style: craftedPositiveStylePrompt, aesthetic_score: refinerAestheticScore, }; graph.nodes[SDXL_REFINER_NEGATIVE_CONDITIONING] = { type: 'sdxl_refiner_compel_prompt', id: SDXL_REFINER_NEGATIVE_CONDITIONING, - style: `${negativePrompt} ${negativeStylePrompt}`, + style: craftedNegativeStylePrompt, aesthetic_score: refinerAestheticScore, }; graph.nodes[SDXL_REFINER_DENOISE_LATENTS] = { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts 
b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts index a45ffe7df5..ef32943bc8 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLImageToImageGraph.ts @@ -26,6 +26,7 @@ import { SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; +import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; /** * Builds the Canvas tab's Image to Image graph. @@ -49,12 +50,10 @@ export const buildCanvasSDXLImageToImageGraph = ( } = state.generation; const { - positiveStylePrompt, - negativeStylePrompt, - shouldConcatSDXLStylePrompt, shouldUseSDXLRefiner, refinerStart, sdxlImg2ImgDenoisingStrength: strength, + shouldConcatSDXLStylePrompt, } = state.sdxl; // The bounding box determines width and height, not the width and height params @@ -71,6 +70,10 @@ export const buildCanvasSDXLImageToImageGraph = ( ? shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; + // Construct Style Prompt + const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = + craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt); + /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the * full graph here as a template. Then use the parameters from app state and set friendlier node @@ -93,17 +96,13 @@ export const buildCanvasSDXLImageToImageGraph = ( type: 'sdxl_compel_prompt', id: POSITIVE_CONDITIONING, prompt: positivePrompt, - style: shouldConcatSDXLStylePrompt - ? `${positivePrompt} ${positiveStylePrompt}` - : positiveStylePrompt, + style: craftedPositiveStylePrompt, }, [NEGATIVE_CONDITIONING]: { type: 'sdxl_compel_prompt', id: NEGATIVE_CONDITIONING, prompt: negativePrompt, - style: shouldConcatSDXLStylePrompt - ? `${negativePrompt} ${negativeStylePrompt}` - : negativeStylePrompt, + style: craftedNegativeStylePrompt, }, [NOISE]: { type: 'noise', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 570439f9e6..7d8586d09c 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -35,6 +35,7 @@ import { SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; +import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; /** * Builds the Canvas tab's Inpaint graph. @@ -63,13 +64,8 @@ export const buildCanvasSDXLInpaintGraph = ( maskBlurMethod, } = state.generation; - const { - positiveStylePrompt, - negativeStylePrompt, - shouldConcatSDXLStylePrompt, - shouldUseSDXLRefiner, - refinerStart, - } = state.sdxl; + const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } = + state.sdxl; if (!model) { log.error('No model found in state'); @@ -90,6 +86,10 @@ export const buildCanvasSDXLInpaintGraph = ( ? shouldUseCpuNoise : shouldUseCpuNoise; + // Construct Style Prompt + const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = + craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt); + const graph: NonNullableGraph = { id: SDXL_CANVAS_INPAINT_GRAPH, nodes: { @@ -102,17 +102,13 @@ export const buildCanvasSDXLInpaintGraph = ( type: 'sdxl_compel_prompt', id: POSITIVE_CONDITIONING, prompt: positivePrompt, - style: shouldConcatSDXLStylePrompt - ? 
`${positivePrompt} ${positiveStylePrompt}` - : positiveStylePrompt, + style: craftedPositiveStylePrompt, }, [NEGATIVE_CONDITIONING]: { type: 'sdxl_compel_prompt', id: NEGATIVE_CONDITIONING, prompt: negativePrompt, - style: shouldConcatSDXLStylePrompt - ? `${negativePrompt} ${negativeStylePrompt}` - : negativeStylePrompt, + style: craftedNegativeStylePrompt, }, [MASK_BLUR]: { type: 'img_blur', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index 404f3501dd..b5260894c4 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -41,6 +41,7 @@ import { SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; +import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; /** * Builds the Canvas tab's Outpaint graph. @@ -71,13 +72,8 @@ export const buildCanvasSDXLOutpaintGraph = ( infillMethod, } = state.generation; - const { - positiveStylePrompt, - negativeStylePrompt, - shouldConcatSDXLStylePrompt, - shouldUseSDXLRefiner, - refinerStart, - } = state.sdxl; + const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } = + state.sdxl; if (!model) { log.error('No model found in state'); @@ -98,6 +94,10 @@ export const buildCanvasSDXLOutpaintGraph = ( ? shouldUseCpuNoise : shouldUseCpuNoise; + // Construct Style Prompt + const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = + craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt); + const graph: NonNullableGraph = { id: SDXL_CANVAS_OUTPAINT_GRAPH, nodes: { @@ -110,17 +110,13 @@ export const buildCanvasSDXLOutpaintGraph = ( type: 'sdxl_compel_prompt', id: POSITIVE_CONDITIONING, prompt: positivePrompt, - style: shouldConcatSDXLStylePrompt - ? `${positivePrompt} ${positiveStylePrompt}` - : positiveStylePrompt, + style: craftedPositiveStylePrompt, }, [NEGATIVE_CONDITIONING]: { type: 'sdxl_compel_prompt', id: NEGATIVE_CONDITIONING, prompt: negativePrompt, - style: shouldConcatSDXLStylePrompt - ? `${negativePrompt} ${negativeStylePrompt}` - : negativeStylePrompt, + style: craftedNegativeStylePrompt, }, [MASK_FROM_ALPHA]: { type: 'tomask', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts index da27ace3b0..e79e08ba41 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLTextToImageGraph.ts @@ -24,6 +24,7 @@ import { SDXL_DENOISE_LATENTS, SDXL_MODEL_LOADER, } from './constants'; +import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; /** * Builds the Canvas tab's Text to Image graph. @@ -50,13 +51,8 @@ export const buildCanvasSDXLTextToImageGraph = ( const { shouldAutoSave } = state.canvas; - const { - positiveStylePrompt, - negativeStylePrompt, - shouldConcatSDXLStylePrompt, - shouldUseSDXLRefiner, - refinerStart, - } = state.sdxl; + const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } = + state.sdxl; if (!model) { log.error('No model found in state'); @@ -97,6 +93,11 @@ export const buildCanvasSDXLTextToImageGraph = ( denoising_start: 0, denoising_end: shouldUseSDXLRefiner ? 
refinerStart : 1, }; + + // Construct Style Prompt + const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = + craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt); + /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the * full graph here as a template. Then use the parameters from app state and set friendlier node @@ -122,18 +123,14 @@ export const buildCanvasSDXLTextToImageGraph = ( id: POSITIVE_CONDITIONING, is_intermediate: true, prompt: positivePrompt, - style: shouldConcatSDXLStylePrompt - ? `${positivePrompt} ${positiveStylePrompt}` - : positiveStylePrompt, + style: craftedPositiveStylePrompt, }, [NEGATIVE_CONDITIONING]: { type: isUsingOnnxModel ? 'prompt_onnx' : 'sdxl_compel_prompt', id: NEGATIVE_CONDITIONING, is_intermediate: true, prompt: negativePrompt, - style: shouldConcatSDXLStylePrompt - ? `${negativePrompt} ${negativeStylePrompt}` - : negativeStylePrompt, + style: craftedNegativeStylePrompt, }, [NOISE]: { type: 'noise', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts index 0d30fe1c63..42ea07c923 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLImageToImageGraph.ts @@ -25,6 +25,7 @@ import { SDXL_IMAGE_TO_IMAGE_GRAPH, SDXL_MODEL_LOADER, } from './constants'; +import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; /** * Builds the Image to Image tab graph. @@ -82,6 +83,10 @@ export const buildLinearSDXLImageToImageGraph = ( ? shouldUseCpuNoise : initialGenerationState.shouldUseCpuNoise; + // Construct Style Prompt + const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = + craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt); + // copy-pasted graph from node editor, filled in with state values & friendly node ids const graph: NonNullableGraph = { id: SDXL_IMAGE_TO_IMAGE_GRAPH, @@ -95,17 +100,13 @@ export const buildLinearSDXLImageToImageGraph = ( type: 'sdxl_compel_prompt', id: POSITIVE_CONDITIONING, prompt: positivePrompt, - style: shouldConcatSDXLStylePrompt - ? `${positivePrompt} ${positiveStylePrompt}` - : positiveStylePrompt, + style: craftedPositiveStylePrompt, }, [NEGATIVE_CONDITIONING]: { type: 'sdxl_compel_prompt', id: NEGATIVE_CONDITIONING, prompt: negativePrompt, - style: shouldConcatSDXLStylePrompt - ? 
`${negativePrompt} ${negativeStylePrompt}` - : negativeStylePrompt, + style: craftedNegativeStylePrompt, }, [NOISE]: { type: 'noise', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts index 595b6f47cd..a74884f23b 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildLinearSDXLTextToImageGraph.ts @@ -19,6 +19,7 @@ import { SDXL_MODEL_LOADER, SDXL_TEXT_TO_IMAGE_GRAPH, } from './constants'; +import { craftSDXLStylePrompt } from './helpers/craftSDXLStylePrompt'; export const buildLinearSDXLTextToImageGraph = ( state: RootState @@ -42,8 +43,8 @@ export const buildLinearSDXLTextToImageGraph = ( const { positiveStylePrompt, negativeStylePrompt, - shouldConcatSDXLStylePrompt, shouldUseSDXLRefiner, + shouldConcatSDXLStylePrompt, refinerStart, } = state.sdxl; @@ -56,6 +57,10 @@ export const buildLinearSDXLTextToImageGraph = ( throw new Error('No model found in state'); } + // Construct Style Prompt + const { craftedPositiveStylePrompt, craftedNegativeStylePrompt } = + craftSDXLStylePrompt(state, shouldConcatSDXLStylePrompt); + /** * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the * full graph here as a template. Then use the parameters from app state and set friendlier node @@ -78,17 +83,13 @@ export const buildLinearSDXLTextToImageGraph = ( type: 'sdxl_compel_prompt', id: POSITIVE_CONDITIONING, prompt: positivePrompt, - style: shouldConcatSDXLStylePrompt - ? `${positivePrompt} ${positiveStylePrompt}` - : positiveStylePrompt, + style: craftedPositiveStylePrompt, }, [NEGATIVE_CONDITIONING]: { type: 'sdxl_compel_prompt', id: NEGATIVE_CONDITIONING, prompt: negativePrompt, - style: shouldConcatSDXLStylePrompt - ? 
`${negativePrompt} ${negativeStylePrompt}` - : negativeStylePrompt, + style: craftedNegativeStylePrompt, }, [NOISE]: { type: 'noise', diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/helpers/craftSDXLStylePrompt.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/helpers/craftSDXLStylePrompt.ts new file mode 100644 index 0000000000..f46d5cc5dc --- /dev/null +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/helpers/craftSDXLStylePrompt.ts @@ -0,0 +1,28 @@ +import { RootState } from 'app/store/store'; + +export const craftSDXLStylePrompt = ( + state: RootState, + shouldConcatSDXLStylePrompt: boolean +) => { + const { positivePrompt, negativePrompt } = state.generation; + const { positiveStylePrompt, negativeStylePrompt } = state.sdxl; + + let craftedPositiveStylePrompt = positiveStylePrompt; + let craftedNegativeStylePrompt = negativeStylePrompt; + + if (shouldConcatSDXLStylePrompt) { + if (positiveStylePrompt.length > 0) { + craftedPositiveStylePrompt = `${positivePrompt} ${positiveStylePrompt}`; + } else { + craftedPositiveStylePrompt = positivePrompt; + } + + if (negativeStylePrompt.length > 0) { + craftedNegativeStylePrompt = `${negativePrompt} ${negativeStylePrompt}`; + } else { + craftedNegativeStylePrompt = negativePrompt; + } + } + + return { craftedPositiveStylePrompt, craftedNegativeStylePrompt }; +}; From 499e89d6f64847c6e05e971ca2bc463d2e434103 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 04:02:36 +1200 Subject: [PATCH 47/67] feat: Add SDXL Negative Aesthetic Score --- invokeai/app/invocations/metadata.py | 10 +++- .../graphBuilders/addSDXLRefinerToGraph.ts | 12 ++-- .../parameters/hooks/useRecallParameters.ts | 32 +++++++--- .../parameters/types/parameterSchemas.ts | 36 ++++++++--- .../components/ParamSDXLRefinerCollapse.tsx | 6 +- ...ParamSDXLRefinerNegativeAestheticScore.tsx | 60 +++++++++++++++++++ ...aramSDXLRefinerPositiveAestheticScore.tsx} | 20 +++---- .../web/src/features/sdxl/store/sdxlSlice.ts | 22 +++++-- .../frontend/web/src/services/api/schema.d.ts | 38 +++++++----- 9 files changed, 183 insertions(+), 53 deletions(-) create mode 100644 invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerNegativeAestheticScore.tsx rename invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/{ParamSDXLRefinerAestheticScore.tsx => ParamSDXLRefinerPositiveAestheticScore.tsx} (66%) diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 4f51bf10b8..d0549f8539 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -67,7 +67,10 @@ class CoreMetadata(BaseModelExcludeNull): ) refiner_steps: Union[int, None] = Field(default=None, description="The number of steps used for the refiner") refiner_scheduler: Union[str, None] = Field(default=None, description="The scheduler used for the refiner") - refiner_aesthetic_store: Union[float, None] = Field( + refiner_positive_aesthetic_store: Union[float, None] = Field( + default=None, description="The aesthetic score used for the refiner" + ) + refiner_negative_aesthetic_store: Union[float, None] = Field( default=None, description="The aesthetic score used for the refiner" ) refiner_start: Union[float, None] = Field(default=None, description="The start value used for refiner denoising") @@ -136,7 +139,10 @@ class MetadataAccumulatorInvocation(BaseInvocation): ) refiner_steps: Union[int, None] = Field(default=None, 
description="The number of steps used for the refiner") refiner_scheduler: Union[str, None] = Field(default=None, description="The scheduler used for the refiner") - refiner_aesthetic_store: Union[float, None] = Field( + refiner_positive_aesthetic_score: Union[float, None] = Field( + default=None, description="The aesthetic score used for the refiner" + ) + refiner_negative_aesthetic_score: Union[float, None] = Field( default=None, description="The aesthetic score used for the refiner" ) refiner_start: Union[float, None] = Field(default=None, description="The start value used for refiner denoising") diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index 53f068c91b..577f4e4b7d 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -25,7 +25,8 @@ export const addSDXLRefinerToGraph = ( ): void => { const { refinerModel, - refinerAestheticScore, + refinerPositiveAestheticScore, + refinerNegativeAestheticScore, refinerSteps, refinerScheduler, refinerCFGScale, @@ -40,7 +41,10 @@ export const addSDXLRefinerToGraph = ( if (metadataAccumulator) { metadataAccumulator.refiner_model = refinerModel; - metadataAccumulator.refiner_aesthetic_store = refinerAestheticScore; + metadataAccumulator.refiner_positive_aesthetic_score = + refinerPositiveAestheticScore; + metadataAccumulator.refiner_negative_aesthetic_score = + refinerNegativeAestheticScore; metadataAccumulator.refiner_cfg_scale = refinerCFGScale; metadataAccumulator.refiner_scheduler = refinerScheduler; metadataAccumulator.refiner_start = refinerStart; @@ -74,13 +78,13 @@ export const addSDXLRefinerToGraph = ( type: 'sdxl_refiner_compel_prompt', id: SDXL_REFINER_POSITIVE_CONDITIONING, style: craftedPositiveStylePrompt, - aesthetic_score: refinerAestheticScore, + aesthetic_score: refinerPositiveAestheticScore, }; graph.nodes[SDXL_REFINER_NEGATIVE_CONDITIONING] = { type: 'sdxl_refiner_compel_prompt', id: SDXL_REFINER_NEGATIVE_CONDITIONING, style: craftedNegativeStylePrompt, - aesthetic_score: refinerAestheticScore, + aesthetic_score: refinerNegativeAestheticScore, }; graph.nodes[SDXL_REFINER_DENOISE_LATENTS] = { type: 'denoise_latents', diff --git a/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts b/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts index 907107e95e..95caf9a9e1 100644 --- a/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts +++ b/invokeai/frontend/web/src/features/parameters/hooks/useRecallParameters.ts @@ -4,16 +4,16 @@ import { refinerModelChanged, setNegativeStylePromptSDXL, setPositiveStylePromptSDXL, - setRefinerAestheticScore, setRefinerCFGScale, + setRefinerNegativeAestheticScore, + setRefinerPositiveAestheticScore, setRefinerScheduler, setRefinerStart, setRefinerSteps, } from 'features/sdxl/store/sdxlSlice'; import { useCallback } from 'react'; import { useTranslation } from 'react-i18next'; -import { UnsafeImageMetadata } from 'services/api/types'; -import { ImageDTO } from 'services/api/types'; +import { ImageDTO, UnsafeImageMetadata } from 'services/api/types'; import { initialImageSelected, modelSelected } from '../store/actions'; import { setCfgScale, @@ -34,8 +34,9 @@ import { isValidPositivePrompt, isValidSDXLNegativeStylePrompt, isValidSDXLPositiveStylePrompt, - 
isValidSDXLRefinerAestheticScore, isValidSDXLRefinerModel, + isValidSDXLRefinerNegativeAestheticScore, + isValidSDXLRefinerPositiveAestheticScore, isValidSDXLRefinerStart, isValidScheduler, isValidSeed, @@ -339,7 +340,8 @@ export const useRecallParameters = () => { refiner_cfg_scale, refiner_steps, refiner_scheduler, - refiner_aesthetic_store, + refiner_positive_aesthetic_store, + refiner_negative_aesthetic_store, refiner_start, } = metadata; @@ -398,8 +400,24 @@ export const useRecallParameters = () => { dispatch(setRefinerScheduler(refiner_scheduler)); } - if (isValidSDXLRefinerAestheticScore(refiner_aesthetic_store)) { - dispatch(setRefinerAestheticScore(refiner_aesthetic_store)); + if ( + isValidSDXLRefinerPositiveAestheticScore( + refiner_positive_aesthetic_store + ) + ) { + dispatch( + setRefinerPositiveAestheticScore(refiner_positive_aesthetic_store) + ); + } + + if ( + isValidSDXLRefinerNegativeAestheticScore( + refiner_negative_aesthetic_store + ) + ) { + dispatch( + setRefinerNegativeAestheticScore(refiner_negative_aesthetic_store) + ); } if (isValidSDXLRefinerStart(refiner_start)) { diff --git a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts index 5221bf64a9..25905e1e14 100644 --- a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts +++ b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts @@ -353,22 +353,40 @@ export const isValidPrecision = (val: unknown): val is PrecisionParam => zPrecision.safeParse(val).success; /** - * Zod schema for SDXL refiner aesthetic score parameter + * Zod schema for SDXL refiner positive aesthetic score parameter */ -export const zSDXLRefinerAestheticScore = z.number().min(1).max(10); +export const zSDXLRefinerPositiveAestheticScore = z.number().min(1).max(10); /** - * Type alias for SDXL refiner aesthetic score parameter, inferred from its zod schema + * Type alias for SDXL refiner aesthetic positive score parameter, inferred from its zod schema */ -export type SDXLRefinerAestheticScoreParam = z.infer< - typeof zSDXLRefinerAestheticScore +export type SDXLRefinerPositiveAestheticScoreParam = z.infer< + typeof zSDXLRefinerPositiveAestheticScore >; /** - * Validates/type-guards a value as a SDXL refiner aesthetic score parameter + * Validates/type-guards a value as a SDXL refiner positive aesthetic score parameter */ -export const isValidSDXLRefinerAestheticScore = ( +export const isValidSDXLRefinerPositiveAestheticScore = ( val: unknown -): val is SDXLRefinerAestheticScoreParam => - zSDXLRefinerAestheticScore.safeParse(val).success; +): val is SDXLRefinerPositiveAestheticScoreParam => + zSDXLRefinerPositiveAestheticScore.safeParse(val).success; + +/** + * Zod schema for SDXL refiner negative aesthetic score parameter + */ +export const zSDXLRefinerNegativeAestheticScore = z.number().min(1).max(10); +/** + * Type alias for SDXL refiner aesthetic negative score parameter, inferred from its zod schema + */ +export type SDXLRefinerNegativeAestheticScoreParam = z.infer< + typeof zSDXLRefinerNegativeAestheticScore +>; +/** + * Validates/type-guards a value as a SDXL refiner negative aesthetic score parameter + */ +export const isValidSDXLRefinerNegativeAestheticScore = ( + val: unknown +): val is SDXLRefinerNegativeAestheticScoreParam => + zSDXLRefinerNegativeAestheticScore.safeParse(val).success; /** * Zod schema for SDXL start parameter diff --git 
a/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx b/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx index 37e1718dc6..3b186006f1 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/ParamSDXLRefinerCollapse.tsx @@ -4,9 +4,10 @@ import { stateSelector } from 'app/store/store'; import { useAppSelector } from 'app/store/storeHooks'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; import IAICollapse from 'common/components/IAICollapse'; -import ParamSDXLRefinerAestheticScore from './SDXLRefiner/ParamSDXLRefinerAestheticScore'; import ParamSDXLRefinerCFGScale from './SDXLRefiner/ParamSDXLRefinerCFGScale'; import ParamSDXLRefinerModelSelect from './SDXLRefiner/ParamSDXLRefinerModelSelect'; +import ParamSDXLRefinerNegativeAestheticScore from './SDXLRefiner/ParamSDXLRefinerNegativeAestheticScore'; +import ParamSDXLRefinerPositiveAestheticScore from './SDXLRefiner/ParamSDXLRefinerPositiveAestheticScore'; import ParamSDXLRefinerScheduler from './SDXLRefiner/ParamSDXLRefinerScheduler'; import ParamSDXLRefinerStart from './SDXLRefiner/ParamSDXLRefinerStart'; import ParamSDXLRefinerSteps from './SDXLRefiner/ParamSDXLRefinerSteps'; @@ -38,7 +39,8 @@ const ParamSDXLRefinerCollapse = () => { - + + diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerNegativeAestheticScore.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerNegativeAestheticScore.tsx new file mode 100644 index 0000000000..4dad3f519a --- /dev/null +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerNegativeAestheticScore.tsx @@ -0,0 +1,60 @@ +import { createSelector } from '@reduxjs/toolkit'; +import { stateSelector } from 'app/store/store'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; +import IAISlider from 'common/components/IAISlider'; +import { setRefinerNegativeAestheticScore } from 'features/sdxl/store/sdxlSlice'; +import { memo, useCallback } from 'react'; +import { useIsRefinerAvailable } from 'services/api/hooks/useIsRefinerAvailable'; + +const selector = createSelector( + [stateSelector], + ({ sdxl, hotkeys }) => { + const { refinerNegativeAestheticScore } = sdxl; + const { shift } = hotkeys; + + return { + refinerNegativeAestheticScore, + shift, + }; + }, + defaultSelectorOptions +); + +const ParamSDXLRefinerNegativeAestheticScore = () => { + const { refinerNegativeAestheticScore, shift } = useAppSelector(selector); + + const isRefinerAvailable = useIsRefinerAvailable(); + + const dispatch = useAppDispatch(); + + const handleChange = useCallback( + (v: number) => dispatch(setRefinerNegativeAestheticScore(v)), + [dispatch] + ); + + const handleReset = useCallback( + () => dispatch(setRefinerNegativeAestheticScore(2.5)), + [dispatch] + ); + + return ( + + ); +}; + +export default memo(ParamSDXLRefinerNegativeAestheticScore); diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerAestheticScore.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerPositiveAestheticScore.tsx similarity index 66% rename from invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerAestheticScore.tsx rename to 
invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerPositiveAestheticScore.tsx index 9c9c4b2f89..47842629f6 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerAestheticScore.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLRefiner/ParamSDXLRefinerPositiveAestheticScore.tsx @@ -3,50 +3,50 @@ import { stateSelector } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; import IAISlider from 'common/components/IAISlider'; -import { setRefinerAestheticScore } from 'features/sdxl/store/sdxlSlice'; +import { setRefinerPositiveAestheticScore } from 'features/sdxl/store/sdxlSlice'; import { memo, useCallback } from 'react'; import { useIsRefinerAvailable } from 'services/api/hooks/useIsRefinerAvailable'; const selector = createSelector( [stateSelector], ({ sdxl, hotkeys }) => { - const { refinerAestheticScore } = sdxl; + const { refinerPositiveAestheticScore } = sdxl; const { shift } = hotkeys; return { - refinerAestheticScore, + refinerPositiveAestheticScore, shift, }; }, defaultSelectorOptions ); -const ParamSDXLRefinerAestheticScore = () => { - const { refinerAestheticScore, shift } = useAppSelector(selector); +const ParamSDXLRefinerPositiveAestheticScore = () => { + const { refinerPositiveAestheticScore, shift } = useAppSelector(selector); const isRefinerAvailable = useIsRefinerAvailable(); const dispatch = useAppDispatch(); const handleChange = useCallback( - (v: number) => dispatch(setRefinerAestheticScore(v)), + (v: number) => dispatch(setRefinerPositiveAestheticScore(v)), [dispatch] ); const handleReset = useCallback( - () => dispatch(setRefinerAestheticScore(6)), + () => dispatch(setRefinerPositiveAestheticScore(6)), [dispatch] ); return ( { ); }; -export default memo(ParamSDXLRefinerAestheticScore); +export default memo(ParamSDXLRefinerPositiveAestheticScore); diff --git a/invokeai/frontend/web/src/features/sdxl/store/sdxlSlice.ts b/invokeai/frontend/web/src/features/sdxl/store/sdxlSlice.ts index 7ee3ea1d4f..7670790f05 100644 --- a/invokeai/frontend/web/src/features/sdxl/store/sdxlSlice.ts +++ b/invokeai/frontend/web/src/features/sdxl/store/sdxlSlice.ts @@ -16,7 +16,8 @@ type SDXLInitialState = { refinerSteps: number; refinerCFGScale: number; refinerScheduler: SchedulerParam; - refinerAestheticScore: number; + refinerPositiveAestheticScore: number; + refinerNegativeAestheticScore: number; refinerStart: number; }; @@ -30,7 +31,8 @@ const sdxlInitialState: SDXLInitialState = { refinerSteps: 20, refinerCFGScale: 7.5, refinerScheduler: 'euler', - refinerAestheticScore: 6, + refinerPositiveAestheticScore: 6, + refinerNegativeAestheticScore: 2.5, refinerStart: 0.7, }; @@ -68,8 +70,17 @@ const sdxlSlice = createSlice({ setRefinerScheduler: (state, action: PayloadAction) => { state.refinerScheduler = action.payload; }, - setRefinerAestheticScore: (state, action: PayloadAction) => { - state.refinerAestheticScore = action.payload; + setRefinerPositiveAestheticScore: ( + state, + action: PayloadAction + ) => { + state.refinerPositiveAestheticScore = action.payload; + }, + setRefinerNegativeAestheticScore: ( + state, + action: PayloadAction + ) => { + state.refinerNegativeAestheticScore = action.payload; }, setRefinerStart: (state, action: PayloadAction) => { state.refinerStart = action.payload; @@ -87,7 +98,8 @@ export const { setRefinerSteps, setRefinerCFGScale, setRefinerScheduler, - 
setRefinerAestheticScore, + setRefinerPositiveAestheticScore, + setRefinerNegativeAestheticScore, setRefinerStart, } = sdxlSlice.actions; diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index a2076557b8..0bfa7c334f 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1209,10 +1209,15 @@ export type components = { */ refiner_scheduler?: string; /** - * Refiner Aesthetic Store + * Refiner Positive Aesthetic Store * @description The aesthetic score used for the refiner */ - refiner_aesthetic_store?: number; + refiner_positive_aesthetic_store?: number; + /** + * Refiner Negative Aesthetic Store + * @description The aesthetic score used for the refiner + */ + refiner_negative_aesthetic_store?: number; /** * Refiner Start * @description The start value used for refiner denoising @@ -3599,10 +3604,15 @@ export type components = { */ refiner_scheduler?: string; /** - * Refiner Aesthetic Store + * Refiner Positive Aesthetic Score * @description The aesthetic score used for the refiner */ - refiner_aesthetic_store?: number; + refiner_positive_aesthetic_score?: number; + /** + * Refiner Negative Aesthetic Score + * @description The aesthetic score used for the refiner + */ + refiner_negative_aesthetic_score?: number; /** * Refiner Start * @description The start value used for refiner denoising @@ -5781,11 +5791,11 @@ export type components = { image?: components["schemas"]["ImageField"]; }; /** - * StableDiffusionOnnxModelFormat + * ControlNetModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + ControlNetModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusion2ModelFormat * @description An enumeration. @@ -5793,23 +5803,23 @@ export type components = { */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** - * ControlNetModelFormat + * StableDiffusionXLModelFormat * @description An enumeration. * @enum {string} */ - ControlNetModelFormat: "checkpoint" | "diffusers"; + StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusionOnnxModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusion1ModelFormat * @description An enumeration. * @enum {string} */ StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; - /** - * StableDiffusionXLModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusionXLModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; From 746e099f0d765209bb09ba4776f6dbc4dd780517 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 04:04:15 +1200 Subject: [PATCH 48/67] fix: Do not do step math for refinerSteps This is probably better done on the backend or in a different way. This can cause steps to go above 1000 which is more than the set number for the model. 
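A minimal Python sketch of the arithmetic at issue, for readers following along (the names mirror refinerSteps/refinerStart from the TypeScript graph builder; the concrete values are hypothetical, chosen only to show the overflow):

    # Scaling the refiner's step count by the fraction of the schedule it
    # actually runs keeps the effective step density constant, but the
    # divisor shrinks toward zero as refiner_start approaches 1.
    refiner_steps = 20     # hypothetical slider value
    refiner_start = 0.99   # refiner handles only the last 1% of denoising
    steps = refiner_steps / (1 - min(refiner_start, 0.99))
    print(steps)  # roughly 2000, well above the model's 1000 train timesteps

A later patch in this same series (55/67) restores the scaling but clamps the result with Math.min(..., 1000).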
--- .../features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index 577f4e4b7d..adce34adf5 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -90,7 +90,7 @@ export const addSDXLRefinerToGraph = ( type: 'denoise_latents', id: SDXL_REFINER_DENOISE_LATENTS, cfg_scale: refinerCFGScale, - steps: refinerSteps / (1 - Math.min(refinerStart, 0.99)), + steps: refinerSteps, scheduler: refinerScheduler, denoising_start: refinerStart, denoising_end: 1, From 94636ddb039d9bb2eae1fb20375302d3122a7426 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 13 Aug 2023 19:31:14 +0300 Subject: [PATCH 49/67] Fix empty prompt handling --- invokeai/app/invocations/compel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 2a56bd04ab..86565366d9 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -295,7 +295,7 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): @torch.no_grad() def invoke(self, context: InvocationContext) -> CompelOutput: c1, c1_pooled, ec1 = self.run_clip_compel( - context, self.clip, self.prompt, False, "lora_te1_", zero_on_empty=False + context, self.clip, self.prompt, False, "lora_te1_", zero_on_empty=True ) if self.style.strip() == "": c2, c2_pooled, ec2 = self.run_clip_compel( From 6e0beb1ed4ccc7495924df4c1e0919c4d76d527c Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 13 Aug 2023 19:31:47 +0300 Subject: [PATCH 50/67] Fixes for second order scheduler timesteps --- invokeai/app/invocations/latent.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 8e8d2b2fc9..de9d8b37c8 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -317,6 +317,7 @@ class DenoiseLatentsInvocation(BaseInvocation): return control_data # original idea by https://github.com/AmericanPresidentJimmyCarter + # TODO: research more for second order schedulers timesteps def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end): if scheduler.config.get("cpu_only", False): device = torch.device("cpu") @@ -329,8 +330,7 @@ class DenoiseLatentsInvocation(BaseInvocation): t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start))) t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, timesteps))) timesteps = timesteps[t_start_idx:] - if scheduler.order == 2: - # TODO: research for second order schedulers timesteps + if scheduler.order == 2 and t_start_idx > 0: timesteps = timesteps[1:] # save start timestep to apply noise @@ -339,6 +339,8 @@ class DenoiseLatentsInvocation(BaseInvocation): # apply denoising_end t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end))) t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, timesteps))) + if scheduler.order == 2 and t_end_idx > 0: + t_end_idx += 1 timesteps = timesteps[:t_end_idx] # calculate step count based on scheduler order From 59ba9fc0f6c8582d80baa7ba1f913893ae0775d3 Mon Sep 17 00:00:00 2001 From: Sergey Borisov 
Date: Sun, 13 Aug 2023 19:50:16 +0300 Subject: [PATCH 51/67] Flip bits in seed for sde/ancestral schedulers to have different noise from initial --- invokeai/app/invocations/latent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index de9d8b37c8..7451213860 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -205,7 +205,8 @@ class DenoiseLatentsInvocation(BaseInvocation): # for ddim scheduler eta=0.0, # ddim_eta # for ancestral and sde schedulers - generator=torch.Generator(device=unet.device).manual_seed(seed), + # flip all bits to have noise different from initial + generator=torch.Generator(device=unet.device).manual_seed(seed ^ 0xFFFFFFFF), ) return conditioning_data From 7a8f14d595cfa1e17280028255fdef7a531ac50b Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 13 Aug 2023 19:50:48 +0300 Subject: [PATCH 52/67] Clean-up code a bit --- .../stable_diffusion/diffusers_pipeline.py | 19 +++++++++---------- .../stable_diffusion/diffusion/__init__.py | 2 +- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 516e901971..b38ebc6684 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -34,6 +34,7 @@ from .diffusion import ( AttentionMapSaver, InvokeAIDiffuserComponent, PostprocessingSettings, + BasicConditioningInfo, ) from ..util import normalize_device @@ -92,8 +93,7 @@ class AddsMaskGuidance: mask: torch.FloatTensor mask_latents: torch.FloatTensor scheduler: SchedulerMixin - noise: Optional[torch.Tensor] = None - _debug: Optional[Callable] = None + noise: torch.Tensor def __call__(self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning) -> BaseOutput: output_class = step_output.__class__ # We'll create a new one with masked data. @@ -123,14 +123,13 @@ class AddsMaskGuidance: # some schedulers expect t to be one-dimensional. # TODO: file diffusers bug about inconsistency? t = einops.repeat(t, "-> batch", batch=batch_size) - - if self.noise is not None: - mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) - + # Noise shouldn't be re-randomized between steps here. The multistep schedulers + # get very confused about what is happening from step to step when we do that. + mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) + # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already? + # mask_latents = self.scheduler.scale_model_input(mask_latents, t) mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) - if self._debug: - self._debug(masked_input, f"t={t} lerped") return masked_input @@ -202,8 +201,8 @@ class ControlNetData: @dataclass class ConditioningData: - unconditioned_embeddings: Any # TODO: type - text_embeddings: Any # TODO: type + unconditioned_embeddings: BasicConditioningInfo + text_embeddings: BasicConditioningInfo guidance_scale: Union[float, List[float]] """ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). 
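An aside on the guidance_scale documented just above: classifier-free guidance combines the unconditioned and text-conditioned predictions at each denoising step. A minimal, illustrative Python sketch of the standard formulation from the cited paper, not the pipeline's exact code path (the tensor names are hypothetical):

    import torch

    def apply_cfg(
        uncond_pred: torch.Tensor, cond_pred: torch.Tensor, guidance_scale: float
    ) -> torch.Tensor:
        # guidance_scale = 1 disables guidance; values > 1 push the result
        # toward the prompt-conditioned prediction, trading sample diversity
        # for closer prompt adherence.
        return uncond_pred + guidance_scale * (cond_pred - uncond_pred)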
diff --git a/invokeai/backend/stable_diffusion/diffusion/__init__.py b/invokeai/backend/stable_diffusion/diffusion/__init__.py index 6dd2817f29..e9d86a933a 100644 --- a/invokeai/backend/stable_diffusion/diffusion/__init__.py +++ b/invokeai/backend/stable_diffusion/diffusion/__init__.py @@ -3,4 +3,4 @@ Initialization file for invokeai.models.diffusion """ from .cross_attention_control import InvokeAICrossAttentionMixin from .cross_attention_map_saving import AttentionMapSaver -from .shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, BasicConditioningInfo, SDXLConditioningInfo From 096333ba3f9d419b9d5815b4648374b6759e981d Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 14 Aug 2023 00:20:01 +0300 Subject: [PATCH 53/67] Fix error on zero timesteps --- .../stable_diffusion/diffusers_pipeline.py | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index b38ebc6684..6268af369f 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -385,7 +385,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): orig_latents = latents.clone() batch_size = latents.shape[0] - batched_t = init_timestep.repeat(batch_size) + batched_t = init_timestep.expand(batch_size) if noise is not None: # latents = noise * self.scheduler.init_noise_sigma # it's like in t2l according to diffusers @@ -448,20 +448,19 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): self._adjust_memory_efficient_attention(latents) if additional_guidance is None: additional_guidance = [] + + batch_size = latents.shape[0] + attention_map_saver: Optional[AttentionMapSaver] = None + + if timesteps.shape[0] == 0: + return latents, attention_map_saver + extra_conditioning_info = conditioning_data.extra with self.invokeai_diffuser.custom_attention_context( self.invokeai_diffuser.model, extra_conditioning_info=extra_conditioning_info, step_count=len(self.scheduler.timesteps), ): - batch_size = latents.shape[0] - batched_t = torch.full( - (batch_size,), - timesteps[0], - dtype=timesteps.dtype, - device=self.unet.device, - ) - yield PipelineIntermediateState( step=-1, order=self.scheduler.order, @@ -470,10 +469,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): latents=latents, ) - attention_map_saver: Optional[AttentionMapSaver] = None # print("timesteps:", timesteps) for i, t in enumerate(self.progress_bar(timesteps)): - batched_t.fill_(t) + batched_t = t.expand(batch_size) step_output = self.step( batched_t, latents, From d63bb3947592193bc360662f48f56f26e2edf021 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 14 Aug 2023 00:24:38 +0300 Subject: [PATCH 54/67] Make dpmpp_sde(_k) use not random seed --- invokeai/app/invocations/latent.py | 9 ++++++++- invokeai/app/invocations/onnx.py | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 7451213860..fc7b5cd77d 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -13,7 +13,7 @@ from diffusers.models.attention_processor import ( LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) -from diffusers.schedulers import SchedulerMixin as Scheduler +from diffusers.schedulers 
import DPMSolverSDEScheduler, SchedulerMixin as Scheduler from pydantic import BaseModel, Field, validator from torchvision.transforms.functional import resize as tv_resize @@ -81,6 +81,7 @@ def get_scheduler( context: InvocationContext, scheduler_info: ModelInfo, scheduler_name: str, + seed: int, ) -> Scheduler: scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"]) orig_scheduler_info = context.services.model_manager.get_model( @@ -97,6 +98,11 @@ def get_scheduler( **scheduler_extra_config, "_backup": scheduler_config, } + + # make dpmpp_sde reproducable(seed can be passed only in initializer) + if scheduler_class is DPMSolverSDEScheduler: + scheduler_config["noise_sampler_seed"] = seed + scheduler = scheduler_class.from_config(scheduler_config) # hack copied over from generate.py @@ -421,6 +427,7 @@ class DenoiseLatentsInvocation(BaseInvocation): context=context, scheduler_info=self.unet.scheduler, scheduler_name=self.scheduler, + seed=seed, ) pipeline = self.create_pipeline(unet, scheduler) diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py index fe9a64552e..0fb1d56848 100644 --- a/invokeai/app/invocations/onnx.py +++ b/invokeai/app/invocations/onnx.py @@ -212,6 +212,7 @@ class ONNXTextToLatentsInvocation(BaseInvocation): context=context, scheduler_info=self.unet.scheduler, scheduler_name=self.scheduler, + seed=0, # TODO: refactor this node ) def torch2numpy(latent: torch.Tensor): From 75fb3f429fecfba87b088f29043d8b56b1af9385 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 09:26:01 +1200 Subject: [PATCH 55/67] re: Readd Refiner Step Math but cap max steps to 1000 --- .../features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts index adce34adf5..16256e05af 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSDXLRefinerToGraph.ts @@ -90,7 +90,7 @@ export const addSDXLRefinerToGraph = ( type: 'denoise_latents', id: SDXL_REFINER_DENOISE_LATENTS, cfg_scale: refinerCFGScale, - steps: refinerSteps, + steps: Math.min(refinerSteps / (1 - Math.min(refinerStart, 0.99)), 1000), scheduler: refinerScheduler, denoising_start: refinerStart, denoising_end: 1, From cc85c98bf3c8953454e6fe54d4f5884fe98c98c9 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 09:26:28 +1200 Subject: [PATCH 56/67] feat: Upgrade Diffusers to 0.19.3 Needed for some schedulers --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 8fb2e7aaa7..6e5b754914 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,7 @@ dependencies = [ "controlnet-aux>=0.0.6", "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "datasets", - "diffusers[torch]~=0.19.0", + "diffusers[torch]~=0.19.3", "dnspython~=2.4.0", "dynamicprompts", "easing-functions", From 550e6ef27a037f812a71227d2942630f3999d4f6 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 10:27:07 +1200 Subject: [PATCH 
57/67] re: Set the image denoise str back to 0 Bug has been fixed. No longer needed. --- invokeai/frontend/web/src/features/system/store/configSlice.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/system/store/configSlice.ts b/invokeai/frontend/web/src/features/system/store/configSlice.ts index ff7a0c0700..6cff92a136 100644 --- a/invokeai/frontend/web/src/features/system/store/configSlice.ts +++ b/invokeai/frontend/web/src/features/system/store/configSlice.ts @@ -61,7 +61,7 @@ export const initialConfigState: AppConfig = { }, img2imgStrength: { initial: 0.7, - min: 0.01, + min: 0, sliderMax: 1, inputMax: 1, fineStep: 0.01, From fecad2c0145638d6b88c1cf241b96307ae663a02 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:59:11 +1200 Subject: [PATCH 58/67] fix: SDXL Denoising Strength not plugged in correctly --- .../SDXLUnifiedCanvasTabCoreParameters.tsx | 75 +++++++++++++++++++ .../SDXLUnifiedCanvasTabParameters.tsx | 4 +- 2 files changed, 77 insertions(+), 2 deletions(-) create mode 100644 invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabCoreParameters.tsx diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabCoreParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabCoreParameters.tsx new file mode 100644 index 0000000000..7db6ccc219 --- /dev/null +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabCoreParameters.tsx @@ -0,0 +1,75 @@ +import { Box, Flex } from '@chakra-ui/react'; +import { createSelector } from '@reduxjs/toolkit'; +import { stateSelector } from 'app/store/store'; +import { useAppSelector } from 'app/store/storeHooks'; +import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; +import IAICollapse from 'common/components/IAICollapse'; +import ParamBoundingBoxSize from 'features/parameters/components/Parameters/Canvas/BoundingBox/ParamBoundingBoxSize'; +import ParamCFGScale from 'features/parameters/components/Parameters/Core/ParamCFGScale'; +import ParamIterations from 'features/parameters/components/Parameters/Core/ParamIterations'; +import ParamModelandVAEandScheduler from 'features/parameters/components/Parameters/Core/ParamModelandVAEandScheduler'; +import ParamSteps from 'features/parameters/components/Parameters/Core/ParamSteps'; +import ParamSeedFull from 'features/parameters/components/Parameters/Seed/ParamSeedFull'; +import { memo } from 'react'; +import ParamSDXLImg2ImgDenoisingStrength from './ParamSDXLImg2ImgDenoisingStrength'; + +const selector = createSelector( + stateSelector, + ({ ui, generation }) => { + const { shouldUseSliders } = ui; + const { shouldRandomizeSeed } = generation; + + const activeLabel = !shouldRandomizeSeed ? 'Manual Seed' : undefined; + + return { shouldUseSliders, activeLabel }; + }, + defaultSelectorOptions +); + +const SDXLUnifiedCanvasTabCoreParameters = () => { + const { shouldUseSliders, activeLabel } = useAppSelector(selector); + + return ( + + + {shouldUseSliders ? 
( + <> + + + + + + + + + + ) : ( + <> + + + + + + + + + + + + )} + + + + ); +}; + +export default memo(SDXLUnifiedCanvasTabCoreParameters); diff --git a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx index 6faafc6891..c6af754ad9 100644 --- a/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx +++ b/invokeai/frontend/web/src/features/sdxl/components/SDXLUnifiedCanvasTabParameters.tsx @@ -5,16 +5,16 @@ import ParamMaskAdjustmentCollapse from 'features/parameters/components/Paramete import ParamControlNetCollapse from 'features/parameters/components/Parameters/ControlNet/ParamControlNetCollapse'; import ParamNoiseCollapse from 'features/parameters/components/Parameters/Noise/ParamNoiseCollapse'; import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons'; -import UnifiedCanvasCoreParameters from 'features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasCoreParameters'; import ParamSDXLPromptArea from './ParamSDXLPromptArea'; import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; +import SDXLUnifiedCanvasTabCoreParameters from './SDXLUnifiedCanvasTabCoreParameters'; export default function SDXLUnifiedCanvasTabParameters() { return ( <> - + From 957ee6d370e32715506fbd1aeefdea03f7e73b4f Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 12:13:29 +1200 Subject: [PATCH 59/67] fix: SDXL Canvas Inpaint & Outpaint not respecting SDXL Refiner start value --- .../nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts | 4 +++- .../nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index 7d8586d09c..f38ed00bf4 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -136,7 +136,9 @@ export const buildCanvasSDXLInpaintGraph = ( steps: steps, cfg_scale: cfg_scale, scheduler: scheduler, - denoising_start: 1 - strength, + denoising_start: shouldUseSDXLRefiner + ? Math.min(refinerStart, 1 - strength) + : 1 - strength, denoising_end: shouldUseSDXLRefiner ? refinerStart : 1, }, [LATENTS_TO_IMAGE]: { diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index b5260894c4..d7904de4f9 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ -162,7 +162,9 @@ export const buildCanvasSDXLOutpaintGraph = ( steps: steps, cfg_scale: cfg_scale, scheduler: scheduler, - denoising_start: 1 - strength, + denoising_start: shouldUseSDXLRefiner + ? Math.min(refinerStart, 1 - strength) + : 1 - strength, denoising_end: shouldUseSDXLRefiner ? 
refinerStart : 1, }, [LATENTS_TO_IMAGE]: { From 3d8da67be3bef184439ef4c2103765713dfd3c11 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 14 Aug 2023 03:35:15 +0300 Subject: [PATCH 60/67] Remove callback-generator wrapper --- .../stable_diffusion/diffusers_pipeline.py | 75 ++++++------------- 1 file changed, 22 insertions(+), 53 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 6268af369f..98c3c22423 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -5,7 +5,7 @@ import inspect import math import secrets from dataclasses import dataclass, field -from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union +from typing import Any, Callable, Generic, List, Optional, Type, Union import PIL.Image import einops @@ -27,7 +27,6 @@ from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.outputs import BaseOutput from pydantic import Field from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer -from typing_extensions import ParamSpec from invokeai.app.services.config import InvokeAIAppConfig from .diffusion import ( @@ -161,33 +160,6 @@ def is_inpainting_model(unet: UNet2DConditionModel): return unet.conv_in.in_channels == 9 -CallbackType = TypeVar("CallbackType") -ReturnType = TypeVar("ReturnType") -ParamType = ParamSpec("ParamType") - - -@dataclass(frozen=True) -class GeneratorToCallbackinator(Generic[ParamType, ReturnType, CallbackType]): - """Convert a generator to a function with a callback and a return value.""" - - generator_method: Callable[ParamType, ReturnType] - callback_arg_type: Type[CallbackType] - - def __call__( - self, - *args: ParamType.args, - callback: Callable[[CallbackType], Any] = None, - **kwargs: ParamType.kwargs, - ) -> ReturnType: - result = None - for result in self.generator_method(*args, **kwargs): - if callback is not None and isinstance(result, self.callback_arg_type): - callback(result) - if result is None: - raise AssertionError("why was that an empty generator?") - return result - - @dataclass class ControlNetData: model: ControlNetModel = Field(default=None) @@ -375,10 +347,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): if init_timestep.shape[0] == 0: return latents, None - infer_latents_from_embeddings = GeneratorToCallbackinator( - self.generate_latents_from_embeddings, PipelineIntermediateState - ) - if additional_guidance is None: additional_guidance = [] @@ -417,7 +385,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise)) try: - result: PipelineIntermediateState = infer_latents_from_embeddings( + latents, attention_map_saver = self.generate_latents_from_embeddings( latents, timesteps, conditioning_data, @@ -428,13 +396,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): finally: self.invokeai_diffuser.model_forward_callback = self._unet_forward - latents = result.latents - # restore unmasked part if mask is not None: latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)) - return latents, result.attention_map_saver + return latents, attention_map_saver def generate_latents_from_embeddings( self, @@ -444,6 +410,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): *, additional_guidance: 
List[Callable] = None, control_data: List[ControlNetData] = None, + callback: Callable[[PipelineIntermediateState], None] = None, ): self._adjust_memory_efficient_attention(latents) if additional_guidance is None: @@ -461,13 +428,14 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): extra_conditioning_info=extra_conditioning_info, step_count=len(self.scheduler.timesteps), ): - yield PipelineIntermediateState( - step=-1, - order=self.scheduler.order, - total_steps=len(timesteps), - timestep=self.scheduler.config.num_train_timesteps, - latents=latents, - ) + if callback is not None: + callback(PipelineIntermediateState( + step=-1, + order=self.scheduler.order, + total_steps=len(timesteps), + timestep=self.scheduler.config.num_train_timesteps, + latents=latents, + )) # print("timesteps:", timesteps) for i, t in enumerate(self.progress_bar(timesteps)): @@ -500,15 +468,16 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # attention_map_saver = AttentionMapSaver(token_ids=attention_map_token_ids, latents_shape=latents.shape[-2:]) # self.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) - yield PipelineIntermediateState( - step=i, - order=self.scheduler.order, - total_steps=len(timesteps), - timestep=int(t), - latents=latents, - predicted_original=predicted_original, - attention_map_saver=attention_map_saver, - ) + if callback is not None: + callback(PipelineIntermediateState( + step=i, + order=self.scheduler.order, + total_steps=len(timesteps), + timestep=int(t), + latents=latents, + predicted_original=predicted_original, + attention_map_saver=attention_map_saver, + )) return latents, attention_map_saver From 58d5c61c79d34098810a09f1f4b7126afe183198 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon, 14 Aug 2023 12:55:18 +1200 Subject: [PATCH 61/67] fix: SDXL Inpaint & Outpaint using regular Img2Img strength --- .../util/graphBuilders/buildCanvasSDXLInpaintGraph.ts | 9 ++++++--- .../util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts | 9 ++++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts index f38ed00bf4..ba40a70c83 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLInpaintGraph.ts @@ -53,7 +53,6 @@ export const buildCanvasSDXLInpaintGraph = ( cfgScale: cfg_scale, scheduler, steps, - img2imgStrength: strength, iterations, seed, shouldRandomizeSeed, @@ -64,8 +63,12 @@ export const buildCanvasSDXLInpaintGraph = ( maskBlurMethod, } = state.generation; - const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } = - state.sdxl; + const { + sdxlImg2ImgDenoisingStrength: strength, + shouldUseSDXLRefiner, + refinerStart, + shouldConcatSDXLStylePrompt, + } = state.sdxl; if (!model) { log.error('No model found in state'); diff --git a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts index d7904de4f9..50a773bf50 100644 --- a/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts +++ b/invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildCanvasSDXLOutpaintGraph.ts @@ 
-59,7 +59,6 @@ export const buildCanvasSDXLOutpaintGraph = ( cfgScale: cfg_scale, scheduler, steps, - img2imgStrength: strength, iterations, seed, shouldRandomizeSeed, @@ -72,8 +71,12 @@ export const buildCanvasSDXLOutpaintGraph = ( infillMethod, } = state.generation; - const { shouldUseSDXLRefiner, refinerStart, shouldConcatSDXLStylePrompt } = - state.sdxl; + const { + sdxlImg2ImgDenoisingStrength: strength, + shouldUseSDXLRefiner, + refinerStart, + shouldConcatSDXLStylePrompt, + } = state.sdxl; if (!model) { log.error('No model found in state'); From 409e5d01bada60b0c7e29525672d6744e248fc8e Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 14 Aug 2023 05:14:05 +0300 Subject: [PATCH 62/67] Fix cpu_only schedulers(unipc) --- invokeai/app/invocations/latent.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index fc7b5cd77d..c66c9c6214 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -326,12 +326,13 @@ class DenoiseLatentsInvocation(BaseInvocation): # original idea by https://github.com/AmericanPresidentJimmyCarter # TODO: research more for second order schedulers timesteps def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end): - if scheduler.config.get("cpu_only", False): - device = torch.device("cpu") - num_inference_steps = steps - scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = scheduler.timesteps + if scheduler.config.get("cpu_only", False): + scheduler.set_timesteps(num_inference_steps, device="cpu") + timesteps = scheduler.timesteps.to(device=device) + else: + scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = scheduler.timesteps # apply denoising_start t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start))) From 511da59793bb8cbafb39422c5ee9ffb910f06340 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 14 Aug 2023 05:14:24 +0300 Subject: [PATCH 63/67] Add magic to debug --- .../stable_diffusion/diffusers_pipeline.py | 35 +++++++++++-------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 98c3c22423..82dcad96f8 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -92,7 +92,7 @@ class AddsMaskGuidance: mask: torch.FloatTensor mask_latents: torch.FloatTensor scheduler: SchedulerMixin - noise: torch.Tensor + noise: Optional[torch.Tensor] def __call__(self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning) -> BaseOutput: output_class = step_output.__class__ # We'll create a new one with masked data. @@ -124,7 +124,10 @@ class AddsMaskGuidance: t = einops.repeat(t, "-> batch", batch=batch_size) # Noise shouldn't be re-randomized between steps here. The multistep schedulers # get very confused about what is happening from step to step when we do that. - mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) + if self.noise is not None: + mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) + else: + mask_latents = self.mask_latents.clone() # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already? 
# mask_latents = self.scheduler.scale_model_input(mask_latents, t) mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) @@ -368,19 +371,21 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # TODO: we should probably pass this in so we don't have to try/finally around setting it. self.invokeai_diffuser.model_forward_callback = AddsMaskLatents(self._unet_forward, mask, orig_latents) else: - # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) - if noise is None: - noise = torch.randn( - orig_latents.shape, - dtype=torch.float32, - device="cpu", - generator=torch.Generator(device="cpu").manual_seed(seed or 0), - ).to(device=orig_latents.device, dtype=orig_latents.dtype) - - latents = self.scheduler.add_noise(latents, noise, batched_t) - latents = torch.lerp( - orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype) - ) + # TODO: debug better with or without Oo + if False: + # if no noise provided, noisify unmasked area based on seed(or 0 as fallback) + if noise is None: + noise = torch.randn( + orig_latents.shape, + dtype=torch.float32, + device="cpu", + generator=torch.Generator(device="cpu").manual_seed(seed or 0), + ).to(device=orig_latents.device, dtype=orig_latents.dtype) + + latents = self.scheduler.add_noise(latents, noise, batched_t) + latents = torch.lerp( + orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype) + ) additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise)) From b2700ffde43895e6e7055286d53175823c4ab902 Mon Sep 17 00:00:00 2001 From: Millun Atluri Date: Mon, 14 Aug 2023 12:23:07 +1000 Subject: [PATCH 64/67] Update post processing docs --- docs/assets/features/upscale-dialog.png | Bin 317552 -> 304094 bytes docs/features/POSTPROCESS.md | 99 +++--------------------- 2 files changed, 9 insertions(+), 90 deletions(-) diff --git a/docs/assets/features/upscale-dialog.png b/docs/assets/features/upscale-dialog.png index 3ba013654566e4d40075822bc5ad8e3c8e22bc98..937bda9db359a27a2b74c32fb6273ff897d25ae6 100644 GIT binary patch literal 304094 zcmeFZWmp_hwl>-{?iMV#yM+Xo#$AFt0fM``J0w7Gw*(LF9^8Wmhv4q+x0snT=Y02@ z^ZWj}JYDJPs@i+?UcT3QI}u6>lBh^;kw732si66K_C=C z3sF%eX;D!!B}Y3`3u_bLMns|pyr#+^ZkA45EW{iN=11@f%(u4?83g9b73*Ls2pv0I zU?j1gs{IiKnA2HJtW&0j(iOLN1rM2Fs9Cd;(M}mM>$Bx`;p4Q^@se>jmg;17+y?Ex z;1Dg95LE&lOUs1O5BD$?ucEy7qtG9Q4uWhBB1Gdd==lL;qPKQ5(0-aixOX-3@ABWQAnK7ijG?S6ii=kU>t?;a%TJ8v3@RQ4w`NEl9i_AA)%OK#O2j768#RIBbGubSlPZe1Qp zC8FtNz_>x2qWn(wC-^qVpSC6pKmW6|2-fCx5foQ8tP`9wnd_zlxQmR_pIa)0kf8kU z6bFp3b@&>Ra3Lbn+!@A!NNadDDSlz)JXID^>1hb3X}FdgtX~pk(p}JRj-R~-gPcC5otZzv&>Pq$ELFV=pIuy3eP+dtI5{^p}?Z5IjG)()Jkhf28KIr7FEXSTtyf7z*^Yw?TPCgGsjK3iE0T}O36`w*Ss0KZQPv62v z{>sKU6MjDB_S0e|eIxRGz{c3I5QL&-muoGC<_>lsL4a;L)u$k&Cb~{SAQ6=~ML!|Z zY+_r{)y#i-$s#rC;E)>#X2c(im26&!rDl)ChoF$dVcOXHEWzJ&b+65!tJ7u#c%h7L zvV+Mzk$T#>_sPMo2rbcpSeJBegJm2Y z`z9nyk9=6Uhj|p~iT0>t^lw;UZY3pO+{bf-YSQ`poo^-eCFR`=le#Y{q#i%_y6;R; zZypb`uSBmsDp4Qiv2=ou`hS0-t#q0*{E0YTWvt2ZCxH0d`r6EuW_@#LYwytdU;uBR z-z}kM^`zW3-yfc9Uf*vwIH!fus}ueowBGA64U914aIE14D4HQRp?%*~h*tOwlImbR zpPfy|pLc_v*t`8b`T`9)F%Aj}3Tt=D?FY>rpf&gnyZugs5+WV~W!S9A^n^H~CF>)c zGUvj%rIE0KO@F=p-H+G^#rhW>EKC;;J|lyb3!X6RbYX#0>6BwbPw;~3F{%N%&w|RKKgAfnjY=_B<5>tRknO~>Y~r4S_Qdrmvz1WPgzJ<VaE#Fv2^-bnq9W*!y-Uweo`wD@+x_Kwd_9L_ticM{^Y`NJq~|z{o=nOP|kxm>8Kj zl<3Mptk(SVQgx3*E7Mx|no9DU$L8Lq$!6K6`nk9x`W=UOkdgRVL61r`{RJIPg69zZ zklqlXiHV7d3HlbkNx-@87Sfi$kXa%rouBH2YL%K{@fVG6MNDe*<&L^rNkfDTiI&n= 
z-wTwHi)4%IrsJm{rqN5@tHG<)Gbp-5TP9k5-#3{_w*347`9r~iSRF;3W8IS_-pu{< z;C|9RlS}ly^%cVv?iJxa^xOAe+24MA>+x3hy{l!$lzV~nWbHJ{^ri*N_)?yS@&zX9 z_tXi^_bfJoHjGDa$|R$TO!9mG4EQZx!K24(`CGGZv-h&{GTgFNbN1z~XMiVTAy~-8WZuaB}VeyAaD=N3odjmd{c7!=_^>9^i94Php&iE+!ZwcTCBT;4vvfdwZ zOtPPI3~_4PI8PVNrkEZYGqLI0c#quj=%xSuu`|iIdvxiG4IPhx9WiJcuMj_0pk9Dj z&|837a5F+@ic*78Gh=FF`n27*jk(P+dN5+A6U&T`tA%UD{9CiNp|)DCTGxF3^WBKd zY^06aXEigI*)+=|D}mnTzWWl?iBQ9MN2IA+TO%XAYR6R!!hI;R@!xw3|Xx8nF) z>@wUXrqifiI9>%lxjvas(D1}z_F+b0G-1i#MT>C(X)dA}|3!@%Nn z`bOJ)%VOAZ{C3S?;o@vh@`ldF!}!=p%ud7T3H?-P>gZCh_uk1a>$TACOV-w4S}!k& zKP(J<0-Q6PB^(WmI@}8SJJcbR66Ae!SJW&_Z~kBmEZk~}O}NF_QIpx~2hn9wvyl3* z;^5v8wNL?+22{MT28kTZT+C96V+n0BB+B$W$QwROwdpqV(p~zAFr#qx33fGhMWgQk@i3i82)40&aC}bsilZy+9$4+L* z$jHkrC-Jhe*e|6y2yE!}Cx7V>?6mBvMckp;R~*T7{~WhK_4~(@MR4tp+Dr=r?vEL7 ze?xfr+IZWthu6!u+W+32sb6^Btvn_^780xwFmhy>c$$?D9gd!lGK}qGSHdI)YRr?* z6V|Nmh;3Pq5sb(W*$k)*$qrj#S|T|GAH{#AQA}T=uEtIbZ!UCK(BjB9Afj1GirG{g zuwxky6gnryvKnNKgAWl5Q41k0pix*Bk~dDX*Y0;-+>0s{Vn$-Pw}BH3;`?q>FgW7D zK{{gedx4)YHB+#~CVD;t*2!}lZfu!7<{hiq%1Li@FJsi|Z^g9L=3n-mORzD%h|l@u zVs&D=j>fsOU0)veEV<`c;BB{wlI zR%SHnHa^}Xt>W;hTmEFyzB5#Iwsi>q74S;GO$W0u+A zCjVXOxptzTnrNNy)%yO;fyV)>-RYuI$ArD{e4k=EOuDTt+RBoqv6f1ENMpYJaC}*W z&e&t$ZJBHObA7dqK?8Dw+e!HHg#LlPPRrD%ooe1%1?AZmtH*NL7HC(l*~M8q+i?rJ zMd)j(YyQ>r`kVXqN*|jCf`V?ds<>^%%m#1BQhl|G|Cx(Fii{47h*omyruFu@rx3ja6 z!XJu?LW)l!_sVB8+g1k+m3D6M7jW?6MC_8+jQ*XGrk_Elo_wI`Hz4yZ|JONGJ#Y^stRgBc4ZN!uJDQl-I+@!!qnO2X0v8bNrL>$tAYAI#14>$% z>J*rN)0VP$7wWBvEsz)=3ztGr4U z?k3h+Viq<4&j1>N?>V^n|MC3)b?3h+{*RHG|2>kOo%{X&9Qr?Q{lAB*Ihi<$+Svd! zody3}UjG{WKX3kPAV2Ht(*Fl5{>kTmTm={{h{VtOZ=neyee&tC1>{I%A*QGbyaQVH zkH1CWUpnA;eFu&oKcPRxAA&#-khGYvsyozS8+-=QOyY^(dmeMs1gGC3QaME5V8GH+ z6hVkIJYK}EvvyCb4R&r%lgD0!f-8Dn*|Yi?;hgwvJO~(4{*Ex@_rc^7>y$L&#LY7&#)n$QxuJ@PB+^Zni>SfpxVlY_MhpJ0ZTslkLv|4{{Kt09LOjG3_a{nOT@EI~kfI{731<@-oqNwKGK*yK;$EgTJR*KXmH2BOzV7UrD7aXV za-#t?dR3km>;6g&Yht2XVccYO4~q8j&Hj@?)3VvFKu3FdsAUQ3L_Tt&s>`0Yk<-_Y zEw%P!iMVLF>yPcKDXRh!Hda-SD^!-WGy@%?cQ-U0C1H0rg^yT+hSleM@0BwOk_(b# zr^l70UF7?3R|kJdu;;L4hvMOl$F6%b43EkWluJ}*Vj~HgGNf$sg`JoDP;+N)$Nafc}7OUhXAL=WU=w_vQO}+g}%_CAN75G8Jb5@ z1hsXvcSnjTo$q@LE$!@NEp;2|J7ov*xrvg(+qG z1{LR24)jY^H%L3FP-Y4^WiTj`e*crS{evbhT}29qdZQ@MooQ<)iN=I+@ohux%ty6u zJXN)h%Ey+xnHn^PU$^_Kj7_9Qm#Js%;9*6#B6NyG z*|jrrwEWFoznaLQ0%^N#?)$-q>C%IQ9@bCrmJaQ%e0o2hrfZ@riVo z&0N*CHRn>w!8!r1W&jMhr0TyEhT&x6-w>U5wp=OukA@X{i;`Hy=is%(s^7j zX6nCx2%KAQ&HH4;R$SIJl3mk7s@@Ae)-a8JKGwi49FgV!+c;qpCL{gne5}6W>O*0! 
zfSn@u-^PmQ>YQ&dG(w(hiNTVdUfD?dDzVA-Ezxs-t(_R#9P3Lc6;PePfKb=~p}ac` z{KQQ*K94Yx(h}V4T>UFsfA$usrPEITCR)Q`+S~s9Da4vx)101dLh6*n;5LJREl%og zi~FKEJ$|=t@8_|L6-qx(m6ZX%r| zwtFngP*MiGIo5z<>bPMjXNE&&hNXEMu?a)BR3S&?(>ECiGGR-214GA%-eKkoQ&fR=DC}`c zq#i5kfBx`WlnkENxG(Sk(`cfBb>d6^ zAcIgtjZMD+Cm@E#P{GiVj*r6tHh>3wulCAZOn`ezFa={;ay(5A=_P2Rvn zM4iAy380XNM6}71CnORjUJ+_2sG*_mlT=cuea1tmM09jISRNRw zt{^aD4otxrv>po#T}Uw?2#YV&?tP0`Mp2z2vH=ki6iw9bjRvXQUC=}R-^7r>s#vFT z^5l@zPq$Jc{^wAr@<58=jp+~CLMD7004z}a)52jo9TzJros*UgJXn=28QmxuIP56Py4=8Sw_Di0W@@+C63ti3xfU*d0RuX3^a#bTy;ZoFbCyHKhp_1%Xw$ zbq#R<`6_@_%vPm5JnkI+9M8>=L13MH$m_Xz0wX%92%wN|7_lV*j((w1#>k2DH?!QA z^!NM>L1!4*Gpe5Sh{wp`J*n#-5hj}f^e#V*`yN<+B}NKw6l{?_734!QH$$t4DOm?EF8`(^A0#D(M^xOv|yuOf9lR zocY}V%F=mNl7KQP#)9k#K?(i(M1i`ACoMwZX<>*x$2SUJDf3>ye=rDAf&i!}j>RQ3 znC!32r}HlduOY7PXN}3dPY$*MM$Q*Eevt#Nqn!HsdL_7>n*aqkXsDq^DhwSITy2JBxxTLi z!VN4sV!h%a(+dj{R)1^@TnHO1x@GsQ-U$zIT=^y{`+xiWxKi;?Gr>@(wc6J3e>-zS zKD3AR8?BmB;wDe>$>ln!*~cd?Z)bXx1|?n-_Pj(2%XPk&}+ zrUs`Hw^ zj9GZDnHt?(X-S%Doanwn)GaT`&3;~&e&SDz_zY9TcB0Ydb(M3x%%H2Jn@cIFsO%&g zC7YT?ojrxw4R>%t?$qnqu93;@(wBiFwQ%xe`A^#=TBC}M$e#-b)aP?HtBS1x_Zjwy zul>8x7T?dGsL)6oAwo}KGXe8X9GjB<%@qb*3}eW`C|hI3W8aMFI(qJiMUt`=JT?zh zl@?}f-pXVxeXkd~W3((Tb(*%Yv&p@9xK{SED*eusih`gtI4-L5>LXtjFc^0~5MZgo z-!zL0Eq?~orE-IyyE~7|vWlhtJpUGR3Rj_*b+We8^F`m5Q|njNKnW#>pc0KIAGeyY zBTLz$_x8PK7}L+e)AvYc(QW-{hI{%ulwuu~kT*O}oF0?>r2plauuTsm!a0h$YFkNJ zSx%)Q6@uLtQy|#p#oM*^SmF1m#e|KRT$nE1kD1feWN>vnAJqO-_dwroEPE@PT)MZ@ zai3YJ`S44C(fo42FzX}=&%h(h^$vdO$rFQ$E7piH=1(r<&2vdukH3nuYX<(F z{;+g~@bVZJn)XxX%^U4jfo%D6bFI&GPtROB;F=xN0l@gFadVnO$RJllACk%+UY`4G zQUW%QypJt-Ywz12))o?E5DThPBp^m{0JYj!bt)x9kI#aCxlcC4 zh*;?5>SS-}E~=`8TJ;#<5ig>se{~ex1Wk>}gW2Vu7SesS0$siUTP@EvUlL}_+P`st zdRdiIu#BYgByNvp$%Me(>Ce!alT3bWPF?bOP)=jBO%U8cPA=IsixDWxE`0Yb0bW(= zrj-4A=+qO_oG0ND zR$JQ^GprXw=_|*@`ZIiimuXi^%gBnd6~n*?Ijo#}`82tj_J<%CGYr2gfr_VZ?d}&d zw}d?(vB9cDu}ZLjU19)E6x@kG2{=(-#Lq(p-F0WII<5W%im64Jp6%`DeGK=`v-)F> zLaxjnqtW^FbdPq3MDt(~YrC;UJ*F_B9S#I3;BxP^c+1H`RBYK%Y7nX4at z_oCA+9_Qw&SXbQ5P;C~ghV8h@kKW1q-;)9yo^@z8SrB-CyhAN)oX3RtZV^G-9#Pi% z?zw>^&s$%cF3jC;+VQb8Elpb-|JLY*JkCVfHmqmN^Rkn#n3c65Y6l@L?2-T%yGp?E z79(wFg(};(J~=pO(P8nNaBm@E<1&vUac8fpKYMCyGy>}Bp|Wu94X^Vpx6O-lLla%B zGbf_b(PQo1WlhC6W^^L%!LIs81p~iLSykDdUS?a-b z{qb!^azl9pL81A=bVTi8!Cg7~tSXdcX{k>~Dv)QSRwAzyt%>dp-;|FRwwpZIhE!#J z>k77mDq%%dB0mZ&lz%ySK2m33)DT+K;UxMVqfR^?$#2kau$-N=%&t$L{ILG=xTd4( zQoK^*s|5S-oBvxSJ!TG)2v*K#wtNB4kHV=f8b)0~TW~h=@~TdKi`bJf@@1$nbYEfg z0fp_N0g?*qu1a*kDJDW_nEGRh2W)k@!wT#_nXlx~Y&*n-6s`(BfxS8hW)J?Ny@uVu z93#kJcatmC2GrHBWNy0iaKp6Xq6%TJ$7xl)AJvg0A0IANZpWV*Cc<-rzH{QlB*x6z z<;MK6Rqy#>MH5}PnA{&tUqGF3EU)J*2hv%Cc?cEWYrL<5F@&eGn|)k^6Bk*BVI|KjKm7adn6W-qRS^|@Bp{0du%E1z(l?R!f? 
zyYCMqsS-8igt&Gtl5HE3tP>rN%Ma0yZ11iguyQ;rLHRzGfuu7%UhQ&}c!Ljm*0if* zO{~}B&3;c|f8mF4b#7PAZ>2Y4Xmg#x-vK{N_6zVkYc{}1tU9d#RMZ$-;CY1f>e}H$WQ}q&EM3hlm1ro)q|Ls@ zOjkC_F2!v0l6$f3hOF&zf$vaYJhA<`@}Zi}OGRt)W)9o$FTBCdpT+Cuh^#Pbh`{|8 zA9wi-WOhv|$KfnA4m2i(hmz@0Rv97Y7Gy z(b`b?w|9lxrqOqo{am~I4#sm<)`5cZteBR($n+FN<}nRj*GFzbc@ORc)(ws`4@Ri) zq2A%;kQf?L@<+exC-3%}M1S7)T-A^b zjH!4T@X@l3Md?+iK~geKUX|FeYf?$I#GsfiCSEkZ@Oyr5?i?wApp^S5ZWnq+lqxDHmnKlc} z@#^LG;eB68q;FPCdc+(`7Z?60ba<_~@H%cb>Nre#7$PcBkS+TT%V(XH^%$6qwTn*w znd}oDkUY5HcGySBPDaLSb5p%EP{!>C_MUV+yC*KC`juNuie*H|b-WVs39l zCFpr*1~E*#o-3}2A6|D(#5{30xzXh>DX#nkKglY-qJk47z!iXL7-IW_a5xR;;DNf6 z44E8M2|B#bbKmzHWqn)kVS(sueo?!hy7FdYdgJ}NO0i=rAM>4?N*GS5-8^Z(i8TYI zvNP#W7^orNkZ>R=ju4k}_`~KX=IwEo=(g*G%SZZYEOLTGc)oXI>n?evAV@Nr;48F3 z2sLDo&7^ql19pXS5`Vh3#Hu4oqCGRp&faKGkHyC2|UQh9&L z;7kKXT>`rx&SBb!dN&QImlPaxG$ z!zFxaAoLRU**P$%=l@}OVegfgdFK0Ey84lH`}QjA#DU3#XK;m}qT>n*o(26zm{|@= zCfyC{gpM;Rnf`Ywdk7Np%&%T;E&hsipTECDP)y!e(zym8uV-*g8ep!lMM&>V+Q?PWY7k>=}b{BG#3!tt^>rrUqmPiQ%Iz&e3v*Sc=WIxh!o->}5I zfQ^XzYneUy%|1{83Kk(_f|5xxdOTI^C=kl=Ky5E+R-dq{ zdblK?mP4ygK2BPxH*F7I?fQ8d%jp8{9@r>Bom52xL8R4apJm6b|nAGNhd zhSem1+^-T@DNbXA;_qgJ@tyGR`(KWzJE- zP-_REvqTDG=8~-CoQT4qj$1|8KiBkUedN>_=TJ!)cmuVs__ArZum7=pOdDo&Rp2H~ z)H@ETdgNHv?d{ndr_Ei0dizV&in?sLKTB5h`b;75`p2sZA-}+V=x1Z`dIY7eT;zT$ zo!ZYt%pFrr#)sRel6&H4yelJTE0YeN^FT8;;~3hv4SslB(VBtN^|Zdy{aio4F6vX@ zr1NlZW6!~|Vty_{2FhIVKwBaE?>Gt&0P(><0K#c~n9BCs`IZvY`pXaMrMJRQvHFpA zSrRY+TREyL zh@jTn`Uo&?Nv``g27iG(@6SW&>=bXbJnrS1j(;54COv;-6!LEU1I@&jCQ+Oi{)Y07 z+t#=&FGoH-Ub|(N@iN}r`!GLQcU^7h2Q#0IF+Z6?A(wV| zn+BLN50MI>6xkIr1cWGQpvImE!~Y%@s3T|ns^y6SLK{&G_`#sBO@i^vZDn)0o~m4i zH-;0JuHM#tY!;*I+~1K91%@t2hU>aj(|uVk?Y-fbYMs1~M)FTW(HxXIErkbCi6H8L zKX}X;M@@|SwRak`(^FvwoD+q7LT196BzN6BV*U${XEFwTg`-;!YH#^-3K*gK@%GnM zQ2PfM4z%;e42Q4vYR}uIBb9t%FH&a-NTlvz;^e|Lollk-xnsTBZHw#61D;R&xujin zr=x{lerpuv>FSlt4s`8wJYLSW%BHVXQTjDDW^RfMkR!e6G8y9`--Y|;1!f1) z1s$@|oogcPFX644l%~g366BHf1=1G-UcbM{ie6`9OBUe&Z0&nYkSzf9|Me|u_>VOQC~sNLz3m9 z`xLvwD(W^YnUaWn*LWiw4{;Rm0H#p(1%Re zqKr4lO0H*n16A0UOHp?3PCSy9&y<-(OyWS)b}*QIq>Vf349x7{@P<>CNY=J%mVSIb zv8VgXJUgE&!6$(lvsLmHpL6D?R#Lk+_Ako~u^zONTCO)kgeMc_0uNW7!nE)7{2Ay5 z@`YZ~3C(}yAo=V|qU+_v2ysi9DQKvSWl_I3*~Vx#Ey0}A6D#t&dr&(>Ol+Quq=3w$ ztLdC4nE-jJ`vk@EwG(dN9OL_NuE*^PUO}@AcusE*1{1Hu}CYo=C zVO`MO61XsEf52%QIx%n;^?2DOpjQZ{hPZ6`Z*VQM#n7BPim^%t*6gq-%j+?JCE^sz zcG;T0P+c9zSF6Oz-8#AvR2TH}>;_>0nU?g=+zRhI7V$61#IsbKnMAhhQ#GuugAT6R zy3p~Z6w`0@qa~UAamSb4nDR*knlI2iDkkobJ3W7V21-n=o=%W+_>;C#3JSeOO>v=z zt*KVV9aZhxPxY!QX!GQDd?(%|#BLYWEr`aFmW4xcT8N)nZl2g;kLyxT*QSGTO1mpi_6bVh6D~Ur z4g3^fsU`d$HH{}^ADSp`pUEcS$2O91ikt`x2>#w?|CP)(T-?I z7jk^`2P!UfK6kPWbWSa*YTY*}$vAe+=~}xp{BOl#r+R2n0~e3oDMWmJWo1++$RTU8 zWMx`$aU1uWyFRv>=7Fd$Vucv^-A|6DzkBh_)NmcBbofQQR;era-3xz<{)o?h?8k2P z5GfOMndedCEXTks{pP30-#>g4O~O~49F{on8LGrByZlnauOrw`qvWJZu|6w@mUK}{ zgI|P(rQL*|xy2_idVluCj`--F3RZUUF7UY2Tla)(MT`NaDE;}lUv;6>tyMHq`(%{V zJ4#!#F=l7{nDA^mkyrjPtdbTpm|tJF2!Kn(!T^>d{Yz8@u$)cAA0qydA7n69fK5*p z9O^D!X^C?C4zhmpS#^~&Ww``N;`=k{NDjcDP3)3WRcI$&oC^3%u@8V>;UsHC6LLi+ zv7{+dQ~%MMAcOGO!)0Lta(FmTmva5^p7h+FbvS*+M6nB~t>&B4!;fY0*yQOS%|)nO z-g2 z^yE@-P9$)BOG-pE?r`2El;oko-qnV~0QpW}pBBLb;ChJPQ%CV)Z0`2j zy^MijL!SmuC;|QR3T%-@d_@6fci1>S8e3+1`bJ81kM!L< zYqp^sj>~*(Q`dMWw8i9%@w}{Q<5s|&E<89PRf2o35~he`nQ>RK{TZ|+zTP1g-d^W4tW?oFm?wQ1_1qp(`p1sK4(v*BaBl;GPO6SV z*8(!hDoo#rZ_=Fv*xsaI24qD}$+d@?9a}|WCP@RjM^uVr zDll|5G;YiF!Q~`E+I&~uK3iYaF89jYvbL&_fbG+?osCug5utjo<-Rt_9~O7$hnxHg zA;%c2Z0!Q3D($=H{)OVLE7ta#5Bm#hPgafjWR4V7jlP(Yhjb0~`W}W6%=%AFTe2uc zq~1Fp+oX~dj)a`WgcMyUg?{vVV7k@-d7X!N}nyiG-3JqFq87F`?Yn*(?`Wu`E-6} 
z#)GDQcq<>ml;L-hj_!zIyR&06x*#9m1*xmAKP^CvSgq)qgB^qVo#gNknVyizP%R5Gv+pnWdS3Ou zEDq|v50}V?(%V=08n{l>zN#8}dFPSG5nL#h;smT)4oFI!p668WmYg|0!h(#8SG1xB zFS40_{@750rSsFL-G3U|gH1W)@B2cu#sG;}mQCiNnI_dQ82!D9`M3Z4B1S%V|7>BmktEd*9(<%LuX$I`)jv}lE$Qb5&yKuQw@vV z4A}A~+BlE9+mG5nbjsJEKz_Qb`BNVXm$E_ zf5vphoL4uqM0j%Jn<+94DOG>0HECpc_8`)IWoC%<-?P_-Go<* z3p;}PV6%;Jf6irk`!>C4V&Cm%!fNc~y%}tlt(9FtkeTsGm*t6O(%*T3oWN&}-!pEj zir;PJ^qmD7Vg%$~u1F765Ds-=iVFWi;ovLc1K6iipvXUzjw%CW4$6>E_e<1|N*Jw@ z0js&g_2*bq1wS;OteVm@;|m$~8pc}9?~py;b;bXYYy~H1xlb;HQbVrxmg8R=xGt*< zsXNrsSC?n^HnkTu0F{kCKCJSuERZStTl{+bi12g0JOA3LW@EL$7 zF8T2z_<8-oV5UjnB5XDx6Fj;hXTrNI*Y4c^?k$&Tu;5nI;pjz8KUU0q9-$P)w*=4! zHwU_#(tuW|u33`iMJIK6X68#)p{Eb5Q!Xra2@(c=w$pT_UQCU7~LN<_#$Pf&8S z>UV(bxsg8>$i8iqU)B}@Tht@Kuna?ocX98tRK}#fl9FzhUuxUJs9u+{RakGAJKFK6 zHR9>Z2mwG(Xh#4;v0hQYAB~D$8z^+ID)!lNqd%G|0ygo0;>(BAb5zXio*+>n&ET`7 zPnX*4*i}GvaZQ@zZ4!w3S`w91=IQ?WN7@1$uh%tE=Cs0VHFeq`}ZFvwq$3V z4{xQ%!gk$l`bsqPT)rj$y$V-jzH~(NX42{6;^dri#a&*wWgXe$oqPo_sM}*ams0mx-cRK?=@(J;V}*Y%d-Y4fEVZ6s@1X%3vw)?vQm zI$CbE>RY?(_x!zjLb}$qYDml*3zOj|G+0&hvmqXUxl#au6EYEAF^I}CKuDn^Yvh#}#Oyz5d9Xr6y z=8KU>9)up^85$Y0C~@mB=zZI?RBB%Y^BHl4b5-_~1QB)P!xY?v=xRzalNL%|yqm$Q zD5Jq5aWK6Gp$!oM;8nDaVhu!Ih>sLxYcc~hvu$LC7~UUJSG?o{AD(I74q_j z#g!&sSC~j%m-rL3l~MRdkwHKHNMPAWmI0r7L6U}TPyM4rFA;!=od*ez7o~DrUIhgSs&dxM;=@)Ex#Lia2`QXhkbzdewp88r249z7V~Oy zwg4&sHxzosxP@l}7`I!kTpFOyjQ|5|JtH}{$a-n_3Y(fD1m|0MSz>4(8rQLN;u2jC z+e&uUG+j;@vzFvAbk+&X7!Ld+gbBe3ofN669IT~@4{CYvK>cD4M?a~>WVuB`0$j}g z)P<9Sg)g&E8fiBEJXDBH64nCRGNExdWPr&WwmN0MIM76SdfK~uG9=pueT0I6awx3v;we%OWssYN!L!<9pgOq80SK5ZqCtY`3U4~j%{Y?n{kd3 zzYg&9uEHVI)-~di+xB2^#es$gqQ0$?&QsI%Z%P-!pH_O5Q+!KRHa2H%)D8U8F4F5!&emGAw^k<>>|9TEXbi2UjY+q6*!?9NX3Qaq-x#YyQlk^!yg0Ha5mkmQM^v?{hVTz^Zh4 zDsk%{MdGiEP^x+n;p9+g1`(8|=AlcF`E%Z+l$9|-Y=S{1tu>GNWRSZJQ@g}3Ad?2_ zonlPCb?hI?>`B|#jF+3mQMPok|A3+gU;&LCItBcHpf4fn2uehsDBe`{!IhSkCDxbw zjU^GTTC{>yb1nuzRfAvAfTiO6d#RQiHj525a==>Q6B3(&#>h#xx@hM92>lWaGo)z^ zMFYX{H+`b9`SuBqVpcF{tB=S7*h+N1Qe;xpe5Du*;Y3g>KJ~@2wmGt?oiVdc(*&zd zcBzC$%G=s;g)=KK0O;iakV&N!%}k$gRD+Gw|G`*5zp-rZ6NQ)QljClFp{b`tDu%}m z!p++uBP%Ha$@i^5FdRTJ0%%?(a&y!IJ_I2aQh=;fjFk;z!H2Bm+pt-WtR#)7phd;m z)P_^agW$mv7^^ex9}z&w1GQi9Q}dnm2xqIR7a{UC;a{jkCd1d>g6bmBRyhUw#eRpoO@kYID`hE@+KQb`ytVFVckjgcyADfeC%B(eNrQw9qEoF z1|YZdrt1R>ZW}ZJIq;beo_S^4c(4-ir}!|BD;JV(`_%4mLd=N~Ko5cQXr_Y#PXt%(fYSrix9Y5gv%2vw0U#{;u7U&kwhu%#Ul$Vd!@uHwswCyB$yR z#;1oZ@XZ~7g#EzWdBKWy7j{xHcH2`VW&=Qo{fH;90IKY-IM9ud1z_<|*aKov48-g4 zpW3=eVAFjBzw~TD<(qR=!Di&JR9PuHAV>{f8;pWg0kAK!1RJ~kfR6ovXEu1+`Y^EY zTWeQ~sm(G7N@ip2X66+TgY5xviJ)AV09?s;U9cNZ9}`5};^&(to$wik=kuN;rzrfd zMsIZBX8Ux?9=C5{ap$P{z9r#ua09(GP(TV`zrA9?NWk|p!~$A=40`MFZ0%j_Ih+qD zBF7And?|ZJoU{Z)kEGzcBnm*wTK}zOuuu#Ho)S9$ghIo6!;t$qe?mtSVKG8}xyWt0 zHOHELv(Z4f+PG0wh^VvxA{YA?{Ovv9(uA=?1OV#62%jTCi3)2G`LrRa`%sCEk^%3N zOqlf?Z>>zj^HfxU|1BU({;a)*OuWya!tC5wOP4W$Y{?WF1)dGKrH6R5e4iQvPG=W@S~9ve6g(kizDHmY9%4y;M%gSyWM)yUh2v zd=Ylp8iI6QH6Pf@nx?}tQH5OL)gjJ5ov!1hp;Qsbf8t8=|FHGeQBC)8-#CNO-5@E_ z-3^W|fzguEDcvAA5RmRJM~@+0(x@~@Hv&p`3W}oszPqmbdY=0^=XcKj+&=a0dW}uT zcQo!X;Rj*C#4a; z;y#kGcJ{&|Z^`7^${^^vnim4*IIh7?aAPKMTiF|0LDsKrRPVW74YE5FX1@>7ZLR#t zavr(#u}hJI7eu~Y`bH)!GnsMK4Rsr7Tl$-`H@&}%(3J>#kk68p)RL<MOgu=QryMO~k^aK3wAXcAvbm3;u2;TKM-O_1V+HUR-;~ z6rVPsU+&*8(6Gbq`k71$BDnl+GnYWsfXT1qSww43@uaaQ{AZ;BuC9x^9>{X3Clk;2 z)F(05XFQ=E-$`rI+wo|ql;Y6;GudV9^_jWb<_I19gvZ#2M|8^es!!dF~JRmeE z1w0i>Orm&H*rjCsB~?$<`QTvQkLr-Eb<5!S?(V=0hTqk4L^%K!A)79h?9*yPs{n{x zkYa`dR-Ny>FsYaMBb)BXxUsFB##eqne<)xirWn5bQ%FcJJgYXIJjZI9Y_0yHPxnvH za_zD5kx_{DaIoE>$V@q@+(!}bk~8v7!(Vw$!EYno#uHqG|28w7J@T4rZEt;JZho03 zrc{5HR-4nsw&l6KQQxv@W~8=OL-%=Nc;dJe2^BgnK}Wf>&H&T>t20u?(WRt1=3h>G 