From 8c9764476ccec53f3e463b3162715050bc196bc6 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 27 Feb 2023 23:52:46 -0500 Subject: [PATCH 01/19] first phase of source tree restructure This is the first phase of a major restructuring of files and directories in the source tree. You will need to run `pip install -e .` before the code will work again! Here's what's in the current commit: 1) Removed a lot of dead code that dealt with checkpoint and safetensor loading. 2) Entire ckpt_generator hierarchy is now gone! 3) ldm.invoke.generator.* => invokeai.generator.* 4) ldm.models.* => invokeai.models.* 5) ldm.invoke.model_manager => invokeai.models.model_manager 6) In addition, a number of frequently-accessed classes can be imported from the invokeai.models and invokeai.generator modules: from invokeai.generator import ( Generator, PipelineIntermediateState, StableDiffusionGeneratorPipeline, infill_methods) from invokeai.models import ( ModelManager, SDLegacyType, InvokeAIDiffuserComponent, AttentionMapSaver, DDIMSampler, KSampler, PLMSSampler, PostprocessingSettings ) --- invokeai/backend/__init__.py | 1 + invokeai/backend/invoke_ai_web_server.py | 8 +- .../configs/stable-diffusion/v1-finetune.yaml | 4 +- .../stable-diffusion/v1-finetune_style.yaml | 4 +- .../stable-diffusion/v1-inference.yaml | 4 +- .../v1-inpainting-inference.yaml | 4 +- .../stable-diffusion/v1-m1-finetune.yaml | 4 +- .../stable-diffusion/v2-inference-v.yaml | 4 +- ldm/generate.py | 25 +- ldm/invoke/CLI.py | 6 +- ldm/invoke/_version.py | 2 +- .../app/services/generate_initializer.py | 2 +- ldm/invoke/args.py | 15 +- ldm/invoke/ckpt_generator/__init__.py | 4 - ldm/invoke/ckpt_generator/base.py | 335 --- ldm/invoke/ckpt_generator/embiggen.py | 501 ---- ldm/invoke/ckpt_generator/img2img.py | 97 - ldm/invoke/ckpt_generator/inpaint.py | 358 --- ldm/invoke/ckpt_generator/omnibus.py | 175 -- ldm/invoke/ckpt_generator/txt2img.py | 90 - ldm/invoke/ckpt_generator/txt2img2img.py | 182 -- ldm/invoke/ckpt_to_diffuser.py | 4 +- ldm/invoke/conditioning.py | 2 +- ldm/invoke/config/model_install_backend.py | 2 +- ldm/invoke/generator/__init__.py | 4 - ldm/invoke/generator/base.py | 374 --- ldm/invoke/generator/diffusers_pipeline.py | 765 ------ ldm/invoke/generator/embiggen.py | 501 ---- ldm/invoke/generator/img2img.py | 70 - ldm/invoke/generator/inpaint.py | 324 --- ldm/invoke/generator/omnibus.py | 173 -- ldm/invoke/generator/txt2img.py | 61 - ldm/invoke/generator/txt2img2img.py | 163 -- ldm/invoke/globals.py | 2 +- ldm/invoke/merge_diffusers.py | 2 +- ldm/invoke/model_manager.py | 1372 ---------- ldm/models/autoencoder.py | 596 ----- ldm/models/diffusion/__init__.py | 0 ldm/models/diffusion/classifier.py | 355 --- .../diffusion/cross_attention_control.py | 642 ----- .../diffusion/cross_attention_map_saving.py | 95 - ldm/models/diffusion/ddim.py | 111 - ldm/models/diffusion/ddpm.py | 2271 ----------------- ldm/models/diffusion/ksampler.py | 312 --- ldm/models/diffusion/plms.py | 146 -- ldm/models/diffusion/sampler.py | 450 ---- .../diffusion/shared_invokeai_diffusion.py | 491 ---- ldm/modules/attention.py | 2 +- pyproject.toml | 7 +- 49 files changed, 49 insertions(+), 11073 deletions(-) delete mode 100644 ldm/invoke/ckpt_generator/__init__.py delete mode 100644 ldm/invoke/ckpt_generator/base.py delete mode 100644 ldm/invoke/ckpt_generator/embiggen.py delete mode 100644 ldm/invoke/ckpt_generator/img2img.py delete mode 100644 ldm/invoke/ckpt_generator/inpaint.py delete mode 100644 ldm/invoke/ckpt_generator/omnibus.py delete mode 100644
ldm/invoke/ckpt_generator/txt2img.py delete mode 100644 ldm/invoke/ckpt_generator/txt2img2img.py delete mode 100644 ldm/invoke/generator/__init__.py delete mode 100644 ldm/invoke/generator/base.py delete mode 100644 ldm/invoke/generator/diffusers_pipeline.py delete mode 100644 ldm/invoke/generator/embiggen.py delete mode 100644 ldm/invoke/generator/img2img.py delete mode 100644 ldm/invoke/generator/inpaint.py delete mode 100644 ldm/invoke/generator/omnibus.py delete mode 100644 ldm/invoke/generator/txt2img.py delete mode 100644 ldm/invoke/generator/txt2img2img.py delete mode 100644 ldm/invoke/model_manager.py delete mode 100644 ldm/models/autoencoder.py delete mode 100644 ldm/models/diffusion/__init__.py delete mode 100644 ldm/models/diffusion/classifier.py delete mode 100644 ldm/models/diffusion/cross_attention_control.py delete mode 100644 ldm/models/diffusion/cross_attention_map_saving.py delete mode 100644 ldm/models/diffusion/ddim.py delete mode 100644 ldm/models/diffusion/ddpm.py delete mode 100644 ldm/models/diffusion/ksampler.py delete mode 100644 ldm/models/diffusion/plms.py delete mode 100644 ldm/models/diffusion/sampler.py delete mode 100644 ldm/models/diffusion/shared_invokeai_diffusion.py diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index a4f563acd7..82014807ba 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -3,3 +3,4 @@ Initialization file for invokeai.backend ''' from .invoke_ai_web_server import InvokeAIWebServer + diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py index 15bf25d5db..c93e5e2a60 100644 --- a/invokeai/backend/invoke_ai_web_server.py +++ b/invokeai/backend/invoke_ai_web_server.py @@ -27,10 +27,10 @@ from invokeai.backend.modules.parameters import parameters_to_command from ldm.generate import Generate from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, get_tokenizer -from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState -from ldm.invoke.generator.inpaint import infill_methods -from ldm.invoke.globals import Globals, global_converted_ckpts_dir -from ldm.invoke.globals import global_models_dir +from ..generator import infill_methods, PipelineIntermediateState +from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir, + global_models_dir + ) from ldm.invoke.merge_diffusers import merge_diffusion_models from ldm.invoke.pngwriter import PngWriter, retrieve_metadata diff --git a/invokeai/configs/stable-diffusion/v1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-finetune.yaml index 783a7f10ec..9fea4ae01f 100644 --- a/invokeai/configs/stable-diffusion/v1-finetune.yaml +++ b/invokeai/configs/stable-diffusion/v1-finetune.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 5.0e-03 - target: ldm.models.diffusion.ddpm.LatentDiffusion + target: invokeai.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -45,7 +45,7 @@ model: legacy: False first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL + target: invokeai.models.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss diff --git a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml index 1964d925e1..fdecca9b72 100644 --- a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml +++ b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml 
@@ -1,6 +1,6 @@ model: base_learning_rate: 5.0e-03 - target: ldm.models.diffusion.ddpm.LatentDiffusion + target: invokeai.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -44,7 +44,7 @@ model: legacy: False first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL + target: invokeai.models.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss diff --git a/invokeai/configs/stable-diffusion/v1-inference.yaml b/invokeai/configs/stable-diffusion/v1-inference.yaml index d872404f2c..913cbbf310 100644 --- a/invokeai/configs/stable-diffusion/v1-inference.yaml +++ b/invokeai/configs/stable-diffusion/v1-inference.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 1.0e-04 - target: ldm.models.diffusion.ddpm.LatentDiffusion + target: invokeai.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -53,7 +53,7 @@ model: legacy: False first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL + target: invokeai.models.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss diff --git a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml index 2d25b8a4e6..78458a7e54 100644 --- a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml +++ b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 7.5e-05 - target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion + target: invokeai.models.diffusion.ddpm.LatentInpaintDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -53,7 +53,7 @@ model: legacy: False first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL + target: invokeai.models.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss diff --git a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml index f2d5ddda02..e6db3ac067 100644 --- a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml +++ b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 5.0e-03 - target: ldm.models.diffusion.ddpm.LatentDiffusion + target: invokeai.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -45,7 +45,7 @@ model: legacy: False first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL + target: invokeai.models.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss diff --git a/invokeai/configs/stable-diffusion/v2-inference-v.yaml b/invokeai/configs/stable-diffusion/v2-inference-v.yaml index 8ec8dfbfef..6b6828fbe7 100644 --- a/invokeai/configs/stable-diffusion/v2-inference-v.yaml +++ b/invokeai/configs/stable-diffusion/v2-inference-v.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 1.0e-4 - target: ldm.models.diffusion.ddpm.LatentDiffusion + target: invokeai.models.diffusion.ddpm.LatentDiffusion params: parameterization: "v" linear_start: 0.00085 @@ -38,7 +38,7 @@ model: legacy: False first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL + target: invokeai.models.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss diff --git a/ldm/generate.py b/ldm/generate.py index 256f214b25..a639360491 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -26,21 +26,19 @@ from PIL import Image, ImageOps from pytorch_lightning import logging, seed_everything import ldm.invoke.conditioning + +from invokeai.models import ModelManager +from invokeai.generator import 
infill_methods +from invokeai.models import (DDIMSampler, KSampler, PLMSSampler ) from ldm.invoke.args import metadata_from_png from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary from ldm.invoke.conditioning import get_uc_and_c_and_ec from ldm.invoke.devices import choose_precision, choose_torch_device -from ldm.invoke.generator.inpaint import infill_methods from ldm.invoke.globals import Globals, global_cache_dir from ldm.invoke.image_util import InitImageResizer -from ldm.invoke.model_manager import ModelManager from ldm.invoke.pngwriter import PngWriter from ldm.invoke.seamless import configure_model_padding from ldm.invoke.txt2mask import Txt2Mask -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.ksampler import KSampler -from ldm.models.diffusion.plms import PLMSSampler - def fix_func(orig): if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): @@ -816,7 +814,6 @@ class Generate: hires_fix: bool = False, force_outpaint: bool = False, ): - inpainting_model_in_use = self.sampler.uses_inpainting_model() if hires_fix: return self._make_txt2img2img() @@ -824,9 +821,6 @@ class Generate: if embiggen is not None: return self._make_embiggen() - if inpainting_model_in_use: - return self._make_omnibus() - if ((init_image is not None) and (mask_image is not None)) or force_outpaint: return self._make_inpaint() @@ -903,16 +897,9 @@ class Generate: def _make_inpaint(self): return self._load_generator(".inpaint", "Inpaint") - def _make_omnibus(self): - return self._load_generator(".omnibus", "Omnibus") - def _load_generator(self, module, class_name): - if self.is_legacy_model(self.model_name): - mn = f"ldm.invoke.ckpt_generator{module}" - cn = f"Ckpt{class_name}" - else: - mn = f"ldm.invoke.generator{module}" - cn = class_name + mn = f"invokeai.generator{module}" + cn = class_name module = importlib.import_module(mn) constructor = getattr(module, cn) return constructor(self.model, self.precision) diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index b755eafed4..05aa4482d0 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -21,11 +21,11 @@ import ldm.invoke from ..generate import Generate from .args import (Args, dream_cmd_from_png, metadata_dumps, metadata_from_png) -from .generator.diffusers_pipeline import PipelineIntermediateState +from invokeai.generator import PipelineIntermediateState from .globals import Globals from .image_util import make_grid from .log import write_log -from .model_manager import ModelManager +from invokeai.models import ModelManager from .pngwriter import PngWriter, retrieve_metadata, write_metadata from .readline import Completer, get_completer from ..util import url_attachment_name @@ -64,7 +64,7 @@ def main(): Globals.internet_available = args.internet_available and check_internet() Globals.disable_xformers = not args.xformers Globals.sequential_guidance = args.sequential_guidance - Globals.ckpt_convert = args.ckpt_convert + Globals.ckpt_convert = True # always true now print(f">> Internet connectivity is {Globals.internet_available}") diff --git a/ldm/invoke/_version.py b/ldm/invoke/_version.py index 259b4f09e5..041471f37e 100644 --- a/ldm/invoke/_version.py +++ b/ldm/invoke/_version.py @@ -1 +1 @@ -__version__='2.3.1' +__version__='3.0.0+a0' diff --git a/ldm/invoke/app/services/generate_initializer.py b/ldm/invoke/app/services/generate_initializer.py index 39c0fe491e..0cfc3f39bb 100644 --- a/ldm/invoke/app/services/generate_initializer.py +++ b/ldm/invoke/app/services/generate_initializer.py @@ -3,7 
+3,7 @@ import os import sys import traceback -from ...model_manager import ModelManager +from invokeai.models import ModelManager from ...globals import Globals from ....generate import Generate diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py index b23238cf09..1a5dbe334a 100644 --- a/ldm/invoke/args.py +++ b/ldm/invoke/args.py @@ -434,6 +434,14 @@ class Args(object): deprecated_group.add_argument('--laion400m') deprecated_group.add_argument('--weights') # deprecated + deprecated_group.add_argument( + '--ckpt_convert', + action=argparse.BooleanOptionalAction, + dest='ckpt_convert', + default=True, + help='Load legacy ckpt files as diffusers (deprecated; always true now).', + ) + general_group.add_argument( '--version','-V', action='store_true', @@ -518,13 +526,6 @@ class Args(object): help=f'Set model precision. Defaults to auto selected based on device. Options: {", ".join(PRECISION_CHOICES)}', default='auto', ) - model_group.add_argument( - '--ckpt_convert', - action=argparse.BooleanOptionalAction, - dest='ckpt_convert', - default=False, - help='Load legacy ckpt files as diffusers. Pass --no-ckpt-convert to inhibit this behavior', - ) model_group.add_argument( '--internet', action=argparse.BooleanOptionalAction, diff --git a/ldm/invoke/ckpt_generator/__init__.py b/ldm/invoke/ckpt_generator/__init__.py deleted file mode 100644 index d25e192149..0000000000 --- a/ldm/invoke/ckpt_generator/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -''' -Initialization file for the ldm.invoke.generator package -''' -from .base import CkptGenerator diff --git a/ldm/invoke/ckpt_generator/base.py b/ldm/invoke/ckpt_generator/base.py deleted file mode 100644 index 520b35612d..0000000000 --- a/ldm/invoke/ckpt_generator/base.py +++ /dev/null @@ -1,335 +0,0 @@ -''' -Base class for ldm.invoke.ckpt_generator.* -including img2img, txt2img, and inpaint - -THESE MODULES ARE TRANSITIONAL AND WILL BE REMOVED AT A FUTURE DATE -WHEN LEGACY CKPT MODEL SUPPORT IS DISCONTINUED. 
-''' -import torch -import numpy as np -import random -import os -import os.path as osp -import traceback -from tqdm import tqdm, trange -from PIL import Image, ImageFilter, ImageChops -import cv2 as cv -from einops import rearrange, repeat -from pathlib import Path -from pytorch_lightning import seed_everything -import invokeai.assets.web as web_assets -from ldm.invoke.devices import choose_autocast -from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver -from ldm.util import rand_perlin_2d - -downsampling = 8 -CAUTION_IMG = 'caution.png' - -class CkptGenerator(): - def __init__(self, model, precision): - self.model = model - self.precision = precision - self.seed = None - self.latent_channels = model.channels - self.downsampling_factor = downsampling # BUG: should come from model or config - self.safety_checker = None - self.perlin = 0.0 - self.threshold = 0 - self.variation_amount = 0 - self.with_variations = [] - self.use_mps_noise = False - self.free_gpu_mem = None - self.caution_img = None - - # this is going to be overridden in img2img.py, txt2img.py and inpaint.py - def get_make_image(self,prompt,**kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - """ - raise NotImplementedError("image_iterator() must be implemented in a descendent class") - - def set_variation(self, seed, variation_amount, with_variations): - self.seed = seed - self.variation_amount = variation_amount - self.with_variations = with_variations - - def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None, - image_callback=None, step_callback=None, threshold=0.0, perlin=0.0, - safety_checker:dict=None, - attention_maps_callback = None, - free_gpu_mem: bool=False, - **kwargs): - scope = choose_autocast(self.precision) - self.safety_checker = safety_checker - self.free_gpu_mem = free_gpu_mem - attention_maps_images = [] - attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image()) - make_image = self.get_make_image( - prompt, - sampler = sampler, - init_image = init_image, - width = width, - height = height, - step_callback = step_callback, - threshold = threshold, - perlin = perlin, - attention_maps_callback = attention_maps_callback, - **kwargs - ) - results = [] - seed = seed if seed is not None and seed >= 0 else self.new_seed() - first_seed = seed - seed, initial_noise = self.generate_initial_noise(seed, width, height) - - # There used to be an additional self.model.ema_scope() here, but it breaks - # the inpaint-1.5 model. Not sure what it did.... ? - with scope(self.model.device.type): - for n in trange(iterations, desc='Generating'): - x_T = None - if self.variation_amount > 0: - seed_everything(seed) - target_noise = self.get_noise(width,height) - x_T = self.slerp(self.variation_amount, initial_noise, target_noise) - elif initial_noise is not None: - # i.e. 
we specified particular variations - x_T = initial_noise - else: - seed_everything(seed) - try: - x_T = self.get_noise(width,height) - except: - print('** An error occurred while getting initial noise **') - print(traceback.format_exc()) - - image = make_image(x_T) - - if self.safety_checker is not None: - image = self.safety_check(image) - - results.append([image, seed]) - - if image_callback is not None: - attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1] - image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image) - - seed = self.new_seed() - - return results - - def sample_to_image(self,samples)->Image.Image: - """ - Given samples returned from a sampler, converts - it into a PIL Image - """ - x_samples = self.model.decode_first_stage(samples) - x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) - if len(x_samples) != 1: - raise Exception( - f'>> expected to get a single image, but got {len(x_samples)}') - x_sample = 255.0 * rearrange( - x_samples[0].cpu().numpy(), 'c h w -> h w c' - ) - return Image.fromarray(x_sample.astype(np.uint8)) - - # write an approximate RGB image from latent samples for a single step to PNG - - def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image: - if init_image is None or init_mask is None: - return result - - # Get the original alpha channel of the mask if there is one. - # Otherwise it is some other black/white image format ('1', 'L' or 'RGB') - pil_init_mask = init_mask.getchannel('A') if init_mask.mode == 'RGBA' else init_mask.convert('L') - pil_init_image = init_image.convert('RGBA') # Add an alpha channel if one doesn't exist - - # Build an image with only visible pixels from source to use as reference for color-matching. 
- init_rgb_pixels = np.asarray(init_image.convert('RGB'), dtype=np.uint8) - init_a_pixels = np.asarray(pil_init_image.getchannel('A'), dtype=np.uint8) - init_mask_pixels = np.asarray(pil_init_mask, dtype=np.uint8) - - # Get numpy version of result - np_image = np.asarray(result, dtype=np.uint8) - - # Mask and calculate mean and standard deviation - mask_pixels = init_a_pixels * init_mask_pixels > 0 - np_init_rgb_pixels_masked = init_rgb_pixels[mask_pixels, :] - np_image_masked = np_image[mask_pixels, :] - - if np_init_rgb_pixels_masked.size > 0: - init_means = np_init_rgb_pixels_masked.mean(axis=0) - init_std = np_init_rgb_pixels_masked.std(axis=0) - gen_means = np_image_masked.mean(axis=0) - gen_std = np_image_masked.std(axis=0) - - # Color correct - np_matched_result = np_image.copy() - np_matched_result[:,:,:] = (((np_matched_result[:,:,:].astype(np.float32) - gen_means[None,None,:]) / gen_std[None,None,:]) * init_std[None,None,:] + init_means[None,None,:]).clip(0, 255).astype(np.uint8) - matched_result = Image.fromarray(np_matched_result, mode='RGB') - else: - matched_result = Image.fromarray(np_image, mode='RGB') - - # Blur the mask out (into init image) by specified amount - if mask_blur_radius > 0: - nm = np.asarray(pil_init_mask, dtype=np.uint8) - nmd = cv.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2)) - pmd = Image.fromarray(nmd, mode='L') - blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius)) - else: - blurred_init_mask = pil_init_mask - - multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1]) - - # Paste original on color-corrected generation (using blurred mask) - matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask) - return matched_result - - - - def sample_to_lowres_estimated_image(self,samples): - # origingally adapted from code by @erucipe and @keturn here: - # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 - - # these updated numbers for v1.5 are from @torridgristle - v1_5_latent_rgb_factors = torch.tensor([ - # R G B - [ 0.3444, 0.1385, 0.0670], # L1 - [ 0.1247, 0.4027, 0.1494], # L2 - [-0.3192, 0.2513, 0.2103], # L3 - [-0.1307, -0.1874, -0.7445] # L4 - ], dtype=samples.dtype, device=samples.device) - - latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors - latents_ubyte = (((latent_image + 1) / 2) - .clamp(0, 1) # change scale from -1..1 to 0..1 - .mul(0xFF) # to 0..255 - .byte()).cpu() - - return Image.fromarray(latents_ubyte.numpy()) - - def generate_initial_noise(self, seed, width, height): - initial_noise = None - if self.variation_amount > 0 or len(self.with_variations) > 0: - # use fixed initial noise plus random noise per iteration - seed_everything(seed) - initial_noise = self.get_noise(width,height) - for v_seed, v_weight in self.with_variations: - seed = v_seed - seed_everything(seed) - next_noise = self.get_noise(width,height) - initial_noise = self.slerp(v_weight, initial_noise, next_noise) - if self.variation_amount > 0: - random.seed() # reset RNG to an actually random state, so we can get a random seed for variations - seed = random.randrange(0,np.iinfo(np.uint32).max) - return (seed, initial_noise) - else: - return (seed, None) - - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height): - """ - Returns a tensor filled with random numbers, either form a normal distribution - (txt2img) or from the latent image (img2img, inpaint) - 
""" - raise NotImplementedError("get_noise() must be implemented in a descendent class") - - def get_perlin_noise(self,width,height): - fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device - return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device) - - def new_seed(self): - self.seed = random.randrange(0, np.iinfo(np.uint32).max) - return self.seed - - def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995): - ''' - Spherical linear interpolation - Args: - t (float/np.ndarray): Float value between 0.0 and 1.0 - v0 (np.ndarray): Starting vector - v1 (np.ndarray): Final vector - DOT_THRESHOLD (float): Threshold for considering the two vectors as - colineal. Not recommended to alter this. - Returns: - v2 (np.ndarray): Interpolation vector between v0 and v1 - ''' - inputs_are_torch = False - if not isinstance(v0, np.ndarray): - inputs_are_torch = True - v0 = v0.detach().cpu().numpy() - if not isinstance(v1, np.ndarray): - inputs_are_torch = True - v1 = v1.detach().cpu().numpy() - - dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) - if np.abs(dot) > DOT_THRESHOLD: - v2 = (1 - t) * v0 + t * v1 - else: - theta_0 = np.arccos(dot) - sin_theta_0 = np.sin(theta_0) - theta_t = theta_0 * t - sin_theta_t = np.sin(theta_t) - s0 = np.sin(theta_0 - theta_t) / sin_theta_0 - s1 = sin_theta_t / sin_theta_0 - v2 = s0 * v0 + s1 * v1 - - if inputs_are_torch: - v2 = torch.from_numpy(v2).to(self.model.device) - - return v2 - - def safety_check(self,image:Image.Image): - ''' - If the CompViz safety checker flags an NSFW image, we - blur it out. - ''' - import diffusers - - checker = self.safety_checker['checker'] - extractor = self.safety_checker['extractor'] - features = extractor([image], return_tensors="pt") - features.to(self.model.device) - - # unfortunately checker requires the numpy version, so we have to convert back - x_image = np.array(image).astype(np.float32) / 255.0 - x_image = x_image[None].transpose(0, 3, 1, 2) - - diffusers.logging.set_verbosity_error() - checked_image, has_nsfw_concept = checker(images=x_image, clip_input=features.pixel_values) - if has_nsfw_concept[0]: - print('** An image with potential non-safe content has been detected. A blurred image will be returned. **') - return self.blur(image) - else: - return image - - def blur(self,input): - blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) - try: - caution = self.get_caution_img() - if caution: - blurry.paste(caution,(0,0),caution) - except FileNotFoundError: - pass - return blurry - - def get_caution_img(self): - path = None - if self.caution_img: - return self.caution_img - path = Path(web_assets.__path__[0]) / CAUTION_IMG - caution = Image.open(path) - self.caution_img = caution.resize((caution.width // 2, caution.height //2)) - return self.caution_img - - # this is a handy routine for debugging use. Given a generated sample, - # convert it into a PNG image and store it at the indicated path - def save_sample(self, sample, filepath): - image = self.sample_to_image(sample) - dirname = os.path.dirname(filepath) or '.' 
- if not os.path.exists(dirname): - print(f'** creating directory {dirname}') - os.makedirs(dirname, exist_ok=True) - image.save(filepath,'PNG') - - def torch_dtype(self)->torch.dtype: - return torch.float16 if self.precision == 'float16' else torch.float32 diff --git a/ldm/invoke/ckpt_generator/embiggen.py b/ldm/invoke/ckpt_generator/embiggen.py deleted file mode 100644 index 0b43d3d19b..0000000000 --- a/ldm/invoke/ckpt_generator/embiggen.py +++ /dev/null @@ -1,501 +0,0 @@ -''' -ldm.invoke.ckpt_generator.embiggen descends from ldm.invoke.ckpt_generator -and generates with ldm.invoke.ckpt_generator.img2img -''' - -import torch -import numpy as np -from tqdm import trange -from PIL import Image -from ldm.invoke.ckpt_generator.base import CkptGenerator -from ldm.invoke.ckpt_generator.img2img import CkptImg2Img -from ldm.invoke.devices import choose_autocast -from ldm.models.diffusion.ddim import DDIMSampler - -class CkptEmbiggen(CkptGenerator): - def __init__(self, model, precision): - super().__init__(model, precision) - self.init_latent = None - - # Replace generate because Embiggen doesn't need/use most of what it does normallly - def generate(self,prompt,iterations=1,seed=None, - image_callback=None, step_callback=None, - **kwargs): - - scope = choose_autocast(self.precision) - make_image = self.get_make_image( - prompt, - step_callback = step_callback, - **kwargs - ) - results = [] - seed = seed if seed else self.new_seed() - - # Noise will be generated by the Img2Img generator when called - with scope(self.model.device.type), self.model.ema_scope(): - for n in trange(iterations, desc='Generating'): - # make_image will call Img2Img which will do the equivalent of get_noise itself - image = make_image() - results.append([image, seed]) - if image_callback is not None: - image_callback(image, seed, prompt_in=prompt) - seed = self.new_seed() - return results - - @torch.no_grad() - def get_make_image( - self, - prompt, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - init_img, - strength, - width, - height, - embiggen, - embiggen_tiles, - step_callback=None, - **kwargs - ): - """ - Returns a function returning an image derived from the prompt and multi-stage twice-baked potato layering over the img2img on the initial image - Return value depends on the seed at the time you call it - """ - assert not sampler.uses_inpainting_model(), "--embiggen is not supported by inpainting models" - - # Construct embiggen arg array, and sanity check arguments - if embiggen == None: # embiggen can also be called with just embiggen_tiles - embiggen = [1.0] # If not specified, assume no scaling - elif embiggen[0] < 0: - embiggen[0] = 1.0 - print( - '>> Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !') - if len(embiggen) < 2: - embiggen.append(0.75) - elif embiggen[1] > 1.0 or embiggen[1] < 0: - embiggen[1] = 0.75 - print('>> Embiggen upscaling strength for ESRGAN must be between 0 and 1, fell back to the default of 0.75 !') - if len(embiggen) < 3: - embiggen.append(0.25) - elif embiggen[2] < 0: - embiggen[2] = 0.25 - print('>> Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !') - - # Convert tiles from their user-freindly count-from-one to count-from-zero, because we need to do modulo math - # and then sort them, because... people. 
- if embiggen_tiles: - embiggen_tiles = list(map(lambda n: n-1, embiggen_tiles)) - embiggen_tiles.sort() - - if strength >= 0.5: - print(f'* WARNING: Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45.') - - # Prep img2img generator, since we wrap over it - gen_img2img = CkptImg2Img(self.model,self.precision) - - # Open original init image (not a tensor) to manipulate - initsuperimage = Image.open(init_img) - - with Image.open(init_img) as img: - initsuperimage = img.convert('RGB') - - # Size of the target super init image in pixels - initsuperwidth, initsuperheight = initsuperimage.size - - # Increase by scaling factor if not already resized, using ESRGAN as able - if embiggen[0] != 1.0: - initsuperwidth = round(initsuperwidth*embiggen[0]) - initsuperheight = round(initsuperheight*embiggen[0]) - if embiggen[1] > 0: # No point in ESRGAN upscaling if strength is set zero - from ldm.invoke.restoration.realesrgan import ESRGAN - esrgan = ESRGAN() - print( - f'>> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}') - if embiggen[0] > 2: - initsuperimage = esrgan.process( - initsuperimage, - embiggen[1], # upscale strength - self.seed, - 4, # upscale scale - ) - else: - initsuperimage = esrgan.process( - initsuperimage, - embiggen[1], # upscale strength - self.seed, - 2, # upscale scale - ) - # We could keep recursively re-running ESRGAN for a requested embiggen[0] larger than 4x - # but from personal experiance it doesn't greatly improve anything after 4x - # Resize to target scaling factor resolution - initsuperimage = initsuperimage.resize( - (initsuperwidth, initsuperheight), Image.Resampling.LANCZOS) - - # Use width and height as tile widths and height - # Determine buffer size in pixels - if embiggen[2] < 1: - if embiggen[2] < 0: - embiggen[2] = 0 - overlap_size_x = round(embiggen[2] * width) - overlap_size_y = round(embiggen[2] * height) - else: - overlap_size_x = round(embiggen[2]) - overlap_size_y = round(embiggen[2]) - - # With overall image width and height known, determine how many tiles we need - def ceildiv(a, b): - return -1 * (-a // b) - - # X and Y needs to be determined independantly (we may have savings on one based on the buffer pixel count) - # (initsuperwidth - width) is the area remaining to the right that we need to layers tiles to fill - # (width - overlap_size_x) is how much new we can fill with a single tile - emb_tiles_x = 1 - emb_tiles_y = 1 - if (initsuperwidth - width) > 0: - emb_tiles_x = ceildiv(initsuperwidth - width, - width - overlap_size_x) + 1 - if (initsuperheight - height) > 0: - emb_tiles_y = ceildiv(initsuperheight - height, - height - overlap_size_y) + 1 - # Sanity - assert emb_tiles_x > 1 or emb_tiles_y > 1, f'ERROR: Based on the requested dimensions of {initsuperwidth}x{initsuperheight} and tiles of {width}x{height} you don\'t need to Embiggen! Check your arguments.' 
- - # Prep alpha layers -------------- - # https://stackoverflow.com/questions/69321734/how-to-create-different-transparency-like-gradient-with-python-pil - # agradientL is Left-side transparent - agradientL = Image.linear_gradient('L').rotate( - 90).resize((overlap_size_x, height)) - # agradientT is Top-side transparent - agradientT = Image.linear_gradient('L').resize((width, overlap_size_y)) - # radial corner is the left-top corner, made full circle then cut to just the left-top quadrant - agradientC = Image.new('L', (256, 256)) - for y in range(256): - for x in range(256): - # Find distance to lower right corner (numpy takes arrays) - distanceToLR = np.sqrt([(255 - x) ** 2 + (255 - y) ** 2])[0] - # Clamp values to max 255 - if distanceToLR > 255: - distanceToLR = 255 - #Place the pixel as invert of distance - agradientC.putpixel((x, y), round(255 - distanceToLR)) - - # Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges - # Fits for a left-fading gradient on the bottom side and full opacity on the right side. - agradientAsymC = Image.new('L', (256, 256)) - for y in range(256): - for x in range(256): - value = round(max(0, x-(255-y)) * (255 / max(1,y))) - #Clamp values - value = max(0, value) - value = min(255, value) - agradientAsymC.putpixel((x, y), value) - - # Create alpha layers default fully white - alphaLayerL = Image.new("L", (width, height), 255) - alphaLayerT = Image.new("L", (width, height), 255) - alphaLayerLTC = Image.new("L", (width, height), 255) - # Paste gradients into alpha layers - alphaLayerL.paste(agradientL, (0, 0)) - alphaLayerT.paste(agradientT, (0, 0)) - alphaLayerLTC.paste(agradientL, (0, 0)) - alphaLayerLTC.paste(agradientT, (0, 0)) - alphaLayerLTC.paste(agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0)) - # make masks with an asymmetric upper-right corner so when the curved transparent corner of the next tile - # to its right is placed it doesn't reveal a hard trailing semi-transparent edge in the overlapping space - alphaLayerTaC = alphaLayerT.copy() - alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - alphaLayerLTaC = alphaLayerLTC.copy() - alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - - if embiggen_tiles: - # Individual unconnected sides - alphaLayerR = Image.new("L", (width, height), 255) - alphaLayerR.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerB = Image.new("L", (width, height), 255) - alphaLayerB.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerTB = Image.new("L", (width, height), 255) - alphaLayerTB.paste(agradientT, (0, 0)) - alphaLayerTB.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerLR = Image.new("L", (width, height), 255) - alphaLayerLR.paste(agradientL, (0, 0)) - alphaLayerLR.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - - # Sides and corner Layers - alphaLayerRBC = Image.new("L", (width, height), 255) - alphaLayerRBC.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerRBC.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerRBC.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) - alphaLayerLBC = Image.new("L", (width, height), 255) - alphaLayerLBC.paste(agradientL, (0, 0)) - alphaLayerLBC.paste(agradientT.rotate( - 180), (0, 
height - overlap_size_y)) - alphaLayerLBC.paste(agradientC.rotate(90).resize( - (overlap_size_x, overlap_size_y)), (0, height - overlap_size_y)) - alphaLayerRTC = Image.new("L", (width, height), 255) - alphaLayerRTC.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerRTC.paste(agradientT, (0, 0)) - alphaLayerRTC.paste(agradientC.rotate(270).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - - # All but X layers - alphaLayerABT = Image.new("L", (width, height), 255) - alphaLayerABT.paste(alphaLayerLBC, (0, 0)) - alphaLayerABT.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerABT.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) - alphaLayerABL = Image.new("L", (width, height), 255) - alphaLayerABL.paste(alphaLayerRTC, (0, 0)) - alphaLayerABL.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerABL.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) - alphaLayerABR = Image.new("L", (width, height), 255) - alphaLayerABR.paste(alphaLayerLBC, (0, 0)) - alphaLayerABR.paste(agradientT, (0, 0)) - alphaLayerABR.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) - alphaLayerABB = Image.new("L", (width, height), 255) - alphaLayerABB.paste(alphaLayerRTC, (0, 0)) - alphaLayerABB.paste(agradientL, (0, 0)) - alphaLayerABB.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) - - # All-around layer - alphaLayerAA = Image.new("L", (width, height), 255) - alphaLayerAA.paste(alphaLayerABT, (0, 0)) - alphaLayerAA.paste(agradientT, (0, 0)) - alphaLayerAA.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) - alphaLayerAA.paste(agradientC.rotate(270).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - - # Clean up temporary gradients - del agradientL - del agradientT - del agradientC - - def make_image(): - # Make main tiles ------------------------------------------------- - if embiggen_tiles: - print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...') - else: - print( - f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...') - - emb_tile_store = [] - # Although we could use the same seed for every tile for determinism, at higher strengths this may - # produce duplicated structures for each tile and make the tiling effect more obvious - # instead track and iterate a local seed we pass to Img2Img - seed = self.seed - seedintlimit = np.iinfo(np.uint32).max - 1 # only retreive this one from numpy - - for tile in range(emb_tiles_x * emb_tiles_y): - # Don't iterate on first tile - if tile != 0: - if seed < seedintlimit: - seed += 1 - else: - seed = 0 - - # Determine if this is a re-run and replace - if embiggen_tiles and not tile in embiggen_tiles: - continue - # Get row and column entries - emb_row_i = tile // emb_tiles_x - emb_column_i = tile % emb_tiles_x - # Determine bounds to cut up the init image - # Determine upper-left point - if emb_column_i + 1 == emb_tiles_x: - left = initsuperwidth - width - else: - left = round(emb_column_i * (width - overlap_size_x)) - if emb_row_i + 1 == emb_tiles_y: - top = initsuperheight - height - else: - top = round(emb_row_i * (height - overlap_size_y)) - right = left + width - bottom = top + height - - # Cropped image of above dimension (does not modify the original) - newinitimage = initsuperimage.crop((left, top, right, 
bottom)) - # DEBUG: - # newinitimagepath = init_img[0:-4] + f'_emb_Ti{tile}.png' - # newinitimage.save(newinitimagepath) - - if embiggen_tiles: - print( - f'Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)') - else: - print( - f'Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles') - - # create a torch tensor from an Image - newinitimage = np.array( - newinitimage).astype(np.float32) / 255.0 - newinitimage = newinitimage[None].transpose(0, 3, 1, 2) - newinitimage = torch.from_numpy(newinitimage) - newinitimage = 2.0 * newinitimage - 1.0 - newinitimage = newinitimage.to(self.model.device) - - tile_results = gen_img2img.generate( - prompt, - iterations = 1, - seed = seed, - sampler = DDIMSampler(self.model, device=self.model.device), - steps = steps, - cfg_scale = cfg_scale, - conditioning = conditioning, - ddim_eta = ddim_eta, - image_callback = None, # called only after the final image is generated - step_callback = step_callback, # called after each intermediate image is generated - width = width, - height = height, - init_image = newinitimage, # notice that init_image is different from init_img - mask_image = None, - strength = strength, - ) - - emb_tile_store.append(tile_results[0][0]) - # DEBUG (but, also has other uses), worth saving if you want tiles without a transparency overlap to manually composite - # emb_tile_store[-1].save(init_img[0:-4] + f'_emb_To{tile}.png') - del newinitimage - - # Sanity check we have them all - if len(emb_tile_store) == (emb_tiles_x * emb_tiles_y) or (embiggen_tiles != [] and len(emb_tile_store) == len(embiggen_tiles)): - outputsuperimage = Image.new( - "RGBA", (initsuperwidth, initsuperheight)) - if embiggen_tiles: - outputsuperimage.alpha_composite( - initsuperimage.convert('RGBA'), (0, 0)) - for tile in range(emb_tiles_x * emb_tiles_y): - if embiggen_tiles: - if tile in embiggen_tiles: - intileimage = emb_tile_store.pop(0) - else: - continue - else: - intileimage = emb_tile_store[tile] - intileimage = intileimage.convert('RGBA') - # Get row and column entries - emb_row_i = tile // emb_tiles_x - emb_column_i = tile % emb_tiles_x - if emb_row_i == 0 and emb_column_i == 0 and not embiggen_tiles: - left = 0 - top = 0 - else: - # Determine upper-left point - if emb_column_i + 1 == emb_tiles_x: - left = initsuperwidth - width - else: - left = round(emb_column_i * - (width - overlap_size_x)) - if emb_row_i + 1 == emb_tiles_y: - top = initsuperheight - height - else: - top = round(emb_row_i * (height - overlap_size_y)) - # Handle gradients for various conditions - # Handle emb_rerun case - if embiggen_tiles: - # top of image - if emb_row_i == 0: - if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) not in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerB) - # Otherwise do nothing on this tile - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerR) - else: - intileimage.putalpha(alphaLayerRBC) - elif emb_column_i == emb_tiles_x - 1: - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerL) - else: - intileimage.putalpha(alphaLayerLBC) - else: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerL) - else: - intileimage.putalpha(alphaLayerLBC) - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerLR) - else: - 
intileimage.putalpha(alphaLayerABT) - # bottom of image - elif emb_row_i == emb_tiles_y - 1: - if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - intileimage.putalpha(alphaLayerTaC) - else: - intileimage.putalpha(alphaLayerRTC) - elif emb_column_i == emb_tiles_x - 1: - # No tiles to look ahead to - intileimage.putalpha(alphaLayerLTC) - else: - if (tile+1) in embiggen_tiles: # Look-ahead right - intileimage.putalpha(alphaLayerLTaC) - else: - intileimage.putalpha(alphaLayerABB) - # vertical middle of image - else: - if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerTaC) - else: - intileimage.putalpha(alphaLayerTB) - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerRTC) - else: - intileimage.putalpha(alphaLayerABL) - elif emb_column_i == emb_tiles_x - 1: - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerLTC) - else: - intileimage.putalpha(alphaLayerABR) - else: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerLTaC) - else: - intileimage.putalpha(alphaLayerABR) - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerABB) - else: - intileimage.putalpha(alphaLayerAA) - # Handle normal tiling case (much simpler - since we tile left to right, top to bottom) - else: - if emb_row_i == 0 and emb_column_i >= 1: - intileimage.putalpha(alphaLayerL) - elif emb_row_i >= 1 and emb_column_i == 0: - if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right - intileimage.putalpha(alphaLayerT) - else: - intileimage.putalpha(alphaLayerTaC) - else: - if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right - intileimage.putalpha(alphaLayerLTC) - else: - intileimage.putalpha(alphaLayerLTaC) - # Layer tile onto final image - outputsuperimage.alpha_composite(intileimage, (left, top)) - else: - print(f'Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.') - - # after internal loops and patching up return Embiggen image - return outputsuperimage - # end of function declaration - return make_image diff --git a/ldm/invoke/ckpt_generator/img2img.py b/ldm/invoke/ckpt_generator/img2img.py deleted file mode 100644 index e1f12b542e..0000000000 --- a/ldm/invoke/ckpt_generator/img2img.py +++ /dev/null @@ -1,97 +0,0 @@ -''' -ldm.invoke.ckpt_generator.img2img descends from ldm.invoke.generator -''' - -import torch -import numpy as np -import PIL -from torch import Tensor -from PIL import Image -from ldm.invoke.devices import choose_autocast -from ldm.invoke.ckpt_generator.base import CkptGenerator -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent - -class CkptImg2Img(CkptGenerator): - def __init__(self, model, precision): - super().__init__(model, precision) - self.init_latent = None # by get_noise() - - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,init_image,strength,step_callback=None,threshold=0.0,perlin=0.0,**kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it. 
- """ - self.perlin = perlin - - sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - if isinstance(init_image, PIL.Image.Image): - init_image = self._image_to_tensor(init_image.convert('RGB')) - - scope = choose_autocast(self.precision) - with scope(self.model.device.type): - self.init_latent = self.model.get_first_stage_encoding( - self.model.encode_first_stage(init_image) - ) # move to latent space - - t_enc = int(strength * steps) - uc, c, extra_conditioning_info = conditioning - - def make_image(x_T): - # encode (scaled latent) - z_enc = sampler.stochastic_encode( - self.init_latent, - torch.tensor([t_enc - 1]).to(self.model.device), - noise=x_T - ) - - if self.free_gpu_mem and self.model.model.device != self.model.device: - self.model.model.to(self.model.device) - - # decode it - samples = sampler.decode( - z_enc, - c, - t_enc, - img_callback = step_callback, - unconditional_guidance_scale=cfg_scale, - unconditional_conditioning=uc, - init_latent = self.init_latent, # changes how noising is performed in ksampler - extra_conditioning_info = extra_conditioning_info, - all_timesteps_count = steps - ) - - if self.free_gpu_mem: - self.model.model.to("cpu") - - return self.sample_to_image(samples) - - return make_image - - def get_noise(self,width,height): - device = self.model.device - init_latent = self.init_latent - assert init_latent is not None,'call to get_noise() when init_latent not set' - if device.type == 'mps': - x = torch.randn_like(init_latent, device='cpu').to(device) - else: - x = torch.randn_like(init_latent, device=device) - if self.perlin > 0.0: - shape = init_latent.shape - x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) - return x - - def _image_to_tensor(self, image:Image, normalize:bool=True)->Tensor: - image = np.array(image).astype(np.float32) / 255.0 - if len(image.shape) == 2: # 'L' image, as in a mask - image = image[None,None] - else: # 'RGB' image - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - if normalize: - image = 2.0 * image - 1.0 - return image.to(self.model.device) diff --git a/ldm/invoke/ckpt_generator/inpaint.py b/ldm/invoke/ckpt_generator/inpaint.py deleted file mode 100644 index 3b965b0ee3..0000000000 --- a/ldm/invoke/ckpt_generator/inpaint.py +++ /dev/null @@ -1,358 +0,0 @@ -''' -ldm.invoke.ckpt_generator.inpaint descends from ldm.invoke.ckpt_generator -''' - -import math -import torch -import torchvision.transforms as T -import numpy as np -import cv2 as cv -import PIL -from PIL import Image, ImageFilter, ImageOps, ImageChops -from skimage.exposure.histogram_matching import match_histograms -from einops import rearrange, repeat -from ldm.invoke.devices import choose_autocast -from ldm.invoke.ckpt_generator.img2img import CkptImg2Img -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.ksampler import KSampler -from ldm.invoke.generator.base import downsampling -from ldm.util import debug_image -from ldm.invoke.patchmatch import PatchMatch -from ldm.invoke.globals import Globals - -def infill_methods()->list[str]: - methods = list() - if PatchMatch.patchmatch_available(): - methods.append('patchmatch') - methods.append('tile') - return methods - -class CkptInpaint(CkptImg2Img): - def __init__(self, model, precision): - self.init_latent = None - self.pil_image = None - self.pil_mask = None - self.mask_blur_radius = 0 - self.infill_method = None - super().__init__(model, precision) - - # Outpaint support code - def 
get_tile_images(self, image: np.ndarray, width=8, height=8): - _nrows, _ncols, depth = image.shape - _strides = image.strides - - nrows, _m = divmod(_nrows, height) - ncols, _n = divmod(_ncols, width) - if _m != 0 or _n != 0: - return None - - return np.lib.stride_tricks.as_strided( - np.ravel(image), - shape=(nrows, ncols, height, width, depth), - strides=(height * _strides[0], width * _strides[1], *_strides), - writeable=False - ) - - def infill_patchmatch(self, im: Image.Image) -> Image: - if im.mode != 'RGBA': - return im - - # Skip patchmatch if patchmatch isn't available - if not PatchMatch.patchmatch_available(): - return im - - # Patchmatch (note, we may want to expose patch_size? Increasing it significantly impacts performance though) - im_patched_np = PatchMatch.inpaint(im.convert('RGB'), ImageOps.invert(im.split()[-1]), patch_size = 3) - im_patched = Image.fromarray(im_patched_np, mode = 'RGB') - return im_patched - - def tile_fill_missing(self, im: Image.Image, tile_size: int = 16, seed: int = None) -> Image: - # Only fill if there's an alpha layer - if im.mode != 'RGBA': - return im - - a = np.asarray(im, dtype=np.uint8) - - tile_size = (tile_size, tile_size) - - # Get the image as tiles of a specified size - tiles = self.get_tile_images(a,*tile_size).copy() - - # Get the mask as tiles - tiles_mask = tiles[:,:,:,:,3] - - # Find any mask tiles with any fully transparent pixels (we will be replacing these later) - tmask_shape = tiles_mask.shape - tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape)) - n,ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:]) - tiles_mask = (tiles_mask > 0) - tiles_mask = tiles_mask.reshape((n,ny)).all(axis = 1) - - # Get RGB tiles in single array and filter by the mask - tshape = tiles.shape - tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), * tiles.shape[2:])) - filtered_tiles = tiles_all[tiles_mask] - - if len(filtered_tiles) == 0: - return im - - # Find all invalid tiles and replace with a random valid tile - replace_count = (tiles_mask == False).sum() - rng = np.random.default_rng(seed = seed) - tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[rng.choice(filtered_tiles.shape[0], replace_count),:,:,:] - - # Convert back to an image - tiles_all = tiles_all.reshape(tshape) - tiles_all = tiles_all.swapaxes(1,2) - st = tiles_all.reshape((math.prod(tiles_all.shape[0:2]), math.prod(tiles_all.shape[2:4]), tiles_all.shape[4])) - si = Image.fromarray(st, mode='RGBA') - - return si - - - def mask_edge(self, mask: Image, edge_size: int, edge_blur: int) -> Image: - npimg = np.asarray(mask, dtype=np.uint8) - - # Detect any partially transparent regions - npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0))) - - # Detect hard edges - npedge = cv.Canny(npimg, threshold1=100, threshold2=200) - - # Combine - npmask = npgradient + npedge - - # Expand - npmask = cv.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2)) - - new_mask = Image.fromarray(npmask) - - if edge_blur > 0: - new_mask = new_mask.filter(ImageFilter.BoxBlur(edge_blur)) - - return ImageOps.invert(new_mask) - - - def seam_paint(self, - im: Image.Image, - seam_size: int, - seam_blur: int, - prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,strength, - noise, - step_callback - ) -> Image.Image: - hard_mask = self.pil_image.split()[-1].copy() - mask = self.mask_edge(hard_mask, seam_size, seam_blur) - - make_image = self.get_make_image( - prompt, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - 
init_image = im.copy().convert('RGBA'), - mask_image = mask.convert('RGB'), # Code currently requires an RGB mask - strength = strength, - mask_blur_radius = 0, - seam_size = 0, - step_callback = step_callback, - inpaint_width = im.width, - inpaint_height = im.height - ) - - seam_noise = self.get_noise(im.width, im.height) - - result = make_image(seam_noise) - - return result - - - @torch.no_grad() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,init_image,mask_image,strength, - mask_blur_radius: int = 8, - # Seam settings - when 0, doesn't fill seam - seam_size: int = 0, - seam_blur: int = 0, - seam_strength: float = 0.7, - seam_steps: int = 10, - tile_size: int = 32, - step_callback=None, - inpaint_replace=False, enable_image_debugging=False, - infill_method = None, - inpaint_width=None, - inpaint_height=None, - **kwargs): - """ - Returns a function returning an image derived from the prompt and - the initial image + mask. Return value depends on the seed at - the time you call it. kwargs are 'init_latent' and 'strength' - """ - - self.enable_image_debugging = enable_image_debugging - self.infill_method = infill_method or infill_methods()[0], # The infill method to use - - self.inpaint_width = inpaint_width - self.inpaint_height = inpaint_height - - if isinstance(init_image, PIL.Image.Image): - self.pil_image = init_image.copy() - - # Do infill - if infill_method == 'patchmatch' and PatchMatch.patchmatch_available(): - init_filled = self.infill_patchmatch(self.pil_image.copy()) - else: # if infill_method == 'tile': # Only two methods right now, so always use 'tile' if not patchmatch - init_filled = self.tile_fill_missing( - self.pil_image.copy(), - seed = self.seed, - tile_size = tile_size - ) - init_filled.paste(init_image, (0,0), init_image.split()[-1]) - - # Resize if requested for inpainting - if inpaint_width and inpaint_height: - init_filled = init_filled.resize((inpaint_width, inpaint_height)) - - debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging) - - # Create init tensor - init_image = self._image_to_tensor(init_filled.convert('RGB')) - - if isinstance(mask_image, PIL.Image.Image): - self.pil_mask = mask_image.copy() - debug_image(mask_image, "mask_image BEFORE multiply with pil_image", debug_status=self.enable_image_debugging) - - mask_image = ImageChops.multiply(mask_image, self.pil_image.split()[-1].convert('RGB')) - self.pil_mask = mask_image - - # Resize if requested for inpainting - if inpaint_width and inpaint_height: - mask_image = mask_image.resize((inpaint_width, inpaint_height)) - - debug_image(mask_image, "mask_image AFTER multiply with pil_image", debug_status=self.enable_image_debugging) - mask_image = mask_image.resize( - ( - mask_image.width // downsampling, - mask_image.height // downsampling - ), - resample=Image.Resampling.NEAREST - ) - mask_image = self._image_to_tensor(mask_image,normalize=False) - - self.mask_blur_radius = mask_blur_radius - - # klms samplers not supported yet, so ignore previous sampler - if isinstance(sampler,KSampler): - print( - f">> Using recommended DDIM sampler for inpainting." - ) - sampler = DDIMSampler(self.model, device=self.model.device) - - sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - mask_image = mask_image[0][0].unsqueeze(0).repeat(4,1,1).unsqueeze(0) - mask_image = repeat(mask_image, '1 ... 
-> b ...', b=1) - - scope = choose_autocast(self.precision) - with scope(self.model.device.type): - self.init_latent = self.model.get_first_stage_encoding( - self.model.encode_first_stage(init_image) - ) # move to latent space - - t_enc = int(strength * steps) - # todo: support cross-attention control - uc, c, _ = conditioning - - print(f">> target t_enc is {t_enc} steps") - - @torch.no_grad() - def make_image(x_T): - # encode (scaled latent) - z_enc = sampler.stochastic_encode( - self.init_latent, - torch.tensor([t_enc - 1]).to(self.model.device), - noise=x_T - ) - - # to replace masked area with latent noise, weighted by inpaint_replace strength - if inpaint_replace > 0.0: - print(f'>> inpaint will replace what was under the mask with a strength of {inpaint_replace}') - l_noise = self.get_noise(kwargs['width'],kwargs['height']) - inverted_mask = 1.0-mask_image # there will be 1s where the mask is - masked_region = (1.0-inpaint_replace) * inverted_mask * z_enc + inpaint_replace * inverted_mask * l_noise - z_enc = z_enc * mask_image + masked_region - - if self.free_gpu_mem and self.model.model.device != self.model.device: - self.model.model.to(self.model.device) - - # decode it - samples = sampler.decode( - z_enc, - c, - t_enc, - img_callback = step_callback, - unconditional_guidance_scale = cfg_scale, - unconditional_conditioning = uc, - mask = mask_image, - init_latent = self.init_latent - ) - - result = self.sample_to_image(samples) - - # Seam paint if this is our first pass (seam_size set to 0 during seam painting) - if seam_size > 0: - old_image = self.pil_image or init_image - old_mask = self.pil_mask or mask_image - - result = self.seam_paint( - result, - seam_size, - seam_blur, - prompt, - sampler, - seam_steps, - cfg_scale, - ddim_eta, - conditioning, - seam_strength, - x_T, - step_callback) - - # Restore original settings - self.get_make_image(prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning, - old_image, - old_mask, - strength, - mask_blur_radius, seam_size, seam_blur, seam_strength, - seam_steps, tile_size, step_callback, - inpaint_replace, enable_image_debugging, - inpaint_width = inpaint_width, - inpaint_height = inpaint_height, - infill_method = infill_method, - **kwargs) - - return result - - return make_image - - - def sample_to_image(self, samples)->Image.Image: - gen_result = super().sample_to_image(samples).convert('RGB') - debug_image(gen_result, "gen_result", debug_status=self.enable_image_debugging) - - # Resize if necessary - if self.inpaint_width and self.inpaint_height: - gen_result = gen_result.resize(self.pil_image.size) - - if self.pil_image is None or self.pil_mask is None: - return gen_result - - corrected_result = super().repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) - debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging) - - return corrected_result diff --git a/ldm/invoke/ckpt_generator/omnibus.py b/ldm/invoke/ckpt_generator/omnibus.py deleted file mode 100644 index a479ac85ec..0000000000 --- a/ldm/invoke/ckpt_generator/omnibus.py +++ /dev/null @@ -1,175 +0,0 @@ -"""omnibus module to be used with the runwayml 9-channel custom inpainting model""" - -import torch -import numpy as np -from einops import repeat -from PIL import Image, ImageOps, ImageChops -from ldm.invoke.devices import choose_autocast -from ldm.invoke.ckpt_generator.base import downsampling -from ldm.invoke.ckpt_generator.img2img import CkptImg2Img -from ldm.invoke.ckpt_generator.txt2img import 
CkptTxt2Img - -class CkptOmnibus(CkptImg2Img,CkptTxt2Img): - def __init__(self, model, precision): - super().__init__(model, precision) - self.pil_mask = None - self.pil_image = None - - def get_make_image( - self, - prompt, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - width, - height, - init_image = None, - mask_image = None, - strength = None, - step_callback=None, - threshold=0.0, - perlin=0.0, - mask_blur_radius: int = 8, - **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it. - """ - self.perlin = perlin - num_samples = 1 - - sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - if isinstance(init_image, Image.Image): - self.pil_image = init_image - if init_image.mode != 'RGB': - init_image = init_image.convert('RGB') - init_image = self._image_to_tensor(init_image) - - if isinstance(mask_image, Image.Image): - self.pil_mask = mask_image - - mask_image = ImageChops.multiply(mask_image.convert('L'), self.pil_image.split()[-1]) - mask_image = self._image_to_tensor(ImageOps.invert(mask_image), normalize=False) - - self.mask_blur_radius = mask_blur_radius - - t_enc = steps - - if init_image is not None and mask_image is not None: # inpainting - masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero - - elif init_image is not None: # img2img - scope = choose_autocast(self.precision) - - with scope(self.model.device.type): - self.init_latent = self.model.get_first_stage_encoding( - self.model.encode_first_stage(init_image) - ) # move to latent space - - # create a completely black mask (1s) - mask_image = torch.ones(1, 1, init_image.shape[2], init_image.shape[3], device=self.model.device) - # and the masked image is just a copy of the original - masked_image = init_image - - else: # txt2img - init_image = torch.zeros(1, 3, height, width, device=self.model.device) - mask_image = torch.ones(1, 1, height, width, device=self.model.device) - masked_image = init_image - - self.init_latent = init_image - height = init_image.shape[2] - width = init_image.shape[3] - model = self.model - - def make_image(x_T): - with torch.no_grad(): - scope = choose_autocast(self.precision) - with scope(self.model.device.type): - - batch = self.make_batch_sd( - init_image, - mask_image, - masked_image, - prompt=prompt, - device=model.device, - num_samples=num_samples, - ) - - c = model.cond_stage_model.encode(batch["txt"]) - c_cat = list() - for ck in model.concat_keys: - cc = batch[ck].float() - if ck != model.masked_image_key: - bchw = [num_samples, 4, height//8, width//8] - cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) - else: - cc = model.get_first_stage_encoding(model.encode_first_stage(cc)) - c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - - # cond - cond={"c_concat": [c_cat], "c_crossattn": [c]} - - # uncond cond - uc_cross = model.get_unconditional_conditioning(num_samples, "") - uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]} - shape = [model.channels, height//8, width//8] - - samples, _ = sampler.sample( - batch_size = 1, - S = steps, - x_T = x_T, - conditioning = cond, - shape = shape, - verbose = False, - unconditional_guidance_scale = cfg_scale, - unconditional_conditioning = uc_full, - eta = 1.0, - img_callback = step_callback, - threshold = threshold, - ) - if self.free_gpu_mem: - self.model.model.to("cpu") - return self.sample_to_image(samples) - - return 
make_image - - def make_batch_sd( - self, - image, - mask, - masked_image, - prompt, - device, - num_samples=1): - batch = { - "image": repeat(image.to(device=device), "1 ... -> n ...", n=num_samples), - "txt": num_samples * [prompt], - "mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples), - "masked_image": repeat(masked_image.to(device=device), "1 ... -> n ...", n=num_samples), - } - return batch - - def get_noise(self, width:int, height:int): - if self.init_latent is not None: - height = self.init_latent.shape[2] - width = self.init_latent.shape[3] - return CkptTxt2Img.get_noise(self,width,height) - - - def sample_to_image(self, samples)->Image.Image: - gen_result = super().sample_to_image(samples).convert('RGB') - - if self.pil_image is None or self.pil_mask is None: - return gen_result - if self.pil_image.size != self.pil_mask.size: - return gen_result - - corrected_result = super(CkptImg2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) - - return corrected_result diff --git a/ldm/invoke/ckpt_generator/txt2img.py b/ldm/invoke/ckpt_generator/txt2img.py deleted file mode 100644 index 825b8583b9..0000000000 --- a/ldm/invoke/ckpt_generator/txt2img.py +++ /dev/null @@ -1,90 +0,0 @@ -''' -ldm.invoke.ckpt_generator.txt2img inherits from ldm.invoke.ckpt_generator -''' - -import torch -import numpy as np -from ldm.invoke.ckpt_generator.base import CkptGenerator -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent -import gc - - -class CkptTxt2Img(CkptGenerator): - def __init__(self, model, precision): - super().__init__(model, precision) - - @torch.no_grad() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,width,height,step_callback=None,threshold=0.0,perlin=0.0, - attention_maps_callback=None, - **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - kwargs are 'width' and 'height' - """ - self.perlin = perlin - uc, c, extra_conditioning_info = conditioning - - @torch.no_grad() - def make_image(x_T): - shape = [ - self.latent_channels, - height // self.downsampling_factor, - width // self.downsampling_factor, - ] - - if self.free_gpu_mem and self.model.model.device != self.model.device: - self.model.model.to(self.model.device) - - sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False) - - samples, _ = sampler.sample( - batch_size = 1, - S = steps, - x_T = x_T, - conditioning = c, - shape = shape, - verbose = False, - unconditional_guidance_scale = cfg_scale, - unconditional_conditioning = uc, - extra_conditioning_info = extra_conditioning_info, - eta = ddim_eta, - img_callback = step_callback, - threshold = threshold, - attention_maps_callback = attention_maps_callback, - ) - - if self.free_gpu_mem: - self.model.model.to('cpu') - self.model.cond_stage_model.device = 'cpu' - self.model.cond_stage_model.to('cpu') - gc.collect() - torch.cuda.empty_cache() - - return self.sample_to_image(samples) - - return make_image - - - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height): - device = self.model.device - if self.use_mps_noise or device.type == 'mps': - x = torch.randn([1, - self.latent_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - dtype=self.torch_dtype(), - device='cpu').to(device) - else: - x = torch.randn([1, - 
self.latent_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - dtype=self.torch_dtype(), - device=device) - if self.perlin > 0.0: - x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor) - return x - diff --git a/ldm/invoke/ckpt_generator/txt2img2img.py b/ldm/invoke/ckpt_generator/txt2img2img.py deleted file mode 100644 index 167debb98e..0000000000 --- a/ldm/invoke/ckpt_generator/txt2img2img.py +++ /dev/null @@ -1,182 +0,0 @@ -''' -ldm.invoke.ckpt_generator.txt2img inherits from ldm.invoke.ckpt_generator -''' - -import torch -import numpy as np -import math -import gc -from ldm.invoke.ckpt_generator.base import CkptGenerator -from ldm.invoke.ckpt_generator.omnibus import CkptOmnibus -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent -from PIL import Image - -class CkptTxt2Img2Img(CkptGenerator): - def __init__(self, model, precision): - super().__init__(model, precision) - self.init_latent = None # for get_noise() - - @torch.no_grad() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,width,height,strength,step_callback=None,**kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - kwargs are 'width' and 'height' - """ - uc, c, extra_conditioning_info = conditioning - scale_dim = min(width, height) - scale = 512 / scale_dim - - init_width = math.ceil(scale * width / 64) * 64 - init_height = math.ceil(scale * height / 64) * 64 - - @torch.no_grad() - def make_image(x_T): - - shape = [ - self.latent_channels, - init_height // self.downsampling_factor, - init_width // self.downsampling_factor, - ] - - sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - #x = self.get_noise(init_width, init_height) - x = x_T - - if self.free_gpu_mem and self.model.model.device != self.model.device: - self.model.model.to(self.model.device) - - samples, _ = sampler.sample( - batch_size = 1, - S = steps, - x_T = x, - conditioning = c, - shape = shape, - verbose = False, - unconditional_guidance_scale = cfg_scale, - unconditional_conditioning = uc, - eta = ddim_eta, - img_callback = step_callback, - extra_conditioning_info = extra_conditioning_info - ) - - print( - f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling" - ) - - # resizing - samples = torch.nn.functional.interpolate( - samples, - size=(height // self.downsampling_factor, width // self.downsampling_factor), - mode="bilinear" - ) - - t_enc = int(strength * steps) - ddim_sampler = DDIMSampler(self.model, device=self.model.device) - ddim_sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - z_enc = ddim_sampler.stochastic_encode( - samples, - torch.tensor([t_enc-1]).to(self.model.device), - noise=self.get_noise(width,height,False) - ) - - # decode it - samples = ddim_sampler.decode( - z_enc, - c, - t_enc, - img_callback = step_callback, - unconditional_guidance_scale=cfg_scale, - unconditional_conditioning=uc, - extra_conditioning_info=extra_conditioning_info, - all_timesteps_count=steps - ) - - if self.free_gpu_mem: - self.model.model.to('cpu') - self.model.cond_stage_model.device = 'cpu' - self.model.cond_stage_model.to('cpu') - gc.collect() - torch.cuda.empty_cache() - - return self.sample_to_image(samples) - - # in 
the case of the inpainting model being loaded, the trick of - # providing an interpolated latent doesn't work, so we transiently - # create a 512x512 PIL image, upscale it, and run the inpainting - # over it in img2img mode. Because the inpaing model is so conservative - # it doesn't change the image (much) - def inpaint_make_image(x_T): - omnibus = CkptOmnibus(self.model,self.precision) - result = omnibus.generate( - prompt, - sampler=sampler, - width=init_width, - height=init_height, - step_callback=step_callback, - steps = steps, - cfg_scale = cfg_scale, - ddim_eta = ddim_eta, - conditioning = conditioning, - **kwargs - ) - assert result is not None and len(result)>0,'** txt2img failed **' - image = result[0][0] - interpolated_image = image.resize((width,height),resample=Image.Resampling.LANCZOS) - print(kwargs.pop('init_image',None)) - result = omnibus.generate( - prompt, - sampler=sampler, - init_image=interpolated_image, - width=width, - height=height, - seed=result[0][1], - step_callback=step_callback, - steps = steps, - cfg_scale = cfg_scale, - ddim_eta = ddim_eta, - conditioning = conditioning, - **kwargs - ) - return result[0][0] - - if sampler.uses_inpainting_model(): - return inpaint_make_image - else: - return make_image - - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height,scale = True): - # print(f"Get noise: {width}x{height}") - if scale: - trained_square = 512 * 512 - actual_square = width * height - scale = math.sqrt(trained_square / actual_square) - scaled_width = math.ceil(scale * width / 64) * 64 - scaled_height = math.ceil(scale * height / 64) * 64 - else: - scaled_width = width - scaled_height = height - - device = self.model.device - if self.use_mps_noise or device.type == 'mps': - return torch.randn([1, - self.latent_channels, - scaled_height // self.downsampling_factor, - scaled_width // self.downsampling_factor], - device='cpu').to(device) - else: - return torch.randn([1, - self.latent_channels, - scaled_height // self.downsampling_factor, - scaled_width // self.downsampling_factor], - device=device) - diff --git a/ldm/invoke/ckpt_to_diffuser.py b/ldm/invoke/ckpt_to_diffuser.py index 82ba73b0a4..f6cac0b814 100644 --- a/ldm/invoke/ckpt_to_diffuser.py +++ b/ldm/invoke/ckpt_to_diffuser.py @@ -25,7 +25,7 @@ from ldm.invoke.globals import ( global_cache_dir, global_config_dir, ) -from ldm.invoke.model_manager import ModelManager, SDLegacyType +from invokeai.models import ModelManager, SDLegacyType from safetensors.torch import load_file from typing import Union @@ -56,7 +56,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS from diffusers.utils import is_safetensors_available from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig -from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from invokeai.generator import StableDiffusionGeneratorPipeline def shave_segments(path, n_shave_prefix_segments=1): """ diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py index 7c654caf69..7ff99c252e 100644 --- a/ldm/invoke/conditioning.py +++ b/ldm/invoke/conditioning.py @@ -14,7 +14,7 @@ from transformers import CLIPTokenizer, CLIPTextModel from compel import Compel from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser from .devices import torch_dtype -from ..models.diffusion.shared_invokeai_diffusion import 
InvokeAIDiffuserComponent +from invokeai.models import InvokeAIDiffuserComponent from ldm.invoke.globals import Globals def get_tokenizer(model) -> CLIPTokenizer: diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index 60abce8c8b..186af2aaae 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -20,7 +20,7 @@ from typing import List import invokeai.configs as configs from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline from ..globals import Globals, global_cache_dir, global_config_dir -from ..model_manager import ModelManager +from invokeai.models import ModelManager warnings.filterwarnings("ignore") diff --git a/ldm/invoke/generator/__init__.py b/ldm/invoke/generator/__init__.py deleted file mode 100644 index 2fa5573c84..0000000000 --- a/ldm/invoke/generator/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -''' -Initialization file for the ldm.invoke.generator package -''' -from .base import Generator diff --git a/ldm/invoke/generator/base.py b/ldm/invoke/generator/base.py deleted file mode 100644 index 21d6f271ca..0000000000 --- a/ldm/invoke/generator/base.py +++ /dev/null @@ -1,374 +0,0 @@ -''' -Base class for ldm.invoke.generator.* -including img2img, txt2img, and inpaint -''' -from __future__ import annotations - -import os -import os.path as osp -import random -import traceback -from contextlib import nullcontext - -import cv2 -import numpy as np -import torch - -from PIL import Image, ImageFilter, ImageChops -from diffusers import DiffusionPipeline -from einops import rearrange -from pathlib import Path -from pytorch_lightning import seed_everything -from tqdm import trange - -import invokeai.assets.web as web_assets -from ldm.models.diffusion.ddpm import DiffusionWrapper -from ldm.util import rand_perlin_2d - -downsampling = 8 -CAUTION_IMG = 'caution.png' - -class Generator: - downsampling_factor: int - latent_channels: int - precision: str - model: DiffusionWrapper | DiffusionPipeline - - def __init__(self, model: DiffusionWrapper | DiffusionPipeline, precision: str): - self.model = model - self.precision = precision - self.seed = None - self.latent_channels = model.channels - self.downsampling_factor = downsampling # BUG: should come from model or config - self.safety_checker = None - self.perlin = 0.0 - self.threshold = 0 - self.variation_amount = 0 - self.with_variations = [] - self.use_mps_noise = False - self.free_gpu_mem = None - self.caution_img = None - - # this is going to be overridden in img2img.py, txt2img.py and inpaint.py - def get_make_image(self,prompt,**kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - """ - raise NotImplementedError("image_iterator() must be implemented in a descendent class") - - def set_variation(self, seed, variation_amount, with_variations): - self.seed = seed - self.variation_amount = variation_amount - self.with_variations = with_variations - - def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None, - image_callback=None, step_callback=None, threshold=0.0, perlin=0.0, - h_symmetry_time_pct=None, v_symmetry_time_pct=None, - safety_checker:dict=None, - free_gpu_mem: bool=False, - **kwargs): - scope = nullcontext - self.safety_checker = safety_checker - self.free_gpu_mem = free_gpu_mem - attention_maps_images = [] - attention_maps_callback = lambda saver: 
attention_maps_images.append(saver.get_stacked_maps_image()) - make_image = self.get_make_image( - prompt, - sampler = sampler, - init_image = init_image, - width = width, - height = height, - step_callback = step_callback, - threshold = threshold, - perlin = perlin, - h_symmetry_time_pct = h_symmetry_time_pct, - v_symmetry_time_pct = v_symmetry_time_pct, - attention_maps_callback = attention_maps_callback, - **kwargs - ) - results = [] - seed = seed if seed is not None and seed >= 0 else self.new_seed() - first_seed = seed - seed, initial_noise = self.generate_initial_noise(seed, width, height) - - # There used to be an additional self.model.ema_scope() here, but it breaks - # the inpaint-1.5 model. Not sure what it did.... ? - with scope(self.model.device.type): - for n in trange(iterations, desc='Generating'): - x_T = None - if self.variation_amount > 0: - seed_everything(seed) - target_noise = self.get_noise(width,height) - x_T = self.slerp(self.variation_amount, initial_noise, target_noise) - elif initial_noise is not None: - # i.e. we specified particular variations - x_T = initial_noise - else: - seed_everything(seed) - try: - x_T = self.get_noise(width,height) - except: - print('** An error occurred while getting initial noise **') - print(traceback.format_exc()) - - image = make_image(x_T) - - if self.safety_checker is not None: - image = self.safety_check(image) - - results.append([image, seed]) - - if image_callback is not None: - attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1] - image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image) - - seed = self.new_seed() - - # Free up memory from the last generation. - clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None - if clear_cuda_cache is not None: - clear_cuda_cache() - - return results - - def sample_to_image(self,samples)->Image.Image: - """ - Given samples returned from a sampler, converts - it into a PIL Image - """ - with torch.inference_mode(): - image = self.model.decode_latents(samples) - return self.model.numpy_to_pil(image)[0] - - def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image: - if init_image is None or init_mask is None: - return result - - # Get the original alpha channel of the mask if there is one. - # Otherwise it is some other black/white image format ('1', 'L' or 'RGB') - pil_init_mask = init_mask.getchannel('A') if init_mask.mode == 'RGBA' else init_mask.convert('L') - pil_init_image = init_image.convert('RGBA') # Add an alpha channel if one doesn't exist - - # Build an image with only visible pixels from source to use as reference for color-matching. 
- init_rgb_pixels = np.asarray(init_image.convert('RGB'), dtype=np.uint8) - init_a_pixels = np.asarray(pil_init_image.getchannel('A'), dtype=np.uint8) - init_mask_pixels = np.asarray(pil_init_mask, dtype=np.uint8) - - # Get numpy version of result - np_image = np.asarray(result, dtype=np.uint8) - - # Mask and calculate mean and standard deviation - mask_pixels = init_a_pixels * init_mask_pixels > 0 - np_init_rgb_pixels_masked = init_rgb_pixels[mask_pixels, :] - np_image_masked = np_image[mask_pixels, :] - - if np_init_rgb_pixels_masked.size > 0: - init_means = np_init_rgb_pixels_masked.mean(axis=0) - init_std = np_init_rgb_pixels_masked.std(axis=0) - gen_means = np_image_masked.mean(axis=0) - gen_std = np_image_masked.std(axis=0) - - # Color correct - np_matched_result = np_image.copy() - np_matched_result[:,:,:] = (((np_matched_result[:,:,:].astype(np.float32) - gen_means[None,None,:]) / gen_std[None,None,:]) * init_std[None,None,:] + init_means[None,None,:]).clip(0, 255).astype(np.uint8) - matched_result = Image.fromarray(np_matched_result, mode='RGB') - else: - matched_result = Image.fromarray(np_image, mode='RGB') - - # Blur the mask out (into init image) by specified amount - if mask_blur_radius > 0: - nm = np.asarray(pil_init_mask, dtype=np.uint8) - nmd = cv2.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2)) - pmd = Image.fromarray(nmd, mode='L') - blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius)) - else: - blurred_init_mask = pil_init_mask - - multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1]) - - # Paste original on color-corrected generation (using blurred mask) - matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask) - return matched_result - - def sample_to_lowres_estimated_image(self,samples): - # origingally adapted from code by @erucipe and @keturn here: - # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 - - # these updated numbers for v1.5 are from @torridgristle - v1_5_latent_rgb_factors = torch.tensor([ - # R G B - [ 0.3444, 0.1385, 0.0670], # L1 - [ 0.1247, 0.4027, 0.1494], # L2 - [-0.3192, 0.2513, 0.2103], # L3 - [-0.1307, -0.1874, -0.7445] # L4 - ], dtype=samples.dtype, device=samples.device) - - latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors - latents_ubyte = (((latent_image + 1) / 2) - .clamp(0, 1) # change scale from -1..1 to 0..1 - .mul(0xFF) # to 0..255 - .byte()).cpu() - - return Image.fromarray(latents_ubyte.numpy()) - - def generate_initial_noise(self, seed, width, height): - initial_noise = None - if self.variation_amount > 0 or len(self.with_variations) > 0: - # use fixed initial noise plus random noise per iteration - seed_everything(seed) - initial_noise = self.get_noise(width,height) - for v_seed, v_weight in self.with_variations: - seed = v_seed - seed_everything(seed) - next_noise = self.get_noise(width,height) - initial_noise = self.slerp(v_weight, initial_noise, next_noise) - if self.variation_amount > 0: - random.seed() # reset RNG to an actually random state, so we can get a random seed for variations - seed = random.randrange(0,np.iinfo(np.uint32).max) - return (seed, initial_noise) - else: - return (seed, None) - - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height): - """ - Returns a tensor filled with random numbers, either form a normal distribution - (txt2img) or from the latent image (img2img, inpaint) - """ 
- raise NotImplementedError("get_noise() must be implemented in a descendent class") - - def get_perlin_noise(self,width,height): - fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device - # limit noise to only the diffusion image channels, not the mask channels - input_channels = min(self.latent_channels, 4) - # round up to the nearest block of 8 - temp_width = int((width + 7) / 8) * 8 - temp_height = int((height + 7) / 8) * 8 - noise = torch.stack([ - rand_perlin_2d((temp_height, temp_width), - (8, 8), - device = self.model.device).to(fixdevice) for _ in range(input_channels)], dim=0).to(self.model.device) - return noise[0:4, 0:height, 0:width] - - def new_seed(self): - self.seed = random.randrange(0, np.iinfo(np.uint32).max) - return self.seed - - def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995): - ''' - Spherical linear interpolation - Args: - t (float/np.ndarray): Float value between 0.0 and 1.0 - v0 (np.ndarray): Starting vector - v1 (np.ndarray): Final vector - DOT_THRESHOLD (float): Threshold for considering the two vectors as - colineal. Not recommended to alter this. - Returns: - v2 (np.ndarray): Interpolation vector between v0 and v1 - ''' - inputs_are_torch = False - if not isinstance(v0, np.ndarray): - inputs_are_torch = True - v0 = v0.detach().cpu().numpy() - if not isinstance(v1, np.ndarray): - inputs_are_torch = True - v1 = v1.detach().cpu().numpy() - - dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) - if np.abs(dot) > DOT_THRESHOLD: - v2 = (1 - t) * v0 + t * v1 - else: - theta_0 = np.arccos(dot) - sin_theta_0 = np.sin(theta_0) - theta_t = theta_0 * t - sin_theta_t = np.sin(theta_t) - s0 = np.sin(theta_0 - theta_t) / sin_theta_0 - s1 = sin_theta_t / sin_theta_0 - v2 = s0 * v0 + s1 * v1 - - if inputs_are_torch: - v2 = torch.from_numpy(v2).to(self.model.device) - - return v2 - - def safety_check(self,image:Image.Image): - ''' - If the CompViz safety checker flags an NSFW image, we - blur it out. - ''' - import diffusers - - checker = self.safety_checker['checker'] - extractor = self.safety_checker['extractor'] - features = extractor([image], return_tensors="pt") - features.to(self.model.device) - - # unfortunately checker requires the numpy version, so we have to convert back - x_image = np.array(image).astype(np.float32) / 255.0 - x_image = x_image[None].transpose(0, 3, 1, 2) - - diffusers.logging.set_verbosity_error() - checked_image, has_nsfw_concept = checker(images=x_image, clip_input=features.pixel_values) - if has_nsfw_concept[0]: - print('** An image with potential non-safe content has been detected. A blurred image will be returned. **') - return self.blur(image) - else: - return image - - def blur(self,input): - blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) - try: - caution = self.get_caution_img() - if caution: - blurry.paste(caution,(0,0),caution) - except FileNotFoundError: - pass - return blurry - - def get_caution_img(self): - path = None - if self.caution_img: - return self.caution_img - path = Path(web_assets.__path__[0]) / CAUTION_IMG - caution = Image.open(path) - self.caution_img = caution.resize((caution.width // 2, caution.height //2)) - return self.caution_img - - # this is a handy routine for debugging use. Given a generated sample, - # convert it into a PNG image and store it at the indicated path - def save_sample(self, sample, filepath): - image = self.sample_to_image(sample) - dirname = os.path.dirname(filepath) or '.' 
- if not os.path.exists(dirname): - print(f'** creating directory {dirname}') - os.makedirs(dirname, exist_ok=True) - image.save(filepath,'PNG') - - - def torch_dtype(self)->torch.dtype: - return torch.float16 if self.precision == 'float16' else torch.float32 - - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height): - device = self.model.device - # limit noise to only the diffusion image channels, not the mask channels - input_channels = min(self.latent_channels, 4) - if self.use_mps_noise or device.type == 'mps': - x = torch.randn([1, - input_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - dtype=self.torch_dtype(), - device='cpu').to(device) - else: - x = torch.randn([1, - input_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - dtype=self.torch_dtype(), - device=device) - if self.perlin > 0.0: - perlin_noise = self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor) - x = (1-self.perlin)*x + self.perlin*perlin_noise - return x diff --git a/ldm/invoke/generator/diffusers_pipeline.py b/ldm/invoke/generator/diffusers_pipeline.py deleted file mode 100644 index 5e65cb5d13..0000000000 --- a/ldm/invoke/generator/diffusers_pipeline.py +++ /dev/null @@ -1,765 +0,0 @@ -from __future__ import annotations - -import dataclasses -import inspect -import psutil -import secrets -from collections.abc import Sequence -from dataclasses import dataclass, field -from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any - -import PIL.Image -import einops -import psutil -import torch -import torchvision.transforms as T -from diffusers.models import AutoencoderKL, UNet2DConditionModel -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from diffusers.schedulers import KarrasDiffusionSchedulers -from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput -from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.outputs import BaseOutput -from torchvision.transforms.functional import resize as tv_resize -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer -from typing_extensions import ParamSpec - -from ldm.invoke.globals import Globals -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings -from ldm.modules.textual_inversion_manager import TextualInversionManager -from ..devices import normalize_device, CPU_DEVICE -from ..offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup -from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver -from compel import EmbeddingsProvider - - -@dataclass -class PipelineIntermediateState: - run_id: str - step: int - timestep: int - latents: torch.Tensor - predicted_original: Optional[torch.Tensor] = None - attention_map_saver: Optional[AttentionMapSaver] = None - - -# copied from configs/stable-diffusion/v1-inference.yaml -_default_personalization_config_params = dict( - placeholder_strings=["*"], - initializer_wods=["sculpture"], - per_image_tokens=False, - num_vectors_per_token=1, - 
progressive_words=False -) - - -@dataclass -class AddsMaskLatents: - """Add the channels required for inpainting model input. - - The inpainting model takes the normal latent channels as input, _plus_ a one-channel mask - and the latent encoding of the base image. - - This class assumes the same mask and base image should apply to all items in the batch. - """ - forward: Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor] - mask: torch.Tensor - initial_image_latents: torch.Tensor - - def __call__(self, latents: torch.Tensor, t: torch.Tensor, text_embeddings: torch.Tensor) -> torch.Tensor: - model_input = self.add_mask_channels(latents) - return self.forward(model_input, t, text_embeddings) - - def add_mask_channels(self, latents): - batch_size = latents.size(0) - # duplicate mask and latents for each batch - mask = einops.repeat(self.mask, 'b c h w -> (repeat b) c h w', repeat=batch_size) - image_latents = einops.repeat(self.initial_image_latents, 'b c h w -> (repeat b) c h w', repeat=batch_size) - # add mask and image as additional channels - model_input, _ = einops.pack([latents, mask, image_latents], 'b * h w') - return model_input - - -def are_like_tensors(a: torch.Tensor, b: object) -> bool: - return ( - isinstance(b, torch.Tensor) - and (a.size() == b.size()) - ) - -@dataclass -class AddsMaskGuidance: - mask: torch.FloatTensor - mask_latents: torch.FloatTensor - scheduler: SchedulerMixin - noise: torch.Tensor - _debug: Optional[Callable] = None - - def __call__(self, step_output: BaseOutput | SchedulerOutput, t: torch.Tensor, conditioning) -> BaseOutput: - output_class = step_output.__class__ # We'll create a new one with masked data. - - # The problem with taking SchedulerOutput instead of the model output is that we're less certain what's in it. - # It's reasonable to assume the first thing is prev_sample, but then does it have other things - # like pred_original_sample? Should we apply the mask to them too? - # But what if there's just some other random field? - prev_sample = step_output[0] - # Mask anything that has the same shape as prev_sample, return others as-is. - return output_class( - {k: (self.apply_mask(v, self._t_for_field(k, t)) - if are_like_tensors(prev_sample, v) else v) - for k, v in step_output.items()} - ) - - def _t_for_field(self, field_name:str, t): - if field_name == "pred_original_sample": - return torch.zeros_like(t, dtype=t.dtype) # it represents t=0 - return t - - def apply_mask(self, latents: torch.Tensor, t) -> torch.Tensor: - batch_size = latents.size(0) - mask = einops.repeat(self.mask, 'b c h w -> (repeat b) c h w', repeat=batch_size) - if t.dim() == 0: - # some schedulers expect t to be one-dimensional. - # TODO: file diffusers bug about inconsistency? - t = einops.repeat(t, '-> batch', batch=batch_size) - # Noise shouldn't be re-randomized between steps here. The multistep schedulers - # get very confused about what is happening from step to step when we do that. - mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) - # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already? 
- # mask_latents = self.scheduler.scale_model_input(mask_latents, t) - mask_latents = einops.repeat(mask_latents, 'b c h w -> (repeat b) c h w', repeat=batch_size) - masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) - if self._debug: - self._debug(masked_input, f"t={t} lerped") - return masked_input - - -def trim_to_multiple_of(*args, multiple_of=8): - return tuple((x - x % multiple_of) for x in args) - - -def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool=True, multiple_of=8) -> torch.FloatTensor: - """ - - :param image: input image - :param normalize: scale the range to [-1, 1] instead of [0, 1] - :param multiple_of: resize the input so both dimensions are a multiple of this - """ - w, h = trim_to_multiple_of(*image.size) - transformation = T.Compose([ - T.Resize((h, w), T.InterpolationMode.LANCZOS), - T.ToTensor(), - ]) - tensor = transformation(image) - if normalize: - tensor = tensor * 2.0 - 1.0 - return tensor - - -def is_inpainting_model(unet: UNet2DConditionModel): - return unet.conv_in.in_channels == 9 - -CallbackType = TypeVar('CallbackType') -ReturnType = TypeVar('ReturnType') -ParamType = ParamSpec('ParamType') - -@dataclass(frozen=True) -class GeneratorToCallbackinator(Generic[ParamType, ReturnType, CallbackType]): - """Convert a generator to a function with a callback and a return value.""" - - generator_method: Callable[ParamType, ReturnType] - callback_arg_type: Type[CallbackType] - - def __call__(self, *args: ParamType.args, - callback:Callable[[CallbackType], Any]=None, - **kwargs: ParamType.kwargs) -> ReturnType: - result = None - for result in self.generator_method(*args, **kwargs): - if callback is not None and isinstance(result, self.callback_arg_type): - callback(result) - if result is None: - raise AssertionError("why was that an empty generator?") - return result - - -@dataclass(frozen=True) -class ConditioningData: - unconditioned_embeddings: torch.Tensor - text_embeddings: torch.Tensor - guidance_scale: float - """ - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). - Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate - images that are closely linked to the text `prompt`, usually at the expense of lower image quality. - """ - extra: Optional[InvokeAIDiffuserComponent.ExtraConditioningInfo] = None - scheduler_args: dict[str, Any] = field(default_factory=dict) - """ - Additional arguments to pass to invokeai_diffuser.do_latent_postprocessing(). - """ - postprocessing_settings: Optional[PostprocessingSettings] = None - - @property - def dtype(self): - return self.text_embeddings.dtype - - def add_scheduler_args_if_applicable(self, scheduler, **kwargs): - scheduler_args = dict(self.scheduler_args) - step_method = inspect.signature(scheduler.step) - for name, value in kwargs.items(): - try: - step_method.bind_partial(**{name: value}) - except TypeError: - # FIXME: don't silently discard arguments - pass # debug("%s does not accept argument named %r", scheduler, name) - else: - scheduler_args[name] = value - return dataclasses.replace(self, scheduler_args=scheduler_args) - -@dataclass -class InvokeAIStableDiffusionPipelineOutput(StableDiffusionPipelineOutput): - r""" - Output class for InvokeAI's Stable Diffusion pipeline. 
- - Args: - attention_map_saver (`AttentionMapSaver`): Object containing attention maps that can be displayed to the user - after generation completes. Optional. - """ - attention_map_saver: Optional[AttentionMapSaver] - - -class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): - r""" - Pipeline for text-to-image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Implementation note: This class started as a refactored copy of diffusers.StableDiffusionPipeline. - Hopefully future versions of diffusers provide access to more of these functions so that we don't - need to duplicate them here: https://github.com/huggingface/diffusers/issues/551#issuecomment-1281508384 - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offsensive or harmful. - Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
- """ - _model_group: ModelGroup - - ID_LENGTH = 8 - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: KarrasDiffusionSchedulers, - safety_checker: Optional[StableDiffusionSafetyChecker], - feature_extractor: Optional[CLIPFeatureExtractor], - requires_safety_checker: bool = False, - precision: str = 'float32', - ): - super().__init__(vae, text_encoder, tokenizer, unet, scheduler, - safety_checker, feature_extractor, requires_safety_checker) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward, is_running_diffusers=True) - use_full_precision = (precision == 'float32' or precision == 'autocast') - self.textual_inversion_manager = TextualInversionManager(tokenizer=self.tokenizer, - text_encoder=self.text_encoder, - full_precision=use_full_precision) - # InvokeAI's interface for text embeddings and whatnot - self.embeddings_provider = EmbeddingsProvider( - tokenizer=self.tokenizer, - text_encoder=self.text_encoder, - textual_inversion_manager=self.textual_inversion_manager - ) - - self._model_group = FullyLoadedModelGroup(self.unet.device) - self._model_group.install(*self._submodels) - - - def _adjust_memory_efficient_attention(self, latents: torch.Tensor): - """ - if xformers is available, use it, otherwise use sliced attention. - """ - if torch.cuda.is_available() and is_xformers_available() and not Globals.disable_xformers: - self.enable_xformers_memory_efficient_attention() - else: - if torch.backends.mps.is_available(): - # until pytorch #91617 is fixed, slicing is borked on MPS - # https://github.com/pytorch/pytorch/issues/91617 - # fix is in https://github.com/kulinseth/pytorch/pull/222 but no idea when it will get merged to pytorch mainline. - pass - else: - if self.device.type == 'cpu' or self.device.type == 'mps': - mem_free = psutil.virtual_memory().free - elif self.device.type == 'cuda': - mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.device)) - else: - raise ValueError(f"unrecognized device {self.device}") - # input tensor of [1, 4, h/8, w/8] - # output tensor of [16, (h/8 * w/8), (h/8 * w/8)] - bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4 - max_size_required_for_baddbmm = \ - 16 * \ - latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \ - bytes_per_element_needed_for_baddbmm_duplication - if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code - self.enable_attention_slicing(slice_size='max') - else: - self.disable_attention_slicing() - - - def enable_offload_submodels(self, device: torch.device): - """ - Offload each submodel when it's not in use. - - Useful for low-vRAM situations where the size of the model in memory is a big chunk of - the total available resource, and you want to free up as much for inference as possible. - - This requires more moving parts and may add some delay as the U-Net is swapped out for the - VAE and vice-versa. - """ - models = self._submodels - if self._model_group is not None: - self._model_group.uninstall(*models) - group = LazilyLoadedModelGroup(device) - group.install(*models) - self._model_group = group - - def disable_offload_submodels(self): - """ - Leave all submodels loaded. 
- - Appropriate for cases where the size of the model in memory is small compared to the memory - required for inference. Avoids the delay and complexity of shuffling the submodels to and - from the GPU. - """ - models = self._submodels - if self._model_group is not None: - self._model_group.uninstall(*models) - group = FullyLoadedModelGroup(self._model_group.execution_device) - group.install(*models) - self._model_group = group - - def offload_all(self): - """Offload all this pipeline's models to CPU.""" - self._model_group.offload_current() - - def ready(self): - """ - Ready this pipeline's models. - - i.e. pre-load them to the GPU if appropriate. - """ - self._model_group.ready() - - def to(self, torch_device: Optional[Union[str, torch.device]] = None): - # overridden method; types match the superclass. - if torch_device is None: - return self - self._model_group.set_device(torch.device(torch_device)) - self._model_group.ready() - - @property - def device(self) -> torch.device: - return self._model_group.execution_device - - @property - def _submodels(self) -> Sequence[torch.nn.Module]: - module_names, _, _ = self.extract_init_dict(dict(self.config)) - values = [getattr(self, name) for name in module_names.keys()] - return [m for m in values if isinstance(m, torch.nn.Module)] - - def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int, - conditioning_data: ConditioningData, - *, - noise: torch.Tensor, - callback: Callable[[PipelineIntermediateState], None]=None, - run_id=None) -> InvokeAIStableDiffusionPipelineOutput: - r""" - Function invoked when calling the pipeline for generation. - - :param conditioning_data: - :param latents: Pre-generated un-noised latents, to be used as inputs for - image generation. Can be used to tweak the same generation with different prompts. - :param num_inference_steps: The number of denoising steps. More denoising steps usually lead to a higher quality - image at the expense of slower inference. - :param noise: Noise to add to the latents, sampled from a Gaussian distribution. 
- :param callback: - :param run_id: - """ - result_latents, result_attention_map_saver = self.latents_from_embeddings( - latents, num_inference_steps, - conditioning_data, - noise=noise, - run_id=run_id, - callback=callback) - # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 - torch.cuda.empty_cache() - - with torch.inference_mode(): - image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_map_saver) - return self.check_for_safety(output, dtype=conditioning_data.dtype) - - def latents_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int, - conditioning_data: ConditioningData, - *, - noise: torch.Tensor, - timesteps=None, - additional_guidance: List[Callable] = None, run_id=None, - callback: Callable[[PipelineIntermediateState], None] = None - ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: - if timesteps is None: - self.scheduler.set_timesteps(num_inference_steps, device=self._model_group.device_for(self.unet)) - timesteps = self.scheduler.timesteps - infer_latents_from_embeddings = GeneratorToCallbackinator(self.generate_latents_from_embeddings, PipelineIntermediateState) - result: PipelineIntermediateState = infer_latents_from_embeddings( - latents, timesteps, conditioning_data, - noise=noise, - additional_guidance=additional_guidance, - run_id=run_id, - callback=callback) - return result.latents, result.attention_map_saver - - def generate_latents_from_embeddings(self, latents: torch.Tensor, timesteps, - conditioning_data: ConditioningData, - *, - noise: torch.Tensor, - run_id: str = None, - additional_guidance: List[Callable] = None): - self._adjust_memory_efficient_attention(latents) - if run_id is None: - run_id = secrets.token_urlsafe(self.ID_LENGTH) - if additional_guidance is None: - additional_guidance = [] - extra_conditioning_info = conditioning_data.extra - with self.invokeai_diffuser.custom_attention_context(extra_conditioning_info=extra_conditioning_info, - step_count=len(self.scheduler.timesteps) - ): - - yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps, - latents=latents) - - batch_size = latents.shape[0] - batched_t = torch.full((batch_size,), timesteps[0], - dtype=timesteps.dtype, device=self._model_group.device_for(self.unet)) - latents = self.scheduler.add_noise(latents, noise, batched_t) - - attention_map_saver: Optional[AttentionMapSaver] = None - - for i, t in enumerate(self.progress_bar(timesteps)): - batched_t.fill_(t) - step_output = self.step(batched_t, latents, conditioning_data, - step_index=i, - total_step_count=len(timesteps), - additional_guidance=additional_guidance) - latents = step_output.prev_sample - - latents = self.invokeai_diffuser.do_latent_postprocessing( - postprocessing_settings=conditioning_data.postprocessing_settings, - latents=latents, - sigma=batched_t, - step_index=i, - total_step_count=len(timesteps) - ) - - predicted_original = getattr(step_output, 'pred_original_sample', None) - - # TODO resuscitate attention map saving - #if i == len(timesteps)-1 and extra_conditioning_info is not None: - # eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1 - # attention_map_token_ids = range(1, eos_token_index) - # attention_map_saver = AttentionMapSaver(token_ids=attention_map_token_ids, latents_shape=latents.shape[-2:]) - # self.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) - - yield 
PipelineIntermediateState(run_id=run_id, step=i, timestep=int(t), latents=latents, - predicted_original=predicted_original, attention_map_saver=attention_map_saver) - - return latents, attention_map_saver - - @torch.inference_mode() - def step(self, t: torch.Tensor, latents: torch.Tensor, - conditioning_data: ConditioningData, - step_index:int, total_step_count:int, - additional_guidance: List[Callable] = None): - # invokeai_diffuser has batched timesteps, but diffusers schedulers expect a single value - timestep = t[0] - - if additional_guidance is None: - additional_guidance = [] - - # TODO: should this scaling happen here or inside self._unet_forward? - # i.e. before or after passing it to InvokeAIDiffuserComponent - latent_model_input = self.scheduler.scale_model_input(latents, timestep) - - # predict the noise residual - noise_pred = self.invokeai_diffuser.do_diffusion_step( - latent_model_input, t, - conditioning_data.unconditioned_embeddings, conditioning_data.text_embeddings, - conditioning_data.guidance_scale, - step_index=step_index, - total_step_count=total_step_count, - ) - - # compute the previous noisy sample x_t -> x_t-1 - step_output = self.scheduler.step(noise_pred, timestep, latents, - **conditioning_data.scheduler_args) - - # TODO: this additional_guidance extension point feels redundant with InvokeAIDiffusionComponent. - # But the way things are now, scheduler runs _after_ that, so there was - # no way to use it to apply an operation that happens after the last scheduler.step. - for guidance in additional_guidance: - step_output = guidance(step_output, timestep, conditioning_data) - - return step_output - - def _unet_forward(self, latents, t, text_embeddings, cross_attention_kwargs: Optional[dict[str,Any]] = None): - """predict the noise residual""" - if is_inpainting_model(self.unet) and latents.size(1) == 4: - # Pad out normal non-inpainting inputs for an inpainting model. - # FIXME: There are too many layers of functions and we have too many different ways of - # overriding things! This should get handled in a way more consistent with the other - # use of AddsMaskLatents. - latents = AddsMaskLatents( - self._unet_forward, - mask=torch.ones_like(latents[:1, :1], device=latents.device, dtype=latents.dtype), - initial_image_latents=torch.zeros_like(latents[:1], device=latents.device, dtype=latents.dtype) - ).add_mask_channels(latents) - - # First three args should be positional, not keywords, so torch hooks can see them. - return self.unet(latents, t, text_embeddings, - cross_attention_kwargs=cross_attention_kwargs).sample - - def img2img_from_embeddings(self, - init_image: Union[torch.FloatTensor, PIL.Image.Image], - strength: float, - num_inference_steps: int, - conditioning_data: ConditioningData, - *, callback: Callable[[PipelineIntermediateState], None] = None, - run_id=None, - noise_func=None - ) -> InvokeAIStableDiffusionPipelineOutput: - if isinstance(init_image, PIL.Image.Image): - init_image = image_resized_to_grid_as_tensor(init_image.convert('RGB')) - - if init_image.dim() == 3: - init_image = einops.rearrange(init_image, 'c h w -> 1 c h w') - - # 6. 
Prepare latent variables - initial_latents = self.non_noised_latents_from_image( - init_image, device=self._model_group.device_for(self.unet), - dtype=self.unet.dtype) - noise = noise_func(initial_latents) - - return self.img2img_from_latents_and_embeddings(initial_latents, num_inference_steps, - conditioning_data, - strength, - noise, run_id, callback) - - def img2img_from_latents_and_embeddings(self, initial_latents, num_inference_steps, - conditioning_data: ConditioningData, - strength, - noise: torch.Tensor, run_id=None, callback=None - ) -> InvokeAIStableDiffusionPipelineOutput: - timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, - device=self._model_group.device_for(self.unet)) - result_latents, result_attention_maps = self.latents_from_embeddings( - initial_latents, num_inference_steps, conditioning_data, - timesteps=timesteps, - noise=noise, - run_id=run_id, - callback=callback) - - # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 - torch.cuda.empty_cache() - - with torch.inference_mode(): - image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps) - return self.check_for_safety(output, dtype=conditioning_data.dtype) - - def get_img2img_timesteps(self, num_inference_steps: int, strength: float, device) -> (torch.Tensor, int): - img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components) - assert img2img_pipeline.scheduler is self.scheduler - img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps, adjusted_steps = img2img_pipeline.get_timesteps(num_inference_steps, strength, device=device) - # Workaround for low strength resulting in zero timesteps. - # TODO: submit upstream fix for zero-step img2img - if timesteps.numel() == 0: - timesteps = self.scheduler.timesteps[-1:] - adjusted_steps = timesteps.numel() - return timesteps, adjusted_steps - - def inpaint_from_embeddings( - self, - init_image: torch.FloatTensor, - mask: torch.FloatTensor, - strength: float, - num_inference_steps: int, - conditioning_data: ConditioningData, - *, callback: Callable[[PipelineIntermediateState], None] = None, - run_id=None, - noise_func=None, - ) -> InvokeAIStableDiffusionPipelineOutput: - device = self._model_group.device_for(self.unet) - latents_dtype = self.unet.dtype - - if isinstance(init_image, PIL.Image.Image): - init_image = image_resized_to_grid_as_tensor(init_image.convert('RGB')) - - init_image = init_image.to(device=device, dtype=latents_dtype) - mask = mask.to(device=device, dtype=latents_dtype) - - if init_image.dim() == 3: - init_image = init_image.unsqueeze(0) - - timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, device=device) - - # 6. Prepare latent variables - # can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents - # because we have our own noise function - init_image_latents = self.non_noised_latents_from_image(init_image, device=device, dtype=latents_dtype) - noise = noise_func(init_image_latents) - - if mask.dim() == 3: - mask = mask.unsqueeze(0) - latent_mask = tv_resize(mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR) \ - .to(device=device, dtype=latents_dtype) - - guidance: List[Callable] = [] - - if is_inpainting_model(self.unet): - # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint - # (that's why there's a mask!) 
but it seems to really want that blanked out. - masked_init_image = init_image * torch.where(mask < 0.5, 1, 0) - masked_latents = self.non_noised_latents_from_image(masked_init_image, device=device, dtype=latents_dtype) - - # TODO: we should probably pass this in so we don't have to try/finally around setting it. - self.invokeai_diffuser.model_forward_callback = \ - AddsMaskLatents(self._unet_forward, latent_mask, masked_latents) - else: - guidance.append(AddsMaskGuidance(latent_mask, init_image_latents, self.scheduler, noise)) - - try: - result_latents, result_attention_maps = self.latents_from_embeddings( - init_image_latents, num_inference_steps, - conditioning_data, noise=noise, timesteps=timesteps, - additional_guidance=guidance, - run_id=run_id, callback=callback) - finally: - self.invokeai_diffuser.model_forward_callback = self._unet_forward - - # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 - torch.cuda.empty_cache() - - with torch.inference_mode(): - image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps) - return self.check_for_safety(output, dtype=conditioning_data.dtype) - - def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype): - init_image = init_image.to(device=device, dtype=dtype) - with torch.inference_mode(): - if device.type == 'mps': - # workaround for torch MPS bug that has been fixed in https://github.com/kulinseth/pytorch/pull/222 - # TODO remove this workaround once kulinseth#222 is merged to pytorch mainline - self.vae.to(CPU_DEVICE) - init_image = init_image.to(CPU_DEVICE) - else: - self._model_group.load(self.vae) - init_latent_dist = self.vae.encode(init_image).latent_dist - init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible! - if device.type == 'mps': - self.vae.to(device) - init_latents = init_latents.to(device) - - init_latents = 0.18215 * init_latents - return init_latents - - def check_for_safety(self, output, dtype): - with torch.inference_mode(): - screened_images, has_nsfw_concept = self.run_safety_checker(output.images, dtype=dtype) - screened_attention_map_saver = None - if has_nsfw_concept is None or not has_nsfw_concept: - screened_attention_map_saver = output.attention_map_saver - return InvokeAIStableDiffusionPipelineOutput(screened_images, - has_nsfw_concept, - # block the attention maps if NSFW content is detected - attention_map_saver=screened_attention_map_saver) - - def run_safety_checker(self, image, device=None, dtype=None): - # overriding to use the model group for device info instead of requiring the caller to know. - if self.safety_checker is not None: - device = self._model_group.device_for(self.safety_checker) - return super().run_safety_checker(image, device, dtype) - - @torch.inference_mode() - def get_learned_conditioning(self, c: List[List[str]], *, return_tokens=True, fragment_weights=None): - """ - Compatibility function for ldm.models.diffusion.ddpm.LatentDiffusion. 
- """ - return self.embeddings_provider.get_embeddings_for_weighted_prompt_fragments( - text_batch=c, - fragment_weights_batch=fragment_weights, - should_return_tokens=return_tokens, - device=self._model_group.device_for(self.unet)) - - @property - def cond_stage_model(self): - return self.embeddings_provider - - @torch.inference_mode() - def _tokenize(self, prompt: Union[str, List[str]]): - return self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - - @property - def channels(self) -> int: - """Compatible with DiffusionWrapper""" - return self.unet.in_channels - - def decode_latents(self, latents): - # Explicit call to get the vae loaded, since `decode` isn't the forward method. - self._model_group.load(self.vae) - return super().decode_latents(latents) - - def debug_latents(self, latents, msg): - with torch.inference_mode(): - from ldm.util import debug_image - decoded = self.numpy_to_pil(self.decode_latents(latents)) - for i, img in enumerate(decoded): - debug_image(img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True) diff --git a/ldm/invoke/generator/embiggen.py b/ldm/invoke/generator/embiggen.py deleted file mode 100644 index 0a06f90b03..0000000000 --- a/ldm/invoke/generator/embiggen.py +++ /dev/null @@ -1,501 +0,0 @@ -''' -ldm.invoke.generator.embiggen descends from ldm.invoke.generator -and generates with ldm.invoke.generator.img2img -''' - -import numpy as np -import torch -from PIL import Image -from tqdm import trange - -from ldm.invoke.generator.base import Generator -from ldm.invoke.generator.img2img import Img2Img - - -class Embiggen(Generator): - def __init__(self, model, precision): - super().__init__(model, precision) - self.init_latent = None - - # Replace generate because Embiggen doesn't need/use most of what it does normallly - def generate(self,prompt,iterations=1,seed=None, - image_callback=None, step_callback=None, - **kwargs): - - make_image = self.get_make_image( - prompt, - step_callback = step_callback, - **kwargs - ) - results = [] - seed = seed if seed else self.new_seed() - - # Noise will be generated by the Img2Img generator when called - for _ in trange(iterations, desc='Generating'): - # make_image will call Img2Img which will do the equivalent of get_noise itself - image = make_image() - results.append([image, seed]) - if image_callback is not None: - image_callback(image, seed, prompt_in=prompt) - seed = self.new_seed() - return results - - @torch.no_grad() - def get_make_image( - self, - prompt, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - init_img, - strength, - width, - height, - embiggen, - embiggen_tiles, - step_callback=None, - **kwargs - ): - """ - Returns a function returning an image derived from the prompt and multi-stage twice-baked potato layering over the img2img on the initial image - Return value depends on the seed at the time you call it - """ - assert not sampler.uses_inpainting_model(), "--embiggen is not supported by inpainting models" - - # Construct embiggen arg array, and sanity check arguments - if embiggen == None: # embiggen can also be called with just embiggen_tiles - embiggen = [1.0] # If not specified, assume no scaling - elif embiggen[0] < 0: - embiggen[0] = 1.0 - print( - '>> Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !') - if len(embiggen) < 2: - embiggen.append(0.75) - elif embiggen[1] > 1.0 or embiggen[1] < 0: - embiggen[1] = 0.75 - print('>> Embiggen upscaling strength 
for ESRGAN must be between 0 and 1, fell back to the default of 0.75 !') - if len(embiggen) < 3: - embiggen.append(0.25) - elif embiggen[2] < 0: - embiggen[2] = 0.25 - print('>> Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !') - - # Convert tiles from their user-freindly count-from-one to count-from-zero, because we need to do modulo math - # and then sort them, because... people. - if embiggen_tiles: - embiggen_tiles = list(map(lambda n: n-1, embiggen_tiles)) - embiggen_tiles.sort() - - if strength >= 0.5: - print(f'* WARNING: Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45.') - - # Prep img2img generator, since we wrap over it - gen_img2img = Img2Img(self.model,self.precision) - - # Open original init image (not a tensor) to manipulate - initsuperimage = Image.open(init_img) - - with Image.open(init_img) as img: - initsuperimage = img.convert('RGB') - - # Size of the target super init image in pixels - initsuperwidth, initsuperheight = initsuperimage.size - - # Increase by scaling factor if not already resized, using ESRGAN as able - if embiggen[0] != 1.0: - initsuperwidth = round(initsuperwidth*embiggen[0]) - initsuperheight = round(initsuperheight*embiggen[0]) - if embiggen[1] > 0: # No point in ESRGAN upscaling if strength is set zero - from ldm.invoke.restoration.realesrgan import ESRGAN - esrgan = ESRGAN() - print( - f'>> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}') - if embiggen[0] > 2: - initsuperimage = esrgan.process( - initsuperimage, - embiggen[1], # upscale strength - self.seed, - 4, # upscale scale - ) - else: - initsuperimage = esrgan.process( - initsuperimage, - embiggen[1], # upscale strength - self.seed, - 2, # upscale scale - ) - # We could keep recursively re-running ESRGAN for a requested embiggen[0] larger than 4x - # but from personal experiance it doesn't greatly improve anything after 4x - # Resize to target scaling factor resolution - initsuperimage = initsuperimage.resize( - (initsuperwidth, initsuperheight), Image.Resampling.LANCZOS) - - # Use width and height as tile widths and height - # Determine buffer size in pixels - if embiggen[2] < 1: - if embiggen[2] < 0: - embiggen[2] = 0 - overlap_size_x = round(embiggen[2] * width) - overlap_size_y = round(embiggen[2] * height) - else: - overlap_size_x = round(embiggen[2]) - overlap_size_y = round(embiggen[2]) - - # With overall image width and height known, determine how many tiles we need - def ceildiv(a, b): - return -1 * (-a // b) - - # X and Y needs to be determined independantly (we may have savings on one based on the buffer pixel count) - # (initsuperwidth - width) is the area remaining to the right that we need to layers tiles to fill - # (width - overlap_size_x) is how much new we can fill with a single tile - emb_tiles_x = 1 - emb_tiles_y = 1 - if (initsuperwidth - width) > 0: - emb_tiles_x = ceildiv(initsuperwidth - width, - width - overlap_size_x) + 1 - if (initsuperheight - height) > 0: - emb_tiles_y = ceildiv(initsuperheight - height, - height - overlap_size_y) + 1 - # Sanity - assert emb_tiles_x > 1 or emb_tiles_y > 1, f'ERROR: Based on the requested dimensions of {initsuperwidth}x{initsuperheight} and tiles of {width}x{height} you don\'t need to Embiggen! Check your arguments.' 
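# A rough worked example of the tile arithmetic above (illustrative numbers only, not
# from the original file): with a 1024x1024 super-image, 512x512 tiles and the default
# 0.25 overlap ratio, overlap_size_x == overlap_size_y == round(0.25 * 512) == 128, so
#     emb_tiles_x = ceildiv(1024 - 512, 512 - 128) + 1 == ceildiv(512, 384) + 1 == 3
# and likewise emb_tiles_y == 3, i.e. the super-image is covered by a 3x3 grid of
# overlapping tiles, with the right/bottom edge tiles clamped to the image boundary.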
- - # Prep alpha layers -------------- - # https://stackoverflow.com/questions/69321734/how-to-create-different-transparency-like-gradient-with-python-pil - # agradientL is Left-side transparent - agradientL = Image.linear_gradient('L').rotate( - 90).resize((overlap_size_x, height)) - # agradientT is Top-side transparent - agradientT = Image.linear_gradient('L').resize((width, overlap_size_y)) - # radial corner is the left-top corner, made full circle then cut to just the left-top quadrant - agradientC = Image.new('L', (256, 256)) - for y in range(256): - for x in range(256): - # Find distance to lower right corner (numpy takes arrays) - distanceToLR = np.sqrt([(255 - x) ** 2 + (255 - y) ** 2])[0] - # Clamp values to max 255 - if distanceToLR > 255: - distanceToLR = 255 - #Place the pixel as invert of distance - agradientC.putpixel((x, y), round(255 - distanceToLR)) - - # Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges - # Fits for a left-fading gradient on the bottom side and full opacity on the right side. - agradientAsymC = Image.new('L', (256, 256)) - for y in range(256): - for x in range(256): - value = round(max(0, x-(255-y)) * (255 / max(1,y))) - #Clamp values - value = max(0, value) - value = min(255, value) - agradientAsymC.putpixel((x, y), value) - - # Create alpha layers default fully white - alphaLayerL = Image.new("L", (width, height), 255) - alphaLayerT = Image.new("L", (width, height), 255) - alphaLayerLTC = Image.new("L", (width, height), 255) - # Paste gradients into alpha layers - alphaLayerL.paste(agradientL, (0, 0)) - alphaLayerT.paste(agradientT, (0, 0)) - alphaLayerLTC.paste(agradientL, (0, 0)) - alphaLayerLTC.paste(agradientT, (0, 0)) - alphaLayerLTC.paste(agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0)) - # make masks with an asymmetric upper-right corner so when the curved transparent corner of the next tile - # to its right is placed it doesn't reveal a hard trailing semi-transparent edge in the overlapping space - alphaLayerTaC = alphaLayerT.copy() - alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - alphaLayerLTaC = alphaLayerLTC.copy() - alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - - if embiggen_tiles: - # Individual unconnected sides - alphaLayerR = Image.new("L", (width, height), 255) - alphaLayerR.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerB = Image.new("L", (width, height), 255) - alphaLayerB.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerTB = Image.new("L", (width, height), 255) - alphaLayerTB.paste(agradientT, (0, 0)) - alphaLayerTB.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerLR = Image.new("L", (width, height), 255) - alphaLayerLR.paste(agradientL, (0, 0)) - alphaLayerLR.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - - # Sides and corner Layers - alphaLayerRBC = Image.new("L", (width, height), 255) - alphaLayerRBC.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerRBC.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerRBC.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) - alphaLayerLBC = Image.new("L", (width, height), 255) - alphaLayerLBC.paste(agradientL, (0, 0)) - alphaLayerLBC.paste(agradientT.rotate( - 180), (0, 
height - overlap_size_y)) - alphaLayerLBC.paste(agradientC.rotate(90).resize( - (overlap_size_x, overlap_size_y)), (0, height - overlap_size_y)) - alphaLayerRTC = Image.new("L", (width, height), 255) - alphaLayerRTC.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerRTC.paste(agradientT, (0, 0)) - alphaLayerRTC.paste(agradientC.rotate(270).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - - # All but X layers - alphaLayerABT = Image.new("L", (width, height), 255) - alphaLayerABT.paste(alphaLayerLBC, (0, 0)) - alphaLayerABT.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerABT.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) - alphaLayerABL = Image.new("L", (width, height), 255) - alphaLayerABL.paste(alphaLayerRTC, (0, 0)) - alphaLayerABL.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerABL.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) - alphaLayerABR = Image.new("L", (width, height), 255) - alphaLayerABR.paste(alphaLayerLBC, (0, 0)) - alphaLayerABR.paste(agradientT, (0, 0)) - alphaLayerABR.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) - alphaLayerABB = Image.new("L", (width, height), 255) - alphaLayerABB.paste(alphaLayerRTC, (0, 0)) - alphaLayerABB.paste(agradientL, (0, 0)) - alphaLayerABB.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) - - # All-around layer - alphaLayerAA = Image.new("L", (width, height), 255) - alphaLayerAA.paste(alphaLayerABT, (0, 0)) - alphaLayerAA.paste(agradientT, (0, 0)) - alphaLayerAA.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) - alphaLayerAA.paste(agradientC.rotate(270).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) - - # Clean up temporary gradients - del agradientL - del agradientT - del agradientC - - def make_image(): - # Make main tiles ------------------------------------------------- - if embiggen_tiles: - print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...') - else: - print( - f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...') - - emb_tile_store = [] - # Although we could use the same seed for every tile for determinism, at higher strengths this may - # produce duplicated structures for each tile and make the tiling effect more obvious - # instead track and iterate a local seed we pass to Img2Img - seed = self.seed - seedintlimit = np.iinfo(np.uint32).max - 1 # only retreive this one from numpy - - for tile in range(emb_tiles_x * emb_tiles_y): - # Don't iterate on first tile - if tile != 0: - if seed < seedintlimit: - seed += 1 - else: - seed = 0 - - # Determine if this is a re-run and replace - if embiggen_tiles and not tile in embiggen_tiles: - continue - # Get row and column entries - emb_row_i = tile // emb_tiles_x - emb_column_i = tile % emb_tiles_x - # Determine bounds to cut up the init image - # Determine upper-left point - if emb_column_i + 1 == emb_tiles_x: - left = initsuperwidth - width - else: - left = round(emb_column_i * (width - overlap_size_x)) - if emb_row_i + 1 == emb_tiles_y: - top = initsuperheight - height - else: - top = round(emb_row_i * (height - overlap_size_y)) - right = left + width - bottom = top + height - - # Cropped image of above dimension (does not modify the original) - newinitimage = initsuperimage.crop((left, top, right, 
bottom)) - # DEBUG: - # newinitimagepath = init_img[0:-4] + f'_emb_Ti{tile}.png' - # newinitimage.save(newinitimagepath) - - if embiggen_tiles: - print( - f'Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)') - else: - print( - f'Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles') - - # create a torch tensor from an Image - newinitimage = np.array( - newinitimage).astype(np.float32) / 255.0 - newinitimage = newinitimage[None].transpose(0, 3, 1, 2) - newinitimage = torch.from_numpy(newinitimage) - newinitimage = 2.0 * newinitimage - 1.0 - newinitimage = newinitimage.to(self.model.device) - clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None - - tile_results = gen_img2img.generate( - prompt, - iterations = 1, - seed = seed, - sampler = sampler, - steps = steps, - cfg_scale = cfg_scale, - conditioning = conditioning, - ddim_eta = ddim_eta, - image_callback = None, # called only after the final image is generated - step_callback = step_callback, # called after each intermediate image is generated - width = width, - height = height, - init_image = newinitimage, # notice that init_image is different from init_img - mask_image = None, - strength = strength, - clear_cuda_cache = clear_cuda_cache - ) - - emb_tile_store.append(tile_results[0][0]) - # DEBUG (but, also has other uses), worth saving if you want tiles without a transparency overlap to manually composite - # emb_tile_store[-1].save(init_img[0:-4] + f'_emb_To{tile}.png') - del newinitimage - - # Sanity check we have them all - if len(emb_tile_store) == (emb_tiles_x * emb_tiles_y) or (embiggen_tiles != [] and len(emb_tile_store) == len(embiggen_tiles)): - outputsuperimage = Image.new( - "RGBA", (initsuperwidth, initsuperheight)) - if embiggen_tiles: - outputsuperimage.alpha_composite( - initsuperimage.convert('RGBA'), (0, 0)) - for tile in range(emb_tiles_x * emb_tiles_y): - if embiggen_tiles: - if tile in embiggen_tiles: - intileimage = emb_tile_store.pop(0) - else: - continue - else: - intileimage = emb_tile_store[tile] - intileimage = intileimage.convert('RGBA') - # Get row and column entries - emb_row_i = tile // emb_tiles_x - emb_column_i = tile % emb_tiles_x - if emb_row_i == 0 and emb_column_i == 0 and not embiggen_tiles: - left = 0 - top = 0 - else: - # Determine upper-left point - if emb_column_i + 1 == emb_tiles_x: - left = initsuperwidth - width - else: - left = round(emb_column_i * - (width - overlap_size_x)) - if emb_row_i + 1 == emb_tiles_y: - top = initsuperheight - height - else: - top = round(emb_row_i * (height - overlap_size_y)) - # Handle gradients for various conditions - # Handle emb_rerun case - if embiggen_tiles: - # top of image - if emb_row_i == 0: - if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) not in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerB) - # Otherwise do nothing on this tile - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerR) - else: - intileimage.putalpha(alphaLayerRBC) - elif emb_column_i == emb_tiles_x - 1: - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerL) - else: - intileimage.putalpha(alphaLayerLBC) - else: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerL) - else: - intileimage.putalpha(alphaLayerLBC) - elif (tile+emb_tiles_x) in 
embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerLR) - else: - intileimage.putalpha(alphaLayerABT) - # bottom of image - elif emb_row_i == emb_tiles_y - 1: - if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - intileimage.putalpha(alphaLayerTaC) - else: - intileimage.putalpha(alphaLayerRTC) - elif emb_column_i == emb_tiles_x - 1: - # No tiles to look ahead to - intileimage.putalpha(alphaLayerLTC) - else: - if (tile+1) in embiggen_tiles: # Look-ahead right - intileimage.putalpha(alphaLayerLTaC) - else: - intileimage.putalpha(alphaLayerABB) - # vertical middle of image - else: - if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerTaC) - else: - intileimage.putalpha(alphaLayerTB) - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerRTC) - else: - intileimage.putalpha(alphaLayerABL) - elif emb_column_i == emb_tiles_x - 1: - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerLTC) - else: - intileimage.putalpha(alphaLayerABR) - else: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down - intileimage.putalpha(alphaLayerLTaC) - else: - intileimage.putalpha(alphaLayerABR) - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only - intileimage.putalpha(alphaLayerABB) - else: - intileimage.putalpha(alphaLayerAA) - # Handle normal tiling case (much simpler - since we tile left to right, top to bottom) - else: - if emb_row_i == 0 and emb_column_i >= 1: - intileimage.putalpha(alphaLayerL) - elif emb_row_i >= 1 and emb_column_i == 0: - if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right - intileimage.putalpha(alphaLayerT) - else: - intileimage.putalpha(alphaLayerTaC) - else: - if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right - intileimage.putalpha(alphaLayerLTC) - else: - intileimage.putalpha(alphaLayerLTaC) - # Layer tile onto final image - outputsuperimage.alpha_composite(intileimage, (left, top)) - else: - print('Error: could not find all Embiggen output tiles in memory? 
Something must have gone wrong with img2img generation.') - - # after internal loops and patching up return Embiggen image - return outputsuperimage - # end of function declaration - return make_image diff --git a/ldm/invoke/generator/img2img.py b/ldm/invoke/generator/img2img.py deleted file mode 100644 index 67a588234b..0000000000 --- a/ldm/invoke/generator/img2img.py +++ /dev/null @@ -1,70 +0,0 @@ -''' -ldm.invoke.generator.img2img descends from ldm.invoke.generator -''' - -import torch -from diffusers import logging - -from ldm.invoke.generator.base import Generator -from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData -from ldm.models.diffusion.shared_invokeai_diffusion import PostprocessingSettings - - -class Img2Img(Generator): - def __init__(self, model, precision): - super().__init__(model, precision) - self.init_latent = None # by get_noise() - - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,init_image,strength,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0, - h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None, - **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it. - """ - self.perlin = perlin - - # noinspection PyTypeChecker - pipeline: StableDiffusionGeneratorPipeline = self.model - pipeline.scheduler = sampler - - uc, c, extra_conditioning_info = conditioning - conditioning_data = ( - ConditioningData( - uc, c, cfg_scale, extra_conditioning_info, - postprocessing_settings=PostprocessingSettings( - threshold=threshold, - warmup=warmup, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct - ) - ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) - - - def make_image(x_T): - # FIXME: use x_T for initial seeded noise - # We're not at the moment because the pipeline automatically resizes init_image if - # necessary, which the x_T input might not match. 
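# Note (not part of the original file): rather than consuming x_T here, the call below
# passes noise_func=self.get_noise_like, so the pipeline generates noise shaped like the
# latents it actually derives from the (possibly resized) init_image.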
- logging.set_verbosity_error() # quench safety check warnings - pipeline_output = pipeline.img2img_from_embeddings( - init_image, strength, steps, conditioning_data, - noise_func=self.get_noise_like, - callback=step_callback - ) - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: - attention_maps_callback(pipeline_output.attention_map_saver) - return pipeline.numpy_to_pil(pipeline_output.images)[0] - - return make_image - - def get_noise_like(self, like: torch.Tensor): - device = like.device - if device.type == 'mps': - x = torch.randn_like(like, device='cpu').to(device) - else: - x = torch.randn_like(like, device=device) - if self.perlin > 0.0: - shape = like.shape - x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) - return x diff --git a/ldm/invoke/generator/inpaint.py b/ldm/invoke/generator/inpaint.py deleted file mode 100644 index 61c5b56582..0000000000 --- a/ldm/invoke/generator/inpaint.py +++ /dev/null @@ -1,324 +0,0 @@ -''' -ldm.invoke.generator.inpaint descends from ldm.invoke.generator -''' -from __future__ import annotations - -import math - -import PIL -import cv2 -import numpy as np -import torch -from PIL import Image, ImageFilter, ImageOps, ImageChops - -from ldm.invoke.generator.diffusers_pipeline import image_resized_to_grid_as_tensor, StableDiffusionGeneratorPipeline, \ - ConditioningData -from ldm.invoke.generator.img2img import Img2Img -from ldm.invoke.patchmatch import PatchMatch -from ldm.util import debug_image - - -def infill_methods()->list[str]: - methods = [ - "tile", - "solid", - ] - if PatchMatch.patchmatch_available(): - methods.insert(0, 'patchmatch') - return methods - -class Inpaint(Img2Img): - def __init__(self, model, precision): - self.inpaint_height = 0 - self.inpaint_width = 0 - self.enable_image_debugging = False - self.init_latent = None - self.pil_image = None - self.pil_mask = None - self.mask_blur_radius = 0 - self.infill_method = None - super().__init__(model, precision) - - # Outpaint support code - def get_tile_images(self, image: np.ndarray, width=8, height=8): - _nrows, _ncols, depth = image.shape - _strides = image.strides - - nrows, _m = divmod(_nrows, height) - ncols, _n = divmod(_ncols, width) - if _m != 0 or _n != 0: - return None - - return np.lib.stride_tricks.as_strided( - np.ravel(image), - shape=(nrows, ncols, height, width, depth), - strides=(height * _strides[0], width * _strides[1], *_strides), - writeable=False - ) - - def infill_patchmatch(self, im: Image.Image) -> Image: - if im.mode != 'RGBA': - return im - - # Skip patchmatch if patchmatch isn't available - if not PatchMatch.patchmatch_available(): - return im - - # Patchmatch (note, we may want to expose patch_size? 
Increasing it significantly impacts performance though) - im_patched_np = PatchMatch.inpaint(im.convert('RGB'), ImageOps.invert(im.split()[-1]), patch_size = 3) - im_patched = Image.fromarray(im_patched_np, mode = 'RGB') - return im_patched - - def tile_fill_missing(self, im: Image.Image, tile_size: int = 16, seed: int = None) -> Image: - # Only fill if there's an alpha layer - if im.mode != 'RGBA': - return im - - a = np.asarray(im, dtype=np.uint8) - - tile_size = (tile_size, tile_size) - - # Get the image as tiles of a specified size - tiles = self.get_tile_images(a,*tile_size).copy() - - # Get the mask as tiles - tiles_mask = tiles[:,:,:,:,3] - - # Find any mask tiles with any fully transparent pixels (we will be replacing these later) - tmask_shape = tiles_mask.shape - tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape)) - n,ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:]) - tiles_mask = (tiles_mask > 0) - tiles_mask = tiles_mask.reshape((n,ny)).all(axis = 1) - - # Get RGB tiles in single array and filter by the mask - tshape = tiles.shape - tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), * tiles.shape[2:])) - filtered_tiles = tiles_all[tiles_mask] - - if len(filtered_tiles) == 0: - return im - - # Find all invalid tiles and replace with a random valid tile - replace_count = (tiles_mask == False).sum() - rng = np.random.default_rng(seed = seed) - tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[rng.choice(filtered_tiles.shape[0], replace_count),:,:,:] - - # Convert back to an image - tiles_all = tiles_all.reshape(tshape) - tiles_all = tiles_all.swapaxes(1,2) - st = tiles_all.reshape((math.prod(tiles_all.shape[0:2]), math.prod(tiles_all.shape[2:4]), tiles_all.shape[4])) - si = Image.fromarray(st, mode='RGBA') - - return si - - - def mask_edge(self, mask: Image, edge_size: int, edge_blur: int) -> Image: - npimg = np.asarray(mask, dtype=np.uint8) - - # Detect any partially transparent regions - npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0))) - - # Detect hard edges - npedge = cv2.Canny(npimg, threshold1=100, threshold2=200) - - # Combine - npmask = npgradient + npedge - - # Expand - npmask = cv2.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2)) - - new_mask = Image.fromarray(npmask) - - if edge_blur > 0: - new_mask = new_mask.filter(ImageFilter.BoxBlur(edge_blur)) - - return ImageOps.invert(new_mask) - - - def seam_paint(self, im: Image.Image, seam_size: int, seam_blur: int, prompt, sampler, steps, cfg_scale, ddim_eta, - conditioning, strength, noise, infill_method, step_callback) -> Image.Image: - hard_mask = self.pil_image.split()[-1].copy() - mask = self.mask_edge(hard_mask, seam_size, seam_blur) - - make_image = self.get_make_image( - prompt, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - init_image = im.copy().convert('RGBA'), - mask_image = mask, - strength = strength, - mask_blur_radius = 0, - seam_size = 0, - step_callback = step_callback, - inpaint_width = im.width, - inpaint_height = im.height, - infill_method = infill_method - ) - - seam_noise = self.get_noise(im.width, im.height) - - result = make_image(seam_noise) - - return result - - - @torch.no_grad() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning, - init_image: PIL.Image.Image | torch.FloatTensor, - mask_image: PIL.Image.Image | torch.FloatTensor, - strength: float, - mask_blur_radius: int = 8, - # Seam settings - when 0, doesn't fill seam - seam_size: int = 0, - 
seam_blur: int = 0, - seam_strength: float = 0.7, - seam_steps: int = 10, - tile_size: int = 32, - step_callback=None, - inpaint_replace=False, enable_image_debugging=False, - infill_method = None, - inpaint_width=None, - inpaint_height=None, - inpaint_fill:tuple(int)=(0x7F, 0x7F, 0x7F, 0xFF), - attention_maps_callback=None, - **kwargs): - """ - Returns a function returning an image derived from the prompt and - the initial image + mask. Return value depends on the seed at - the time you call it. kwargs are 'init_latent' and 'strength' - """ - - self.enable_image_debugging = enable_image_debugging - infill_method = infill_method or infill_methods()[0] - self.infill_method = infill_method - - self.inpaint_width = inpaint_width - self.inpaint_height = inpaint_height - - if isinstance(init_image, PIL.Image.Image): - self.pil_image = init_image.copy() - - # Do infill - if infill_method == 'patchmatch' and PatchMatch.patchmatch_available(): - init_filled = self.infill_patchmatch(self.pil_image.copy()) - elif infill_method == 'tile': - init_filled = self.tile_fill_missing( - self.pil_image.copy(), - seed = self.seed, - tile_size = tile_size - ) - elif infill_method == 'solid': - solid_bg = PIL.Image.new("RGBA", init_image.size, inpaint_fill) - init_filled = PIL.Image.alpha_composite(solid_bg, init_image) - else: - raise ValueError(f"Non-supported infill type {infill_method}", infill_method) - init_filled.paste(init_image, (0,0), init_image.split()[-1]) - - # Resize if requested for inpainting - if inpaint_width and inpaint_height: - init_filled = init_filled.resize((inpaint_width, inpaint_height)) - - debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging) - - # Create init tensor - init_image = image_resized_to_grid_as_tensor(init_filled.convert('RGB')) - - if isinstance(mask_image, PIL.Image.Image): - self.pil_mask = mask_image.copy() - debug_image(mask_image, "mask_image BEFORE multiply with pil_image", debug_status=self.enable_image_debugging) - - init_alpha = self.pil_image.getchannel("A") - if mask_image.mode != "L": - # FIXME: why do we get passed an RGB image here? We can only use single-channel. - mask_image = mask_image.convert("L") - mask_image = ImageChops.multiply(mask_image, init_alpha) - self.pil_mask = mask_image - - # Resize if requested for inpainting - if inpaint_width and inpaint_height: - mask_image = mask_image.resize((inpaint_width, inpaint_height)) - - debug_image(mask_image, "mask_image AFTER multiply with pil_image", debug_status=self.enable_image_debugging) - mask: torch.FloatTensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) - else: - mask: torch.FloatTensor = mask_image - - self.mask_blur_radius = mask_blur_radius - - # noinspection PyTypeChecker - pipeline: StableDiffusionGeneratorPipeline = self.model - pipeline.scheduler = sampler - - # todo: support cross-attention control - uc, c, _ = conditioning - conditioning_data = (ConditioningData(uc, c, cfg_scale) - .add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) - - - def make_image(x_T): - pipeline_output = pipeline.inpaint_from_embeddings( - init_image=init_image, - mask=1 - mask, # expects white means "paint here." 
- strength=strength, - num_inference_steps=steps, - conditioning_data=conditioning_data, - noise_func=self.get_noise_like, - callback=step_callback, - ) - - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: - attention_maps_callback(pipeline_output.attention_map_saver) - - result = self.postprocess_size_and_mask(pipeline.numpy_to_pil(pipeline_output.images)[0]) - - # Seam paint if this is our first pass (seam_size set to 0 during seam painting) - if seam_size > 0: - old_image = self.pil_image or init_image - old_mask = self.pil_mask or mask_image - - result = self.seam_paint(result, seam_size, seam_blur, prompt, sampler, seam_steps, cfg_scale, ddim_eta, - conditioning, seam_strength, x_T, infill_method, step_callback) - - # Restore original settings - self.get_make_image(prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning, - old_image, - old_mask, - strength, - mask_blur_radius, seam_size, seam_blur, seam_strength, - seam_steps, tile_size, step_callback, - inpaint_replace, enable_image_debugging, - inpaint_width = inpaint_width, - inpaint_height = inpaint_height, - infill_method = infill_method, - **kwargs) - - return result - - return make_image - - - def sample_to_image(self, samples)->Image.Image: - gen_result = super().sample_to_image(samples).convert('RGB') - return self.postprocess_size_and_mask(gen_result) - - - def postprocess_size_and_mask(self, gen_result: Image.Image) -> Image.Image: - debug_image(gen_result, "gen_result", debug_status=self.enable_image_debugging) - - # Resize if necessary - if self.inpaint_width and self.inpaint_height: - gen_result = gen_result.resize(self.pil_image.size) - - if self.pil_image is None or self.pil_mask is None: - return gen_result - - corrected_result = self.repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) - debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging) - - return corrected_result diff --git a/ldm/invoke/generator/omnibus.py b/ldm/invoke/generator/omnibus.py deleted file mode 100644 index a6fae3e567..0000000000 --- a/ldm/invoke/generator/omnibus.py +++ /dev/null @@ -1,173 +0,0 @@ -"""omnibus module to be used with the runwayml 9-channel custom inpainting model""" - -import torch -from PIL import Image, ImageOps -from einops import repeat - -from ldm.invoke.devices import choose_autocast -from ldm.invoke.generator.img2img import Img2Img -from ldm.invoke.generator.txt2img import Txt2Img - - -class Omnibus(Img2Img,Txt2Img): - def __init__(self, model, precision): - super().__init__(model, precision) - self.pil_mask = None - self.pil_image = None - - def get_make_image( - self, - prompt, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - width, - height, - init_image = None, - mask_image = None, - strength = None, - step_callback=None, - threshold=0.0, - perlin=0.0, - mask_blur_radius: int = 8, - **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it. 
- """ - self.perlin = perlin - num_samples = 1 - - sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - if isinstance(init_image, Image.Image): - self.pil_image = init_image - if init_image.mode != 'RGB': - init_image = init_image.convert('RGB') - init_image = self._image_to_tensor(init_image) - - if isinstance(mask_image, Image.Image): - self.pil_mask = mask_image - - mask_image = ImageChops.multiply(mask_image.convert('L'), self.pil_image.split()[-1]) - mask_image = self._image_to_tensor(ImageOps.invert(mask_image), normalize=False) - - self.mask_blur_radius = mask_blur_radius - - if init_image is not None and mask_image is not None: # inpainting - masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero - - elif init_image is not None: # img2img - scope = choose_autocast(self.precision) - - with scope(self.model.device.type): - self.init_latent = self.model.get_first_stage_encoding( - self.model.encode_first_stage(init_image) - ) # move to latent space - - # create a completely black mask (1s) - mask_image = torch.ones(1, 1, init_image.shape[2], init_image.shape[3], device=self.model.device) - # and the masked image is just a copy of the original - masked_image = init_image - - else: # txt2img - init_image = torch.zeros(1, 3, height, width, device=self.model.device) - mask_image = torch.ones(1, 1, height, width, device=self.model.device) - masked_image = init_image - - self.init_latent = init_image - height = init_image.shape[2] - width = init_image.shape[3] - model = self.model - - def make_image(x_T): - with torch.no_grad(): - scope = choose_autocast(self.precision) - with scope(self.model.device.type): - - batch = self.make_batch_sd( - init_image, - mask_image, - masked_image, - prompt=prompt, - device=model.device, - num_samples=num_samples, - ) - - c = model.cond_stage_model.encode(batch["txt"]) - c_cat = list() - for ck in model.concat_keys: - cc = batch[ck].float() - if ck != model.masked_image_key: - bchw = [num_samples, 4, height//8, width//8] - cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) - else: - cc = model.get_first_stage_encoding(model.encode_first_stage(cc)) - c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - - # cond - cond={"c_concat": [c_cat], "c_crossattn": [c]} - - # uncond cond - uc_cross = model.get_unconditional_conditioning(num_samples, "") - uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]} - shape = [model.channels, height//8, width//8] - - samples, _ = sampler.sample( - batch_size = 1, - S = steps, - x_T = x_T, - conditioning = cond, - shape = shape, - verbose = False, - unconditional_guidance_scale = cfg_scale, - unconditional_conditioning = uc_full, - eta = 1.0, - img_callback = step_callback, - threshold = threshold, - ) - if self.free_gpu_mem: - self.model.model.to("cpu") - return self.sample_to_image(samples) - - return make_image - - def make_batch_sd( - self, - image, - mask, - masked_image, - prompt, - device, - num_samples=1): - batch = { - "image": repeat(image.to(device=device), "1 ... -> n ...", n=num_samples), - "txt": num_samples * [prompt], - "mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples), - "masked_image": repeat(masked_image.to(device=device), "1 ... 
-> n ...", n=num_samples), - } - return batch - - def get_noise(self, width:int, height:int): - if self.init_latent is not None: - height = self.init_latent.shape[2] - width = self.init_latent.shape[3] - return Txt2Img.get_noise(self,width,height) - - - def sample_to_image(self, samples)->Image.Image: - gen_result = super().sample_to_image(samples).convert('RGB') - - if self.pil_image is None or self.pil_mask is None: - return gen_result - if self.pil_image.size != self.pil_mask.size: - return gen_result - - corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) - - return corrected_result diff --git a/ldm/invoke/generator/txt2img.py b/ldm/invoke/generator/txt2img.py deleted file mode 100644 index 9903de1309..0000000000 --- a/ldm/invoke/generator/txt2img.py +++ /dev/null @@ -1,61 +0,0 @@ -''' -ldm.invoke.generator.txt2img inherits from ldm.invoke.generator -''' -import PIL.Image -import torch - -from .base import Generator -from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData -from ...models.diffusion.shared_invokeai_diffusion import PostprocessingSettings - - -class Txt2Img(Generator): - def __init__(self, model, precision): - super().__init__(model, precision) - - @torch.no_grad() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,width,height,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0, - h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None, - **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - kwargs are 'width' and 'height' - """ - self.perlin = perlin - - # noinspection PyTypeChecker - pipeline: StableDiffusionGeneratorPipeline = self.model - pipeline.scheduler = sampler - - uc, c, extra_conditioning_info = conditioning - conditioning_data = ( - ConditioningData( - uc, c, cfg_scale, extra_conditioning_info, - postprocessing_settings=PostprocessingSettings( - threshold=threshold, - warmup=warmup, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct - ) - ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) - - def make_image(x_T) -> PIL.Image.Image: - pipeline_output = pipeline.image_from_embeddings( - latents=torch.zeros_like(x_T,dtype=self.torch_dtype()), - noise=x_T, - num_inference_steps=steps, - conditioning_data=conditioning_data, - callback=step_callback, - ) - - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: - attention_maps_callback(pipeline_output.attention_map_saver) - - return pipeline.numpy_to_pil(pipeline_output.images)[0] - - return make_image - - - diff --git a/ldm/invoke/generator/txt2img2img.py b/ldm/invoke/generator/txt2img2img.py deleted file mode 100644 index a39dfccc3a..0000000000 --- a/ldm/invoke/generator/txt2img2img.py +++ /dev/null @@ -1,163 +0,0 @@ -''' -ldm.invoke.generator.txt2img inherits from ldm.invoke.generator -''' - -import math -from typing import Callable, Optional - -import torch -from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error - -from ldm.invoke.generator.base import Generator -from ldm.invoke.generator.diffusers_pipeline import trim_to_multiple_of, StableDiffusionGeneratorPipeline, \ - ConditioningData -from ldm.models.diffusion.shared_invokeai_diffusion import PostprocessingSettings - - -class Txt2Img2Img(Generator): - def 
__init__(self, model, precision): - super().__init__(model, precision) - self.init_latent = None # for get_noise() - - def get_make_image(self, prompt:str, sampler, steps:int, cfg_scale:float, ddim_eta, - conditioning, width:int, height:int, strength:float, - step_callback:Optional[Callable]=None, threshold=0.0, warmup=0.2, perlin=0.0, - h_symmetry_time_pct=None, v_symmetry_time_pct=None, attention_maps_callback=None, **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - kwargs are 'width' and 'height' - """ - self.perlin = perlin - - # noinspection PyTypeChecker - pipeline: StableDiffusionGeneratorPipeline = self.model - pipeline.scheduler = sampler - - uc, c, extra_conditioning_info = conditioning - conditioning_data = ( - ConditioningData( - uc, c, cfg_scale, extra_conditioning_info, - postprocessing_settings = PostprocessingSettings( - threshold=threshold, - warmup=0.2, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct - ) - ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) - - def make_image(x_T): - - first_pass_latent_output, _ = pipeline.latents_from_embeddings( - latents=torch.zeros_like(x_T), - num_inference_steps=steps, - conditioning_data=conditioning_data, - noise=x_T, - callback=step_callback, - ) - - # Get our initial generation width and height directly from the latent output so - # the message below is accurate. - init_width = first_pass_latent_output.size()[3] * self.downsampling_factor - init_height = first_pass_latent_output.size()[2] * self.downsampling_factor - print( - f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling" - ) - - # resizing - resized_latents = torch.nn.functional.interpolate( - first_pass_latent_output, - size=(height // self.downsampling_factor, width // self.downsampling_factor), - mode="bilinear" - ) - - # Free up memory from the last generation. - clear_cuda_cache = kwargs['clear_cuda_cache'] or None - if clear_cuda_cache is not None: - clear_cuda_cache() - - second_pass_noise = self.get_noise_like(resized_latents, override_perlin=True) - - # Clear symmetry for the second pass - from dataclasses import replace - new_postprocessing_settings = replace(conditioning_data.postprocessing_settings, h_symmetry_time_pct=None) - new_postprocessing_settings = replace(new_postprocessing_settings, v_symmetry_time_pct=None) - new_conditioning_data = replace(conditioning_data, postprocessing_settings=new_postprocessing_settings) - - verbosity = get_verbosity() - set_verbosity_error() - pipeline_output = pipeline.img2img_from_latents_and_embeddings( - resized_latents, - num_inference_steps=steps, - conditioning_data=new_conditioning_data, - strength=strength, - noise=second_pass_noise, - callback=step_callback) - set_verbosity(verbosity) - - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: - attention_maps_callback(pipeline_output.attention_map_saver) - - return pipeline.numpy_to_pil(pipeline_output.images)[0] - - - # FIXME: do we really need something entirely different for the inpainting model? - - # in the case of the inpainting model being loaded, the trick of - # providing an interpolated latent doesn't work, so we transiently - # create a 512x512 PIL image, upscale it, and run the inpainting - # over it in img2img mode. 
Because the inpaing model is so conservative - # it doesn't change the image (much) - - return make_image - - def get_noise_like(self, like: torch.Tensor, override_perlin: bool=False): - device = like.device - if device.type == 'mps': - x = torch.randn_like(like, device='cpu', dtype=self.torch_dtype()).to(device) - else: - x = torch.randn_like(like, device=device, dtype=self.torch_dtype()) - if self.perlin > 0.0 and override_perlin == False: - shape = like.shape - x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) - return x - - # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height,scale = True): - # print(f"Get noise: {width}x{height}") - if scale: - # Scale the input width and height for the initial generation - # Make their area equivalent to the model's resolution area (e.g. 512*512 = 262144), - # while keeping the minimum dimension at least 0.5 * resolution (e.g. 512*0.5 = 256) - - aspect = width / height - dimension = self.model.unet.config.sample_size * self.model.vae_scale_factor - min_dimension = math.floor(dimension * 0.5) - model_area = dimension * dimension # hardcoded for now since all models are trained on square images - - if aspect > 1.0: - init_height = max(min_dimension, math.sqrt(model_area / aspect)) - init_width = init_height * aspect - else: - init_width = max(min_dimension, math.sqrt(model_area * aspect)) - init_height = init_width / aspect - - scaled_width, scaled_height = trim_to_multiple_of(math.floor(init_width), math.floor(init_height)) - - else: - scaled_width = width - scaled_height = height - - device = self.model.device - channels = self.latent_channels - if channels == 9: - channels = 4 # we don't really want noise for all the mask channels - shape = (1, channels, - scaled_height // self.downsampling_factor, scaled_width // self.downsampling_factor) - if self.use_mps_noise or device.type == 'mps': - tensor = torch.empty(size=shape, device='cpu') - tensor = self.get_noise_like(like=tensor).to(device) - else: - tensor = torch.empty(size=shape, device=device) - tensor = self.get_noise_like(like=tensor) - return tensor diff --git a/ldm/invoke/globals.py b/ldm/invoke/globals.py index e47b5c059e..c6ee0bbc54 100644 --- a/ldm/invoke/globals.py +++ b/ldm/invoke/globals.py @@ -61,7 +61,7 @@ Globals.sequential_guidance = False Globals.full_precision = False # whether we should convert ckpt files into diffusers models on the fly -Globals.ckpt_convert = False +Globals.ckpt_convert = True # logging tokenization everywhere Globals.log_tokenization = False diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py index 3cb3613ee3..5c100fcf8b 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/ldm/invoke/merge_diffusers.py @@ -23,7 +23,7 @@ from omegaconf import OmegaConf from ldm.invoke.config.widgets import FloatTitleSlider from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file, global_models_dir, global_set_root) -from ldm.invoke.model_manager import ModelManager +from invokeai.models import ModelManager DEST_MERGED_MODEL_DIR = "merged_models" diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py deleted file mode 100644 index 694d65c1a7..0000000000 --- a/ldm/invoke/model_manager.py +++ /dev/null @@ -1,1372 +0,0 @@ -""" -Manage a cache of Stable Diffusion model files for fast switching. -They are moved between GPU and CPU as necessary. 
If CPU memory falls -below a preset minimum, the least recently used model will be -cleared and loaded from disk when next needed. -""" -from __future__ import annotations - -import contextlib -import gc -import hashlib -import io -import os -import re -import sys -import textwrap -import time -import warnings -from enum import Enum -from pathlib import Path -from shutil import move, rmtree -from typing import Any, Optional, Union - -import safetensors -import safetensors.torch -import torch -import transformers -from diffusers import AutoencoderKL -from diffusers import logging as dlogging -from huggingface_hub import scan_cache_dir -from omegaconf import OmegaConf -from omegaconf.dictconfig import DictConfig -from picklescan.scanner import scan_file_path - -from ldm.invoke.devices import CPU_DEVICE -from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline -from ldm.invoke.globals import Globals, global_cache_dir -from ldm.util import ( - ask_user, - download_with_resume, - instantiate_from_config, - url_attachment_name, -) - - -class SDLegacyType(Enum): - V1 = 1 - V1_INPAINT = 2 - V2 = 3 - UNKNOWN = 99 - - -DEFAULT_MAX_MODELS = 2 -VAE_TO_REPO_ID = { # hack, see note in convert_and_import() - "vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse", -} - - -class ModelManager(object): - def __init__( - self, - config: OmegaConf, - device_type: torch.device = CPU_DEVICE, - precision: str = "float16", - max_loaded_models=DEFAULT_MAX_MODELS, - sequential_offload=False, - ): - """ - Initialize with the path to the models.yaml config file, - the torch device type, and precision. The optional - min_avail_mem argument specifies how much unused system - (CPU) memory to preserve. The cache of models in RAM will - grow until this value is approached. Default is 2G. - """ - # prevent nasty-looking CLIP log message - transformers.logging.set_verbosity_error() - self.config = config - self.precision = precision - self.device = torch.device(device_type) - self.max_loaded_models = max_loaded_models - self.models = {} - self.stack = [] # this is an LRU FIFO - self.current_model = None - self.sequential_offload = sequential_offload - - def valid_model(self, model_name: str) -> bool: - """ - Given a model name, returns True if it is a valid - identifier. - """ - return model_name in self.config - - def get_model(self, model_name: str): - """ - Given a model named identified in models.yaml, return - the model object. If in RAM will load into GPU VRAM. - If on disk, will load from there. - """ - if not self.valid_model(model_name): - print( - f'** "{model_name}" is not a known model name. 
Please check your models.yaml file' - ) - return self.current_model - - if self.current_model != model_name: - if model_name not in self.models: # make room for a new one - self._make_cache_room() - self.offload_model(self.current_model) - - if model_name in self.models: - requested_model = self.models[model_name]["model"] - print(f">> Retrieving model {model_name} from system RAM cache") - self.models[model_name]["model"] = self._model_from_cpu(requested_model) - width = self.models[model_name]["width"] - height = self.models[model_name]["height"] - hash = self.models[model_name]["hash"] - - else: # we're about to load a new model, so potentially offload the least recently used one - requested_model, width, height, hash = self._load_model(model_name) - self.models[model_name] = { - "model": requested_model, - "width": width, - "height": height, - "hash": hash, - } - - self.current_model = model_name - self._push_newest_model(model_name) - return { - "model": requested_model, - "width": width, - "height": height, - "hash": hash, - } - - def default_model(self) -> str | None: - """ - Returns the name of the default model, or None - if none is defined. - """ - for model_name in self.config: - if self.config[model_name].get("default"): - return model_name - return list(self.config.keys())[0] # first one - - def set_default_model(self, model_name: str) -> None: - """ - Set the default model. The change will not take - effect until you call model_manager.commit() - """ - assert model_name in self.model_names(), f"unknown model '{model_name}'" - - config = self.config - for model in config: - config[model].pop("default", None) - config[model_name]["default"] = True - - def model_info(self, model_name: str) -> dict: - """ - Given a model name returns the OmegaConf (dict-like) object describing it. - """ - if model_name not in self.config: - return None - return self.config[model_name] - - def model_names(self) -> list[str]: - """ - Return a list consisting of all the names of models defined in models.yaml - """ - return list(self.config.keys()) - - def is_legacy(self, model_name: str) -> bool: - """ - Return true if this is a legacy (.ckpt) model - """ - # if we are converting legacy files automatically, then - # there are no legacy ckpts! 
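# (Elsewhere in this commit the default is flipped to Globals.ckpt_convert = True, so
# this early return is the common path: .ckpt/.safetensors entries get converted to
# diffusers pipelines at load time rather than being handled as legacy checkpoints.)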
- if Globals.ckpt_convert: - return False - info = self.model_info(model_name) - if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")): - return True - return False - - def list_models(self) -> dict: - """ - Return a dict of models in the format: - { model_name1: {'status': ('active'|'cached'|'not loaded'), - 'description': description, - 'format': ('ckpt'|'diffusers'|'vae'), - }, - model_name2: { etc } - Please use model_manager.models() to get all the model names, - model_manager.model_info('model-name') to get the stanza for the model - named 'model-name', and model_manager.config to get the full OmegaConf - object derived from models.yaml - """ - models = {} - for name in sorted(self.config, key=str.casefold): - stanza = self.config[name] - - # don't include VAEs in listing (legacy style) - if "config" in stanza and "/VAE/" in stanza["config"]: - continue - - models[name] = dict() - format = stanza.get("format", "ckpt") # Determine Format - - # Common Attribs - description = stanza.get("description", None) - if self.current_model == name: - status = "active" - elif name in self.models: - status = "cached" - else: - status = "not loaded" - models[name].update( - description=description, - format=format, - status=status, - ) - - # Checkpoint Config Parse - if format == "ckpt": - models[name].update( - config=str(stanza.get("config", None)), - weights=str(stanza.get("weights", None)), - vae=str(stanza.get("vae", None)), - width=str(stanza.get("width", 512)), - height=str(stanza.get("height", 512)), - ) - - # Diffusers Config Parse - if vae := stanza.get("vae", None): - if isinstance(vae, DictConfig): - vae = dict( - repo_id=str(vae.get("repo_id", None)), - path=str(vae.get("path", None)), - subfolder=str(vae.get("subfolder", None)), - ) - - if format == "diffusers": - models[name].update( - vae=vae, - repo_id=str(stanza.get("repo_id", None)), - path=str(stanza.get("path", None)), - ) - - return models - - def print_models(self) -> None: - """ - Print a table of models, their descriptions, and load status - """ - models = self.list_models() - for name in models: - if models[name]["format"] == "vae": - continue - line = f'{name:25s} {models[name]["status"]:>10s} {models[name]["format"]:10s} {models[name]["description"]}' - if models[name]["status"] == "active": - line = f"\033[1m{line}\033[0m" - print(line) - - def del_model(self, model_name: str, delete_files: bool = False) -> None: - """ - Delete the named model. - """ - omega = self.config - if model_name not in omega: - print(f"** Unknown model {model_name}") - return - # save these for use in deletion later - conf = omega[model_name] - repo_id = conf.get("repo_id", None) - path = self._abs_path(conf.get("path", None)) - weights = self._abs_path(conf.get("weights", None)) - - del omega[model_name] - if model_name in self.stack: - self.stack.remove(model_name) - if delete_files: - if weights: - print(f"** deleting file {weights}") - Path(weights).unlink(missing_ok=True) - elif path: - print(f"** deleting directory {path}") - rmtree(path, ignore_errors=True) - elif repo_id: - print(f"** deleting the cached model directory for {repo_id}") - self._delete_model_from_cache(repo_id) - - def add_model( - self, model_name: str, model_attributes: dict, clobber: bool = False - ) -> None: - """ - Update the named model with a dictionary of attributes. Will fail with an - assertion error if the name already exists. Pass clobber=True to overwrite. 
- On a successful update, the config will be changed in memory and the - method will return True. Will fail with an assertion error if provided - attributes are incorrect or the model name is missing. - """ - omega = self.config - assert "format" in model_attributes, 'missing required field "format"' - if model_attributes["format"] == "diffusers": - assert ( - "description" in model_attributes - ), 'required field "description" is missing' - assert ( - "path" in model_attributes or "repo_id" in model_attributes - ), 'model must have either the "path" or "repo_id" fields defined' - else: - for field in ("description", "weights", "height", "width", "config"): - assert field in model_attributes, f"required field {field} is missing" - - assert ( - clobber or model_name not in omega - ), f'attempt to overwrite existing model definition "{model_name}"' - - omega[model_name] = model_attributes - - if "weights" in omega[model_name]: - omega[model_name]["weights"].replace("\\", "/") - - if clobber: - self._invalidate_cached_model(model_name) - - def _load_model(self, model_name: str): - """Load and initialize the model from configuration variables passed at object creation time""" - if model_name not in self.config: - print( - f'"{model_name}" is not a known model name. Please check your models.yaml file' - ) - return - - mconfig = self.config[model_name] - - # for usage statistics - if self._has_cuda(): - torch.cuda.reset_peak_memory_stats() - torch.cuda.empty_cache() - - tic = time.time() - - # this does the work - model_format = mconfig.get("format", "ckpt") - if model_format == "ckpt": - weights = mconfig.weights - print(f">> Loading {model_name} from {weights}") - model, width, height, model_hash = self._load_ckpt_model( - model_name, mconfig - ) - elif model_format == "diffusers": - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - model, width, height, model_hash = self._load_diffusers_model(mconfig) - else: - raise NotImplementedError( - f"Unknown model format {model_name}: {model_format}" - ) - - # usage statistics - toc = time.time() - print(">> Model loaded in", "%4.2fs" % (toc - tic)) - if self._has_cuda(): - print( - ">> Max VRAM used to load the model:", - "%4.2fG" % (torch.cuda.max_memory_allocated() / 1e9), - "\n>> Current VRAM usage:" - "%4.2fG" % (torch.cuda.memory_allocated() / 1e9), - ) - return model, width, height, model_hash - - def _load_ckpt_model(self, model_name, mconfig): - config = mconfig.config - weights = mconfig.weights - vae = mconfig.get("vae") - width = mconfig.width - height = mconfig.height - - if not os.path.isabs(config): - config = os.path.join(Globals.root, config) - if not os.path.isabs(weights): - weights = os.path.normpath(os.path.join(Globals.root, weights)) - - # if converting automatically to diffusers, then we do the conversion and return - # a diffusers pipeline - if Globals.ckpt_convert: - print( - f">> Converting legacy checkpoint {model_name} into a diffusers model..." 
- ) - from ldm.invoke.ckpt_to_diffuser import ( - load_pipeline_from_original_stable_diffusion_ckpt, - ) - - self.offload_model(self.current_model) - if vae_config := self._choose_diffusers_vae(model_name): - vae = self._load_vae(vae_config) - if self._has_cuda(): - torch.cuda.empty_cache() - pipeline = load_pipeline_from_original_stable_diffusion_ckpt( - checkpoint_path=weights, - original_config_file=config, - vae=vae, - return_generator_pipeline=True, - precision=torch.float16 - if self.precision == "float16" - else torch.float32, - ) - if self.sequential_offload: - pipeline.enable_offload_submodels(self.device) - else: - pipeline.to(self.device) - - return ( - pipeline, - width, - height, - "NOHASH", - ) - - # scan model - self.scan_model(model_name, weights) - - print(f">> Loading {model_name} from {weights}") - - # for usage statistics - if self._has_cuda(): - torch.cuda.reset_peak_memory_stats() - torch.cuda.empty_cache() - - # this does the work - if not os.path.isabs(config): - config = os.path.join(Globals.root, config) - omega_config = OmegaConf.load(config) - with open(weights, "rb") as f: - weight_bytes = f.read() - model_hash = self._cached_sha256(weights, weight_bytes) - sd = None - if weights.endswith(".safetensors"): - sd = safetensors.torch.load(weight_bytes) - else: - sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu") - del weight_bytes - # merged models from auto11 merge board are flat for some reason - if "state_dict" in sd: - sd = sd["state_dict"] - - print(" | Forcing garbage collection prior to loading new model") - gc.collect() - model = instantiate_from_config(omega_config.model) - model.load_state_dict(sd, strict=False) - - if self.precision == "float16": - print(" | Using faster float16 precision") - model = model.to(torch.float16) - else: - print(" | Using more accurate float32 precision") - - # look and load a matching vae file. Code borrowed from AUTOMATIC1111 modules/sd_models.py - if vae: - if not os.path.isabs(vae): - vae = os.path.normpath(os.path.join(Globals.root, vae)) - if os.path.exists(vae): - print(f" | Loading VAE weights from: {vae}") - vae_ckpt = None - vae_dict = None - if vae.endswith(".safetensors"): - vae_ckpt = safetensors.torch.load_file(vae) - vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"} - else: - vae_ckpt = torch.load(vae, map_location="cpu") - vae_dict = { - k: v - for k, v in vae_ckpt["state_dict"].items() - if k[0:4] != "loss" - } - model.first_stage_model.load_state_dict(vae_dict, strict=False) - else: - print(f" | VAE file {vae} not found. Skipping.") - - model.to(self.device) - # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here - model.cond_stage_model.device = self.device - - model.eval() - - for module in model.modules(): - if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): - module._orig_padding_mode = module.padding_mode - return model, width, height, model_hash - - def _load_diffusers_model(self, mconfig): - name_or_path = self.model_name_or_path(mconfig) - using_fp16 = self.precision == "float16" - - print(f">> Loading diffusers model from {name_or_path}") - if using_fp16: - print(" | Using faster float16 precision") - else: - print(" | Using more accurate float32 precision") - - # TODO: scan weights maybe? 
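(Aside, not part of the patch: the deleted _load_ckpt_model above boils down to reading the raw weight bytes, hashing them for the cache key, decoding either a .safetensors file or a pickled .ckpt, and unwrapping a nested "state_dict" key when present. The standalone sketch below restates that pattern outside the class for readers skimming the diff; the function name and path argument are illustrative, and only torch and safetensors are assumed.)

# Minimal sketch of the legacy-checkpoint loading pattern shown in the deleted
# code above: hash the bytes, decode safetensors or pickle, unwrap "state_dict".
# Hypothetical helper, not the ModelManager API.
import hashlib
import io

import safetensors.torch
import torch


def load_legacy_state_dict(weights_path: str) -> tuple[dict, str]:
    with open(weights_path, "rb") as f:
        weight_bytes = f.read()
    model_hash = hashlib.sha256(weight_bytes).hexdigest()  # cached as the model hash
    if weights_path.endswith(".safetensors"):
        sd = safetensors.torch.load(weight_bytes)           # decode from in-memory bytes
    else:
        sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu")
    # some merged checkpoints nest the weights under a "state_dict" key
    if "state_dict" in sd:
        sd = sd["state_dict"]
    return sd, model_hash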
- pipeline_args: dict[str, Any] = dict( - safety_checker=None, local_files_only=not Globals.internet_available - ) - if "vae" in mconfig and mconfig["vae"] is not None: - if vae := self._load_vae(mconfig["vae"]): - pipeline_args.update(vae=vae) - if not isinstance(name_or_path, Path): - pipeline_args.update(cache_dir=global_cache_dir("diffusers")) - if using_fp16: - pipeline_args.update(torch_dtype=torch.float16) - fp_args_list = [{"revision": "fp16"}, {}] - else: - fp_args_list = [{}] - - verbosity = dlogging.get_verbosity() - dlogging.set_verbosity_error() - - pipeline = None - for fp_args in fp_args_list: - try: - pipeline = StableDiffusionGeneratorPipeline.from_pretrained( - name_or_path, - **pipeline_args, - **fp_args, - ) - except OSError as e: - if str(e).startswith("fp16 is not a valid"): - pass - else: - print( - f"** An unexpected error occurred while downloading the model: {e})" - ) - if pipeline: - break - - dlogging.set_verbosity(verbosity) - assert pipeline is not None, OSError(f'"{name_or_path}" could not be loaded') - - if self.sequential_offload: - pipeline.enable_offload_submodels(self.device) - else: - pipeline.to(self.device) - - model_hash = self._diffuser_sha256(name_or_path) - - # square images??? - width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor - height = width - - print(f" | Default image dimensions = {width} x {height}") - - return pipeline, width, height, model_hash - - def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path: - if isinstance(model_name, DictConfig) or isinstance(model_name, dict): - mconfig = model_name - elif model_name in self.config: - mconfig = self.config[model_name] - else: - raise ValueError( - f'"{model_name}" is not a known model name. Please check your models.yaml file' - ) - - if "path" in mconfig and mconfig["path"] is not None: - path = Path(mconfig["path"]) - if not path.is_absolute(): - path = Path(Globals.root, path).resolve() - return path - elif "repo_id" in mconfig: - return mconfig["repo_id"] - else: - raise ValueError("Model config must specify either repo_id or path.") - - def offload_model(self, model_name: str) -> None: - """ - Offload the indicated model to CPU. Will call - _make_cache_room() to free space if needed. - """ - if model_name not in self.models: - return - - print(f">> Offloading {model_name} to CPU") - model = self.models[model_name]["model"] - self.models[model_name]["model"] = self._model_to_cpu(model) - - gc.collect() - if self._has_cuda(): - torch.cuda.empty_cache() - - def scan_model(self, model_name, checkpoint): - """ - Apply picklescanner to the indicated checkpoint and issue a warning - and option to exit if an infected file is identified. - """ - # scan model - print(f">> Scanning Model: {model_name}") - scan_result = scan_file_path(checkpoint) - if scan_result.infected_files != 0: - if scan_result.infected_files == 1: - print(f"\n### Issues Found In Model: {scan_result.issues_count}") - print( - "### WARNING: The model you are trying to load seems to be infected." - ) - print("### For your safety, InvokeAI will not load this model.") - print("### Please use checkpoints from trusted sources.") - print("### Exiting InvokeAI") - sys.exit() - else: - print( - "\n### WARNING: InvokeAI was unable to scan the model you are using." 
- ) - model_safe_check_fail = ask_user( - "Do you want to to continue loading the model?", ["y", "n"] - ) - if model_safe_check_fail.lower() != "y": - print("### Exiting InvokeAI") - sys.exit() - else: - print(">> Model scanned ok") - - def import_diffuser_model( - self, - repo_or_path: Union[str, Path], - model_name: str = None, - model_description: str = None, - vae: dict = None, - commit_to_conf: Path = None, - ) -> bool: - """ - Attempts to install the indicated diffuser model and returns True if successful. - - "repo_or_path" can be either a repo-id or a path-like object corresponding to the - top of a downloaded diffusers directory. - - You can optionally provide a model name and/or description. If not provided, - then these will be derived from the repo name. If you provide a commit_to_conf - path to the configuration file, then the new entry will be committed to the - models.yaml file. - """ - model_name = model_name or Path(repo_or_path).stem - model_description = model_description or f"Imported diffusers model {model_name}" - new_config = dict( - description=model_description, - vae=vae, - format="diffusers", - ) - if isinstance(repo_or_path, Path) and repo_or_path.exists(): - new_config.update(path=str(repo_or_path)) - else: - new_config.update(repo_id=repo_or_path) - - self.add_model(model_name, new_config, True) - if commit_to_conf: - self.commit(commit_to_conf) - return model_name - - def import_ckpt_model( - self, - weights: Union[str, Path], - config: Union[str, Path] = "configs/stable-diffusion/v1-inference.yaml", - vae: Union[str, Path] = None, - model_name: str = None, - model_description: str = None, - commit_to_conf: Path = None, - ) -> str: - """ - Attempts to install the indicated ckpt file and returns True if successful. - - "weights" can be either a path-like object corresponding to a local .ckpt file - or a http/https URL pointing to a remote model. - - "vae" is a Path or str object pointing to a ckpt or safetensors file to be used - as the VAE for this model. - - "config" is the model config file to use with this ckpt file. It defaults to - v1-inference.yaml. If a URL is provided, the config will be downloaded. - - You can optionally provide a model name and/or description. If not provided, - then these will be derived from the weight file name. If you provide a commit_to_conf - path to the configuration file, then the new entry will be committed to the - models.yaml file. - - Return value is the name of the imported file, or None if an error occurred. 
- """ - if str(weights).startswith(("http:", "https:")): - model_name = model_name or url_attachment_name(weights) - - weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1") - config_path = self._resolve_path(config, "configs/stable-diffusion") - - if weights_path is None or not weights_path.exists(): - return - if config_path is None or not config_path.exists(): - return - - model_name = ( - model_name or Path(weights).stem - ) # note this gives ugly pathnames if used on a URL without a Content-Disposition header - model_description = ( - model_description or f"Imported stable diffusion weights file {model_name}" - ) - new_config = dict( - weights=str(weights_path), - config=str(config_path), - description=model_description, - format="ckpt", - width=512, - height=512, - ) - if vae: - new_config["vae"] = vae - self.add_model(model_name, new_config, True) - if commit_to_conf: - self.commit(commit_to_conf) - return model_name - - @classmethod - def probe_model_type(self, checkpoint: dict) -> SDLegacyType: - """ - Given a pickle or safetensors model object, probes contents - of the object and returns an SDLegacyType indicating its - format. Valid return values include: - SDLegacyType.V1 - SDLegacyType.V1_INPAINT - SDLegacyType.V2 - SDLegacyType.UNKNOWN - """ - key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" - if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: - return SDLegacyType.V2 - - try: - state_dict = checkpoint.get("state_dict") or checkpoint - in_channels = state_dict[ - "model.diffusion_model.input_blocks.0.0.weight" - ].shape[1] - if in_channels == 9: - return SDLegacyType.V1_INPAINT - elif in_channels == 4: - return SDLegacyType.V1 - else: - return SDLegacyType.UNKNOWN - except KeyError: - return SDLegacyType.UNKNOWN - - def heuristic_import( - self, - path_url_or_repo: str, - convert: bool = False, - model_name: str = None, - description: str = None, - commit_to_conf: Path = None, - ) -> str: - """ - Accept a string which could be: - - a HF diffusers repo_id - - a URL pointing to a legacy .ckpt or .safetensors file - - a local path pointing to a legacy .ckpt or .safetensors file - - a local directory containing .ckpt and .safetensors files - - a local directory containing a diffusers model - - After determining the nature of the model and downloading it - (if necessary), the file is probed to determine the correct - configuration file (if needed) and it is imported. - - The model_name and/or description can be provided. If not, they will - be generated automatically. - - If convert is true, legacy models will be converted to diffusers - before importing. - - If commit_to_conf is provided, the newly loaded model will be written - to the `models.yaml` file at the indicated path. Otherwise, the changes - will only remain in memory. - - The (potentially derived) name of the model is returned on success, or None - on failure. When multiple models are added from a directory, only the last - imported one is returned. 
- """ - model_path: Path = None - thing = path_url_or_repo # to save typing - - print(f">> Probing {thing} for import") - - if thing.startswith(("http:", "https:", "ftp:")): - print(f" | {thing} appears to be a URL") - model_path = self._resolve_path( - thing, "models/ldm/stable-diffusion-v1" - ) # _resolve_path does a download if needed - - elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")): - if Path(thing).stem in ["model", "diffusion_pytorch_model"]: - print( - f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import" - ) - return - else: - print(f" | {thing} appears to be a checkpoint file on disk") - model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1") - - elif Path(thing).is_dir() and Path(thing, "model_index.json").exists(): - print(f" | {thing} appears to be a diffusers file on disk") - model_name = self.import_diffuser_model( - thing, - vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), - model_name=model_name, - description=description, - commit_to_conf=commit_to_conf, - ) - - elif Path(thing).is_dir(): - if (Path(thing) / "model_index.json").exists(): - print(f" | {thing} appears to be a diffusers model.") - model_name = self.import_diffuser_model( - thing, commit_to_conf=commit_to_conf - ) - else: - print( - f" |{thing} appears to be a directory. Will scan for models to import" - ) - for m in list(Path(thing).rglob("*.ckpt")) + list( - Path(thing).rglob("*.safetensors") - ): - if model_name := self.heuristic_import( - str(m), convert, commit_to_conf=commit_to_conf - ): - print(f" >> {model_name} successfully imported") - return model_name - - elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing): - print(f" | {thing} appears to be a HuggingFace diffusers repo_id") - model_name = self.import_diffuser_model( - thing, commit_to_conf=commit_to_conf - ) - pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name]) - return model_name - else: - print( - f"** {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id" - ) - - # Model_path is set in the event of a legacy checkpoint file. - # If not set, we're all done - if not model_path: - return - - if model_path.stem in self.config: # already imported - print(" | Already imported. Skipping") - return - - # another round of heuristics to guess the correct config file. - checkpoint = ( - safetensors.torch.load_file(model_path) - if model_path.suffix == ".safetensors" - else torch.load(model_path) - ) - model_type = self.probe_model_type(checkpoint) - - model_config_file = None - if model_type == SDLegacyType.V1: - print(" | SD-v1 model detected") - model_config_file = Path( - Globals.root, "configs/stable-diffusion/v1-inference.yaml" - ) - elif model_type == SDLegacyType.V1_INPAINT: - print(" | SD-v1 inpainting model detected") - model_config_file = Path( - Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml" - ) - elif model_type == SDLegacyType.V2: - print( - " | SD-v2 model detected; model will be converted to diffusers format" - ) - model_config_file = Path( - Globals.root, "configs/stable-diffusion/v2-inference-v.yaml" - ) - convert = True - else: - print( - f"** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. 
Skipping import" - ) - return - - if convert: - diffuser_path = Path( - Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem - ) - model_name = self.convert_and_import( - model_path, - diffusers_path=diffuser_path, - vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), - model_name=model_name, - model_description=description, - original_config_file=model_config_file, - commit_to_conf=commit_to_conf, - ) - else: - model_name = self.import_ckpt_model( - model_path, - config=model_config_file, - model_name=model_name, - model_description=description, - vae=str( - Path( - Globals.root, - "models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt", - ) - ), - commit_to_conf=commit_to_conf, - ) - if commit_to_conf: - self.commit(commit_to_conf) - return model_name - - def convert_and_import( - self, - ckpt_path: Path, - diffusers_path: Path, - model_name=None, - model_description=None, - vae=None, - original_config_file: Path = None, - commit_to_conf: Path = None, - ) -> str: - """ - Convert a legacy ckpt weights file to diffuser model and import - into models.yaml. - """ - ckpt_path = self._resolve_path(ckpt_path, "models/ldm/stable-diffusion-v1") - if original_config_file: - original_config_file = self._resolve_path( - original_config_file, "configs/stable-diffusion" - ) - - new_config = None - - from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser - - if diffusers_path.exists(): - print( - f"ERROR: The path {str(diffusers_path)} already exists. Please move or remove it and try again." - ) - return - - model_name = model_name or diffusers_path.name - model_description = model_description or f"Optimized version of {model_name}" - print(f">> Optimizing {model_name} (30-60s)") - try: - # By passing the specified VAE to the conversion function, the autoencoder - # will be built into the model rather than tacked on afterward via the config file - vae_model = self._load_vae(vae) if vae else None - convert_ckpt_to_diffuser( - ckpt_path, - diffusers_path, - extract_ema=True, - original_config_file=original_config_file, - vae=vae_model, - ) - print( - f" | Success. Optimized model is now located at {str(diffusers_path)}" - ) - print(f" | Writing new config file entry for {model_name}") - new_config = dict( - path=str(diffusers_path), - description=model_description, - format="diffusers", - ) - if model_name in self.config: - self.del_model(model_name) - self.add_model(model_name, new_config, True) - if commit_to_conf: - self.commit(commit_to_conf) - print(">> Conversion succeeded") - except Exception as e: - print(f"** Conversion failed: {str(e)}") - print( - "** If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. 
v1-inpainting-inference.yaml)" - ) - - return model_name - - def search_models(self, search_folder): - print(f">> Finding Models In: {search_folder}") - models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") - models_folder_safetensors = Path(search_folder).glob("**/*.safetensors") - - ckpt_files = [x for x in models_folder_ckpt if x.is_file()] - safetensor_files = [x for x in models_folder_safetensors if x.is_file()] - - files = ckpt_files + safetensor_files - - found_models = [] - for file in files: - location = str(file.resolve()).replace("\\", "/") - if ( - "model.safetensors" not in location - and "diffusion_pytorch_model.safetensors" not in location - ): - found_models.append({"name": file.stem, "location": location}) - - return search_folder, found_models - - def _choose_diffusers_vae( - self, model_name: str, vae: str = None - ) -> Union[dict, str]: - # In the event that the original entry is using a custom ckpt VAE, we try to - # map that VAE onto a diffuser VAE using a hard-coded dictionary. - # I would prefer to do this differently: We load the ckpt model into memory, swap the - # VAE in memory, and then pass that to convert_ckpt_to_diffuser() so that the swapped - # VAE is built into the model. However, when I tried this I got obscure key errors. - if vae: - return vae - if model_name in self.config and ( - vae_ckpt_path := self.model_info(model_name).get("vae", None) - ): - vae_basename = Path(vae_ckpt_path).stem - diffusers_vae = None - if diffusers_vae := VAE_TO_REPO_ID.get(vae_basename, None): - print( - f">> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version" - ) - vae = {"repo_id": diffusers_vae} - else: - print( - f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown' - ) - print( - '** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config' - ) - vae = {"repo_id": "stabilityai/sd-vae-ft-mse"} - return vae - - def _make_cache_room(self) -> None: - num_loaded_models = len(self.models) - if num_loaded_models >= self.max_loaded_models: - least_recent_model = self._pop_oldest_model() - print( - f">> Cache limit (max={self.max_loaded_models}) reached. Purging {least_recent_model}" - ) - if least_recent_model is not None: - del self.models[least_recent_model] - gc.collect() - - def print_vram_usage(self) -> None: - if self._has_cuda: - print( - ">> Current VRAM usage: ", - "%4.2fG" % (torch.cuda.memory_allocated() / 1e9), - ) - - def commit(self, config_file_path: str) -> None: - """ - Write current configuration out to the indicated file. - """ - yaml_str = OmegaConf.to_yaml(self.config) - if not os.path.isabs(config_file_path): - config_file_path = os.path.normpath( - os.path.join(Globals.root, config_file_path) - ) - tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp") - with open(tmpfile, "w", encoding="utf-8") as outfile: - outfile.write(self.preamble()) - outfile.write(yaml_str) - os.replace(tmpfile, config_file_path) - - def preamble(self) -> str: - """ - Returns the preamble for the config file. - """ - return textwrap.dedent( - """\ - # This file describes the alternative machine learning models - # available to InvokeAI script. - # - # To add a new model, follow the examples below. Each - # model requires a model config file, a weights file, - # and the width and height of the images it - # was trained on. 
- """ - ) - - @classmethod - def migrate_models(cls): - """ - Migrate the ~/invokeai/models directory from the legacy format used through 2.2.5 - to the 2.3.0 "diffusers" version. This should be a one-time operation, called at - script startup time. - """ - # Three transformer models to check: bert, clip and safety checker - legacy_locations = [ - Path( - "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker" - ), - Path("bert-base-uncased/models--bert-base-uncased"), - Path( - "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14" - ), - ] - models_dir = Path(Globals.root, "models") - legacy_layout = False - for model in legacy_locations: - legacy_layout = legacy_layout or Path(models_dir, model).exists() - if not legacy_layout: - return - - print( - "** Legacy version <= 2.2.5 model directory layout detected. Reorganizing." - ) - print("** This is a quick one-time operation.") - - # transformer files get moved into the hub directory - if cls._is_huggingface_hub_directory_present(): - hub = global_cache_dir("hub") - else: - hub = models_dir / "hub" - - os.makedirs(hub, exist_ok=True) - for model in legacy_locations: - source = models_dir / model - dest = hub / model.stem - print(f"** {source} => {dest}") - if source.exists(): - if dest.exists(): - rmtree(source) - else: - move(source, dest) - - # anything else gets moved into the diffusers directory - if cls._is_huggingface_hub_directory_present(): - diffusers = global_cache_dir("diffusers") - else: - diffusers = models_dir / "diffusers" - - os.makedirs(diffusers, exist_ok=True) - for root, dirs, _ in os.walk(models_dir, topdown=False): - for dir in dirs: - full_path = Path(root, dir) - if full_path.is_relative_to(hub) or full_path.is_relative_to(diffusers): - continue - if Path(dir).match("models--*--*"): - dest = diffusers / dir - print(f"** {full_path} => {dest}") - if dest.exists(): - rmtree(full_path) - else: - move(full_path, dest) - - # now clean up by removing any empty directories - empty = [ - root - for root, dirs, files, in os.walk(models_dir) - if not len(dirs) and not len(files) - ] - for d in empty: - os.rmdir(d) - print("** Migration is done. 
Continuing...") - - def _resolve_path( - self, source: Union[str, Path], dest_directory: str - ) -> Optional[Path]: - resolved_path = None - if str(source).startswith(("http:", "https:", "ftp:")): - dest_directory = Path(dest_directory) - if not dest_directory.is_absolute(): - dest_directory = Globals.root / dest_directory - dest_directory.mkdir(parents=True, exist_ok=True) - resolved_path = download_with_resume(str(source), dest_directory) - else: - if not os.path.isabs(source): - source = os.path.join(Globals.root, source) - resolved_path = Path(source) - return resolved_path - - def _invalidate_cached_model(self, model_name: str) -> None: - self.offload_model(model_name) - if model_name in self.stack: - self.stack.remove(model_name) - self.models.pop(model_name, None) - - def _model_to_cpu(self, model): - if self.device == CPU_DEVICE: - return model - - if isinstance(model, StableDiffusionGeneratorPipeline): - model.offload_all() - return model - - model.cond_stage_model.device = CPU_DEVICE - model.to(CPU_DEVICE) - - for submodel in ("first_stage_model", "cond_stage_model", "model"): - try: - getattr(model, submodel).to(CPU_DEVICE) - except AttributeError: - pass - return model - - def _model_from_cpu(self, model): - if self.device == CPU_DEVICE: - return model - - if isinstance(model, StableDiffusionGeneratorPipeline): - model.ready() - return model - - model.to(self.device) - model.cond_stage_model.device = self.device - - for submodel in ("first_stage_model", "cond_stage_model", "model"): - try: - getattr(model, submodel).to(self.device) - except AttributeError: - pass - - return model - - def _pop_oldest_model(self): - """ - Remove the first element of the FIFO, which ought - to be the least recently accessed model. Do not - pop the last one, because it is in active use! - """ - return self.stack.pop(0) - - def _push_newest_model(self, model_name: str) -> None: - """ - Maintain a simple FIFO. First element is always the - least recent, and last element is always the most recent. 
- """ - with contextlib.suppress(ValueError): - self.stack.remove(model_name) - self.stack.append(model_name) - - def _has_cuda(self) -> bool: - return self.device.type == "cuda" - - def _diffuser_sha256( - self, name_or_path: Union[str, Path], chunksize=4096 - ) -> Union[str, bytes]: - path = None - if isinstance(name_or_path, Path): - path = name_or_path - else: - owner, repo = name_or_path.split("/") - path = Path(global_cache_dir("diffusers") / f"models--{owner}--{repo}") - if not path.exists(): - return None - hashpath = path / "checksum.sha256" - if hashpath.exists() and path.stat().st_mtime <= hashpath.stat().st_mtime: - with open(hashpath) as f: - hash = f.read() - return hash - print(" | Calculating sha256 hash of model files") - tic = time.time() - sha = hashlib.sha256() - count = 0 - for root, dirs, files in os.walk(path, followlinks=False): - for name in files: - count += 1 - with open(os.path.join(root, name), "rb") as f: - while chunk := f.read(chunksize): - sha.update(chunk) - hash = sha.hexdigest() - toc = time.time() - print(f" | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic)) - with open(hashpath, "w") as f: - f.write(hash) - return hash - - def _cached_sha256(self, path, data) -> Union[str, bytes]: - dirname = os.path.dirname(path) - basename = os.path.basename(path) - base, _ = os.path.splitext(basename) - hashpath = os.path.join(dirname, base + ".sha256") - - if os.path.exists(hashpath) and os.path.getmtime(path) <= os.path.getmtime( - hashpath - ): - with open(hashpath) as f: - hash = f.read() - return hash - - print(" | Calculating sha256 hash of weights file") - tic = time.time() - sha = hashlib.sha256() - sha.update(data) - hash = sha.hexdigest() - toc = time.time() - print(f">> sha256 = {hash}", "(%4.2fs)" % (toc - tic)) - - with open(hashpath, "w") as f: - f.write(hash) - return hash - - def _load_vae(self, vae_config) -> AutoencoderKL: - vae_args = {} - try: - name_or_path = self.model_name_or_path(vae_config) - except Exception: - return None - if name_or_path is None: - return None - using_fp16 = self.precision == "float16" - - vae_args.update( - cache_dir=global_cache_dir("diffusers"), - local_files_only=not Globals.internet_available, - ) - - print(f" | Loading diffusers VAE from {name_or_path}") - if using_fp16: - vae_args.update(torch_dtype=torch.float16) - fp_args_list = [{"revision": "fp16"}, {}] - else: - print(" | Using more accurate float32 precision") - fp_args_list = [{}] - - vae = None - deferred_error = None - - # A VAE may be in a subfolder of a model's repository. - if "subfolder" in vae_config: - vae_args["subfolder"] = vae_config["subfolder"] - - for fp_args in fp_args_list: - # At some point we might need to be able to use different classes here? But for now I think - # all Stable Diffusion VAE are AutoencoderKL. - try: - vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args) - except OSError as e: - if str(e).startswith("fp16 is not a valid"): - pass - else: - deferred_error = e - if vae: - break - - if not vae and deferred_error: - print(f"** Could not load VAE {name_or_path}: {str(deferred_error)}") - - return vae - - @staticmethod - def _delete_model_from_cache(repo_id): - cache_info = scan_cache_dir(global_cache_dir("diffusers")) - - # I'm sure there is a way to do this with comprehensions - # but the code quickly became incomprehensible! 
- hashes_to_delete = set() - for repo in cache_info.repos: - if repo.repo_id == repo_id: - for revision in repo.revisions: - hashes_to_delete.add(revision.commit_hash) - strategy = cache_info.delete_revisions(*hashes_to_delete) - print( - f"** deletion of this model is expected to free {strategy.expected_freed_size_str}" - ) - strategy.execute() - - @staticmethod - def _abs_path(path: str | Path) -> Path: - if path is None or Path(path).is_absolute(): - return path - return Path(Globals.root, path).resolve() - - @staticmethod - def _is_huggingface_hub_directory_present() -> bool: - return ( - os.getenv("HF_HOME") is not None or os.getenv("XDG_CACHE_HOME") is not None - ) diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py deleted file mode 100644 index 3db7b6fd73..0000000000 --- a/ldm/models/autoencoder.py +++ /dev/null @@ -1,596 +0,0 @@ -import torch -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager - -from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer - -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.modules.distributions.distributions import ( - DiagonalGaussianDistribution, -) - -from ldm.util import instantiate_from_config - - -class VQModel(pl.LightningModule): - def __init__( - self, - ddconfig, - lossconfig, - n_embed, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key='image', - colorize_nlabels=None, - monitor=None, - batch_resize_range=None, - scheduler_config=None, - lr_g_factor=1.0, - remap=None, - sane_index_shape=False, # tell vector quantizer to return indices as bhw - use_ema=False, - ): - super().__init__() - self.embed_dim = embed_dim - self.n_embed = n_embed - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - self.quantize = VectorQuantizer( - n_embed, - embed_dim, - beta=0.25, - remap=remap, - sane_index_shape=sane_index_shape, - ) - self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d( - embed_dim, ddconfig['z_channels'], 1 - ) - if colorize_nlabels is not None: - assert type(colorize_nlabels) == int - self.register_buffer( - 'colorize', torch.randn(3, colorize_nlabels, 1, 1) - ) - if monitor is not None: - self.monitor = monitor - self.batch_resize_range = batch_resize_range - if self.batch_resize_range is not None: - print( - f'{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.' 
- ) - - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self) - print(f'>> Keeping EMAs of {len(list(self.model_ema.buffers()))}.') - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - self.scheduler_config = scheduler_config - self.lr_g_factor = lr_g_factor - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.parameters()) - self.model_ema.copy_to(self) - if context is not None: - print(f'{context}: Switched to EMA weights') - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.parameters()) - if context is not None: - print(f'{context}: Restored training weights') - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location='cpu')['state_dict'] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) - print( - f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' - ) - if len(missing) > 0: - print(f'Missing Keys: {missing}') - print(f'Unexpected Keys: {unexpected}') - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - quant, emb_loss, info = self.quantize(h) - return quant, emb_loss, info - - def encode_to_prequant(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, quant): - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code_b): - quant_b = self.quantize.embed_code(code_b) - dec = self.decode(quant_b) - return dec - - def forward(self, input, return_pred_indices=False): - quant, diff, (_, _, ind) = self.encode(input) - dec = self.decode(quant) - if return_pred_indices: - return dec, diff, ind - return dec, diff - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = ( - x.permute(0, 3, 1, 2) - .to(memory_format=torch.contiguous_format) - .float() - ) - if self.batch_resize_range is not None: - lower_size = self.batch_resize_range[0] - upper_size = self.batch_resize_range[1] - if self.global_step <= 4: - # do the first few batches with max size to avoid later oom - new_resize = upper_size - else: - new_resize = np.random.choice( - np.arange(lower_size, upper_size + 16, 16) - ) - if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode='bicubic') - x = x.detach() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 - # try not to fool the heuristics - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - - if optimizer_idx == 0: - # autoencode - aeloss, log_dict_ae = self.loss( - qloss, - x, - xrec, - optimizer_idx, - self.global_step, - last_layer=self.get_last_layer(), - split='train', - predicted_indices=ind, - ) - - self.log_dict( - log_dict_ae, - prog_bar=False, - logger=True, - on_step=True, - on_epoch=True, - ) - return aeloss - - if optimizer_idx == 1: - # discriminator - discloss, log_dict_disc = self.loss( - qloss, - x, - xrec, - optimizer_idx, - self.global_step, - last_layer=self.get_last_layer(), - split='train', - ) - self.log_dict( - log_dict_disc, - prog_bar=False, - logger=True, - on_step=True, - on_epoch=True, - ) - return discloss - - 
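(Aside, not part of the patch: VQModel.training_step above follows the usual two-optimizer VQGAN recipe, where optimizer index 0 updates the autoencoder on the reconstruction/quantization loss and optimizer index 1 updates the discriminator. The stripped-down sketch below shows only that dispatch; the modules and losses are placeholders rather than the taming/ldm implementations, and it assumes the pre-2.0 pytorch_lightning API in which training_step receives optimizer_idx, as the deleted code does.)

# Sketch of the optimizer_idx dispatch used by the deleted training_step above.
# Placeholder modules and losses; assumes pytorch_lightning < 2.0 semantics.
import pytorch_lightning as pl
import torch
import torch.nn.functional as F


class TwoOptimizerAE(pl.LightningModule):
    def __init__(self, encoder: torch.nn.Module, decoder: torch.nn.Module,
                 discriminator: torch.nn.Module):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.discriminator = discriminator

    def training_step(self, batch, batch_idx, optimizer_idx):
        x = batch["image"]
        xrec = self.decoder(self.encoder(x))
        if optimizer_idx == 0:
            # autoencoder update: reconstruction loss stands in for the full
            # perceptual + quantization objective used in the deleted code
            return F.mse_loss(xrec, x)
        if optimizer_idx == 1:
            # discriminator update on detached reconstructions
            logits_real = self.discriminator(x)
            logits_fake = self.discriminator(xrec.detach())
            return (F.softplus(-logits_real) + F.softplus(logits_fake)).mean()

    def configure_optimizers(self):
        opt_ae = torch.optim.Adam(
            list(self.encoder.parameters()) + list(self.decoder.parameters()),
            lr=1e-4, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(
            self.discriminator.parameters(), lr=1e-4, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []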
def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step( - batch, batch_idx, suffix='_ema' - ) - return log_dict - - def _validation_step(self, batch, batch_idx, suffix=''): - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - aeloss, log_dict_ae = self.loss( - qloss, - x, - xrec, - 0, - self.global_step, - last_layer=self.get_last_layer(), - split='val' + suffix, - predicted_indices=ind, - ) - - discloss, log_dict_disc = self.loss( - qloss, - x, - xrec, - 1, - self.global_step, - last_layer=self.get_last_layer(), - split='val' + suffix, - predicted_indices=ind, - ) - rec_loss = log_dict_ae[f'val{suffix}/rec_loss'] - self.log( - f'val{suffix}/rec_loss', - rec_loss, - prog_bar=True, - logger=True, - on_step=False, - on_epoch=True, - sync_dist=True, - ) - self.log( - f'val{suffix}/aeloss', - aeloss, - prog_bar=True, - logger=True, - on_step=False, - on_epoch=True, - sync_dist=True, - ) - if version.parse(pl.__version__) >= version.parse('1.4.0'): - del log_dict_ae[f'val{suffix}/rec_loss'] - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr_d = self.learning_rate - lr_g = self.lr_g_factor * self.learning_rate - print('lr_d', lr_d) - print('lr_g', lr_g) - opt_ae = torch.optim.Adam( - list(self.encoder.parameters()) - + list(self.decoder.parameters()) - + list(self.quantize.parameters()) - + list(self.quant_conv.parameters()) - + list(self.post_quant_conv.parameters()), - lr=lr_g, - betas=(0.5, 0.9), - ) - opt_disc = torch.optim.Adam( - self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9) - ) - - if self.scheduler_config is not None: - scheduler = instantiate_from_config(self.scheduler_config) - - print('Setting up LambdaLR scheduler...') - scheduler = [ - { - 'scheduler': LambdaLR( - opt_ae, lr_lambda=scheduler.schedule - ), - 'interval': 'step', - 'frequency': 1, - }, - { - 'scheduler': LambdaLR( - opt_disc, lr_lambda=scheduler.schedule - ), - 'interval': 'step', - 'frequency': 1, - }, - ] - return [opt_ae, opt_disc], scheduler - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if only_inputs: - log['inputs'] = x - return log - xrec, _ = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log['inputs'] = x - log['reconstructions'] = xrec - if plot_ema: - with self.ema_scope(): - xrec_ema, _ = self(x) - if x.shape[1] > 3: - xrec_ema = self.to_rgb(xrec_ema) - log['reconstructions_ema'] = xrec_ema - return log - - def to_rgb(self, x): - assert self.image_key == 'segmentation' - if not hasattr(self, 'colorize'): - self.register_buffer( - 'colorize', torch.randn(3, x.shape[1], 1, 1).to(x) - ) - x = F.conv2d(x, weight=self.colorize) - x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 - return x - - -class VQModelInterface(VQModel): - def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) - self.embed_dim = embed_dim - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, h, force_not_quantize=False): - # also go through quantization layer - if not force_not_quantize: - 
quant, emb_loss, info = self.quantize(h) - else: - quant = h - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - -class AutoencoderKL(pl.LightningModule): - def __init__( - self, - ddconfig, - lossconfig, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key='image', - colorize_nlabels=None, - monitor=None, - ): - super().__init__() - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - assert ddconfig['double_z'] - self.quant_conv = torch.nn.Conv2d( - 2 * ddconfig['z_channels'], 2 * embed_dim, 1 - ) - self.post_quant_conv = torch.nn.Conv2d( - embed_dim, ddconfig['z_channels'], 1 - ) - self.embed_dim = embed_dim - if colorize_nlabels is not None: - assert type(colorize_nlabels) == int - self.register_buffer( - 'colorize', torch.randn(3, colorize_nlabels, 1, 1) - ) - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location='cpu')['state_dict'] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) - del sd[k] - self.load_state_dict(sd, strict=False) - print(f'Restored from {path}') - - def encode(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - dec = self.decode(z) - return dec, posterior - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = ( - x.permute(0, 3, 1, 2) - .to(memory_format=torch.contiguous_format) - .float() - ) - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - - if optimizer_idx == 0: - # train encoder+decoder+logvar - aeloss, log_dict_ae = self.loss( - inputs, - reconstructions, - posterior, - optimizer_idx, - self.global_step, - last_layer=self.get_last_layer(), - split='train', - ) - self.log( - 'aeloss', - aeloss, - prog_bar=True, - logger=True, - on_step=True, - on_epoch=True, - ) - self.log_dict( - log_dict_ae, - prog_bar=False, - logger=True, - on_step=True, - on_epoch=False, - ) - return aeloss - - if optimizer_idx == 1: - # train the discriminator - discloss, log_dict_disc = self.loss( - inputs, - reconstructions, - posterior, - optimizer_idx, - self.global_step, - last_layer=self.get_last_layer(), - split='train', - ) - - self.log( - 'discloss', - discloss, - prog_bar=True, - logger=True, - on_step=True, - on_epoch=True, - ) - self.log_dict( - log_dict_disc, - prog_bar=False, - logger=True, - on_step=True, - on_epoch=False, - ) - return discloss - - def validation_step(self, batch, batch_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - aeloss, log_dict_ae = self.loss( - inputs, - reconstructions, - posterior, - 0, - self.global_step, - last_layer=self.get_last_layer(), - split='val', - ) - - discloss, log_dict_disc = self.loss( - inputs, - reconstructions, - posterior, - 1, - self.global_step, - 
last_layer=self.get_last_layer(), - split='val', - ) - - self.log('val/rec_loss', log_dict_ae['val/rec_loss']) - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr = self.learning_rate - opt_ae = torch.optim.Adam( - list(self.encoder.parameters()) - + list(self.decoder.parameters()) - + list(self.quant_conv.parameters()) - + list(self.post_quant_conv.parameters()), - lr=lr, - betas=(0.5, 0.9), - ) - opt_disc = torch.optim.Adam( - self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9) - ) - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - @torch.no_grad() - def log_images(self, batch, only_inputs=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if not only_inputs: - xrec, posterior = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log['samples'] = self.decode(torch.randn_like(posterior.sample())) - log['reconstructions'] = xrec - log['inputs'] = x - return log - - def to_rgb(self, x): - assert self.image_key == 'segmentation' - if not hasattr(self, 'colorize'): - self.register_buffer( - 'colorize', torch.randn(3, x.shape[1], 1, 1).to(x) - ) - x = F.conv2d(x, weight=self.colorize) - x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 - return x - - -class IdentityFirstStage(torch.nn.Module): - def __init__(self, *args, vq_interface=False, **kwargs): - self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff - super().__init__() - - def encode(self, x, *args, **kwargs): - return x - - def decode(self, x, *args, **kwargs): - return x - - def quantize(self, x, *args, **kwargs): - if self.vq_interface: - return x, None, [None, None, None] - return x - - def forward(self, x, *args, **kwargs): - return x diff --git a/ldm/models/diffusion/__init__.py b/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ldm/models/diffusion/classifier.py b/ldm/models/diffusion/classifier.py deleted file mode 100644 index be0d8c1919..0000000000 --- a/ldm/models/diffusion/classifier.py +++ /dev/null @@ -1,355 +0,0 @@ -import os -import torch -import pytorch_lightning as pl -from omegaconf import OmegaConf -from torch.nn import functional as F -from torch.optim import AdamW -from torch.optim.lr_scheduler import LambdaLR -from copy import deepcopy -from einops import rearrange -from glob import glob -from natsort import natsorted - -from ldm.modules.diffusionmodules.openaimodel import ( - EncoderUNetModel, - UNetModel, -) -from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config - -__models__ = {'class_label': EncoderUNetModel, 'segmentation': UNetModel} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -class NoisyLatentImageClassifier(pl.LightningModule): - def __init__( - self, - diffusion_path, - num_classes, - ckpt_path=None, - pool='attention', - label_key=None, - diffusion_ckpt_path=None, - scheduler_config=None, - weight_decay=1.0e-2, - log_steps=10, - monitor='val/loss', - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.num_classes = num_classes - # get latest config of diffusion model - diffusion_config = natsorted( - glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')) - )[-1] - 
self.diffusion_config = OmegaConf.load(diffusion_config).model - self.diffusion_config.params.ckpt_path = diffusion_ckpt_path - self.load_diffusion() - - self.monitor = monitor - self.numd = ( - self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 - ) - self.log_time_interval = ( - self.diffusion_model.num_timesteps // log_steps - ) - self.log_steps = log_steps - - self.label_key = ( - label_key - if not hasattr(self.diffusion_model, 'cond_stage_key') - else self.diffusion_model.cond_stage_key - ) - - assert ( - self.label_key is not None - ), 'label_key neither in diffusion model nor in model.params' - - if self.label_key not in __models__: - raise NotImplementedError() - - self.load_classifier(ckpt_path, pool) - - self.scheduler_config = scheduler_config - self.use_scheduler = self.scheduler_config is not None - self.weight_decay = weight_decay - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location='cpu') - if 'state_dict' in list(sd.keys()): - sd = sd['state_dict'] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) - del sd[k] - missing, unexpected = ( - self.load_state_dict(sd, strict=False) - if not only_model - else self.model.load_state_dict(sd, strict=False) - ) - print( - f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' - ) - if len(missing) > 0: - print(f'Missing Keys: {missing}') - if len(unexpected) > 0: - print(f'Unexpected Keys: {unexpected}') - - def load_diffusion(self): - model = instantiate_from_config(self.diffusion_config) - self.diffusion_model = model.eval() - self.diffusion_model.train = disabled_train - for param in self.diffusion_model.parameters(): - param.requires_grad = False - - def load_classifier(self, ckpt_path, pool): - model_config = deepcopy( - self.diffusion_config.params.unet_config.params - ) - model_config.in_channels = ( - self.diffusion_config.params.unet_config.params.out_channels - ) - model_config.out_channels = self.num_classes - if self.label_key == 'class_label': - model_config.pool = pool - - self.model = __models__[self.label_key](**model_config) - if ckpt_path is not None: - print( - '#####################################################################' - ) - print(f'load from ckpt "{ckpt_path}"') - print( - '#####################################################################' - ) - self.init_from_ckpt(ckpt_path) - - @torch.no_grad() - def get_x_noisy(self, x, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x)) - continuous_sqrt_alpha_cumprod = None - if self.diffusion_model.use_continuous_noise: - continuous_sqrt_alpha_cumprod = ( - self.diffusion_model.sample_continuous_noise_level( - x.shape[0], t + 1 - ) - ) - # todo: make sure t+1 is correct here - - return self.diffusion_model.q_sample( - x_start=x, - t=t, - noise=noise, - continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod, - ) - - def forward(self, x_noisy, t, *args, **kwargs): - return self.model(x_noisy, t) - - @torch.no_grad() - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - @torch.no_grad() - def get_conditioning(self, batch, k=None): - if k is None: - k = self.label_key - assert k is not None, 'Needs to provide label key' - - targets = batch[k].to(self.device) - - if self.label_key == 
'segmentation': - targets = rearrange(targets, 'b h w c -> b c h w') - for down in range(self.numd): - h, w = targets.shape[-2:] - targets = F.interpolate( - targets, size=(h // 2, w // 2), mode='nearest' - ) - - # targets = rearrange(targets,'b c h w -> b h w c') - - return targets - - def compute_top_k(self, logits, labels, k, reduction='mean'): - _, top_ks = torch.topk(logits, k, dim=1) - if reduction == 'mean': - return ( - (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() - ) - elif reduction == 'none': - return (top_ks == labels[:, None]).float().sum(dim=-1) - - def on_train_epoch_start(self): - # save some memory - self.diffusion_model.model.to('cpu') - - @torch.no_grad() - def write_logs(self, loss, logits, targets): - log_prefix = 'train' if self.training else 'val' - log = {} - log[f'{log_prefix}/loss'] = loss.mean() - log[f'{log_prefix}/acc@1'] = self.compute_top_k( - logits, targets, k=1, reduction='mean' - ) - log[f'{log_prefix}/acc@5'] = self.compute_top_k( - logits, targets, k=5, reduction='mean' - ) - - self.log_dict( - log, - prog_bar=False, - logger=True, - on_step=self.training, - on_epoch=True, - ) - self.log( - 'loss', log[f'{log_prefix}/loss'], prog_bar=True, logger=False - ) - self.log( - 'global_step', - self.global_step, - logger=False, - on_epoch=False, - prog_bar=True, - ) - lr = self.optimizers().param_groups[0]['lr'] - self.log( - 'lr_abs', - lr, - on_step=True, - logger=True, - on_epoch=False, - prog_bar=True, - ) - - def shared_step(self, batch, t=None): - x, *_ = self.diffusion_model.get_input( - batch, k=self.diffusion_model.first_stage_key - ) - targets = self.get_conditioning(batch) - if targets.dim() == 4: - targets = targets.argmax(dim=1) - if t is None: - t = torch.randint( - 0, - self.diffusion_model.num_timesteps, - (x.shape[0],), - device=self.device, - ).long() - else: - t = torch.full( - size=(x.shape[0],), fill_value=t, device=self.device - ).long() - x_noisy = self.get_x_noisy(x, t) - logits = self(x_noisy, t) - - loss = F.cross_entropy(logits, targets, reduction='none') - - self.write_logs(loss.detach(), logits.detach(), targets.detach()) - - loss = loss.mean() - return loss, logits, x_noisy, targets - - def training_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - return loss - - def reset_noise_accs(self): - self.noisy_acc = { - t: {'acc@1': [], 'acc@5': []} - for t in range( - 0, - self.diffusion_model.num_timesteps, - self.diffusion_model.log_every_t, - ) - } - - def on_validation_start(self): - self.reset_noise_accs() - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - - for t in self.noisy_acc: - _, logits, _, targets = self.shared_step(batch, t) - self.noisy_acc[t]['acc@1'].append( - self.compute_top_k(logits, targets, k=1, reduction='mean') - ) - self.noisy_acc[t]['acc@5'].append( - self.compute_top_k(logits, targets, k=5, reduction='mean') - ) - - return loss - - def configure_optimizers(self): - optimizer = AdamW( - self.model.parameters(), - lr=self.learning_rate, - weight_decay=self.weight_decay, - ) - - if self.use_scheduler: - scheduler = instantiate_from_config(self.scheduler_config) - - print('Setting up LambdaLR scheduler...') - scheduler = [ - { - 'scheduler': LambdaLR( - optimizer, lr_lambda=scheduler.schedule - ), - 'interval': 'step', - 'frequency': 1, - } - ] - return [optimizer], scheduler - - return optimizer - - @torch.no_grad() - def log_images(self, batch, N=8, *args, **kwargs): - log = dict() - x = self.get_input(batch, 
self.diffusion_model.first_stage_key) - log['inputs'] = x - - y = self.get_conditioning(batch) - - if self.label_key == 'class_label': - y = log_txt_as_img((x.shape[2], x.shape[3]), batch['human_label']) - log['labels'] = y - - if ismap(y): - log['labels'] = self.diffusion_model.to_rgb(y) - - for step in range(self.log_steps): - current_time = step * self.log_time_interval - - _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) - - log[f'inputs@t{current_time}'] = x_noisy - - pred = F.one_hot( - logits.argmax(dim=1), num_classes=self.num_classes - ) - pred = rearrange(pred, 'b h w c -> b c h w') - - log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb( - pred - ) - - for key in log: - log[key] = log[key][:N] - - return log diff --git a/ldm/models/diffusion/cross_attention_control.py b/ldm/models/diffusion/cross_attention_control.py deleted file mode 100644 index a34f22e683..0000000000 --- a/ldm/models/diffusion/cross_attention_control.py +++ /dev/null @@ -1,642 +0,0 @@ - -# adapted from bloc97's CrossAttentionControl colab -# https://github.com/bloc97/CrossAttentionControl - - -import enum -import math -from typing import Optional, Callable - -import psutil -import torch -import diffusers -from torch import nn - -from compel.cross_attention_control import Arguments -from diffusers.models.unet_2d_condition import UNet2DConditionModel -from diffusers.models.cross_attention import AttnProcessor -from ldm.invoke.devices import torch_dtype - - -class CrossAttentionType(enum.Enum): - SELF = 1 - TOKENS = 2 - - -class Context: - - cross_attention_mask: Optional[torch.Tensor] - cross_attention_index_map: Optional[torch.Tensor] - - class Action(enum.Enum): - NONE = 0 - SAVE = 1, - APPLY = 2 - - def __init__(self, arguments: Arguments, step_count: int): - """ - :param arguments: Arguments for the cross-attention control process - :param step_count: The absolute total number of steps of diffusion (for img2img this is likely larger than the number of steps that will actually run) - """ - self.cross_attention_mask = None - self.cross_attention_index_map = None - self.self_cross_attention_action = Context.Action.NONE - self.tokens_cross_attention_action = Context.Action.NONE - self.arguments = arguments - self.step_count = step_count - - self.self_cross_attention_module_identifiers = [] - self.tokens_cross_attention_module_identifiers = [] - - self.saved_cross_attention_maps = {} - - self.clear_requests(cleanup=True) - - def register_cross_attention_modules(self, model): - for name,module in get_cross_attention_modules(model, CrossAttentionType.SELF): - if name in self.self_cross_attention_module_identifiers: - assert False, f"name {name} cannot appear more than once" - self.self_cross_attention_module_identifiers.append(name) - for name,module in get_cross_attention_modules(model, CrossAttentionType.TOKENS): - if name in self.tokens_cross_attention_module_identifiers: - assert False, f"name {name} cannot appear more than once" - self.tokens_cross_attention_module_identifiers.append(name) - - def request_save_attention_maps(self, cross_attention_type: CrossAttentionType): - if cross_attention_type == CrossAttentionType.SELF: - self.self_cross_attention_action = Context.Action.SAVE - else: - self.tokens_cross_attention_action = Context.Action.SAVE - - def request_apply_saved_attention_maps(self, cross_attention_type: CrossAttentionType): - if cross_attention_type == CrossAttentionType.SELF: - self.self_cross_attention_action = Context.Action.APPLY - else: - 
self.tokens_cross_attention_action = Context.Action.APPLY - - def is_tokens_cross_attention(self, module_identifier) -> bool: - return module_identifier in self.tokens_cross_attention_module_identifiers - - def get_should_save_maps(self, module_identifier: str) -> bool: - if module_identifier in self.self_cross_attention_module_identifiers: - return self.self_cross_attention_action == Context.Action.SAVE - elif module_identifier in self.tokens_cross_attention_module_identifiers: - return self.tokens_cross_attention_action == Context.Action.SAVE - return False - - def get_should_apply_saved_maps(self, module_identifier: str) -> bool: - if module_identifier in self.self_cross_attention_module_identifiers: - return self.self_cross_attention_action == Context.Action.APPLY - elif module_identifier in self.tokens_cross_attention_module_identifiers: - return self.tokens_cross_attention_action == Context.Action.APPLY - return False - - def get_active_cross_attention_control_types_for_step(self, percent_through:float=None)\ - -> list[CrossAttentionType]: - """ - Should cross-attention control be applied on the given step? - :param percent_through: How far through the step sequence are we (0.0=pure noise, 1.0=completely denoised image). Expected range 0.0..<1.0. - :return: A list of attention types that cross-attention control should be performed for on the given step. May be []. - """ - if percent_through is None: - return [CrossAttentionType.SELF, CrossAttentionType.TOKENS] - - opts = self.arguments.edit_options - to_control = [] - if opts['s_start'] <= percent_through < opts['s_end']: - to_control.append(CrossAttentionType.SELF) - if opts['t_start'] <= percent_through < opts['t_end']: - to_control.append(CrossAttentionType.TOKENS) - return to_control - - def save_slice(self, identifier: str, slice: torch.Tensor, dim: Optional[int], offset: int, - slice_size: Optional[int]): - if identifier not in self.saved_cross_attention_maps: - self.saved_cross_attention_maps[identifier] = { - 'dim': dim, - 'slice_size': slice_size, - 'slices': {offset or 0: slice} - } - else: - self.saved_cross_attention_maps[identifier]['slices'][offset or 0] = slice - - def get_slice(self, identifier: str, requested_dim: Optional[int], requested_offset: int, slice_size: int): - saved_attention_dict = self.saved_cross_attention_maps[identifier] - if requested_dim is None: - if saved_attention_dict['dim'] is not None: - raise RuntimeError(f"dim mismatch: expected dim=None, have {saved_attention_dict['dim']}") - return saved_attention_dict['slices'][0] - - if saved_attention_dict['dim'] == requested_dim: - if slice_size != saved_attention_dict['slice_size']: - raise RuntimeError( - f"slice_size mismatch: expected slice_size={slice_size}, have {saved_attention_dict['slice_size']}") - return saved_attention_dict['slices'][requested_offset] - - if saved_attention_dict['dim'] is None: - whole_saved_attention = saved_attention_dict['slices'][0] - if requested_dim == 0: - return whole_saved_attention[requested_offset:requested_offset + slice_size] - elif requested_dim == 1: - return whole_saved_attention[:, requested_offset:requested_offset + slice_size] - - raise RuntimeError(f"Cannot convert dim {saved_attention_dict['dim']} to requested dim {requested_dim}") - - def get_slicing_strategy(self, identifier: str) -> tuple[Optional[int], Optional[int]]: - saved_attention = self.saved_cross_attention_maps.get(identifier, None) - if saved_attention is None: - return None, None - return saved_attention['dim'], 
saved_attention['slice_size'] - - def clear_requests(self, cleanup=True): - self.tokens_cross_attention_action = Context.Action.NONE - self.self_cross_attention_action = Context.Action.NONE - if cleanup: - self.saved_cross_attention_maps = {} - - def offload_saved_attention_slices_to_cpu(self): - for key, map_dict in self.saved_cross_attention_maps.items(): - for offset, slice in map_dict['slices'].items(): - map_dict[offset] = slice.to('cpu') - - - -class InvokeAICrossAttentionMixin: - """ - Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls - through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling - and dymamic slicing strategy selection. - """ - def __init__(self): - self.mem_total_gb = psutil.virtual_memory().total // (1 << 30) - self.attention_slice_wrangler = None - self.slicing_strategy_getter = None - self.attention_slice_calculated_callback = None - - def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor]]): - ''' - Set custom attention calculator to be called when attention is calculated - :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size), - which returns either the suggested_attention_slice or an adjusted equivalent. - `module` is the current CrossAttention module for which the callback is being invoked. - `suggested_attention_slice` is the default-calculated attention slice - `dim` is -1 if the attenion map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing. - If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length. - - Pass None to use the default attention calculation. - :return: - ''' - self.attention_slice_wrangler = wrangler - - def set_slicing_strategy_getter(self, getter: Optional[Callable[[nn.Module], tuple[int,int]]]): - self.slicing_strategy_getter = getter - - def set_attention_slice_calculated_callback(self, callback: Optional[Callable[[torch.Tensor], None]]): - self.attention_slice_calculated_callback = callback - - def einsum_lowest_level(self, query, key, value, dim, offset, slice_size): - # calculate attention scores - #attention_scores = torch.einsum('b i d, b j d -> b i j', q, k) - attention_scores = torch.baddbmm( - torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device), - query, - key.transpose(-1, -2), - beta=0, - alpha=self.scale, - ) - - # calculate attention slice by taking the best scores for each latent pixel - default_attention_slice = attention_scores.softmax(dim=-1, dtype=attention_scores.dtype) - attention_slice_wrangler = self.attention_slice_wrangler - if attention_slice_wrangler is not None: - attention_slice = attention_slice_wrangler(self, default_attention_slice, dim, offset, slice_size) - else: - attention_slice = default_attention_slice - - if self.attention_slice_calculated_callback is not None: - self.attention_slice_calculated_callback(attention_slice, dim, offset, slice_size) - - hidden_states = torch.bmm(attention_slice, value) - return hidden_states - - def einsum_op_slice_dim0(self, q, k, v, slice_size): - r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - for i in range(0, q.shape[0], slice_size): - end = i + slice_size - r[i:end] = self.einsum_lowest_level(q[i:end], k[i:end], v[i:end], dim=0, offset=i, slice_size=slice_size) - return r - - def einsum_op_slice_dim1(self, q, k, v, slice_size): - r 
= torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - for i in range(0, q.shape[1], slice_size): - end = i + slice_size - r[:, i:end] = self.einsum_lowest_level(q[:, i:end], k, v, dim=1, offset=i, slice_size=slice_size) - return r - - def einsum_op_mps_v1(self, q, k, v): - if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096 - return self.einsum_lowest_level(q, k, v, None, None, None) - else: - slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) - return self.einsum_op_slice_dim1(q, k, v, slice_size) - - def einsum_op_mps_v2(self, q, k, v): - if self.mem_total_gb > 8 and q.shape[1] <= 4096: - return self.einsum_lowest_level(q, k, v, None, None, None) - else: - return self.einsum_op_slice_dim0(q, k, v, 1) - - def einsum_op_tensor_mem(self, q, k, v, max_tensor_mb): - size_mb = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() // (1 << 20) - if size_mb <= max_tensor_mb: - return self.einsum_lowest_level(q, k, v, None, None, None) - div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length() - if div <= q.shape[0]: - return self.einsum_op_slice_dim0(q, k, v, q.shape[0] // div) - return self.einsum_op_slice_dim1(q, k, v, max(q.shape[1] // div, 1)) - - def einsum_op_cuda(self, q, k, v): - # check if we already have a slicing strategy (this should only happen during cross-attention controlled generation) - slicing_strategy_getter = self.slicing_strategy_getter - if slicing_strategy_getter is not None: - (dim, slice_size) = slicing_strategy_getter(self) - if dim is not None: - # print("using saved slicing strategy with dim", dim, "slice size", slice_size) - if dim == 0: - return self.einsum_op_slice_dim0(q, k, v, slice_size) - elif dim == 1: - return self.einsum_op_slice_dim1(q, k, v, slice_size) - - # fallback for when there is no saved strategy, or saved strategy does not slice - mem_free_total = get_mem_free_total(q.device) - # Divide factor of safety as there's copying and fragmentation - return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20)) - - - def get_invokeai_attention_mem_efficient(self, q, k, v): - if q.device.type == 'cuda': - #print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device)) - return self.einsum_op_cuda(q, k, v) - - if q.device.type == 'mps' or q.device.type == 'cpu': - if self.mem_total_gb >= 32: - return self.einsum_op_mps_v1(q, k, v) - return self.einsum_op_mps_v2(q, k, v) - - # Smaller slices are faster due to L2/L3/SLC caches. - # Tested on i7 with 8MB L3 cache. - return self.einsum_op_tensor_mem(q, k, v, 32) - - - -def restore_default_cross_attention(model, is_running_diffusers: bool, restore_attention_processor: Optional[AttnProcessor]=None): - if is_running_diffusers: - unet = model - unet.set_attn_processor(restore_attention_processor or CrossAttnProcessor()) - else: - remove_attention_function(model) - - -def override_cross_attention(model, context: Context, is_running_diffusers = False): - """ - Inject attention parameters and functions into the passed in model to enable cross attention editing. - - :param model: The unet model to inject into. - :return: None - """ - - # adapted from init_attention_edit - device = context.arguments.edited_conditioning.device - - # urgh. should this be hardcoded? 
- max_length = 77 - # mask=1 means use base prompt attention, mask=0 means use edited prompt attention - mask = torch.zeros(max_length, dtype=torch_dtype(device)) - indices_target = torch.arange(max_length, dtype=torch.long) - indices = torch.arange(max_length, dtype=torch.long) - for name, a0, a1, b0, b1 in context.arguments.edit_opcodes: - if b0 < max_length: - if name == "equal":# or (name == "replace" and a1 - a0 == b1 - b0): - # these tokens have not been edited - indices[b0:b1] = indices_target[a0:a1] - mask[b0:b1] = 1 - - context.cross_attention_mask = mask.to(device) - context.cross_attention_index_map = indices.to(device) - if is_running_diffusers: - unet = model - old_attn_processors = unet.attn_processors - if torch.backends.mps.is_available(): - # see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS - unet.set_attn_processor(SwapCrossAttnProcessor()) - else: - # try to re-use an existing slice size - default_slice_size = 4 - slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size) - unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size)) - return old_attn_processors - else: - context.register_cross_attention_modules(model) - inject_attention_function(model, context) - return None - - - - -def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]: - from ldm.modules.attention import CrossAttention # avoid circular import - cross_attention_class: type = InvokeAIDiffusersCrossAttention if isinstance(model,UNet2DConditionModel) else CrossAttention - which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2" - attention_module_tuples = [(name,module) for name, module in model.named_modules() if - isinstance(module, cross_attention_class) and which_attn in name] - cross_attention_modules_in_model_count = len(attention_module_tuples) - expected_count = 16 - if cross_attention_modules_in_model_count != expected_count: - # non-fatal error but .swap() won't work. - print(f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model " + - f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed " + - f"or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, " + - f"and/or update the {expected_count} above to an appropriate number, and/or find and inform someone who knows " + - f"what it means. 
This error is non-fatal, but it is likely that .swap() and attention map display will not " + - f"work properly until it is fixed.") - return attention_module_tuples - - -def inject_attention_function(unet, context: Context): - # ORIGINAL SOURCE CODE: https://github.com/huggingface/diffusers/blob/91ddd2a25b848df0fa1262d4f1cd98c7ccb87750/src/diffusers/models/attention.py#L276 - - def attention_slice_wrangler(module, suggested_attention_slice:torch.Tensor, dim, offset, slice_size): - - #memory_usage = suggested_attention_slice.element_size() * suggested_attention_slice.nelement() - - attention_slice = suggested_attention_slice - - if context.get_should_save_maps(module.identifier): - #print(module.identifier, "saving suggested_attention_slice of shape", - # suggested_attention_slice.shape, "dim", dim, "offset", offset) - slice_to_save = attention_slice.to('cpu') if dim is not None else attention_slice - context.save_slice(module.identifier, slice_to_save, dim=dim, offset=offset, slice_size=slice_size) - elif context.get_should_apply_saved_maps(module.identifier): - #print(module.identifier, "applying saved attention slice for dim", dim, "offset", offset) - saved_attention_slice = context.get_slice(module.identifier, dim, offset, slice_size) - - # slice may have been offloaded to CPU - saved_attention_slice = saved_attention_slice.to(suggested_attention_slice.device) - - if context.is_tokens_cross_attention(module.identifier): - index_map = context.cross_attention_index_map - remapped_saved_attention_slice = torch.index_select(saved_attention_slice, -1, index_map) - this_attention_slice = suggested_attention_slice - - mask = context.cross_attention_mask.to(torch_dtype(suggested_attention_slice.device)) - saved_mask = mask - this_mask = 1 - mask - attention_slice = remapped_saved_attention_slice * saved_mask + \ - this_attention_slice * this_mask - else: - # just use everything - attention_slice = saved_attention_slice - - return attention_slice - - cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF) - for identifier, module in cross_attention_modules: - module.identifier = identifier - try: - module.set_attention_slice_wrangler(attention_slice_wrangler) - module.set_slicing_strategy_getter( - lambda module: context.get_slicing_strategy(identifier) - ) - except AttributeError as e: - if is_attribute_error_about(e, 'set_attention_slice_wrangler'): - print(f"TODO: implement set_attention_slice_wrangler for {type(module)}") # TODO - else: - raise - - -def remove_attention_function(unet): - cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF) - for identifier, module in cross_attention_modules: - try: - # clear wrangler callback - module.set_attention_slice_wrangler(None) - module.set_slicing_strategy_getter(None) - except AttributeError as e: - if is_attribute_error_about(e, 'set_attention_slice_wrangler'): - print(f"TODO: implement set_attention_slice_wrangler for {type(module)}") - else: - raise - - -def is_attribute_error_about(error: AttributeError, attribute: str): - if hasattr(error, 'name'): # Python 3.10 - return error.name == attribute - else: # Python 3.9 - return attribute in str(error) - - - -def get_mem_free_total(device): - #only on cuda - if not torch.cuda.is_available(): - return None - stats = torch.cuda.memory_stats(device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = 
stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(device) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch - return mem_free_total - - - -class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin): - - def __init__(self, **kwargs): - super().__init__(**kwargs) - InvokeAICrossAttentionMixin.__init__(self) - - def _attention(self, query, key, value, attention_mask=None): - #default_result = super()._attention(query, key, value) - if attention_mask is not None: - print(f"{type(self).__name__} ignoring passed-in attention_mask") - attention_result = self.get_invokeai_attention_mem_efficient(query, key, value) - - hidden_states = self.reshape_batch_dim_to_heads(attention_result) - return hidden_states - - - - - -## 🧨diffusers implementation follows - - -""" -# base implementation - -class CrossAttnProcessor: - def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None): - batch_size, sequence_length, _ = hidden_states.shape - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length) - - query = attn.to_q(hidden_states) - query = attn.head_to_batch_dim(query) - - encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states - key = attn.to_k(encoder_hidden_states) - value = attn.to_v(encoder_hidden_states) - key = attn.head_to_batch_dim(key) - value = attn.head_to_batch_dim(value) - - attention_probs = attn.get_attention_scores(query, key, attention_mask) - hidden_states = torch.bmm(attention_probs, value) - hidden_states = attn.batch_to_head_dim(hidden_states) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - return hidden_states - -""" -from dataclasses import field, dataclass - -import torch - -from diffusers.models.cross_attention import CrossAttention, CrossAttnProcessor, SlicedAttnProcessor - - -@dataclass -class SwapCrossAttnContext: - modified_text_embeddings: torch.Tensor - index_map: torch.Tensor # maps from original prompt token indices to the equivalent tokens in the modified prompt - mask: torch.Tensor # in the target space of the index_map - cross_attention_types_to_do: list[CrossAttentionType] = field(default_factory=list) - - def __int__(self, - cac_types_to_do: [CrossAttentionType], - modified_text_embeddings: torch.Tensor, - index_map: torch.Tensor, - mask: torch.Tensor): - self.cross_attention_types_to_do = cac_types_to_do - self.modified_text_embeddings = modified_text_embeddings - self.index_map = index_map - self.mask = mask - - def wants_cross_attention_control(self, attn_type: CrossAttentionType) -> bool: - return attn_type in self.cross_attention_types_to_do - - @classmethod - def make_mask_and_index_map(cls, edit_opcodes: list[tuple[str, int, int, int, int]], max_length: int) \ - -> tuple[torch.Tensor, torch.Tensor]: - - # mask=1 means use original prompt attention, mask=0 means use modified prompt attention - mask = torch.zeros(max_length) - indices_target = torch.arange(max_length, dtype=torch.long) - indices = torch.arange(max_length, dtype=torch.long) - for name, a0, a1, b0, b1 in edit_opcodes: - if b0 < max_length: - if name == "equal": - # these tokens remain the same as in the original prompt - indices[b0:b1] = indices_target[a0:a1] - mask[b0:b1] = 1 - - return mask, indices - - -class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor): - - # TODO: 
dynamically pick slice size based on memory conditions - - def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None, - # kwargs - swap_cross_attn_context: SwapCrossAttnContext=None): - - attention_type = CrossAttentionType.SELF if encoder_hidden_states is None else CrossAttentionType.TOKENS - - # if cross-attention control is not in play, just call through to the base implementation. - if attention_type is CrossAttentionType.SELF or \ - swap_cross_attn_context is None or \ - not swap_cross_attn_context.wants_cross_attention_control(attention_type): - #print(f"SwapCrossAttnContext for {attention_type} not active - passing request to superclass") - return super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask) - #else: - # print(f"SwapCrossAttnContext for {attention_type} active") - - batch_size, sequence_length, _ = hidden_states.shape - attention_mask = attn.prepare_attention_mask( - attention_mask=attention_mask, target_length=sequence_length, - batch_size=batch_size) - - query = attn.to_q(hidden_states) - dim = query.shape[-1] - query = attn.head_to_batch_dim(query) - - original_text_embeddings = encoder_hidden_states - modified_text_embeddings = swap_cross_attn_context.modified_text_embeddings - original_text_key = attn.to_k(original_text_embeddings) - modified_text_key = attn.to_k(modified_text_embeddings) - original_value = attn.to_v(original_text_embeddings) - modified_value = attn.to_v(modified_text_embeddings) - - original_text_key = attn.head_to_batch_dim(original_text_key) - modified_text_key = attn.head_to_batch_dim(modified_text_key) - original_value = attn.head_to_batch_dim(original_value) - modified_value = attn.head_to_batch_dim(modified_value) - - # compute slices and prepare output tensor - batch_size_attention = query.shape[0] - hidden_states = torch.zeros( - (batch_size_attention, sequence_length, dim // attn.heads), device=query.device, dtype=query.dtype - ) - - # do slices - for i in range(max(1,hidden_states.shape[0] // self.slice_size)): - start_idx = i * self.slice_size - end_idx = (i + 1) * self.slice_size - - query_slice = query[start_idx:end_idx] - original_key_slice = original_text_key[start_idx:end_idx] - modified_key_slice = modified_text_key[start_idx:end_idx] - attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None - - original_attn_slice = attn.get_attention_scores(query_slice, original_key_slice, attn_mask_slice) - modified_attn_slice = attn.get_attention_scores(query_slice, modified_key_slice, attn_mask_slice) - - # because the prompt modifications may result in token sequences shifted forwards or backwards, - # the original attention probabilities must be remapped to account for token index changes in the - # modified prompt - remapped_original_attn_slice = torch.index_select(original_attn_slice, -1, - swap_cross_attn_context.index_map) - - # only some tokens taken from the original attention probabilities. this is controlled by the mask. 
- mask = swap_cross_attn_context.mask - inverse_mask = 1 - mask - attn_slice = \ - remapped_original_attn_slice * mask + \ - modified_attn_slice * inverse_mask - - del remapped_original_attn_slice, modified_attn_slice - - attn_slice = torch.bmm(attn_slice, modified_value[start_idx:end_idx]) - hidden_states[start_idx:end_idx] = attn_slice - - - # done - hidden_states = attn.batch_to_head_dim(hidden_states) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - return hidden_states - - -class SwapCrossAttnProcessor(SlicedSwapCrossAttnProcesser): - - def __init__(self): - super(SwapCrossAttnProcessor, self).__init__(slice_size=int(1e9)) # massive slice size = don't slice - diff --git a/ldm/models/diffusion/cross_attention_map_saving.py b/ldm/models/diffusion/cross_attention_map_saving.py deleted file mode 100644 index 82983573d3..0000000000 --- a/ldm/models/diffusion/cross_attention_map_saving.py +++ /dev/null @@ -1,95 +0,0 @@ -import math - -import PIL -import torch -from torchvision.transforms.functional import resize as tv_resize, InterpolationMode - -from ldm.models.diffusion.cross_attention_control import get_cross_attention_modules, CrossAttentionType - - -class AttentionMapSaver(): - - def __init__(self, token_ids: range, latents_shape: torch.Size): - self.token_ids = token_ids - self.latents_shape = latents_shape - #self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]]) - self.collated_maps = {} - - def clear_maps(self): - self.collated_maps = {} - - def add_attention_maps(self, maps: torch.Tensor, key: str): - """ - Accumulate the given attention maps and store by summing with existing maps at the passed-in key (if any). - :param maps: Attention maps to store. Expected shape [A, (H*W), N] where A is attention heads count, H and W are the map size (fixed per-key) and N is the number of tokens (typically 77). - :param key: Storage key. If a map already exists for this key it will be summed with the incoming data. In this case the maps sizes (H and W) should match. - :return: None - """ - key_and_size = f'{key}_{maps.shape[1]}' - - # extract desired tokens - maps = maps[:, :, self.token_ids] - - # merge attention heads to a single map per token - maps = torch.sum(maps, 0) - - # store - if key_and_size not in self.collated_maps: - self.collated_maps[key_and_size] = torch.zeros_like(maps, device='cpu') - self.collated_maps[key_and_size] += maps.cpu() - - def write_maps_to_disk(self, path: str): - pil_image = self.get_stacked_maps_image() - pil_image.save(path, 'PNG') - - def get_stacked_maps_image(self) -> PIL.Image: - """ - Scale all collected attention maps to the same size, blend them together and return as an image. - :return: An image containing a vertical stack of blended attention maps, one for each requested token. 
- """ - num_tokens = len(self.token_ids) - if num_tokens == 0: - return None - - latents_height = self.latents_shape[0] - latents_width = self.latents_shape[1] - - merged = None - - for key, maps in self.collated_maps.items(): - - # maps has shape [(H*W), N] for N tokens - # but we want [N, H, W] - this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height)) - this_maps_height = int(float(latents_height) * this_scale_factor) - this_maps_width = int(float(latents_width) * this_scale_factor) - # and we need to do some dimension juggling - maps = torch.reshape(torch.swapdims(maps, 0, 1), [num_tokens, this_maps_height, this_maps_width]) - - # scale to output size if necessary - if this_scale_factor != 1: - maps = tv_resize(maps, [latents_height, latents_width], InterpolationMode.BICUBIC) - - # normalize - maps_min = torch.min(maps) - maps_range = torch.max(maps) - maps_min - #print(f"map {key} size {[this_maps_width, this_maps_height]} range {[maps_min, maps_min + maps_range]}") - maps_normalized = (maps - maps_min) / maps_range - # expand to (-0.1, 1.1) and clamp - maps_normalized_expanded = maps_normalized * 1.1 - 0.05 - maps_normalized_expanded_clamped = torch.clamp(maps_normalized_expanded, 0, 1) - - # merge together, producing a vertical stack - maps_stacked = torch.reshape(maps_normalized_expanded_clamped, [num_tokens * latents_height, latents_width]) - - if merged is None: - merged = maps_stacked - else: - # screen blend - merged = 1 - (1 - maps_stacked)*(1 - merged) - - if merged is None: - return None - - merged_bytes = merged.mul(0xff).byte() - return PIL.Image.fromarray(merged_bytes.numpy(), mode='L') diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py deleted file mode 100644 index 304009c1d3..0000000000 --- a/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,111 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent -from ldm.models.diffusion.sampler import Sampler -from ldm.modules.diffusionmodules.util import noise_like - -class DDIMSampler(Sampler): - def __init__(self, model, schedule='linear', device=None, **kwargs): - super().__init__(model,schedule,model.num_timesteps,device) - - self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model, - model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond)) - - def prepare_to_sample(self, t_enc, **kwargs): - super().prepare_to_sample(t_enc, **kwargs) - - extra_conditioning_info = kwargs.get('extra_conditioning_info', None) - all_timesteps_count = kwargs.get('all_timesteps_count', t_enc) - - if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: - self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count) - else: - self.invokeai_diffuser.restore_default_cross_attention() - - - # This is the central routine - @torch.no_grad() - def p_sample( - self, - x, - c, - t, - index, - repeat_noise=False, - use_original_steps=False, - quantize_denoised=False, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - step_count:int=1000, # total number of steps - **kwargs, - ): - b, *_, device = *x.shape, x.device - - if ( - unconditional_conditioning is None - or unconditional_guidance_scale == 1.0 - ): - # damian0815 would like to know when/if this code path is used - e_t = self.model.apply_model(x, t, c) - 
else: - # step_index counts in the opposite direction to index - step_index = step_count-(index+1) - e_t = self.invokeai_diffuser.do_diffusion_step( - x, t, - unconditional_conditioning, c, - unconditional_guidance_scale, - step_index=step_index - ) - if score_corrector is not None: - assert self.model.parameterization == 'eps' - e_t = score_corrector.modify_score( - self.model, e_t, x, t, c, **corrector_kwargs - ) - - alphas = ( - self.model.alphas_cumprod - if use_original_steps - else self.ddim_alphas - ) - alphas_prev = ( - self.model.alphas_cumprod_prev - if use_original_steps - else self.ddim_alphas_prev - ) - sqrt_one_minus_alphas = ( - self.model.sqrt_one_minus_alphas_cumprod - if use_original_steps - else self.ddim_sqrt_one_minus_alphas - ) - sigmas = ( - self.model.ddim_sigmas_for_original_num_steps - if use_original_steps - else self.ddim_sigmas - ) - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full( - (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device - ) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t - noise = ( - sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - ) - if noise_dropout > 0.0: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0, None - diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py deleted file mode 100644 index 7c7ba9f5fd..0000000000 --- a/ldm/models/diffusion/ddpm.py +++ /dev/null @@ -1,2271 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -import torch - -import torch.nn as nn -import os -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only -from omegaconf import ListConfig -import urllib - -from ldm.modules.textual_inversion_manager import TextualInversionManager -from ldm.util import ( - log_txt_as_img, - exists, - default, - ismap, - isimage, - mean_flat, - count_params, - instantiate_from_config, -) -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import ( - normal_kl, - DiagonalGaussianDistribution, -) -from ldm.models.autoencoder import ( - VQModelInterface, - IdentityFirstStage, - AutoencoderKL, -) -from ldm.modules.diffusionmodules.util import ( - make_beta_schedule, - extract_into_tensor, - noise_like, -) -from ldm.models.diffusion.ddim import DDIMSampler - - -__conditioning_keys__ = { - 'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y', -} - - -def disabled_train(self, mode=True): - 
"""Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__( - self, - unet_config, - timesteps=1000, - beta_schedule='linear', - loss_type='l2', - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor='val/loss', - use_ema=True, - first_stage_key='image', - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0.0, - embedding_reg_weight=0.0, - v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1.0, - conditioning_key=None, - parameterization='eps', # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0.0, - ): - super().__init__() - assert parameterization in [ - 'eps', - 'x0', - ], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - print( - f' | {self.__class__.__name__}: Running in {self.parameterization}-prediction mode' - ) - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - print(f' | Keeping EMAs of {len(list(self.model_ema.buffers()))}.') - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - self.embedding_reg_weight = embedding_reg_weight - - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt( - ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet - ) - - self.register_schedule( - given_betas=given_betas, - beta_schedule=beta_schedule, - timesteps=timesteps, - linear_start=linear_start, - linear_end=linear_end, - cosine_s=cosine_s, - ) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full( - fill_value=logvar_init, size=(self.num_timesteps,) - ) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - def register_schedule( - self, - given_betas=None, - beta_schedule='linear', - timesteps=1000, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - ): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule( - beta_schedule, - timesteps, - linear_start=linear_start, - linear_end=linear_end, - cosine_s=cosine_s, - ) - alphas = 1.0 - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) - - (timesteps,) = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert ( - alphas_cumprod.shape[0] == self.num_timesteps - ), 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - 
self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer( - 'alphas_cumprod_prev', to_torch(alphas_cumprod_prev) - ) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer( - 'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)) - ) - self.register_buffer( - 'sqrt_one_minus_alphas_cumprod', - to_torch(np.sqrt(1.0 - alphas_cumprod)), - ) - self.register_buffer( - 'log_one_minus_alphas_cumprod', - to_torch(np.log(1.0 - alphas_cumprod)), - ) - self.register_buffer( - 'sqrt_recip_alphas_cumprod', - to_torch(np.sqrt(1.0 / alphas_cumprod)), - ) - self.register_buffer( - 'sqrt_recipm1_alphas_cumprod', - to_torch(np.sqrt(1.0 / alphas_cumprod - 1)), - ) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * ( - 1.0 - alphas_cumprod_prev - ) / (1.0 - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer( - 'posterior_variance', to_torch(posterior_variance) - ) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer( - 'posterior_log_variance_clipped', - to_torch(np.log(np.maximum(posterior_variance, 1e-20))), - ) - self.register_buffer( - 'posterior_mean_coef1', - to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod) - ), - ) - self.register_buffer( - 'posterior_mean_coef2', - to_torch( - (1.0 - alphas_cumprod_prev) - * np.sqrt(alphas) - / (1.0 - alphas_cumprod) - ), - ) - - if self.parameterization == 'eps': - lvlb_weights = self.betas**2 / ( - 2 - * self.posterior_variance - * to_torch(alphas) - * (1 - self.alphas_cumprod) - ) - elif self.parameterization == 'x0': - lvlb_weights = ( - 0.5 - * np.sqrt(torch.Tensor(alphas_cumprod)) - / (2.0 * 1 - torch.Tensor(alphas_cumprod)) - ) - else: - raise NotImplementedError('mu not supported') - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f'{context}: Switched to EMA weights') - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f'{context}: Restored training weights') - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location='cpu') - if 'state_dict' in list(sd.keys()): - sd = sd['state_dict'] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) - del sd[k] - missing, unexpected = ( - self.load_state_dict(sd, strict=False) - if not only_model - else self.model.load_state_dict(sd, strict=False) - ) - print( - f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' - ) - if len(missing) > 0: - print(f'Missing Keys: {missing}') - if len(unexpected) > 0: - print(f'Unexpected Keys: {unexpected}') - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
- :return: A tuple (mean, variance, log_variance), all of x_start's shape. - """ - mean = ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) - * x_start - ) - variance = extract_into_tensor( - 1.0 - self.alphas_cumprod, t, x_start.shape - ) - log_variance = extract_into_tensor( - self.log_one_minus_alphas_cumprod, t, x_start.shape - ) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) - * x_t - - extract_into_tensor( - self.sqrt_recipm1_alphas_cumprod, t, x_t.shape - ) - * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) - * x_start - + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) - * x_t - ) - posterior_variance = extract_into_tensor( - self.posterior_variance, t, x_t.shape - ) - posterior_log_variance_clipped = extract_into_tensor( - self.posterior_log_variance_clipped, t, x_t.shape - ) - return ( - posterior_mean, - posterior_variance, - posterior_log_variance_clipped, - ) - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == 'eps': - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == 'x0': - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1.0, 1.0) - - ( - model_mean, - posterior_variance, - posterior_log_variance, - ) = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance( - x=x, t=t, clip_denoised=clip_denoised - ) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape( - b, *((1,) * (len(x.shape) - 1)) - ) - return ( - model_mean - + nonzero_mask * (0.5 * model_log_variance).exp() * noise - ) - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm( - reversed(range(0, self.num_timesteps)), - desc='Sampling t', - total=self.num_timesteps, - dynamic_ncols=True, - ): - img = self.p_sample( - img, - torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised, - ) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop( - (batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates, - ) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) - * x_start - + extract_into_tensor( - self.sqrt_one_minus_alphas_cumprod, t, x_start.shape - ) - * noise - ) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = 
torch.nn.functional.mse_loss( - target, pred, reduction='none' - ) - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == 'eps': - target = noise - elif self.parameterization == 'x0': - target = x_start - else: - raise NotImplementedError( - f'Paramterization {self.parameterization} not yet supported' - ) - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t = torch.randint( - 0, self.num_timesteps, (x.shape[0],), device=self.device - ).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - loss, loss_dict = self.shared_step(batch) - - self.log_dict( - loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True - ) - - self.log( - 'global_step', - self.global_step, - prog_bar=True, - logger=True, - on_step=True, - on_epoch=False, - ) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log( - 'lr_abs', - lr, - prog_bar=True, - logger=True, - on_step=True, - on_epoch=False, - ) - - return loss - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = { - key + '_ema': loss_dict_ema[key] for key in loss_dict_ema - } - self.log_dict( - loss_dict_no_ema, - prog_bar=False, - logger=True, - on_step=False, - on_epoch=True, - ) - self.log_dict( - loss_dict_ema, - prog_bar=False, - logger=True, - on_step=False, - on_epoch=True, - ) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images( - self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs - ): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log['inputs'] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % 
self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log['diffusion_row'] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope('Plotting'): - samples, denoise_row = self.sample( - batch_size=N, return_intermediates=True - ) - - log['samples'] = samples - log['denoise_row'] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - - def __init__( - self, - first_stage_config, - cond_stage_config, - personalization_config, - num_timesteps_cond=None, - cond_stage_key='image', - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - *args, - **kwargs, - ): - - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - conditioning_key = None - ckpt_path = kwargs.pop('ckpt_path', None) - ignore_keys = kwargs.pop('ignore_keys', []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - - try: - self.num_downs = ( - len(first_stage_config.params.ddconfig.ch_mult) - 1 - ) - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - - self.model.eval() - self.model.train = disabled_train - for param in self.model.parameters(): - param.requires_grad = False - - self.embedding_manager = self.instantiate_embedding_manager( - personalization_config, self.cond_stage_model - ) - self.textual_inversion_manager = TextualInversionManager( - tokenizer = self.cond_stage_model.tokenizer, - text_encoder = self.cond_stage_model.transformer, - full_precision = True - ) - # this circular component dependency is gross and bad, needs to be rethought - self.cond_stage_model.set_textual_inversion_manager(self.textual_inversion_manager) - - self.emb_ckpt_counter = 0 - - # if self.embedding_manager.is_clip: - # self.cond_stage_model.update_embedding_func(self.embedding_manager) - - for param in self.embedding_manager.embedding_parameters(): - 
param.requires_grad = True - - def make_cond_schedule( - self, - ): - self.cond_ids = torch.full( - size=(self.num_timesteps,), - fill_value=self.num_timesteps - 1, - dtype=torch.long, - ) - ids = torch.round( - torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) - ).long() - self.cond_ids[: self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx=None): - # only for very first batch - if ( - self.scale_by_std - and self.current_epoch == 0 - and self.global_step == 0 - and batch_idx == 0 - and not self.restarted_from_ckpt - ): - assert ( - self.scale_factor == 1.0 - ), 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print('### USING STD-RESCALING ###') - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1.0 / z.flatten().std()) - print(f'setting self.scale_factor to {self.scale_factor}') - print('### USING STD-RESCALING ###') - - def register_schedule( - self, - given_betas=None, - beta_schedule='linear', - timesteps=1000, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - ): - super().register_schedule( - given_betas, - beta_schedule, - timesteps, - linear_start, - linear_end, - cosine_s, - ) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == '__is_first_stage__': - print('Using first stage also as cond stage.') - self.cond_stage_model = self.first_stage_model - elif config == '__is_unconditional__': - print( - f'Training {self.__class__.__name__} as an unconditional model.' - ) - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - try: - model = instantiate_from_config(config) - except urllib.error.URLError: - raise SystemExit( - "* Couldn't load a dependency. Try running scripts/preload_models.py from an internet-conected machine." 
- ) - self.cond_stage_model = model - - def instantiate_embedding_manager(self, config, embedder): - model = instantiate_from_config(config, embedder=embedder) - - if config.params.get( - 'embedding_manager_ckpt', None - ): # do not load if missing OR empty string - model.load(config.params.embedding_manager_ckpt) - - return model - - def _get_denoise_row_from_list( - self, samples, desc='', force_no_decoder_quantization=False - ): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append( - self.decode_first_stage( - zd.to(self.device), - force_not_quantize=force_no_decoder_quantization, - ) - ) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError( - f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" - ) - return self.scale_factor * z - - def get_learned_conditioning(self, c, **kwargs): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable( - self.cond_stage_model.encode - ): - c = self.cond_stage_model.encode( - c, embedding_manager=self.embedding_manager,**kwargs - ) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c, **kwargs) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c, **kwargs) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min( - torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 - )[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip( - weighting, - self.split_input_params['clip_min_weight'], - self.split_input_params['clip_max_weight'], - ) - weighting = ( - weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - ) - - if self.split_input_params['tie_braker']: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip( - L_weighting, - self.split_input_params['clip_min_tie_weight'], - self.split_input_params['clip_max_tie_weight'], - ) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold( - self, x, kernel_size, stride, uf=1, df=1 - ): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = 
x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict( - kernel_size=kernel_size, dilation=1, padding=0, stride=stride - ) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting( - kernel_size[0], kernel_size[1], Ly, Lx, x.device - ).to(x.dtype) - normalization = fold(weighting).view( - 1, 1, h, w - ) # normalizes the overlap - weighting = weighting.view( - (1, 1, kernel_size[0], kernel_size[1], Ly * Lx) - ) - - elif uf > 1 and df == 1: - fold_params = dict( - kernel_size=kernel_size, dilation=1, padding=0, stride=stride - ) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict( - kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, - padding=0, - stride=(stride[0] * uf, stride[1] * uf), - ) - fold = torch.nn.Fold( - output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 - ) - - weighting = self.get_weighting( - kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device - ).to(x.dtype) - normalization = fold(weighting).view( - 1, 1, h * uf, w * uf - ) # normalizes the overlap - weighting = weighting.view( - (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) - ) - - elif df > 1 and uf == 1: - fold_params = dict( - kernel_size=kernel_size, dilation=1, padding=0, stride=stride - ) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict( - kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, - padding=0, - stride=(stride[0] // df, stride[1] // df), - ) - fold = torch.nn.Fold( - output_size=(x.shape[2] // df, x.shape[3] // df), - **fold_params2, - ) - - weighting = self.get_weighting( - kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device - ).to(x.dtype) - normalization = fold(weighting).view( - 1, 1, h // df, w // df - ) # normalizes the overlap - weighting = weighting.view( - (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) - ) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - @torch.no_grad() - def get_input( - self, - batch, - k, - return_first_stage_outputs=False, - force_c_encode=False, - cond_key=None, - return_original_cond=False, - bs=None, - ): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - - if self.model.conditioning_key is not None: - if cond_key is None: - cond_key = self.cond_stage_key - if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox']: - xc = batch[cond_key] - elif cond_key == 'class_label': - xc = batch - else: - xc = super().get_input(batch, cond_key).to(self.device) - else: - xc = x - if not self.cond_stage_trainable or force_c_encode: - if isinstance(xc, dict) or isinstance(xc, list): - # import pudb; pudb.set_trace() - c = self.get_learned_conditioning(xc) - else: - c = self.get_learned_conditioning(xc.to(self.device)) - else: - c = xc - if bs is not None: - c = c[:bs] - - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} - - else: - c = None - xc = None - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} - out = [z, c] - if 
return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - @torch.no_grad() - def decode_first_stage( - self, z, predict_cids=False, force_not_quantize=False - ): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry( - z, shape=None - ) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1.0 / self.scale_factor * z - - if hasattr(self, 'split_input_params'): - if self.split_input_params['patch_distributed_vq']: - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) - uf = self.split_input_params['vqf'] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print('reducing Kernel') - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print('reducing stride') - - fold, unfold, normalization, weighting = self.get_fold_unfold( - z, ks, stride, uf=uf - ) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view( - (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) - ) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [ - self.first_stage_model.decode( - z[:, :, :, :, i], - force_not_quantize=predict_cids - or force_not_quantize, - ) - for i in range(z.shape[-1]) - ] - else: - - output_list = [ - self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1]) - ] - - o = torch.stack( - output_list, axis=-1 - ) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode( - z, - force_not_quantize=predict_cids or force_not_quantize, - ) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode( - z, force_not_quantize=predict_cids or force_not_quantize - ) - else: - return self.first_stage_model.decode(z) - - # same as above but without decorator - def differentiable_decode_first_stage( - self, z, predict_cids=False, force_not_quantize=False - ): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry( - z, shape=None - ) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1.0 / self.scale_factor * z - - if hasattr(self, 'split_input_params'): - if self.split_input_params['patch_distributed_vq']: - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) - uf = self.split_input_params['vqf'] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print('reducing Kernel') - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print('reducing stride') - - fold, unfold, normalization, weighting = self.get_fold_unfold( - z, ks, stride, uf=uf - ) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. 
Reshape to img shape - z = z.view( - (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) - ) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [ - self.first_stage_model.decode( - z[:, :, :, :, i], - force_not_quantize=predict_cids - or force_not_quantize, - ) - for i in range(z.shape[-1]) - ] - else: - - output_list = [ - self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1]) - ] - - o = torch.stack( - output_list, axis=-1 - ) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode( - z, - force_not_quantize=predict_cids or force_not_quantize, - ) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode( - z, force_not_quantize=predict_cids or force_not_quantize - ) - else: - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, 'split_input_params'): - if self.split_input_params['patch_distributed_vq']: - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) - df = self.split_input_params['vqf'] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print('reducing Kernel') - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print('reducing stride') - - fold, unfold, normalization, weighting = self.get_fold_unfold( - x, ks, stride, df=df - ) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view( - (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) - ) # (bn, nc, ks[0], ks[1], L ) - - output_list = [ - self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1]) - ] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint( - 0, self.num_timesteps, (x.shape[0],), device=self.device - ).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample( - x_start=c, t=tc, noise=torch.randn_like(c.float()) - ) - - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations( - self, bboxes, crop_coordinates - ): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - 
x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - - if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = ( - 'c_concat' - if self.model.conditioning_key == 'concat' - else 'c_crossattn' - ) - cond = {key: cond} - - if hasattr(self, 'split_input_params'): - assert ( - len(cond) == 1 - ) # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold( - x_noisy, ks, stride - ) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view( - (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) - ) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if ( - self.cond_stage_key - in ['image', 'LR_image', 'segmentation', 'bbox_img'] - and self.model.conditioning_key - ): # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert ( - len(c) == 1 - ) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view( - (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) - ) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [ - {c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1]) - ] - - elif self.cond_stage_key == 'coordinates_bbox': - assert ( - 'original_image_size' in self.split_input_params - ), 'BoudingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params[ - 'original_image_size' - ] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top left positions of patches as conforming for the bbbox tokenizer, therefore we - # need to rescale the tl patch coordinates to be in between (0,1) - tl_patch_coordinates = [ - ( - rescale_latent - * stride[0] - * (patch_nr % n_patches_per_row) - / full_img_w, - rescale_latent - * stride[1] - * (patch_nr // n_patches_per_row) - / full_img_h, - ) - for patch_nr in range(z.shape[-1]) - ] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) - patch_limits = [ - ( - x_tl, - y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h, - ) - for x_tl, y_tl in tl_patch_coordinates - ] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [ - torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[ - None - ].to(self.device) - for bbox in patch_limits - ] # list of length l with tensors of shape (1, 2) - print(patch_limits_tknzd[0].shape) - # cut tknzd crop position from conditioning - assert isinstance( - cond, dict - ), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - print(cut_cond.shape) - - 
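# --- editor's note: illustrative sketch, not part of the original patch ---
# The split_input_params code paths in this hunk process a large tensor as a
# grid of overlapping crops: torch.nn.Unfold slices it into patches, each patch
# is run through the model independently, the results are blended with a
# per-pixel weighting, and torch.nn.Fold stitches them back together, dividing
# by fold(weighting) so overlapping regions normalize to one. Minimal,
# self-contained version of that pattern with an identity "model" and a uniform
# weighting; kernel and stride values are arbitrary examples, not taken from
# the deleted code.
import torch

def patchwise_apply(x, ks=(8, 8), stride=(4, 4)):
    bs, nc, h, w = x.shape
    fold_params = dict(kernel_size=ks, dilation=1, padding=0, stride=stride)
    unfold = torch.nn.Unfold(**fold_params)
    fold = torch.nn.Fold(output_size=(h, w), **fold_params)

    # number of crops along each axis
    Ly = (h - ks[0]) // stride[0] + 1
    Lx = (w - ks[1]) // stride[1] + 1

    # uniform weighting here; the original uses a border-distance weighting
    weighting = torch.ones(1, 1, ks[0], ks[1], Ly * Lx)
    normalization = fold(weighting.view(1, ks[0] * ks[1], Ly * Lx))  # overlap counts

    z = unfold(x)                             # (bs, nc * ks[0] * ks[1], L)
    z = z.view(bs, nc, ks[0], ks[1], -1)      # (bs, nc, ks[0], ks[1], L)
    # apply the per-crop "model" (identity here) crop by crop, as above
    out = torch.stack([z[..., i] for i in range(z.shape[-1])], dim=-1)
    out = (out * weighting).view(bs, nc * ks[0] * ks[1], -1)
    return fold(out) / normalization          # stitched, overlap-normalized result

# with an identity model the round trip reproduces the input
x = torch.randn(1, 3, 16, 16)
assert torch.allclose(patchwise_apply(x), x, atol=1e-5)
# --- end of editor's sketch ---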
adapted_cond = torch.stack( - [ - torch.cat([cut_cond, p], dim=1) - for p in patch_limits_tknzd - ] - ) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - print(adapted_cond.shape) - adapted_cond = self.get_learned_conditioning(adapted_cond) - print(adapted_cond.shape) - adapted_cond = rearrange( - adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1] - ) - print(adapted_cond.shape) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [ - cond for i in range(z.shape[-1]) - ] # Todo make this more efficient - - # apply model by loop over crops - output_list = [ - self.model(z_list[i], t, **cond_list[i]) - for i in range(z.shape[-1]) - ] - assert not isinstance( - output_list[0], tuple - ) # todo cant deal with multiple model outputs check this never happens - - o = torch.stack(output_list, axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) - * x_t - - pred_xstart - ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor( - [self.num_timesteps - 1] * batch_size, device=x_start.device - ) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl( - mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 - ) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == 'x0': - target = x_start - elif self.parameterization == 'eps': - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean( - [1, 2, 3] - ) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t.item()].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean( - dim=(1, 2, 3) - ) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += self.original_elbo_weight * loss_vlb - loss_dict.update({f'{prefix}/loss': loss}) - - if self.embedding_reg_weight > 0: - loss_embedding_reg = ( - self.embedding_manager.embedding_to_coarse_loss().mean() - ) - - loss_dict.update({f'{prefix}/loss_emb_reg': loss_embedding_reg}) - - loss += self.embedding_reg_weight * 
loss_embedding_reg - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance( - self, - x, - c, - t, - clip_denoised: bool, - return_codebook_ids=False, - quantize_denoised=False, - return_x0=False, - score_corrector=None, - corrector_kwargs=None, - ): - t_in = t - model_out = self.apply_model( - x, t_in, c, return_ids=return_codebook_ids - ) - - if score_corrector is not None: - assert self.parameterization == 'eps' - model_out = score_corrector.modify_score( - self, model_out, x, t, c, **corrector_kwargs - ) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == 'eps': - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == 'x0': - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1.0, 1.0) - if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize( - x_recon - ) - ( - model_mean, - posterior_variance, - posterior_log_variance, - ) = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return ( - model_mean, - posterior_variance, - posterior_log_variance, - logits, - ) - elif return_x0: - return ( - model_mean, - posterior_variance, - posterior_log_variance, - x_recon, - ) - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample( - self, - x, - c, - t, - clip_denoised=False, - repeat_noise=False, - return_codebook_ids=False, - quantize_denoised=False, - return_x0=False, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - ): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance( - x=x, - c=c, - t=t, - clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - ) - if return_codebook_ids: - raise DeprecationWarning('Support dropped.') - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.0: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape( - b, *((1,) * (len(x.shape) - 1)) - ) - - if return_codebook_ids: - return model_mean + nonzero_mask * ( - 0.5 * model_log_variance - ).exp() * noise, logits.argmax(dim=1) - if return_x0: - return ( - model_mean - + nonzero_mask * (0.5 * model_log_variance).exp() * noise, - x0, - ) - else: - return ( - model_mean - + nonzero_mask * (0.5 * model_log_variance).exp() * noise - ) - - @torch.no_grad() - def progressive_denoising( - self, - cond, - shape, - verbose=True, - callback=None, - quantize_denoised=False, - img_callback=None, - mask=None, - x0=None, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - batch_size=None, - x_T=None, - start_T=None, - log_every_t=None, - ): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, 
dict): - cond = { - key: cond[key][:batch_size] - if not isinstance(cond[key], list) - else list(map(lambda x: x[:batch_size], cond[key])) - for key in cond - } - else: - cond = ( - [c[:batch_size] for c in cond] - if isinstance(cond, list) - else cond[:batch_size] - ) - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = ( - tqdm( - reversed(range(0, timesteps)), - desc='Progressive Generation', - total=timesteps, - ) - if verbose - else reversed(range(0, timesteps)) - ) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample( - x_start=cond, t=tc, noise=torch.randn_like(cond) - ) - - img, x0_partial = self.p_sample( - img, - cond, - ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, - return_x0=True, - temperature=temperature[i], - noise_dropout=noise_dropout, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - ) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1.0 - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: - callback(i) - if img_callback: - img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop( - self, - cond, - shape, - return_intermediates=False, - x_T=None, - verbose=True, - callback=None, - timesteps=None, - quantize_denoised=False, - mask=None, - x0=None, - img_callback=None, - start_T=None, - log_every_t=None, - ): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = ( - tqdm( - reversed(range(0, timesteps)), - desc='Sampling t', - total=timesteps, - ) - if verbose - else reversed(range(0, timesteps)) - ) - - if mask is not None: - assert x0 is not None - assert ( - x0.shape[2:3] == mask.shape[2:3] - ) # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample( - x_start=cond, t=tc, noise=torch.randn_like(cond) - ) - - img = self.p_sample( - img, - cond, - ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, - ) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1.0 - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: - callback(i) - if img_callback: - img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample( - self, - cond, - batch_size=16, - return_intermediates=False, - x_T=None, - verbose=True, - timesteps=None, - quantize_denoised=False, - mask=None, - x0=None, - shape=None, - **kwargs, - ): - if shape is None: - shape = ( - batch_size, - self.channels, - self.image_size, - self.image_size, - ) - if cond is not None: - if isinstance(cond, dict): - cond = { - key: cond[key][:batch_size] - if not isinstance(cond[key], list) - else 
list(map(lambda x: x[:batch_size], cond[key])) - for key in cond - } - else: - cond = ( - [c[:batch_size] for c in cond] - if isinstance(cond, list) - else cond[:batch_size] - ) - return self.p_sample_loop( - cond, - shape, - return_intermediates=return_intermediates, - x_T=x_T, - verbose=verbose, - timesteps=timesteps, - quantize_denoised=quantize_denoised, - mask=mask, - x0=x0, - ) - - @torch.no_grad() - def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): - - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates = ddim_sampler.sample( - ddim_steps, batch_size, shape, cond, verbose=False, **kwargs - ) - - else: - samples, intermediates = self.sample( - cond=cond, - batch_size=batch_size, - return_intermediates=True, - **kwargs, - ) - - return samples, intermediates - - @torch.no_grad() - def get_unconditional_conditioning(self, batch_size, null_label=None): - if null_label is not None: - xc = null_label - if isinstance(xc, ListConfig): - xc = list(xc) - if isinstance(xc, dict) or isinstance(xc, list): - c = self.get_learned_conditioning(xc) - else: - if hasattr(xc, "to"): - xc = xc.to(self.device) - c = self.get_learned_conditioning(xc) - else: - # todo: get null label from cond_stage_model - raise NotImplementedError() - c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) - return c - - @torch.no_grad() - def log_images( - self, - batch, - N=8, - n_row=4, - sample=True, - ddim_steps=50, - ddim_eta=1.0, - return_keys=None, - quantize_denoised=True, - inpaint=False, - plot_denoise_rows=False, - plot_progressive_rows=False, - plot_diffusion_rows=False, - **kwargs, - ): - - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input( - batch, - self.first_stage_key, - return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=N, - ) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log['inputs'] = x - log['reconstruction'] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, 'decode'): - xc = self.cond_stage_model.decode(c) - log['conditioning'] = xc - elif self.cond_stage_key in ['caption']: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['caption']) - log['conditioning'] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img( - (x.shape[2], x.shape[3]), batch['human_label'] - ) - log['conditioning'] = xc - elif isimage(xc): - log['conditioning'] = xc - if ismap(xc): - log['original_conditioning'] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack( - diffusion_row - ) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange( - diffusion_grid, 'b n c h w -> (b n) c h w' - ) - diffusion_grid = make_grid( - diffusion_grid, nrow=diffusion_row.shape[0] - ) - log['diffusion_row'] = diffusion_grid - - if sample: - # get denoise row - with self.ema_scope('Plotting'): - samples, z_denoise_row = self.sample_log( - cond=c, - batch_size=N, - ddim=use_ddim, - ddim_steps=ddim_steps, - 
eta=ddim_eta, - ) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log['samples'] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log['denoise_row'] = denoise_grid - - uc = self.get_learned_conditioning(len(c) * ['']) - sample_scaled, _ = self.sample_log( - cond=c, - batch_size=N, - ddim=use_ddim, - ddim_steps=ddim_steps, - eta=ddim_eta, - unconditional_guidance_scale=5.0, - unconditional_conditioning=uc, - ) - log['samples_scaled'] = self.decode_first_stage(sample_scaled) - - if ( - quantize_denoised - and not isinstance(self.first_stage_model, AutoencoderKL) - and not isinstance(self.first_stage_model, IdentityFirstStage) - ): - # also display when quantizing x0 while sampling - with self.ema_scope('Plotting Quantized Denoised'): - samples, z_denoise_row = self.sample_log( - cond=c, - batch_size=N, - ddim=use_ddim, - ddim_steps=ddim_steps, - eta=ddim_eta, - quantize_denoised=True, - ) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log['samples_x0_quantized'] = x_samples - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0 - mask = mask[:, None, ...] - with self.ema_scope('Plotting Inpaint'): - - samples, _ = self.sample_log( - cond=c, - batch_size=N, - ddim=use_ddim, - eta=ddim_eta, - ddim_steps=ddim_steps, - x0=z[:N], - mask=mask, - ) - x_samples = self.decode_first_stage(samples.to(self.device)) - log['samples_inpainting'] = x_samples - log['mask'] = mask - - # outpaint - with self.ema_scope('Plotting Outpaint'): - samples, _ = self.sample_log( - cond=c, - batch_size=N, - ddim=use_ddim, - eta=ddim_eta, - ddim_steps=ddim_steps, - x0=z[:N], - mask=mask, - ) - x_samples = self.decode_first_stage(samples.to(self.device)) - log['samples_outpainting'] = x_samples - - if plot_progressive_rows: - with self.ema_scope('Plotting Progressives'): - img, progressives = self.progressive_denoising( - c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N, - ) - prog_row = self._get_denoise_row_from_list( - progressives, desc='Progressive Generation' - ) - log['progressive_row'] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - - if self.embedding_manager is not None: - params = list(self.embedding_manager.embedding_parameters()) - # params = list(self.cond_stage_model.transformer.text_model.embeddings.embedding_manager.embedding_parameters()) - else: - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print( - f'{self.__class__.__name__}: Also optimizing conditioner params!' 
- ) - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print('Setting up LambdaLR scheduler...') - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1, - } - ] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, 'colorize'): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 - return x - - @rank_zero_only - def on_save_checkpoint(self, checkpoint): - checkpoint.clear() - - if os.path.isdir(self.trainer.checkpoint_callback.dirpath): - self.embedding_manager.save( - os.path.join( - self.trainer.checkpoint_callback.dirpath, 'embeddings.pt' - ) - ) - - if (self.global_step - self.emb_ckpt_counter) > 500: - self.embedding_manager.save( - os.path.join( - self.trainer.checkpoint_callback.dirpath, - f'embeddings_gs-{self.global_step}.pt', - ) - ) - - self.emb_ckpt_counter += 500 - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [ - None, - 'concat', - 'crossattn', - 'hybrid', - 'adm', - ] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - cc = torch.cat(c_crossattn, 1) - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class Layout2ImgDiffusion(LatentDiffusion): - # TODO: move all layout-specific hacks to this class - def __init__(self, cond_stage_key, *args, **kwargs): - assert ( - cond_stage_key == 'coordinates_bbox' - ), 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) - - def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) - - key = 'train' if self.training else 'validation' - dset = self.trainer.datamodule.datasets[key] - mapper = dset.conditional_builders[self.cond_stage_key] - - bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label( - dset.get_category_id(catno) - ) - for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot( - tknzd_bbox.detach().cpu(), map_fn, (256, 256) - ) - bbox_imgs.append(bboximg) - - cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img - return logs - -class LatentInpaintDiffusion(LatentDiffusion): - def __init__( - self, - concat_keys=("mask", "masked_image"), - masked_image_key="masked_image", - finetune_keys=None, - *args, - **kwargs, - ): - super().__init__(*args, 
**kwargs) - self.masked_image_key = masked_image_key - assert self.masked_image_key in concat_keys - self.concat_keys = concat_keys - - - @torch.no_grad() - def get_input( - self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False - ): - # note: restricted to non-trainable encoders currently - assert ( - not self.cond_stage_trainable - ), "trainable cond stages not yet supported for inpainting" - z, c, x, xrec, xc = super().get_input( - batch, - self.first_stage_key, - return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=bs, - ) - - assert exists(self.concat_keys) - c_cat = list() - for ck in self.concat_keys: - cc = ( - rearrange(batch[ck], "b h w c -> b c h w") - .to(memory_format=torch.contiguous_format) - .float() - ) - if bs is not None: - cc = cc[:bs] - cc = cc.to(self.device) - bchw = z.shape - if ck != self.masked_image_key: - cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) - else: - cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) - c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} - if return_first_stage_outputs: - return z, all_conds, x, xrec, xc - return z, all_conds diff --git a/ldm/models/diffusion/ksampler.py b/ldm/models/diffusion/ksampler.py deleted file mode 100644 index f98ca8de21..0000000000 --- a/ldm/models/diffusion/ksampler.py +++ /dev/null @@ -1,312 +0,0 @@ -"""wrapper around part of Katherine Crowson's k-diffusion library, making it call compatible with other Samplers""" - -import k_diffusion as K -import torch -from torch import nn - -from .cross_attention_map_saving import AttentionMapSaver -from .sampler import Sampler -from .shared_invokeai_diffusion import InvokeAIDiffuserComponent - - -# at this threshold, the scheduler will stop using the Karras -# noise schedule and start using the model's schedule -STEP_THRESHOLD = 30 - -def cfg_apply_threshold(result, threshold = 0.0, scale = 0.7): - if threshold <= 0.0: - return result - maxval = 0.0 + torch.max(result).cpu().numpy() - minval = 0.0 + torch.min(result).cpu().numpy() - if maxval < threshold and minval > -threshold: - return result - if maxval > threshold: - maxval = min(max(1, scale*maxval), threshold) - if minval < -threshold: - minval = max(min(-1, scale*minval), -threshold) - return torch.clamp(result, min=minval, max=maxval) - - -class CFGDenoiser(nn.Module): - def __init__(self, model, threshold = 0, warmup = 0): - super().__init__() - self.inner_model = model - self.threshold = threshold - self.warmup_max = warmup - self.warmup = max(warmup / 10, 1) - self.invokeai_diffuser = InvokeAIDiffuserComponent(model, - model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond)) - - - def prepare_to_sample(self, t_enc, **kwargs): - - extra_conditioning_info = kwargs.get('extra_conditioning_info', None) - - if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: - self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = t_enc) - else: - self.invokeai_diffuser.restore_default_cross_attention() - - - def forward(self, x, sigma, uncond, cond, cond_scale): - next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale) - if self.warmup < self.warmup_max: - thresh = max(1, 1 + (self.threshold - 1) * (self.warmup / self.warmup_max)) - self.warmup += 1 - else: - thresh = self.threshold - if thresh > self.threshold: - thresh = self.threshold - return 
cfg_apply_threshold(next_x, thresh) - -class KSampler(Sampler): - def __init__(self, model, schedule='lms', device=None, **kwargs): - denoiser = K.external.CompVisDenoiser(model) - super().__init__( - denoiser, - schedule, - steps=model.num_timesteps, - ) - self.sigmas = None - self.ds = None - self.s_in = None - self.karras_max = kwargs.get('karras_max',STEP_THRESHOLD) - if self.karras_max is None: - self.karras_max = STEP_THRESHOLD - - def make_schedule( - self, - ddim_num_steps, - ddim_discretize='uniform', - ddim_eta=0.0, - verbose=False, - ): - outer_model = self.model - self.model = outer_model.inner_model - super().make_schedule( - ddim_num_steps, - ddim_discretize='uniform', - ddim_eta=0.0, - verbose=False, - ) - self.model = outer_model - self.ddim_num_steps = ddim_num_steps - # we don't need both of these sigmas, but storing them here to make - # comparison easier later on - self.model_sigmas = self.model.get_sigmas(ddim_num_steps) - self.karras_sigmas = K.sampling.get_sigmas_karras( - n=ddim_num_steps, - sigma_min=self.model.sigmas[0].item(), - sigma_max=self.model.sigmas[-1].item(), - rho=7., - device=self.device, - ) - - if ddim_num_steps >= self.karras_max: - print(f'>> Ksampler using model noise schedule (steps >= {self.karras_max})') - self.sigmas = self.model_sigmas - else: - print(f'>> Ksampler using karras noise schedule (steps < {self.karras_max})') - self.sigmas = self.karras_sigmas - - # ALERT: We are completely overriding the sample() method in the base class, which - # means that inpainting will not work. To get this to work we need to be able to - # modify the inner loop of k_heun, k_lms, etc, as is done in an ugly way - # in the lstein/k-diffusion branch. - - @torch.no_grad() - def decode( - self, - z_enc, - cond, - t_enc, - img_callback=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - use_original_steps=False, - init_latent = None, - mask = None, - **kwargs - ): - samples,_ = self.sample( - batch_size = 1, - S = t_enc, - x_T = z_enc, - shape = z_enc.shape[1:], - conditioning = cond, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning = unconditional_conditioning, - img_callback = img_callback, - x0 = init_latent, - mask = mask, - **kwargs - ) - return samples - - # this is a no-op, provided here for compatibility with ddim and plms samplers - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): - return x0 - - # Most of these arguments are ignored and are only present for compatibility with - # other samples - @torch.no_grad() - def sample( - self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - attention_maps_callback=None, - quantize_x0=False, - eta=0.0, - mask=None, - x0=None, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - extra_conditioning_info: InvokeAIDiffuserComponent.ExtraConditioningInfo=None, - threshold = 0, - perlin = 0, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs, - ): - def route_callback(k_callback_values): - if img_callback is not None: - img_callback(k_callback_values['x'],k_callback_values['i']) - - # if make_schedule() hasn't been called, we do it now - if self.sigmas is None: - self.make_schedule( - ddim_num_steps=S, - ddim_eta = eta, - verbose = False, - ) - - # sigmas are set up in make_schedule - we take the last steps items - sigmas = self.sigmas[-S-1:] - - # x_T is variation noise. When an init image is provided (in x0) we need to add - # more randomness to the starting image. - if x_T is not None: - if x0 is not None: - x = x_T + torch.randn_like(x0, device=self.device) * sigmas[0] - else: - x = x_T * sigmas[0] - else: - x = torch.randn([batch_size, *shape], device=self.device) * sigmas[0] - - model_wrap_cfg = CFGDenoiser(self.model, threshold=threshold, warmup=max(0.8*S,S-10)) - model_wrap_cfg.prepare_to_sample(S, extra_conditioning_info=extra_conditioning_info) - - # setup attention maps saving. checks for None are because there are multiple code paths to get here. - attention_map_saver = None - if attention_maps_callback is not None and extra_conditioning_info is not None: - eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1 - attention_map_token_ids = range(1, eos_token_index) - attention_map_saver = AttentionMapSaver(token_ids = attention_map_token_ids, latents_shape=x.shape[-2:]) - model_wrap_cfg.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) - - extra_args = { - 'cond': conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': unconditional_guidance_scale, - } - print(f'>> Sampling with k_{self.schedule} starting at step {len(self.sigmas)-S-1} of {len(self.sigmas)-1} ({S} new sampling steps)') - sampling_result = ( - K.sampling.__dict__[f'sample_{self.schedule}']( - model_wrap_cfg, x, sigmas, extra_args=extra_args, - callback=route_callback - ), - None, - ) - if attention_map_saver is not None: - attention_maps_callback(attention_map_saver) - return sampling_result - - # this code will support inpainting if and when ksampler API modified or - # a workaround is found. - @torch.no_grad() - def p_sample( - self, - img, - cond, - ts, - index, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - extra_conditioning_info=None, - **kwargs, - ): - if self.model_wrap is None: - self.model_wrap = CFGDenoiser(self.model) - extra_args = { - 'cond': cond, - 'uncond': unconditional_conditioning, - 'cond_scale': unconditional_guidance_scale, - } - if self.s_in is None: - self.s_in = img.new_ones([img.shape[0]]) - if self.ds is None: - self.ds = [] - - # terrible, confusing names here - steps = self.ddim_num_steps - t_enc = self.t_enc - - # sigmas is a full steps in length, but t_enc might - # be less. We start in the middle of the sigma array - # and work our way to the end after t_enc steps. - # index starts at t_enc and works its way to zero, - # so the actual formula for indexing into sigmas: - # sigma_index = (steps-index) - s_index = t_enc - index - 1 - self.model_wrap.prepare_to_sample(s_index, extra_conditioning_info=extra_conditioning_info) - img = K.sampling.__dict__[f'_{self.schedule}']( - self.model_wrap, - img, - self.sigmas, - s_index, - s_in = self.s_in, - ds = self.ds, - extra_args=extra_args, - ) - - return img, None, None - - # REVIEW THIS METHOD: it has never been tested. In particular, - # we should not be multiplying by self.sigmas[0] if we - # are at an intermediate step in img2img. See similar in - # sample() which does work. 
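# --- editor's note: illustrative sketch, not part of the original patch ---
# CFGDenoiser at the top of this file runs the wrapped model twice per step
# (unconditioned and conditioned), blends the two predictions with the guidance
# scale, and then soft-clamps outliers via cfg_apply_threshold. Stand-alone
# sketch of that per-step pattern with a toy model; the real class delegates
# the blending to InvokeAIDiffuserComponent, and the names here are the
# editor's own.
import torch
from torch import nn

class ToyDenoiser(nn.Module):
    # stand-in for the k-diffusion CompVisDenoiser wrapper used above
    def forward(self, x, sigma, cond=None):
        c = cond if cond is not None else torch.zeros_like(x)
        return x - 0.1 * sigma.view(-1, 1, 1, 1) * c

def cfg_step(model, x, sigma, uncond, cond, cond_scale, threshold=0.0, scale=0.7):
    e_uncond = model(x, sigma, cond=uncond)
    e_cond = model(x, sigma, cond=cond)
    # classifier-free guidance: move the prediction away from the
    # unconditioned result in the direction of the conditioned one
    result = e_uncond + cond_scale * (e_cond - e_uncond)
    if threshold <= 0.0:
        return result
    # same soft clamp as cfg_apply_threshold above
    maxval = result.max().item()
    minval = result.min().item()
    if maxval < threshold and minval > -threshold:
        return result
    if maxval > threshold:
        maxval = min(max(1, scale * maxval), threshold)
    if minval < -threshold:
        minval = max(min(-1, scale * minval), -threshold)
    return torch.clamp(result, min=minval, max=maxval)

x = torch.randn(1, 4, 8, 8)
sigma = torch.tensor([14.6])
out = cfg_step(ToyDenoiser(), x, sigma,
               uncond=torch.zeros_like(x), cond=torch.ones_like(x),
               cond_scale=7.5, threshold=1.0)
# --- end of editor's sketch ---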
- def get_initial_image(self,x_T,shape,steps): - print(f'WARNING: ksampler.get_initial_image(): get_initial_image needs testing') - x = (torch.randn(shape, device=self.device) * self.sigmas[0]) - if x_T is not None: - return x_T + x - else: - return x - - def prepare_to_sample(self,t_enc,**kwargs): - self.t_enc = t_enc - self.model_wrap = None - self.ds = None - self.s_in = None - - def q_sample(self,x0,ts): - ''' - Overrides parent method to return the q_sample of the inner model. - ''' - return self.model.inner_model.q_sample(x0,ts) - - def conditioning_key(self)->str: - return self.model.inner_model.model.conditioning_key - diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py deleted file mode 100644 index 9edd333780..0000000000 --- a/ldm/models/diffusion/plms.py +++ /dev/null @@ -1,146 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial -from ldm.invoke.devices import choose_torch_device -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent -from ldm.models.diffusion.sampler import Sampler -from ldm.modules.diffusionmodules.util import noise_like - - -class PLMSSampler(Sampler): - def __init__(self, model, schedule='linear', device=None, **kwargs): - super().__init__(model,schedule,model.num_timesteps, device) - - def prepare_to_sample(self, t_enc, **kwargs): - super().prepare_to_sample(t_enc, **kwargs) - - extra_conditioning_info = kwargs.get('extra_conditioning_info', None) - all_timesteps_count = kwargs.get('all_timesteps_count', t_enc) - - if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: - self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count) - else: - self.invokeai_diffuser.restore_default_cross_attention() - - - # this is the essential routine - @torch.no_grad() - def p_sample( - self, - x, # image, called 'img' elsewhere - c, # conditioning, called 'cond' elsewhere - t, # timesteps, called 'ts' elsewhere - index, - repeat_noise=False, - use_original_steps=False, - quantize_denoised=False, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - old_eps=[], - t_next=None, - step_count:int=1000, # total number of steps - **kwargs, - ): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if ( - unconditional_conditioning is None - or unconditional_guidance_scale == 1.0 - ): - # damian0815 would like to know when/if this code path is used - e_t = self.model.apply_model(x, t, c) - else: - # step_index counts in the opposite direction to index - step_index = step_count-(index+1) - e_t = self.invokeai_diffuser.do_diffusion_step(x, t, - unconditional_conditioning, c, - unconditional_guidance_scale, - step_index=step_index) - if score_corrector is not None: - assert self.model.parameterization == 'eps' - e_t = score_corrector.modify_score( - self.model, e_t, x, t, c, **corrector_kwargs - ) - - return e_t - - alphas = ( - self.model.alphas_cumprod - if use_original_steps - else self.ddim_alphas - ) - alphas_prev = ( - self.model.alphas_cumprod_prev - if use_original_steps - else self.ddim_alphas_prev - ) - sqrt_one_minus_alphas = ( - self.model.sqrt_one_minus_alphas_cumprod - if use_original_steps - else self.ddim_sqrt_one_minus_alphas - ) - sigmas = ( - self.model.ddim_sigmas_for_original_num_steps - if use_original_steps - else self.ddim_sigmas - 
) - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full( - (b, 1, 1, 1), alphas_prev[index], device=device - ) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full( - (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device - ) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t - noise = ( - sigma_t - * noise_like(x.shape, device, repeat_noise) - * temperature - ) - if noise_dropout > 0.0: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = ( - 55 * e_t - - 59 * old_eps[-1] - + 37 * old_eps[-2] - - 9 * old_eps[-3] - ) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t diff --git a/ldm/models/diffusion/sampler.py b/ldm/models/diffusion/sampler.py deleted file mode 100644 index d7ec5bf1f4..0000000000 --- a/ldm/models/diffusion/sampler.py +++ /dev/null @@ -1,450 +0,0 @@ -''' -ldm.models.diffusion.sampler - -Base class for ldm.models.diffusion.ddim, ldm.models.diffusion.ksampler, etc -''' -import torch -import numpy as np -from tqdm import tqdm -from functools import partial -from ldm.invoke.devices import choose_torch_device -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent - -from ldm.modules.diffusionmodules.util import ( - make_ddim_sampling_parameters, - make_ddim_timesteps, - noise_like, - extract_into_tensor, -) - -class Sampler(object): - def __init__(self, model, schedule='linear', steps=None, device=None, **kwargs): - self.model = model - self.ddim_timesteps = None - self.ddpm_num_timesteps = steps - self.schedule = schedule - self.device = device or choose_torch_device() - self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model, - model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond)) - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device(self.device): - attr = attr.to(torch.float32).to(torch.device(self.device)) - setattr(self, name, attr) - - # This method was copied over from ddim.py and probably does stuff that is - # ddim-specific. Disentangle at some point. 
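# --- editor's note: illustrative sketch, not part of the original patch ---
# The PLMS p_sample hunk above blends the current noise prediction with up to
# three previous ones using Adams-Bashforth linear-multistep coefficients
# (with a "pseudo improved Euler" look-ahead average when there is no history
# yet). This helper isolates just that coefficient ladder; the function name
# is the editor's own.
import torch

def plms_combine_eps(e_t, old_eps):
    """Return the effective epsilon e_t' used for the PLMS x_prev update."""
    if len(old_eps) == 1:
        return (3 * e_t - old_eps[-1]) / 2                              # 2nd order
    if len(old_eps) == 2:
        return (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12     # 3rd order
    if len(old_eps) >= 3:
        return (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2]
                - 9 * old_eps[-3]) / 24                                 # 4th order
    # len(old_eps) == 0: the caller instead averages e_t with a second
    # prediction taken at (x_prev, t_next), so there is nothing to combine here.
    return e_t

eps_history = [torch.randn(1, 4, 8, 8) for _ in range(3)]
e_t_prime = plms_combine_eps(torch.randn(1, 4, 8, 8), eps_history)
# --- end of editor's sketch ---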
- def make_schedule( - self, - ddim_num_steps, - ddim_discretize='uniform', - ddim_eta=0.0, - verbose=False, - ): - self.total_steps = ddim_num_steps - self.ddim_timesteps = make_ddim_timesteps( - ddim_discr_method=ddim_discretize, - num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps, - verbose=verbose, - ) - alphas_cumprod = self.model.alphas_cumprod - assert ( - alphas_cumprod.shape[0] == self.ddpm_num_timesteps - ), 'alphas have to be defined for each timestep' - to_torch = ( - lambda x: x.clone() - .detach() - .to(torch.float32) - .to(self.model.device) - ) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer( - 'alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev) - ) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer( - 'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())) - ) - self.register_buffer( - 'sqrt_one_minus_alphas_cumprod', - to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), - ) - self.register_buffer( - 'log_one_minus_alphas_cumprod', - to_torch(np.log(1.0 - alphas_cumprod.cpu())), - ) - self.register_buffer( - 'sqrt_recip_alphas_cumprod', - to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())), - ) - self.register_buffer( - 'sqrt_recipm1_alphas_cumprod', - to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), - ) - - # ddim sampling parameters - ( - ddim_sigmas, - ddim_alphas, - ddim_alphas_prev, - ) = make_ddim_sampling_parameters( - alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta, - verbose=verbose, - ) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer( - 'ddim_sqrt_one_minus_alphas', np.sqrt(1.0 - ddim_alphas) - ) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) - / (1 - self.alphas_cumprod) - * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) - ) - self.register_buffer( - 'ddim_sigmas_for_original_num_steps', - sigmas_for_original_sampling_steps, - ) - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - return ( - extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 - + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) - * noise - ) - - @torch.no_grad() - def sample( - self, - S, # S is steps - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, # TODO: this is very confusing because it is called "step_callback" elsewhere. Change. - quantize_x0=False, - eta=0.0, - mask=None, - x0=None, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - verbose=False, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs, - ): - - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): - ctmp = ctmp[0] - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # check to see if make_schedule() has run, and if not, run it - if self.ddim_timesteps is None: - self.make_schedule( - ddim_num_steps=S, - ddim_eta = eta, - verbose = False, - ) - - ts = self.get_timesteps(S) - - # sampling - C, H, W = shape - shape = (batch_size, C, H, W) - samples, intermediates = self.do_sampling( - conditioning, - shape, - timesteps=ts, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, - x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - steps=S, - **kwargs - ) - return samples, intermediates - - @torch.no_grad() - def do_sampling( - self, - cond, - shape, - timesteps=None, - x_T=None, - ddim_use_original_steps=False, - callback=None, - quantize_denoised=False, - mask=None, - x0=None, - img_callback=None, - log_every_t=100, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - steps=None, - **kwargs - ): - b = shape[0] - time_range = ( - list(reversed(range(0, timesteps))) - if ddim_use_original_steps - else np.flip(timesteps) - ) - - total_steps=steps - - iterator = tqdm( - time_range, - desc=f'{self.__class__.__name__}', - total=total_steps, - dynamic_ncols=True, - ) - old_eps = [] - self.prepare_to_sample(t_enc=total_steps,all_timesteps_count=steps,**kwargs) - img = self.get_initial_image(x_T,shape,total_steps) - - # probably don't need this at all - intermediates = {'x_inter': [img], 'pred_x0': [img]} - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full( - (b,), - step, - device=self.device, - dtype=torch.long - ) - ts_next = torch.full( - (b,), - time_range[min(i + 1, len(time_range) - 1)], - device=self.device, - dtype=torch.long, - ) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample( - x0, ts - ) # TODO: deterministic forward pass? 
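# --- editor's note: illustrative sketch, not part of the original patch ---
# The statement just above and the one that follows implement latent
# inpainting: the known latent x0 is re-noised to the current timestep with
# q_sample and then composited with the evolving sample, so only the region
# where mask == 0 is actually generated. Stand-alone version of that blend;
# the scalar alpha value is a toy stand-in for the model's schedule.
import torch

def q_sample(x0, sqrt_alpha_cumprod_t, noise=None):
    # forward diffusion: x_t = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps
    noise = torch.randn_like(x0) if noise is None else noise
    return sqrt_alpha_cumprod_t * x0 + (1 - sqrt_alpha_cumprod_t ** 2) ** 0.5 * noise

def blend_known_region(img, x0, mask, sqrt_alpha_cumprod_t):
    # mask == 1 keeps the (re-noised) original content, mask == 0 is generated
    img_orig = q_sample(x0, sqrt_alpha_cumprod_t)
    return img_orig * mask + (1.0 - mask) * img

x0 = torch.randn(1, 4, 64, 64)      # latent of the init image
img = torch.randn_like(x0)          # current sample at timestep t
mask = torch.ones_like(x0)
mask[..., 16:48, 16:48] = 0.0       # zeros mark the hole to be filled in
img = blend_known_region(img, x0, mask, sqrt_alpha_cumprod_t=0.7)
# --- end of editor's sketch ---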
- img = img_orig * mask + (1.0 - mask) * img - - outs = self.p_sample( - img, - cond, - ts, - index=index, - use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, - temperature=temperature, - noise_dropout=noise_dropout, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - old_eps=old_eps, - t_next=ts_next, - step_count=steps - ) - img, pred_x0, e_t = outs - - old_eps.append(e_t) - if len(old_eps) >= 4: - old_eps.pop(0) - if callback: - callback(i) - if img_callback: - img_callback(img,i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - # NOTE that decode() and sample() are almost the same code, and do the same thing. - # The variable names are changed in order to be confusing. - @torch.no_grad() - def decode( - self, - x_latent, - cond, - t_start, - img_callback=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - use_original_steps=False, - init_latent = None, - mask = None, - all_timesteps_count = None, - **kwargs - ): - timesteps = ( - np.arange(self.ddpm_num_timesteps) - if use_original_steps - else self.ddim_timesteps - ) - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f'>> Running {self.__class__.__name__} sampling starting at step {self.total_steps - t_start} of {self.total_steps} ({total_steps} new sampling steps)') - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - x0 = init_latent - self.prepare_to_sample(t_enc=total_steps, all_timesteps_count=all_timesteps_count, **kwargs) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full( - (x_latent.shape[0],), - step, - device=x_latent.device, - dtype=torch.long, - ) - - ts_next = torch.full( - (x_latent.shape[0],), - time_range[min(i + 1, len(time_range) - 1)], - device=self.device, - dtype=torch.long, - ) - - if mask is not None: - assert x0 is not None - xdec_orig = self.q_sample(x0, ts) # TODO: deterministic forward pass? - x_dec = xdec_orig * mask + (1.0 - mask) * x_dec - - outs = self.p_sample( - x_dec, - cond, - ts, - index=index, - use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - t_next = ts_next, - step_count=len(self.ddim_timesteps) - ) - - x_dec, pred_x0, e_t = outs - if img_callback: - img_callback(x_dec,i) - - return x_dec - - def get_initial_image(self,x_T,shape,timesteps=None): - if x_T is None: - return torch.randn(shape, device=self.device) - else: - return x_T - - def p_sample( - self, - img, - cond, - ts, - index, - repeat_noise=False, - use_original_steps=False, - quantize_denoised=False, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - old_eps=None, - t_next=None, - steps=None, - ): - raise NotImplementedError("p_sample() must be implemented in a descendent class") - - def prepare_to_sample(self,t_enc,**kwargs): - ''' - Hook that will be called right before the very first invocation of p_sample() - to allow subclass to do additional initialization. 
t_enc corresponds to the actual - number of steps that will be run, and may be less than total steps if img2img is - active. - ''' - pass - - def get_timesteps(self,ddim_steps): - ''' - The ddim and plms samplers work on timesteps. This method is called after - ddim_timesteps are created in make_schedule(), and selects the portion of - timesteps that will be used for sampling, depending on the t_enc in img2img. - ''' - return self.ddim_timesteps[:ddim_steps] - - def q_sample(self,x0,ts): - ''' - Returns self.model.q_sample(x0,ts). Is overridden in the k* samplers to - return self.model.inner_model.q_sample(x0,ts) - ''' - return self.model.q_sample(x0,ts) - - def conditioning_key(self)->str: - return self.model.model.conditioning_key - - def uses_inpainting_model(self)->bool: - return self.conditioning_key() in ('hybrid','concat') - - def adjust_settings(self,**kwargs): - ''' - This is a catch-all method for adjusting any instance variables - after the sampler is instantiated. No type-checking performed - here, so use with care! - ''' - for k in kwargs.keys(): - try: - setattr(self,k,kwargs[k]) - except AttributeError: - print(f'** Warning: attempt to set unknown attribute {k} in sampler of type {type(self)}') diff --git a/ldm/models/diffusion/shared_invokeai_diffusion.py b/ldm/models/diffusion/shared_invokeai_diffusion.py deleted file mode 100644 index cddddd3e86..0000000000 --- a/ldm/models/diffusion/shared_invokeai_diffusion.py +++ /dev/null @@ -1,491 +0,0 @@ -from contextlib import contextmanager -from dataclasses import dataclass -from math import ceil -from typing import Callable, Optional, Union, Any, Dict - -import numpy as np -import torch -from diffusers.models.cross_attention import AttnProcessor -from typing_extensions import TypeAlias - -from ldm.invoke.globals import Globals -from ldm.models.diffusion.cross_attention_control import Arguments, \ - restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \ - CrossAttentionType, SwapCrossAttnContext -from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver - -ModelForwardCallback: TypeAlias = Union[ - # x, t, conditioning, Optional[cross-attention kwargs] - Callable[[torch.Tensor, torch.Tensor, torch.Tensor, Optional[dict[str, Any]]], torch.Tensor], - Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor] -] - -@dataclass(frozen=True) -class PostprocessingSettings: - threshold: float - warmup: float - h_symmetry_time_pct: Optional[float] - v_symmetry_time_pct: Optional[float] - - -class InvokeAIDiffuserComponent: - ''' - The aim of this component is to provide a single place for code that can be applied identically to - all InvokeAI diffusion procedures. - - At the moment it includes the following features: - * Cross attention control ("prompt2prompt") - * Hybrid conditioning (used for inpainting) - ''' - debug_thresholding = False - sequential_guidance = False - - @dataclass - class ExtraConditioningInfo: - - tokens_count_including_eos_bos: int - cross_attention_control_args: Optional[Arguments] = None - - @property - def wants_cross_attention_control(self): - return self.cross_attention_control_args is not None - - - def __init__(self, model, model_forward_callback: ModelForwardCallback, - is_running_diffusers: bool=False, - ): - """ - :param model: the unet model to pass through to cross attention control - :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. 
most likely, this should simply call model.forward(x, sigma, conditioning) - """ - self.conditioning = None - self.model = model - self.is_running_diffusers = is_running_diffusers - self.model_forward_callback = model_forward_callback - self.cross_attention_control_context = None - self.sequential_guidance = Globals.sequential_guidance - - @contextmanager - def custom_attention_context(self, - extra_conditioning_info: Optional[ExtraConditioningInfo], - step_count: int): - do_swap = extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control - old_attn_processor = None - if do_swap: - old_attn_processor = self.override_cross_attention(extra_conditioning_info, - step_count=step_count) - try: - yield None - finally: - if old_attn_processor is not None: - self.restore_default_cross_attention(old_attn_processor) - # TODO resuscitate attention map saving - #self.remove_attention_map_saving() - - def override_cross_attention(self, conditioning: ExtraConditioningInfo, step_count: int) -> Dict[str, AttnProcessor]: - """ - setup cross attention .swap control. for diffusers this replaces the attention processor, so - the previous attention processor is returned so that the caller can restore it later. - """ - self.conditioning = conditioning - self.cross_attention_control_context = Context( - arguments=self.conditioning.cross_attention_control_args, - step_count=step_count - ) - return override_cross_attention(self.model, - self.cross_attention_control_context, - is_running_diffusers=self.is_running_diffusers) - - def restore_default_cross_attention(self, restore_attention_processor: Optional['AttnProcessor']=None): - self.conditioning = None - self.cross_attention_control_context = None - restore_default_cross_attention(self.model, - is_running_diffusers=self.is_running_diffusers, - restore_attention_processor=restore_attention_processor) - - def setup_attention_map_saving(self, saver: AttentionMapSaver): - def callback(slice, dim, offset, slice_size, key): - if dim is not None: - # sliced tokens attention map saving is not implemented - return - saver.add_attention_maps(slice, key) - - tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS) - for identifier, module in tokens_cross_attention_modules: - key = ('down' if identifier.startswith('down') else - 'up' if identifier.startswith('up') else - 'mid') - module.set_attention_slice_calculated_callback( - lambda slice, dim, offset, slice_size, key=key: callback(slice, dim, offset, slice_size, key)) - - def remove_attention_map_saving(self): - tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS) - for _, module in tokens_cross_attention_modules: - module.set_attention_slice_calculated_callback(None) - - def do_diffusion_step(self, x: torch.Tensor, sigma: torch.Tensor, - unconditioning: Union[torch.Tensor,dict], - conditioning: Union[torch.Tensor,dict], - unconditional_guidance_scale: float, - step_index: Optional[int]=None, - total_step_count: Optional[int]=None, - ): - """ - :param x: current latents - :param sigma: aka t, passed to the internal model to control how much denoising will occur - :param unconditioning: embeddings for unconditioned output. for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768] - :param conditioning: embeddings for conditioned output. 
for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768] - :param unconditional_guidance_scale: aka CFG scale, controls how much effect the conditioning tensor has - :param step_index: counts upwards from 0 to (step_count-1) (as passed to setup_cross_attention_control, if using). May be called multiple times for a single step, therefore do not assume that its value will monotonically increase. If None, will be estimated by comparing sigma against self.model.sigmas. - :return: the new latents after applying the model to x using unscaled unconditioning and CFG-scaled conditioning. - """ - - - cross_attention_control_types_to_do = [] - context: Context = self.cross_attention_control_context - if self.cross_attention_control_context is not None: - percent_through = self.calculate_percent_through(sigma, step_index, total_step_count) - cross_attention_control_types_to_do = context.get_active_cross_attention_control_types_for_step(percent_through) - - wants_cross_attention_control = (len(cross_attention_control_types_to_do) > 0) - wants_hybrid_conditioning = isinstance(conditioning, dict) - - if wants_hybrid_conditioning: - unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning(x, sigma, unconditioning, - conditioning) - elif wants_cross_attention_control: - unconditioned_next_x, conditioned_next_x = self._apply_cross_attention_controlled_conditioning(x, sigma, - unconditioning, - conditioning, - cross_attention_control_types_to_do) - elif self.sequential_guidance: - unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning_sequentially( - x, sigma, unconditioning, conditioning) - - else: - unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning( - x, sigma, unconditioning, conditioning) - - combined_next_x = self._combine(unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale) - - return combined_next_x - - def do_latent_postprocessing( - self, - postprocessing_settings: PostprocessingSettings, - latents: torch.Tensor, - sigma, - step_index, - total_step_count - ) -> torch.Tensor: - if postprocessing_settings is not None: - percent_through = self.calculate_percent_through(sigma, step_index, total_step_count) - latents = self.apply_threshold(postprocessing_settings, latents, percent_through) - latents = self.apply_symmetry(postprocessing_settings, latents, percent_through) - return latents - - def calculate_percent_through(self, sigma, step_index, total_step_count): - if step_index is not None and total_step_count is not None: - # 🧨diffusers codepath - percent_through = step_index / total_step_count # will never reach 1.0 - this is deliberate - else: - # legacy compvis codepath - # TODO remove when compvis codepath support is dropped - if step_index is None and sigma is None: - raise ValueError( - f"Either step_index or sigma is required when doing cross attention control, but both are None.") - percent_through = self.estimate_percent_through(step_index, sigma) - return percent_through - - # methods below are called from do_diffusion_step and should be considered private to this class.
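All of the conditioning paths below funnel into _combine(), which is the standard classifier-free guidance update: keep the unconditioned prediction and add the scaled difference the prompt makes. A minimal, self-contained sketch of the fast batched path; fake_forward and the tensor shapes are illustrative stand-ins, not part of this patch:

import torch

def fake_forward(x, sigma, conditioning):
    # stand-in for model_forward_callback: anything that returns a tensor
    # shaped like x is enough for this sketch
    return x * 0.5 + conditioning.mean() * 0.01

x = torch.randn(1, 4, 64, 64)      # current latents
sigma = torch.tensor([7.0])        # current noise level
uncond = torch.randn(1, 77, 768)   # embedding of the empty prompt
cond = torch.randn(1, 77, 768)     # embedding of the actual prompt
guidance_scale = 7.5               # aka CFG scale

# fast batched path (_apply_standard_conditioning): one forward pass serves both
x_twice = torch.cat([x] * 2)
sigma_twice = torch.cat([sigma] * 2)
both_conditionings = torch.cat([uncond, cond])
uncond_next, cond_next = fake_forward(x_twice, sigma_twice, both_conditionings).chunk(2)

# _combine(): scale only the change the conditioning makes
combined_next_x = uncond_next + (cond_next - uncond_next) * guidance_scale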
- - def _apply_standard_conditioning(self, x, sigma, unconditioning, conditioning): - # fast batched path - x_twice = torch.cat([x] * 2) - sigma_twice = torch.cat([sigma] * 2) - both_conditionings = torch.cat([unconditioning, conditioning]) - both_results = self.model_forward_callback(x_twice, sigma_twice, both_conditionings) - unconditioned_next_x, conditioned_next_x = both_results.chunk(2) - if conditioned_next_x.device.type == 'mps': - # prevent a result filled with zeros. seems to be a torch bug. - conditioned_next_x = conditioned_next_x.clone() - return unconditioned_next_x, conditioned_next_x - - - def _apply_standard_conditioning_sequentially(self, x: torch.Tensor, sigma, unconditioning: torch.Tensor, conditioning: torch.Tensor): - # low-memory sequential path - unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning) - conditioned_next_x = self.model_forward_callback(x, sigma, conditioning) - if conditioned_next_x.device.type == 'mps': - # prevent a result filled with zeros. seems to be a torch bug. - conditioned_next_x = conditioned_next_x.clone() - return unconditioned_next_x, conditioned_next_x - - - def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning): - assert isinstance(conditioning, dict) - assert isinstance(unconditioning, dict) - x_twice = torch.cat([x] * 2) - sigma_twice = torch.cat([sigma] * 2) - both_conditionings = dict() - for k in conditioning: - if isinstance(conditioning[k], list): - both_conditionings[k] = [ - torch.cat([unconditioning[k][i], conditioning[k][i]]) - for i in range(len(conditioning[k])) - ] - else: - both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]]) - unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2) - return unconditioned_next_x, conditioned_next_x - - - def _apply_cross_attention_controlled_conditioning(self, - x: torch.Tensor, - sigma, - unconditioning, - conditioning, - cross_attention_control_types_to_do): - if self.is_running_diffusers: - return self._apply_cross_attention_controlled_conditioning__diffusers(x, sigma, unconditioning, - conditioning, - cross_attention_control_types_to_do) - else: - return self._apply_cross_attention_controlled_conditioning__compvis(x, sigma, unconditioning, conditioning, - cross_attention_control_types_to_do) - - def _apply_cross_attention_controlled_conditioning__diffusers(self, - x: torch.Tensor, - sigma, - unconditioning, - conditioning, - cross_attention_control_types_to_do): - context: Context = self.cross_attention_control_context - - cross_attn_processor_context = SwapCrossAttnContext(modified_text_embeddings=context.arguments.edited_conditioning, - index_map=context.cross_attention_index_map, - mask=context.cross_attention_mask, - cross_attention_types_to_do=[]) - # no cross attention for unconditioning (negative prompt) - unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning, - {"swap_cross_attn_context": cross_attn_processor_context}) - - # do requested cross attention types for conditioning (positive prompt) - cross_attn_processor_context.cross_attention_types_to_do = cross_attention_control_types_to_do - conditioned_next_x = self.model_forward_callback(x, sigma, conditioning, - {"swap_cross_attn_context": cross_attn_processor_context}) - return unconditioned_next_x, conditioned_next_x - - - def _apply_cross_attention_controlled_conditioning__compvis(self, x:torch.Tensor, sigma, unconditioning, conditioning, 
cross_attention_control_types_to_do): - # print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do) - # slower non-batched path (20% slower on mac MPS) - # We are only interested in using attention maps for conditioned_next_x, but batching them with generation of - # unconditioned_next_x causes attention maps to *also* be saved for the unconditioned_next_x. - # This messes up their application later, due to mismatched shape of dim 0 (seems to be 16 for batched vs. 8) - # (For the batched invocation the `wrangler` function gets attention tensor with shape[0]=16, - # representing batched uncond + cond, but then when it comes to applying the saved attention, the - # wrangler gets an attention tensor which only has shape[0]=8, representing just self.edited_conditionings.) - # todo: give CrossAttentionControl's `wrangler` function more info so it can work with a batched call as well. - context:Context = self.cross_attention_control_context - - try: - unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning) - - # process x using the original prompt, saving the attention maps - #print("saving attention maps for", cross_attention_control_types_to_do) - for ca_type in cross_attention_control_types_to_do: - context.request_save_attention_maps(ca_type) - _ = self.model_forward_callback(x, sigma, conditioning) - context.clear_requests(cleanup=False) - - # process x again, using the saved attention maps to control where self.edited_conditioning will be applied - #print("applying saved attention maps for", cross_attention_control_types_to_do) - for ca_type in cross_attention_control_types_to_do: - context.request_apply_saved_attention_maps(ca_type) - edited_conditioning = self.conditioning.cross_attention_control_args.edited_conditioning - conditioned_next_x = self.model_forward_callback(x, sigma, edited_conditioning) - context.clear_requests(cleanup=True) - - except: - context.clear_requests(cleanup=True) - raise - - return unconditioned_next_x, conditioned_next_x - - def _combine(self, unconditioned_next_x, conditioned_next_x, guidance_scale): - # to scale how much effect conditioning has, calculate the changes it does and then scale that - scaled_delta = (conditioned_next_x - unconditioned_next_x) * guidance_scale - combined_next_x = unconditioned_next_x + scaled_delta - return combined_next_x - - def apply_threshold( - self, - postprocessing_settings: PostprocessingSettings, - latents: torch.Tensor, - percent_through: float - ) -> torch.Tensor: - - if postprocessing_settings.threshold is None or postprocessing_settings.threshold == 0.0: - return latents - - threshold = postprocessing_settings.threshold - warmup = postprocessing_settings.warmup - - if percent_through < warmup: - current_threshold = threshold + threshold * 5 * (1 - (percent_through / warmup)) - else: - current_threshold = threshold - - if current_threshold <= 0: - return latents - - maxval = latents.max().item() - minval = latents.min().item() - - scale = 0.7 # default value from #395 - - if self.debug_thresholding: - std, mean = [i.item() for i in torch.std_mean(latents)] - outside = torch.count_nonzero((latents < -current_threshold) | (latents > current_threshold)) - print(f"\nThreshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})\n" - f" | min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}\n" - f" | {outside / latents.numel() * 100:.2f}% values outside threshold") - - if maxval < current_threshold and minval >
-current_threshold: - return latents - - num_altered = 0 - - # MPS torch.rand_like is fine because torch.rand_like is wrapped in generate.py! - - if maxval > current_threshold: - latents = torch.clone(latents) - maxval = np.clip(maxval * scale, 1, current_threshold) - num_altered += torch.count_nonzero(latents > maxval) - latents[latents > maxval] = torch.rand_like(latents[latents > maxval]) * maxval - - if minval < -current_threshold: - latents = torch.clone(latents) - minval = np.clip(minval * scale, -current_threshold, -1) - num_altered += torch.count_nonzero(latents < minval) - latents[latents < minval] = torch.rand_like(latents[latents < minval]) * minval - - if self.debug_thresholding: - print(f" | min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})\n" - f" | {num_altered / latents.numel() * 100:.2f}% values altered") - - return latents - - def apply_symmetry( - self, - postprocessing_settings: PostprocessingSettings, - latents: torch.Tensor, - percent_through: float - ) -> torch.Tensor: - - # Reset our last percent through if this is our first step. - if percent_through == 0.0: - self.last_percent_through = 0.0 - - if postprocessing_settings is None: - return latents - - # Check for out of bounds - h_symmetry_time_pct = postprocessing_settings.h_symmetry_time_pct - if (h_symmetry_time_pct is not None and (h_symmetry_time_pct <= 0.0 or h_symmetry_time_pct > 1.0)): - h_symmetry_time_pct = None - - v_symmetry_time_pct = postprocessing_settings.v_symmetry_time_pct - if (v_symmetry_time_pct is not None and (v_symmetry_time_pct <= 0.0 or v_symmetry_time_pct > 1.0)): - v_symmetry_time_pct = None - - dev = latents.device.type - - latents.to(device='cpu') - - if ( - h_symmetry_time_pct != None and - self.last_percent_through < h_symmetry_time_pct and - percent_through >= h_symmetry_time_pct - ): - # Horizontal symmetry occurs on the 3rd dimension of the latent - width = latents.shape[3] - x_flipped = torch.flip(latents, dims=[3]) - latents = torch.cat([latents[:, :, :, 0:int(width/2)], x_flipped[:, :, :, int(width/2):int(width)]], dim=3) - - if ( - v_symmetry_time_pct != None and - self.last_percent_through < v_symmetry_time_pct and - percent_through >= v_symmetry_time_pct - ): - # Vertical symmetry occurs on the 2nd dimension of the latent - height = latents.shape[2] - y_flipped = torch.flip(latents, dims=[2]) - latents = torch.cat([latents[:, :, 0:int(height / 2)], y_flipped[:, :, int(height / 2):int(height)]], dim=2) - - self.last_percent_through = percent_through - return latents.to(device=dev) - - def estimate_percent_through(self, step_index, sigma): - if step_index is not None and self.cross_attention_control_context is not None: - # percent_through will never reach 1.0 (but this is intended) - return float(step_index) / float(self.cross_attention_control_context.step_count) - # find the best possible index of the current sigma in the sigma sequence - smaller_sigmas = torch.nonzero(self.model.sigmas <= sigma) - sigma_index = smaller_sigmas[-1].item() if smaller_sigmas.shape[0] > 0 else 0 - # flip because sigmas[0] is for the fully denoised image - # percent_through must be <1 - return 1.0 - float(sigma_index + 1) / float(self.model.sigmas.shape[0]) - # print('estimated percent_through', percent_through, 'from sigma', sigma.item()) - - - # todo: make this work - @classmethod - def apply_conjunction(cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale): - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) # aka sigmas - - deltas = None - uncond_latents 
= None - weighted_cond_list = c_or_weighted_c_list if type(c_or_weighted_c_list) is list else [(c_or_weighted_c_list, 1)] - - # below is fugly omg - num_actual_conditionings = len(c_or_weighted_c_list) - conditionings = [uc] + [c for c,weight in weighted_cond_list] - weights = [1] + [weight for c,weight in weighted_cond_list] - chunk_count = ceil(len(conditionings)/2) - deltas = None - for chunk_index in range(chunk_count): - offset = chunk_index*2 - chunk_size = min(2, len(conditionings)-offset) - - if chunk_size == 1: - c_in = conditionings[offset] - latents_a = forward_func(x_in[:-1], t_in[:-1], c_in) - latents_b = None - else: - c_in = torch.cat(conditionings[offset:offset+2]) - latents_a, latents_b = forward_func(x_in, t_in, c_in).chunk(2) - - # first chunk is guaranteed to be 2 entries: uncond_latents + first conditioining - if chunk_index == 0: - uncond_latents = latents_a - deltas = latents_b - uncond_latents - else: - deltas = torch.cat((deltas, latents_a - uncond_latents)) - if latents_b is not None: - deltas = torch.cat((deltas, latents_b - uncond_latents)) - - # merge the weighted deltas together into a single merged delta - per_delta_weights = torch.tensor(weights[1:], dtype=deltas.dtype, device=deltas.device) - normalize = False - if normalize: - per_delta_weights /= torch.sum(per_delta_weights) - reshaped_weights = per_delta_weights.reshape(per_delta_weights.shape + (1, 1, 1)) - deltas_merged = torch.sum(deltas * reshaped_weights, dim=0, keepdim=True) - - # old_return_value = super().forward(x, sigma, uncond, cond, cond_scale) - # assert(0 == len(torch.nonzero(old_return_value - (uncond_latents + deltas_merged * cond_scale)))) - - return uncond_latents + deltas_merged * global_guidance_scale diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py index 6737ed2060..0cd69366ce 100644 --- a/ldm/modules/attention.py +++ b/ldm/modules/attention.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat -from ldm.models.diffusion.cross_attention_control import InvokeAICrossAttentionMixin +from invokeai.models.diffusion.cross_attention_control import InvokeAICrossAttentionMixin from ldm.modules.diffusionmodules.util import checkpoint def exists(val): diff --git a/pyproject.toml b/pyproject.toml index 3d50bd124d..4b5a5d5fda 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -129,7 +129,12 @@ version = { attr = "ldm.invoke.__version__" } [tool.setuptools.packages.find] "where" = ["."] -"include" = ["invokeai.assets.web*", "invokeai.backend*", "invokeai.frontend.dist*", "invokeai.configs*", "ldm*"] +"include" = [ + "invokeai.assets.web*", "invokeai.models*", + "invokeai.generator*","invokeai.backend*", + "invokeai.frontend.dist*", "invokeai.configs*", + "ldm*" +] [tool.setuptools.package-data] "invokeai.assets.web" = ["**.png"] From d334f7f1f6430cf20ed05579aba3000cfc6ae857 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 28 Feb 2023 00:31:15 -0500 Subject: [PATCH 02/19] add missing files --- .gitignore | 9 +- invokeai/models/__init__.py | 10 + invokeai/models/__init__.py~ | 10 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 653 bytes .../__pycache__/autoencoder.cpython-310.pyc | Bin 0 -> 13718 bytes .../__pycache__/model_manager.cpython-310.pyc | Bin 0 -> 33421 bytes invokeai/models/autoencoder.py | 596 +++++ invokeai/models/diffusion/__init__.py | 4 + invokeai/models/diffusion/__init__.py~ | 4 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 304 bytes .../cross_attention_control.cpython-310.pyc | 
Bin 0 -> 20594 bytes ...cross_attention_map_saving.cpython-310.pyc | Bin 0 -> 3333 bytes .../__pycache__/ddim.cpython-310.pyc | Bin 0 -> 3044 bytes .../__pycache__/ddpm.cpython-310.pyc | Bin 0 -> 48129 bytes .../__pycache__/ksampler.cpython-310.pyc | Bin 0 -> 7538 bytes .../__pycache__/plms.cpython-310.pyc | Bin 0 -> 3913 bytes .../__pycache__/sampler.cpython-310.pyc | Bin 0 -> 9799 bytes .../shared_invokeai_diffusion.cpython-310.pyc | Bin 0 -> 15344 bytes invokeai/models/diffusion/classifier.py | 355 +++ .../diffusion/cross_attention_control.py | 642 +++++ .../diffusion/cross_attention_map_saving.py | 95 + invokeai/models/diffusion/ddim.py | 111 + invokeai/models/diffusion/ddpm.py | 2271 +++++++++++++++++ invokeai/models/diffusion/ksampler.py | 312 +++ invokeai/models/diffusion/plms.py | 146 ++ invokeai/models/diffusion/sampler.py | 450 ++++ .../diffusion/shared_invokeai_diffusion.py | 491 ++++ invokeai/models/model_manager.py | 1221 +++++++++ 28 files changed, 6721 insertions(+), 6 deletions(-) create mode 100644 invokeai/models/__init__.py create mode 100644 invokeai/models/__init__.py~ create mode 100644 invokeai/models/__pycache__/__init__.cpython-310.pyc create mode 100644 invokeai/models/__pycache__/autoencoder.cpython-310.pyc create mode 100644 invokeai/models/__pycache__/model_manager.cpython-310.pyc create mode 100644 invokeai/models/autoencoder.py create mode 100644 invokeai/models/diffusion/__init__.py create mode 100644 invokeai/models/diffusion/__init__.py~ create mode 100644 invokeai/models/diffusion/__pycache__/__init__.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/cross_attention_control.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/cross_attention_map_saving.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/ddim.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/ddpm.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/ksampler.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/plms.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/sampler.cpython-310.pyc create mode 100644 invokeai/models/diffusion/__pycache__/shared_invokeai_diffusion.cpython-310.pyc create mode 100644 invokeai/models/diffusion/classifier.py create mode 100644 invokeai/models/diffusion/cross_attention_control.py create mode 100644 invokeai/models/diffusion/cross_attention_map_saving.py create mode 100644 invokeai/models/diffusion/ddim.py create mode 100644 invokeai/models/diffusion/ddpm.py create mode 100644 invokeai/models/diffusion/ksampler.py create mode 100644 invokeai/models/diffusion/plms.py create mode 100644 invokeai/models/diffusion/sampler.py create mode 100644 invokeai/models/diffusion/shared_invokeai_diffusion.py create mode 100644 invokeai/models/model_manager.py diff --git a/.gitignore b/.gitignore index 9b33e07164..e28b2c432c 100644 --- a/.gitignore +++ b/.gitignore @@ -214,9 +214,9 @@ gfpgan/ configs/models.yaml # weights (will be created by installer) -models/ldm/stable-diffusion-v1/*.ckpt -models/clipseg -models/gfpgan +# models/ldm/stable-diffusion-v1/*.ckpt +# models/clipseg +# models/gfpgan # ignore initfile .invokeai @@ -232,6 +232,3 @@ installer/install.bat installer/install.sh installer/update.bat installer/update.sh - -# no longer stored in source directory -models diff --git a/invokeai/models/__init__.py b/invokeai/models/__init__.py new file mode 100644 index 0000000000..70abd4358e --- /dev/null +++ 
b/invokeai/models/__init__.py @@ -0,0 +1,10 @@ +''' +Initialization file for the invokeai.models package +''' +from .model_manager import ModelManager, SDLegacyType +from .diffusion import InvokeAIDiffuserComponent +from .diffusion.ddim import DDIMSampler +from .diffusion.ksampler import KSampler +from .diffusion.plms import PLMSSampler +from .diffusion.cross_attention_map_saving import AttentionMapSaver +from .diffusion.shared_invokeai_diffusion import PostprocessingSettings diff --git a/invokeai/models/__init__.py~ b/invokeai/models/__init__.py~ new file mode 100644 index 0000000000..6e060d7fa5 --- /dev/null +++ b/invokeai/models/__init__.py~ @@ -0,0 +1,10 @@ +''' +Initialization file for the invokeai.models package +''' +from .model_manager import ModelManager, SDLegacyType +from .diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent +from .diffusion.ddim import DDIMSampler +from .diffusion.ksampler import KSampler +from .diffusion.plms import PLMSSampler +from .diffusion.cross_attention_map_saving import AttentionMapSaver +from .diffusion.shared_invokeai_diffusion import PostprocessingSettings diff --git a/invokeai/models/__pycache__/__init__.cpython-310.pyc b/invokeai/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53157f298f8a4a4dd7ca0a62b4b1346bba613bab GIT binary patch literal 653 zcmZ8e%Zl4D6!jyC9VbpRT@<=Aw3}?=RX3%S4wFsjxCGplHmb0dM2%z#NfXHI=UYmD zqo3hzSN(;7fnGUyQYz!)bB^vc=Snh76A#vpho|b7?|HwS_;&>$KEZ3g0n{GyYQGM+ zF9H;Z5QVh#8g_iZM%?V>Isgh$mu#CLNFXRHP{FcnrNWGy^^^y+v~W zE?GCsGQ!zCu}n7EmT{VGrOxc0W~@2N10`%JWKFrr6sZnmN0UDg-CG3hv~Yew8ZdP` zEmya6N2=4;lcEd1eZB75uh%QK-QF3hZ=_JNp^b&j$!fJON+J}e8f5(W!Y7;CqP)nf zYipqg!WBf7nb1RLzPy+RbDltt{;O787#%tD?WMZX(ilwoaUhw( zh8!Wdj-gbK0e*V=%5eve@AJJBH0Q=r*5n&4zthT^yg%2xkDZ_IC&w@dvluVcsf|B? 
VdB^ETcUNW(F!1K#)bF3U{}*0p!rA}; literal 0 HcmV?d00001 diff --git a/invokeai/models/__pycache__/autoencoder.cpython-310.pyc b/invokeai/models/__pycache__/autoencoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a47741b0d4fb45a304320ffa59c48fc41f4c2b82 GIT binary patch literal 13718 zcmcIrd5k1id9S;wkD2M2J!aRtYq!^n?dF)ZR}hS`F~;_S@p_ECKJZ}D?&(+4)3bf~ zRn2<0(-Rxkk|Uxugea%I$qa|WkO&DWA^8J|BvK$zAQzEHiYOuhl2{=Ll1Vs>z4`s# z>*{OAfkaBT>g(?v_0I2puiH~oSp%ONKKEkvzB>)$>r9M(93bS%R(n(mTkvczim&0r}|xZTJ!vr9RJJB@sEYH6xDy)>=TZeyldSSl#oYs@z1 zmgY?3?S}A0@RT8f&|2Gd&R*J$oQ%jKCmZfYdJob$kw-e;udx?7Q(_u9)1k5Etj(j= zK4G3TN;A)6%*JBLOs1>tRvey*o0V3j7E0t49t^8-Ti&}~X~p$!D36t_#J{6eZHrJQ z{_UYcQ19UF^-8VXsx;nSS&yQ6rFDBfie-IyJ+8M~D4(ylqL^(|;;_6T+s!h%TB+Bn z?0WQ*>nEbm0||{KQy5r4vxWr|X8&vB*AdQ2?rCGi6t3`|$}HKTBd%48VxBXX+|WD6 zGA@a^vUvOT0!L5wAkYZgI7^20444HXLJVFBbTl#ftn)7N^6}K#^yW;@6!w0!Eu1a3 zTbG_i48Sgq}MfbeZct6_S+%4RAYIBe9P=~EJTNcKeQT3O5 z8ML{#mj#^f8Ef+@_jJ4u`2Mxao&^o}vVg$_=bVYRW>D_{$_~g6#Rq$a8vCJmpUA|Q z$5$Z#&}MN9x#vvvj;-EtRh!SE^va&w^F;PM`_G}J!@WGjq}TBRPQpY-YUvUs}>d$bD_KIEpI8_6^0!a+;R7`9M$rvll9V-xA8IC) zu1X2biPC4vX|$s#nVDeSBzUaP#fb}+uC69ltCe`SwOgl;i6pBE$(5&*nNB;3%Oe?1 zd=?91*IRKiBf}b4JCx<+^_3M2+r@-MEAg9c48ARsIkhBZwIpF#tw!Q@B;U=H%hg6D ziZDe?wpD3{!2I38!Jg5M87!8)zP@zkL8m4xMrCu8Y_1y@v7X-%*&Wr zNO!*6YE+g%5VFLwR};K};3o+31XBbH0NbW~E%7}7Ke+kmYP%U8ZD7Ibt)uUf?KOzg z=;)oT)9q8?t#=-+E2L6Cs^%OWt*poG{>CnJ&dPbTF^eZU0g#>ZOnwXaW=(748@#yJ z_mJmZ4D5pGpxgn*x~w;cl;;q?+zhN~mD?@!1=2!FbX5yoq#xqcca+XbqrJ}nhZ*t^ zpt)&*@9iyPi#*FTB$L7+sv#u~#cq!rYF>c!F^z6f7!N{zJ)~Q>QN7`fw4mR*ssEOx z-y+whkLvk>S9tHi@%J7#_$}Fs-TgNfPd;7;bB3ZAx5pnCQ||T>LthWUM5~Jt(PpTJDcv*pp7cU zQ2L>|2=Ls7C;ALPj4grqHX-OO3ik~=hT7nUoijI_o^!kL-n|=c&%*EQhSzhkecqNS zuZB8tHj&E zAjdRxiO8LDIG>k-Q?2rVq#Z2>yVFmh~;vlU9He`NU|^+U^0k$ zHU2^I_OKDg+%1T~;!kZ9DFuqd3Jb^ax<|VMm8S=LtZH?=BFJI2*qtgi>#$JJ2;^3= zTPUu#!ZRJPC8&WSSyJYAr|<4px(iu17Q3?#3~TAEQbrb0lft$lyO*V7 zytL|=wBjoqr(yw)0waW7v}JBkQkgxgk6FYXu{Z1u2Mhp?&pa2%CFabhnWkkteg*0m|y`bUG7VVQlmq zc{=(R^eXQ#daFOqZEHBrZN1Kt`z{|Lm>AET?hfLr+>xQ0!aAzGzy;&j9CNtFmsb5T zM@cIX0!OX!$S99+JO=^#)F9srJh5S?Opemiwo8{UgaE_U(gkDVpzV{cRzw|5njG|R zKsRx>?cT_~ayOH#HmQ`mQb$I~(c9}Wr@Wl_+S*x`t0DG?a857VXgu#euFQn9|DG|`cmm1OR*%-Uuv*bK1e28UIC$W`uUBP9Kq6MjD5a=1W z^;Ty+PIk#KUY9Lyw4ga#uZEFcb4S!yRuZ#J`%UaPKOdwFCKXSi?TC7RubNvHN~k-N z&aYo*Y;LqR`P{}A>G}4+ZA#@pfviE12=kOBXP`b*F4PcIl5*bMEoaS+oi(ZzbJN>` znxdjn76f&@uT&Q-3t$dfgwL`T2>+BR-_XNqwWi|CT6QxRXEyWXV-d{5kJ#e+c|DJP z3wOV9%9i)iqXDmbbOq0Y_c4YU)f?8flv|WEuePm)MJ4v-3{ub2`hg;uYKG0WJX_Y% z`*~8{^K{T)iq@;^c;E^^!3u$Ek(kHfLcq+BopuAN zF#-R?mFg-i7o|vCB~Rr^bg3jm0(FkNhUFPhq73YnK^s*rUHYN1OcGNh^GP6@9k9LT zmIeES&Zx3o6k~W`2V2-6qGPCGVQaD3*iK^&&!)3w2&d-@_j~}$B-ntvx#q(8^w0vo z{&^RcOaLabMSwO=zET?^bKXY_*$p_3XdySzLRK52rR?JUu3KV~Hzp zm>QL{^oOEOqaG)-WJK5@LvB32?Ib}*wrkLMGVvPi8bU0I59Ow|JOC1Q+SOG>gpl(e zWz*-rtUhCpaGS%+H^(Lb@Wh6Cm zIA>&7g?gh>laeoZl~8H8T-sc@62g0`M^%+QHOOYxB;ib@_OzPQN6}vNE`Vu!4z35t zrth>kG)wT!e$UCJd6wzD==hh`%6{L;jMs7gy}KusN@M+v$tL>61H<^8Nc0KK0-2F` z5LNl$tny%#)KWwT%itkcH4eV#Y#PBQjq8o*^2cH8@cS_1Fq9kiV&c$2Ez#VB8(cpF zSx_jSt~BaGnXPL2k~Cg&hnpcUAVWR}kmLu;h-M~Nab}{SFXN494nXT1&kQbRFFH=) zhmO;oPgi2%ox~VpJG!R>wtE?gAIWwDb`#_5X3s+>jj>#xEfE76Wy{feWULHW)rN!M zU!YTh*bHRzp2W>ou6h%S+#N6KSw(~pC-%$7+^ zttyx;io5<%%`jc}`i0joTvy8N#H{jJzGY+66aO^)NP0Fd+{y_0q0>lm<#Io_Tvqf^ znwq4V-(cU$())E{n*`)nxkd010p<8K`4HR+zMRb2o0-;1!R+o!>2i`Q##{_qqGVL14ZZ^eWMOQ;q=XL(Y`3vt z#TEjW<~fA*fWf>|n2i9o85UV!PA_Jpn zlOa%5LL+2G876UDAPzs;#aLYB=g!hoUYnzL%;>YoJZHzV)KcSlHA}l3H49D$%sILr zHLGS2hhBjfDa%5^+BUM)=WDmrzVtrz(Q#?^=E{n>E z`-+1w*uugBvJw2u4C*C;vLZnGNoIxa0bGEyrCrHXBdkcqL=e1# 
zm7zjhjCN=%##Krrac&irrt*5VKXMBz(A-zflXjzyFiCxtOetLf-P5YT8q%RN+Kow& z4nGOfarGT9`P+Q^cUZ|omr4ooSGDw6mQ6T2yLCu!9g)?6o{RGEXI1dbgG;Q;75wH+ z>tf!rUe4RrOL@ooq2-dAJH7uBU(gR|do4BS;lgpz7-DXnF1KOFdpZyWJA>BT#3Yp%c>@}Jfi@}}%Yhp| zZVQZwes)Vmco&}N?*VwIK!ciY6UIY@2%~wEV%|{(v?ce!(sG4)lhJcQt47Z|XX@Bb z&x6>8F%O%}kvH`GVJcea9|@o>)rx^=lCK<^v@IWPI%w0DP4w|->~a4c+b!gsStwfbLmhu|DQzW>SfO{mlYw~FTI8W+$L$#dP z5I_KxGqA1|cPV{3zWS2J);(k~4ub2lzO# zPZCg-%P$bnwvb;T;MkL?{wYf}Y&2j_45xP!tK3XLalq+C%nA(4A64urKK*U$%^|^&+igMF4Ty<6p&+e&U2_Btro-&pk z=u1!dI5TzCS*Zu^&EUvcpOxN89~Y-M;TYx8m*hi3Eh+*~n!#GIXeHDULg8^JqzWjw zjDvTyXf3J}9>k&cqPkvv#@YHIAXO;e%_7CBYL(QYsim4n!F$Ms?38hl?*7T~?RkL|qEv|qO#UNkqHFSx{5rum2)G5BEfhz5lgVLU zL#qBK6fChrl6L@SDRAao$y~1Y*oQVJKhcK)QVJd3GFSmU;`RWkRNX94cL{jU0F{{Y~?forND^R*u^ACqqJv)*;U+x?5j;x?!7iu$v!%uQY3aI!C``{2oy6= zP{>yiTupEd!L0!r=v0gk+Z8E+y`@7zdiH-YN@&BW9@JII#~iI!SEn@f*(^YF#y zV2^q>k99qp$AQCxU3E}+vAcJSMuuXDB$0kNM|xSe;fXo`|DWfBum$>OgTrIoQm`m* zN5Aq8g0~aA1K_y4lPHbSQXq@Oc{ZxG;oZa*30^~R55c{~WWsdwlF#PmJkIjyD~% zTtPysq8hON=}cmFUr~mC6g_ICG|`h5+V_uXB+f1EN^2xTYa?hNOXd-MN8D}0X(95n zX%WNn!4-x!av9hio^&`$;v>9PX?Ae8qsLL#IbeM*CtJXId55Lt;fnn$>wSx0CzTYT z;1gUx6@>;v(b-eS9DBwWai_x$N{|63y%h=A82|Uf4u%nZef%I}G&Vec(Bkbh@<8s_ zvuK0hrwNo{TO{@*!TSJGGv))#P)wj`YJzm0LFuPC^En`sv3xiT^W*d1Id-4JyBR!+ z_WulsrhSO)LF``ni9F82F%Kf{Z$g=+V)n=ZPUFLY?nBf?Mci?l0J#kGA*9&za=O-S$Tu#6zt1eu8HSK1J{x0OEjeB(ApQ01iZ*e|6Bp7;!FF!nY{GwPgY6VyVQ*>BJTMQ4lV{s+QB5ESVi4q9b30`%K$P$lLk#c#c3NwtjuW`ghnF_{InKVTs5O;=bL|j!X zsF_{Mv2;)Z5#F;0QA>MKeg-|u-vz*t)8~j634Wj84+xGCD8+XRv4aFx5h&YAvCbbN znToLAW6>WGe4b!Jgna>dd!UM=A`r%^$0K<-UIO8J0i$?%yL)(iF%bs9ZR7Fa;Mj^{ z*9MU~`Nv3T9r-83zDO`J8gly)j7EM5$fW%+9*rCK*#gw38o3KkL`kE%1RLyiVIc%}Oohz)sp#542oBe{RETdFjKgWkW~S#e46_CA z5cal>P#g|aRY=aoF9K{mN3#rhLWY=6n(X9$#WtF+QzF{w1wmx)nV zD{@G(>VjywQ9l*Re?u z<9q*O#K7d=a2$V2@Ku7Z5lr;XY2Cmb{|6wGR+kP3mCXCK54&qeA6Bsg%~56B=p~An zR7*)guR}YebgfQ`z;Fk+U9L%7GUNPJj7NTx*l>I|F?ln=zW}r;fvyIE$c~rFpXu*T z9}aSXhZs%nuK zw+{8}2BLyGMr`6vJZwkp9j5V%|3vxF=SwAu%6&JBUMaS7JL!o%F~ZtCaOoI_ZWYIp ze)tB*cn_Wzm|#Ftiwthdw!U7ZL%zfQhdZV=>rvEv2U=Cpd|2}Pcj$cwbAGgM`Cn*b z*suI9lM~&L74Jef@_Rsb>_l7gV?F~r;ll}zUjD;%UHlir{D`S8Ct=cvui-p=aZ!E; zHFxa)3KT4InJMv+PCfDx4(`f{yM=o2<8bM zB~ZeP!`6}4SLC)AP~|qxjQ&Ib=Ms@p8+Wi>$&%83S;P;nf}F*vS9KL45ciLHvL-*x z{9P({q19SgS#POh37kS-$&&0y#?vYT`imEf8}(%sIZh766$Gv?}P>VOg)_M4xzQ&774fTft{Xc?h#%mA%!V@jJmT;J_$qYS^H#wE Ubmna13FC>viMc(-6Z;!66d_y$MJ#9nB*+!1(O|k4JBzvS zx)~RQM&H{Ow#oJOh3)vxH*ah0SlA(D3&`KTaEIiJ%{yE6f-U*6=3TAXg;~j$nmb!} zFWfEp@#a0Pdl&AN{6uqCYxlx#tAu*wteF1XYE?sv5)ZEv)gXn^}L!|t=K0Q z&dAHfQf^Kw`>$$$&YP1`zmDXO{N2i)DTU4l^?KfBL6%VcGPF-m< zn`p3DM-hd)Y_Hj=Imj-mPRn*0-bMS$5_;2iU-pr>cvLCAL)a@D;aIKR?)Wtyqx9x+ zZ5?Z`wD|pW&0i{;p~Vq}S=I8D>z31D`fz(S%%AS^g<3PrJljSF<;M@N_#L-h$3&@* zK7|{(X6M2MOb)UWUcJ_?%IsF1hQgh~>6Uw;cBIo@#6|IFqwX``=ojWxSG(*2o@4r| zM~^*s^2o7wOx^2fg^~q2;kI2>^E>M4M%QgN+AeyYJJIZ%uQk1J@`7XrI*<1#r!eID>e;kUL=MIF$=MGd)&Od$lOg4X<%ZKkZC9Q^@6<&&Ynafbo97 z+3ak=y@K;uXPdJfxuWy8ogL2Y$c^FMJDfX_D>*;t*v?(ZjXSS9v(8TBCY;YXZ#efj z_o9^z1rt|qI%Q`Mt|rmaea>E#nR4cw%4?~GY3GNW1I|I**@zbJcOF1)#(B_r2+uYZ z9@hVyA9jAkdCd7RYS>(OJh}UM=SQ87I7e`Ii@VkNJI--?DW zlyeFtw#RQ39(I1*`GWI3&NH}sn{&oFi^yCiY7MzbE zcc=4~^OEy%|8|c_c%?b zh3|Wvw$s7)E|lmxmr!E2v+gLzL#|x#oomidI#-;R(StqCPdTejfcy73UvYZQ8ghHx zIp-DUd!@As@;~i-pYur>?>=0uJ6E08;;=^uFLc$lpV zW>DjRnsCfn>;RN8pT_41>FZHs{M53s zZeB|nsh-hGRnxsRQp?Y*XM5%~-pTeXq`5{4Y5oFJ&{CwuUIr=XDEe&HjsRqfcJz0$ z-S8W=W+QNIViG~y#9+4Hk-Xk!mDO6SY3nfwLD`#BKs+x)vh|Q`j&iRc^ki%m8>GhW zMCdnp6||ySyIgBDtDq<#L>GX=+MrNfOvz#cL~99@rL|IDvRB$dJG@n(g;w(ZZlKt* z9Z@6nS4fb$?CRzcx~PX>H`?|yhfiUY&1O==1%(z?+J2*n!8Sbma;>@I+Q`(pUDY9B 
zbt?8zcd@q8^m+T>iAwVBe7O{sd{t|Epsg)ed1?%66mA3+tX_82`HlxfQgxN;D770; z)O~!Mk+}-9(fUudYA;tc;UnsFf%vo0cLvy~elZ-cuP8-iF1N#J&%LyQORQP7vq*Sz zfTu0bZ7%9o%5hWb0CNvAA!gOd-N+GPgG&3=>IZP;O(Q8zW{s>-!bkrr8o{Ph1CU*b z-w2D+4MF&zudctM+M#v6(`nX8*J;}pIWl}3%k879i+ zSr)w;D1!wo20x!H!_5om*vZl$8!@4tAdq-%ledXG1fJ~9dD3~|FbDRju!N`tdz(z@ zbJ)NA)dn*P9+!;bUfI;TuyBv4SGoO?fwgCkJ$wDbv)sbq8v$?^+u)xDz^-CHO=i`# zvE}O*?bXhT(oc2I1?Pns>1#0a*kkrHuCE%v>g@|rCBbIRxM~z7m_`s#n7Pt${G~9v zAJDV#S@m2QQ z!RDcjmsEYd2x@5e?D-DF5AvPyANMPnFWG|J<^b{3(bP*5Yl308b=I*JuNi8pXP6+B zXHu8aK7e^SwVu8PCc-hd5eub*7ZWhVVhY9~I1&d@m}1_E5Wfs|uMG)`-AP^pZffJ2 zK!uWE-yxvb(O;O0URLZp)|(U{Ok;eZ)ogf3FS@IqdIWbz=XX4MQ_t~hC?TwDF*Avo zHE&Ft!SZ%dAg!lx!R@`Cx(00QO*{!9y^pp&$vHIE%$|uEEdrTf z<6}NeC+%3*Akbgh=n!EDdU_<~SeVCba5)+D8OVP(&09o_muhWDt0MMs2l@c$EMo`MY!pe&~hLj4p9#-3O@q6 z6jLIiXeekvgjV2=0H-+soHt|GghFzz6Oq*3%?9V1jf-wMLZ<*rudW*BiKQBTA2{dL zhtT>hfiEKOjrL;a3n=4};uS3myF1u2G+;fyEO)&?fc_Zv>q|pBm4KhdM}|9rjL*F) z4D3}hqQ<}=^$xUaHmf3^>EWB*#!e(G#5>dD4QV>rb0ah+fsb-p&7)Dx6rDy6+<`3q zPTrM-%#b>RW=(VOwfB56Q^tWu39t)8KPZ7v8!OKLgodttM-|YD8D1 zp{wzsD@ghSB`5WjGxd4~b+aZZZKc+7>Mg&}%lXBtCUO>P&f)2FQWvi_4qeUYD`(T| z>9rjCxf%WZr*S_~lkRE12DTQ};B4X8E){#+bX#lp0|g}jl}RTe~2l_$%J7L-y`nwYm>xephc-UBo72i08Fvv#Z>f7h?(;fA7*r zN@PP+G;sAD-q9VCb_v*M%n_|8q%h>6xA_h&t_{}P-`@u@9^3Wn_F6LBh$V#RIYNvy z2HIVwkPMQZpeVU7EQHr@L^ly@sPq*#+3sDEpW|fiN~+?tV^-TiO;onLlwwJzSlqD( z-_^qQa5Isp9`#Swas_xap%zJXn(@Z8E396me3hy+!mI`%wZMxsR!+FjcStCOGcIQ| z2Iq6N#N=@%hmnLi<#s#Oh7(%UScC;{<@{o&iJ_o{WtO_gq{*U%1U~Rgs!0lz$Iyw6 z0{g35+RO6!I)njMrk-ORrnh53-kbIhAl3fEYNwY`77>Mbip$0LDr?&`^5jPi5R#J&x z7%oScKKP&)8n)sB1=eE+_IrV8+hH0Ry>hNRxb0&HT7h-j$M(18%ck;iPhpP8;|mq# z+AD>Gj6U@Hvc8hYB9}zCi&pusP)zhMc`FZ|(lkDR6vC5XT@SXM8q~iGz7>|h1AxL; z#j;aRqmuY9j|^Ehbja}j92pQkDg`i!P(36{inl<{V2YxGLR1GJpO$d>rY4{3Pzm4~ zs_bh}Pk>m0d67FHhH@vz_a4;@tOde)2<=!=YAolR zoRfb&Zau3$fl`HDHYt?{G0r)~*J(av|1wIU#8@vAmq1?%y$sb7d>((wrJyew9F^%w zn8wKF)aQSLDd+>99g-gA2M`8PjTDh-CKt+yw$IY2(`tCw@)xR|i@|18l9Ygf2qFxv zVkkASL^<~;J(3vM8z5>HycQHTXBnt_ewLJf!;TI!LJj{_T|DJjbQm6aRe4wLG_N!?G_t01GS_^ub` z>acm8cU7H(i~oj{Zoh}S0@E-ilezZBAi)B@Ai0`?iqb$J_W}j+*)}}O@EPEc*Y^Mw z$v5ujdA~j76hwEB0UO9C58(-v1TRJeW8xU{>M_KsoySeFg&7@n3yS{~O=!B_&> zK@y(ew=k$L=$tbldI%VTuK|*KX>~pp3jnuMfLnaZ)3wiHN)uG{tPqt^u+?h$8sG#B z7Oa;5F$zV=q*d&5w52XWjUE+kCpFeQ*b4cUEox|LQwe`0OyF6RTXR-z_hnEW5Be_@ zwxjufRCKRk9tdbxRKp)ork$p~)?2C5D7?7Rv^6Qz_TGr=h>W|?rBUeDT0m_wrkNK# zNIKYZ{iTkh)dEViFrxOo#h74;zG*B_(O-tLv9A>F&x)9}8!+rbcTUb$$-o8R^zb2E zMSHoA7lalBcSiRiW?x#t<^)sJaGQ=jtGS(7)dCC(K?rt@78ziHMA;Zsr5O=&9KoZy z53Lo*xl6UnuI)ksb9MJ-$;QmGv$GLrGppN9gk5k$4qF#)h&+(yhIX@NQR5BtHyER4 zD(XS7A9Lfjy3o7~tg2poVmRF^C_QKxAi&ICH)f@o>&A=MjeYZAo7jr*T+LeD1-5}H zmMVELLRvK$sSgKri|RG@@%x#4AK%W%RNjPQYEJYnoGnrw(NwlbLYOtJ8>VT!m$l3r zV0k2OzM8kT8)jPHDdK6~Dw#k#&^TH*MvzTX@sRLNwkD5{P|5#}4E8sz6NcIVS~H*z z!(N5E1)p(uQ#7UdP0jjgQadW~C6|?4ZasGmwARnB7p|q=fa?izMd-~ysqo$yO>|JZ zL*>4fUCW(A%@!1yS0S3U^9NGikt-<2e=o3BT~{8xfl%Aa_tINan^RYbNpufdcmg^q zt(UH)oWgB*?iA&EJicb#z($!KDDNV5bz{K`Pmu-UE|hiFc0Zy7z5b#+*PD0+jtPW= z%Qe*?f92U-?m)bZ-6YC-yAA{*W&8%rU!TG}s>hIk_t*3A=|5w9K52C?{6c)=W!N>7w0 z`ot4-6PosFeZ{G%eRv(T4%u*vc&SvoZtWtCjv&}oGK5|@#?7&+0XeidqA-7@rrOvX zUN}*&!DptLT$j8?tJ}n8Y5MTR*rb*1iG-o4WjI6AUepG3SRK^iIv{$w)#MJe{(bFB zuBB%`O#6-ch>ajNz0rVx=bieBixKkx>K_#Du)Bv@-N#*>EvE#jb zW}2#G{3FjPOfw=^mj3|J6`HV@^TM33`ZXbvEW?LmDPHVU@SMOc=6(S0Q>TUu?! 
z9pU5K&~`ZRAK=z;QogmEDS=484h;?f-Jp)92XYNbA+=&4pMt$huCE!;n`CguTF${* zl_Lx3=X<7KxN3S&@~&SbQFjV_0)o3B_r{h>U?y?L8FNYvqmjB`LIae>H`rQ#e0>62 zHiz#G+`>+34eTcvf``@$!MH!UJk>K7@lMbBiqR{4#rT}DmOqzj&sr&edi4uNYOQFb zg3bQMUh%52KI4o-yZ4!P9^;$n6~RJm&^Ehs*wU!E_)4*z(cf2$E2&;yeHmNdxYR)F zn|e7x6M2r>`^6_wzcbl0QO6YP5cVduyjlHfFTcJ8v%Hn1+dvHK+u|$C)OMnnwXt4) z`L^X9%eUkEj+xY2saIMXN1vv9CDgaEH;z@2`3G7l^~QT+f*NM#)h;Y-U}Jv~N025Y z06mBuf>|48nPpDgn0$CoH$fii`@ec zUEj#GZ}=0E6+8--{BRo<2lr(-8(?2X3{r<3CCR_CM3z5xvf^O-Xvw~Alm*=dn`WV_ zUIA}F<;HpVyJ6;BC%BWtKN@=twCJYi0HHvs4aK(os2!y3m+kVM;j~~~?L5^{n#ZS; zfXI_t@gU0EX~?;*YQria-V#K|KyPfUN`*<6uUuk6wpqx|Vn* zD01>@8cDc8L#|e*0mbVRak_ZBV;iya5&zLeCVq1Bk=@Q!qQ?_uBy~=goR}2VLmPlj~!hkG+gx- zC**M^hneh5lDnDvF(eML>moA5rxPYM;Ca(b=Z)-*ne?T~xUxnYq4M zd>Lte*$dat*d_6$*ns*pSjr*%O&H-vn4ixh10B5K8^EGP$(hhekN1Gh;VnS6h#L5< z{MIZG@Eb+1#M1%770iSv^BA>15#D~xhd9Nns|J)0>GkZjl=q0A@pI6(r>~l1ka!qAW@-`lPg+Ou# zGBcv819{a;ZIwWD2_AX=GlM&a7FTx65Goj2wKVEWR*j0&gk=@mA=p9a{ER;*=g-+Bv`P~hAc{ECZY>q z+~9~6$!s*;CgR|3BA54G-MePys206+sXhHZPuuGz0 z1Nf%Gam{K~{f>A&m>23$A#|Eepmg;n3TQO=L(KgM638$KtEHK%k>xvvLch%ABU&tG zEMpQoat4bf`i>hThW8;Kj3G5#2CpZp^>euJF{50cNEHTUh29y<;FI7T^mH$c990+h zqan#IO$rLbl zZJ6O;M{(!Qo%Tt#?b*jWD`@OwJ1!R-;lp!>pP4^7f8vmRHj#MfZ9orrU#$v}k7N+f zbz2^L38EhNt`a=VN{$1whFy4@ zFa2+94}M2btW<&-zHtmw#9qZM1Rs$xCc}vDW2QauG9<$6GuNrluCTK*L?M=yVD4x~ zZe6KCdCsNm(B6)Aux9MIOhY_B9}bPx&}hrt%alvyU=i%j#d)X|xeL_>B;iCnO`35F zO9BNDz_1cNd|Ow&(BhJ-O)Q$fyrNWPtfw1P^G^%eh)%&@_Bl>x*O;h>xcIarN(XB6ylw|Hh)lVlqNHZO z(-q}t!ngxa<6c6C7HirE`tpK!8?_VPWFkAQRYc>ms*$#|?d(H`23A;n70LI$)$GM0`9Jyl0N*dx#(ZK3`4^KZE-m%aW z$Ia^+iD5=&Oolk%<2ZnCx1DTt#Vk57g?dIr0}5j50ARILMvesjHMFkpY*V)E};u86_Wj|InWMp3{f zC}q&$zNjI+{WD?%9(9@6D15izg6zA~(1Dap&BI&92&a9^imva`&3gMF4*}KAX~=?1 zx9{?SIif$fY1duxbQR5Mox3~!D|GYRhyF!E4VF90C>` z?Sg?o3Pu2m6k6M8k;|?XpfouR3{!9lbSegJu#hhbw#cfNa0hle!4{%RtbIB?V7vI6 zu?2kuG=cx_5k=(ObTf|lo6!P1r>I7-#VJbKP4mL=cs?qlNF&pz{%E!<0lQEJ$n ztq!ym0zU)DVivNgsP${GmS8WUB6_Jt>d@gTWVkHQcgE7A#|8wp<8g+hL!jP73Gk_i zd_EG+VE}1r;FIQASnouAGpU%KD$ywpRFTX$P>c`MC8rpO&y$94f?GjGHJK4qkYG0H zUes2MQexc*yRihptYK_wY zL3HSi7-en6iBAS9q_h+eR~pG9t!Ox7@SHlLz!uwkkOturjfmF<+Uvw+MH3J;F~H}zt85k zA{mnH$p9v#;gh&<$ohd8K&vthIUl|>%P?|7nx}*~?HfE<63l|_qg%Go-3Dz9JtX08 zgK|6t0(TB4hq7gqwa!7Wg6ll458yh(YkX(<4NZ1#^>J-@;=L?%)n`&ya1^12bIg6y zbnJwz1wlc419(m2B8>+35^J1?2!^?b6RO`=Aw&QkVaq`x00waIoM|js5*;@ND$w@V ziVf8b*u%(NEE@?DW4qp5aooPE;$Vf9=ME(MZ{&V_AoYEoAH4Pb=-_JdFgT@2MZyti zV5&6Q-4(xj9+c>!S2scW&UXK@9@L8d#@Xurzf) znENgo*576WC*TOBtN(#+U}Hf( zg24ydiJnP`x-17i3|k4h4X9h>Nj6rb!I>Kz68Nud`>T+-z&nYwH7_o^dAu{P?=EMZ z0ygZ2SwnP(EfjSak8!9+;iFv1_6{Wa}`y^f`d~dl3m)|iS zEPfrHU9|B!ldosi((T?rEx)PrPU$vqZm9K*q#iLU#r5FbX_(*A?T_R6Mj88z?z3Nt zY8!VpT?02caJ`w=TX^nfGx*Zf@`S%Z_(@Z0OT$!`R=c1sD+8)m~M;7-8^D~XQH}VY1HQLS#YeoK(T2jt!($^i)n_2JcoMTA5@K#&i zh?RI3wcjpEEq#B+x#ObwrlAV{rsd5rqk)%&F>>?zR_0(fOTm~nhFl3yGTtls+m~;X z^7o>iJ6VsP_F-0YY$(;n+Z8FHYkU|b?iwgDD`=N# zBe_?PL?A`Cv-d}TwE^q=0P3`Ddk&?ZI6ffwi2`Kc_+?b)X4W}6ub|m(a4&sU@hdJo~%bF4?UnmglSsbeF4r$I!{pk~^p$^@$8hT+dxCY$JH3Qz~ zd=D?eZU815DMMgTHvw+04PR4e7bDWA=_Gp*GtZjDB~!9n^x;vsjk5NsiglubRz-e~ z8-cP9O++qENgbN!8Pp8ex>`oiC~MyjIZ+#Pngi_n*;ud;T+gy2N{?7s6H5ldWF07;}PgdhEf5VA8o#kM5|cNq5B&cnWQ1DK(RleyC>5A zCp1G0&lut06Y~gxx?-QkO?9Q=X$?)}6xWA3X@_QRN}T{#urkq+_JJXAVRm;Rlghzw zl#TRBWv!hWf-ekC^G28|@a&Na?&jl*ct%Ak;1i}BYOGo~Kt)-E zI%)!cPD$U4Vq_v_C@L|Cq;;gJzMb0~4k#tArV;@D^s?N+_?d-e2jckS)NB!!Z}CZdXi zI|xIEBWKsR&;6BFFLL6xve22Mkc^|;j0R%&KY~4?h;Vy^MoaxF z8#1b*T8N8?A5>(M8kpa3JYE#NCOg@PBzH6SEhG+QwSR?-HA~|+Tv4Fbu;AXZ*`m~j zD>xL=D85~um@ublRY?9@rTnBQO5opc!?ft!GM$D@M+ZctlW811fVUczjxAX_#ayM6MlRy?>06xc?&+D((?g2u9&q3u 
zBYJuZ<)M%7%a6g6u-JVBEkQbh+ybW?$Uy}vhj2C~+bcRH9x}yS<8dpI`+x;!#J)2@ zUO$C82qzO@MMQg$D=4>iQ8y$j(eKk+IStFgnCSj<>J>aIX%|JbFhx1$9C|M5qw!Vv zyR41D5po94rcwVG$C$LfQJ01bWO{Av71Rso63NjCvOE)9Oqhow5g*vcO0*aZsT@J5 zVi1myBbjNvtCMOskU$_6EW7}p<69((4Q(;!G> zy$Bt4@PV7pOz4;)JdZ>2qfh2AU=6!P`&;b-h(#sX#K?0kxVJer8k5+3VA-^zi+&>9 zP{H1P|Ngm$_IqUsVSvDQ3NCO}u!RDZ!W;zqgkI?L7ho=viZwL?R!3wLv3=0m2bMIr zmoJ>y?pi3kN_+LXJNTJRmFSI*07w z(;(#F>SM2izGFo+)7s%9riPrhgO%s?dO>%nCp5M(4+=O#-QMk1z#NGABCe@VCvwNp;eH^|FYjQbMR)kF@d{UOPvKeg|8xNpeKZto47tL zr6*8d{#En(h#Ok8NCU)aNL5Wd$n#j3EyMc)GSFTOJ2pWtgp-4GL^g4BQ(pnA;4nY1 z;%mT%M^m(M!tThcv|RBBer8K*EqejZ*gZO6-@b;k4cexO`rz*EC*~IE&rq5VHUoH%6PN(`r>Ix-?2~Q4XkpKueG#WS zDcJ#XU&rhxe^>>oVWAUG8GlJ)&!B2A<#`B}5mXeE^gm=?LTj>yFn#|u%DiocW@Ue9 zygah7Vet_d>oL2CYwTM}E;@_~bCRpyqI!o{;$7 zz=FYZh!mWx8KP$PA4GR>)haoEQ_fq-x(vY(t_fxW`h+}%Df?F(j1AQtfrH+FB z0^+W5XCYeWHB^5?cM?p3C!$)?09_g@YeX zIyS_-1Whb_fuuylkKkII199f45SJ+z=QY_$Qv%#BHfqs0i3^TMmk4Il_d;NAum0xjIL!9kpA<6`W!DQEO+<1UTKZ=_#?SDREHoI@r0(7`ONlD7=f zIr||Jo+xxmdlv%FGU8T3R9pvynJNoe+K8Np@C~OE!Sd%Y`PwK#@D&IRC(!5W+0)f$ zjy-+4dh)0~>OBxWOJ*k=Cq_>g$*{!B^N1io%Wj|T_QYLZHe<3QaeAn})aiJ%@kFXc zmU@nWw+Fqj^2FRT;vr5;q-MhNX{%slHoSv7E zX-9Yz83BHvh_iR&Y}BK{#xhtY2@e55e?>9wu8nxI>89J(PQC}wsc@=_$W2wEf;c*Y zmJojz57qA>iQIa_DZ~I4ANe?tnHNrzpYUNPVg%xdHd#EQY#MhOmVao~olmsPEWu)#Qo^1U1=1scs9AfniN zI5RmcA&tHhPJ$_byOrQm5tN2lf(&ZQH#DvU znH7I=?gKcVgy2dDDh+_1FK^I6U~>>I7)4bp8qAI--g054;o;apm`A{XmWcM&G90Pp z1cIm&5J;|}1dUi7&`pFBWUq081v{q&FEk;lv8{F=pfAj`6`X2BI9l{dG!{7wM=0J- ztd9l~;RdL8_!L2cuwTLfg{*Lwo70o{F5#r$WIEV*YS197z4UH^c+{Z*fD9r50Maq6 zZe=t0P%_=x^F@2S#J!LFooK}%h>1+npW)gf#$yls?SwQ235P?0-c4eg!P9^^y~dN6 zu(lwGjokuD!7(OAAD8o+kuaEagca_zlcQ8z=fsm*N3iL4LCTK2dA38fxLp%rN{9(G zk~nn$xr<$KUzX!c9^AZet70ERcSdT^6ev18PCeWpNs{3qV9Ai^+jlOFdQSlpjY)7x z5jS;w$=KA?;HfjXMf7#^`aODzN2k1?r0PWfBtMGf zmdJ3HMdC9ODmGr*At0FqT+kiz2(iRJM;#O#=!VGP4M@SrTu3t>7`#w;KGa0?G#Ioc z*31}of4X}os1q0(7{#GbSxaBqguo2Dtuh{4J?rXYyJ?}8Jvbd)Kf51 zJRl7giRX_pJR8CAB2xD*)Qwv4G|Na}%Pf~ejU|R8amHz4jwoGL6gkTfIjGQa{bEnf zV_Z4{OvTV9sPWQ~sBIr@PoV9*v^{}3^U`)cZW~v?UtqL0NYt}OUrdW}d#qQ4@blnW z31cnv-~a$a^!PfAyuhD7Avro6eEHJrcwV|{tWO}kgrRQr#^LmU%&JzXUds=jR(#JpNcp((VIXoa%&rKcSCQ3h*iIjIT2M3XB=vqKT06_ z6dZARw3E>I%4m4@0rk8-=YT?yT^BkBXkVkF-%F~qa$yOYfP<9>lbR#^d$4kUWxqX} zut&2od&$U>zy{uu9Kk$K!w~FFb3D5Ze{M$BhwQRff=P-hB&k?$az+AD{z}*83;mrC zJbwfN+H;MY_4solTddBBw%k=w1p2wTsQla&-@Y?s5Z>9dWSHodb=iFCBOwc~O^|o4a)}VWW=E2PeTkKcyoH#tNuM z9@Wzw;hw~{2a6tbH}n_P7LW>J-FDQ4TDuW|oK=E*QI*WQ7!VL41L1OSp7u(ZURpWN zkU4qr_eQ`R^}n(IVM==Ru}Ig1Ut55~K2704Nf4knH@64>hY$xjak3bcz&-L)B7twf zhJh4=gXTaAR}Ig^H&BQ-ERVx1wg;#NX0;i7X>y9NoQTwI^Li(08z6=hjsKjOjkqeC z%A4Sk-i4O=U8quS1p7`6lEIb+`tKm62RkuOdT-Ek5|ia%mMYYx@vIG}vQjBeYJbPR z@j&btK$5BOo+?ZmOWJ0JldvxG=V|JIS&C5$fMp?G`1-W({pfm_XOLhSotQ@`QYy|| zXkDo_FXDiX2R^tdBoaC>^)4S}R10sEC&P4|KRKbdCU|Hq%zKeRK^rc{xqjmPn(Kuz+YWP2nHIj09T<4QFHx!inLD{b?>o6Lpyd zXt=2Ir|#;8^{rBY$0%n+Gp0}TfLn*5ArFs!p7Ei?q`wgd;nn!b*60a!Xnb2v2jRK- zjqEs<8*vff^JxUPeTSu1r#PMokwuclxIRHOEPq+#V^2d?!8ibk3e*BAX^Xd+Q4xA3FgotQtqkfk9Qgr#5$AophtdFjaf1RMt+{6J0rJ%A zWAd(;CZgj6Qm$;xtG`6sXZZ^wuzv$`JargDf&OwY?yFy5!fvX6$s_?tn8+?-oQe#h zBADr8uMaTy5Z{lYo^e{#StQ|jxiu64Jd%Ycmp+w7QWs#=D|H5li@LW zg$b)tXPG?0WM`7x&D>{^7?>-MVh<%oM!^d18G@Sk3yw3?eau7OKXDls=pGm#IO-x~ z9vB;7GatvL=>$xvkt_@DLO5T}RFPlL`Z83<0J+|@W(GxHLhXS?ik_THUAi4EzWLRI z+Qs(;TqE=ZYI)m4M4xbK5y;SI1e6PrLRy49XYfvZ>UKZ+r-&N$dFpB8!m;=WEWCQu ziv)pR8`y0Ja)9%pR?PKg%=K6@*GVbN z_MI}@4<@sHr=D%Kz^zh{cnG6&n2|7CGI4qeKhAlqGWi6P5#T$H?r*z!b`Ul|W@mFe zJLzD@FhI6Qzmf>?Lo^o6^n)2wJHT-d$Z-ES;hdhq!5Bzb-|UDykdB`` zej3sE#j%haU0?hFGY1ZhFN#VAil695p%5Dck;K5DNeA(38#or%>AwfvkA4e!9P$}Z 
z8oKow54u2&1gc6G1&KUMPV%m#quRcQ^VArP-pRMnkEr`F^1Yl)A`(Qvxnp?BM)giZ zK*d)Qy$4r?Vy_RK#2XWRJVQ(b4F#~pK&Yq9gBl7PK+~bj6l0GLq4?m6~1ar!T z&Bplr(@#e9x~U zI^i%`C;p?dL;Cd#n1Gn9Gn9scA`2eXn}9=1YC>MbF?@)hz~&Tpp^>Lv9yTuum$%U# z1|m%i4}lhpE{}46_~Z00@2O`PI{I1)e$83^ebG?Q0Q*4vd}AE;vK+%UFffq=Ak$#h z%b!Uhrr1Y^01fy2LKH-xxL(9N1%GTE(Zkk?PHt_?A76&5%*i8wk_jC5A#m6T90b2( z3WN8rZRm|T#fM?(2NN>BykUJ3!GjR&+8MiQzHyuG1r>6zOiZdD?iJUkdJs*q5zxE% zzW1rYK1$THFAetaQv1>cygAL$__~Kv=y9o+7m;=h>BL(QnR`>cLL3i`^WaRP4-?Xd zjXem+vgJnS;$IK8#Q7;t&5P+v7}g*!*aregVB)huL_GYYT17{z3HI2qp&zL=>nmiS zp{CaN5y8*HF~BG!y(n%x!92$y_>5_t5aV-tau@?HN9Tia))c*sNI<5&JDAijN@c)< z2$Gi5LDI@0GaOy~KAGnFDJ$@JZOzBlgm#;9toi`n2rbeSttI|E3+32P zZsDwMS#xs7xoySVPfkt_rm|*Mkh7GzX-bhk@MB|;9 zUZUVcL*_aDf>(m)X))Bl!bOZtJ|4U!^~*46OCBe$WV<8CpIm34ea<*IDgpB~j-8-1 zbVbnF;6MQKVArle=OsDZ#m0UTG0mXO!p6?S{EcH9YlXp0(<{iv9tsx7dx(7o!~zLn zY<-Ly+AmR4jzbM&(0_}b9JvjwEhe7C>}G;$cADMCVPdOHk9te z_YZ9-+(XQsAQx|#z4qU*V+Q?tf)b32{XLQ2Jp}$!19Hd1nEx=JSl05g8|Wt^5G)%a z59YJK6;z9$4J4gX+zG19%MKIrfvU@#0Q$HFbZ!R-vNfoN*p^yFg5S4-u0|wT#3IJc z6(%3y`-E|InU}YK;BcZ^hwc~FC_T0sf%yTvK~Thx;ut%?&Snw$X#kKT>6Ku-Rn8yA zAU-0M4%e(miH5in-WbD2$P*_AzqVioJWdWqmXM1$c119&#wxr7_zwp0;%Vn7Q~e~SR9!=&W4t{T#Y;(alllxdz2((UvFJ}T z`57i(W4;$P@UmDk;{LUIRR-g0q)`@ z*IWHOlYhh{k?CU(i=r5)#mExonU^SOMC<6X{8?^gM0#b69Q*;08?kPs9Si-^O=8eLC(;3QVK z@(5)FQ}Pg01FCB(1d>CbYZsk14WSvdFd9~^>TOEi)2Hoy3L0 zISef%+V2aWyK&LOu0@y&Mxz63yl#L6hUFJD4hj)Op@Z3+Y%yG}Ucjr+-JoZqS}XP2 zy!Y>!>`aoonOj5hD;x{qX9nPiUve7$39=;IdL1Q({?fP-aHC=(HgeMcp*t>DptHOJ z)=1BXxhIZSpE!N$SUBEU%cg_A?4H;{wQ6Mxuv zf56-SmPx<8KjfugNg_R6Gmp?{XzEW#Mh<{TMH4#6fm9?jD>-@neJdZSGMe|_Pnwj5 zw&JpEozPml^-IFPlN-qo) z9L$r%p$ivT`Zt(JX^yCTKygc|+nLCEyVeXHaqe)$zVe&OzewNAaFgecTK0bJYxi^{o5|dYWkMuVz zQT%JvqZVJ~L!vlE6mm=4axb2861>kN$*17VNRx_Dx?bE~%oXn|7K^1~zL+hJ7e7)O zH*YW9DDKSPReZcSlb0QNb=6(>8Nz{YiI8^Lm`Qb2Etva2${!#J86&|ClRuwft zisVE6E_1@o6P>DW^YW`q9^)kkr+yR(g3iG!Oc8a{@ZnPmmi<6ccm==vD<(4i`x#ki zk09Ht9AC~eDKNRgcc=P(r5-{xz(ZI2U(K-uV-jl)Y>c^|;|m)`Om{;5!-D+o2Ay5B zQ_xIuU7>fV_JWb05(=mLK^H4Lum@3FN7|bnaA?=RBJ62+-w_?R?I=_h`x3^HZgr&J z!FToV%X^xqoME5ytXU>y6UVWc$rdKWQ0h%i>^(@rtd8L!D|IU%3-gbF?&D|RxfUsp m;BG;^$}pP4|Fj^a18njMuakcaz5;FkWDX~U@H+rA*8c&RJEf2S literal 0 HcmV?d00001 diff --git a/invokeai/models/autoencoder.py b/invokeai/models/autoencoder.py new file mode 100644 index 0000000000..3db7b6fd73 --- /dev/null +++ b/invokeai/models/autoencoder.py @@ -0,0 +1,596 @@ +import torch +import pytorch_lightning as pl +import torch.nn.functional as F +from contextlib import contextmanager + +from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer + +from ldm.modules.diffusionmodules.model import Encoder, Decoder +from ldm.modules.distributions.distributions import ( + DiagonalGaussianDistribution, +) + +from ldm.util import instantiate_from_config + + +class VQModel(pl.LightningModule): + def __init__( + self, + ddconfig, + lossconfig, + n_embed, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key='image', + colorize_nlabels=None, + monitor=None, + batch_resize_range=None, + scheduler_config=None, + lr_g_factor=1.0, + remap=None, + sane_index_shape=False, # tell vector quantizer to return indices as bhw + use_ema=False, + ): + super().__init__() + self.embed_dim = embed_dim + self.n_embed = n_embed + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + self.quantize = VectorQuantizer( + n_embed, + embed_dim, + beta=0.25, + remap=remap, + sane_index_shape=sane_index_shape, + ) + self.quant_conv = 
torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d( + embed_dim, ddconfig['z_channels'], 1 + ) + if colorize_nlabels is not None: + assert type(colorize_nlabels) == int + self.register_buffer( + 'colorize', torch.randn(3, colorize_nlabels, 1, 1) + ) + if monitor is not None: + self.monitor = monitor + self.batch_resize_range = batch_resize_range + if self.batch_resize_range is not None: + print( + f'{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.' + ) + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self) + print(f'>> Keeping EMAs of {len(list(self.model_ema.buffers()))}.') + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + self.scheduler_config = scheduler_config + self.lr_g_factor = lr_g_factor + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f'{context}: Switched to EMA weights') + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f'{context}: Restored training weights') + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location='cpu')['state_dict'] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print('Deleting key {} from state_dict.'.format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) + print( + f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' + ) + if len(missing) > 0: + print(f'Missing Keys: {missing}') + print(f'Unexpected Keys: {unexpected}') + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info + + def encode_to_prequant(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, quant): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + def decode_code(self, code_b): + quant_b = self.quantize.embed_code(code_b) + dec = self.decode(quant_b) + return dec + + def forward(self, input, return_pred_indices=False): + quant, diff, (_, _, ind) = self.encode(input) + dec = self.decode(quant) + if return_pred_indices: + return dec, diff, ind + return dec, diff + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = ( + x.permute(0, 3, 1, 2) + .to(memory_format=torch.contiguous_format) + .float() + ) + if self.batch_resize_range is not None: + lower_size = self.batch_resize_range[0] + upper_size = self.batch_resize_range[1] + if self.global_step <= 4: + # do the first few batches with max size to avoid later oom + new_resize = upper_size + else: + new_resize = np.random.choice( + np.arange(lower_size, upper_size + 16, 16) + ) + if new_resize != x.shape[2]: + x = F.interpolate(x, size=new_resize, mode='bicubic') + x = x.detach() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + # https://github.com/pytorch/pytorch/issues/37142 + # try not to fool the heuristics + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + + if optimizer_idx == 0: + # autoencode + aeloss, log_dict_ae = self.loss( + qloss, + x, + xrec, + optimizer_idx, + 
self.global_step, + last_layer=self.get_last_layer(), + split='train', + predicted_indices=ind, + ) + + self.log_dict( + log_dict_ae, + prog_bar=False, + logger=True, + on_step=True, + on_epoch=True, + ) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss( + qloss, + x, + xrec, + optimizer_idx, + self.global_step, + last_layer=self.get_last_layer(), + split='train', + ) + self.log_dict( + log_dict_disc, + prog_bar=False, + logger=True, + on_step=True, + on_epoch=True, + ) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step( + batch, batch_idx, suffix='_ema' + ) + return log_dict + + def _validation_step(self, batch, batch_idx, suffix=''): + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + aeloss, log_dict_ae = self.loss( + qloss, + x, + xrec, + 0, + self.global_step, + last_layer=self.get_last_layer(), + split='val' + suffix, + predicted_indices=ind, + ) + + discloss, log_dict_disc = self.loss( + qloss, + x, + xrec, + 1, + self.global_step, + last_layer=self.get_last_layer(), + split='val' + suffix, + predicted_indices=ind, + ) + rec_loss = log_dict_ae[f'val{suffix}/rec_loss'] + self.log( + f'val{suffix}/rec_loss', + rec_loss, + prog_bar=True, + logger=True, + on_step=False, + on_epoch=True, + sync_dist=True, + ) + self.log( + f'val{suffix}/aeloss', + aeloss, + prog_bar=True, + logger=True, + on_step=False, + on_epoch=True, + sync_dist=True, + ) + if version.parse(pl.__version__) >= version.parse('1.4.0'): + del log_dict_ae[f'val{suffix}/rec_loss'] + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr_d = self.learning_rate + lr_g = self.lr_g_factor * self.learning_rate + print('lr_d', lr_d) + print('lr_g', lr_g) + opt_ae = torch.optim.Adam( + list(self.encoder.parameters()) + + list(self.decoder.parameters()) + + list(self.quantize.parameters()) + + list(self.quant_conv.parameters()) + + list(self.post_quant_conv.parameters()), + lr=lr_g, + betas=(0.5, 0.9), + ) + opt_disc = torch.optim.Adam( + self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9) + ) + + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + + print('Setting up LambdaLR scheduler...') + scheduler = [ + { + 'scheduler': LambdaLR( + opt_ae, lr_lambda=scheduler.schedule + ), + 'interval': 'step', + 'frequency': 1, + }, + { + 'scheduler': LambdaLR( + opt_disc, lr_lambda=scheduler.schedule + ), + 'interval': 'step', + 'frequency': 1, + }, + ] + return [opt_ae, opt_disc], scheduler + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if only_inputs: + log['inputs'] = x + return log + xrec, _ = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log['inputs'] = x + log['reconstructions'] = xrec + if plot_ema: + with self.ema_scope(): + xrec_ema, _ = self(x) + if x.shape[1] > 3: + xrec_ema = self.to_rgb(xrec_ema) + log['reconstructions_ema'] = xrec_ema + return log + + def to_rgb(self, x): + assert self.image_key == 'segmentation' + if not hasattr(self, 'colorize'): + 
self.register_buffer( + 'colorize', torch.randn(3, x.shape[1], 1, 1).to(x) + ) + x = F.conv2d(x, weight=self.colorize) + x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 + return x + + +class VQModelInterface(VQModel): + def __init__(self, embed_dim, *args, **kwargs): + super().__init__(embed_dim=embed_dim, *args, **kwargs) + self.embed_dim = embed_dim + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, h, force_not_quantize=False): + # also go through quantization layer + if not force_not_quantize: + quant, emb_loss, info = self.quantize(h) + else: + quant = h + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + +class AutoencoderKL(pl.LightningModule): + def __init__( + self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key='image', + colorize_nlabels=None, + monitor=None, + ): + super().__init__() + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig['double_z'] + self.quant_conv = torch.nn.Conv2d( + 2 * ddconfig['z_channels'], 2 * embed_dim, 1 + ) + self.post_quant_conv = torch.nn.Conv2d( + embed_dim, ddconfig['z_channels'], 1 + ) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels) == int + self.register_buffer( + 'colorize', torch.randn(3, colorize_nlabels, 1, 1) + ) + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location='cpu')['state_dict'] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print('Deleting key {} from state_dict.'.format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f'Restored from {path}') + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = ( + x.permute(0, 3, 1, 2) + .to(memory_format=torch.contiguous_format) + .float() + ) + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss( + inputs, + reconstructions, + posterior, + optimizer_idx, + self.global_step, + last_layer=self.get_last_layer(), + split='train', + ) + self.log( + 'aeloss', + aeloss, + prog_bar=True, + logger=True, + on_step=True, + on_epoch=True, + ) + self.log_dict( + log_dict_ae, + prog_bar=False, + logger=True, + on_step=True, + on_epoch=False, + ) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss( + inputs, + reconstructions, + posterior, + optimizer_idx, + self.global_step, + last_layer=self.get_last_layer(), + split='train', + ) + + self.log( + 'discloss', + discloss, + prog_bar=True, + logger=True, + on_step=True, + on_epoch=True, + ) + 
self.log_dict( + log_dict_disc, + prog_bar=False, + logger=True, + on_step=True, + on_epoch=False, + ) + return discloss + + def validation_step(self, batch, batch_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss( + inputs, + reconstructions, + posterior, + 0, + self.global_step, + last_layer=self.get_last_layer(), + split='val', + ) + + discloss, log_dict_disc = self.loss( + inputs, + reconstructions, + posterior, + 1, + self.global_step, + last_layer=self.get_last_layer(), + split='val', + ) + + self.log('val/rec_loss', log_dict_ae['val/rec_loss']) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + opt_ae = torch.optim.Adam( + list(self.encoder.parameters()) + + list(self.decoder.parameters()) + + list(self.quant_conv.parameters()) + + list(self.post_quant_conv.parameters()), + lr=lr, + betas=(0.5, 0.9), + ) + opt_disc = torch.optim.Adam( + self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9) + ) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log['samples'] = self.decode(torch.randn_like(posterior.sample())) + log['reconstructions'] = xrec + log['inputs'] = x + return log + + def to_rgb(self, x): + assert self.image_key == 'segmentation' + if not hasattr(self, 'colorize'): + self.register_buffer( + 'colorize', torch.randn(3, x.shape[1], 1, 1).to(x) + ) + x = F.conv2d(x, weight=self.colorize) + x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 + return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x diff --git a/invokeai/models/diffusion/__init__.py b/invokeai/models/diffusion/__init__.py new file mode 100644 index 0000000000..749f5c3f6e --- /dev/null +++ b/invokeai/models/diffusion/__init__.py @@ -0,0 +1,4 @@ +''' +Initialization file for invokeai.models.diffusion +''' +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent diff --git a/invokeai/models/diffusion/__init__.py~ b/invokeai/models/diffusion/__init__.py~ new file mode 100644 index 0000000000..d7706c27eb --- /dev/null +++ b/invokeai/models/diffusion/__init__.py~ @@ -0,0 +1,4 @@ +''' +Initialization file for invokeai.models.diffusion +''' +from shared_invokeai_diffusion import InvokeAIDiffuserComponent diff --git a/invokeai/models/diffusion/__pycache__/__init__.cpython-310.pyc b/invokeai/models/diffusion/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f528d77f34feb82cc4d7b0e6d2b5932ac68f438c GIT binary patch literal 304 zcmYk0u}Z{15QcY?h!Qxku(>X6nrvY!qIgXj5$vv6LUzK9Zg!5{J;C!Od<|c~he>PY 
[GIT binary patch data omitted: base85-encoded hunks adding compiled bytecode caches under invokeai/models/diffusion/__pycache__/, including cross_attention_control, ddim, ksampler, sampler, and shared_invokeai_diffusion (*.cpython-310.pyc); the binary data is not human-readable]
z9w|rrO79LFOd+iLTj;QdMM&K{webV#-{yXTA_Iu&Gz>*SA zWWbeT(3RK+#*DULYQx_%wxQ$T^+Ar!;V>7KoKzN-IUEz~nF02I$pmEjw#pwBZ)^M7 zZAF-Y`B>Z6(`O6&KNPSvcu(y7D3J+^1@;74I55tO2+*4Q76ox9lx@PTxvjrQ=6)$E zFzh9L4+lLlPVsv<90pkEUWzSopNuEcJC2H>yc`$NdMd_j_KUZblQ@(K$Y)S9`-7Q1 z;=Me2SNOZIj%YL;Pnb8;dy}F@=YIuQCJ4Ho7M1p?v>@hg3O=7qAlL;c+5rM~4EtAzkuNL`1}`tP!RLSY%5D^{{dG>^`y@^qNM!Ynqbe}t#YsZ2Hhgo~w za`9RgRJdS2^jvoWVY)QP@LV^y@>IQR*ynIgt-C-+KVrMt2+rG02uGyuj{Th7)y~^p zqYy^yxvn9ae82!1-hHy(J@JqXNQM;h4(Ui+zxNOB;@3Ui@6*Pw-{-`6%F+*=mkQ_7 zC?EoqT~or*o$hz-`Uo?6tTM%y2w!AD!UuJC_#eopMv%-hZ@sfIh@T(|AZ**j0^ANw zCz+H-yn`~W+K7U};0+L6^@X>UXaUTLn%`(A1(?Ciq4PAAvXt3dG_nZ1(By{z znMt=M24iF2&D}wm7WYO#cSFoB$pI9YD+EhE#a1$b;hl7TFLSUrqOB(j2G*eqt7gdx&)n^x3f74p=&MfWtYZeTFS^S++iwK>~i6;}iOjE!t z*R6bF@){AA6oa5+GD28>pM)h8Hw0upJBUSslY_y8xKt^eBf>zZa96k0aGAs<1j{LL zoCf|esRq$v6?ba!81B?!6)lVyrsWn1QVr1woR<{_0(#ue@wdp>h;@jowQcR25smmYq^2` zSpmIkGy_2NV*wGVg&zy`VBk&=S%9dJLS&^76%T=kV1YD1Z@6nOB;u`-wELJMd{__O{ZQ32|iBq`YgB%h%mWbW|P>46-2&48A zRZJvvICdY6$HVwZRPtp?`Z0?{i@;YnJ&iCjIhKiD2Qvxpmy;uw1%bwRsoxV0Ab2+t z<7R_lcf7FU(A#YfXUR9c#>Qrp6n3+saaL)f_n27a5*{pz*aYm7TLfDWgCIJrl{7P_ z!5Y@?;Qmq3oL0Yy7NaF!L2H~VJNI!ckws-kjyREupi6KBMQ_ruFdY;FB_RZQ3{Db4 z7TKYvJ@q7XMT8?Eq^NK}QQJlwj`q5@z(qJ$t%CY1;47TI-nVWmI3fZ*VmZ98GZ<58 zPr@Lk_SD}ZdaM|7+k}4~K=8kygftiYe%eLhHYIeJOs-Kr6^P=nLSo9zAO6==PvSL; zdy0@JX||*sHtGB!c2Q_sJItbX!YLYGmo{Gbb6S@o9%j`!ZB{#_p3dDX*nCs~wnk28 z_9GS0cS8RX1B^vL^J5WE@Ny6526{!DJu-l|9AwjSYy=3^Gp&v=l-T`74feiuaHcqs zA5W0!1tV^fQJ~78Q~Ld~GQv=LJc+$?U>)z_hz@w$jHf6h^`_ICj;ARa+nb57ma=6$ zv#4NqeKwvI@zoa0j~PgWV~`Pf@VEIk)>%j$`E+bA*PBDH$Aft>8?=sR7~r9HpcC;P zOeeJK2ejsT$1!dma`?C7IXFqj@=wzm(Nfk>Ye>m5 zgM<_?6}BK}wkDYMUamLK3cZDRf&VBbs7mqtZS_8-Hy_W#2%O(4v*H55wul}|h#N1$ zD9qEh3V3rVF2+l-NoFx1z^>NEItX;j{w18;QeC;o@80%5IRf@ zMnMcAUSGkXKVg0LlWT}AUP8MhzwLQ#h;CR%By**-6%ph;%2p{MeDYhA{1GJ-(@RXb zKhFP-N=SsmKIt@*JlTdMz)1pBPC!xkA*Xm$qDDz6JE>g0?59e^Q``hL z6QYR6H^diJ{nNgwFQ@Nd*B#pelwy+r4jsH|xr3PQQ{jWCL0{M=mEA20BdHeQBcQPj z0oZhdbG>1bmoGymiJb^E#^!#Okpk*6O{FNS#o0E!M&K0EjXuX6Jgz4*z=QNW(<$N; zr=Z0l|L9cQbc!n0ubh;Vg1yN%JT|t{$&57DhY0M4Sn2RZKt!7qI#kvP)ymDmMnjHV zc-U&RWwDGv&b&5{Ivp2U9-(Y|EdYs2%vxE1vk8S2#5)+*=6KLE%h zl-a7`ld3-kWMU7S6H|&K`+T18r?0nKK7Wb|$w`sl2#|*7&r(8mAz8az91NAmSH%&| z<5c@aO1?}9(JM~IiQ_Ds#JP+~+w|0+gn|e>poC0d{w+#qdqYH%IYmE36pT|Cgu9fG z&CJOf;G}x6^z_LfzCPP*G5!-OK0TV8q3nZ5=+lSrcac~M&}t!)h9%J^th=<=td+$f zTAYwB-8nTqS1#wRDXpNX(?6$^(k0jw8f*$v*FH3j;?K2J4JA5C^nCHZtgR+As};aPi7 UE#)-rEbK?D@f`~m<81za0ElTcJOBUy literal 0 HcmV?d00001 diff --git a/invokeai/models/diffusion/classifier.py b/invokeai/models/diffusion/classifier.py new file mode 100644 index 0000000000..be0d8c1919 --- /dev/null +++ b/invokeai/models/diffusion/classifier.py @@ -0,0 +1,355 @@ +import os +import torch +import pytorch_lightning as pl +from omegaconf import OmegaConf +from torch.nn import functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from copy import deepcopy +from einops import rearrange +from glob import glob +from natsort import natsorted + +from ldm.modules.diffusionmodules.openaimodel import ( + EncoderUNetModel, + UNetModel, +) +from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config + +__models__ = {'class_label': EncoderUNetModel, 'segmentation': UNetModel} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class NoisyLatentImageClassifier(pl.LightningModule): + def __init__( + self, + diffusion_path, + num_classes, + ckpt_path=None, + pool='attention', + 
label_key=None, + diffusion_ckpt_path=None, + scheduler_config=None, + weight_decay=1.0e-2, + log_steps=10, + monitor='val/loss', + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + self.num_classes = num_classes + # get latest config of diffusion model + diffusion_config = natsorted( + glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')) + )[-1] + self.diffusion_config = OmegaConf.load(diffusion_config).model + self.diffusion_config.params.ckpt_path = diffusion_ckpt_path + self.load_diffusion() + + self.monitor = monitor + self.numd = ( + self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 + ) + self.log_time_interval = ( + self.diffusion_model.num_timesteps // log_steps + ) + self.log_steps = log_steps + + self.label_key = ( + label_key + if not hasattr(self.diffusion_model, 'cond_stage_key') + else self.diffusion_model.cond_stage_key + ) + + assert ( + self.label_key is not None + ), 'label_key neither in diffusion model nor in model.params' + + if self.label_key not in __models__: + raise NotImplementedError() + + self.load_classifier(ckpt_path, pool) + + self.scheduler_config = scheduler_config + self.use_scheduler = self.scheduler_config is not None + self.weight_decay = weight_decay + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location='cpu') + if 'state_dict' in list(sd.keys()): + sd = sd['state_dict'] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print('Deleting key {} from state_dict.'.format(k)) + del sd[k] + missing, unexpected = ( + self.load_state_dict(sd, strict=False) + if not only_model + else self.model.load_state_dict(sd, strict=False) + ) + print( + f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' + ) + if len(missing) > 0: + print(f'Missing Keys: {missing}') + if len(unexpected) > 0: + print(f'Unexpected Keys: {unexpected}') + + def load_diffusion(self): + model = instantiate_from_config(self.diffusion_config) + self.diffusion_model = model.eval() + self.diffusion_model.train = disabled_train + for param in self.diffusion_model.parameters(): + param.requires_grad = False + + def load_classifier(self, ckpt_path, pool): + model_config = deepcopy( + self.diffusion_config.params.unet_config.params + ) + model_config.in_channels = ( + self.diffusion_config.params.unet_config.params.out_channels + ) + model_config.out_channels = self.num_classes + if self.label_key == 'class_label': + model_config.pool = pool + + self.model = __models__[self.label_key](**model_config) + if ckpt_path is not None: + print( + '#####################################################################' + ) + print(f'load from ckpt "{ckpt_path}"') + print( + '#####################################################################' + ) + self.init_from_ckpt(ckpt_path) + + @torch.no_grad() + def get_x_noisy(self, x, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x)) + continuous_sqrt_alpha_cumprod = None + if self.diffusion_model.use_continuous_noise: + continuous_sqrt_alpha_cumprod = ( + self.diffusion_model.sample_continuous_noise_level( + x.shape[0], t + 1 + ) + ) + # todo: make sure t+1 is correct here + + return self.diffusion_model.q_sample( + x_start=x, + t=t, + noise=noise, + continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod, + ) + + def forward(self, x_noisy, t, *args, **kwargs): + return self.model(x_noisy, t) + + @torch.no_grad() + def get_input(self, batch, k): + x = batch[k] + if 
len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + @torch.no_grad() + def get_conditioning(self, batch, k=None): + if k is None: + k = self.label_key + assert k is not None, 'Needs to provide label key' + + targets = batch[k].to(self.device) + + if self.label_key == 'segmentation': + targets = rearrange(targets, 'b h w c -> b c h w') + for down in range(self.numd): + h, w = targets.shape[-2:] + targets = F.interpolate( + targets, size=(h // 2, w // 2), mode='nearest' + ) + + # targets = rearrange(targets,'b c h w -> b h w c') + + return targets + + def compute_top_k(self, logits, labels, k, reduction='mean'): + _, top_ks = torch.topk(logits, k, dim=1) + if reduction == 'mean': + return ( + (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() + ) + elif reduction == 'none': + return (top_ks == labels[:, None]).float().sum(dim=-1) + + def on_train_epoch_start(self): + # save some memory + self.diffusion_model.model.to('cpu') + + @torch.no_grad() + def write_logs(self, loss, logits, targets): + log_prefix = 'train' if self.training else 'val' + log = {} + log[f'{log_prefix}/loss'] = loss.mean() + log[f'{log_prefix}/acc@1'] = self.compute_top_k( + logits, targets, k=1, reduction='mean' + ) + log[f'{log_prefix}/acc@5'] = self.compute_top_k( + logits, targets, k=5, reduction='mean' + ) + + self.log_dict( + log, + prog_bar=False, + logger=True, + on_step=self.training, + on_epoch=True, + ) + self.log( + 'loss', log[f'{log_prefix}/loss'], prog_bar=True, logger=False + ) + self.log( + 'global_step', + self.global_step, + logger=False, + on_epoch=False, + prog_bar=True, + ) + lr = self.optimizers().param_groups[0]['lr'] + self.log( + 'lr_abs', + lr, + on_step=True, + logger=True, + on_epoch=False, + prog_bar=True, + ) + + def shared_step(self, batch, t=None): + x, *_ = self.diffusion_model.get_input( + batch, k=self.diffusion_model.first_stage_key + ) + targets = self.get_conditioning(batch) + if targets.dim() == 4: + targets = targets.argmax(dim=1) + if t is None: + t = torch.randint( + 0, + self.diffusion_model.num_timesteps, + (x.shape[0],), + device=self.device, + ).long() + else: + t = torch.full( + size=(x.shape[0],), fill_value=t, device=self.device + ).long() + x_noisy = self.get_x_noisy(x, t) + logits = self(x_noisy, t) + + loss = F.cross_entropy(logits, targets, reduction='none') + + self.write_logs(loss.detach(), logits.detach(), targets.detach()) + + loss = loss.mean() + return loss, logits, x_noisy, targets + + def training_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + return loss + + def reset_noise_accs(self): + self.noisy_acc = { + t: {'acc@1': [], 'acc@5': []} + for t in range( + 0, + self.diffusion_model.num_timesteps, + self.diffusion_model.log_every_t, + ) + } + + def on_validation_start(self): + self.reset_noise_accs() + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + + for t in self.noisy_acc: + _, logits, _, targets = self.shared_step(batch, t) + self.noisy_acc[t]['acc@1'].append( + self.compute_top_k(logits, targets, k=1, reduction='mean') + ) + self.noisy_acc[t]['acc@5'].append( + self.compute_top_k(logits, targets, k=5, reduction='mean') + ) + + return loss + + def configure_optimizers(self): + optimizer = AdamW( + self.model.parameters(), + lr=self.learning_rate, + weight_decay=self.weight_decay, + ) + + if self.use_scheduler: + scheduler = instantiate_from_config(self.scheduler_config) 
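+            # The instantiated scheduler object is expected to expose a
+            # `schedule(step)` callable; the LambdaLR below calls it once per
+            # optimizer step to obtain the learning-rate multiplier.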
+ + print('Setting up LambdaLR scheduler...') + scheduler = [ + { + 'scheduler': LambdaLR( + optimizer, lr_lambda=scheduler.schedule + ), + 'interval': 'step', + 'frequency': 1, + } + ] + return [optimizer], scheduler + + return optimizer + + @torch.no_grad() + def log_images(self, batch, N=8, *args, **kwargs): + log = dict() + x = self.get_input(batch, self.diffusion_model.first_stage_key) + log['inputs'] = x + + y = self.get_conditioning(batch) + + if self.label_key == 'class_label': + y = log_txt_as_img((x.shape[2], x.shape[3]), batch['human_label']) + log['labels'] = y + + if ismap(y): + log['labels'] = self.diffusion_model.to_rgb(y) + + for step in range(self.log_steps): + current_time = step * self.log_time_interval + + _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) + + log[f'inputs@t{current_time}'] = x_noisy + + pred = F.one_hot( + logits.argmax(dim=1), num_classes=self.num_classes + ) + pred = rearrange(pred, 'b h w c -> b c h w') + + log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb( + pred + ) + + for key in log: + log[key] = log[key][:N] + + return log diff --git a/invokeai/models/diffusion/cross_attention_control.py b/invokeai/models/diffusion/cross_attention_control.py new file mode 100644 index 0000000000..a34f22e683 --- /dev/null +++ b/invokeai/models/diffusion/cross_attention_control.py @@ -0,0 +1,642 @@ + +# adapted from bloc97's CrossAttentionControl colab +# https://github.com/bloc97/CrossAttentionControl + + +import enum +import math +from typing import Optional, Callable + +import psutil +import torch +import diffusers +from torch import nn + +from compel.cross_attention_control import Arguments +from diffusers.models.unet_2d_condition import UNet2DConditionModel +from diffusers.models.cross_attention import AttnProcessor +from ldm.invoke.devices import torch_dtype + + +class CrossAttentionType(enum.Enum): + SELF = 1 + TOKENS = 2 + + +class Context: + + cross_attention_mask: Optional[torch.Tensor] + cross_attention_index_map: Optional[torch.Tensor] + + class Action(enum.Enum): + NONE = 0 + SAVE = 1, + APPLY = 2 + + def __init__(self, arguments: Arguments, step_count: int): + """ + :param arguments: Arguments for the cross-attention control process + :param step_count: The absolute total number of steps of diffusion (for img2img this is likely larger than the number of steps that will actually run) + """ + self.cross_attention_mask = None + self.cross_attention_index_map = None + self.self_cross_attention_action = Context.Action.NONE + self.tokens_cross_attention_action = Context.Action.NONE + self.arguments = arguments + self.step_count = step_count + + self.self_cross_attention_module_identifiers = [] + self.tokens_cross_attention_module_identifiers = [] + + self.saved_cross_attention_maps = {} + + self.clear_requests(cleanup=True) + + def register_cross_attention_modules(self, model): + for name,module in get_cross_attention_modules(model, CrossAttentionType.SELF): + if name in self.self_cross_attention_module_identifiers: + assert False, f"name {name} cannot appear more than once" + self.self_cross_attention_module_identifiers.append(name) + for name,module in get_cross_attention_modules(model, CrossAttentionType.TOKENS): + if name in self.tokens_cross_attention_module_identifiers: + assert False, f"name {name} cannot appear more than once" + self.tokens_cross_attention_module_identifiers.append(name) + + def request_save_attention_maps(self, cross_attention_type: CrossAttentionType): + if cross_attention_type == CrossAttentionType.SELF: + 
self.self_cross_attention_action = Context.Action.SAVE + else: + self.tokens_cross_attention_action = Context.Action.SAVE + + def request_apply_saved_attention_maps(self, cross_attention_type: CrossAttentionType): + if cross_attention_type == CrossAttentionType.SELF: + self.self_cross_attention_action = Context.Action.APPLY + else: + self.tokens_cross_attention_action = Context.Action.APPLY + + def is_tokens_cross_attention(self, module_identifier) -> bool: + return module_identifier in self.tokens_cross_attention_module_identifiers + + def get_should_save_maps(self, module_identifier: str) -> bool: + if module_identifier in self.self_cross_attention_module_identifiers: + return self.self_cross_attention_action == Context.Action.SAVE + elif module_identifier in self.tokens_cross_attention_module_identifiers: + return self.tokens_cross_attention_action == Context.Action.SAVE + return False + + def get_should_apply_saved_maps(self, module_identifier: str) -> bool: + if module_identifier in self.self_cross_attention_module_identifiers: + return self.self_cross_attention_action == Context.Action.APPLY + elif module_identifier in self.tokens_cross_attention_module_identifiers: + return self.tokens_cross_attention_action == Context.Action.APPLY + return False + + def get_active_cross_attention_control_types_for_step(self, percent_through:float=None)\ + -> list[CrossAttentionType]: + """ + Should cross-attention control be applied on the given step? + :param percent_through: How far through the step sequence are we (0.0=pure noise, 1.0=completely denoised image). Expected range 0.0..<1.0. + :return: A list of attention types that cross-attention control should be performed for on the given step. May be []. + """ + if percent_through is None: + return [CrossAttentionType.SELF, CrossAttentionType.TOKENS] + + opts = self.arguments.edit_options + to_control = [] + if opts['s_start'] <= percent_through < opts['s_end']: + to_control.append(CrossAttentionType.SELF) + if opts['t_start'] <= percent_through < opts['t_end']: + to_control.append(CrossAttentionType.TOKENS) + return to_control + + def save_slice(self, identifier: str, slice: torch.Tensor, dim: Optional[int], offset: int, + slice_size: Optional[int]): + if identifier not in self.saved_cross_attention_maps: + self.saved_cross_attention_maps[identifier] = { + 'dim': dim, + 'slice_size': slice_size, + 'slices': {offset or 0: slice} + } + else: + self.saved_cross_attention_maps[identifier]['slices'][offset or 0] = slice + + def get_slice(self, identifier: str, requested_dim: Optional[int], requested_offset: int, slice_size: int): + saved_attention_dict = self.saved_cross_attention_maps[identifier] + if requested_dim is None: + if saved_attention_dict['dim'] is not None: + raise RuntimeError(f"dim mismatch: expected dim=None, have {saved_attention_dict['dim']}") + return saved_attention_dict['slices'][0] + + if saved_attention_dict['dim'] == requested_dim: + if slice_size != saved_attention_dict['slice_size']: + raise RuntimeError( + f"slice_size mismatch: expected slice_size={slice_size}, have {saved_attention_dict['slice_size']}") + return saved_attention_dict['slices'][requested_offset] + + if saved_attention_dict['dim'] is None: + whole_saved_attention = saved_attention_dict['slices'][0] + if requested_dim == 0: + return whole_saved_attention[requested_offset:requested_offset + slice_size] + elif requested_dim == 1: + return whole_saved_attention[:, requested_offset:requested_offset + slice_size] + + raise RuntimeError(f"Cannot convert dim 
{saved_attention_dict['dim']} to requested dim {requested_dim}")
+
+    def get_slicing_strategy(self, identifier: str) -> tuple[Optional[int], Optional[int]]:
+        saved_attention = self.saved_cross_attention_maps.get(identifier, None)
+        if saved_attention is None:
+            return None, None
+        return saved_attention['dim'], saved_attention['slice_size']
+
+    def clear_requests(self, cleanup=True):
+        self.tokens_cross_attention_action = Context.Action.NONE
+        self.self_cross_attention_action = Context.Action.NONE
+        if cleanup:
+            self.saved_cross_attention_maps = {}
+
+    def offload_saved_attention_slices_to_cpu(self):
+        for key, map_dict in self.saved_cross_attention_maps.items():
+            for offset, slice in map_dict['slices'].items():
+                map_dict['slices'][offset] = slice.to('cpu')
+
+
+
+class InvokeAICrossAttentionMixin:
+    """
+    Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
+    through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
+    and dynamic slicing strategy selection.
+    """
+    def __init__(self):
+        self.mem_total_gb = psutil.virtual_memory().total // (1 << 30)
+        self.attention_slice_wrangler = None
+        self.slicing_strategy_getter = None
+        self.attention_slice_calculated_callback = None
+
+    def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor]]):
+        '''
+        Set custom attention calculator to be called when attention is calculated
+        :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
+                         which returns either the suggested_attention_slice or an adjusted equivalent.
+            `module` is the current CrossAttention module for which the callback is being invoked.
+            `suggested_attention_slice` is the default-calculated attention slice
+            `dim` is -1 if the attention map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
+                If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
+
+        Pass None to use the default attention calculation.
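+
+        As an illustrative sketch only (the names below are hypothetical), a
+        pass-through wrangler could be registered like this:
+
+            def passthrough_wrangler(module, suggested_attention_slice, dim, offset, slice_size):
+                return suggested_attention_slice
+
+            cross_attention_module.set_attention_slice_wrangler(passthrough_wrangler)
+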
+ :return: + ''' + self.attention_slice_wrangler = wrangler + + def set_slicing_strategy_getter(self, getter: Optional[Callable[[nn.Module], tuple[int,int]]]): + self.slicing_strategy_getter = getter + + def set_attention_slice_calculated_callback(self, callback: Optional[Callable[[torch.Tensor], None]]): + self.attention_slice_calculated_callback = callback + + def einsum_lowest_level(self, query, key, value, dim, offset, slice_size): + # calculate attention scores + #attention_scores = torch.einsum('b i d, b j d -> b i j', q, k) + attention_scores = torch.baddbmm( + torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device), + query, + key.transpose(-1, -2), + beta=0, + alpha=self.scale, + ) + + # calculate attention slice by taking the best scores for each latent pixel + default_attention_slice = attention_scores.softmax(dim=-1, dtype=attention_scores.dtype) + attention_slice_wrangler = self.attention_slice_wrangler + if attention_slice_wrangler is not None: + attention_slice = attention_slice_wrangler(self, default_attention_slice, dim, offset, slice_size) + else: + attention_slice = default_attention_slice + + if self.attention_slice_calculated_callback is not None: + self.attention_slice_calculated_callback(attention_slice, dim, offset, slice_size) + + hidden_states = torch.bmm(attention_slice, value) + return hidden_states + + def einsum_op_slice_dim0(self, q, k, v, slice_size): + r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + for i in range(0, q.shape[0], slice_size): + end = i + slice_size + r[i:end] = self.einsum_lowest_level(q[i:end], k[i:end], v[i:end], dim=0, offset=i, slice_size=slice_size) + return r + + def einsum_op_slice_dim1(self, q, k, v, slice_size): + r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + r[:, i:end] = self.einsum_lowest_level(q[:, i:end], k, v, dim=1, offset=i, slice_size=slice_size) + return r + + def einsum_op_mps_v1(self, q, k, v): + if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096 + return self.einsum_lowest_level(q, k, v, None, None, None) + else: + slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) + return self.einsum_op_slice_dim1(q, k, v, slice_size) + + def einsum_op_mps_v2(self, q, k, v): + if self.mem_total_gb > 8 and q.shape[1] <= 4096: + return self.einsum_lowest_level(q, k, v, None, None, None) + else: + return self.einsum_op_slice_dim0(q, k, v, 1) + + def einsum_op_tensor_mem(self, q, k, v, max_tensor_mb): + size_mb = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() // (1 << 20) + if size_mb <= max_tensor_mb: + return self.einsum_lowest_level(q, k, v, None, None, None) + div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length() + if div <= q.shape[0]: + return self.einsum_op_slice_dim0(q, k, v, q.shape[0] // div) + return self.einsum_op_slice_dim1(q, k, v, max(q.shape[1] // div, 1)) + + def einsum_op_cuda(self, q, k, v): + # check if we already have a slicing strategy (this should only happen during cross-attention controlled generation) + slicing_strategy_getter = self.slicing_strategy_getter + if slicing_strategy_getter is not None: + (dim, slice_size) = slicing_strategy_getter(self) + if dim is not None: + # print("using saved slicing strategy with dim", dim, "slice size", slice_size) + if dim == 0: + return self.einsum_op_slice_dim0(q, k, v, slice_size) + elif dim == 1: + return self.einsum_op_slice_dim1(q, k, v, slice_size) + + # 
fallback for when there is no saved strategy, or saved strategy does not slice + mem_free_total = get_mem_free_total(q.device) + # Divide factor of safety as there's copying and fragmentation + return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20)) + + + def get_invokeai_attention_mem_efficient(self, q, k, v): + if q.device.type == 'cuda': + #print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device)) + return self.einsum_op_cuda(q, k, v) + + if q.device.type == 'mps' or q.device.type == 'cpu': + if self.mem_total_gb >= 32: + return self.einsum_op_mps_v1(q, k, v) + return self.einsum_op_mps_v2(q, k, v) + + # Smaller slices are faster due to L2/L3/SLC caches. + # Tested on i7 with 8MB L3 cache. + return self.einsum_op_tensor_mem(q, k, v, 32) + + + +def restore_default_cross_attention(model, is_running_diffusers: bool, restore_attention_processor: Optional[AttnProcessor]=None): + if is_running_diffusers: + unet = model + unet.set_attn_processor(restore_attention_processor or CrossAttnProcessor()) + else: + remove_attention_function(model) + + +def override_cross_attention(model, context: Context, is_running_diffusers = False): + """ + Inject attention parameters and functions into the passed in model to enable cross attention editing. + + :param model: The unet model to inject into. + :return: None + """ + + # adapted from init_attention_edit + device = context.arguments.edited_conditioning.device + + # urgh. should this be hardcoded? + max_length = 77 + # mask=1 means use base prompt attention, mask=0 means use edited prompt attention + mask = torch.zeros(max_length, dtype=torch_dtype(device)) + indices_target = torch.arange(max_length, dtype=torch.long) + indices = torch.arange(max_length, dtype=torch.long) + for name, a0, a1, b0, b1 in context.arguments.edit_opcodes: + if b0 < max_length: + if name == "equal":# or (name == "replace" and a1 - a0 == b1 - b0): + # these tokens have not been edited + indices[b0:b1] = indices_target[a0:a1] + mask[b0:b1] = 1 + + context.cross_attention_mask = mask.to(device) + context.cross_attention_index_map = indices.to(device) + if is_running_diffusers: + unet = model + old_attn_processors = unet.attn_processors + if torch.backends.mps.is_available(): + # see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS + unet.set_attn_processor(SwapCrossAttnProcessor()) + else: + # try to re-use an existing slice size + default_slice_size = 4 + slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size) + unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size)) + return old_attn_processors + else: + context.register_cross_attention_modules(model) + inject_attention_function(model, context) + return None + + + + +def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]: + from ldm.modules.attention import CrossAttention # avoid circular import + cross_attention_class: type = InvokeAIDiffusersCrossAttention if isinstance(model,UNet2DConditionModel) else CrossAttention + which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2" + attention_module_tuples = [(name,module) for name, module in model.named_modules() if + isinstance(module, cross_attention_class) and which_attn in name] + cross_attention_modules_in_model_count = len(attention_module_tuples) + expected_count = 16 + if 
cross_attention_modules_in_model_count != expected_count: + # non-fatal error but .swap() won't work. + print(f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model " + + f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed " + + f"or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, " + + f"and/or update the {expected_count} above to an appropriate number, and/or find and inform someone who knows " + + f"what it means. This error is non-fatal, but it is likely that .swap() and attention map display will not " + + f"work properly until it is fixed.") + return attention_module_tuples + + +def inject_attention_function(unet, context: Context): + # ORIGINAL SOURCE CODE: https://github.com/huggingface/diffusers/blob/91ddd2a25b848df0fa1262d4f1cd98c7ccb87750/src/diffusers/models/attention.py#L276 + + def attention_slice_wrangler(module, suggested_attention_slice:torch.Tensor, dim, offset, slice_size): + + #memory_usage = suggested_attention_slice.element_size() * suggested_attention_slice.nelement() + + attention_slice = suggested_attention_slice + + if context.get_should_save_maps(module.identifier): + #print(module.identifier, "saving suggested_attention_slice of shape", + # suggested_attention_slice.shape, "dim", dim, "offset", offset) + slice_to_save = attention_slice.to('cpu') if dim is not None else attention_slice + context.save_slice(module.identifier, slice_to_save, dim=dim, offset=offset, slice_size=slice_size) + elif context.get_should_apply_saved_maps(module.identifier): + #print(module.identifier, "applying saved attention slice for dim", dim, "offset", offset) + saved_attention_slice = context.get_slice(module.identifier, dim, offset, slice_size) + + # slice may have been offloaded to CPU + saved_attention_slice = saved_attention_slice.to(suggested_attention_slice.device) + + if context.is_tokens_cross_attention(module.identifier): + index_map = context.cross_attention_index_map + remapped_saved_attention_slice = torch.index_select(saved_attention_slice, -1, index_map) + this_attention_slice = suggested_attention_slice + + mask = context.cross_attention_mask.to(torch_dtype(suggested_attention_slice.device)) + saved_mask = mask + this_mask = 1 - mask + attention_slice = remapped_saved_attention_slice * saved_mask + \ + this_attention_slice * this_mask + else: + # just use everything + attention_slice = saved_attention_slice + + return attention_slice + + cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF) + for identifier, module in cross_attention_modules: + module.identifier = identifier + try: + module.set_attention_slice_wrangler(attention_slice_wrangler) + module.set_slicing_strategy_getter( + lambda module: context.get_slicing_strategy(identifier) + ) + except AttributeError as e: + if is_attribute_error_about(e, 'set_attention_slice_wrangler'): + print(f"TODO: implement set_attention_slice_wrangler for {type(module)}") # TODO + else: + raise + + +def remove_attention_function(unet): + cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF) + for identifier, module in cross_attention_modules: + try: + # clear wrangler callback + module.set_attention_slice_wrangler(None) + module.set_slicing_strategy_getter(None) + except AttributeError 
as e: + if is_attribute_error_about(e, 'set_attention_slice_wrangler'): + print(f"TODO: implement set_attention_slice_wrangler for {type(module)}") + else: + raise + + +def is_attribute_error_about(error: AttributeError, attribute: str): + if hasattr(error, 'name'): # Python 3.10 + return error.name == attribute + else: # Python 3.9 + return attribute in str(error) + + + +def get_mem_free_total(device): + #only on cuda + if not torch.cuda.is_available(): + return None + stats = torch.cuda.memory_stats(device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(device) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + return mem_free_total + + + +class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + InvokeAICrossAttentionMixin.__init__(self) + + def _attention(self, query, key, value, attention_mask=None): + #default_result = super()._attention(query, key, value) + if attention_mask is not None: + print(f"{type(self).__name__} ignoring passed-in attention_mask") + attention_result = self.get_invokeai_attention_mem_efficient(query, key, value) + + hidden_states = self.reshape_batch_dim_to_heads(attention_result) + return hidden_states + + + + + +## 🧨diffusers implementation follows + + +""" +# base implementation + +class CrossAttnProcessor: + def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length) + + query = attn.to_q(hidden_states) + query = attn.head_to_batch_dim(query) + + encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + +""" +from dataclasses import field, dataclass + +import torch + +from diffusers.models.cross_attention import CrossAttention, CrossAttnProcessor, SlicedAttnProcessor + + +@dataclass +class SwapCrossAttnContext: + modified_text_embeddings: torch.Tensor + index_map: torch.Tensor # maps from original prompt token indices to the equivalent tokens in the modified prompt + mask: torch.Tensor # in the target space of the index_map + cross_attention_types_to_do: list[CrossAttentionType] = field(default_factory=list) + + def __int__(self, + cac_types_to_do: [CrossAttentionType], + modified_text_embeddings: torch.Tensor, + index_map: torch.Tensor, + mask: torch.Tensor): + self.cross_attention_types_to_do = cac_types_to_do + self.modified_text_embeddings = modified_text_embeddings + self.index_map = index_map + self.mask = mask + + def wants_cross_attention_control(self, attn_type: CrossAttentionType) -> bool: + return attn_type in self.cross_attention_types_to_do + + @classmethod + def make_mask_and_index_map(cls, edit_opcodes: list[tuple[str, int, int, int, int]], max_length: int) \ + -> 
tuple[torch.Tensor, torch.Tensor]: + + # mask=1 means use original prompt attention, mask=0 means use modified prompt attention + mask = torch.zeros(max_length) + indices_target = torch.arange(max_length, dtype=torch.long) + indices = torch.arange(max_length, dtype=torch.long) + for name, a0, a1, b0, b1 in edit_opcodes: + if b0 < max_length: + if name == "equal": + # these tokens remain the same as in the original prompt + indices[b0:b1] = indices_target[a0:a1] + mask[b0:b1] = 1 + + return mask, indices + + +class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor): + + # TODO: dynamically pick slice size based on memory conditions + + def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None, + # kwargs + swap_cross_attn_context: SwapCrossAttnContext=None): + + attention_type = CrossAttentionType.SELF if encoder_hidden_states is None else CrossAttentionType.TOKENS + + # if cross-attention control is not in play, just call through to the base implementation. + if attention_type is CrossAttentionType.SELF or \ + swap_cross_attn_context is None or \ + not swap_cross_attn_context.wants_cross_attention_control(attention_type): + #print(f"SwapCrossAttnContext for {attention_type} not active - passing request to superclass") + return super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask) + #else: + # print(f"SwapCrossAttnContext for {attention_type} active") + + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask( + attention_mask=attention_mask, target_length=sequence_length, + batch_size=batch_size) + + query = attn.to_q(hidden_states) + dim = query.shape[-1] + query = attn.head_to_batch_dim(query) + + original_text_embeddings = encoder_hidden_states + modified_text_embeddings = swap_cross_attn_context.modified_text_embeddings + original_text_key = attn.to_k(original_text_embeddings) + modified_text_key = attn.to_k(modified_text_embeddings) + original_value = attn.to_v(original_text_embeddings) + modified_value = attn.to_v(modified_text_embeddings) + + original_text_key = attn.head_to_batch_dim(original_text_key) + modified_text_key = attn.head_to_batch_dim(modified_text_key) + original_value = attn.head_to_batch_dim(original_value) + modified_value = attn.head_to_batch_dim(modified_value) + + # compute slices and prepare output tensor + batch_size_attention = query.shape[0] + hidden_states = torch.zeros( + (batch_size_attention, sequence_length, dim // attn.heads), device=query.device, dtype=query.dtype + ) + + # do slices + for i in range(max(1,hidden_states.shape[0] // self.slice_size)): + start_idx = i * self.slice_size + end_idx = (i + 1) * self.slice_size + + query_slice = query[start_idx:end_idx] + original_key_slice = original_text_key[start_idx:end_idx] + modified_key_slice = modified_text_key[start_idx:end_idx] + attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None + + original_attn_slice = attn.get_attention_scores(query_slice, original_key_slice, attn_mask_slice) + modified_attn_slice = attn.get_attention_scores(query_slice, modified_key_slice, attn_mask_slice) + + # because the prompt modifications may result in token sequences shifted forwards or backwards, + # the original attention probabilities must be remapped to account for token index changes in the + # modified prompt + remapped_original_attn_slice = torch.index_select(original_attn_slice, -1, + swap_cross_attn_context.index_map) + + # only some tokens taken from the 
original attention probabilities. this is controlled by the mask. + mask = swap_cross_attn_context.mask + inverse_mask = 1 - mask + attn_slice = \ + remapped_original_attn_slice * mask + \ + modified_attn_slice * inverse_mask + + del remapped_original_attn_slice, modified_attn_slice + + attn_slice = torch.bmm(attn_slice, modified_value[start_idx:end_idx]) + hidden_states[start_idx:end_idx] = attn_slice + + + # done + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class SwapCrossAttnProcessor(SlicedSwapCrossAttnProcesser): + + def __init__(self): + super(SwapCrossAttnProcessor, self).__init__(slice_size=int(1e9)) # massive slice size = don't slice + diff --git a/invokeai/models/diffusion/cross_attention_map_saving.py b/invokeai/models/diffusion/cross_attention_map_saving.py new file mode 100644 index 0000000000..eede431d33 --- /dev/null +++ b/invokeai/models/diffusion/cross_attention_map_saving.py @@ -0,0 +1,95 @@ +import math + +import PIL +import torch +from torchvision.transforms.functional import resize as tv_resize, InterpolationMode + +from .cross_attention_control import get_cross_attention_modules, CrossAttentionType + + +class AttentionMapSaver(): + + def __init__(self, token_ids: range, latents_shape: torch.Size): + self.token_ids = token_ids + self.latents_shape = latents_shape + #self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]]) + self.collated_maps = {} + + def clear_maps(self): + self.collated_maps = {} + + def add_attention_maps(self, maps: torch.Tensor, key: str): + """ + Accumulate the given attention maps and store by summing with existing maps at the passed-in key (if any). + :param maps: Attention maps to store. Expected shape [A, (H*W), N] where A is attention heads count, H and W are the map size (fixed per-key) and N is the number of tokens (typically 77). + :param key: Storage key. If a map already exists for this key it will be summed with the incoming data. In this case the maps sizes (H and W) should match. + :return: None + """ + key_and_size = f'{key}_{maps.shape[1]}' + + # extract desired tokens + maps = maps[:, :, self.token_ids] + + # merge attention heads to a single map per token + maps = torch.sum(maps, 0) + + # store + if key_and_size not in self.collated_maps: + self.collated_maps[key_and_size] = torch.zeros_like(maps, device='cpu') + self.collated_maps[key_and_size] += maps.cpu() + + def write_maps_to_disk(self, path: str): + pil_image = self.get_stacked_maps_image() + pil_image.save(path, 'PNG') + + def get_stacked_maps_image(self) -> PIL.Image: + """ + Scale all collected attention maps to the same size, blend them together and return as an image. + :return: An image containing a vertical stack of blended attention maps, one for each requested token. 
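+        Maps collected at different resolutions are first resized to the latent
+        size and then combined with a screen blend, merged = 1 - (1 - a) * (1 - b),
+        so strong activations from any map survive in the result.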
+ """ + num_tokens = len(self.token_ids) + if num_tokens == 0: + return None + + latents_height = self.latents_shape[0] + latents_width = self.latents_shape[1] + + merged = None + + for key, maps in self.collated_maps.items(): + + # maps has shape [(H*W), N] for N tokens + # but we want [N, H, W] + this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height)) + this_maps_height = int(float(latents_height) * this_scale_factor) + this_maps_width = int(float(latents_width) * this_scale_factor) + # and we need to do some dimension juggling + maps = torch.reshape(torch.swapdims(maps, 0, 1), [num_tokens, this_maps_height, this_maps_width]) + + # scale to output size if necessary + if this_scale_factor != 1: + maps = tv_resize(maps, [latents_height, latents_width], InterpolationMode.BICUBIC) + + # normalize + maps_min = torch.min(maps) + maps_range = torch.max(maps) - maps_min + #print(f"map {key} size {[this_maps_width, this_maps_height]} range {[maps_min, maps_min + maps_range]}") + maps_normalized = (maps - maps_min) / maps_range + # expand to (-0.1, 1.1) and clamp + maps_normalized_expanded = maps_normalized * 1.1 - 0.05 + maps_normalized_expanded_clamped = torch.clamp(maps_normalized_expanded, 0, 1) + + # merge together, producing a vertical stack + maps_stacked = torch.reshape(maps_normalized_expanded_clamped, [num_tokens * latents_height, latents_width]) + + if merged is None: + merged = maps_stacked + else: + # screen blend + merged = 1 - (1 - maps_stacked)*(1 - merged) + + if merged is None: + return None + + merged_bytes = merged.mul(0xff).byte() + return PIL.Image.fromarray(merged_bytes.numpy(), mode='L') diff --git a/invokeai/models/diffusion/ddim.py b/invokeai/models/diffusion/ddim.py new file mode 100644 index 0000000000..f2c6f4c591 --- /dev/null +++ b/invokeai/models/diffusion/ddim.py @@ -0,0 +1,111 @@ +"""SAMPLING ONLY.""" + +import torch +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent +from .sampler import Sampler +from ldm.modules.diffusionmodules.util import noise_like + +class DDIMSampler(Sampler): + def __init__(self, model, schedule='linear', device=None, **kwargs): + super().__init__(model,schedule,model.num_timesteps,device) + + self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model, + model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond)) + + def prepare_to_sample(self, t_enc, **kwargs): + super().prepare_to_sample(t_enc, **kwargs) + + extra_conditioning_info = kwargs.get('extra_conditioning_info', None) + all_timesteps_count = kwargs.get('all_timesteps_count', t_enc) + + if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: + self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count) + else: + self.invokeai_diffuser.restore_default_cross_attention() + + + # This is the central routine + @torch.no_grad() + def p_sample( + self, + x, + c, + t, + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + step_count:int=1000, # total number of steps + **kwargs, + ): + b, *_, device = *x.shape, x.device + + if ( + unconditional_conditioning is None + or unconditional_guidance_scale == 1.0 + ): + # damian0815 would like to know when/if this code path is used + e_t = self.model.apply_model(x, t, c) + else: + # step_index counts 
in the opposite direction to index + step_index = step_count-(index+1) + e_t = self.invokeai_diffuser.do_diffusion_step( + x, t, + unconditional_conditioning, c, + unconditional_guidance_scale, + step_index=step_index + ) + if score_corrector is not None: + assert self.model.parameterization == 'eps' + e_t = score_corrector.modify_score( + self.model, e_t, x, t, c, **corrector_kwargs + ) + + alphas = ( + self.model.alphas_cumprod + if use_original_steps + else self.ddim_alphas + ) + alphas_prev = ( + self.model.alphas_cumprod_prev + if use_original_steps + else self.ddim_alphas_prev + ) + sqrt_one_minus_alphas = ( + self.model.sqrt_one_minus_alphas_cumprod + if use_original_steps + else self.ddim_sqrt_one_minus_alphas + ) + sigmas = ( + self.model.ddim_sigmas_for_original_num_steps + if use_original_steps + else self.ddim_sigmas + ) + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full( + (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device + ) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t + noise = ( + sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + ) + if noise_dropout > 0.0: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0, None + diff --git a/invokeai/models/diffusion/ddpm.py b/invokeai/models/diffusion/ddpm.py new file mode 100644 index 0000000000..1fe059cef4 --- /dev/null +++ b/invokeai/models/diffusion/ddpm.py @@ -0,0 +1,2271 @@ +""" +wild mixture of +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +-- merci +""" + +import torch + +import torch.nn as nn +import os +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager +from functools import partial +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.distributed import rank_zero_only +from omegaconf import ListConfig +import urllib + +from ldm.modules.textual_inversion_manager import TextualInversionManager +from ldm.util import ( + log_txt_as_img, + exists, + default, + ismap, + isimage, + mean_flat, + count_params, + instantiate_from_config, +) +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import ( + normal_kl, + DiagonalGaussianDistribution, +) +from ..autoencoder import ( + VQModelInterface, + IdentityFirstStage, + AutoencoderKL, +) +from ldm.modules.diffusionmodules.util import ( + make_beta_schedule, + extract_into_tensor, + noise_like, +) +from .ddim import DDIMSampler + + +__conditioning_keys__ = { + 'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y', +} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make 
sure train/eval mode + does not change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__( + self, + unet_config, + timesteps=1000, + beta_schedule='linear', + loss_type='l2', + ckpt_path=None, + ignore_keys=[], + load_only_unet=False, + monitor='val/loss', + use_ema=True, + first_stage_key='image', + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0.0, + embedding_reg_weight=0.0, + v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1.0, + conditioning_key=None, + parameterization='eps', # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0.0, + ): + super().__init__() + assert parameterization in [ + 'eps', + 'x0', + ], 'currently only supporting "eps" and "x0"' + self.parameterization = parameterization + print( + f' | {self.__class__.__name__}: Running in {self.parameterization}-prediction mode' + ) + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f' | Keeping EMAs of {len(list(self.model_ema.buffers()))}.') + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + self.embedding_reg_weight = embedding_reg_weight + + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt( + ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet + ) + + self.register_schedule( + given_betas=given_betas, + beta_schedule=beta_schedule, + timesteps=timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full( + fill_value=logvar_init, size=(self.num_timesteps,) + ) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + def register_schedule( + self, + given_betas=None, + beta_schedule='linear', + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule( + beta_schedule, + timesteps, + linear_start=linear_start, + linear_end=linear_end, + cosine_s=cosine_s, + ) + alphas = 1.0 - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) + + (timesteps,) = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert ( + alphas_cumprod.shape[0] == self.num_timesteps + ), 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + 
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer( + 'alphas_cumprod_prev', to_torch(alphas_cumprod_prev) + ) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer( + 'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)) + ) + self.register_buffer( + 'sqrt_one_minus_alphas_cumprod', + to_torch(np.sqrt(1.0 - alphas_cumprod)), + ) + self.register_buffer( + 'log_one_minus_alphas_cumprod', + to_torch(np.log(1.0 - alphas_cumprod)), + ) + self.register_buffer( + 'sqrt_recip_alphas_cumprod', + to_torch(np.sqrt(1.0 / alphas_cumprod)), + ) + self.register_buffer( + 'sqrt_recipm1_alphas_cumprod', + to_torch(np.sqrt(1.0 / alphas_cumprod - 1)), + ) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * ( + 1.0 - alphas_cumprod_prev + ) / (1.0 - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer( + 'posterior_variance', to_torch(posterior_variance) + ) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer( + 'posterior_log_variance_clipped', + to_torch(np.log(np.maximum(posterior_variance, 1e-20))), + ) + self.register_buffer( + 'posterior_mean_coef1', + to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod) + ), + ) + self.register_buffer( + 'posterior_mean_coef2', + to_torch( + (1.0 - alphas_cumprod_prev) + * np.sqrt(alphas) + / (1.0 - alphas_cumprod) + ), + ) + + if self.parameterization == 'eps': + lvlb_weights = self.betas**2 / ( + 2 + * self.posterior_variance + * to_torch(alphas) + * (1 - self.alphas_cumprod) + ) + elif self.parameterization == 'x0': + lvlb_weights = ( + 0.5 + * np.sqrt(torch.Tensor(alphas_cumprod)) + / (2.0 * 1 - torch.Tensor(alphas_cumprod)) + ) + else: + raise NotImplementedError('mu not supported') + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f'{context}: Switched to EMA weights') + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f'{context}: Restored training weights') + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location='cpu') + if 'state_dict' in list(sd.keys()): + sd = sd['state_dict'] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print('Deleting key {} from state_dict.'.format(k)) + del sd[k] + missing, unexpected = ( + self.load_state_dict(sd, strict=False) + if not only_model + else self.model.load_state_dict(sd, strict=False) + ) + print( + f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' + ) + if len(missing) > 0: + print(f'Missing Keys: {missing}') + if len(unexpected) > 0: + print(f'Unexpected Keys: {unexpected}') + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
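+        For the standard forward process the returned parameters are those of
+        q(x_t | x_0) = N(x_t; sqrt(alphas_cumprod[t]) * x_0, (1 - alphas_cumprod[t]) * I),
+        read from the buffers registered in register_schedule().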
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) + * x_start + ) + variance = extract_into_tensor( + 1.0 - self.alphas_cumprod, t, x_start.shape + ) + log_variance = extract_into_tensor( + self.log_one_minus_alphas_cumprod, t, x_start.shape + ) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) + * x_t + - extract_into_tensor( + self.sqrt_recipm1_alphas_cumprod, t, x_t.shape + ) + * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) + * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) + * x_t + ) + posterior_variance = extract_into_tensor( + self.posterior_variance, t, x_t.shape + ) + posterior_log_variance_clipped = extract_into_tensor( + self.posterior_log_variance_clipped, t, x_t.shape + ) + return ( + posterior_mean, + posterior_variance, + posterior_log_variance_clipped, + ) + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == 'eps': + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == 'x0': + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1.0, 1.0) + + ( + model_mean, + posterior_variance, + posterior_log_variance, + ) = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance( + x=x, t=t, clip_denoised=clip_denoised + ) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape( + b, *((1,) * (len(x.shape) - 1)) + ) + return ( + model_mean + + nonzero_mask * (0.5 * model_log_variance).exp() * noise + ) + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm( + reversed(range(0, self.num_timesteps)), + desc='Sampling t', + total=self.num_timesteps, + dynamic_ncols=True, + ): + img = self.p_sample( + img, + torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised, + ) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop( + (batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates, + ) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) + * x_start + + extract_into_tensor( + self.sqrt_one_minus_alphas_cumprod, t, x_start.shape + ) + * noise + ) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = 
torch.nn.functional.mse_loss( + target, pred, reduction='none' + ) + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == 'eps': + target = noise + elif self.parameterization == 'x0': + target = x_start + else: + raise NotImplementedError( + f'Paramterization {self.parameterization} not yet supported' + ) + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint( + 0, self.num_timesteps, (x.shape[0],), device=self.device + ).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict( + loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True + ) + + self.log( + 'global_step', + self.global_step, + prog_bar=True, + logger=True, + on_step=True, + on_epoch=False, + ) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log( + 'lr_abs', + lr, + prog_bar=True, + logger=True, + on_step=True, + on_epoch=False, + ) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = { + key + '_ema': loss_dict_ema[key] for key in loss_dict_ema + } + self.log_dict( + loss_dict_no_ema, + prog_bar=False, + logger=True, + on_step=False, + on_epoch=True, + ) + self.log_dict( + loss_dict_ema, + prog_bar=False, + logger=True, + on_step=False, + on_epoch=True, + ) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images( + self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs + ): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log['inputs'] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % 
self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log['diffusion_row'] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope('Plotting'): + samples, denoise_row = self.sample( + batch_size=N, return_intermediates=True + ) + + log['samples'] = samples + log['denoise_row'] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + + def __init__( + self, + first_stage_config, + cond_stage_config, + personalization_config, + num_timesteps_cond=None, + cond_stage_key='image', + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + *args, + **kwargs, + ): + + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__': + conditioning_key = None + ckpt_path = kwargs.pop('ckpt_path', None) + ignore_keys = kwargs.pop('ignore_keys', []) + super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + + try: + self.num_downs = ( + len(first_stage_config.params.ddconfig.ch_mult) - 1 + ) + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + + self.model.eval() + self.model.train = disabled_train + for param in self.model.parameters(): + param.requires_grad = False + + self.embedding_manager = self.instantiate_embedding_manager( + personalization_config, self.cond_stage_model + ) + self.textual_inversion_manager = TextualInversionManager( + tokenizer = self.cond_stage_model.tokenizer, + text_encoder = self.cond_stage_model.transformer, + full_precision = True + ) + # this circular component dependency is gross and bad, needs to be rethought + self.cond_stage_model.set_textual_inversion_manager(self.textual_inversion_manager) + + self.emb_ckpt_counter = 0 + + # if self.embedding_manager.is_clip: + # self.cond_stage_model.update_embedding_func(self.embedding_manager) + + for param in self.embedding_manager.embedding_parameters(): + 
param.requires_grad = True + + def make_cond_schedule( + self, + ): + self.cond_ids = torch.full( + size=(self.num_timesteps,), + fill_value=self.num_timesteps - 1, + dtype=torch.long, + ) + ids = torch.round( + torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) + ).long() + self.cond_ids[: self.num_timesteps_cond] = ids + + @rank_zero_only + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx=None): + # only for very first batch + if ( + self.scale_by_std + and self.current_epoch == 0 + and self.global_step == 0 + and batch_idx == 0 + and not self.restarted_from_ckpt + ): + assert ( + self.scale_factor == 1.0 + ), 'rather not use custom rescaling and std-rescaling simultaneously' + # set rescale weight to 1./std of encodings + print('### USING STD-RESCALING ###') + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer('scale_factor', 1.0 / z.flatten().std()) + print(f'setting self.scale_factor to {self.scale_factor}') + print('### USING STD-RESCALING ###') + + def register_schedule( + self, + given_betas=None, + beta_schedule='linear', + timesteps=1000, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + ): + super().register_schedule( + given_betas, + beta_schedule, + timesteps, + linear_start, + linear_end, + cosine_s, + ) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == '__is_first_stage__': + print('Using first stage also as cond stage.') + self.cond_stage_model = self.first_stage_model + elif config == '__is_unconditional__': + print( + f'Training {self.__class__.__name__} as an unconditional model.' + ) + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + else: + assert config != '__is_first_stage__' + assert config != '__is_unconditional__' + try: + model = instantiate_from_config(config) + except urllib.error.URLError: + raise SystemExit( + "* Couldn't load a dependency. Try running scripts/preload_models.py from an internet-conected machine." 
+ ) + self.cond_stage_model = model + + def instantiate_embedding_manager(self, config, embedder): + model = instantiate_from_config(config, embedder=embedder) + + if config.params.get( + 'embedding_manager_ckpt', None + ): # do not load if missing OR empty string + model.load(config.params.embedding_manager_ckpt) + + return model + + def _get_denoise_row_from_list( + self, samples, desc='', force_no_decoder_quantization=False + ): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append( + self.decode_first_stage( + zd.to(self.device), + force_not_quantize=force_no_decoder_quantization, + ) + ) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError( + f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" + ) + return self.scale_factor * z + + def get_learned_conditioning(self, c, **kwargs): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable( + self.cond_stage_model.encode + ): + c = self.cond_stage_model.encode( + c, embedding_manager=self.embedding_manager,**kwargs + ) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c, **kwargs) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c, **kwargs) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + wtith min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min( + torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 + )[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip( + weighting, + self.split_input_params['clip_min_weight'], + self.split_input_params['clip_max_weight'], + ) + weighting = ( + weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + ) + + if self.split_input_params['tie_braker']: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip( + L_weighting, + self.split_input_params['clip_min_tie_weight'], + self.split_input_params['clip_max_tie_weight'], + ) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold( + self, x, kernel_size, stride, uf=1, df=1 + ): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = 
x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting( + kernel_size[0], kernel_size[1], Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view( + 1, 1, h, w + ) # normalizes the overlap + weighting = weighting.view( + (1, 1, kernel_size[0], kernel_size[1], Ly * Lx) + ) + + elif uf > 1 and df == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict( + kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, + padding=0, + stride=(stride[0] * uf, stride[1] * uf), + ) + fold = torch.nn.Fold( + output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 + ) + + weighting = self.get_weighting( + kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view( + 1, 1, h * uf, w * uf + ) # normalizes the overlap + weighting = weighting.view( + (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) + ) + + elif df > 1 and uf == 1: + fold_params = dict( + kernel_size=kernel_size, dilation=1, padding=0, stride=stride + ) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict( + kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, + padding=0, + stride=(stride[0] // df, stride[1] // df), + ) + fold = torch.nn.Fold( + output_size=(x.shape[2] // df, x.shape[3] // df), + **fold_params2, + ) + + weighting = self.get_weighting( + kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device + ).to(x.dtype) + normalization = fold(weighting).view( + 1, 1, h // df, w // df + ) # normalizes the overlap + weighting = weighting.view( + (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) + ) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input( + self, + batch, + k, + return_first_stage_outputs=False, + force_c_encode=False, + cond_key=None, + return_original_cond=False, + bs=None, + ): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None: + if cond_key is None: + cond_key = self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ['caption', 'coordinates_bbox']: + xc = batch[cond_key] + elif cond_key == 'class_label': + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + # import pudb; pudb.set_trace() + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {'pos_x': pos_x, 'pos_y': pos_y} + out = [z, c] + if 
return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_original_cond: + out.append(xc) + return out + + @torch.no_grad() + def decode_first_stage( + self, z, predict_cids=False, force_not_quantize=False + ): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry( + z, shape=None + ) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1.0 / self.scale_factor * z + + if hasattr(self, 'split_input_params'): + if self.split_input_params['patch_distributed_vq']: + ks = self.split_input_params['ks'] # eg. (128, 128) + stride = self.split_input_params['stride'] # eg. (64, 64) + uf = self.split_input_params['vqf'] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print('reducing Kernel') + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print('reducing stride') + + fold, unfold, normalization, weighting = self.get_fold_unfold( + z, ks, stride, uf=uf + ) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view( + (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) + ) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [ + self.first_stage_model.decode( + z[:, :, :, :, i], + force_not_quantize=predict_cids + or force_not_quantize, + ) + for i in range(z.shape[-1]) + ] + else: + + output_list = [ + self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1]) + ] + + o = torch.stack( + output_list, axis=-1 + ) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. reshape to img shape + o = o.view( + (o.shape[0], -1, o.shape[-1]) + ) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode( + z, + force_not_quantize=predict_cids or force_not_quantize, + ) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode( + z, force_not_quantize=predict_cids or force_not_quantize + ) + else: + return self.first_stage_model.decode(z) + + # same as above but without decorator + def differentiable_decode_first_stage( + self, z, predict_cids=False, force_not_quantize=False + ): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry( + z, shape=None + ) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1.0 / self.scale_factor * z + + if hasattr(self, 'split_input_params'): + if self.split_input_params['patch_distributed_vq']: + ks = self.split_input_params['ks'] # eg. (128, 128) + stride = self.split_input_params['stride'] # eg. (64, 64) + uf = self.split_input_params['vqf'] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print('reducing Kernel') + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print('reducing stride') + + fold, unfold, normalization, weighting = self.get_fold_unfold( + z, ks, stride, uf=uf + ) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. 
Reshape to img shape + z = z.view( + (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) + ) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [ + self.first_stage_model.decode( + z[:, :, :, :, i], + force_not_quantize=predict_cids + or force_not_quantize, + ) + for i in range(z.shape[-1]) + ] + else: + + output_list = [ + self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1]) + ] + + o = torch.stack( + output_list, axis=-1 + ) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. reshape to img shape + o = o.view( + (o.shape[0], -1, o.shape[-1]) + ) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode( + z, + force_not_quantize=predict_cids or force_not_quantize, + ) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode( + z, force_not_quantize=predict_cids or force_not_quantize + ) + else: + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + if hasattr(self, 'split_input_params'): + if self.split_input_params['patch_distributed_vq']: + ks = self.split_input_params['ks'] # eg. (128, 128) + stride = self.split_input_params['stride'] # eg. (64, 64) + df = self.split_input_params['vqf'] + self.split_input_params['original_image_size'] = x.shape[-2:] + bs, nc, h, w = x.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print('reducing Kernel') + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print('reducing stride') + + fold, unfold, normalization, weighting = self.get_fold_unfold( + x, ks, stride, df=df + ) + z = unfold(x) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view( + (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) + ) # (bn, nc, ks[0], ks[1], L ) + + output_list = [ + self.first_stage_model.encode(z[:, :, :, :, i]) + for i in range(z.shape[-1]) + ] + + o = torch.stack(output_list, axis=-1) + o = o * weighting + + # Reverse reshape to img shape + o = o.view( + (o.shape[0], -1, o.shape[-1]) + ) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization + return decoded + + else: + return self.first_stage_model.encode(x) + else: + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint( + 0, self.num_timesteps, (x.shape[0],), device=self.device + ).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample( + x_start=c, t=tc, noise=torch.randn_like(c.float()) + ) + + return self.p_losses(x, c, t, *args, **kwargs) + + def _rescale_annotations( + self, bboxes, crop_coordinates + ): # TODO: move to dataset + def rescale_bbox(bbox): + x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) + y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) + w = min(bbox[2] / crop_coordinates[2], 1 - 
x0) + h = min(bbox[3] / crop_coordinates[3], 1 - y0) + return x0, y0, w, h + + return [rescale_bbox(b) for b in bboxes] + + def apply_model(self, x_noisy, t, cond, return_ids=False): + + if isinstance(cond, dict): + # hybrid case, cond is exptected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = ( + 'c_concat' + if self.model.conditioning_key == 'concat' + else 'c_crossattn' + ) + cond = {key: cond} + + if hasattr(self, 'split_input_params'): + assert ( + len(cond) == 1 + ) # todo can only deal with one conditioning atm + assert not return_ids + ks = self.split_input_params['ks'] # eg. (128, 128) + stride = self.split_input_params['stride'] # eg. (64, 64) + + h, w = x_noisy.shape[-2:] + + fold, unfold, normalization, weighting = self.get_fold_unfold( + x_noisy, ks, stride + ) + + z = unfold(x_noisy) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view( + (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) + ) # (bn, nc, ks[0], ks[1], L ) + z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] + + if ( + self.cond_stage_key + in ['image', 'LR_image', 'segmentation', 'bbox_img'] + and self.model.conditioning_key + ): # todo check for completeness + c_key = next(iter(cond.keys())) # get key + c = next(iter(cond.values())) # get value + assert ( + len(c) == 1 + ) # todo extend to list with more than one elem + c = c[0] # get element + + c = unfold(c) + c = c.view( + (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) + ) # (bn, nc, ks[0], ks[1], L ) + + cond_list = [ + {c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1]) + ] + + elif self.cond_stage_key == 'coordinates_bbox': + assert ( + 'original_image_size' in self.split_input_params + ), 'BoudingBoxRescaling is missing original_image_size' + + # assuming padding of unfold is always 0 and its dilation is always 1 + n_patches_per_row = int((w - ks[0]) / stride[0] + 1) + full_img_h, full_img_w = self.split_input_params[ + 'original_image_size' + ] + # as we are operating on latents, we need the factor from the original image size to the + # spatial latent size to properly rescale the crops for regenerating the bbox annotations + num_downs = self.first_stage_model.encoder.num_resolutions - 1 + rescale_latent = 2 ** (num_downs) + + # get top left positions of patches as conforming for the bbbox tokenizer, therefore we + # need to rescale the tl patch coordinates to be in between (0,1) + tl_patch_coordinates = [ + ( + rescale_latent + * stride[0] + * (patch_nr % n_patches_per_row) + / full_img_w, + rescale_latent + * stride[1] + * (patch_nr // n_patches_per_row) + / full_img_h, + ) + for patch_nr in range(z.shape[-1]) + ] + + # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) + patch_limits = [ + ( + x_tl, + y_tl, + rescale_latent * ks[0] / full_img_w, + rescale_latent * ks[1] / full_img_h, + ) + for x_tl, y_tl in tl_patch_coordinates + ] + # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] + + # tokenize crop coordinates for the bounding boxes of the respective patches + patch_limits_tknzd = [ + torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[ + None + ].to(self.device) + for bbox in patch_limits + ] # list of length l with tensors of shape (1, 2) + print(patch_limits_tknzd[0].shape) + # cut tknzd crop position from conditioning + assert isinstance( + cond, dict + ), 'cond must be dict to be fed into model' + cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + print(cut_cond.shape) + + 
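The `get_fold_unfold()` / `apply_model()` machinery above splits a large latent into overlapping crops, runs the model on each crop, and stitches the results back together, dividing by a `normalization` map so that overlapping regions are not double-counted. Below is a minimal sketch of that round trip using plain `torch`, a toy 8x8 tensor, and an identity per-crop operation standing in for the model; the toy sizes and names are illustrative assumptions, not part of the patch:

```python
import torch

x = torch.arange(64.0).view(1, 1, 8, 8)       # toy stand-in for a latent
ks, stride = (4, 4), (2, 2)                   # kernel and stride, as in split_input_params

unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

crops = unfold(x)                             # (1, 1*4*4, L) with L = 9 overlapping crops
# the real code would run the diffusion model on each crop; here they are left unchanged
normalization = fold(torch.ones_like(crops))  # how many crops cover each output pixel

stitched = fold(crops) / normalization        # overlap-aware stitching
print(torch.allclose(stitched, x))            # True for the identity per-crop operation
```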
adapted_cond = torch.stack( + [ + torch.cat([cut_cond, p], dim=1) + for p in patch_limits_tknzd + ] + ) + adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + print(adapted_cond.shape) + adapted_cond = self.get_learned_conditioning(adapted_cond) + print(adapted_cond.shape) + adapted_cond = rearrange( + adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1] + ) + print(adapted_cond.shape) + + cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + + else: + cond_list = [ + cond for i in range(z.shape[-1]) + ] # Todo make this more efficient + + # apply model by loop over crops + output_list = [ + self.model(z_list[i], t, **cond_list[i]) + for i in range(z.shape[-1]) + ] + assert not isinstance( + output_list[0], tuple + ) # todo cant deal with multiple model outputs check this never happens + + o = torch.stack(output_list, axis=-1) + o = o * weighting + # Reverse reshape to img shape + o = o.view( + (o.shape[0], -1, o.shape[-1]) + ) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + x_recon = fold(o) / normalization + + else: + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) + * x_t + - pred_xstart + ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor( + [self.num_timesteps - 1] * batch_size, device=x_start.device + ) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl( + mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 + ) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == 'x0': + target = x_start + elif self.parameterization == 'eps': + target = noise + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean( + [1, 2, 3] + ) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t.item()].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean( + dim=(1, 2, 3) + ) + loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += self.original_elbo_weight * loss_vlb + loss_dict.update({f'{prefix}/loss': loss}) + + if self.embedding_reg_weight > 0: + loss_embedding_reg = ( + self.embedding_manager.embedding_to_coarse_loss().mean() + ) + + loss_dict.update({f'{prefix}/loss_emb_reg': loss_embedding_reg}) + + loss += self.embedding_reg_weight * 
loss_embedding_reg + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance( + self, + x, + c, + t, + clip_denoised: bool, + return_codebook_ids=False, + quantize_denoised=False, + return_x0=False, + score_corrector=None, + corrector_kwargs=None, + ): + t_in = t + model_out = self.apply_model( + x, t_in, c, return_ids=return_codebook_ids + ) + + if score_corrector is not None: + assert self.parameterization == 'eps' + model_out = score_corrector.modify_score( + self, model_out, x, t, c, **corrector_kwargs + ) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == 'eps': + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == 'x0': + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1.0, 1.0) + if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize( + x_recon + ) + ( + model_mean, + posterior_variance, + posterior_log_variance, + ) = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return ( + model_mean, + posterior_variance, + posterior_log_variance, + logits, + ) + elif return_x0: + return ( + model_mean, + posterior_variance, + posterior_log_variance, + x_recon, + ) + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample( + self, + x, + c, + t, + clip_denoised=False, + repeat_noise=False, + return_codebook_ids=False, + quantize_denoised=False, + return_x0=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + ): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance( + x=x, + c=c, + t=t, + clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + ) + if return_codebook_ids: + raise DeprecationWarning('Support dropped.') + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.0: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape( + b, *((1,) * (len(x.shape) - 1)) + ) + + if return_codebook_ids: + return model_mean + nonzero_mask * ( + 0.5 * model_log_variance + ).exp() * noise, logits.argmax(dim=1) + if return_x0: + return ( + model_mean + + nonzero_mask * (0.5 * model_log_variance).exp() * noise, + x0, + ) + else: + return ( + model_mean + + nonzero_mask * (0.5 * model_log_variance).exp() * noise + ) + + @torch.no_grad() + def progressive_denoising( + self, + cond, + shape, + verbose=True, + callback=None, + quantize_denoised=False, + img_callback=None, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + batch_size=None, + x_T=None, + start_T=None, + log_every_t=None, + ): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, 
dict): + cond = { + key: cond[key][:batch_size] + if not isinstance(cond[key], list) + else list(map(lambda x: x[:batch_size], cond[key])) + for key in cond + } + else: + cond = ( + [c[:batch_size] for c in cond] + if isinstance(cond, list) + else cond[:batch_size] + ) + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = ( + tqdm( + reversed(range(0, timesteps)), + desc='Progressive Generation', + total=timesteps, + ) + if verbose + else reversed(range(0, timesteps)) + ) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample( + x_start=cond, t=tc, noise=torch.randn_like(cond) + ) + + img, x0_partial = self.p_sample( + img, + cond, + ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, + return_x0=True, + temperature=temperature[i], + noise_dropout=noise_dropout, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + ) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1.0 - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop( + self, + cond, + shape, + return_intermediates=False, + x_T=None, + verbose=True, + callback=None, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + img_callback=None, + start_T=None, + log_every_t=None, + ): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = ( + tqdm( + reversed(range(0, timesteps)), + desc='Sampling t', + total=timesteps, + ) + if verbose + else reversed(range(0, timesteps)) + ) + + if mask is not None: + assert x0 is not None + assert ( + x0.shape[2:3] == mask.shape[2:3] + ) # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample( + x_start=cond, t=tc, noise=torch.randn_like(cond) + ) + + img = self.p_sample( + img, + cond, + ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, + ) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1.0 - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample( + self, + cond, + batch_size=16, + return_intermediates=False, + x_T=None, + verbose=True, + timesteps=None, + quantize_denoised=False, + mask=None, + x0=None, + shape=None, + **kwargs, + ): + if shape is None: + shape = ( + batch_size, + self.channels, + self.image_size, + self.image_size, + ) + if cond is not None: + if isinstance(cond, dict): + cond = { + key: cond[key][:batch_size] + if not isinstance(cond[key], list) + else 
list(map(lambda x: x[:batch_size], cond[key])) + for key in cond + } + else: + cond = ( + [c[:batch_size] for c in cond] + if isinstance(cond, list) + else cond[:batch_size] + ) + return self.p_sample_loop( + cond, + shape, + return_intermediates=return_intermediates, + x_T=x_T, + verbose=verbose, + timesteps=timesteps, + quantize_denoised=quantize_denoised, + mask=mask, + x0=x0, + ) + + @torch.no_grad() + def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): + + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates = ddim_sampler.sample( + ddim_steps, batch_size, shape, cond, verbose=False, **kwargs + ) + + else: + samples, intermediates = self.sample( + cond=cond, + batch_size=batch_size, + return_intermediates=True, + **kwargs, + ) + + return samples, intermediates + + @torch.no_grad() + def get_unconditional_conditioning(self, batch_size, null_label=None): + if null_label is not None: + xc = null_label + if isinstance(xc, ListConfig): + xc = list(xc) + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + if hasattr(xc, "to"): + xc = xc.to(self.device) + c = self.get_learned_conditioning(xc) + else: + # todo: get null label from cond_stage_model + raise NotImplementedError() + c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) + return c + + @torch.no_grad() + def log_images( + self, + batch, + N=8, + n_row=4, + sample=True, + ddim_steps=50, + ddim_eta=1.0, + return_keys=None, + quantize_denoised=True, + inpaint=False, + plot_denoise_rows=False, + plot_progressive_rows=False, + plot_diffusion_rows=False, + **kwargs, + ): + + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N, + ) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log['inputs'] = x + log['reconstruction'] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, 'decode'): + xc = self.cond_stage_model.decode(c) + log['conditioning'] = xc + elif self.cond_stage_key in ['caption']: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['caption']) + log['conditioning'] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img( + (x.shape[2], x.shape[3]), batch['human_label'] + ) + log['conditioning'] = xc + elif isimage(xc): + log['conditioning'] = xc + if ismap(xc): + log['original_conditioning'] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack( + diffusion_row + ) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange( + diffusion_grid, 'b n c h w -> (b n) c h w' + ) + diffusion_grid = make_grid( + diffusion_grid, nrow=diffusion_row.shape[0] + ) + log['diffusion_row'] = diffusion_grid + + if sample: + # get denoise row + with self.ema_scope('Plotting'): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + 
eta=ddim_eta, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log['samples'] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log['denoise_row'] = denoise_grid + + uc = self.get_learned_conditioning(len(c) * ['']) + sample_scaled, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + unconditional_guidance_scale=5.0, + unconditional_conditioning=uc, + ) + log['samples_scaled'] = self.decode_first_stage(sample_scaled) + + if ( + quantize_denoised + and not isinstance(self.first_stage_model, AutoencoderKL) + and not isinstance(self.first_stage_model, IdentityFirstStage) + ): + # also display when quantizing x0 while sampling + with self.ema_scope('Plotting Quantized Denoised'): + samples, z_denoise_row = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + ddim_steps=ddim_steps, + eta=ddim_eta, + quantize_denoised=True, + ) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log['samples_x0_quantized'] = x_samples + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0 + mask = mask[:, None, ...] + with self.ema_scope('Plotting Inpaint'): + + samples, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + eta=ddim_eta, + ddim_steps=ddim_steps, + x0=z[:N], + mask=mask, + ) + x_samples = self.decode_first_stage(samples.to(self.device)) + log['samples_inpainting'] = x_samples + log['mask'] = mask + + # outpaint + with self.ema_scope('Plotting Outpaint'): + samples, _ = self.sample_log( + cond=c, + batch_size=N, + ddim=use_ddim, + eta=ddim_eta, + ddim_steps=ddim_steps, + x0=z[:N], + mask=mask, + ) + x_samples = self.decode_first_stage(samples.to(self.device)) + log['samples_outpainting'] = x_samples + + if plot_progressive_rows: + with self.ema_scope('Plotting Progressives'): + img, progressives = self.progressive_denoising( + c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N, + ) + prog_row = self._get_denoise_row_from_list( + progressives, desc='Progressive Generation' + ) + log['progressive_row'] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + + if self.embedding_manager is not None: + params = list(self.embedding_manager.embedding_parameters()) + # params = list(self.cond_stage_model.transformer.text_model.embeddings.embedding_manager.embedding_parameters()) + else: + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print( + f'{self.__class__.__name__}: Also optimizing conditioner params!' 
+ ) + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print('Setting up LambdaLR scheduler...') + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1, + } + ] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, 'colorize'): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 + return x + + @rank_zero_only + def on_save_checkpoint(self, checkpoint): + checkpoint.clear() + + if os.path.isdir(self.trainer.checkpoint_callback.dirpath): + self.embedding_manager.save( + os.path.join( + self.trainer.checkpoint_callback.dirpath, 'embeddings.pt' + ) + ) + + if (self.global_step - self.emb_ckpt_counter) > 500: + self.embedding_manager.save( + os.path.join( + self.trainer.checkpoint_callback.dirpath, + f'embeddings_gs-{self.global_step}.pt', + ) + ) + + self.emb_ckpt_counter += 500 + + +class DiffusionWrapper(pl.LightningModule): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [ + None, + 'concat', + 'crossattn', + 'hybrid', + 'adm', + ] + + def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): + if self.conditioning_key is None: + out = self.diffusion_model(x, t) + elif self.conditioning_key == 'concat': + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == 'crossattn': + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == 'hybrid': + cc = torch.cat(c_crossattn, 1) + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == 'adm': + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class Layout2ImgDiffusion(LatentDiffusion): + # TODO: move all layout-specific hacks to this class + def __init__(self, cond_stage_key, *args, **kwargs): + assert ( + cond_stage_key == 'coordinates_bbox' + ), 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' + super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) + + def log_images(self, batch, N=8, *args, **kwargs): + logs = super().log_images(batch=batch, N=N, *args, **kwargs) + + key = 'train' if self.training else 'validation' + dset = self.trainer.datamodule.datasets[key] + mapper = dset.conditional_builders[self.cond_stage_key] + + bbox_imgs = [] + map_fn = lambda catno: dset.get_textual_label( + dset.get_category_id(catno) + ) + for tknzd_bbox in batch[self.cond_stage_key][:N]: + bboximg = mapper.plot( + tknzd_bbox.detach().cpu(), map_fn, (256, 256) + ) + bbox_imgs.append(bboximg) + + cond_img = torch.stack(bbox_imgs, dim=0) + logs['bbox_image'] = cond_img + return logs + +class LatentInpaintDiffusion(LatentDiffusion): + def __init__( + self, + concat_keys=("mask", "masked_image"), + masked_image_key="masked_image", + finetune_keys=None, + *args, + **kwargs, + ): + super().__init__(*args, 
**kwargs) + self.masked_image_key = masked_image_key + assert self.masked_image_key in concat_keys + self.concat_keys = concat_keys + + + @torch.no_grad() + def get_input( + self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False + ): + # note: restricted to non-trainable encoders currently + assert ( + not self.cond_stage_trainable + ), "trainable cond stages not yet supported for inpainting" + z, c, x, xrec, xc = super().get_input( + batch, + self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=bs, + ) + + assert exists(self.concat_keys) + c_cat = list() + for ck in self.concat_keys: + cc = ( + rearrange(batch[ck], "b h w c -> b c h w") + .to(memory_format=torch.contiguous_format) + .float() + ) + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + bchw = z.shape + if ck != self.masked_image_key: + cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) + else: + cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds diff --git a/invokeai/models/diffusion/ksampler.py b/invokeai/models/diffusion/ksampler.py new file mode 100644 index 0000000000..f98ca8de21 --- /dev/null +++ b/invokeai/models/diffusion/ksampler.py @@ -0,0 +1,312 @@ +"""wrapper around part of Katherine Crowson's k-diffusion library, making it call compatible with other Samplers""" + +import k_diffusion as K +import torch +from torch import nn + +from .cross_attention_map_saving import AttentionMapSaver +from .sampler import Sampler +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent + + +# at this threshold, the scheduler will stop using the Karras +# noise schedule and start using the model's schedule +STEP_THRESHOLD = 30 + +def cfg_apply_threshold(result, threshold = 0.0, scale = 0.7): + if threshold <= 0.0: + return result + maxval = 0.0 + torch.max(result).cpu().numpy() + minval = 0.0 + torch.min(result).cpu().numpy() + if maxval < threshold and minval > -threshold: + return result + if maxval > threshold: + maxval = min(max(1, scale*maxval), threshold) + if minval < -threshold: + minval = max(min(-1, scale*minval), -threshold) + return torch.clamp(result, min=minval, max=maxval) + + +class CFGDenoiser(nn.Module): + def __init__(self, model, threshold = 0, warmup = 0): + super().__init__() + self.inner_model = model + self.threshold = threshold + self.warmup_max = warmup + self.warmup = max(warmup / 10, 1) + self.invokeai_diffuser = InvokeAIDiffuserComponent(model, + model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond)) + + + def prepare_to_sample(self, t_enc, **kwargs): + + extra_conditioning_info = kwargs.get('extra_conditioning_info', None) + + if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: + self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = t_enc) + else: + self.invokeai_diffuser.restore_default_cross_attention() + + + def forward(self, x, sigma, uncond, cond, cond_scale): + next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale) + if self.warmup < self.warmup_max: + thresh = max(1, 1 + (self.threshold - 1) * (self.warmup / self.warmup_max)) + self.warmup += 1 + else: + thresh = self.threshold + if thresh > self.threshold: + thresh = self.threshold + return 
cfg_apply_threshold(next_x, thresh) + +class KSampler(Sampler): + def __init__(self, model, schedule='lms', device=None, **kwargs): + denoiser = K.external.CompVisDenoiser(model) + super().__init__( + denoiser, + schedule, + steps=model.num_timesteps, + ) + self.sigmas = None + self.ds = None + self.s_in = None + self.karras_max = kwargs.get('karras_max',STEP_THRESHOLD) + if self.karras_max is None: + self.karras_max = STEP_THRESHOLD + + def make_schedule( + self, + ddim_num_steps, + ddim_discretize='uniform', + ddim_eta=0.0, + verbose=False, + ): + outer_model = self.model + self.model = outer_model.inner_model + super().make_schedule( + ddim_num_steps, + ddim_discretize='uniform', + ddim_eta=0.0, + verbose=False, + ) + self.model = outer_model + self.ddim_num_steps = ddim_num_steps + # we don't need both of these sigmas, but storing them here to make + # comparison easier later on + self.model_sigmas = self.model.get_sigmas(ddim_num_steps) + self.karras_sigmas = K.sampling.get_sigmas_karras( + n=ddim_num_steps, + sigma_min=self.model.sigmas[0].item(), + sigma_max=self.model.sigmas[-1].item(), + rho=7., + device=self.device, + ) + + if ddim_num_steps >= self.karras_max: + print(f'>> Ksampler using model noise schedule (steps >= {self.karras_max})') + self.sigmas = self.model_sigmas + else: + print(f'>> Ksampler using karras noise schedule (steps < {self.karras_max})') + self.sigmas = self.karras_sigmas + + # ALERT: We are completely overriding the sample() method in the base class, which + # means that inpainting will not work. To get this to work we need to be able to + # modify the inner loop of k_heun, k_lms, etc, as is done in an ugly way + # in the lstein/k-diffusion branch. + + @torch.no_grad() + def decode( + self, + z_enc, + cond, + t_enc, + img_callback=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + use_original_steps=False, + init_latent = None, + mask = None, + **kwargs + ): + samples,_ = self.sample( + batch_size = 1, + S = t_enc, + x_T = z_enc, + shape = z_enc.shape[1:], + conditioning = cond, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning = unconditional_conditioning, + img_callback = img_callback, + x0 = init_latent, + mask = mask, + **kwargs + ) + return samples + + # this is a no-op, provided here for compatibility with ddim and plms samplers + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + return x0 + + # Most of these arguments are ignored and are only present for compatibility with + # other samples + @torch.no_grad() + def sample( + self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + attention_maps_callback=None, + quantize_x0=False, + eta=0.0, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + extra_conditioning_info: InvokeAIDiffuserComponent.ExtraConditioningInfo=None, + threshold = 0, + perlin = 0, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ **kwargs, + ): + def route_callback(k_callback_values): + if img_callback is not None: + img_callback(k_callback_values['x'],k_callback_values['i']) + + # if make_schedule() hasn't been called, we do it now + if self.sigmas is None: + self.make_schedule( + ddim_num_steps=S, + ddim_eta = eta, + verbose = False, + ) + + # sigmas are set up in make_schedule - we take the last steps items + sigmas = self.sigmas[-S-1:] + + # x_T is variation noise. When an init image is provided (in x0) we need to add + # more randomness to the starting image. + if x_T is not None: + if x0 is not None: + x = x_T + torch.randn_like(x0, device=self.device) * sigmas[0] + else: + x = x_T * sigmas[0] + else: + x = torch.randn([batch_size, *shape], device=self.device) * sigmas[0] + + model_wrap_cfg = CFGDenoiser(self.model, threshold=threshold, warmup=max(0.8*S,S-10)) + model_wrap_cfg.prepare_to_sample(S, extra_conditioning_info=extra_conditioning_info) + + # setup attention maps saving. checks for None are because there are multiple code paths to get here. + attention_map_saver = None + if attention_maps_callback is not None and extra_conditioning_info is not None: + eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1 + attention_map_token_ids = range(1, eos_token_index) + attention_map_saver = AttentionMapSaver(token_ids = attention_map_token_ids, latents_shape=x.shape[-2:]) + model_wrap_cfg.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) + + extra_args = { + 'cond': conditioning, + 'uncond': unconditional_conditioning, + 'cond_scale': unconditional_guidance_scale, + } + print(f'>> Sampling with k_{self.schedule} starting at step {len(self.sigmas)-S-1} of {len(self.sigmas)-1} ({S} new sampling steps)') + sampling_result = ( + K.sampling.__dict__[f'sample_{self.schedule}']( + model_wrap_cfg, x, sigmas, extra_args=extra_args, + callback=route_callback + ), + None, + ) + if attention_map_saver is not None: + attention_maps_callback(attention_map_saver) + return sampling_result + + # this code will support inpainting if and when ksampler API modified or + # a workaround is found. + @torch.no_grad() + def p_sample( + self, + img, + cond, + ts, + index, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + extra_conditioning_info=None, + **kwargs, + ): + if self.model_wrap is None: + self.model_wrap = CFGDenoiser(self.model) + extra_args = { + 'cond': cond, + 'uncond': unconditional_conditioning, + 'cond_scale': unconditional_guidance_scale, + } + if self.s_in is None: + self.s_in = img.new_ones([img.shape[0]]) + if self.ds is None: + self.ds = [] + + # terrible, confusing names here + steps = self.ddim_num_steps + t_enc = self.t_enc + + # sigmas is a full steps in length, but t_enc might + # be less. We start in the middle of the sigma array + # and work our way to the end after t_enc steps. + # index starts at t_enc and works its way to zero, + # so the actual formula for indexing into sigmas: + # sigma_index = (steps-index) + s_index = t_enc - index - 1 + self.model_wrap.prepare_to_sample(s_index, extra_conditioning_info=extra_conditioning_info) + img = K.sampling.__dict__[f'_{self.schedule}']( + self.model_wrap, + img, + self.sigmas, + s_index, + s_in = self.s_in, + ds = self.ds, + extra_args=extra_args, + ) + + return img, None, None + + # REVIEW THIS METHOD: it has never been tested. In particular, + # we should not be multiplying by self.sigmas[0] if we + # are at an intermediate step in img2img. See similar in + # sample() which does work. 
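As the comments in `sample()` above note, the k-diffusion wrapper walks only the last `S + 1` entries of its precomputed sigma table and scales the starting latent by the first of those noise levels (adding `x_T` on top of fresh noise in the img2img case). A minimal sketch of just that bookkeeping with plain `torch`; the linear `sigmas` ramp is a made-up stand-in, not what `make_schedule()` actually produces:

```python
import torch

S = 4                                     # number of new sampling steps requested
sigmas = torch.linspace(14.6, 0.0, 11)    # stand-in for self.sigmas (total steps + 1 entries)

active = sigmas[-S - 1:]                  # sample() slices the last S+1 noise levels
x = torch.randn(1, 4, 8, 8) * active[0]   # txt2img start: pure noise at the first active level
# img2img start would instead be: x = x_T + torch.randn_like(x0) * active[0]

print(active)                             # the noise levels the sampling loop will traverse
print(x.std().item())                     # roughly active[0]
```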
+ def get_initial_image(self,x_T,shape,steps): + print(f'WARNING: ksampler.get_initial_image(): get_initial_image needs testing') + x = (torch.randn(shape, device=self.device) * self.sigmas[0]) + if x_T is not None: + return x_T + x + else: + return x + + def prepare_to_sample(self,t_enc,**kwargs): + self.t_enc = t_enc + self.model_wrap = None + self.ds = None + self.s_in = None + + def q_sample(self,x0,ts): + ''' + Overrides parent method to return the q_sample of the inner model. + ''' + return self.model.inner_model.q_sample(x0,ts) + + def conditioning_key(self)->str: + return self.model.inner_model.model.conditioning_key + diff --git a/invokeai/models/diffusion/plms.py b/invokeai/models/diffusion/plms.py new file mode 100644 index 0000000000..4df703bed5 --- /dev/null +++ b/invokeai/models/diffusion/plms.py @@ -0,0 +1,146 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial +from ldm.invoke.devices import choose_torch_device +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent +from .sampler import Sampler +from ldm.modules.diffusionmodules.util import noise_like + + +class PLMSSampler(Sampler): + def __init__(self, model, schedule='linear', device=None, **kwargs): + super().__init__(model,schedule,model.num_timesteps, device) + + def prepare_to_sample(self, t_enc, **kwargs): + super().prepare_to_sample(t_enc, **kwargs) + + extra_conditioning_info = kwargs.get('extra_conditioning_info', None) + all_timesteps_count = kwargs.get('all_timesteps_count', t_enc) + + if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: + self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count) + else: + self.invokeai_diffuser.restore_default_cross_attention() + + + # this is the essential routine + @torch.no_grad() + def p_sample( + self, + x, # image, called 'img' elsewhere + c, # conditioning, called 'cond' elsewhere + t, # timesteps, called 'ts' elsewhere + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + old_eps=[], + t_next=None, + step_count:int=1000, # total number of steps + **kwargs, + ): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if ( + unconditional_conditioning is None + or unconditional_guidance_scale == 1.0 + ): + # damian0815 would like to know when/if this code path is used + e_t = self.model.apply_model(x, t, c) + else: + # step_index counts in the opposite direction to index + step_index = step_count-(index+1) + e_t = self.invokeai_diffuser.do_diffusion_step(x, t, + unconditional_conditioning, c, + unconditional_guidance_scale, + step_index=step_index) + if score_corrector is not None: + assert self.model.parameterization == 'eps' + e_t = score_corrector.modify_score( + self.model, e_t, x, t, c, **corrector_kwargs + ) + + return e_t + + alphas = ( + self.model.alphas_cumprod + if use_original_steps + else self.ddim_alphas + ) + alphas_prev = ( + self.model.alphas_cumprod_prev + if use_original_steps + else self.ddim_alphas_prev + ) + sqrt_one_minus_alphas = ( + self.model.sqrt_one_minus_alphas_cumprod + if use_original_steps + else self.ddim_sqrt_one_minus_alphas + ) + sigmas = ( + self.model.ddim_sigmas_for_original_num_steps + if use_original_steps + else self.ddim_sigmas + ) + + def 
get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full( + (b, 1, 1, 1), alphas_prev[index], device=device + ) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full( + (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device + ) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t + noise = ( + sigma_t + * noise_like(x.shape, device, repeat_noise) + * temperature + ) + if noise_dropout > 0.0: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = ( + 55 * e_t + - 59 * old_eps[-1] + + 37 * old_eps[-2] + - 9 * old_eps[-3] + ) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/invokeai/models/diffusion/sampler.py b/invokeai/models/diffusion/sampler.py new file mode 100644 index 0000000000..29479ff15f --- /dev/null +++ b/invokeai/models/diffusion/sampler.py @@ -0,0 +1,450 @@ +''' +invokeai.models.diffusion.sampler + +Base class for invokeai.models.diffusion.ddim, invokeai.models.diffusion.ksampler, etc +''' +import torch +import numpy as np +from tqdm import tqdm +from functools import partial +from ldm.invoke.devices import choose_torch_device +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent + +from ldm.modules.diffusionmodules.util import ( + make_ddim_sampling_parameters, + make_ddim_timesteps, + noise_like, + extract_into_tensor, +) + +class Sampler(object): + def __init__(self, model, schedule='linear', steps=None, device=None, **kwargs): + self.model = model + self.ddim_timesteps = None + self.ddpm_num_timesteps = steps + self.schedule = schedule + self.device = device or choose_torch_device() + self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model, + model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond)) + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device(self.device): + attr = attr.to(torch.float32).to(torch.device(self.device)) + setattr(self, name, attr) + + # This method was copied over from ddim.py and probably does stuff that is + # ddim-specific. Disentangle at some point. 
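+    # Roughly: make_schedule() selects ddim_num_steps timesteps out of the
+    # model's ddpm_num_timesteps and registers the derived alpha/sigma tensors
+    # (ddim_alphas, ddim_alphas_prev, ddim_sigmas, ...) on the sampler itself
+    # via register_buffer(), so that p_sample() implementations can index them
+    # by step.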
+ def make_schedule( + self, + ddim_num_steps, + ddim_discretize='uniform', + ddim_eta=0.0, + verbose=False, + ): + self.total_steps = ddim_num_steps + self.ddim_timesteps = make_ddim_timesteps( + ddim_discr_method=ddim_discretize, + num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps, + verbose=verbose, + ) + alphas_cumprod = self.model.alphas_cumprod + assert ( + alphas_cumprod.shape[0] == self.ddpm_num_timesteps + ), 'alphas have to be defined for each timestep' + to_torch = ( + lambda x: x.clone() + .detach() + .to(torch.float32) + .to(self.model.device) + ) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer( + 'alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev) + ) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer( + 'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())) + ) + self.register_buffer( + 'sqrt_one_minus_alphas_cumprod', + to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), + ) + self.register_buffer( + 'log_one_minus_alphas_cumprod', + to_torch(np.log(1.0 - alphas_cumprod.cpu())), + ) + self.register_buffer( + 'sqrt_recip_alphas_cumprod', + to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())), + ) + self.register_buffer( + 'sqrt_recipm1_alphas_cumprod', + to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), + ) + + # ddim sampling parameters + ( + ddim_sigmas, + ddim_alphas, + ddim_alphas_prev, + ) = make_ddim_sampling_parameters( + alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta, + verbose=verbose, + ) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer( + 'ddim_sqrt_one_minus_alphas', np.sqrt(1.0 - ddim_alphas) + ) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) + / (1 - self.alphas_cumprod) + * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) + ) + self.register_buffer( + 'ddim_sigmas_for_original_num_steps', + sigmas_for_original_sampling_steps, + ) + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return ( + extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) + * noise + ) + + @torch.no_grad() + def sample( + self, + S, # S is steps + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, # TODO: this is very confusing because it is called "step_callback" elsewhere. Change. + quantize_x0=False, + eta=0.0, + mask=None, + x0=None, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + verbose=False, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ **kwargs, + ): + + if conditioning is not None: + if isinstance(conditioning, dict): + ctmp = conditioning[list(conditioning.keys())[0]] + while isinstance(ctmp, list): + ctmp = ctmp[0] + cbs = ctmp.shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + # check to see if make_schedule() has run, and if not, run it + if self.ddim_timesteps is None: + self.make_schedule( + ddim_num_steps=S, + ddim_eta = eta, + verbose = False, + ) + + ts = self.get_timesteps(S) + + # sampling + C, H, W = shape + shape = (batch_size, C, H, W) + samples, intermediates = self.do_sampling( + conditioning, + shape, + timesteps=ts, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, + x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + steps=S, + **kwargs + ) + return samples, intermediates + + @torch.no_grad() + def do_sampling( + self, + cond, + shape, + timesteps=None, + x_T=None, + ddim_use_original_steps=False, + callback=None, + quantize_denoised=False, + mask=None, + x0=None, + img_callback=None, + log_every_t=100, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + steps=None, + **kwargs + ): + b = shape[0] + time_range = ( + list(reversed(range(0, timesteps))) + if ddim_use_original_steps + else np.flip(timesteps) + ) + + total_steps=steps + + iterator = tqdm( + time_range, + desc=f'{self.__class__.__name__}', + total=total_steps, + dynamic_ncols=True, + ) + old_eps = [] + self.prepare_to_sample(t_enc=total_steps,all_timesteps_count=steps,**kwargs) + img = self.get_initial_image(x_T,shape,total_steps) + + # probably don't need this at all + intermediates = {'x_inter': [img], 'pred_x0': [img]} + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full( + (b,), + step, + device=self.device, + dtype=torch.long + ) + ts_next = torch.full( + (b,), + time_range[min(i + 1, len(time_range) - 1)], + device=self.device, + dtype=torch.long, + ) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample( + x0, ts + ) # TODO: deterministic forward pass? 
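+                # Inpainting-style blend: where mask == 1 the latents are pinned
+                # to a freshly noised copy of x0 at this timestep; where mask == 0
+                # the sampler's current latents are kept and denoised as usual.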
+ img = img_orig * mask + (1.0 - mask) * img + + outs = self.p_sample( + img, + cond, + ts, + index=index, + use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, + temperature=temperature, + noise_dropout=noise_dropout, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, + t_next=ts_next, + step_count=steps + ) + img, pred_x0, e_t = outs + + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: + callback(i) + if img_callback: + img_callback(img,i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + # NOTE that decode() and sample() are almost the same code, and do the same thing. + # The variable names are changed in order to be confusing. + @torch.no_grad() + def decode( + self, + x_latent, + cond, + t_start, + img_callback=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + use_original_steps=False, + init_latent = None, + mask = None, + all_timesteps_count = None, + **kwargs + ): + timesteps = ( + np.arange(self.ddpm_num_timesteps) + if use_original_steps + else self.ddim_timesteps + ) + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f'>> Running {self.__class__.__name__} sampling starting at step {self.total_steps - t_start} of {self.total_steps} ({total_steps} new sampling steps)') + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + x0 = init_latent + self.prepare_to_sample(t_enc=total_steps, all_timesteps_count=all_timesteps_count, **kwargs) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full( + (x_latent.shape[0],), + step, + device=x_latent.device, + dtype=torch.long, + ) + + ts_next = torch.full( + (x_latent.shape[0],), + time_range[min(i + 1, len(time_range) - 1)], + device=self.device, + dtype=torch.long, + ) + + if mask is not None: + assert x0 is not None + xdec_orig = self.q_sample(x0, ts) # TODO: deterministic forward pass? + x_dec = xdec_orig * mask + (1.0 - mask) * x_dec + + outs = self.p_sample( + x_dec, + cond, + ts, + index=index, + use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + t_next = ts_next, + step_count=len(self.ddim_timesteps) + ) + + x_dec, pred_x0, e_t = outs + if img_callback: + img_callback(x_dec,i) + + return x_dec + + def get_initial_image(self,x_T,shape,timesteps=None): + if x_T is None: + return torch.randn(shape, device=self.device) + else: + return x_T + + def p_sample( + self, + img, + cond, + ts, + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + old_eps=None, + t_next=None, + steps=None, + ): + raise NotImplementedError("p_sample() must be implemented in a descendent class") + + def prepare_to_sample(self,t_enc,**kwargs): + ''' + Hook that will be called right before the very first invocation of p_sample() + to allow subclass to do additional initialization. 
t_enc corresponds to the actual + number of steps that will be run, and may be less than total steps if img2img is + active. + ''' + pass + + def get_timesteps(self,ddim_steps): + ''' + The ddim and plms samplers work on timesteps. This method is called after + ddim_timesteps are created in make_schedule(), and selects the portion of + timesteps that will be used for sampling, depending on the t_enc in img2img. + ''' + return self.ddim_timesteps[:ddim_steps] + + def q_sample(self,x0,ts): + ''' + Returns self.model.q_sample(x0,ts). Is overridden in the k* samplers to + return self.model.inner_model.q_sample(x0,ts) + ''' + return self.model.q_sample(x0,ts) + + def conditioning_key(self)->str: + return self.model.model.conditioning_key + + def uses_inpainting_model(self)->bool: + return self.conditioning_key() in ('hybrid','concat') + + def adjust_settings(self,**kwargs): + ''' + This is a catch-all method for adjusting any instance variables + after the sampler is instantiated. No type-checking performed + here, so use with care! + ''' + for k in kwargs.keys(): + try: + setattr(self,k,kwargs[k]) + except AttributeError: + print(f'** Warning: attempt to set unknown attribute {k} in sampler of type {type(self)}') diff --git a/invokeai/models/diffusion/shared_invokeai_diffusion.py b/invokeai/models/diffusion/shared_invokeai_diffusion.py new file mode 100644 index 0000000000..32b978f704 --- /dev/null +++ b/invokeai/models/diffusion/shared_invokeai_diffusion.py @@ -0,0 +1,491 @@ +from contextlib import contextmanager +from dataclasses import dataclass +from math import ceil +from typing import Callable, Optional, Union, Any, Dict + +import numpy as np +import torch +from diffusers.models.cross_attention import AttnProcessor +from typing_extensions import TypeAlias + +from ldm.invoke.globals import Globals +from .cross_attention_control import Arguments, \ + restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \ + CrossAttentionType, SwapCrossAttnContext +from .cross_attention_map_saving import AttentionMapSaver + +ModelForwardCallback: TypeAlias = Union[ + # x, t, conditioning, Optional[cross-attention kwargs] + Callable[[torch.Tensor, torch.Tensor, torch.Tensor, Optional[dict[str, Any]]], torch.Tensor], + Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor] +] + +@dataclass(frozen=True) +class PostprocessingSettings: + threshold: float + warmup: float + h_symmetry_time_pct: Optional[float] + v_symmetry_time_pct: Optional[float] + + +class InvokeAIDiffuserComponent: + ''' + The aim of this component is to provide a single place for code that can be applied identically to + all InvokeAI diffusion procedures. + + At the moment it includes the following features: + * Cross attention control ("prompt2prompt") + * Hybrid conditioning (used for inpainting) + ''' + debug_thresholding = False + sequential_guidance = False + + @dataclass + class ExtraConditioningInfo: + + tokens_count_including_eos_bos: int + cross_attention_control_args: Optional[Arguments] = None + + @property + def wants_cross_attention_control(self): + return self.cross_attention_control_args is not None + + + def __init__(self, model, model_forward_callback: ModelForwardCallback, + is_running_diffusers: bool=False, + ): + """ + :param model: the unet model to pass through to cross attention control + :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. 
most likely, this should simply call model.forward(x, sigma, conditioning) + """ + self.conditioning = None + self.model = model + self.is_running_diffusers = is_running_diffusers + self.model_forward_callback = model_forward_callback + self.cross_attention_control_context = None + self.sequential_guidance = Globals.sequential_guidance + + @contextmanager + def custom_attention_context(self, + extra_conditioning_info: Optional[ExtraConditioningInfo], + step_count: int): + do_swap = extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control + old_attn_processor = None + if do_swap: + old_attn_processor = self.override_cross_attention(extra_conditioning_info, + step_count=step_count) + try: + yield None + finally: + if old_attn_processor is not None: + self.restore_default_cross_attention(old_attn_processor) + # TODO resuscitate attention map saving + #self.remove_attention_map_saving() + + def override_cross_attention(self, conditioning: ExtraConditioningInfo, step_count: int) -> Dict[str, AttnProcessor]: + """ + setup cross attention .swap control. for diffusers this replaces the attention processor, so + the previous attention processor is returned so that the caller can restore it later. + """ + self.conditioning = conditioning + self.cross_attention_control_context = Context( + arguments=self.conditioning.cross_attention_control_args, + step_count=step_count + ) + return override_cross_attention(self.model, + self.cross_attention_control_context, + is_running_diffusers=self.is_running_diffusers) + + def restore_default_cross_attention(self, restore_attention_processor: Optional['AttnProcessor']=None): + self.conditioning = None + self.cross_attention_control_context = None + restore_default_cross_attention(self.model, + is_running_diffusers=self.is_running_diffusers, + restore_attention_processor=restore_attention_processor) + + def setup_attention_map_saving(self, saver: AttentionMapSaver): + def callback(slice, dim, offset, slice_size, key): + if dim is not None: + # sliced tokens attention map saving is not implemented + return + saver.add_attention_maps(slice, key) + + tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS) + for identifier, module in tokens_cross_attention_modules: + key = ('down' if identifier.startswith('down') else + 'up' if identifier.startswith('up') else + 'mid') + module.set_attention_slice_calculated_callback( + lambda slice, dim, offset, slice_size, key=key: callback(slice, dim, offset, slice_size, key)) + + def remove_attention_map_saving(self): + tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS) + for _, module in tokens_cross_attention_modules: + module.set_attention_slice_calculated_callback(None) + + def do_diffusion_step(self, x: torch.Tensor, sigma: torch.Tensor, + unconditioning: Union[torch.Tensor,dict], + conditioning: Union[torch.Tensor,dict], + unconditional_guidance_scale: float, + step_index: Optional[int]=None, + total_step_count: Optional[int]=None, + ): + """ + :param x: current latents + :param sigma: aka t, passed to the internal model to control how much denoising will occur + :param unconditioning: embeddings for unconditioned output. for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768] + :param conditioning: embeddings for conditioned output. 
for hybrid conditioning this is a dict of tensors [B x 77 x 768], otherwise a single tensor [B x 77 x 768] + :param unconditional_guidance_scale: aka CFG scale, controls how much effect the conditioning tensor has + :param step_index: counts upwards from 0 to (step_count-1) (as passed to setup_cross_attention_control, if using). May be called multiple times for a single step, therefore do not assume that its value will monotically increase. If None, will be estimated by comparing sigma against self.model.sigmas . + :return: the new latents after applying the model to x using unscaled unconditioning and CFG-scaled conditioning. + """ + + + cross_attention_control_types_to_do = [] + context: Context = self.cross_attention_control_context + if self.cross_attention_control_context is not None: + percent_through = self.calculate_percent_through(sigma, step_index, total_step_count) + cross_attention_control_types_to_do = context.get_active_cross_attention_control_types_for_step(percent_through) + + wants_cross_attention_control = (len(cross_attention_control_types_to_do) > 0) + wants_hybrid_conditioning = isinstance(conditioning, dict) + + if wants_hybrid_conditioning: + unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning(x, sigma, unconditioning, + conditioning) + elif wants_cross_attention_control: + unconditioned_next_x, conditioned_next_x = self._apply_cross_attention_controlled_conditioning(x, sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do) + elif self.sequential_guidance: + unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning_sequentially( + x, sigma, unconditioning, conditioning) + + else: + unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning( + x, sigma, unconditioning, conditioning) + + combined_next_x = self._combine(unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale) + + return combined_next_x + + def do_latent_postprocessing( + self, + postprocessing_settings: PostprocessingSettings, + latents: torch.Tensor, + sigma, + step_index, + total_step_count + ) -> torch.Tensor: + if postprocessing_settings is not None: + percent_through = self.calculate_percent_through(sigma, step_index, total_step_count) + latents = self.apply_threshold(postprocessing_settings, latents, percent_through) + latents = self.apply_symmetry(postprocessing_settings, latents, percent_through) + return latents + + def calculate_percent_through(self, sigma, step_index, total_step_count): + if step_index is not None and total_step_count is not None: + # 🧨diffusers codepath + percent_through = step_index / total_step_count # will never reach 1.0 - this is deliberate + else: + # legacy compvis codepath + # TODO remove when compvis codepath support is dropped + if step_index is None and sigma is None: + raise ValueError( + f"Either step_index or sigma is required when doing cross attention control, but both are None.") + percent_through = self.estimate_percent_through(step_index, sigma) + return percent_through + + # methods below are called from do_diffusion_step and should be considered private to this class. 
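+    # The helpers below all feed into the standard classifier-free guidance
+    # combination performed by _combine():
+    #
+    #     combined = unconditioned + guidance_scale * (conditioned - unconditioned)
+    #
+    # e.g. for a latent element with unconditioned 0.1, conditioned 0.3 and
+    # guidance_scale 7.5, the combined prediction is 0.1 + 7.5 * 0.2 = 1.6.
+    # They differ only in how the two forward passes are batched: together,
+    # sequentially (low memory), as hybrid dicts, or with cross-attention control.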
+ + def _apply_standard_conditioning(self, x, sigma, unconditioning, conditioning): + # fast batched path + x_twice = torch.cat([x] * 2) + sigma_twice = torch.cat([sigma] * 2) + both_conditionings = torch.cat([unconditioning, conditioning]) + both_results = self.model_forward_callback(x_twice, sigma_twice, both_conditionings) + unconditioned_next_x, conditioned_next_x = both_results.chunk(2) + if conditioned_next_x.device.type == 'mps': + # prevent a result filled with zeros. seems to be a torch bug. + conditioned_next_x = conditioned_next_x.clone() + return unconditioned_next_x, conditioned_next_x + + + def _apply_standard_conditioning_sequentially(self, x: torch.Tensor, sigma, unconditioning: torch.Tensor, conditioning: torch.Tensor): + # low-memory sequential path + unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning) + conditioned_next_x = self.model_forward_callback(x, sigma, conditioning) + if conditioned_next_x.device.type == 'mps': + # prevent a result filled with zeros. seems to be a torch bug. + conditioned_next_x = conditioned_next_x.clone() + return unconditioned_next_x, conditioned_next_x + + + def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning): + assert isinstance(conditioning, dict) + assert isinstance(unconditioning, dict) + x_twice = torch.cat([x] * 2) + sigma_twice = torch.cat([sigma] * 2) + both_conditionings = dict() + for k in conditioning: + if isinstance(conditioning[k], list): + both_conditionings[k] = [ + torch.cat([unconditioning[k][i], conditioning[k][i]]) + for i in range(len(conditioning[k])) + ] + else: + both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]]) + unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2) + return unconditioned_next_x, conditioned_next_x + + + def _apply_cross_attention_controlled_conditioning(self, + x: torch.Tensor, + sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do): + if self.is_running_diffusers: + return self._apply_cross_attention_controlled_conditioning__diffusers(x, sigma, unconditioning, + conditioning, + cross_attention_control_types_to_do) + else: + return self._apply_cross_attention_controlled_conditioning__compvis(x, sigma, unconditioning, conditioning, + cross_attention_control_types_to_do) + + def _apply_cross_attention_controlled_conditioning__diffusers(self, + x: torch.Tensor, + sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do): + context: Context = self.cross_attention_control_context + + cross_attn_processor_context = SwapCrossAttnContext(modified_text_embeddings=context.arguments.edited_conditioning, + index_map=context.cross_attention_index_map, + mask=context.cross_attention_mask, + cross_attention_types_to_do=[]) + # no cross attention for unconditioning (negative prompt) + unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning, + {"swap_cross_attn_context": cross_attn_processor_context}) + + # do requested cross attention types for conditioning (positive prompt) + cross_attn_processor_context.cross_attention_types_to_do = cross_attention_control_types_to_do + conditioned_next_x = self.model_forward_callback(x, sigma, conditioning, + {"swap_cross_attn_context": cross_attn_processor_context}) + return unconditioned_next_x, conditioned_next_x + + + def _apply_cross_attention_controlled_conditioning__compvis(self, x:torch.Tensor, sigma, unconditioning, conditioning, 
cross_attention_control_types_to_do): + # print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do) + # slower non-batched path (20% slower on mac MPS) + # We are only interested in using attention maps for conditioned_next_x, but batching them with generation of + # unconditioned_next_x causes attention maps to *also* be saved for the unconditioned_next_x. + # This messes app their application later, due to mismatched shape of dim 0 (seems to be 16 for batched vs. 8) + # (For the batched invocation the `wrangler` function gets attention tensor with shape[0]=16, + # representing batched uncond + cond, but then when it comes to applying the saved attention, the + # wrangler gets an attention tensor which only has shape[0]=8, representing just self.edited_conditionings.) + # todo: give CrossAttentionControl's `wrangler` function more info so it can work with a batched call as well. + context:Context = self.cross_attention_control_context + + try: + unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning) + + # process x using the original prompt, saving the attention maps + #print("saving attention maps for", cross_attention_control_types_to_do) + for ca_type in cross_attention_control_types_to_do: + context.request_save_attention_maps(ca_type) + _ = self.model_forward_callback(x, sigma, conditioning) + context.clear_requests(cleanup=False) + + # process x again, using the saved attention maps to control where self.edited_conditioning will be applied + #print("applying saved attention maps for", cross_attention_control_types_to_do) + for ca_type in cross_attention_control_types_to_do: + context.request_apply_saved_attention_maps(ca_type) + edited_conditioning = self.conditioning.cross_attention_control_args.edited_conditioning + conditioned_next_x = self.model_forward_callback(x, sigma, edited_conditioning) + context.clear_requests(cleanup=True) + + except: + context.clear_requests(cleanup=True) + raise + + return unconditioned_next_x, conditioned_next_x + + def _combine(self, unconditioned_next_x, conditioned_next_x, guidance_scale): + # to scale how much effect conditioning has, calculate the changes it does and then scale that + scaled_delta = (conditioned_next_x - unconditioned_next_x) * guidance_scale + combined_next_x = unconditioned_next_x + scaled_delta + return combined_next_x + + def apply_threshold( + self, + postprocessing_settings: PostprocessingSettings, + latents: torch.Tensor, + percent_through: float + ) -> torch.Tensor: + + if postprocessing_settings.threshold is None or postprocessing_settings.threshold == 0.0: + return latents + + threshold = postprocessing_settings.threshold + warmup = postprocessing_settings.warmup + + if percent_through < warmup: + current_threshold = threshold + threshold * 5 * (1 - (percent_through / warmup)) + else: + current_threshold = threshold + + if current_threshold <= 0: + return latents + + maxval = latents.max().item() + minval = latents.min().item() + + scale = 0.7 # default value from #395 + + if self.debug_thresholding: + std, mean = [i.item() for i in torch.std_mean(latents)] + outside = torch.count_nonzero((latents < -current_threshold) | (latents > current_threshold)) + print(f"\nThreshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})\n" + f" | min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}\n" + f" | {outside / latents.numel() * 100:.2f}% values outside threshold") + + if maxval < current_threshold and minval > 
-current_threshold: + return latents + + num_altered = 0 + + # MPS torch.rand_like is fine because torch.rand_like is wrapped in generate.py! + + if maxval > current_threshold: + latents = torch.clone(latents) + maxval = np.clip(maxval * scale, 1, current_threshold) + num_altered += torch.count_nonzero(latents > maxval) + latents[latents > maxval] = torch.rand_like(latents[latents > maxval]) * maxval + + if minval < -current_threshold: + latents = torch.clone(latents) + minval = np.clip(minval * scale, -current_threshold, -1) + num_altered += torch.count_nonzero(latents < minval) + latents[latents < minval] = torch.rand_like(latents[latents < minval]) * minval + + if self.debug_thresholding: + print(f" | min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})\n" + f" | {num_altered / latents.numel() * 100:.2f}% values altered") + + return latents + + def apply_symmetry( + self, + postprocessing_settings: PostprocessingSettings, + latents: torch.Tensor, + percent_through: float + ) -> torch.Tensor: + + # Reset our last percent through if this is our first step. + if percent_through == 0.0: + self.last_percent_through = 0.0 + + if postprocessing_settings is None: + return latents + + # Check for out of bounds + h_symmetry_time_pct = postprocessing_settings.h_symmetry_time_pct + if (h_symmetry_time_pct is not None and (h_symmetry_time_pct <= 0.0 or h_symmetry_time_pct > 1.0)): + h_symmetry_time_pct = None + + v_symmetry_time_pct = postprocessing_settings.v_symmetry_time_pct + if (v_symmetry_time_pct is not None and (v_symmetry_time_pct <= 0.0 or v_symmetry_time_pct > 1.0)): + v_symmetry_time_pct = None + + dev = latents.device.type + + latents.to(device='cpu') + + if ( + h_symmetry_time_pct != None and + self.last_percent_through < h_symmetry_time_pct and + percent_through >= h_symmetry_time_pct + ): + # Horizontal symmetry occurs on the 3rd dimension of the latent + width = latents.shape[3] + x_flipped = torch.flip(latents, dims=[3]) + latents = torch.cat([latents[:, :, :, 0:int(width/2)], x_flipped[:, :, :, int(width/2):int(width)]], dim=3) + + if ( + v_symmetry_time_pct != None and + self.last_percent_through < v_symmetry_time_pct and + percent_through >= v_symmetry_time_pct + ): + # Vertical symmetry occurs on the 2nd dimension of the latent + height = latents.shape[2] + y_flipped = torch.flip(latents, dims=[2]) + latents = torch.cat([latents[:, :, 0:int(height / 2)], y_flipped[:, :, int(height / 2):int(height)]], dim=2) + + self.last_percent_through = percent_through + return latents.to(device=dev) + + def estimate_percent_through(self, step_index, sigma): + if step_index is not None and self.cross_attention_control_context is not None: + # percent_through will never reach 1.0 (but this is intended) + return float(step_index) / float(self.cross_attention_control_context.step_count) + # find the best possible index of the current sigma in the sigma sequence + smaller_sigmas = torch.nonzero(self.model.sigmas <= sigma) + sigma_index = smaller_sigmas[-1].item() if smaller_sigmas.shape[0] > 0 else 0 + # flip because sigmas[0] is for the fully denoised image + # percent_through must be <1 + return 1.0 - float(sigma_index + 1) / float(self.model.sigmas.shape[0]) + # print('estimated percent_through', percent_through, 'from sigma', sigma.item()) + + + # todo: make this work + @classmethod + def apply_conjunction(cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale): + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) # aka sigmas + + deltas = None + uncond_latents 
= None + weighted_cond_list = c_or_weighted_c_list if type(c_or_weighted_c_list) is list else [(c_or_weighted_c_list, 1)] + + # below is fugly omg + num_actual_conditionings = len(c_or_weighted_c_list) + conditionings = [uc] + [c for c,weight in weighted_cond_list] + weights = [1] + [weight for c,weight in weighted_cond_list] + chunk_count = ceil(len(conditionings)/2) + deltas = None + for chunk_index in range(chunk_count): + offset = chunk_index*2 + chunk_size = min(2, len(conditionings)-offset) + + if chunk_size == 1: + c_in = conditionings[offset] + latents_a = forward_func(x_in[:-1], t_in[:-1], c_in) + latents_b = None + else: + c_in = torch.cat(conditionings[offset:offset+2]) + latents_a, latents_b = forward_func(x_in, t_in, c_in).chunk(2) + + # first chunk is guaranteed to be 2 entries: uncond_latents + first conditioining + if chunk_index == 0: + uncond_latents = latents_a + deltas = latents_b - uncond_latents + else: + deltas = torch.cat((deltas, latents_a - uncond_latents)) + if latents_b is not None: + deltas = torch.cat((deltas, latents_b - uncond_latents)) + + # merge the weighted deltas together into a single merged delta + per_delta_weights = torch.tensor(weights[1:], dtype=deltas.dtype, device=deltas.device) + normalize = False + if normalize: + per_delta_weights /= torch.sum(per_delta_weights) + reshaped_weights = per_delta_weights.reshape(per_delta_weights.shape + (1, 1, 1)) + deltas_merged = torch.sum(deltas * reshaped_weights, dim=0, keepdim=True) + + # old_return_value = super().forward(x, sigma, uncond, cond, cond_scale) + # assert(0 == len(torch.nonzero(old_return_value - (uncond_latents + deltas_merged * cond_scale)))) + + return uncond_latents + deltas_merged * global_guidance_scale diff --git a/invokeai/models/model_manager.py b/invokeai/models/model_manager.py new file mode 100644 index 0000000000..2a0a8ec933 --- /dev/null +++ b/invokeai/models/model_manager.py @@ -0,0 +1,1221 @@ +""" +Manage a cache of Stable Diffusion model files for fast switching. +They are moved between GPU and CPU as necessary. If CPU memory falls +below a preset minimum, the least recently used model will be +cleared and loaded from disk when next needed. 
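+
+Typical use looks roughly like the sketch below; the config path and model
+name are illustrative placeholders, not defaults shipped with this module:
+
+    from omegaconf import OmegaConf
+    config = OmegaConf.load("configs/models.yaml")
+    manager = ModelManager(config, precision="float16", max_loaded_models=2)
+    entry = manager.get_model("stable-diffusion-1.5")
+    pipeline = entry["model"]   # entry also carries "width", "height", "hash"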
+""" +from __future__ import annotations + +import contextlib +import gc +import hashlib +import os +import re +import sys +import textwrap +import time +import warnings +from enum import Enum +from pathlib import Path +from shutil import move, rmtree +from typing import Any, Optional, Union + +import safetensors +import safetensors.torch +import torch +import transformers +from diffusers import AutoencoderKL +from diffusers import logging as dlogging +from huggingface_hub import scan_cache_dir +from omegaconf import OmegaConf +from omegaconf.dictconfig import DictConfig +from picklescan.scanner import scan_file_path + +from ldm.invoke.devices import CPU_DEVICE +from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from ldm.invoke.globals import Globals, global_cache_dir +from ldm.util import ( + ask_user, + download_with_resume, + url_attachment_name, +) + + +class SDLegacyType(Enum): + V1 = 1 + V1_INPAINT = 2 + V2 = 3 + UNKNOWN = 99 + +DEFAULT_MAX_MODELS = 2 +VAE_TO_REPO_ID = { # hack, see note in convert_and_import() + "vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse", +} + +class ModelManager(object): + def __init__( + self, + config: OmegaConf, + device_type: torch.device = CPU_DEVICE, + precision: str = "float16", + max_loaded_models=DEFAULT_MAX_MODELS, + sequential_offload=False, + ): + """ + Initialize with the path to the models.yaml config file, + the torch device type, and precision. The optional + min_avail_mem argument specifies how much unused system + (CPU) memory to preserve. The cache of models in RAM will + grow until this value is approached. Default is 2G. + """ + # prevent nasty-looking CLIP log message + transformers.logging.set_verbosity_error() + self.config = config + self.precision = precision + self.device = torch.device(device_type) + self.max_loaded_models = max_loaded_models + self.models = {} + self.stack = [] # this is an LRU FIFO + self.current_model = None + self.sequential_offload = sequential_offload + + def valid_model(self, model_name: str) -> bool: + """ + Given a model name, returns True if it is a valid + identifier. + """ + return model_name in self.config + + def get_model(self, model_name: str): + """ + Given a model named identified in models.yaml, return + the model object. If in RAM will load into GPU VRAM. + If on disk, will load from there. + """ + if not self.valid_model(model_name): + print( + f'** "{model_name}" is not a known model name. 
Please check your models.yaml file' + ) + return self.current_model + + if self.current_model != model_name: + if model_name not in self.models: # make room for a new one + self._make_cache_room() + self.offload_model(self.current_model) + + if model_name in self.models: + requested_model = self.models[model_name]["model"] + print(f">> Retrieving model {model_name} from system RAM cache") + self.models[model_name]["model"] = self._model_from_cpu(requested_model) + width = self.models[model_name]["width"] + height = self.models[model_name]["height"] + hash = self.models[model_name]["hash"] + + else: # we're about to load a new model, so potentially offload the least recently used one + requested_model, width, height, hash = self._load_model(model_name) + self.models[model_name] = { + "model": requested_model, + "width": width, + "height": height, + "hash": hash, + } + + self.current_model = model_name + self._push_newest_model(model_name) + return { + "model": requested_model, + "width": width, + "height": height, + "hash": hash, + } + + def default_model(self) -> str | None: + """ + Returns the name of the default model, or None + if none is defined. + """ + for model_name in self.config: + if self.config[model_name].get("default"): + return model_name + return list(self.config.keys())[0] # first one + + def set_default_model(self, model_name: str) -> None: + """ + Set the default model. The change will not take + effect until you call model_manager.commit() + """ + assert model_name in self.model_names(), f"unknown model '{model_name}'" + + config = self.config + for model in config: + config[model].pop("default", None) + config[model_name]["default"] = True + + def model_info(self, model_name: str) -> dict: + """ + Given a model name returns the OmegaConf (dict-like) object describing it. + """ + if model_name not in self.config: + return None + return self.config[model_name] + + def model_names(self) -> list[str]: + """ + Return a list consisting of all the names of models defined in models.yaml + """ + return list(self.config.keys()) + + def is_legacy(self, model_name: str) -> bool: + """ + Return true if this is a legacy (.ckpt) model + """ + # if we are converting legacy files automatically, then + # there are no legacy ckpts! 
+ if Globals.ckpt_convert: + return False + info = self.model_info(model_name) + if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")): + return True + return False + + def list_models(self) -> dict: + """ + Return a dict of models in the format: + { model_name1: {'status': ('active'|'cached'|'not loaded'), + 'description': description, + 'format': ('ckpt'|'diffusers'|'vae'), + }, + model_name2: { etc } + Please use model_manager.models() to get all the model names, + model_manager.model_info('model-name') to get the stanza for the model + named 'model-name', and model_manager.config to get the full OmegaConf + object derived from models.yaml + """ + models = {} + for name in sorted(self.config, key=str.casefold): + stanza = self.config[name] + + # don't include VAEs in listing (legacy style) + if "config" in stanza and "/VAE/" in stanza["config"]: + continue + + models[name] = dict() + format = stanza.get("format", "ckpt") # Determine Format + + # Common Attribs + description = stanza.get("description", None) + if self.current_model == name: + status = "active" + elif name in self.models: + status = "cached" + else: + status = "not loaded" + models[name].update( + description=description, + format=format, + status=status, + ) + + # Checkpoint Config Parse + if format == "ckpt": + models[name].update( + config=str(stanza.get("config", None)), + weights=str(stanza.get("weights", None)), + vae=str(stanza.get("vae", None)), + width=str(stanza.get("width", 512)), + height=str(stanza.get("height", 512)), + ) + + # Diffusers Config Parse + if vae := stanza.get("vae", None): + if isinstance(vae, DictConfig): + vae = dict( + repo_id=str(vae.get("repo_id", None)), + path=str(vae.get("path", None)), + subfolder=str(vae.get("subfolder", None)), + ) + + if format == "diffusers": + models[name].update( + vae=vae, + repo_id=str(stanza.get("repo_id", None)), + path=str(stanza.get("path", None)), + ) + + return models + + def print_models(self) -> None: + """ + Print a table of models, their descriptions, and load status + """ + models = self.list_models() + for name in models: + if models[name]["format"] == "vae": + continue + line = f'{name:25s} {models[name]["status"]:>10s} {models[name]["format"]:10s} {models[name]["description"]}' + if models[name]["status"] == "active": + line = f"\033[1m{line}\033[0m" + print(line) + + def del_model(self, model_name: str, delete_files: bool = False) -> None: + """ + Delete the named model. + """ + omega = self.config + if model_name not in omega: + print(f"** Unknown model {model_name}") + return + # save these for use in deletion later + conf = omega[model_name] + repo_id = conf.get("repo_id", None) + path = self._abs_path(conf.get("path", None)) + weights = self._abs_path(conf.get("weights", None)) + + del omega[model_name] + if model_name in self.stack: + self.stack.remove(model_name) + if delete_files: + if weights: + print(f"** deleting file {weights}") + Path(weights).unlink(missing_ok=True) + elif path: + print(f"** deleting directory {path}") + rmtree(path, ignore_errors=True) + elif repo_id: + print(f"** deleting the cached model directory for {repo_id}") + self._delete_model_from_cache(repo_id) + + def add_model( + self, model_name: str, model_attributes: dict, clobber: bool = False + ) -> None: + """ + Update the named model with a dictionary of attributes. Will fail with an + assertion error if the name already exists. Pass clobber=True to overwrite. 
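+        A minimal ckpt-style attribute dictionary looks roughly like the
+        following (the values are illustrative, not defaults):
+
+            {"format": "ckpt",
+             "description": "my fine-tuned model",
+             "weights": "models/ldm/stable-diffusion-v1/mine.ckpt",
+             "config": "configs/stable-diffusion/v1-inference.yaml",
+             "width": 512,
+             "height": 512}
+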
+ On a successful update, the config will be changed in memory and the + method will return True. Will fail with an assertion error if provided + attributes are incorrect or the model name is missing. + """ + omega = self.config + assert "format" in model_attributes, 'missing required field "format"' + if model_attributes["format"] == "diffusers": + assert ( + "description" in model_attributes + ), 'required field "description" is missing' + assert ( + "path" in model_attributes or "repo_id" in model_attributes + ), 'model must have either the "path" or "repo_id" fields defined' + else: + for field in ("description", "weights", "height", "width", "config"): + assert field in model_attributes, f"required field {field} is missing" + + assert ( + clobber or model_name not in omega + ), f'attempt to overwrite existing model definition "{model_name}"' + + omega[model_name] = model_attributes + + if "weights" in omega[model_name]: + omega[model_name]["weights"].replace("\\", "/") + + if clobber: + self._invalidate_cached_model(model_name) + + def _load_model(self, model_name: str): + """Load and initialize the model from configuration variables passed at object creation time""" + if model_name not in self.config: + print( + f'"{model_name}" is not a known model name. Please check your models.yaml file' + ) + return + + mconfig = self.config[model_name] + + # for usage statistics + if self._has_cuda(): + torch.cuda.reset_peak_memory_stats() + torch.cuda.empty_cache() + + tic = time.time() + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + model, width, height, model_hash = self._load_diffusers_model(mconfig) + + # usage statistics + toc = time.time() + print(">> Model loaded in", "%4.2fs" % (toc - tic)) + if self._has_cuda(): + print( + ">> Max VRAM used to load the model:", + "%4.2fG" % (torch.cuda.max_memory_allocated() / 1e9), + "\n>> Current VRAM usage:" + "%4.2fG" % (torch.cuda.memory_allocated() / 1e9), + ) + return model, width, height, model_hash + + def _load_diffusers_model(self, mconfig): + name_or_path = self.model_name_or_path(mconfig) + using_fp16 = self.precision == "float16" + + print(f">> Loading diffusers model from {name_or_path}") + if using_fp16: + print(" | Using faster float16 precision") + else: + print(" | Using more accurate float32 precision") + + # TODO: scan weights maybe? 
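+        # Assemble the kwargs for from_pretrained().  When float16 is requested
+        # the loop below first tries the repository's "fp16" revision and falls
+        # back to the default weights if that revision is unavailable.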
+ pipeline_args: dict[str, Any] = dict( + safety_checker=None, local_files_only=not Globals.internet_available + ) + if "vae" in mconfig and mconfig["vae"] is not None: + if vae := self._load_vae(mconfig["vae"]): + pipeline_args.update(vae=vae) + if not isinstance(name_or_path, Path): + pipeline_args.update(cache_dir=global_cache_dir("diffusers")) + if using_fp16: + pipeline_args.update(torch_dtype=torch.float16) + fp_args_list = [{"revision": "fp16"}, {}] + else: + fp_args_list = [{}] + + verbosity = dlogging.get_verbosity() + dlogging.set_verbosity_error() + + pipeline = None + for fp_args in fp_args_list: + try: + pipeline = StableDiffusionGeneratorPipeline.from_pretrained( + name_or_path, + **pipeline_args, + **fp_args, + ) + except OSError as e: + if str(e).startswith("fp16 is not a valid"): + pass + else: + print( + f"** An unexpected error occurred while downloading the model: {e})" + ) + if pipeline: + break + + dlogging.set_verbosity(verbosity) + assert pipeline is not None, OSError(f'"{name_or_path}" could not be loaded') + + if self.sequential_offload: + pipeline.enable_offload_submodels(self.device) + else: + pipeline.to(self.device) + + model_hash = self._diffuser_sha256(name_or_path) + + # square images??? + width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor + height = width + + print(f" | Default image dimensions = {width} x {height}") + + return pipeline, width, height, model_hash + + def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path: + if isinstance(model_name, DictConfig) or isinstance(model_name, dict): + mconfig = model_name + elif model_name in self.config: + mconfig = self.config[model_name] + else: + raise ValueError( + f'"{model_name}" is not a known model name. Please check your models.yaml file' + ) + + if "path" in mconfig and mconfig["path"] is not None: + path = Path(mconfig["path"]) + if not path.is_absolute(): + path = Path(Globals.root, path).resolve() + return path + elif "repo_id" in mconfig: + return mconfig["repo_id"] + else: + raise ValueError("Model config must specify either repo_id or path.") + + def offload_model(self, model_name: str) -> None: + """ + Offload the indicated model to CPU. Will call + _make_cache_room() to free space if needed. + """ + if model_name not in self.models: + return + + print(f">> Offloading {model_name} to CPU") + model = self.models[model_name]["model"] + self.models[model_name]["model"] = self._model_to_cpu(model) + + gc.collect() + if self._has_cuda(): + torch.cuda.empty_cache() + + def scan_model(self, model_name, checkpoint): + """ + Apply picklescanner to the indicated checkpoint and issue a warning + and option to exit if an infected file is identified. + """ + # scan model + print(f">> Scanning Model: {model_name}") + scan_result = scan_file_path(checkpoint) + if scan_result.infected_files != 0: + if scan_result.infected_files == 1: + print(f"\n### Issues Found In Model: {scan_result.issues_count}") + print( + "### WARNING: The model you are trying to load seems to be infected." + ) + print("### For your safety, InvokeAI will not load this model.") + print("### Please use checkpoints from trusted sources.") + print("### Exiting InvokeAI") + sys.exit() + else: + print( + "\n### WARNING: InvokeAI was unable to scan the model you are using." 
+ ) + model_safe_check_fail = ask_user( + "Do you want to to continue loading the model?", ["y", "n"] + ) + if model_safe_check_fail.lower() != "y": + print("### Exiting InvokeAI") + sys.exit() + else: + print(">> Model scanned ok") + + def import_diffuser_model( + self, + repo_or_path: Union[str, Path], + model_name: str = None, + model_description: str = None, + vae: dict = None, + commit_to_conf: Path = None, + ) -> bool: + """ + Attempts to install the indicated diffuser model and returns True if successful. + + "repo_or_path" can be either a repo-id or a path-like object corresponding to the + top of a downloaded diffusers directory. + + You can optionally provide a model name and/or description. If not provided, + then these will be derived from the repo name. If you provide a commit_to_conf + path to the configuration file, then the new entry will be committed to the + models.yaml file. + """ + model_name = model_name or Path(repo_or_path).stem + model_description = model_description or f"Imported diffusers model {model_name}" + new_config = dict( + description=model_description, + vae=vae, + format="diffusers", + ) + if isinstance(repo_or_path, Path) and repo_or_path.exists(): + new_config.update(path=str(repo_or_path)) + else: + new_config.update(repo_id=repo_or_path) + + self.add_model(model_name, new_config, True) + if commit_to_conf: + self.commit(commit_to_conf) + return model_name + + def import_ckpt_model( + self, + weights: Union[str, Path], + config: Union[str, Path] = "configs/stable-diffusion/v1-inference.yaml", + vae: Union[str, Path] = None, + model_name: str = None, + model_description: str = None, + commit_to_conf: Path = None, + ) -> str: + """ + Attempts to install the indicated ckpt file and returns True if successful. + + "weights" can be either a path-like object corresponding to a local .ckpt file + or a http/https URL pointing to a remote model. + + "vae" is a Path or str object pointing to a ckpt or safetensors file to be used + as the VAE for this model. + + "config" is the model config file to use with this ckpt file. It defaults to + v1-inference.yaml. If a URL is provided, the config will be downloaded. + + You can optionally provide a model name and/or description. If not provided, + then these will be derived from the weight file name. If you provide a commit_to_conf + path to the configuration file, then the new entry will be committed to the + models.yaml file. + + Return value is the name of the imported file, or None if an error occurred. 
+ """ + if str(weights).startswith(("http:", "https:")): + model_name = model_name or url_attachment_name(weights) + + weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1") + config_path = self._resolve_path(config, "configs/stable-diffusion") + + if weights_path is None or not weights_path.exists(): + return + if config_path is None or not config_path.exists(): + return + + model_name = ( + model_name or Path(weights).stem + ) # note this gives ugly pathnames if used on a URL without a Content-Disposition header + model_description = ( + model_description or f"Imported stable diffusion weights file {model_name}" + ) + new_config = dict( + weights=str(weights_path), + config=str(config_path), + description=model_description, + format="ckpt", + width=512, + height=512, + ) + if vae: + new_config["vae"] = vae + self.add_model(model_name, new_config, True) + if commit_to_conf: + self.commit(commit_to_conf) + return model_name + + @classmethod + def probe_model_type(self, checkpoint: dict) -> SDLegacyType: + """ + Given a pickle or safetensors model object, probes contents + of the object and returns an SDLegacyType indicating its + format. Valid return values include: + SDLegacyType.V1 + SDLegacyType.V1_INPAINT + SDLegacyType.V2 + SDLegacyType.UNKNOWN + """ + key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" + if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: + return SDLegacyType.V2 + + try: + state_dict = checkpoint.get("state_dict") or checkpoint + in_channels = state_dict[ + "model.diffusion_model.input_blocks.0.0.weight" + ].shape[1] + if in_channels == 9: + return SDLegacyType.V1_INPAINT + elif in_channels == 4: + return SDLegacyType.V1 + else: + return SDLegacyType.UNKNOWN + except KeyError: + return SDLegacyType.UNKNOWN + + def heuristic_import( + self, + path_url_or_repo: str, + convert: bool = True, + model_name: str = None, + description: str = None, + commit_to_conf: Path = None, + ) -> str: + """ + Accept a string which could be: + - a HF diffusers repo_id + - a URL pointing to a legacy .ckpt or .safetensors file + - a local path pointing to a legacy .ckpt or .safetensors file + - a local directory containing .ckpt and .safetensors files + - a local directory containing a diffusers model + + After determining the nature of the model and downloading it + (if necessary), the file is probed to determine the correct + configuration file (if needed) and it is imported. + + The model_name and/or description can be provided. If not, they will + be generated automatically. + + If convert is true, legacy models will be converted to diffusers + before importing. + + If commit_to_conf is provided, the newly loaded model will be written + to the `models.yaml` file at the indicated path. Otherwise, the changes + will only remain in memory. + + The (potentially derived) name of the model is returned on success, or None + on failure. When multiple models are added from a directory, only the last + imported one is returned. 
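+
+        For example (the URL and config path below are placeholders):
+
+            name = manager.heuristic_import(
+                "https://example.com/some-model.safetensors",
+                commit_to_conf=Path(Globals.root, "configs/models.yaml"),
+            )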
+ """ + model_path: Path = None + thing = path_url_or_repo # to save typing + + print(f">> Probing {thing} for import") + + if thing.startswith(("http:", "https:", "ftp:")): + print(f" | {thing} appears to be a URL") + model_path = self._resolve_path( + thing, "models/ldm/stable-diffusion-v1" + ) # _resolve_path does a download if needed + + elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")): + if Path(thing).stem in ["model", "diffusion_pytorch_model"]: + print( + f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import" + ) + return + else: + print(f" | {thing} appears to be a checkpoint file on disk") + model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1") + + elif Path(thing).is_dir() and Path(thing, "model_index.json").exists(): + print(f" | {thing} appears to be a diffusers file on disk") + model_name = self.import_diffuser_model( + thing, + vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), + model_name=model_name, + description=description, + commit_to_conf=commit_to_conf, + ) + + elif Path(thing).is_dir(): + if (Path(thing) / "model_index.json").exists(): + print(f" | {thing} appears to be a diffusers model.") + model_name = self.import_diffuser_model( + thing, commit_to_conf=commit_to_conf + ) + else: + print( + f" |{thing} appears to be a directory. Will scan for models to import" + ) + for m in list(Path(thing).rglob("*.ckpt")) + list( + Path(thing).rglob("*.safetensors") + ): + if model_name := self.heuristic_import( + str(m), convert, commit_to_conf=commit_to_conf + ): + print(f" >> {model_name} successfully imported") + return model_name + + elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing): + print(f" | {thing} appears to be a HuggingFace diffusers repo_id") + model_name = self.import_diffuser_model( + thing, commit_to_conf=commit_to_conf + ) + pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name]) + return model_name + else: + print( + f"** {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id" + ) + + # Model_path is set in the event of a legacy checkpoint file. + # If not set, we're all done + if not model_path: + return + + if model_path.stem in self.config: # already imported + print(" | Already imported. Skipping") + return + + # another round of heuristics to guess the correct config file. + checkpoint = ( + safetensors.torch.load_file(model_path) + if model_path.suffix == ".safetensors" + else torch.load(model_path) + ) + model_type = self.probe_model_type(checkpoint) + + model_config_file = None + if model_type == SDLegacyType.V1: + print(" | SD-v1 model detected") + model_config_file = Path( + Globals.root, "configs/stable-diffusion/v1-inference.yaml" + ) + elif model_type == SDLegacyType.V1_INPAINT: + print(" | SD-v1 inpainting model detected") + model_config_file = Path( + Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml" + ) + elif model_type == SDLegacyType.V2: + print( + " | SD-v2 model detected; model will be converted to diffusers format" + ) + model_config_file = Path( + Globals.root, "configs/stable-diffusion/v2-inference-v.yaml" + ) + convert = True + else: + print( + f"** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. 
Skipping import" + ) + return + + diffuser_path = Path( + Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem + ) + model_name = self.convert_and_import( + model_path, + diffusers_path=diffuser_path, + vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), + model_name=model_name, + model_description=description, + original_config_file=model_config_file, + commit_to_conf=commit_to_conf, + ) + if commit_to_conf: + self.commit(commit_to_conf) + return model_name + + def convert_and_import( + self, + ckpt_path: Path, + diffusers_path: Path, + model_name=None, + model_description=None, + vae=None, + original_config_file: Path = None, + commit_to_conf: Path = None, + ) -> str: + """ + Convert a legacy ckpt weights file to diffuser model and import + into models.yaml. + """ + ckpt_path = self._resolve_path(ckpt_path, "models/ldm/stable-diffusion-v1") + if original_config_file: + original_config_file = self._resolve_path( + original_config_file, "configs/stable-diffusion" + ) + + new_config = None + + from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser + + if diffusers_path.exists(): + print( + f"ERROR: The path {str(diffusers_path)} already exists. Please move or remove it and try again." + ) + return + + model_name = model_name or diffusers_path.name + model_description = model_description or f"Optimized version of {model_name}" + print(f">> Optimizing {model_name} (30-60s)") + try: + # By passing the specified VAE to the conversion function, the autoencoder + # will be built into the model rather than tacked on afterward via the config file + vae_model = self._load_vae(vae) if vae else None + convert_ckpt_to_diffuser( + ckpt_path, + diffusers_path, + extract_ema=True, + original_config_file=original_config_file, + vae=vae_model, + ) + print( + f" | Success. Optimized model is now located at {str(diffusers_path)}" + ) + print(f" | Writing new config file entry for {model_name}") + new_config = dict( + path=str(diffusers_path), + description=model_description, + format="diffusers", + ) + if model_name in self.config: + self.del_model(model_name) + self.add_model(model_name, new_config, True) + if commit_to_conf: + self.commit(commit_to_conf) + print(">> Conversion succeeded") + except Exception as e: + print(f"** Conversion failed: {str(e)}") + print( + "** If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)" + ) + + return model_name + + def search_models(self, search_folder): + print(f">> Finding Models In: {search_folder}") + models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") + models_folder_safetensors = Path(search_folder).glob("**/*.safetensors") + + ckpt_files = [x for x in models_folder_ckpt if x.is_file()] + safetensor_files = [x for x in models_folder_safetensors if x.is_file()] + + files = ckpt_files + safetensor_files + + found_models = [] + for file in files: + location = str(file.resolve()).replace("\\", "/") + if ( + "model.safetensors" not in location + and "diffusion_pytorch_model.safetensors" not in location + ): + found_models.append({"name": file.stem, "location": location}) + + return search_folder, found_models + + def _choose_diffusers_vae( + self, model_name: str, vae: str = None + ) -> Union[dict, str]: + # In the event that the original entry is using a custom ckpt VAE, we try to + # map that VAE onto a diffuser VAE using a hard-coded dictionary. 
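+        # (VAE_TO_REPO_ID is that dictionary; illustratively, a legacy
+        # "vae-ft-mse-840000-ema-pruned" checkpoint would map onto the
+        # "stabilityai/sd-vae-ft-mse" diffusers repo -- see the table for the
+        # actual entries.)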
+ # I would prefer to do this differently: We load the ckpt model into memory, swap the + # VAE in memory, and then pass that to convert_ckpt_to_diffuser() so that the swapped + # VAE is built into the model. However, when I tried this I got obscure key errors. + if vae: + return vae + if model_name in self.config and ( + vae_ckpt_path := self.model_info(model_name).get("vae", None) + ): + vae_basename = Path(vae_ckpt_path).stem + diffusers_vae = None + if diffusers_vae := VAE_TO_REPO_ID.get(vae_basename, None): + print( + f">> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version" + ) + vae = {"repo_id": diffusers_vae} + else: + print( + f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown' + ) + print( + '** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config' + ) + vae = {"repo_id": "stabilityai/sd-vae-ft-mse"} + return vae + + def _make_cache_room(self) -> None: + num_loaded_models = len(self.models) + if num_loaded_models >= self.max_loaded_models: + least_recent_model = self._pop_oldest_model() + print( + f">> Cache limit (max={self.max_loaded_models}) reached. Purging {least_recent_model}" + ) + if least_recent_model is not None: + del self.models[least_recent_model] + gc.collect() + + def print_vram_usage(self) -> None: + if self._has_cuda: + print( + ">> Current VRAM usage: ", + "%4.2fG" % (torch.cuda.memory_allocated() / 1e9), + ) + + def commit(self, config_file_path: str) -> None: + """ + Write current configuration out to the indicated file. + """ + yaml_str = OmegaConf.to_yaml(self.config) + if not os.path.isabs(config_file_path): + config_file_path = os.path.normpath( + os.path.join(Globals.root, config_file_path) + ) + tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp") + with open(tmpfile, "w", encoding="utf-8") as outfile: + outfile.write(self.preamble()) + outfile.write(yaml_str) + os.replace(tmpfile, config_file_path) + + def preamble(self) -> str: + """ + Returns the preamble for the config file. + """ + return textwrap.dedent( + """\ + # This file describes the alternative machine learning models + # available to InvokeAI script. + # + # To add a new model, follow the examples below. Each + # model requires a model config file, a weights file, + # and the width and height of the images it + # was trained on. + """ + ) + + @classmethod + def migrate_models(cls): + """ + Migrate the ~/invokeai/models directory from the legacy format used through 2.2.5 + to the 2.3.0 "diffusers" version. This should be a one-time operation, called at + script startup time. + """ + # Three transformer models to check: bert, clip and safety checker + legacy_locations = [ + Path( + "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker" + ), + Path("bert-base-uncased/models--bert-base-uncased"), + Path( + "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14" + ), + ] + models_dir = Path(Globals.root, "models") + legacy_layout = False + for model in legacy_locations: + legacy_layout = legacy_layout or Path(models_dir, model).exists() + if not legacy_layout: + return + + print( + "** Legacy version <= 2.2.5 model directory layout detected. Reorganizing." 
+ ) + print("** This is a quick one-time operation.") + + # transformer files get moved into the hub directory + if cls._is_huggingface_hub_directory_present(): + hub = global_cache_dir("hub") + else: + hub = models_dir / "hub" + + os.makedirs(hub, exist_ok=True) + for model in legacy_locations: + source = models_dir / model + dest = hub / model.stem + print(f"** {source} => {dest}") + if source.exists(): + if dest.exists(): + rmtree(source) + else: + move(source, dest) + + # anything else gets moved into the diffusers directory + if cls._is_huggingface_hub_directory_present(): + diffusers = global_cache_dir("diffusers") + else: + diffusers = models_dir / "diffusers" + + os.makedirs(diffusers, exist_ok=True) + for root, dirs, _ in os.walk(models_dir, topdown=False): + for dir in dirs: + full_path = Path(root, dir) + if full_path.is_relative_to(hub) or full_path.is_relative_to(diffusers): + continue + if Path(dir).match("models--*--*"): + dest = diffusers / dir + print(f"** {full_path} => {dest}") + if dest.exists(): + rmtree(full_path) + else: + move(full_path, dest) + + # now clean up by removing any empty directories + empty = [ + root + for root, dirs, files, in os.walk(models_dir) + if not len(dirs) and not len(files) + ] + for d in empty: + os.rmdir(d) + print("** Migration is done. Continuing...") + + def _resolve_path( + self, source: Union[str, Path], dest_directory: str + ) -> Optional[Path]: + resolved_path = None + if str(source).startswith(("http:", "https:", "ftp:")): + dest_directory = Path(dest_directory) + if not dest_directory.is_absolute(): + dest_directory = Globals.root / dest_directory + dest_directory.mkdir(parents=True, exist_ok=True) + resolved_path = download_with_resume(str(source), dest_directory) + else: + if not os.path.isabs(source): + source = os.path.join(Globals.root, source) + resolved_path = Path(source) + return resolved_path + + def _invalidate_cached_model(self, model_name: str) -> None: + self.offload_model(model_name) + if model_name in self.stack: + self.stack.remove(model_name) + self.models.pop(model_name, None) + + def _model_to_cpu(self, model): + if self.device == CPU_DEVICE: + return model + + if isinstance(model, StableDiffusionGeneratorPipeline): + model.offload_all() + return model + + model.cond_stage_model.device = CPU_DEVICE + model.to(CPU_DEVICE) + + for submodel in ("first_stage_model", "cond_stage_model", "model"): + try: + getattr(model, submodel).to(CPU_DEVICE) + except AttributeError: + pass + return model + + def _model_from_cpu(self, model): + if self.device == CPU_DEVICE: + return model + + if isinstance(model, StableDiffusionGeneratorPipeline): + model.ready() + return model + + model.to(self.device) + model.cond_stage_model.device = self.device + + for submodel in ("first_stage_model", "cond_stage_model", "model"): + try: + getattr(model, submodel).to(self.device) + except AttributeError: + pass + + return model + + def _pop_oldest_model(self): + """ + Remove the first element of the FIFO, which ought + to be the least recently accessed model. Do not + pop the last one, because it is in active use! + """ + return self.stack.pop(0) + + def _push_newest_model(self, model_name: str) -> None: + """ + Maintain a simple FIFO. First element is always the + least recent, and last element is always the most recent. 
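+        For example, if the stack is ['a', 'b', 'c'], calling
+        _push_newest_model('a') leaves it as ['b', 'c', 'a'].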
+ """ + with contextlib.suppress(ValueError): + self.stack.remove(model_name) + self.stack.append(model_name) + + def _has_cuda(self) -> bool: + return self.device.type == "cuda" + + def _diffuser_sha256( + self, name_or_path: Union[str, Path], chunksize=4096 + ) -> Union[str, bytes]: + path = None + if isinstance(name_or_path, Path): + path = name_or_path + else: + owner, repo = name_or_path.split("/") + path = Path(global_cache_dir("diffusers") / f"models--{owner}--{repo}") + if not path.exists(): + return None + hashpath = path / "checksum.sha256" + if hashpath.exists() and path.stat().st_mtime <= hashpath.stat().st_mtime: + with open(hashpath) as f: + hash = f.read() + return hash + print(" | Calculating sha256 hash of model files") + tic = time.time() + sha = hashlib.sha256() + count = 0 + for root, dirs, files in os.walk(path, followlinks=False): + for name in files: + count += 1 + with open(os.path.join(root, name), "rb") as f: + while chunk := f.read(chunksize): + sha.update(chunk) + hash = sha.hexdigest() + toc = time.time() + print(f" | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic)) + with open(hashpath, "w") as f: + f.write(hash) + return hash + + def _cached_sha256(self, path, data) -> Union[str, bytes]: + dirname = os.path.dirname(path) + basename = os.path.basename(path) + base, _ = os.path.splitext(basename) + hashpath = os.path.join(dirname, base + ".sha256") + + if os.path.exists(hashpath) and os.path.getmtime(path) <= os.path.getmtime( + hashpath + ): + with open(hashpath) as f: + hash = f.read() + return hash + + print(" | Calculating sha256 hash of weights file") + tic = time.time() + sha = hashlib.sha256() + sha.update(data) + hash = sha.hexdigest() + toc = time.time() + print(f">> sha256 = {hash}", "(%4.2fs)" % (toc - tic)) + + with open(hashpath, "w") as f: + f.write(hash) + return hash + + def _load_vae(self, vae_config) -> AutoencoderKL: + vae_args = {} + try: + name_or_path = self.model_name_or_path(vae_config) + except Exception: + return None + if name_or_path is None: + return None + using_fp16 = self.precision == "float16" + + vae_args.update( + cache_dir=global_cache_dir("diffusers"), + local_files_only=not Globals.internet_available, + ) + + print(f" | Loading diffusers VAE from {name_or_path}") + if using_fp16: + vae_args.update(torch_dtype=torch.float16) + fp_args_list = [{"revision": "fp16"}, {}] + else: + print(" | Using more accurate float32 precision") + fp_args_list = [{}] + + vae = None + deferred_error = None + + # A VAE may be in a subfolder of a model's repository. + if "subfolder" in vae_config: + vae_args["subfolder"] = vae_config["subfolder"] + + for fp_args in fp_args_list: + # At some point we might need to be able to use different classes here? But for now I think + # all Stable Diffusion VAE are AutoencoderKL. + try: + vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args) + except OSError as e: + if str(e).startswith("fp16 is not a valid"): + pass + else: + deferred_error = e + if vae: + break + + if not vae and deferred_error: + print(f"** Could not load VAE {name_or_path}: {str(deferred_error)}") + + return vae + + @staticmethod + def _delete_model_from_cache(repo_id): + cache_info = scan_cache_dir(global_cache_dir("diffusers")) + + # I'm sure there is a way to do this with comprehensions + # but the code quickly became incomprehensible! 
+ hashes_to_delete = set() + for repo in cache_info.repos: + if repo.repo_id == repo_id: + for revision in repo.revisions: + hashes_to_delete.add(revision.commit_hash) + strategy = cache_info.delete_revisions(*hashes_to_delete) + print( + f"** deletion of this model is expected to free {strategy.expected_freed_size_str}" + ) + strategy.execute() + + @staticmethod + def _abs_path(path: str | Path) -> Path: + if path is None or Path(path).is_absolute(): + return path + return Path(Globals.root, path).resolve() + + @staticmethod + def _is_huggingface_hub_directory_present() -> bool: + return ( + os.getenv("HF_HOME") is not None or os.getenv("XDG_CACHE_HOME") is not None + ) From 3b921cf3934adf9703ccaed437bc2a14a9af5c7f Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 28 Feb 2023 00:37:13 -0500 Subject: [PATCH 03/19] add more missing files --- invokeai/generator/__init__.py | 6 + invokeai/generator/base.py | 374 +++++++++ invokeai/generator/diffusers_pipeline.py | 764 ++++++++++++++++++ invokeai/generator/embiggen.py | 501 ++++++++++++ invokeai/generator/img2img.py | 69 ++ invokeai/generator/inpaint.py | 324 ++++++++ invokeai/generator/omnibus.py | 173 ++++ invokeai/generator/txt2img.py | 60 ++ invokeai/generator/txt2img2img.py | 163 ++++ .../__pycache__/__init__.cpython-310.pyc | Bin 653 -> 664 bytes 10 files changed, 2434 insertions(+) create mode 100644 invokeai/generator/__init__.py create mode 100644 invokeai/generator/base.py create mode 100644 invokeai/generator/diffusers_pipeline.py create mode 100644 invokeai/generator/embiggen.py create mode 100644 invokeai/generator/img2img.py create mode 100644 invokeai/generator/inpaint.py create mode 100644 invokeai/generator/omnibus.py create mode 100644 invokeai/generator/txt2img.py create mode 100644 invokeai/generator/txt2img2img.py diff --git a/invokeai/generator/__init__.py b/invokeai/generator/__init__.py new file mode 100644 index 0000000000..c50c9d387d --- /dev/null +++ b/invokeai/generator/__init__.py @@ -0,0 +1,6 @@ +''' +Initialization file for the invokeai.generator package +''' +from .base import Generator +from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline +from .inpaint import infill_methods diff --git a/invokeai/generator/base.py b/invokeai/generator/base.py new file mode 100644 index 0000000000..7b7e1ff126 --- /dev/null +++ b/invokeai/generator/base.py @@ -0,0 +1,374 @@ +''' +Base class for invokeai.backend.generator.* +including img2img, txt2img, and inpaint +''' +from __future__ import annotations + +import os +import os.path as osp +import random +import traceback +from contextlib import nullcontext + +import cv2 +import numpy as np +import torch + +from PIL import Image, ImageFilter, ImageChops +from diffusers import DiffusionPipeline +from einops import rearrange +from pathlib import Path +from pytorch_lightning import seed_everything +from tqdm import trange + +import invokeai.assets.web as web_assets +from ..models.diffusion.ddpm import DiffusionWrapper +from ldm.util import rand_perlin_2d + +downsampling = 8 +CAUTION_IMG = 'caution.png' + +class Generator: + downsampling_factor: int + latent_channels: int + precision: str + model: DiffusionWrapper | DiffusionPipeline + + def __init__(self, model: DiffusionWrapper | DiffusionPipeline, precision: str): + self.model = model + self.precision = precision + self.seed = None + self.latent_channels = model.channels + self.downsampling_factor = downsampling # BUG: should come from model or config + self.safety_checker = None + 
self.perlin = 0.0 + self.threshold = 0 + self.variation_amount = 0 + self.with_variations = [] + self.use_mps_noise = False + self.free_gpu_mem = None + self.caution_img = None + + # this is going to be overridden in img2img.py, txt2img.py and inpaint.py + def get_make_image(self,prompt,**kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it + """ + raise NotImplementedError("image_iterator() must be implemented in a descendent class") + + def set_variation(self, seed, variation_amount, with_variations): + self.seed = seed + self.variation_amount = variation_amount + self.with_variations = with_variations + + def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None, + image_callback=None, step_callback=None, threshold=0.0, perlin=0.0, + h_symmetry_time_pct=None, v_symmetry_time_pct=None, + safety_checker:dict=None, + free_gpu_mem: bool=False, + **kwargs): + scope = nullcontext + self.safety_checker = safety_checker + self.free_gpu_mem = free_gpu_mem + attention_maps_images = [] + attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image()) + make_image = self.get_make_image( + prompt, + sampler = sampler, + init_image = init_image, + width = width, + height = height, + step_callback = step_callback, + threshold = threshold, + perlin = perlin, + h_symmetry_time_pct = h_symmetry_time_pct, + v_symmetry_time_pct = v_symmetry_time_pct, + attention_maps_callback = attention_maps_callback, + **kwargs + ) + results = [] + seed = seed if seed is not None and seed >= 0 else self.new_seed() + first_seed = seed + seed, initial_noise = self.generate_initial_noise(seed, width, height) + + # There used to be an additional self.model.ema_scope() here, but it breaks + # the inpaint-1.5 model. Not sure what it did.... ? + with scope(self.model.device.type): + for n in trange(iterations, desc='Generating'): + x_T = None + if self.variation_amount > 0: + seed_everything(seed) + target_noise = self.get_noise(width,height) + x_T = self.slerp(self.variation_amount, initial_noise, target_noise) + elif initial_noise is not None: + # i.e. we specified particular variations + x_T = initial_noise + else: + seed_everything(seed) + try: + x_T = self.get_noise(width,height) + except: + print('** An error occurred while getting initial noise **') + print(traceback.format_exc()) + + image = make_image(x_T) + + if self.safety_checker is not None: + image = self.safety_check(image) + + results.append([image, seed]) + + if image_callback is not None: + attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1] + image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image) + + seed = self.new_seed() + + # Free up memory from the last generation. 
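+            # (callers may pass a callable such as torch.cuda.empty_cache via the
+            # 'clear_cuda_cache' keyword; if it is not supplied, nothing is freed here)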
+ clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None + if clear_cuda_cache is not None: + clear_cuda_cache() + + return results + + def sample_to_image(self,samples)->Image.Image: + """ + Given samples returned from a sampler, converts + it into a PIL Image + """ + with torch.inference_mode(): + image = self.model.decode_latents(samples) + return self.model.numpy_to_pil(image)[0] + + def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image: + if init_image is None or init_mask is None: + return result + + # Get the original alpha channel of the mask if there is one. + # Otherwise it is some other black/white image format ('1', 'L' or 'RGB') + pil_init_mask = init_mask.getchannel('A') if init_mask.mode == 'RGBA' else init_mask.convert('L') + pil_init_image = init_image.convert('RGBA') # Add an alpha channel if one doesn't exist + + # Build an image with only visible pixels from source to use as reference for color-matching. + init_rgb_pixels = np.asarray(init_image.convert('RGB'), dtype=np.uint8) + init_a_pixels = np.asarray(pil_init_image.getchannel('A'), dtype=np.uint8) + init_mask_pixels = np.asarray(pil_init_mask, dtype=np.uint8) + + # Get numpy version of result + np_image = np.asarray(result, dtype=np.uint8) + + # Mask and calculate mean and standard deviation + mask_pixels = init_a_pixels * init_mask_pixels > 0 + np_init_rgb_pixels_masked = init_rgb_pixels[mask_pixels, :] + np_image_masked = np_image[mask_pixels, :] + + if np_init_rgb_pixels_masked.size > 0: + init_means = np_init_rgb_pixels_masked.mean(axis=0) + init_std = np_init_rgb_pixels_masked.std(axis=0) + gen_means = np_image_masked.mean(axis=0) + gen_std = np_image_masked.std(axis=0) + + # Color correct + np_matched_result = np_image.copy() + np_matched_result[:,:,:] = (((np_matched_result[:,:,:].astype(np.float32) - gen_means[None,None,:]) / gen_std[None,None,:]) * init_std[None,None,:] + init_means[None,None,:]).clip(0, 255).astype(np.uint8) + matched_result = Image.fromarray(np_matched_result, mode='RGB') + else: + matched_result = Image.fromarray(np_image, mode='RGB') + + # Blur the mask out (into init image) by specified amount + if mask_blur_radius > 0: + nm = np.asarray(pil_init_mask, dtype=np.uint8) + nmd = cv2.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2)) + pmd = Image.fromarray(nmd, mode='L') + blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius)) + else: + blurred_init_mask = pil_init_mask + + multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1]) + + # Paste original on color-corrected generation (using blurred mask) + matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask) + return matched_result + + def sample_to_lowres_estimated_image(self,samples): + # origingally adapted from code by @erucipe and @keturn here: + # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 + + # these updated numbers for v1.5 are from @torridgristle + v1_5_latent_rgb_factors = torch.tensor([ + # R G B + [ 0.3444, 0.1385, 0.0670], # L1 + [ 0.1247, 0.4027, 0.1494], # L2 + [-0.3192, 0.2513, 0.2103], # L3 + [-0.1307, -0.1874, -0.7445] # L4 + ], dtype=samples.dtype, device=samples.device) + + latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors + latents_ubyte = (((latent_image + 1) / 2) + .clamp(0, 1) # change scale from -1..1 to 0..1 + 
.mul(0xFF) # to 0..255 + .byte()).cpu() + + return Image.fromarray(latents_ubyte.numpy()) + + def generate_initial_noise(self, seed, width, height): + initial_noise = None + if self.variation_amount > 0 or len(self.with_variations) > 0: + # use fixed initial noise plus random noise per iteration + seed_everything(seed) + initial_noise = self.get_noise(width,height) + for v_seed, v_weight in self.with_variations: + seed = v_seed + seed_everything(seed) + next_noise = self.get_noise(width,height) + initial_noise = self.slerp(v_weight, initial_noise, next_noise) + if self.variation_amount > 0: + random.seed() # reset RNG to an actually random state, so we can get a random seed for variations + seed = random.randrange(0,np.iinfo(np.uint32).max) + return (seed, initial_noise) + else: + return (seed, None) + + # returns a tensor filled with random numbers from a normal distribution + def get_noise(self,width,height): + """ + Returns a tensor filled with random numbers, either form a normal distribution + (txt2img) or from the latent image (img2img, inpaint) + """ + raise NotImplementedError("get_noise() must be implemented in a descendent class") + + def get_perlin_noise(self,width,height): + fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device + # limit noise to only the diffusion image channels, not the mask channels + input_channels = min(self.latent_channels, 4) + # round up to the nearest block of 8 + temp_width = int((width + 7) / 8) * 8 + temp_height = int((height + 7) / 8) * 8 + noise = torch.stack([ + rand_perlin_2d((temp_height, temp_width), + (8, 8), + device = self.model.device).to(fixdevice) for _ in range(input_channels)], dim=0).to(self.model.device) + return noise[0:4, 0:height, 0:width] + + def new_seed(self): + self.seed = random.randrange(0, np.iinfo(np.uint32).max) + return self.seed + + def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995): + ''' + Spherical linear interpolation + Args: + t (float/np.ndarray): Float value between 0.0 and 1.0 + v0 (np.ndarray): Starting vector + v1 (np.ndarray): Final vector + DOT_THRESHOLD (float): Threshold for considering the two vectors as + colineal. Not recommended to alter this. + Returns: + v2 (np.ndarray): Interpolation vector between v0 and v1 + ''' + inputs_are_torch = False + if not isinstance(v0, np.ndarray): + inputs_are_torch = True + v0 = v0.detach().cpu().numpy() + if not isinstance(v1, np.ndarray): + inputs_are_torch = True + v1 = v1.detach().cpu().numpy() + + dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) + if np.abs(dot) > DOT_THRESHOLD: + v2 = (1 - t) * v0 + t * v1 + else: + theta_0 = np.arccos(dot) + sin_theta_0 = np.sin(theta_0) + theta_t = theta_0 * t + sin_theta_t = np.sin(theta_t) + s0 = np.sin(theta_0 - theta_t) / sin_theta_0 + s1 = sin_theta_t / sin_theta_0 + v2 = s0 * v0 + s1 * v1 + + if inputs_are_torch: + v2 = torch.from_numpy(v2).to(self.model.device) + + return v2 + + def safety_check(self,image:Image.Image): + ''' + If the CompViz safety checker flags an NSFW image, we + blur it out. 
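+        Expects self.safety_checker to be a dict with 'checker' and
+        'extractor' entries, as passed in through generate().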
+ ''' + import diffusers + + checker = self.safety_checker['checker'] + extractor = self.safety_checker['extractor'] + features = extractor([image], return_tensors="pt") + features.to(self.model.device) + + # unfortunately checker requires the numpy version, so we have to convert back + x_image = np.array(image).astype(np.float32) / 255.0 + x_image = x_image[None].transpose(0, 3, 1, 2) + + diffusers.logging.set_verbosity_error() + checked_image, has_nsfw_concept = checker(images=x_image, clip_input=features.pixel_values) + if has_nsfw_concept[0]: + print('** An image with potential non-safe content has been detected. A blurred image will be returned. **') + return self.blur(image) + else: + return image + + def blur(self,input): + blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) + try: + caution = self.get_caution_img() + if caution: + blurry.paste(caution,(0,0),caution) + except FileNotFoundError: + pass + return blurry + + def get_caution_img(self): + path = None + if self.caution_img: + return self.caution_img + path = Path(web_assets.__path__[0]) / CAUTION_IMG + caution = Image.open(path) + self.caution_img = caution.resize((caution.width // 2, caution.height //2)) + return self.caution_img + + # this is a handy routine for debugging use. Given a generated sample, + # convert it into a PNG image and store it at the indicated path + def save_sample(self, sample, filepath): + image = self.sample_to_image(sample) + dirname = os.path.dirname(filepath) or '.' + if not os.path.exists(dirname): + print(f'** creating directory {dirname}') + os.makedirs(dirname, exist_ok=True) + image.save(filepath,'PNG') + + + def torch_dtype(self)->torch.dtype: + return torch.float16 if self.precision == 'float16' else torch.float32 + + # returns a tensor filled with random numbers from a normal distribution + def get_noise(self,width,height): + device = self.model.device + # limit noise to only the diffusion image channels, not the mask channels + input_channels = min(self.latent_channels, 4) + if self.use_mps_noise or device.type == 'mps': + x = torch.randn([1, + input_channels, + height // self.downsampling_factor, + width // self.downsampling_factor], + dtype=self.torch_dtype(), + device='cpu').to(device) + else: + x = torch.randn([1, + input_channels, + height // self.downsampling_factor, + width // self.downsampling_factor], + dtype=self.torch_dtype(), + device=device) + if self.perlin > 0.0: + perlin_noise = self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor) + x = (1-self.perlin)*x + self.perlin*perlin_noise + return x diff --git a/invokeai/generator/diffusers_pipeline.py b/invokeai/generator/diffusers_pipeline.py new file mode 100644 index 0000000000..709617c37f --- /dev/null +++ b/invokeai/generator/diffusers_pipeline.py @@ -0,0 +1,764 @@ +from __future__ import annotations + +import dataclasses +import inspect +import psutil +import secrets +from collections.abc import Sequence +from dataclasses import dataclass, field +from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any + +import PIL.Image +import einops +import psutil +import torch +import torchvision.transforms as T +from diffusers.models import AutoencoderKL, UNet2DConditionModel +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import 
StableDiffusionImg2ImgPipeline +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.outputs import BaseOutput +from torchvision.transforms.functional import resize as tv_resize +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer +from typing_extensions import ParamSpec + +from ldm.invoke.globals import Globals +from invokeai.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings +from ldm.modules.textual_inversion_manager import TextualInversionManager +from ldm.invoke.devices import normalize_device, CPU_DEVICE +from ldm.invoke.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup +from ..models.diffusion.cross_attention_map_saving import AttentionMapSaver +from compel import EmbeddingsProvider + +@dataclass +class PipelineIntermediateState: + run_id: str + step: int + timestep: int + latents: torch.Tensor + predicted_original: Optional[torch.Tensor] = None + attention_map_saver: Optional[AttentionMapSaver] = None + + +# copied from configs/stable-diffusion/v1-inference.yaml +_default_personalization_config_params = dict( + placeholder_strings=["*"], + initializer_wods=["sculpture"], + per_image_tokens=False, + num_vectors_per_token=1, + progressive_words=False +) + + +@dataclass +class AddsMaskLatents: + """Add the channels required for inpainting model input. + + The inpainting model takes the normal latent channels as input, _plus_ a one-channel mask + and the latent encoding of the base image. + + This class assumes the same mask and base image should apply to all items in the batch. + """ + forward: Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor] + mask: torch.Tensor + initial_image_latents: torch.Tensor + + def __call__(self, latents: torch.Tensor, t: torch.Tensor, text_embeddings: torch.Tensor) -> torch.Tensor: + model_input = self.add_mask_channels(latents) + return self.forward(model_input, t, text_embeddings) + + def add_mask_channels(self, latents): + batch_size = latents.size(0) + # duplicate mask and latents for each batch + mask = einops.repeat(self.mask, 'b c h w -> (repeat b) c h w', repeat=batch_size) + image_latents = einops.repeat(self.initial_image_latents, 'b c h w -> (repeat b) c h w', repeat=batch_size) + # add mask and image as additional channels + model_input, _ = einops.pack([latents, mask, image_latents], 'b * h w') + return model_input + + +def are_like_tensors(a: torch.Tensor, b: object) -> bool: + return ( + isinstance(b, torch.Tensor) + and (a.size() == b.size()) + ) + +@dataclass +class AddsMaskGuidance: + mask: torch.FloatTensor + mask_latents: torch.FloatTensor + scheduler: SchedulerMixin + noise: torch.Tensor + _debug: Optional[Callable] = None + + def __call__(self, step_output: BaseOutput | SchedulerOutput, t: torch.Tensor, conditioning) -> BaseOutput: + output_class = step_output.__class__ # We'll create a new one with masked data. + + # The problem with taking SchedulerOutput instead of the model output is that we're less certain what's in it. + # It's reasonable to assume the first thing is prev_sample, but then does it have other things + # like pred_original_sample? Should we apply the mask to them too? + # But what if there's just some other random field? 
+ prev_sample = step_output[0] + # Mask anything that has the same shape as prev_sample, return others as-is. + return output_class( + {k: (self.apply_mask(v, self._t_for_field(k, t)) + if are_like_tensors(prev_sample, v) else v) + for k, v in step_output.items()} + ) + + def _t_for_field(self, field_name:str, t): + if field_name == "pred_original_sample": + return torch.zeros_like(t, dtype=t.dtype) # it represents t=0 + return t + + def apply_mask(self, latents: torch.Tensor, t) -> torch.Tensor: + batch_size = latents.size(0) + mask = einops.repeat(self.mask, 'b c h w -> (repeat b) c h w', repeat=batch_size) + if t.dim() == 0: + # some schedulers expect t to be one-dimensional. + # TODO: file diffusers bug about inconsistency? + t = einops.repeat(t, '-> batch', batch=batch_size) + # Noise shouldn't be re-randomized between steps here. The multistep schedulers + # get very confused about what is happening from step to step when we do that. + mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) + # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already? + # mask_latents = self.scheduler.scale_model_input(mask_latents, t) + mask_latents = einops.repeat(mask_latents, 'b c h w -> (repeat b) c h w', repeat=batch_size) + masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) + if self._debug: + self._debug(masked_input, f"t={t} lerped") + return masked_input + + +def trim_to_multiple_of(*args, multiple_of=8): + return tuple((x - x % multiple_of) for x in args) + + +def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool=True, multiple_of=8) -> torch.FloatTensor: + """ + + :param image: input image + :param normalize: scale the range to [-1, 1] instead of [0, 1] + :param multiple_of: resize the input so both dimensions are a multiple of this + """ + w, h = trim_to_multiple_of(*image.size) + transformation = T.Compose([ + T.Resize((h, w), T.InterpolationMode.LANCZOS), + T.ToTensor(), + ]) + tensor = transformation(image) + if normalize: + tensor = tensor * 2.0 - 1.0 + return tensor + + +def is_inpainting_model(unet: UNet2DConditionModel): + return unet.conv_in.in_channels == 9 + +CallbackType = TypeVar('CallbackType') +ReturnType = TypeVar('ReturnType') +ParamType = ParamSpec('ParamType') + +@dataclass(frozen=True) +class GeneratorToCallbackinator(Generic[ParamType, ReturnType, CallbackType]): + """Convert a generator to a function with a callback and a return value.""" + + generator_method: Callable[ParamType, ReturnType] + callback_arg_type: Type[CallbackType] + + def __call__(self, *args: ParamType.args, + callback:Callable[[CallbackType], Any]=None, + **kwargs: ParamType.kwargs) -> ReturnType: + result = None + for result in self.generator_method(*args, **kwargs): + if callback is not None and isinstance(result, self.callback_arg_type): + callback(result) + if result is None: + raise AssertionError("why was that an empty generator?") + return result + + +@dataclass(frozen=True) +class ConditioningData: + unconditioned_embeddings: torch.Tensor + text_embeddings: torch.Tensor + guidance_scale: float + """ + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). + Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate + images that are closely linked to the text `prompt`, usually at the expense of lower image quality. + """ + extra: Optional[InvokeAIDiffuserComponent.ExtraConditioningInfo] = None + scheduler_args: dict[str, Any] = field(default_factory=dict) + """ + Additional arguments to pass to invokeai_diffuser.do_latent_postprocessing(). + """ + postprocessing_settings: Optional[PostprocessingSettings] = None + + @property + def dtype(self): + return self.text_embeddings.dtype + + def add_scheduler_args_if_applicable(self, scheduler, **kwargs): + scheduler_args = dict(self.scheduler_args) + step_method = inspect.signature(scheduler.step) + for name, value in kwargs.items(): + try: + step_method.bind_partial(**{name: value}) + except TypeError: + # FIXME: don't silently discard arguments + pass # debug("%s does not accept argument named %r", scheduler, name) + else: + scheduler_args[name] = value + return dataclasses.replace(self, scheduler_args=scheduler_args) + +@dataclass +class InvokeAIStableDiffusionPipelineOutput(StableDiffusionPipelineOutput): + r""" + Output class for InvokeAI's Stable Diffusion pipeline. + + Args: + attention_map_saver (`AttentionMapSaver`): Object containing attention maps that can be displayed to the user + after generation completes. Optional. + """ + attention_map_saver: Optional[AttentionMapSaver] + + +class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Implementation note: This class started as a refactored copy of diffusers.StableDiffusionPipeline. + Hopefully future versions of diffusers provide access to more of these functions so that we don't + need to duplicate them here: https://github.com/huggingface/diffusers/issues/551#issuecomment-1281508384 + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offsensive or harmful. + Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
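+        precision (`str`):
+            Precision hint; 'float32' or 'autocast' make the textual inversion
+            manager run at full precision, 'float16' does not. Defaults to 'float32'.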
+ """ + _model_group: ModelGroup + + ID_LENGTH = 8 + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: Optional[StableDiffusionSafetyChecker], + feature_extractor: Optional[CLIPFeatureExtractor], + requires_safety_checker: bool = False, + precision: str = 'float32', + ): + super().__init__(vae, text_encoder, tokenizer, unet, scheduler, + safety_checker, feature_extractor, requires_safety_checker) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward, is_running_diffusers=True) + use_full_precision = (precision == 'float32' or precision == 'autocast') + self.textual_inversion_manager = TextualInversionManager(tokenizer=self.tokenizer, + text_encoder=self.text_encoder, + full_precision=use_full_precision) + # InvokeAI's interface for text embeddings and whatnot + self.embeddings_provider = EmbeddingsProvider( + tokenizer=self.tokenizer, + text_encoder=self.text_encoder, + textual_inversion_manager=self.textual_inversion_manager + ) + + self._model_group = FullyLoadedModelGroup(self.unet.device) + self._model_group.install(*self._submodels) + + + def _adjust_memory_efficient_attention(self, latents: torch.Tensor): + """ + if xformers is available, use it, otherwise use sliced attention. + """ + if torch.cuda.is_available() and is_xformers_available() and not Globals.disable_xformers: + self.enable_xformers_memory_efficient_attention() + else: + if torch.backends.mps.is_available(): + # until pytorch #91617 is fixed, slicing is borked on MPS + # https://github.com/pytorch/pytorch/issues/91617 + # fix is in https://github.com/kulinseth/pytorch/pull/222 but no idea when it will get merged to pytorch mainline. + pass + else: + if self.device.type == 'cpu' or self.device.type == 'mps': + mem_free = psutil.virtual_memory().free + elif self.device.type == 'cuda': + mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.device)) + else: + raise ValueError(f"unrecognized device {self.device}") + # input tensor of [1, 4, h/8, w/8] + # output tensor of [16, (h/8 * w/8), (h/8 * w/8)] + bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4 + max_size_required_for_baddbmm = \ + 16 * \ + latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \ + bytes_per_element_needed_for_baddbmm_duplication + if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code + self.enable_attention_slicing(slice_size='max') + else: + self.disable_attention_slicing() + + + def enable_offload_submodels(self, device: torch.device): + """ + Offload each submodel when it's not in use. + + Useful for low-vRAM situations where the size of the model in memory is a big chunk of + the total available resource, and you want to free up as much for inference as possible. + + This requires more moving parts and may add some delay as the U-Net is swapped out for the + VAE and vice-versa. + """ + models = self._submodels + if self._model_group is not None: + self._model_group.uninstall(*models) + group = LazilyLoadedModelGroup(device) + group.install(*models) + self._model_group = group + + def disable_offload_submodels(self): + """ + Leave all submodels loaded. 
+ + Appropriate for cases where the size of the model in memory is small compared to the memory + required for inference. Avoids the delay and complexity of shuffling the submodels to and + from the GPU. + """ + models = self._submodels + if self._model_group is not None: + self._model_group.uninstall(*models) + group = FullyLoadedModelGroup(self._model_group.execution_device) + group.install(*models) + self._model_group = group + + def offload_all(self): + """Offload all this pipeline's models to CPU.""" + self._model_group.offload_current() + + def ready(self): + """ + Ready this pipeline's models. + + i.e. pre-load them to the GPU if appropriate. + """ + self._model_group.ready() + + def to(self, torch_device: Optional[Union[str, torch.device]] = None): + # overridden method; types match the superclass. + if torch_device is None: + return self + self._model_group.set_device(torch.device(torch_device)) + self._model_group.ready() + + @property + def device(self) -> torch.device: + return self._model_group.execution_device + + @property + def _submodels(self) -> Sequence[torch.nn.Module]: + module_names, _, _ = self.extract_init_dict(dict(self.config)) + values = [getattr(self, name) for name in module_names.keys()] + return [m for m in values if isinstance(m, torch.nn.Module)] + + def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int, + conditioning_data: ConditioningData, + *, + noise: torch.Tensor, + callback: Callable[[PipelineIntermediateState], None]=None, + run_id=None) -> InvokeAIStableDiffusionPipelineOutput: + r""" + Function invoked when calling the pipeline for generation. + + :param conditioning_data: + :param latents: Pre-generated un-noised latents, to be used as inputs for + image generation. Can be used to tweak the same generation with different prompts. + :param num_inference_steps: The number of denoising steps. More denoising steps usually lead to a higher quality + image at the expense of slower inference. + :param noise: Noise to add to the latents, sampled from a Gaussian distribution. 
+ :param callback: + :param run_id: + """ + result_latents, result_attention_map_saver = self.latents_from_embeddings( + latents, num_inference_steps, + conditioning_data, + noise=noise, + run_id=run_id, + callback=callback) + # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 + torch.cuda.empty_cache() + + with torch.inference_mode(): + image = self.decode_latents(result_latents) + output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_map_saver) + return self.check_for_safety(output, dtype=conditioning_data.dtype) + + def latents_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int, + conditioning_data: ConditioningData, + *, + noise: torch.Tensor, + timesteps=None, + additional_guidance: List[Callable] = None, run_id=None, + callback: Callable[[PipelineIntermediateState], None] = None + ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: + if timesteps is None: + self.scheduler.set_timesteps(num_inference_steps, device=self._model_group.device_for(self.unet)) + timesteps = self.scheduler.timesteps + infer_latents_from_embeddings = GeneratorToCallbackinator(self.generate_latents_from_embeddings, PipelineIntermediateState) + result: PipelineIntermediateState = infer_latents_from_embeddings( + latents, timesteps, conditioning_data, + noise=noise, + additional_guidance=additional_guidance, + run_id=run_id, + callback=callback) + return result.latents, result.attention_map_saver + + def generate_latents_from_embeddings(self, latents: torch.Tensor, timesteps, + conditioning_data: ConditioningData, + *, + noise: torch.Tensor, + run_id: str = None, + additional_guidance: List[Callable] = None): + self._adjust_memory_efficient_attention(latents) + if run_id is None: + run_id = secrets.token_urlsafe(self.ID_LENGTH) + if additional_guidance is None: + additional_guidance = [] + extra_conditioning_info = conditioning_data.extra + with self.invokeai_diffuser.custom_attention_context(extra_conditioning_info=extra_conditioning_info, + step_count=len(self.scheduler.timesteps) + ): + + yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps, + latents=latents) + + batch_size = latents.shape[0] + batched_t = torch.full((batch_size,), timesteps[0], + dtype=timesteps.dtype, device=self._model_group.device_for(self.unet)) + latents = self.scheduler.add_noise(latents, noise, batched_t) + + attention_map_saver: Optional[AttentionMapSaver] = None + + for i, t in enumerate(self.progress_bar(timesteps)): + batched_t.fill_(t) + step_output = self.step(batched_t, latents, conditioning_data, + step_index=i, + total_step_count=len(timesteps), + additional_guidance=additional_guidance) + latents = step_output.prev_sample + + latents = self.invokeai_diffuser.do_latent_postprocessing( + postprocessing_settings=conditioning_data.postprocessing_settings, + latents=latents, + sigma=batched_t, + step_index=i, + total_step_count=len(timesteps) + ) + + predicted_original = getattr(step_output, 'pred_original_sample', None) + + # TODO resuscitate attention map saving + #if i == len(timesteps)-1 and extra_conditioning_info is not None: + # eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1 + # attention_map_token_ids = range(1, eos_token_index) + # attention_map_saver = AttentionMapSaver(token_ids=attention_map_token_ids, latents_shape=latents.shape[-2:]) + # self.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) + + yield 
PipelineIntermediateState(run_id=run_id, step=i, timestep=int(t), latents=latents, + predicted_original=predicted_original, attention_map_saver=attention_map_saver) + + return latents, attention_map_saver + + @torch.inference_mode() + def step(self, t: torch.Tensor, latents: torch.Tensor, + conditioning_data: ConditioningData, + step_index:int, total_step_count:int, + additional_guidance: List[Callable] = None): + # invokeai_diffuser has batched timesteps, but diffusers schedulers expect a single value + timestep = t[0] + + if additional_guidance is None: + additional_guidance = [] + + # TODO: should this scaling happen here or inside self._unet_forward? + # i.e. before or after passing it to InvokeAIDiffuserComponent + latent_model_input = self.scheduler.scale_model_input(latents, timestep) + + # predict the noise residual + noise_pred = self.invokeai_diffuser.do_diffusion_step( + latent_model_input, t, + conditioning_data.unconditioned_embeddings, conditioning_data.text_embeddings, + conditioning_data.guidance_scale, + step_index=step_index, + total_step_count=total_step_count, + ) + + # compute the previous noisy sample x_t -> x_t-1 + step_output = self.scheduler.step(noise_pred, timestep, latents, + **conditioning_data.scheduler_args) + + # TODO: this additional_guidance extension point feels redundant with InvokeAIDiffusionComponent. + # But the way things are now, scheduler runs _after_ that, so there was + # no way to use it to apply an operation that happens after the last scheduler.step. + for guidance in additional_guidance: + step_output = guidance(step_output, timestep, conditioning_data) + + return step_output + + def _unet_forward(self, latents, t, text_embeddings, cross_attention_kwargs: Optional[dict[str,Any]] = None): + """predict the noise residual""" + if is_inpainting_model(self.unet) and latents.size(1) == 4: + # Pad out normal non-inpainting inputs for an inpainting model. + # FIXME: There are too many layers of functions and we have too many different ways of + # overriding things! This should get handled in a way more consistent with the other + # use of AddsMaskLatents. + latents = AddsMaskLatents( + self._unet_forward, + mask=torch.ones_like(latents[:1, :1], device=latents.device, dtype=latents.dtype), + initial_image_latents=torch.zeros_like(latents[:1], device=latents.device, dtype=latents.dtype) + ).add_mask_channels(latents) + + # First three args should be positional, not keywords, so torch hooks can see them. + return self.unet(latents, t, text_embeddings, + cross_attention_kwargs=cross_attention_kwargs).sample + + def img2img_from_embeddings(self, + init_image: Union[torch.FloatTensor, PIL.Image.Image], + strength: float, + num_inference_steps: int, + conditioning_data: ConditioningData, + *, callback: Callable[[PipelineIntermediateState], None] = None, + run_id=None, + noise_func=None + ) -> InvokeAIStableDiffusionPipelineOutput: + if isinstance(init_image, PIL.Image.Image): + init_image = image_resized_to_grid_as_tensor(init_image.convert('RGB')) + + if init_image.dim() == 3: + init_image = einops.rearrange(init_image, 'c h w -> 1 c h w') + + # 6. 
Prepare latent variables + initial_latents = self.non_noised_latents_from_image( + init_image, device=self._model_group.device_for(self.unet), + dtype=self.unet.dtype) + noise = noise_func(initial_latents) + + return self.img2img_from_latents_and_embeddings(initial_latents, num_inference_steps, + conditioning_data, + strength, + noise, run_id, callback) + + def img2img_from_latents_and_embeddings(self, initial_latents, num_inference_steps, + conditioning_data: ConditioningData, + strength, + noise: torch.Tensor, run_id=None, callback=None + ) -> InvokeAIStableDiffusionPipelineOutput: + timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, + device=self._model_group.device_for(self.unet)) + result_latents, result_attention_maps = self.latents_from_embeddings( + initial_latents, num_inference_steps, conditioning_data, + timesteps=timesteps, + noise=noise, + run_id=run_id, + callback=callback) + + # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 + torch.cuda.empty_cache() + + with torch.inference_mode(): + image = self.decode_latents(result_latents) + output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps) + return self.check_for_safety(output, dtype=conditioning_data.dtype) + + def get_img2img_timesteps(self, num_inference_steps: int, strength: float, device) -> (torch.Tensor, int): + img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components) + assert img2img_pipeline.scheduler is self.scheduler + img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, adjusted_steps = img2img_pipeline.get_timesteps(num_inference_steps, strength, device=device) + # Workaround for low strength resulting in zero timesteps. + # TODO: submit upstream fix for zero-step img2img + if timesteps.numel() == 0: + timesteps = self.scheduler.timesteps[-1:] + adjusted_steps = timesteps.numel() + return timesteps, adjusted_steps + + def inpaint_from_embeddings( + self, + init_image: torch.FloatTensor, + mask: torch.FloatTensor, + strength: float, + num_inference_steps: int, + conditioning_data: ConditioningData, + *, callback: Callable[[PipelineIntermediateState], None] = None, + run_id=None, + noise_func=None, + ) -> InvokeAIStableDiffusionPipelineOutput: + device = self._model_group.device_for(self.unet) + latents_dtype = self.unet.dtype + + if isinstance(init_image, PIL.Image.Image): + init_image = image_resized_to_grid_as_tensor(init_image.convert('RGB')) + + init_image = init_image.to(device=device, dtype=latents_dtype) + mask = mask.to(device=device, dtype=latents_dtype) + + if init_image.dim() == 3: + init_image = init_image.unsqueeze(0) + + timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, device=device) + + # 6. Prepare latent variables + # can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents + # because we have our own noise function + init_image_latents = self.non_noised_latents_from_image(init_image, device=device, dtype=latents_dtype) + noise = noise_func(init_image_latents) + + if mask.dim() == 3: + mask = mask.unsqueeze(0) + latent_mask = tv_resize(mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR) \ + .to(device=device, dtype=latents_dtype) + + guidance: List[Callable] = [] + + if is_inpainting_model(self.unet): + # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint + # (that's why there's a mask!) 
but it seems to really want that blanked out. + masked_init_image = init_image * torch.where(mask < 0.5, 1, 0) + masked_latents = self.non_noised_latents_from_image(masked_init_image, device=device, dtype=latents_dtype) + + # TODO: we should probably pass this in so we don't have to try/finally around setting it. + self.invokeai_diffuser.model_forward_callback = \ + AddsMaskLatents(self._unet_forward, latent_mask, masked_latents) + else: + guidance.append(AddsMaskGuidance(latent_mask, init_image_latents, self.scheduler, noise)) + + try: + result_latents, result_attention_maps = self.latents_from_embeddings( + init_image_latents, num_inference_steps, + conditioning_data, noise=noise, timesteps=timesteps, + additional_guidance=guidance, + run_id=run_id, callback=callback) + finally: + self.invokeai_diffuser.model_forward_callback = self._unet_forward + + # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 + torch.cuda.empty_cache() + + with torch.inference_mode(): + image = self.decode_latents(result_latents) + output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps) + return self.check_for_safety(output, dtype=conditioning_data.dtype) + + def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype): + init_image = init_image.to(device=device, dtype=dtype) + with torch.inference_mode(): + if device.type == 'mps': + # workaround for torch MPS bug that has been fixed in https://github.com/kulinseth/pytorch/pull/222 + # TODO remove this workaround once kulinseth#222 is merged to pytorch mainline + self.vae.to(CPU_DEVICE) + init_image = init_image.to(CPU_DEVICE) + else: + self._model_group.load(self.vae) + init_latent_dist = self.vae.encode(init_image).latent_dist + init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible! + if device.type == 'mps': + self.vae.to(device) + init_latents = init_latents.to(device) + + init_latents = 0.18215 * init_latents + return init_latents + + def check_for_safety(self, output, dtype): + with torch.inference_mode(): + screened_images, has_nsfw_concept = self.run_safety_checker(output.images, dtype=dtype) + screened_attention_map_saver = None + if has_nsfw_concept is None or not has_nsfw_concept: + screened_attention_map_saver = output.attention_map_saver + return InvokeAIStableDiffusionPipelineOutput(screened_images, + has_nsfw_concept, + # block the attention maps if NSFW content is detected + attention_map_saver=screened_attention_map_saver) + + def run_safety_checker(self, image, device=None, dtype=None): + # overriding to use the model group for device info instead of requiring the caller to know. + if self.safety_checker is not None: + device = self._model_group.device_for(self.safety_checker) + return super().run_safety_checker(image, device, dtype) + + @torch.inference_mode() + def get_learned_conditioning(self, c: List[List[str]], *, return_tokens=True, fragment_weights=None): + """ + Compatibility function for invokeai.models.diffusion.ddpm.LatentDiffusion. 
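+        Delegates to the compel EmbeddingsProvider's
+        get_embeddings_for_weighted_prompt_fragments().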
+ """ + return self.embeddings_provider.get_embeddings_for_weighted_prompt_fragments( + text_batch=c, + fragment_weights_batch=fragment_weights, + should_return_tokens=return_tokens, + device=self._model_group.device_for(self.unet)) + + @property + def cond_stage_model(self): + return self.embeddings_provider + + @torch.inference_mode() + def _tokenize(self, prompt: Union[str, List[str]]): + return self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + @property + def channels(self) -> int: + """Compatible with DiffusionWrapper""" + return self.unet.in_channels + + def decode_latents(self, latents): + # Explicit call to get the vae loaded, since `decode` isn't the forward method. + self._model_group.load(self.vae) + return super().decode_latents(latents) + + def debug_latents(self, latents, msg): + with torch.inference_mode(): + from ldm.util import debug_image + decoded = self.numpy_to_pil(self.decode_latents(latents)) + for i, img in enumerate(decoded): + debug_image(img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True) diff --git a/invokeai/generator/embiggen.py b/invokeai/generator/embiggen.py new file mode 100644 index 0000000000..23447d59e3 --- /dev/null +++ b/invokeai/generator/embiggen.py @@ -0,0 +1,501 @@ +''' +invokeai.backend.generator.embiggen descends from ldm.invoke.generator +and generates with invokeai.backend.generator.img2img +''' + +import numpy as np +import torch +from PIL import Image +from tqdm import trange + +from .base import Generator +from .img2img import Img2Img + + +class Embiggen(Generator): + def __init__(self, model, precision): + super().__init__(model, precision) + self.init_latent = None + + # Replace generate because Embiggen doesn't need/use most of what it does normallly + def generate(self,prompt,iterations=1,seed=None, + image_callback=None, step_callback=None, + **kwargs): + + make_image = self.get_make_image( + prompt, + step_callback = step_callback, + **kwargs + ) + results = [] + seed = seed if seed else self.new_seed() + + # Noise will be generated by the Img2Img generator when called + for _ in trange(iterations, desc='Generating'): + # make_image will call Img2Img which will do the equivalent of get_noise itself + image = make_image() + results.append([image, seed]) + if image_callback is not None: + image_callback(image, seed, prompt_in=prompt) + seed = self.new_seed() + return results + + @torch.no_grad() + def get_make_image( + self, + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + init_img, + strength, + width, + height, + embiggen, + embiggen_tiles, + step_callback=None, + **kwargs + ): + """ + Returns a function returning an image derived from the prompt and multi-stage twice-baked potato layering over the img2img on the initial image + Return value depends on the seed at the time you call it + """ + assert not sampler.uses_inpainting_model(), "--embiggen is not supported by inpainting models" + + # Construct embiggen arg array, and sanity check arguments + if embiggen == None: # embiggen can also be called with just embiggen_tiles + embiggen = [1.0] # If not specified, assume no scaling + elif embiggen[0] < 0: + embiggen[0] = 1.0 + print( + '>> Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !') + if len(embiggen) < 2: + embiggen.append(0.75) + elif embiggen[1] > 1.0 or embiggen[1] < 0: + embiggen[1] = 0.75 + print('>> Embiggen upscaling strength for ESRGAN must be between 0 and 1, 
fell back to the default of 0.75 !') + if len(embiggen) < 3: + embiggen.append(0.25) + elif embiggen[2] < 0: + embiggen[2] = 0.25 + print('>> Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !') + + # Convert tiles from their user-freindly count-from-one to count-from-zero, because we need to do modulo math + # and then sort them, because... people. + if embiggen_tiles: + embiggen_tiles = list(map(lambda n: n-1, embiggen_tiles)) + embiggen_tiles.sort() + + if strength >= 0.5: + print(f'* WARNING: Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45.') + + # Prep img2img generator, since we wrap over it + gen_img2img = Img2Img(self.model,self.precision) + + # Open original init image (not a tensor) to manipulate + initsuperimage = Image.open(init_img) + + with Image.open(init_img) as img: + initsuperimage = img.convert('RGB') + + # Size of the target super init image in pixels + initsuperwidth, initsuperheight = initsuperimage.size + + # Increase by scaling factor if not already resized, using ESRGAN as able + if embiggen[0] != 1.0: + initsuperwidth = round(initsuperwidth*embiggen[0]) + initsuperheight = round(initsuperheight*embiggen[0]) + if embiggen[1] > 0: # No point in ESRGAN upscaling if strength is set zero + from ldm.invoke.restoration.realesrgan import ESRGAN + esrgan = ESRGAN() + print( + f'>> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}') + if embiggen[0] > 2: + initsuperimage = esrgan.process( + initsuperimage, + embiggen[1], # upscale strength + self.seed, + 4, # upscale scale + ) + else: + initsuperimage = esrgan.process( + initsuperimage, + embiggen[1], # upscale strength + self.seed, + 2, # upscale scale + ) + # We could keep recursively re-running ESRGAN for a requested embiggen[0] larger than 4x + # but from personal experiance it doesn't greatly improve anything after 4x + # Resize to target scaling factor resolution + initsuperimage = initsuperimage.resize( + (initsuperwidth, initsuperheight), Image.Resampling.LANCZOS) + + # Use width and height as tile widths and height + # Determine buffer size in pixels + if embiggen[2] < 1: + if embiggen[2] < 0: + embiggen[2] = 0 + overlap_size_x = round(embiggen[2] * width) + overlap_size_y = round(embiggen[2] * height) + else: + overlap_size_x = round(embiggen[2]) + overlap_size_y = round(embiggen[2]) + + # With overall image width and height known, determine how many tiles we need + def ceildiv(a, b): + return -1 * (-a // b) + + # X and Y needs to be determined independantly (we may have savings on one based on the buffer pixel count) + # (initsuperwidth - width) is the area remaining to the right that we need to layers tiles to fill + # (width - overlap_size_x) is how much new we can fill with a single tile + emb_tiles_x = 1 + emb_tiles_y = 1 + if (initsuperwidth - width) > 0: + emb_tiles_x = ceildiv(initsuperwidth - width, + width - overlap_size_x) + 1 + if (initsuperheight - height) > 0: + emb_tiles_y = ceildiv(initsuperheight - height, + height - overlap_size_y) + 1 + # Sanity + assert emb_tiles_x > 1 or emb_tiles_y > 1, f'ERROR: Based on the requested dimensions of {initsuperwidth}x{initsuperheight} and tiles of {width}x{height} you don\'t need to Embiggen! Check your arguments.' 
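+
# --- A minimal standalone sketch of the tile-count and crop-origin arithmetic that
# get_make_image()/make_image() use above and below (illustrative only, not part of
# this patch; the helper name and default arguments are hypothetical). It reproduces
# the same ceildiv-based tile counts and pins the last row/column to the far edge,
# exactly as the crop loop further down does.
def embiggen_tile_grid(super_w, super_h, tile_w=512, tile_h=512, overlap=0.25):
    def ceildiv(a, b):
        return -(-a // b)
    # overlap given as a ratio of the tile size, as in the default embiggen[2] = 0.25
    ov_x, ov_y = round(overlap * tile_w), round(overlap * tile_h)
    tiles_x = 1 if super_w <= tile_w else ceildiv(super_w - tile_w, tile_w - ov_x) + 1
    tiles_y = 1 if super_h <= tile_h else ceildiv(super_h - tile_h, tile_h - ov_y) + 1
    origins = []
    for tile in range(tiles_x * tiles_y):
        row, col = tile // tiles_x, tile % tiles_x
        left = super_w - tile_w if col + 1 == tiles_x else round(col * (tile_w - ov_x))
        top = super_h - tile_h if row + 1 == tiles_y else round(row * (tile_h - ov_y))
        origins.append((left, top))
    return tiles_x, tiles_y, origins
# e.g. embiggen_tile_grid(1280, 1024) -> (3, 3, [(0, 0), (384, 0), (768, 0),
#   (0, 384), (384, 384), (768, 384), (0, 512), (384, 512), (768, 512)])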
+ + # Prep alpha layers -------------- + # https://stackoverflow.com/questions/69321734/how-to-create-different-transparency-like-gradient-with-python-pil + # agradientL is Left-side transparent + agradientL = Image.linear_gradient('L').rotate( + 90).resize((overlap_size_x, height)) + # agradientT is Top-side transparent + agradientT = Image.linear_gradient('L').resize((width, overlap_size_y)) + # radial corner is the left-top corner, made full circle then cut to just the left-top quadrant + agradientC = Image.new('L', (256, 256)) + for y in range(256): + for x in range(256): + # Find distance to lower right corner (numpy takes arrays) + distanceToLR = np.sqrt([(255 - x) ** 2 + (255 - y) ** 2])[0] + # Clamp values to max 255 + if distanceToLR > 255: + distanceToLR = 255 + #Place the pixel as invert of distance + agradientC.putpixel((x, y), round(255 - distanceToLR)) + + # Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges + # Fits for a left-fading gradient on the bottom side and full opacity on the right side. + agradientAsymC = Image.new('L', (256, 256)) + for y in range(256): + for x in range(256): + value = round(max(0, x-(255-y)) * (255 / max(1,y))) + #Clamp values + value = max(0, value) + value = min(255, value) + agradientAsymC.putpixel((x, y), value) + + # Create alpha layers default fully white + alphaLayerL = Image.new("L", (width, height), 255) + alphaLayerT = Image.new("L", (width, height), 255) + alphaLayerLTC = Image.new("L", (width, height), 255) + # Paste gradients into alpha layers + alphaLayerL.paste(agradientL, (0, 0)) + alphaLayerT.paste(agradientT, (0, 0)) + alphaLayerLTC.paste(agradientL, (0, 0)) + alphaLayerLTC.paste(agradientT, (0, 0)) + alphaLayerLTC.paste(agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0)) + # make masks with an asymmetric upper-right corner so when the curved transparent corner of the next tile + # to its right is placed it doesn't reveal a hard trailing semi-transparent edge in the overlapping space + alphaLayerTaC = alphaLayerT.copy() + alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) + alphaLayerLTaC = alphaLayerLTC.copy() + alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) + + if embiggen_tiles: + # Individual unconnected sides + alphaLayerR = Image.new("L", (width, height), 255) + alphaLayerR.paste(agradientL.rotate( + 180), (width - overlap_size_x, 0)) + alphaLayerB = Image.new("L", (width, height), 255) + alphaLayerB.paste(agradientT.rotate( + 180), (0, height - overlap_size_y)) + alphaLayerTB = Image.new("L", (width, height), 255) + alphaLayerTB.paste(agradientT, (0, 0)) + alphaLayerTB.paste(agradientT.rotate( + 180), (0, height - overlap_size_y)) + alphaLayerLR = Image.new("L", (width, height), 255) + alphaLayerLR.paste(agradientL, (0, 0)) + alphaLayerLR.paste(agradientL.rotate( + 180), (width - overlap_size_x, 0)) + + # Sides and corner Layers + alphaLayerRBC = Image.new("L", (width, height), 255) + alphaLayerRBC.paste(agradientL.rotate( + 180), (width - overlap_size_x, 0)) + alphaLayerRBC.paste(agradientT.rotate( + 180), (0, height - overlap_size_y)) + alphaLayerRBC.paste(agradientC.rotate(180).resize( + (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) + alphaLayerLBC = Image.new("L", (width, height), 255) + alphaLayerLBC.paste(agradientL, (0, 0)) + alphaLayerLBC.paste(agradientT.rotate( + 180), (0, 
height - overlap_size_y)) + alphaLayerLBC.paste(agradientC.rotate(90).resize( + (overlap_size_x, overlap_size_y)), (0, height - overlap_size_y)) + alphaLayerRTC = Image.new("L", (width, height), 255) + alphaLayerRTC.paste(agradientL.rotate( + 180), (width - overlap_size_x, 0)) + alphaLayerRTC.paste(agradientT, (0, 0)) + alphaLayerRTC.paste(agradientC.rotate(270).resize( + (overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) + + # All but X layers + alphaLayerABT = Image.new("L", (width, height), 255) + alphaLayerABT.paste(alphaLayerLBC, (0, 0)) + alphaLayerABT.paste(agradientL.rotate( + 180), (width - overlap_size_x, 0)) + alphaLayerABT.paste(agradientC.rotate(180).resize( + (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) + alphaLayerABL = Image.new("L", (width, height), 255) + alphaLayerABL.paste(alphaLayerRTC, (0, 0)) + alphaLayerABL.paste(agradientT.rotate( + 180), (0, height - overlap_size_y)) + alphaLayerABL.paste(agradientC.rotate(180).resize( + (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) + alphaLayerABR = Image.new("L", (width, height), 255) + alphaLayerABR.paste(alphaLayerLBC, (0, 0)) + alphaLayerABR.paste(agradientT, (0, 0)) + alphaLayerABR.paste(agradientC.resize( + (overlap_size_x, overlap_size_y)), (0, 0)) + alphaLayerABB = Image.new("L", (width, height), 255) + alphaLayerABB.paste(alphaLayerRTC, (0, 0)) + alphaLayerABB.paste(agradientL, (0, 0)) + alphaLayerABB.paste(agradientC.resize( + (overlap_size_x, overlap_size_y)), (0, 0)) + + # All-around layer + alphaLayerAA = Image.new("L", (width, height), 255) + alphaLayerAA.paste(alphaLayerABT, (0, 0)) + alphaLayerAA.paste(agradientT, (0, 0)) + alphaLayerAA.paste(agradientC.resize( + (overlap_size_x, overlap_size_y)), (0, 0)) + alphaLayerAA.paste(agradientC.rotate(270).resize( + (overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) + + # Clean up temporary gradients + del agradientL + del agradientT + del agradientC + + def make_image(): + # Make main tiles ------------------------------------------------- + if embiggen_tiles: + print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...') + else: + print( + f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...') + + emb_tile_store = [] + # Although we could use the same seed for every tile for determinism, at higher strengths this may + # produce duplicated structures for each tile and make the tiling effect more obvious + # instead track and iterate a local seed we pass to Img2Img + seed = self.seed + seedintlimit = np.iinfo(np.uint32).max - 1 # only retreive this one from numpy + + for tile in range(emb_tiles_x * emb_tiles_y): + # Don't iterate on first tile + if tile != 0: + if seed < seedintlimit: + seed += 1 + else: + seed = 0 + + # Determine if this is a re-run and replace + if embiggen_tiles and not tile in embiggen_tiles: + continue + # Get row and column entries + emb_row_i = tile // emb_tiles_x + emb_column_i = tile % emb_tiles_x + # Determine bounds to cut up the init image + # Determine upper-left point + if emb_column_i + 1 == emb_tiles_x: + left = initsuperwidth - width + else: + left = round(emb_column_i * (width - overlap_size_x)) + if emb_row_i + 1 == emb_tiles_y: + top = initsuperheight - height + else: + top = round(emb_row_i * (height - overlap_size_y)) + right = left + width + bottom = top + height + + # Cropped image of above dimension (does not modify the original) + newinitimage = initsuperimage.crop((left, top, right, 
bottom)) + # DEBUG: + # newinitimagepath = init_img[0:-4] + f'_emb_Ti{tile}.png' + # newinitimage.save(newinitimagepath) + + if embiggen_tiles: + print( + f'Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)') + else: + print( + f'Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles') + + # create a torch tensor from an Image + newinitimage = np.array( + newinitimage).astype(np.float32) / 255.0 + newinitimage = newinitimage[None].transpose(0, 3, 1, 2) + newinitimage = torch.from_numpy(newinitimage) + newinitimage = 2.0 * newinitimage - 1.0 + newinitimage = newinitimage.to(self.model.device) + clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None + + tile_results = gen_img2img.generate( + prompt, + iterations = 1, + seed = seed, + sampler = sampler, + steps = steps, + cfg_scale = cfg_scale, + conditioning = conditioning, + ddim_eta = ddim_eta, + image_callback = None, # called only after the final image is generated + step_callback = step_callback, # called after each intermediate image is generated + width = width, + height = height, + init_image = newinitimage, # notice that init_image is different from init_img + mask_image = None, + strength = strength, + clear_cuda_cache = clear_cuda_cache + ) + + emb_tile_store.append(tile_results[0][0]) + # DEBUG (but, also has other uses), worth saving if you want tiles without a transparency overlap to manually composite + # emb_tile_store[-1].save(init_img[0:-4] + f'_emb_To{tile}.png') + del newinitimage + + # Sanity check we have them all + if len(emb_tile_store) == (emb_tiles_x * emb_tiles_y) or (embiggen_tiles != [] and len(emb_tile_store) == len(embiggen_tiles)): + outputsuperimage = Image.new( + "RGBA", (initsuperwidth, initsuperheight)) + if embiggen_tiles: + outputsuperimage.alpha_composite( + initsuperimage.convert('RGBA'), (0, 0)) + for tile in range(emb_tiles_x * emb_tiles_y): + if embiggen_tiles: + if tile in embiggen_tiles: + intileimage = emb_tile_store.pop(0) + else: + continue + else: + intileimage = emb_tile_store[tile] + intileimage = intileimage.convert('RGBA') + # Get row and column entries + emb_row_i = tile // emb_tiles_x + emb_column_i = tile % emb_tiles_x + if emb_row_i == 0 and emb_column_i == 0 and not embiggen_tiles: + left = 0 + top = 0 + else: + # Determine upper-left point + if emb_column_i + 1 == emb_tiles_x: + left = initsuperwidth - width + else: + left = round(emb_column_i * + (width - overlap_size_x)) + if emb_row_i + 1 == emb_tiles_y: + top = initsuperheight - height + else: + top = round(emb_row_i * (height - overlap_size_y)) + # Handle gradients for various conditions + # Handle emb_rerun case + if embiggen_tiles: + # top of image + if emb_row_i == 0: + if emb_column_i == 0: + if (tile+1) in embiggen_tiles: # Look-ahead right + if (tile+emb_tiles_x) not in embiggen_tiles: # Look-ahead down + intileimage.putalpha(alphaLayerB) + # Otherwise do nothing on this tile + elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only + intileimage.putalpha(alphaLayerR) + else: + intileimage.putalpha(alphaLayerRBC) + elif emb_column_i == emb_tiles_x - 1: + if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + intileimage.putalpha(alphaLayerL) + else: + intileimage.putalpha(alphaLayerLBC) + else: + if (tile+1) in embiggen_tiles: # Look-ahead right + if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + intileimage.putalpha(alphaLayerL) + else: + intileimage.putalpha(alphaLayerLBC) + elif (tile+emb_tiles_x) in 
embiggen_tiles: # Look-ahead down only + intileimage.putalpha(alphaLayerLR) + else: + intileimage.putalpha(alphaLayerABT) + # bottom of image + elif emb_row_i == emb_tiles_y - 1: + if emb_column_i == 0: + if (tile+1) in embiggen_tiles: # Look-ahead right + intileimage.putalpha(alphaLayerTaC) + else: + intileimage.putalpha(alphaLayerRTC) + elif emb_column_i == emb_tiles_x - 1: + # No tiles to look ahead to + intileimage.putalpha(alphaLayerLTC) + else: + if (tile+1) in embiggen_tiles: # Look-ahead right + intileimage.putalpha(alphaLayerLTaC) + else: + intileimage.putalpha(alphaLayerABB) + # vertical middle of image + else: + if emb_column_i == 0: + if (tile+1) in embiggen_tiles: # Look-ahead right + if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + intileimage.putalpha(alphaLayerTaC) + else: + intileimage.putalpha(alphaLayerTB) + elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only + intileimage.putalpha(alphaLayerRTC) + else: + intileimage.putalpha(alphaLayerABL) + elif emb_column_i == emb_tiles_x - 1: + if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + intileimage.putalpha(alphaLayerLTC) + else: + intileimage.putalpha(alphaLayerABR) + else: + if (tile+1) in embiggen_tiles: # Look-ahead right + if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + intileimage.putalpha(alphaLayerLTaC) + else: + intileimage.putalpha(alphaLayerABR) + elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only + intileimage.putalpha(alphaLayerABB) + else: + intileimage.putalpha(alphaLayerAA) + # Handle normal tiling case (much simpler - since we tile left to right, top to bottom) + else: + if emb_row_i == 0 and emb_column_i >= 1: + intileimage.putalpha(alphaLayerL) + elif emb_row_i >= 1 and emb_column_i == 0: + if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right + intileimage.putalpha(alphaLayerT) + else: + intileimage.putalpha(alphaLayerTaC) + else: + if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right + intileimage.putalpha(alphaLayerLTC) + else: + intileimage.putalpha(alphaLayerLTaC) + # Layer tile onto final image + outputsuperimage.alpha_composite(intileimage, (left, top)) + else: + print('Error: could not find all Embiggen output tiles in memory? 
Something must have gone wrong with img2img generation.') + + # after internal loops and patching up return Embiggen image + return outputsuperimage + # end of function declaration + return make_image diff --git a/invokeai/generator/img2img.py b/invokeai/generator/img2img.py new file mode 100644 index 0000000000..aed066d1bd --- /dev/null +++ b/invokeai/generator/img2img.py @@ -0,0 +1,69 @@ +''' +invokeai.backend.generator.img2img descends from ldm.invoke.generator +''' + +import torch +from diffusers import logging + +from .base import Generator +from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData +from ..models.diffusion.shared_invokeai_diffusion import PostprocessingSettings + +class Img2Img(Generator): + def __init__(self, model, precision): + super().__init__(model, precision) + self.init_latent = None # by get_noise() + + def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, + conditioning,init_image,strength,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0, + h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None, + **kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it. + """ + self.perlin = perlin + + # noinspection PyTypeChecker + pipeline: StableDiffusionGeneratorPipeline = self.model + pipeline.scheduler = sampler + + uc, c, extra_conditioning_info = conditioning + conditioning_data = ( + ConditioningData( + uc, c, cfg_scale, extra_conditioning_info, + postprocessing_settings=PostprocessingSettings( + threshold=threshold, + warmup=warmup, + h_symmetry_time_pct=h_symmetry_time_pct, + v_symmetry_time_pct=v_symmetry_time_pct + ) + ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) + + + def make_image(x_T): + # FIXME: use x_T for initial seeded noise + # We're not at the moment because the pipeline automatically resizes init_image if + # necessary, which the x_T input might not match. 
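+
# --- A minimal sketch of why the pre-seeded x_T is skipped above (illustrative only,
# not part of this patch). The noise handed to img2img has to match the latent shape
# of init_image *after* the pipeline has fitted it to the latent grid, so noise that
# was pre-generated for the originally requested width/height may no longer fit.
# Assuming the usual Stable Diffusion VAE downsampling factor of 8 and 4 latent
# channels; the helper below is hypothetical:
def expected_latent_shape(pixel_height: int, pixel_width: int) -> tuple:
    # for an image whose sides are already multiples of 8
    return (1, 4, pixel_height // 8, pixel_width // 8)
# expected_latent_shape(512, 768) == (1, 4, 64, 96), while an x_T prepared for a
# 512x512 request would be (1, 4, 64, 64); get_noise_like() below avoids the mismatch
# by sampling noise shaped like the actual latents.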
+ logging.set_verbosity_error() # quench safety check warnings + pipeline_output = pipeline.img2img_from_embeddings( + init_image, strength, steps, conditioning_data, + noise_func=self.get_noise_like, + callback=step_callback + ) + if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + attention_maps_callback(pipeline_output.attention_map_saver) + return pipeline.numpy_to_pil(pipeline_output.images)[0] + + return make_image + + def get_noise_like(self, like: torch.Tensor): + device = like.device + if device.type == 'mps': + x = torch.randn_like(like, device='cpu').to(device) + else: + x = torch.randn_like(like, device=device) + if self.perlin > 0.0: + shape = like.shape + x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) + return x diff --git a/invokeai/generator/inpaint.py b/invokeai/generator/inpaint.py new file mode 100644 index 0000000000..02f3de4531 --- /dev/null +++ b/invokeai/generator/inpaint.py @@ -0,0 +1,324 @@ +''' +invokeai.backend.generator.inpaint descends from ldm.invoke.generator +''' +from __future__ import annotations + +import math + +import PIL +import cv2 +import numpy as np +import torch +from PIL import Image, ImageFilter, ImageOps, ImageChops + +from .diffusers_pipeline import image_resized_to_grid_as_tensor, StableDiffusionGeneratorPipeline, \ + ConditioningData +from .img2img import Img2Img +from ldm.invoke.patchmatch import PatchMatch +from ldm.util import debug_image + + +def infill_methods()->list[str]: + methods = [ + "tile", + "solid", + ] + if PatchMatch.patchmatch_available(): + methods.insert(0, 'patchmatch') + return methods + +class Inpaint(Img2Img): + def __init__(self, model, precision): + self.inpaint_height = 0 + self.inpaint_width = 0 + self.enable_image_debugging = False + self.init_latent = None + self.pil_image = None + self.pil_mask = None + self.mask_blur_radius = 0 + self.infill_method = None + super().__init__(model, precision) + + # Outpaint support code + def get_tile_images(self, image: np.ndarray, width=8, height=8): + _nrows, _ncols, depth = image.shape + _strides = image.strides + + nrows, _m = divmod(_nrows, height) + ncols, _n = divmod(_ncols, width) + if _m != 0 or _n != 0: + return None + + return np.lib.stride_tricks.as_strided( + np.ravel(image), + shape=(nrows, ncols, height, width, depth), + strides=(height * _strides[0], width * _strides[1], *_strides), + writeable=False + ) + + def infill_patchmatch(self, im: Image.Image) -> Image: + if im.mode != 'RGBA': + return im + + # Skip patchmatch if patchmatch isn't available + if not PatchMatch.patchmatch_available(): + return im + + # Patchmatch (note, we may want to expose patch_size? 
Increasing it significantly impacts performance though) + im_patched_np = PatchMatch.inpaint(im.convert('RGB'), ImageOps.invert(im.split()[-1]), patch_size = 3) + im_patched = Image.fromarray(im_patched_np, mode = 'RGB') + return im_patched + + def tile_fill_missing(self, im: Image.Image, tile_size: int = 16, seed: int = None) -> Image: + # Only fill if there's an alpha layer + if im.mode != 'RGBA': + return im + + a = np.asarray(im, dtype=np.uint8) + + tile_size = (tile_size, tile_size) + + # Get the image as tiles of a specified size + tiles = self.get_tile_images(a,*tile_size).copy() + + # Get the mask as tiles + tiles_mask = tiles[:,:,:,:,3] + + # Find any mask tiles with any fully transparent pixels (we will be replacing these later) + tmask_shape = tiles_mask.shape + tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape)) + n,ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:]) + tiles_mask = (tiles_mask > 0) + tiles_mask = tiles_mask.reshape((n,ny)).all(axis = 1) + + # Get RGB tiles in single array and filter by the mask + tshape = tiles.shape + tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), * tiles.shape[2:])) + filtered_tiles = tiles_all[tiles_mask] + + if len(filtered_tiles) == 0: + return im + + # Find all invalid tiles and replace with a random valid tile + replace_count = (tiles_mask == False).sum() + rng = np.random.default_rng(seed = seed) + tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[rng.choice(filtered_tiles.shape[0], replace_count),:,:,:] + + # Convert back to an image + tiles_all = tiles_all.reshape(tshape) + tiles_all = tiles_all.swapaxes(1,2) + st = tiles_all.reshape((math.prod(tiles_all.shape[0:2]), math.prod(tiles_all.shape[2:4]), tiles_all.shape[4])) + si = Image.fromarray(st, mode='RGBA') + + return si + + + def mask_edge(self, mask: Image, edge_size: int, edge_blur: int) -> Image: + npimg = np.asarray(mask, dtype=np.uint8) + + # Detect any partially transparent regions + npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0))) + + # Detect hard edges + npedge = cv2.Canny(npimg, threshold1=100, threshold2=200) + + # Combine + npmask = npgradient + npedge + + # Expand + npmask = cv2.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2)) + + new_mask = Image.fromarray(npmask) + + if edge_blur > 0: + new_mask = new_mask.filter(ImageFilter.BoxBlur(edge_blur)) + + return ImageOps.invert(new_mask) + + + def seam_paint(self, im: Image.Image, seam_size: int, seam_blur: int, prompt, sampler, steps, cfg_scale, ddim_eta, + conditioning, strength, noise, infill_method, step_callback) -> Image.Image: + hard_mask = self.pil_image.split()[-1].copy() + mask = self.mask_edge(hard_mask, seam_size, seam_blur) + + make_image = self.get_make_image( + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + init_image = im.copy().convert('RGBA'), + mask_image = mask, + strength = strength, + mask_blur_radius = 0, + seam_size = 0, + step_callback = step_callback, + inpaint_width = im.width, + inpaint_height = im.height, + infill_method = infill_method + ) + + seam_noise = self.get_noise(im.width, im.height) + + result = make_image(seam_noise) + + return result + + + @torch.no_grad() + def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, + conditioning, + init_image: PIL.Image.Image | torch.FloatTensor, + mask_image: PIL.Image.Image | torch.FloatTensor, + strength: float, + mask_blur_radius: int = 8, + # Seam settings - when 0, doesn't fill seam + seam_size: int = 0, + 
seam_blur: int = 0, + seam_strength: float = 0.7, + seam_steps: int = 10, + tile_size: int = 32, + step_callback=None, + inpaint_replace=False, enable_image_debugging=False, + infill_method = None, + inpaint_width=None, + inpaint_height=None, + inpaint_fill:tuple(int)=(0x7F, 0x7F, 0x7F, 0xFF), + attention_maps_callback=None, + **kwargs): + """ + Returns a function returning an image derived from the prompt and + the initial image + mask. Return value depends on the seed at + the time you call it. kwargs are 'init_latent' and 'strength' + """ + + self.enable_image_debugging = enable_image_debugging + infill_method = infill_method or infill_methods()[0] + self.infill_method = infill_method + + self.inpaint_width = inpaint_width + self.inpaint_height = inpaint_height + + if isinstance(init_image, PIL.Image.Image): + self.pil_image = init_image.copy() + + # Do infill + if infill_method == 'patchmatch' and PatchMatch.patchmatch_available(): + init_filled = self.infill_patchmatch(self.pil_image.copy()) + elif infill_method == 'tile': + init_filled = self.tile_fill_missing( + self.pil_image.copy(), + seed = self.seed, + tile_size = tile_size + ) + elif infill_method == 'solid': + solid_bg = PIL.Image.new("RGBA", init_image.size, inpaint_fill) + init_filled = PIL.Image.alpha_composite(solid_bg, init_image) + else: + raise ValueError(f"Non-supported infill type {infill_method}", infill_method) + init_filled.paste(init_image, (0,0), init_image.split()[-1]) + + # Resize if requested for inpainting + if inpaint_width and inpaint_height: + init_filled = init_filled.resize((inpaint_width, inpaint_height)) + + debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging) + + # Create init tensor + init_image = image_resized_to_grid_as_tensor(init_filled.convert('RGB')) + + if isinstance(mask_image, PIL.Image.Image): + self.pil_mask = mask_image.copy() + debug_image(mask_image, "mask_image BEFORE multiply with pil_image", debug_status=self.enable_image_debugging) + + init_alpha = self.pil_image.getchannel("A") + if mask_image.mode != "L": + # FIXME: why do we get passed an RGB image here? We can only use single-channel. + mask_image = mask_image.convert("L") + mask_image = ImageChops.multiply(mask_image, init_alpha) + self.pil_mask = mask_image + + # Resize if requested for inpainting + if inpaint_width and inpaint_height: + mask_image = mask_image.resize((inpaint_width, inpaint_height)) + + debug_image(mask_image, "mask_image AFTER multiply with pil_image", debug_status=self.enable_image_debugging) + mask: torch.FloatTensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) + else: + mask: torch.FloatTensor = mask_image + + self.mask_blur_radius = mask_blur_radius + + # noinspection PyTypeChecker + pipeline: StableDiffusionGeneratorPipeline = self.model + pipeline.scheduler = sampler + + # todo: support cross-attention control + uc, c, _ = conditioning + conditioning_data = (ConditioningData(uc, c, cfg_scale) + .add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) + + + def make_image(x_T): + pipeline_output = pipeline.inpaint_from_embeddings( + init_image=init_image, + mask=1 - mask, # expects white means "paint here." 
+ strength=strength, + num_inference_steps=steps, + conditioning_data=conditioning_data, + noise_func=self.get_noise_like, + callback=step_callback, + ) + + if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + attention_maps_callback(pipeline_output.attention_map_saver) + + result = self.postprocess_size_and_mask(pipeline.numpy_to_pil(pipeline_output.images)[0]) + + # Seam paint if this is our first pass (seam_size set to 0 during seam painting) + if seam_size > 0: + old_image = self.pil_image or init_image + old_mask = self.pil_mask or mask_image + + result = self.seam_paint(result, seam_size, seam_blur, prompt, sampler, seam_steps, cfg_scale, ddim_eta, + conditioning, seam_strength, x_T, infill_method, step_callback) + + # Restore original settings + self.get_make_image(prompt,sampler,steps,cfg_scale,ddim_eta, + conditioning, + old_image, + old_mask, + strength, + mask_blur_radius, seam_size, seam_blur, seam_strength, + seam_steps, tile_size, step_callback, + inpaint_replace, enable_image_debugging, + inpaint_width = inpaint_width, + inpaint_height = inpaint_height, + infill_method = infill_method, + **kwargs) + + return result + + return make_image + + + def sample_to_image(self, samples)->Image.Image: + gen_result = super().sample_to_image(samples).convert('RGB') + return self.postprocess_size_and_mask(gen_result) + + + def postprocess_size_and_mask(self, gen_result: Image.Image) -> Image.Image: + debug_image(gen_result, "gen_result", debug_status=self.enable_image_debugging) + + # Resize if necessary + if self.inpaint_width and self.inpaint_height: + gen_result = gen_result.resize(self.pil_image.size) + + if self.pil_image is None or self.pil_mask is None: + return gen_result + + corrected_result = self.repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) + debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging) + + return corrected_result diff --git a/invokeai/generator/omnibus.py b/invokeai/generator/omnibus.py new file mode 100644 index 0000000000..a6fae3e567 --- /dev/null +++ b/invokeai/generator/omnibus.py @@ -0,0 +1,173 @@ +"""omnibus module to be used with the runwayml 9-channel custom inpainting model""" + +import torch +from PIL import Image, ImageOps +from einops import repeat + +from ldm.invoke.devices import choose_autocast +from ldm.invoke.generator.img2img import Img2Img +from ldm.invoke.generator.txt2img import Txt2Img + + +class Omnibus(Img2Img,Txt2Img): + def __init__(self, model, precision): + super().__init__(model, precision) + self.pil_mask = None + self.pil_image = None + + def get_make_image( + self, + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + width, + height, + init_image = None, + mask_image = None, + strength = None, + step_callback=None, + threshold=0.0, + perlin=0.0, + mask_blur_radius: int = 8, + **kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it. 
+ """ + self.perlin = perlin + num_samples = 1 + + sampler.make_schedule( + ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False + ) + + if isinstance(init_image, Image.Image): + self.pil_image = init_image + if init_image.mode != 'RGB': + init_image = init_image.convert('RGB') + init_image = self._image_to_tensor(init_image) + + if isinstance(mask_image, Image.Image): + self.pil_mask = mask_image + + mask_image = ImageChops.multiply(mask_image.convert('L'), self.pil_image.split()[-1]) + mask_image = self._image_to_tensor(ImageOps.invert(mask_image), normalize=False) + + self.mask_blur_radius = mask_blur_radius + + if init_image is not None and mask_image is not None: # inpainting + masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero + + elif init_image is not None: # img2img + scope = choose_autocast(self.precision) + + with scope(self.model.device.type): + self.init_latent = self.model.get_first_stage_encoding( + self.model.encode_first_stage(init_image) + ) # move to latent space + + # create a completely black mask (1s) + mask_image = torch.ones(1, 1, init_image.shape[2], init_image.shape[3], device=self.model.device) + # and the masked image is just a copy of the original + masked_image = init_image + + else: # txt2img + init_image = torch.zeros(1, 3, height, width, device=self.model.device) + mask_image = torch.ones(1, 1, height, width, device=self.model.device) + masked_image = init_image + + self.init_latent = init_image + height = init_image.shape[2] + width = init_image.shape[3] + model = self.model + + def make_image(x_T): + with torch.no_grad(): + scope = choose_autocast(self.precision) + with scope(self.model.device.type): + + batch = self.make_batch_sd( + init_image, + mask_image, + masked_image, + prompt=prompt, + device=model.device, + num_samples=num_samples, + ) + + c = model.cond_stage_model.encode(batch["txt"]) + c_cat = list() + for ck in model.concat_keys: + cc = batch[ck].float() + if ck != model.masked_image_key: + bchw = [num_samples, 4, height//8, width//8] + cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) + else: + cc = model.get_first_stage_encoding(model.encode_first_stage(cc)) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + + # cond + cond={"c_concat": [c_cat], "c_crossattn": [c]} + + # uncond cond + uc_cross = model.get_unconditional_conditioning(num_samples, "") + uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]} + shape = [model.channels, height//8, width//8] + + samples, _ = sampler.sample( + batch_size = 1, + S = steps, + x_T = x_T, + conditioning = cond, + shape = shape, + verbose = False, + unconditional_guidance_scale = cfg_scale, + unconditional_conditioning = uc_full, + eta = 1.0, + img_callback = step_callback, + threshold = threshold, + ) + if self.free_gpu_mem: + self.model.model.to("cpu") + return self.sample_to_image(samples) + + return make_image + + def make_batch_sd( + self, + image, + mask, + masked_image, + prompt, + device, + num_samples=1): + batch = { + "image": repeat(image.to(device=device), "1 ... -> n ...", n=num_samples), + "txt": num_samples * [prompt], + "mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples), + "masked_image": repeat(masked_image.to(device=device), "1 ... 
-> n ...", n=num_samples), + } + return batch + + def get_noise(self, width:int, height:int): + if self.init_latent is not None: + height = self.init_latent.shape[2] + width = self.init_latent.shape[3] + return Txt2Img.get_noise(self,width,height) + + + def sample_to_image(self, samples)->Image.Image: + gen_result = super().sample_to_image(samples).convert('RGB') + + if self.pil_image is None or self.pil_mask is None: + return gen_result + if self.pil_image.size != self.pil_mask.size: + return gen_result + + corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) + + return corrected_result diff --git a/invokeai/generator/txt2img.py b/invokeai/generator/txt2img.py new file mode 100644 index 0000000000..7802e3a913 --- /dev/null +++ b/invokeai/generator/txt2img.py @@ -0,0 +1,60 @@ +''' +invokeai.backend.generator.txt2img inherits from invokeai.backend.generator +''' +import PIL.Image +import torch + +from .base import Generator +from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData +from ..models import PostprocessingSettings + +class Txt2Img(Generator): + def __init__(self, model, precision): + super().__init__(model, precision) + + @torch.no_grad() + def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, + conditioning,width,height,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0, + h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None, + **kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it + kwargs are 'width' and 'height' + """ + self.perlin = perlin + + # noinspection PyTypeChecker + pipeline: StableDiffusionGeneratorPipeline = self.model + pipeline.scheduler = sampler + + uc, c, extra_conditioning_info = conditioning + conditioning_data = ( + ConditioningData( + uc, c, cfg_scale, extra_conditioning_info, + postprocessing_settings=PostprocessingSettings( + threshold=threshold, + warmup=warmup, + h_symmetry_time_pct=h_symmetry_time_pct, + v_symmetry_time_pct=v_symmetry_time_pct + ) + ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) + + def make_image(x_T) -> PIL.Image.Image: + pipeline_output = pipeline.image_from_embeddings( + latents=torch.zeros_like(x_T,dtype=self.torch_dtype()), + noise=x_T, + num_inference_steps=steps, + conditioning_data=conditioning_data, + callback=step_callback, + ) + + if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + attention_maps_callback(pipeline_output.attention_map_saver) + + return pipeline.numpy_to_pil(pipeline_output.images)[0] + + return make_image + + + diff --git a/invokeai/generator/txt2img2img.py b/invokeai/generator/txt2img2img.py new file mode 100644 index 0000000000..67de74fecf --- /dev/null +++ b/invokeai/generator/txt2img2img.py @@ -0,0 +1,163 @@ +''' +invokeai.backend.generator.txt2img inherits from invokeai.backend.generator +''' + +import math +from typing import Callable, Optional + +import torch +from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error + +from .base import Generator +from .diffusers_pipeline import trim_to_multiple_of, StableDiffusionGeneratorPipeline, \ + ConditioningData +from ..models import PostprocessingSettings + + +class Txt2Img2Img(Generator): + def __init__(self, model, precision): + super().__init__(model, precision) + self.init_latent = None # for get_noise() + + def 
get_make_image(self, prompt:str, sampler, steps:int, cfg_scale:float, ddim_eta, + conditioning, width:int, height:int, strength:float, + step_callback:Optional[Callable]=None, threshold=0.0, warmup=0.2, perlin=0.0, + h_symmetry_time_pct=None, v_symmetry_time_pct=None, attention_maps_callback=None, **kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it + kwargs are 'width' and 'height' + """ + self.perlin = perlin + + # noinspection PyTypeChecker + pipeline: StableDiffusionGeneratorPipeline = self.model + pipeline.scheduler = sampler + + uc, c, extra_conditioning_info = conditioning + conditioning_data = ( + ConditioningData( + uc, c, cfg_scale, extra_conditioning_info, + postprocessing_settings = PostprocessingSettings( + threshold=threshold, + warmup=0.2, + h_symmetry_time_pct=h_symmetry_time_pct, + v_symmetry_time_pct=v_symmetry_time_pct + ) + ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) + + def make_image(x_T): + + first_pass_latent_output, _ = pipeline.latents_from_embeddings( + latents=torch.zeros_like(x_T), + num_inference_steps=steps, + conditioning_data=conditioning_data, + noise=x_T, + callback=step_callback, + ) + + # Get our initial generation width and height directly from the latent output so + # the message below is accurate. + init_width = first_pass_latent_output.size()[3] * self.downsampling_factor + init_height = first_pass_latent_output.size()[2] * self.downsampling_factor + print( + f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling" + ) + + # resizing + resized_latents = torch.nn.functional.interpolate( + first_pass_latent_output, + size=(height // self.downsampling_factor, width // self.downsampling_factor), + mode="bilinear" + ) + + # Free up memory from the last generation. + clear_cuda_cache = kwargs['clear_cuda_cache'] or None + if clear_cuda_cache is not None: + clear_cuda_cache() + + second_pass_noise = self.get_noise_like(resized_latents, override_perlin=True) + + # Clear symmetry for the second pass + from dataclasses import replace + new_postprocessing_settings = replace(conditioning_data.postprocessing_settings, h_symmetry_time_pct=None) + new_postprocessing_settings = replace(new_postprocessing_settings, v_symmetry_time_pct=None) + new_conditioning_data = replace(conditioning_data, postprocessing_settings=new_postprocessing_settings) + + verbosity = get_verbosity() + set_verbosity_error() + pipeline_output = pipeline.img2img_from_latents_and_embeddings( + resized_latents, + num_inference_steps=steps, + conditioning_data=new_conditioning_data, + strength=strength, + noise=second_pass_noise, + callback=step_callback) + set_verbosity(verbosity) + + if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + attention_maps_callback(pipeline_output.attention_map_saver) + + return pipeline.numpy_to_pil(pipeline_output.images)[0] + + + # FIXME: do we really need something entirely different for the inpainting model? + + # in the case of the inpainting model being loaded, the trick of + # providing an interpolated latent doesn't work, so we transiently + # create a 512x512 PIL image, upscale it, and run the inpainting + # over it in img2img mode. 
Because the inpaing model is so conservative + # it doesn't change the image (much) + + return make_image + + def get_noise_like(self, like: torch.Tensor, override_perlin: bool=False): + device = like.device + if device.type == 'mps': + x = torch.randn_like(like, device='cpu', dtype=self.torch_dtype()).to(device) + else: + x = torch.randn_like(like, device=device, dtype=self.torch_dtype()) + if self.perlin > 0.0 and override_perlin == False: + shape = like.shape + x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) + return x + + # returns a tensor filled with random numbers from a normal distribution + def get_noise(self,width,height,scale = True): + # print(f"Get noise: {width}x{height}") + if scale: + # Scale the input width and height for the initial generation + # Make their area equivalent to the model's resolution area (e.g. 512*512 = 262144), + # while keeping the minimum dimension at least 0.5 * resolution (e.g. 512*0.5 = 256) + + aspect = width / height + dimension = self.model.unet.config.sample_size * self.model.vae_scale_factor + min_dimension = math.floor(dimension * 0.5) + model_area = dimension * dimension # hardcoded for now since all models are trained on square images + + if aspect > 1.0: + init_height = max(min_dimension, math.sqrt(model_area / aspect)) + init_width = init_height * aspect + else: + init_width = max(min_dimension, math.sqrt(model_area * aspect)) + init_height = init_width / aspect + + scaled_width, scaled_height = trim_to_multiple_of(math.floor(init_width), math.floor(init_height)) + + else: + scaled_width = width + scaled_height = height + + device = self.model.device + channels = self.latent_channels + if channels == 9: + channels = 4 # we don't really want noise for all the mask channels + shape = (1, channels, + scaled_height // self.downsampling_factor, scaled_width // self.downsampling_factor) + if self.use_mps_noise or device.type == 'mps': + tensor = torch.empty(size=shape, device='cpu') + tensor = self.get_noise_like(like=tensor).to(device) + else: + tensor = torch.empty(size=shape, device=device) + tensor = self.get_noise_like(like=tensor) + return tensor diff --git a/invokeai/models/__pycache__/__init__.cpython-310.pyc b/invokeai/models/__pycache__/__init__.cpython-310.pyc index 53157f298f8a4a4dd7ca0a62b4b1346bba613bab..01511dd53ace79fa8c5097354e620c121d18e529 100644 GIT binary patch delta 64 zcmeBWox#eR&&$ij00i$R{7qglk++PMFPA@xe`3ciMuEx6jLM9hlNU19Om1aVs PWGE5@QbmH3&6tt^8-@?{ From 1d77581d9684c4d567b695c5b47cf2ec5e7254b1 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 28 Feb 2023 00:45:56 -0500 Subject: [PATCH 04/19] restore behavior of !import_model; fix initial models bug --- .../__pycache__/model_manager.cpython-310.pyc | Bin 33421 -> 33589 bytes invokeai/models/model_manager.py | 40 ++++++++++++------ ldm/invoke/config/model_install_backend.py | 2 +- 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/invokeai/models/__pycache__/model_manager.cpython-310.pyc b/invokeai/models/__pycache__/model_manager.cpython-310.pyc index 0f035e0d7a7352c1c006bf302893d2ed2db32611..b12ee5470334c667506dcbdb1816b08ba4c016ac 100644 GIT binary patch delta 406 zcmXwyPe>bK5XJY+epx~wThfanDyThl6)Ff7Q4awvnCTE@my!5~L^Ayk;!u z(&N0ZO6^0F^n3&~sSjvY3m^5cmoh{Q7d9>m(9Oxl5B#oFArg`=lDJwN=^~=GH z;$RoqHwPry&A*FsaNztXDXr8Jv+{oRK26c~CaF=kyGE*1GJht0e4<|J^g;Zf=SZvu z{B@Qii-nja?RhN6J|J0gzocpW7GM`?Uhy3)iegU&w_>CBxPeH8QdVRF!BzR6v*fJi zaNoH?zr@~wSD;Ifkw7;mjaHzOj|SregFV9$U`b?0Y^?FhXe}Yd>5!kW!&l?a3T)n; 
z$^eJl5`F@G%A!170Jty{0;;%qwh^f3j=3P<<8)*UXcO=ATWIIyXkLcS+TuN7g1=&q d1e0&#Hjoo}^9~mxnK-~Td#y7CP5iPxM1Ko5V)Fn1 diff --git a/invokeai/models/model_manager.py b/invokeai/models/model_manager.py index 2a0a8ec933..b204cf9444 100644 --- a/invokeai/models/model_manager.py +++ b/invokeai/models/model_manager.py @@ -746,19 +746,35 @@ class ModelManager(object): f"** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. Skipping import" ) return + + if convert: + diffuser_path = Path( + Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem + ) + model_name = self.convert_and_import( + model_path, + diffusers_path=diffuser_path, + vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), + model_name=model_name, + model_description=description, + original_config_file=model_config_file, + commit_to_conf=commit_to_conf, + ) + else: + model_name = self.import_ckpt_model( + model_path, + config=model_config_file, + model_name=model_name, + model_description=description, + vae=str( + Path( + Globals.root, + "models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt", + ) + ), + commit_to_conf=commit_to_conf, + ) - diffuser_path = Path( - Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem - ) - model_name = self.convert_and_import( - model_path, - diffusers_path=diffuser_path, - vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), - model_name=model_name, - model_description=description, - original_config_file=model_config_file, - commit_to_conf=commit_to_conf, - ) if commit_to_conf: self.commit(commit_to_conf) return model_name diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index 186af2aaae..d86bd14b00 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -18,7 +18,7 @@ from tqdm import tqdm from typing import List import invokeai.configs as configs -from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from invokeai.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline from ..globals import Globals, global_cache_dir, global_config_dir from invokeai.models import ModelManager From 5b6c61fc75ad1bc1a8ec6dee61fb139b3e272d78 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 28 Feb 2023 08:32:11 -0500 Subject: [PATCH 05/19] move models and generator into backend --- invokeai/backend/__init__.py | 3 ++- invokeai/{ => backend}/generator/__init__.py | 0 invokeai/{ => backend}/generator/base.py | 0 .../generator/diffusers_pipeline.py | 4 ++-- invokeai/{ => backend}/generator/embiggen.py | 0 invokeai/{ => backend}/generator/img2img.py | 2 +- invokeai/{ => backend}/generator/inpaint.py | 0 invokeai/{ => backend}/generator/omnibus.py | 0 invokeai/{ => backend}/generator/txt2img.py | 0 .../{ => backend}/generator/txt2img2img.py | 0 invokeai/backend/invoke_ai_web_server.py | 2 +- invokeai/{ => backend}/models/__init__.py | 2 +- invokeai/{ => backend}/models/__init__.py~ | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 664 -> 680 bytes .../__pycache__/autoencoder.cpython-310.pyc | Bin 13718 -> 13718 bytes .../__pycache__/model_manager.cpython-310.pyc | Bin 33589 -> 33597 bytes invokeai/{ => backend}/models/autoencoder.py | 0 invokeai/backend/models/diffusion/__init__.py | 6 ++++++ .../models/diffusion/__init__.py~ | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 488 bytes .../cross_attention_control.cpython-310.pyc | Bin 20594 -> 20599 bytes ...cross_attention_map_saving.cpython-310.pyc | Bin 3333 -> 3333 bytes 
.../__pycache__/ddim.cpython-310.pyc | Bin 3044 -> 3044 bytes .../__pycache__/ddpm.cpython-310.pyc | Bin 48129 -> 48129 bytes .../__pycache__/ksampler.cpython-310.pyc | Bin 7538 -> 7543 bytes .../__pycache__/plms.cpython-310.pyc | Bin 3913 -> 3913 bytes .../__pycache__/sampler.cpython-310.pyc | Bin 9799 -> 9799 bytes .../shared_invokeai_diffusion.cpython-310.pyc | Bin 15344 -> 15344 bytes .../models/diffusion/classifier.py | 0 .../diffusion/cross_attention_control.py | 0 .../diffusion/cross_attention_map_saving.py | 0 .../{ => backend}/models/diffusion/ddim.py | 0 .../{ => backend}/models/diffusion/ddpm.py | 0 .../models/diffusion/ksampler.py | 0 .../{ => backend}/models/diffusion/plms.py | 0 .../{ => backend}/models/diffusion/sampler.py | 0 .../diffusion/shared_invokeai_diffusion.py | 0 .../{ => backend}/models/model_manager.py | 2 +- invokeai/models/diffusion/__init__.py | 4 ---- .../__pycache__/__init__.cpython-310.pyc | Bin 304 -> 0 bytes ldm/generate.py | 7 +++---- ldm/invoke/CLI.py | 6 +++--- ldm/invoke/conditioning.py | 2 +- ldm/invoke/config/model_install_backend.py | 4 ++-- ldm/invoke/merge_diffusers.py | 3 +-- ldm/modules/attention.py | 2 +- 46 files changed, 25 insertions(+), 24 deletions(-) rename invokeai/{ => backend}/generator/__init__.py (100%) rename invokeai/{ => backend}/generator/base.py (100%) rename invokeai/{ => backend}/generator/diffusers_pipeline.py (99%) rename invokeai/{ => backend}/generator/embiggen.py (100%) rename invokeai/{ => backend}/generator/img2img.py (97%) rename invokeai/{ => backend}/generator/inpaint.py (100%) rename invokeai/{ => backend}/generator/omnibus.py (100%) rename invokeai/{ => backend}/generator/txt2img.py (100%) rename invokeai/{ => backend}/generator/txt2img2img.py (100%) rename invokeai/{ => backend}/models/__init__.py (86%) rename invokeai/{ => backend}/models/__init__.py~ (100%) rename invokeai/{ => backend}/models/__pycache__/__init__.cpython-310.pyc (58%) rename invokeai/{ => backend}/models/__pycache__/autoencoder.cpython-310.pyc (99%) rename invokeai/{ => backend}/models/__pycache__/model_manager.cpython-310.pyc (94%) rename invokeai/{ => backend}/models/autoencoder.py (100%) create mode 100644 invokeai/backend/models/diffusion/__init__.py rename invokeai/{ => backend}/models/diffusion/__init__.py~ (100%) create mode 100644 invokeai/backend/models/diffusion/__pycache__/__init__.cpython-310.pyc rename invokeai/{ => backend}/models/diffusion/__pycache__/cross_attention_control.cpython-310.pyc (98%) rename invokeai/{ => backend}/models/diffusion/__pycache__/cross_attention_map_saving.cpython-310.pyc (96%) rename invokeai/{ => backend}/models/diffusion/__pycache__/ddim.cpython-310.pyc (96%) rename invokeai/{ => backend}/models/diffusion/__pycache__/ddpm.cpython-310.pyc (99%) rename invokeai/{ => backend}/models/diffusion/__pycache__/ksampler.cpython-310.pyc (96%) rename invokeai/{ => backend}/models/diffusion/__pycache__/plms.cpython-310.pyc (97%) rename invokeai/{ => backend}/models/diffusion/__pycache__/sampler.cpython-310.pyc (99%) rename invokeai/{ => backend}/models/diffusion/__pycache__/shared_invokeai_diffusion.cpython-310.pyc (99%) rename invokeai/{ => backend}/models/diffusion/classifier.py (100%) rename invokeai/{ => backend}/models/diffusion/cross_attention_control.py (100%) rename invokeai/{ => backend}/models/diffusion/cross_attention_map_saving.py (100%) rename invokeai/{ => backend}/models/diffusion/ddim.py (100%) rename invokeai/{ => backend}/models/diffusion/ddpm.py (100%) rename invokeai/{ => 
backend}/models/diffusion/ksampler.py (100%) rename invokeai/{ => backend}/models/diffusion/plms.py (100%) rename invokeai/{ => backend}/models/diffusion/sampler.py (100%) rename invokeai/{ => backend}/models/diffusion/shared_invokeai_diffusion.py (100%) rename invokeai/{ => backend}/models/model_manager.py (100%) delete mode 100644 invokeai/models/diffusion/__init__.py delete mode 100644 invokeai/models/diffusion/__pycache__/__init__.cpython-310.pyc diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index 82014807ba..16ced6d9d3 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -1,6 +1,7 @@ ''' Initialization file for invokeai.backend ''' -from .invoke_ai_web_server import InvokeAIWebServer +# this is causing circular import issues +# from .invoke_ai_web_server import InvokeAIWebServer diff --git a/invokeai/generator/__init__.py b/invokeai/backend/generator/__init__.py similarity index 100% rename from invokeai/generator/__init__.py rename to invokeai/backend/generator/__init__.py diff --git a/invokeai/generator/base.py b/invokeai/backend/generator/base.py similarity index 100% rename from invokeai/generator/base.py rename to invokeai/backend/generator/base.py diff --git a/invokeai/generator/diffusers_pipeline.py b/invokeai/backend/generator/diffusers_pipeline.py similarity index 99% rename from invokeai/generator/diffusers_pipeline.py rename to invokeai/backend/generator/diffusers_pipeline.py index 709617c37f..db86fbaf11 100644 --- a/invokeai/generator/diffusers_pipeline.py +++ b/invokeai/backend/generator/diffusers_pipeline.py @@ -27,11 +27,11 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from typing_extensions import ParamSpec from ldm.invoke.globals import Globals -from invokeai.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings +from ..models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings from ldm.modules.textual_inversion_manager import TextualInversionManager from ldm.invoke.devices import normalize_device, CPU_DEVICE from ldm.invoke.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup -from ..models.diffusion.cross_attention_map_saving import AttentionMapSaver +from ..models.diffusion import AttentionMapSaver from compel import EmbeddingsProvider @dataclass diff --git a/invokeai/generator/embiggen.py b/invokeai/backend/generator/embiggen.py similarity index 100% rename from invokeai/generator/embiggen.py rename to invokeai/backend/generator/embiggen.py diff --git a/invokeai/generator/img2img.py b/invokeai/backend/generator/img2img.py similarity index 97% rename from invokeai/generator/img2img.py rename to invokeai/backend/generator/img2img.py index aed066d1bd..8cc2004745 100644 --- a/invokeai/generator/img2img.py +++ b/invokeai/backend/generator/img2img.py @@ -7,7 +7,7 @@ from diffusers import logging from .base import Generator from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData -from ..models.diffusion.shared_invokeai_diffusion import PostprocessingSettings +from ..models.diffusion import PostprocessingSettings class Img2Img(Generator): def __init__(self, model, precision): diff --git a/invokeai/generator/inpaint.py b/invokeai/backend/generator/inpaint.py similarity index 100% rename from invokeai/generator/inpaint.py rename to invokeai/backend/generator/inpaint.py diff --git a/invokeai/generator/omnibus.py b/invokeai/backend/generator/omnibus.py similarity index 100% rename 
from invokeai/generator/omnibus.py rename to invokeai/backend/generator/omnibus.py diff --git a/invokeai/generator/txt2img.py b/invokeai/backend/generator/txt2img.py similarity index 100% rename from invokeai/generator/txt2img.py rename to invokeai/backend/generator/txt2img.py diff --git a/invokeai/generator/txt2img2img.py b/invokeai/backend/generator/txt2img2img.py similarity index 100% rename from invokeai/generator/txt2img2img.py rename to invokeai/backend/generator/txt2img2img.py diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py index c93e5e2a60..f624cb7710 100644 --- a/invokeai/backend/invoke_ai_web_server.py +++ b/invokeai/backend/invoke_ai_web_server.py @@ -27,7 +27,7 @@ from invokeai.backend.modules.parameters import parameters_to_command from ldm.generate import Generate from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, get_tokenizer -from ..generator import infill_methods, PipelineIntermediateState +from .generator import infill_methods, PipelineIntermediateState from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir, global_models_dir ) diff --git a/invokeai/models/__init__.py b/invokeai/backend/models/__init__.py similarity index 86% rename from invokeai/models/__init__.py rename to invokeai/backend/models/__init__.py index 70abd4358e..bd11ddb78c 100644 --- a/invokeai/models/__init__.py +++ b/invokeai/backend/models/__init__.py @@ -1,5 +1,5 @@ ''' -Initialization file for the invokeai.models package +Initialization file for the invokeai.backend.models package ''' from .model_manager import ModelManager, SDLegacyType from .diffusion import InvokeAIDiffuserComponent diff --git a/invokeai/models/__init__.py~ b/invokeai/backend/models/__init__.py~ similarity index 100% rename from invokeai/models/__init__.py~ rename to invokeai/backend/models/__init__.py~ diff --git a/invokeai/models/__pycache__/__init__.cpython-310.pyc b/invokeai/backend/models/__pycache__/__init__.cpython-310.pyc similarity index 58% rename from invokeai/models/__pycache__/__init__.cpython-310.pyc rename to invokeai/backend/models/__pycache__/__init__.cpython-310.pyc index 01511dd53ace79fa8c5097354e620c121d18e529..0d3b9ff6c5f5b3495fc8cf052ad0e3cef482d707 100644 GIT binary patch delta 56 zcmbQix`LHApO=@50SF@h{Y~C5k++4>cH$&8_N2t*?9{xJjoX_U8J#9uGiks?CMPkO F0RZgD5$yl~ delta 40 vcmZ3%I)jxrpO=@50SMkt_?x_9B5w<$>BLED8xJ%wGFnY`X405k%w!4x_-PD( diff --git a/invokeai/models/__pycache__/autoencoder.cpython-310.pyc b/invokeai/backend/models/__pycache__/autoencoder.cpython-310.pyc similarity index 99% rename from invokeai/models/__pycache__/autoencoder.cpython-310.pyc rename to invokeai/backend/models/__pycache__/autoencoder.cpython-310.pyc index a47741b0d4fb45a304320ffa59c48fc41f4c2b82..e4bd36325319c0a98f247cce80a3bc6361af89ea 100644 GIT binary patch delta 20 acmbQ1JuRC%pO=@50SKyo|J}&lZ3+ND#RiW6 delta 20 acmbQ1JuRC%pO=@50SGMi|K7;mZ3+NAg$54* diff --git a/invokeai/models/__pycache__/model_manager.cpython-310.pyc b/invokeai/backend/models/__pycache__/model_manager.cpython-310.pyc similarity index 94% rename from invokeai/models/__pycache__/model_manager.cpython-310.pyc rename to invokeai/backend/models/__pycache__/model_manager.cpython-310.pyc index b12ee5470334c667506dcbdb1816b08ba4c016ac..bfa9a24f27a264d8e2e9f724207d909db2ce103c 100644 GIT binary patch delta 333 zcmdnm#Osj%So$6x>|Fc#d&$K6A)qH)hSrhnWKy#U_ie#4);0E@shSPfASA 
zPR&c%Jc~t(AE@@I@E%b{!O2aUMvOv}w`%Gzif?|TDas6#`Jofd$|$@!#E6xPQFL>Z zO}HRXaFcJo0FY9Od&>^gC6H#tD74u-EtE+}5a`^ZBS7L7Yi@Q*W>JyI=1my_OhDrf zXE`$hDfS#oHlSk1qED=BLLkM0n=LC2Fam{sRmLy^%?z*p$7U>)lai~KnOBydovN3f zlb@8BQ(Oc#8l>eG2S~QGBr~T-7-WJ7h!C9|*rYUhRa3Qq0HX*a532w(3mXd)%YP9T E08j{7dH?_b delta 327 zcmdnn#AI|N{f*pg)v1iM>|(1N+(x0N*Bx* z%F)Z!kJ8UIh?*SFD8b0Txq|T=m_Lna?)4qz0VEW#4UXg|4_MPu_~7EOLe z;mv1-_lPnIPwvt*VicLYS5t>kX!9#gQD#QrO}f#njG~)kj99rC#WyF}gbM;y@AAzT z08(0UZ`pyGB+{%HMK%Ygg)#{V16^5k1W4Rs&CO29EGiP)yemV13255sEN4a_#h+uz z22|`>^of;C1f*Davtz{pMxYQ=RSYB0%=qemY?G%n$(ad(OcBaS$<@ouE6dML)l1LG yPfE-wE&|&D(tC>oBwJdNnNuVRGD-|cOy1a3Bf!Hbz$n5fz|O+P!o>1lgarVj!dE!} diff --git a/invokeai/models/autoencoder.py b/invokeai/backend/models/autoencoder.py similarity index 100% rename from invokeai/models/autoencoder.py rename to invokeai/backend/models/autoencoder.py diff --git a/invokeai/backend/models/diffusion/__init__.py b/invokeai/backend/models/diffusion/__init__.py new file mode 100644 index 0000000000..569c22c429 --- /dev/null +++ b/invokeai/backend/models/diffusion/__init__.py @@ -0,0 +1,6 @@ +''' +Initialization file for invokeai.models.diffusion +''' +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings +from .cross_attention_control import InvokeAICrossAttentionMixin +from .cross_attention_map_saving import AttentionMapSaver diff --git a/invokeai/models/diffusion/__init__.py~ b/invokeai/backend/models/diffusion/__init__.py~ similarity index 100% rename from invokeai/models/diffusion/__init__.py~ rename to invokeai/backend/models/diffusion/__init__.py~ diff --git a/invokeai/backend/models/diffusion/__pycache__/__init__.cpython-310.pyc b/invokeai/backend/models/diffusion/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6864fb12ad1685c64a76090da388747f6e71b40a GIT binary patch literal 488 zcmZ8d%Sr<=6wSQaQmoXCQr&brn?`UeV$n@OrO@4MLY!o5Y#u|B7VRJTOS5&QpCBmS z8JQ{ua&m9Z$>rpj!Jv;|y?=e?`vF3)b@O+H&|JcHrvMCPhy^T^feN!wMOhToeI#R* zWJy!UvZK0L7wTkz($4m*KhwesD#ez1Vf45Vl8*~x$3kz+ic>LBhH>d8Occf1fu#Qi zMo=17qgf-F&aNAZ+bg3=qq+9g;oLZ1T9b3rx>OII|=&g7X z8t8-H8(J>thTFTeS9J+tCMTpCxh1unk=7j9xw3T&P4UxWP-KIF`b8sQUePBli|2#^A{ZnB+M!^UCtGQxh{cKVq8T3jjR+4@dw2 delta 37 scmeyqfbr7;M(%uGUM>b8u-N~5Bli|2M!(4inB(W&j?BEW{Or`k%+2SS7E1vD6;Tcn delta 35 qcmexv^~s7mpO=@50SGMi|K7;GkcrWL@>(W&=A4w=%~zNfO924Lg9~&3 diff --git a/invokeai/models/diffusion/__pycache__/plms.cpython-310.pyc b/invokeai/backend/models/diffusion/__pycache__/plms.cpython-310.pyc similarity index 97% rename from invokeai/models/diffusion/__pycache__/plms.cpython-310.pyc rename to invokeai/backend/models/diffusion/__pycache__/plms.cpython-310.pyc index 4b8c259d077efb66a5b1cd6b09c088a505342e2f..0583aa0b4e9dd34fda2696081aa85655eb716e6c 100644 GIT binary patch delta 19 ZcmX>pcT$ckpO=@50SKyoZ{)J)2LLnR1nK|) delta 19 ZcmX>pcT$ckpO=@50SIoiZsfA(2LLn11kwNi diff --git a/invokeai/models/diffusion/__pycache__/sampler.cpython-310.pyc b/invokeai/backend/models/diffusion/__pycache__/sampler.cpython-310.pyc similarity index 99% rename from invokeai/models/diffusion/__pycache__/sampler.cpython-310.pyc rename to invokeai/backend/models/diffusion/__pycache__/sampler.cpython-310.pyc index d749938ac4a274a6f78acb9873e6f93cced235dd..c76f8373aa4e649ee31592966cc397be73543e79 100644 GIT binary patch delta 19 ZcmX@^bKHk3pO=@50SKyoZ{)I70{}V41y=w7 delta 19 ZcmX@^bKHk3pO=@50SGR(Y~-?40{}UG1vdZy diff --git a/invokeai/models/diffusion/__pycache__/shared_invokeai_diffusion.cpython-310.pyc 
b/invokeai/backend/models/diffusion/__pycache__/shared_invokeai_diffusion.cpython-310.pyc similarity index 99% rename from invokeai/models/diffusion/__pycache__/shared_invokeai_diffusion.cpython-310.pyc rename to invokeai/backend/models/diffusion/__pycache__/shared_invokeai_diffusion.cpython-310.pyc index 644fb9f1dceace8413603bb522f5a9276bb2d899..eee71d9aed784bb0154fbc5f940d80f5745cf397 100644 GIT binary patch delta 19 ZcmexR{-K;JpO=@50SKyoZ{&Jr4FE={2E70P delta 19 ZcmexR{-K;JpO=@50SIQaY~*@o4FE+624nyL diff --git a/invokeai/models/diffusion/classifier.py b/invokeai/backend/models/diffusion/classifier.py similarity index 100% rename from invokeai/models/diffusion/classifier.py rename to invokeai/backend/models/diffusion/classifier.py diff --git a/invokeai/models/diffusion/cross_attention_control.py b/invokeai/backend/models/diffusion/cross_attention_control.py similarity index 100% rename from invokeai/models/diffusion/cross_attention_control.py rename to invokeai/backend/models/diffusion/cross_attention_control.py diff --git a/invokeai/models/diffusion/cross_attention_map_saving.py b/invokeai/backend/models/diffusion/cross_attention_map_saving.py similarity index 100% rename from invokeai/models/diffusion/cross_attention_map_saving.py rename to invokeai/backend/models/diffusion/cross_attention_map_saving.py diff --git a/invokeai/models/diffusion/ddim.py b/invokeai/backend/models/diffusion/ddim.py similarity index 100% rename from invokeai/models/diffusion/ddim.py rename to invokeai/backend/models/diffusion/ddim.py diff --git a/invokeai/models/diffusion/ddpm.py b/invokeai/backend/models/diffusion/ddpm.py similarity index 100% rename from invokeai/models/diffusion/ddpm.py rename to invokeai/backend/models/diffusion/ddpm.py diff --git a/invokeai/models/diffusion/ksampler.py b/invokeai/backend/models/diffusion/ksampler.py similarity index 100% rename from invokeai/models/diffusion/ksampler.py rename to invokeai/backend/models/diffusion/ksampler.py diff --git a/invokeai/models/diffusion/plms.py b/invokeai/backend/models/diffusion/plms.py similarity index 100% rename from invokeai/models/diffusion/plms.py rename to invokeai/backend/models/diffusion/plms.py diff --git a/invokeai/models/diffusion/sampler.py b/invokeai/backend/models/diffusion/sampler.py similarity index 100% rename from invokeai/models/diffusion/sampler.py rename to invokeai/backend/models/diffusion/sampler.py diff --git a/invokeai/models/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/models/diffusion/shared_invokeai_diffusion.py similarity index 100% rename from invokeai/models/diffusion/shared_invokeai_diffusion.py rename to invokeai/backend/models/diffusion/shared_invokeai_diffusion.py diff --git a/invokeai/models/model_manager.py b/invokeai/backend/models/model_manager.py similarity index 100% rename from invokeai/models/model_manager.py rename to invokeai/backend/models/model_manager.py index b204cf9444..e4dc5ffe40 100644 --- a/invokeai/models/model_manager.py +++ b/invokeai/backend/models/model_manager.py @@ -32,13 +32,13 @@ from omegaconf.dictconfig import DictConfig from picklescan.scanner import scan_file_path from ldm.invoke.devices import CPU_DEVICE -from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline from ldm.invoke.globals import Globals, global_cache_dir from ldm.util import ( ask_user, download_with_resume, url_attachment_name, ) +from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline class SDLegacyType(Enum): diff --git a/invokeai/models/diffusion/__init__.py 
b/invokeai/models/diffusion/__init__.py deleted file mode 100644 index 749f5c3f6e..0000000000 --- a/invokeai/models/diffusion/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -''' -Initialization file for invokeai.models.diffusion -''' -from .shared_invokeai_diffusion import InvokeAIDiffuserComponent diff --git a/invokeai/models/diffusion/__pycache__/__init__.cpython-310.pyc b/invokeai/models/diffusion/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index f528d77f34feb82cc4d7b0e6d2b5932ac68f438c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 304 zcmYk0u}Z{15QcY?h!Qxku(>X6nrvY!qIgXj5$vv6LUzK9Zg!5{J;C!Od<|c~he>PY zD+q#<969`9{$ZHmo1YeoT(Cahf7>_iU*qs!2@f}1;eySGwJ_2oR`$s{k;0^pqRjS} zdF?P_V{zX^Bv*83q3FmL=w9d<8mxLUU_)iF>vn-p@^8t!QVz>{I=QV^(+d0@^;>e_ z;(eJ8r&=3owH}sX(|9m?2I?cpj|o47KjAjn&q{5m2W3M9bn3y=6SOg?`H`9#>JUmD cQA#r(qt?~-bvV1?_0Gce7&K&TnK)1X0HCZ{qW}N^ diff --git a/ldm/generate.py b/ldm/generate.py index a639360491..00c94b6a83 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -27,9 +27,8 @@ from pytorch_lightning import logging, seed_everything import ldm.invoke.conditioning -from invokeai.models import ModelManager -from invokeai.generator import infill_methods -from invokeai.models import (DDIMSampler, KSampler, PLMSSampler ) +from invokeai.backend.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler) +from invokeai.backend.generator import infill_methods from ldm.invoke.args import metadata_from_png from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary from ldm.invoke.conditioning import get_uc_and_c_and_ec @@ -898,7 +897,7 @@ class Generate: return self._load_generator(".inpaint", "Inpaint") def _load_generator(self, module, class_name): - mn = f"invokeai.generator{module}" + mn = f"invokeai.backend.generator{module}" cn = class_name module = importlib.import_module(mn) constructor = getattr(module, cn) diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 05aa4482d0..42fe6638aa 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -21,11 +21,11 @@ import ldm.invoke from ..generate import Generate from .args import (Args, dream_cmd_from_png, metadata_dumps, metadata_from_png) -from invokeai.generator import PipelineIntermediateState +from invokeai.backend.generator import PipelineIntermediateState from .globals import Globals from .image_util import make_grid from .log import write_log -from invokeai.models import ModelManager +from invokeai.backend.models import ModelManager from .pngwriter import PngWriter, retrieve_metadata, write_metadata from .readline import Completer, get_completer from ..util import url_attachment_name @@ -1022,7 +1022,7 @@ def get_next_command(infile=None, model_name="no model") -> str: # command stri def invoke_ai_web_server_loop(gen: Generate, gfpgan, codeformer, esrgan): print("\n* --web was specified, starting web server...") - from invokeai.backend import InvokeAIWebServer + from invokeai.backend.invoke_ai_web_server import InvokeAIWebServer # Change working directory to the stable-diffusion directory os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py index 7ff99c252e..68bf65069d 100644 --- a/ldm/invoke/conditioning.py +++ b/ldm/invoke/conditioning.py @@ -14,7 +14,7 @@ from transformers import CLIPTokenizer, CLIPTextModel from compel import Compel from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser from .devices import torch_dtype 
-from invokeai.models import InvokeAIDiffuserComponent +from invokeai.backend.models import InvokeAIDiffuserComponent from ldm.invoke.globals import Globals def get_tokenizer(model) -> CLIPTokenizer: diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index d86bd14b00..428a46b96b 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -18,9 +18,9 @@ from tqdm import tqdm from typing import List import invokeai.configs as configs -from invokeai.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from invokeai.backend.generator import StableDiffusionGeneratorPipeline from ..globals import Globals, global_cache_dir, global_config_dir -from invokeai.models import ModelManager +from invokeai.backend.models import ModelManager warnings.filterwarnings("ignore") diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py index 5c100fcf8b..16e5340e8f 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/ldm/invoke/merge_diffusers.py @@ -23,11 +23,10 @@ from omegaconf import OmegaConf from ldm.invoke.config.widgets import FloatTitleSlider from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file, global_models_dir, global_set_root) -from invokeai.models import ModelManager +from invokeai.backend.models import ModelManager DEST_MERGED_MODEL_DIR = "merged_models" - def merge_diffusion_models( model_ids_or_paths: List[Union[str, Path]], alpha: float = 0.5, diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py index 0cd69366ce..11b2b45cff 100644 --- a/ldm/modules/attention.py +++ b/ldm/modules/attention.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat -from invokeai.models.diffusion.cross_attention_control import InvokeAICrossAttentionMixin +from invokeai.backend.models.diffusion import InvokeAICrossAttentionMixin from ldm.modules.diffusionmodules.util import checkpoint def exists(val): From f2ce2f1778aa148da29cf1a8d861c60d32fe4bd0 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 28 Feb 2023 08:38:14 -0500 Subject: [PATCH 06/19] fix import of moved model_manager module --- ldm/invoke/app/services/generate_initializer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ldm/invoke/app/services/generate_initializer.py b/ldm/invoke/app/services/generate_initializer.py index 0cfc3f39bb..e1162a1b9a 100644 --- a/ldm/invoke/app/services/generate_initializer.py +++ b/ldm/invoke/app/services/generate_initializer.py @@ -3,13 +3,12 @@ import os import sys import traceback -from invokeai.models import ModelManager +from invokeai.backend.models import ModelManager from ...globals import Globals from ....generate import Generate import ldm.invoke - # TODO: most of this code should be split into individual services as the Generate.py code is deprecated def get_generate(args, config) -> Generate: if not args.conf: From 2c7928b163b7dae20fd87850aed66f66e853c40d Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 28 Feb 2023 23:24:24 -0500 Subject: [PATCH 07/19] remove pycaches from repo --- .../models/__pycache__/__init__.cpython-310.pyc | Bin 680 -> 0 bytes .../__pycache__/autoencoder.cpython-310.pyc | Bin 13718 -> 0 bytes .../__pycache__/model_manager.cpython-310.pyc | Bin 33597 -> 0 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 488 -> 0 bytes .../cross_attention_control.cpython-310.pyc | Bin 20599 -> 0 bytes .../cross_attention_map_saving.cpython-310.pyc | 
Bin 3333 -> 0 bytes
 .../diffusion/__pycache__/ddim.cpython-310.pyc     | Bin 3044 -> 0 bytes
 .../diffusion/__pycache__/ddpm.cpython-310.pyc     | Bin 48129 -> 0 bytes
 .../__pycache__/ksampler.cpython-310.pyc           | Bin 7543 -> 0 bytes
 .../diffusion/__pycache__/plms.cpython-310.pyc     | Bin 3913 -> 0 bytes
 .../__pycache__/sampler.cpython-310.pyc            | Bin 9799 -> 0 bytes
 .../shared_invokeai_diffusion.cpython-310.pyc      | Bin 15344 -> 0 bytes
 12 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 invokeai/backend/models/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/__pycache__/autoencoder.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/__pycache__/model_manager.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/cross_attention_control.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/cross_attention_map_saving.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/ddim.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/ddpm.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/ksampler.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/plms.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/sampler.cpython-310.pyc
 delete mode 100644 invokeai/backend/models/diffusion/__pycache__/shared_invokeai_diffusion.cpython-310.pyc

[GIT binary patches for the twelve deleted __pycache__/*.pyc files omitted: they contain only compiled bytecode and carry no reviewable content.]
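For orientation, here is a minimal sketch of the import surface these patches leave behind. It assumes the editable install from the first commit (pip install -e .) and uses only names that actually appear in the hunks above; nothing here is new API.

# Illustrative only: after the move into invokeai.backend, callers import the
# relocated modules the same way ldm/generate.py, ldm/invoke/CLI.py,
# ldm/invoke/conditioning.py and ldm/invoke/config/model_install_backend.py
# now do in the diffs above.
from invokeai.backend.generator import (
    PipelineIntermediateState,
    StableDiffusionGeneratorPipeline,
    infill_methods,
)
from invokeai.backend.models import (
    DDIMSampler,
    InvokeAIDiffuserComponent,
    KSampler,
    ModelManager,
    PLMSSampler,
    SDLegacyType,
)
# The diffusion subpackage re-exports the classes added in its new __init__.py.
from invokeai.backend.models.diffusion import (
    AttentionMapSaver,
    InvokeAICrossAttentionMixin,
    PostprocessingSettings,
)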
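The ldm/generate.py hunk above only shows the changed module prefix inside Generate._load_generator. As a rough standalone sketch of that lookup under the new layout (the (model, precision) constructor signature is inferred from the Img2Img generator shown earlier in the series and is an assumption, not part of this diff):

import importlib

def load_generator(module_suffix: str, class_name: str, model, precision: str):
    # ".inpaint" + "Inpaint" resolves to invokeai.backend.generator.inpaint.Inpaint,
    # which is where the generators live after the move out of ldm.invoke.generator.
    module_name = f"invokeai.backend.generator{module_suffix}"
    module = importlib.import_module(module_name)
    constructor = getattr(module, class_name)
    return constructor(model, precision)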
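The invokeai/backend/__init__.py hunk above comments out the InvokeAIWebServer re-export because it creates an import cycle, and CLI.py now pulls the class from its defining module instead. A minimal sketch of that pattern, with the server construction elided because its signature is not shown in these diffs:

def invoke_ai_web_server_loop(gen, gfpgan, codeformer, esrgan):
    # Deferred import: pulling InvokeAIWebServer in at call time, and from its
    # concrete module rather than the invokeai.backend package __init__, avoids
    # the circular import noted in invokeai/backend/__init__.py above.
    from invokeai.backend.invoke_ai_web_server import InvokeAIWebServer

    print("\n* --web was specified, starting web server...")
    # ... the real function goes on to configure and run InvokeAIWebServer ...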
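PATCH 07/19 simply deletes the compiled __pycache__ files that had been committed along the way. As a sketch only, not part of the series, the same cleanup can be scripted; this hypothetical helper assumes it runs from the repository root with git on the PATH, and leaves the working-tree files in place:

import subprocess
from pathlib import Path

def untrack_bytecode(repo_root: str = ".") -> None:
    # Ask git for everything it tracks, then drop compiled bytecode from the
    # index only; a follow-up .gitignore entry for __pycache__/ and *.pyc
    # keeps it from being re-added.
    tracked = subprocess.run(
        ["git", "ls-files"], cwd=repo_root, capture_output=True, text=True, check=True
    ).stdout.splitlines()
    stale = [p for p in tracked if "__pycache__" in Path(p).parts or p.endswith(".pyc")]
    if stale:
        subprocess.run(["git", "rm", "--cached", "--quiet", *stale], cwd=repo_root, check=True)

if __name__ == "__main__":
    untrack_bytecode()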
zFQ%+pICe71G+j#nWPGW2GWNE)*W6#VMQ+(Wt{G>+oaq=sx`DQaUB0t16+X=X;?ojo zW#n_KW$0Y(XRs4phX($FzPO+)*?8GULo#{=*sLzwjh}UM@88`FZh;w>LepS}MSKm) z0sPds>mnqII{}8{tmUW>%R(iNIHFhY$j1(6)SB@I_P_=wd(Ja*0VbD6%eVwXIy>6? zRB4>8H8BsK2@o;I%C|k7kAJA9YP9ihVn@}qppTjt%0pE+qF@VvDk#mMlx~P-qZ8m%%RP|Gi-7(*l}9IyJCl-zCY!o4~#Cb z^n%|UhQ03ca>ELGuQ1$U)8`0zHm$||Y14AMEDp@5UmVE%(eRL$&3qevyq|dk_t|`h zXJ+&p%hV@vuIV~nF2oA+vi6YH@vv9W$@}s!t;@XS!MZP8E{TC}to)Kco*ISmk7>j< zHc3yP`#AQ30E~fCmDApVv;u5#(1jQ2?oMdo!^l!O2)opM01e01`8#~kHsk2JdLNKT zt8xyl_;8zxlu6l*o4kwb3!Ke>%lLn4Dg=|;ZfL!?u=03gyK42@70}0~axgOP*EFTl za$^z5VV10{r0$7YF+CMMfclo=;%j15Z@KS?SouM^`VEj?;%{V%c*pOgxzJJ(>YhqnilQ#vogTF zPS-Z*YFgo$PEoV$o9nS0;5z1&?!61Ab+4@zIyWP0s9Elvj{WXY4mMPa4RAzs6LBciXDsibVX?-xy_a!o;ZE=$>zCKi?F-plk)l95XcfMSWaT zV7#)QR&197xwbF|O6K*ct_*^?u(7OJ!EG+h(-{q1S zBgzpLk|l_}2;&kRBfGFc>dUzMH3j7GVBo`uNJ~?S^__t=A!on&n!Jno0eA3)H@1*N z)KZP~1;^%;>y11t2OR!x;;2Ioe>sqGuODiu+?x{&yu|HxvNVic@uF3QVHj415N;|J z4`r|;3XMS+py(2D``v)M_ikw@{7k2mBBd02N}+Tl9+b5-+>@$%^DGy~MX^|W+QxCd z5R`C4tgpCl6)Dc(xOwy^rQKFJ6&{S+sqe>>=Qfqq&LOA5#ZFJggApV_|Aw6SJ;T(S zULqZ7=N%<)si(AYsZ_jeY1AQSqf=x5>n@6anYG|t6mK(_YWvjfJs~>x;KoLFwy&?V zjKX^qyqDl)_$@{MSiu_v%>n0IcwuIR>ql3WEV?v&Q^8vbSb&MLp@P4qAi?KWtPwLSTgeiW=P#2HKWbQfZ*frUuHsyzh-k8F&d0@S zbE`A>Kr>&>ZU1MsmyJF!l>f7;TyzZ=H;H18mATB-C?34bb+L13>?@NTnzGSx)cC4C zOe+l=2OJxB>!ssv(|=Q0wDXOt{xxm+`*@N=V>vlP*-CT-{P7IlCNfY1lc@CrQaB<8Q*- zh(Sk_cfcUdpurI2ufUM^qR}`A4Dk(y_~8d}^P@YwGnYKmEc^uD%0O}ut;Up8qXJDdEXew1VKae`SF#_`^9iG+Q$uNqFVzlE}s2)EL+Oxg0J5_SJVBiX^KP~O>|5k91 zf}dCL9tFQZz%41ZNVPs~IV{fOjRvcgs&jH?OZFF)TZDD^`wHavY-+4WmS;q$Ev=8K z7#zOnE~T!@iCk%nwAE3%w+Q2u-twKhAsVtO`GVV<EoT$Ke&G@>%|wj;E`Il0r2bV;)qVXkln81yXCGIL*H|QRNwv)=VpLE zTdEU`FK0l=v9Dm^)pOwsOT!k0F)$=Slw}OOK(0$=Dd*8q%Y;{QsW<#{WOBLin$|Ep z@DaWm-59T>c}lsbhFM*pFq?*emGAjotpaB5et3Vwv=Ui%O3^j8ySNMnZA31QK`)WZ zV+yT7E|1AJ#CwM$Djsp?@|X$gUu9uV;InEJbW^koWVl3iH(W<_>C&K0MDr}!T=+e2 zlW*MK?^p)tqHv0}`c($Xoq0OTEqs?2`K=0WQ*gV2I|!P?kGk8x%K@?^ZvH&(Xs2W- zce#$wz;JWORB%y&u^mc15=AYTX!O1+K+)Z9_n7mNX!qK<)8m>x{5|E%svcBUomBCc zm0nR`fc*EU3fy3)udgb#f768jOlkdH!GBS81@d+Y(hjk*``kVP>b+|w9JVJZU5{eq8(yHUWvB1hU=0) zOG^`=yD=h0^LjEo5dR;&n5Y4$#l+hnzL4a4MfL&2l~;=`iG1E5bt&;37TdZ)10(=1 z&93MfLv_8hF@}x%xU~2v`3dAU&W`uNSn^_m*NY~2eIZ#JxA#c6!pDsG1fV~cb8l;D zm2uJJQZFQtbf~;}EZ#@3Sn|*vO@)m{Ue`J!Zy&L%fU-ycaxbQ8ncmcmU8v3DO|UTt z8u`9fp2OG(yEWh1h4p=NV}d@ETBCSMnovJ@LZ$*r+s)H(__qm1mPP~A`RkVqrHRDb z4IxSo#X;PdB*(6c$<|~egbqY5f|c8GNw-@*CUO z6D#TemP4TRUw3t&B%f>T#ynwfYp-4R42g&x*_ho|F-A*=V+X*$M|>n0GWWlK*c#{R z<2|Na?}x7e*^mJVIU*614IGpCto4^Jw&8U51M(eT6m9wE+jUp>Hzts_Buz# zcHDkaAKDT8Un^#(?=BNmsu0eJ#lO*8V{`tf(!Qq06$Pf&@>!*InWc3-Zc%0t40p}^ zJh7>L4%yh`-=J6TBXIr!X5|1wC^ITbUL%(QO5y@a-JEuhGVj#CZ>Rw7ay%VnjVV$& z?YjAZK9JT!`0o|`2W88NKRW{{XZqZz&(y=$XmeCJcY_STP$K0DAyqDEmYeDaHJZjX z*tO#hsIvm<&r-NQE}wHbP6(*(8Yc3^BZOwd_luu6{MNjq|iaVMA~(gr@eyXsTpLY8WIX1J7Y@lS7Ts{kM5p7Gi)ekn3fF2DCVb$XM>`ln4fEnY=V4ZYp!1uQZW=W5dIUHcJL$wQvo&q3!SEn z43(YNU=N}Rf1Sw-KddFHsk9qn3{Zq0QQ9AohLEZYB)YI*x7#EwePx@!3kwWB>;wU9 zzy7am{+QD2ZvKC#>QAew0t4Yk6|;S7l=Gu{yhkq{QS4(1KCVY=aR1g0Kd!g`QEwkp zOxrq?cX+ppyHNYj#VAt|2UjCN-nJW~NdnH@&<(G2jJ(}VE z_uhSJPIR7?TK`V=@NF6smIw}3f0MS%4G^?{guwNzzxwwP*j7k{xc+AeI)*7lO_EmFPk@Sg=pI@*V|5X zgb%1#5gMU^?(WQs!g#mJ{-Da<#+!Tlvio|(w;duP{E+JE_DJS-TYDrb;a=7H@OM{h zhbREMZ`mF@>v&)^`r)_`{U-s&NJG_P8Z9J4eIojwMzkkha}EILsZ^BoJKHO z2=nsMk%7&na}%CI(Q;zVW3itWV~jHa+IG|s1-vbbi7FXbZIki(BR=2?+xl(qL z%kGG2BoLb&~^U|x2Qh7UN}x&IpVWC zI0b4fuccDUpqt|45XV-`BI6XmOMQ>M(=qcPm=EVg;7y;yr`R%5wY&!Tf~F};x}5&B zb6`*^ek1OrFs6j!Mh?=+=j)b9sa1=3Y?f}+t?PFAh$7*wBr?iIo@#M~Pb#=u3-reo zn2g@YPD2b|(pyPM+ZP#W7NbHa@twY 
zROsMpb&aO0rYQWJvn$G5KUcP`M~0TI!hS8k%%J{H_D64YBm0sEOP4!%%B z8)oSa+dvz^wgQWuUuKWn#vV^^3^}f>D52Jfq@)~_nJv3Hr~T}1&JoMP$KD3^M3Ck` zfS>!xM0x!HGE=E@W|+?h6hnzsN{V9~!|e{6+MT2pf*fk6Pr`arxhV75e<=lOelhx` z*6tTq+80F_2j}1B3oKmvzR1CBfF=sRw_R&v(4K#QRaI(@+08d4+FcC)pfSWvKjUI- zwA@WUyBcE4plAwJc)QVMPCTs6aR}|$jEM%1M2+(w{ua~l~8}Mg5>%t zn0zq}Slhd?ukVJS{EghdT_5SYBS`JpOAGe5_EYvA$}YgVxR$aHwf2#IMN0<0do}Bu zhZ{#^N*wH4NaC<1xp81it@~RCf-7FkQuD!$vi0&Qa?3H$?Ma-8d>0(t7*p zn0vC`kpBz2w)G|%lv%NF*Prv-`_rD0PQR|C1$i5@p?Ib1zgD9Dvl3G&FST*y#-WYF z%*hnvc&K$4hfwj%ZX5{?wT{FFv4?`gxDP6{4ne;Y`<&N09H$&^r_86QFV4?bN9NgE z8%L~%SJA`0eLckKl&wd*lZvmdwh`RBQQl$he($^J>rvIsoE_u8R>;);hYpHM2dI%ez`DQIGU8lc!u{p0p9^`FYwQ=>f-g)&;_t&*_sC9L4eQ=zW zf3$U!9-W|`EcN_A-a;6m;EAdjYGUrvVY3j9iqaly19+rUU(+|;!<0S#Q++*f zW*yi34ueFyI_j>ypF8&byHtX3mXqmTtJrl4R`xL}s_&5PK&?8kf zy5J6F|2)A0J>X~j9!SC+J_#4}-Tr>GwXn|he<(p9Hsg+nXm9`9-bP0yM<8$|tg;5p zeeFxV${&p#oCf9ljNw-epf}&H1&(zy-YyJ|N?W)Vn25uyijQ;kZhU1}`N7&6cbqh~ zcS`-S!ElE^9RqQCW&}}PZqM`;*f`XS=18Z!5941*ucEPbUI`uI%*|b$d_BCjI;Ueh z)qaEV4Ns}wFEHTIP%o?Jo$fCS@6;r9sR%oFNVmwPnS4*@j$@pXOw)2Zg&>s`cT?9+ z%+#qH9HArMBH?3drJc;J(U0qKU4cQ0zf-V0ZMtjm2AjPti}L5#&~4vS zM)BJR-clZVlIr~$rjFiS9xdp5=duvh_hj+S1DAd3aqX3R4!iGeo9oPvwXX$_&CUA$~A!waXHSKV2DpM1f`8jC<)^9gS_6AeLk zx!#q{D?680IhFg~{0!dHIHA2yOZR^;NX_!2@l_Y=YvqSbTJ+E(<$8QMNywoOrSd#i z7&}Q)O zRqPQ3k18-PLC&*`2hv9dYn@r`B4b?LY~lWO8ck@3CI-<3LaGOSpm5& zcCelJL0()7IkCD*KDqU zV91h}oXa-ps<4UiZS0}h^1dDOYV#HV1O7`WG@EDu=(02rc(*0IL}j4&jv5TDn}Z!? zH||PkEXof-v(!ziEYz=Wrg$?|l#w%+X-3{^J^Ts!5TzIE!6G+`+r?exk@ifL>!eoM zQC-nnmMqu+oUzTsE}VH9r_)R1ao5hQP+vUmxb5kDzh(F%U8t;2E8Q%oH5-R)Hb&97 z6GHGV`O22P$jeac?xyYM&3 ztrX>*8tC$MxGO^oS0$c0t-`0ScM#_k)rKD?h_V%KK%2ws-G|k@XOx~-%yz*?_4vIC zv~}&4lTZjKJfpyenP>|C@?z}BOHH7kAJK{&M4e{-vz~zYMklX9Z~E$ z1sT2gh=N~ImA|UMsI{*vW;B?QdQU2|=nA_KG}I;we^|jIO6zXHXY`qYzDIdcB}X03 z269KxbIMk&EkDlFc>Dyf7na%C@24HxO<_2;jJPmu3h0B>wb}tnj z{*%nRb=9Z$G14D}z&Mdh=Y|W##P{TmEaDt>yhu<^-hB82rJJ?QfO4_$uwYDN|5NF%T5oZzd=v$D8$WkrDAlK5|cgn9uCOLMLjxibs&(DW%#v7Lt@Z@%j06 zcdeb2dhN8*_7X6nmv)ojvbe0e-Nz0ywq9)c*rHKsX9X?~ia1%0lOyqCYaq#$$+6&YmO$hz&uaosm=4p8ik90MGY!(`dkwNkqQ z+h%bBZ=2kxaP{SuUhN&iqvCG!M*LIpXAw-;&Eeg0rPcq5-kdwv!PnHW?Q7WpoTUzY z4)xU`IknZ{w0coYbTeci?@#Eh%@zEOO~p+*tSK|@2&spNGkrGxe_ngofX^jXqv?mY z?JvHnPfW?NteD0){8!4P15xD7?dXl<8BN|bWVLHVMhd?#uDM?-yecxsA1HiFQd45t z!fPhr@H205mHgJ}ja`#)mDVXZ21c}XK&V@BPpUNr)!r5WX(ueHIT<23T$%4Ly4}HJ z-;YRo{aCwq+;SP)fNVP52W*>7_cmmsv4>{P!*{sp!IgzhANt4%6|KLacJaD20j_io zM+2rPtIf#lv)WqNaJNztW+hpvPD1j_jCAuoJ5=Hfh!`fL&UGrqaX$`A?>^LH;AgI2 z4;h01eAwBoZG*5+Yc6vN@(Om?iC@s@6=*sqNHc%iYwrEGb-A-04Hfo5pFw@y9)Pb9 zi90NHbuUPM8I=-KY?pQ=S8c_FfYI0_25%e~ia@BMyph5524#E9V9bf`Q#b1RER;Jq3_;`;Tih!cSOu?!xg$l?n?J9?+fQ32G4hcAT`AU2z%|g-Jw<#qn(D@Z zcOFuJ=gn$=W=EK0gZ8%I=tpVJi|rW%9C;I&P-$;Z0o+Av+u$glhEdH~LJLlsZ7k|IT`Fb^=)lT<3E22M3^wd3Oqt~Yz`39NT6-l{uW2TWvzm%6O`)8VXU zbAa&-iwgckTPW$S!|jkPz@{?8Ze#{r9pl^Bs_8Prg-b$P8BZ#vS#s=a11v)-`fExv z#Q?2VTR}a1gaP9qt*tHtbDe{8J0X2uSw;S|-2)oz{~JpEbrt%H3Vv0=vkG=#56iy_ zkQ<`tZ1^RuT7P!#%rj9Qtya@1sBgut>eS!WYJESoz>zfUnlX-Q`&cpjT8S$>A*0Ml z4bG>G-ho?6+mGRc__8t^(uK%P_-gXBzHOZg=`a)P4e0Q{u34}>j^Y;gbLMsgVm6i^ zRX=Uu-xi$x1f~5o^}uZ!yrOWxDZP=wJbdzUsJG$6lrnX9_~#T?u~80X+_^KyYc)3~ z-_MIEjemr-^Je`UjjWxw9JnM{IMb-Xe2l!ch3ge=In;zv#Bmu(^kyvAOb-5mj7%^?2!3}a$lw#j&UP-Ref00_qNDO ziiu`)GMy2{Misb&R&VK53`56x6&>wO?KIQ;%=JAvlj{k6GBC5=SZ(`fk*bTyY)pC+ zUD;n;e6nlz$=bV5UkIf-TO<=}n$Fa}(DKnCAiR+v8hWTboevuc5#FR%k1CUBbi;yf z#n?fx8;J6Wa(B5(B??Q_X9*yGa#Ff>5)54h-2Fuo;2ldA3tUX)kEe@icM#9KS;*!x XqkiIh-0$$Ac5Lha(UIZ9<5&DoF}LII diff --git a/invokeai/backend/models/diffusion/__pycache__/ksampler.cpython-310.pyc 
b/invokeai/backend/models/diffusion/__pycache__/ksampler.cpython-310.pyc deleted file mode 100644 index 01ace311ac23709cbc9a126a0f3a896a807e6883..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7543 zcmZ`;+ix7#d7t~v&R)2@h!iQ28q0~LO%utf8@F{7*^X@5s_L~73Qj>3osQ-V$=S=y z`pm3EZD)a^q!A-1`_iX4F+c`+@mn7PGVt;m?sOl7DccHDnbuXeHSyH9bv*U1 z-ZQohRbd7*yJpYYwnW+LmU{NKEy|^Cx#w&8zjBHpGA+lq5CAt_S}S~aku%O6q{9AO|{VC_*6w1#rfB>EQ~Wu`i|e<@*jsBm8J0r6j$%ZkCRUL`u#V=$3uP( zpGaa1Io-evpV5?beYPX6|9myjWrd(7DBbsPryqmlDpNZuw{m3*JkOM4HCIm5W9>va z)=!ibWdzQn*0@k>bhDzI?V+@nbXj4hIGeDrFwJAXi)R$$`KiK} zG4c;@+}=xi;q7jkg;9L_J)X3~AWLsg*nS(G!GAxx-Ah>5O>a+y;C3gK=X$IEv{(#w zc0Gtv_o+Aia0An)^bK1z)Mfk@@U+z>)j_$T^2=xnaG$$KGw4chfrQGo3hmIC{Uc=u z9A^4q8M*;|I8?R`X0Z~U=8qIZ*|xyJa#6Xr`PLg@oJ48JIZ0SBnUChAGD!n*INb!v zlmke>iK0R-K2kiKAgG?}phjlmWYjAP(+;TC%1XH^R)Oyy+RQwnTAM)%npW$S)Loq1 zIq{l1+w=Jq4E+@S z(y+TDPK~dirm*ke&>u1X?h7=(2GUT5FU+vumghxrlzHB*JKY3AklsZ>4#DS^#fQ9| zgg@~)^GJao`9TM4G}8e`@oPYE9JTu8aOtZy7tqcp7o9QP z4{)bIFQ(zWfQa(ddjPU48=#DlR8LIUcd_ zVz=TNR2EtudMw=Wp~V+FOqrQ^VH^}z2fW`+`JZ4`DHfz{Qa0xMJOrADUY2-LPn&2< z31O;QU0YJ?+HifAo6qzQ=tL(MjVs~4hdb?qfKddVkYq5>$juXT1W;{jxOD*KO+X%? z7H=j~FAKI(%N1n_APe{_X#G``{V#U9XwF$wLqDC0gw1o)rb!sA`QCNeR&Qs`B;ZV^VV`Ple_Vx=) zg4ji-md3jB5+)G>x};WB7g%O#B&v=&T%2X;xOY<%P$WpRL|PU5z0mUt$0J=Ibg5kR zyblI`SGEW(;Pe&#m$cs6SW|+q+l97>Qx=5IRO-ztO0+j|!nRUZZKvUIs(l7x36bbZ ze*-dwSa%9;fY`EGw#p^;oT#%|8L(@F=CO66l(n*hyM$X~*QLX8pl+9BD|=oz9x7_= zX@B$-zY^x8k~4nJq1+I&Y32FyJo_432+M6}M`Md@>CnU+mGK<(MU8!(U1H0JG0Hsl*}u97St=>!cq41%B~LG9c)E;B82P9XnY1+rTK&?x*H znM3acC1W@NYM3OfY2kEy&i#~tv{^0G2ZasM3waC+Jx4yy`%yZzWY1$!LMDnDY`&L8 zJ>UdQVhQ_@78*-o9e_*xTj*NUw;sOvp7-$WAH2Es_Pc-m27euuO-pLQP3mB$!Cpv` zS6D2390ei&Q&G((wo?e!6ivjpUqz2}4WtTD*YGn`>kG>ygswyM9o6`9I6qst1bQx)-Lx4vpvUH)B`r90S~ii7#6mu2s6JZ0r(#! z@J+}y;p8*y;>LCLH5d>zF3RFOJ@Wb{MSs80`Ch`wpbfu$_pbY3>~^_u#o>AjyXp#; z$DN$2dsB#qd-r?pAJpbojgrFm{|vk1HXEGeMB06lzM=+CKLkpS#ilOyM8Y_hW*9$j zER0p@SI9U1eHv|xHDkHO{8>j&E`o1T4?0bLZ;eenEWSM+Z71;Z#~Bdn~{t#Mb{>76e!)k*?!MVr_r!*b6}R1+A(M zm!~3qjsu@zI$XbfH4u@Wd{b(8(k;A?zd!+UqcHf9D7wW&Lf2)luqiS9OWEo>~=M>H(KMVH2T!@moRp|TPLtx0B-w%uGtXEQ8l~zr7 zDSp@69YoBJ13ENv|JTm9o*P+>db?Bquh8~iER3F?c7&lp6k?4fj$uR1nt+kQ9Fhwn zEGYjr>iHg#9U>&t#lj#By@W@*5fE)GB=B$HjCOqr_;{PT3e+jd>C-};HEqJf1v&)? zI|k^Rl6AA{bY>dPMm{Aqey>%W-x;;x-1MyExN~Sj$U}N}k0v7*F;}X z>I*gFJ$jllkT^59VomdK(Skic&7y!fns%lX3O9dC2!2~)QVdHp#d>AC)+t8mAZb}3)U zY-Z=`628pJm*LJa=SUx|=j#H0msv%6)1!?X@e%y(5A)^8laTqcGP;&kn3-RLBj{$; z+|94HYYeNU^|RWMhLP*-`te-8aY9&Z17a_N%TbfMoO%L|KGh{0 ze1`_U3^H7Vx!R(Ik)aX^p^i6PaMKKFCaUr?l*7I|G$`pb)ZClHB{vSAxD#_Bn$u=6 zh6oQ}FI|^1LufJ(%l)hVF)(PMixBzCmADOT!F-@L7=8^ZN+39%Cf`JL>vZ)JyM% zv9N)Bn_l7kvnW7B-h5sH&LisS6A?3qR63xZjTlZ7LUu0=KNtXKLLnN(%DL}h&11bP&;!v zQC>g@nWK5>4`bBBUAo$3#eFgv3nT3>}_{4AZp?iWredTogQVBE)L=YgD4i0T|F@ z>MNjB5R!+&CKaV@^nc+keG$Z|;S}fNklYo2dj2NBrM4xq?@SqKaMjK<4ooX z2pJ|9xf%3+4|n=9NCt}_%#Vufh{%snx(UaEq%fJ`5sq9~k5wFwu|i2GLpXrL%^$x0 zgVz1lTd%m&R4aMUt(=JL2+qmsy(T;QQ_;mCyX9Z8smTPO_W z8A(Jv2qP%?0;&lL71>faM+aoVgaCbvvJ^#7nP^S+Q5s@H<2dDwG#~orVbxE?)Z9loZ(C99~F4JZZFLC66R)!i=c48 zV;6snxF3vVk&GX`i}bGy=x$A?!oF_eb0QQg?8%VHZVBG`Cqypn_UEX3k#&c`U7Mn{CELd3hb{h?#)OzIsfWO^3jB z{OzORFE$AICw?qnI()nfPnH7VWI#COETjXf-fW=3TMP9_ALtZidT2!Ez*KD`v?6<8 zL)+w5Sc{y2quO>@j~au9>Qf7wQESkmPkcp)^`h6hQK`EH(w;epHjd%@5@r_di{+c)BS z$)W$=jo${N(IoT5`$?20u^;C+#~m+9LtjACiIV`P2!lg^h)XSB&G|<*fQC=N1Z2Q? 
zYeq(30hNS+Kyq?IzN7^$SV70E(9X~u1ZJcC zo+gcKlKMj0t{cQb?z*xWPa-!DB0tOhH0v2s&-`#CjVR%MDD7-`;PXl7OS3YgG!H-X z#D4ZQl{MELhF+Gr?$_kM@9rHWk-rzhEJ3_?TO?zDm}h$nWA6pZ>Rzy?=FRrFvVM@n zdubSDyXk$=0Nq=-2?k_~9(R|vzPnI&h{DB%TSrrypas|L)5_j7smGcV3FcPJ7t zsF&vukFWqPm5U^l?S=i^%5dPqt>m6BM8N&agXDJvI5UAYxIgkHVSc$o&k$&6Y2>aS z4@C#sViO65k61&pjs#GEh#eqpcrx@)mo`{~wpbUQF>#3=%@6QO*^N?Oq=O5bLPzIv({{N z)*jj5TK2%6t`(+urL2$HSc@6F^-}_zp5hpaW1dpryrDRj;@GE6;Jl?cHN|mG8#6Ma zGd443*36#OV4cRSIa`@^AWJc`?P<4ID?5dD!lvtay=a|KZX7ZZ72RUJG*8$kY`QVs zEY^yRVg>qa73;;;CcxfHWV#)HI3Hobh|jrQtanM#;B~b!C@8b z)5Ts5Xg9G9bGFo+FV9EVhlcnEUr{4gmqxUgMs%RPHlL}Bqe1KKC9OA#&0>9|0amQ{ zpUE`LG8-O@jqGzEr3u+XGP|@m1y$RnK1R?X1ue;$Hf3?AG)jQEO;CEOffO90q-IG< z3vy3;XiiyK<>0Sy^jPQmAsUU^owKMA-O4}DO=g9GVFKFU=&Ls#r0pDpcHa&YSSWjE_ww}5!~v!#+FZo}03#son1Y|N2B0Io z1aEB<@G5%px+xco$>U6R>LGX0XQwj?vkdSMhQ6%fnM2n4ZN8lqoXV-3*SD9khZ zDVv&8Yie<=(6$I5y1oWch`n*-)GqYggrv7h4MJb*lF|Wgt<)irbs=DNdt4hk1#--X zY+5TcHP(b=wKqvo+ayz`a0+vTk!bbjK`0Q?R8*eh86~a(fzYIBuMR+qg^1KglQ4wy z^{&XP_-u~?k>wDE-o9V?RN8kZa1aE?zQBBgj889=78gkh?(fyb z4{@D11{`y~m%Hk-I&Ms*QYP0g zYcyXd_ai7xynG^{0K;894f+j?%1SlYi#aNT8~7vi%6kCBJR+4-AD z-a_&{BsCRQFuOWU3uPSO3ShePc9!{cEBwW@x47k8}3|Kd1G~{N5#4N zLenlr$sIM|g?;3;N`_IBxg$WeLovNzEdDhtiLce1v2V$&Zoz1j*|_q?Lr+#T8A6AS}ADXs8O6_p$W`lG{jB z_4(}^s$r=lZXxL- zQJN_2QPWCiwSBCXq@B(I^=0_VP{W<|9mckt3!Sk?%zE+rn(4fRWtBr)fYcihw6DQq z0*bQ>2ZGnpV63ix+p-+?Y+bxquEzxaz4dzzmi956XHeQZE`kaa@6c|zE+lDKNmQ+^ zDm#IvhqU6v-4`C0S^(u{h^*$kxF7mZ*}7FKs(eJ~uPQ?fX#5jWhmTD_bP8*?LPVcSTH7e5Gu@7IxcPaDGJ$dSDoKM SEXcY*bYe3bT=w+tYX1WmnjKpJ diff --git a/invokeai/backend/models/diffusion/__pycache__/sampler.cpython-310.pyc b/invokeai/backend/models/diffusion/__pycache__/sampler.cpython-310.pyc deleted file mode 100644 index c76f8373aa4e649ee31592966cc397be73543e79..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9799 zcma)C+m9sIS+84HS65%A=RUKuyRKc^$+o?oHI4%cG8-qhH(rxWLcGLea42f})XY>* z-}Y4X?o8)2u{>+V0X6}QP!ii>Mj(L&NL&O#Jn#qb$_qNLJRl22Nq`WDFU#*cr!OB<{2e!i;r;napaM>!x~I z>o%Tt(`h;Dj!9)sGuLw0U6sx?^DS@PL)vBeW})S;`zqgS7F(tDQmedPZdKMRt*P}X z)A)?R3e3M{Fh8#D+w0TJdfuoN`(O2w_KnU?93`u*4vU-VDoZvuduh^XuclF}+l;yI ze>zIzpx%tqG}!F$;DcK*mb5N@Ncql?<;5V*>i*jpvawb(rITG}Ej;txh-XREL~^#i z-RY!pn00u4J7n>Vq#nO*qCU!&uc|J8>gv;~t1*A7)9QBGaXYIy@=Pn*i9_m0s6Ljo zw?eRuT5%ThRL)L{v!oTLS=>#f-|i&%Rx{a&(11M29}YrJZ! z=u&K8wTyL(eU#bExn*L_a?HJDtUHB#!7CKl1?ICN@^i6!kUun7jg?sWmbq`Pdpf6J z4SxmHr*vibEFe9FH;X;Qs%%=dDI94-<#f}J>Wy0CojuHE*sN;dk6VD#s6M;M=GeR{ zFS1A20$W56ODLu57$@S+R@o9;MvXE%#a1x33Omit;5o(4vitC?vLK$`wAeXz{+6>o zgST0{-OnDt+Z=n4eFSyp)t%`K1Fx2aB^8QA5{THOYp(M4(cz%1^x3PMc} zOo0yx>(GAQc*|U?Su&^OK^CKKx49RpmzpijU71UhtyUzRdZ*3a!sbom=H^?*PoKK9 z-D$;_n%GiF`_dP9rxDk)^wMarUK&ZwB`s%{#&ULPC={#RJ?TBsj9MEkx_p{CXMt3W z{`uj2t%lf?Nfw5yPc%FAsF_|yLRW`WEGWJq1oURaS%~uHMpWOadAtHPvZ6#{vgc$bvnuN6-6ytKX@vmRugHn?=P*l}7BIX&uss5>6>n#<=rXsY=FWqYGBP`;Xd*VMlfp1xVS)32T!?mbuUDQf>5NM;CY-^|R0wPW!!!fM!= zd0-2pVIP`1Hsu^RD9s(3^ck>9BTRL7^Ql;e&C@mjvyK=m3hgU0uOOpncZ+8H-ORxHqD_qk&#@z35`n-k%2XT01f$hA`cQF#8DDcGdUrw27s|V z@I~Z(0=JR@*KkbFESdd}j$}aT=n)p(4R$iOfv={rqQ@N8d#x_V%*`Hu54$|RA!pO; zJPVJP$TKPv0|;A5yO*9Q9*WuzKPgW)J6qugRg=q#3ytx8%*iE3sPZ6O$>4p3l^(mKbM_qtEyKMy+~i3#)I)Db+2t>EkTv$I??7qv3P)J&&v}GXD545WmYrJ_#bvEA^Wm zA5Tga1(|w!6epBU`Phc~2)Lv#gSdIiq@RP^d)IR-mUG9locDdlx-*xrBEMuhcbwb| zQQI=-Oy_;ax%;_oudROx;?}8`Q@ZRsw-*d|! 
zLwK4_+A&Juts^09UVwTtfaFHQ9JuX5W+}A>QB`@lfeVZ?dIn!Xi7QOxJ6T@jNGAZz zZ2?5WyE}e&=8VO$0xa;4<174Ui71hNhEi$)onab{~~y zCTDuEr^mZZFMCc|*+-bGRT_hWoKCY&9T=7+^)PPN;f?gbIHk?#I4(hi1#0MHeF>>N%+dmbUcL=81>o$d^;{A*G}!Xs)L;dxxGYYIl}%?r zW=}yl!rrtJc!gr=H4_>;2kMDDXhC?OzVJbdq6k`I4rp1FK`WvJIwdNgRWSuREv7+d z#0==Hm<62^bD;CWe9aW|e{6o!e9;=59-I-U#hFd=C>PeWJ}SQu@iqkV=Y!95vY>x1 zAZt0v;8r3t4K{jNun}eT?MG4oK#-)t6_`=aVo5mJbQ%T=BZZYpZ)DEOQqtNQ8@F=$ zdM|1dhQLKs+lxsDpt-|84=!?1;mSR>Vp%(UjCZ=7UIqk=Tfnv`>+x7lrS%SvLl7W4 z5J*Xt@uD>AGqM(@huN&>!pfy%Jgl*bNadO`L1sh0;{+8f#L4~zuyJ= zzT>#oPo2E=zm8}9k5jOI;`r8oJ4NfqPRaT&r)>SFQ?dTTnX-Q5RIML6)7B518O!v) z@6GosW4uwlu9Mv~y2uKe!L6X>9|&431q*W`$EPwQGc&vkdmsMFzS(v%`v9T}aGn=4 zfRmh0f1<=4|m~M?fZnMK=V~V6TmaQ`JC}GJm|*s!Hg(~vxn9o5P|UK3;+)dX0c2P z2E8;Ivj8hooE2xFj`E@;%Az8sL{&_SC2=1{ShzhkI4902a63P^Uo0KWiTe*RUi4-O zz43Q){I8HZ&!E;Y3oid26xj*^tnm3i3LpGe_Q^ayBZ>4}Kn06a>DRzJ)kJM%s#u z&*nrJ0Utetqb!-Q13n53Od)~6NKbd3XYAQulE6H5aX7MinB~;+QLgZdD zHWn(G?=)FR;VU-_+lX38pY*K$^4sLV^WPxyH6kQNwFMsIAw8JI2!+K==Ag;9V*WJc zI-AX;%PmS4!cYbK!jRl`@@kQqM*k5R? zfNQVW)MItvFlPx=*54TQVL_p z7UBe7da@?aMA)Sf=tCz^L9d1;b3^*1Y(hODp9TZzB^X2m1;F>|)rQ{TggeqqVDe(7 z`OEa3>y(wxQoRHoX(zIKqyxp0s>z38A7&>a0z5&pf5x5O0`V3td+v^B0iImTf?=ou z>s`lPfbm$d$>4Dmj1hH_@0c(;Roa7*Nv5ZBXU=xe_QQVekNjyLynNL6aQo(x1^xg| z&+5;PfbEDHsZ)4ibWw<>gj=B-X?c<7UZx^v4eQWsTaa#hP!c5u6ZI7!#-M~!N0fkY zWl?J66c$!81Rqc{-|&>tLk5ezWFYMzw}59(R6yM~5mJEE`=SgJroz12_FzhwgR1Zb z)2La5xl$26D*zFT8WFRSm{O?M0P4*QW>G!^)boLQb6L4jf$^*Q0PLGZdJ1_}Rs{CV zpC~P7(?B902sk6kggIjboU~cc$j84UrWFEyLnGk9ERbv-_(!H-0r*!E2o8vs{#?+GZyv1*?5OoCPscE|sfm){ zn#Fgq#(a;+SBVf_@t25@J&mBB+V%>h##qE@7xPz$D5RO!nBpStP5?9UC%-{e$Y0=F zM1GUVZxJDpQ|QBgn^NB-a-PVyh@2+!J4Ak$$nOz(jmYnV)K;{uwTK@eQqa)C$vD%Mx^896{aEdC~t@Q7iqS zC4_v>6PrP8;a6x*XmsA}B>X)ZQ=JM%myHA}YY?WTV`^oh`;qAf!|T~jXD7(EfdMy@ zW|Jm^+$YQf6w@|M4q`!w5^l!*mHp%G$?K7OG#=8C8yxO4y^y$yXB zh#ZDYcv;jeYJ!I{29mzwvl=MV2(BMosT3!4ETzySeHRC8y{I{64WnTr=JZ_^DmdPE z-fLeB5ON4w(H@PZ8RHxhqicg1EV5yfWD_gC^%!n=lVisCPI4n&9lx(>e9{qnE--&u z=1=TX!l3Vt*2KwA_UK(k(6Pm#g`x#gdz50J5Fjj&Fi9Jd07XFZ$ueETM39H72G?!2 zQVq_q&4Qa9z7t?2#vbr$aBUl2vW^jxzdT&gXcJb-czDX+9k&T!t_5|D4XZ`qSQP9+ zO;0LLujrggtxC58<)PMp%yGi(*nWaftpm#=&2tgrMOGUr66!aGrWYTgH(Zw^s{kBX z#`Hkh&gCaB`WSDV0O&+z3YT%3~HlQ!1*4X?oo)s?0;)Kyf4P^ z`P-OyoZ;yM@YRt;dSUm`i&z?zoYDub{oe& z>TK>L>v1d0gNm4wN*kUgX;q_;=fvKmN`E$Z1l`-cZ6p zhpz7S-UcRGx?lt_!aku--K!6B9(yo_`_YZyU}4+oIsV&GuW+7lX4I>D);!iVl3JP- zW!rv%x@OWT2t4R>1FP@jI1Ro$?Vx3YP77Z!t~-vAS$hwfKx0^`_?rd=`k0l$irqIy zA-^x9-c1AnvFn`v_r~k3L>V$1L5e8`23Q?QHjo*B+JFvIR$O}z8%~FXf*TP}q79(m zn2!>EwH=ih0ztu4*N0CAsb3A&fVXflAHl(HeTP`W-K68hn2ot?$2`6mq@4iM1ci}p zqgOmWH!kl#^3X%UIGl&$O$5s_0&<*x2EF!9yK@uj6{nsFqIz)vI}JT8Ez8|dB za(t7PlA`aj@~KgErE;|~cn&3_-A{&?GvyeIMJ4=D zWt@4m4LIj@Yh9^~MzzwFD_4$k)U3b*9yuAm(hq6#;@lm6b}0-Ib!%#`n+GV?XD*+k z)Oo6%Z+F5i9x?trW!)q~-X#AMBDaZzcTc$=!(hfHg0C4yoN^L!In!jV zRsCOvRsH{lRR7yy83$YxqB-$D2k1ln2Z(djP%qPwKS_-;dRfxcCtqZn@&XY86zO(0 j=#L<(xA&TA{N+RQxQxAQq~y9dhL@uQQTOym|y4YN*E;Ub7Pc%AYiUH(xz}6F^IvK4n=WUBjdi!SeGP5RI&XEo02Dd; z%OQISSNJp%PpPU*scLUxkvyHLv{<-nxEo#$3#_x;(6fFKM2BTElld>V!l*6r*$6hS zdQpU1m=vN-AZ)Ycv%~~M%}zTxw&{es&88ReT_VC_Ok}1b=gP;|;v<#u+a1#^yK;frx6Ul$Sxw_eEdaHgId5vHd ztH0&dqHy)K;6`iPyZqW}Lp->RRjh^QhpVhnuXjT9wi<4_++$AG!0Felw0HR;23Wuq z65kgUOS9CHI<1QG1L&Scn@iOA??GD`x|v$dF|7(jR}IeuALN)0KF|eU?R#0<}3 zrdO!zEYGZ4Mz!cou!2|e@=FRUdXp%hV5M6+_^8Y#y*!n%vNz>Tdo#p+byM(~UY%u^ z*{oM$$55uRId&X<9b@y}9C8b6@s?ga&X(ATTS|4lFjJUeFS3)~!qy^N7U_N765YKM z)F;Ln&Ykj3vikwiBx-DVub0^a>@-GLW@o(n*n^^vGNpj$A$As{onjBOa~SP@R$-6e zZl~kjC)!S?Z69Ul(e?rM7+XP`)3Oa9US*G?{7gUp1bY(s2mAS_*k_P`h<%nljrYzD 
zLGcWrm5^BMS@t=!diaRi3+(f#J;yGx=g>#RdxSmDUbv+I(~q(l5dW2ADWjm51i^Ux zN~_s!1z=$J5v+32#_!suXSF!?UJ5X0cv&Y-dTR7$?Zgji0lUbgPPxA9$-Onz2*C@n}EZvdu~LpT@Y_QVhbT;_oW?R3nJd~?Q^FwM6(?| zDQ~AMY5Oniu0s}5Gu9x!q(0AeLI6)QYXoh#fhhzVvi&Ou*rhL>o&?Wi>@tu}l3|Dm zQpsd7of%?r3QQx()a|qPfyG&Qc_-p7q-h_t*MfR0ITb+~K!DU*ogf0s6J7wJ%Bmv4kMQW@Ego;6%oa0%%UD_clv7-}XU~-)zYqQLQX;st8DnCy>&I{4&fH+8hIYir}z>>K} z=G(Oyo?4wWw(B0~wzZ zuU=@o+--t6pam`nhUf%dB=dq-VRWFFw>le}lo#}HkSPQUjQ4k7Y8PzR_TA<>bM2dr zXwwD}NmIiZXVD2lD!USN{|!mfkx1X9EYrxkUBH-rYiqR zYWMWfoRGbkED#tj@ll`e(I*(c2)m#y*}j%T&)Ltw0~zf#T;aEo{069sxVV*%)h(r$ zV@gktbNm>%TLryoOnqPL>D?Kod`(f6uPd=mt@)W)+1FzIwo1nMRdJ;?tAm=G%1zKC z$%fBjq)JY57Fi8^iV6&grO!|SwMg=;<%BQ~lUW#9WQPWhkoIKP4soQw-eRo-*84J8wx?57UyWJ;`#_mxa^HqY;Mk@G4=E_%+f|V&DP~Z<$PcKL?J@b^p1~ULiVfA7H{2LQ1Be zR)o5d`VfLc@&mYqbgWc)1C`dt9=J~`Rib@Ti*4@RXtX-v5UckalNy(^1RsViw1>W? zmjr7a#t^=uCA8g83`ctTANukg9a^+P)|CPMJAfEw(r1T}h&jmdQP9fFA{W0s+JeQd9^8Xc7U`Y<%w2QgE&i z7D80$n0@h1_j5;+b2hc3oIE2WXKQ4FIJ6Om+*?+{*1~ zJ<#?`%3Jq;Q|skOZtTOSx~=jPEEiZJ2MNI+V0xf3qi4kWz7ZRB^<4$MnJj;+K=R}I z6iJYZwN@jXWMdq3fc3|=}qL@7K$9ptgNMq=JeGgu$ zn6L!)c~k>>l6u3?UzB?I`I{&YiD0Ld|1IQMmlcx}?@5v+5EJv-8((^P?J8*y*e6*=m!*v(oLv05FId~uMuV7By`2h!C(Rl zVj<5^cPEZskS)}Q^GHf+*~EoEP1Ww`xiWNx{!#ZczsxF+7|cYybP;?Z8F3?w1LX?F zDS&anI)YUc!3+hWZ%NO_7@GNO=!Aa>NoHUC3Z4_y;cHYuf@p{WAkvx;ctcAtpvGM^ z4J{;4hxpU)c0YUcbnnqmqV9mZ$gE4sonY7sNtTE5Lza;ff&{$OJZS{X+exM)#5sIt z$Y3GJvL8`Di6Vi;zrn+X(z76dlwN^SrotGRhsiIi>*C~ zt!=$G0o4mb86v9`m-q_aFf)d_tz#7?d*$G4Ji#xb2Bwf?wb;6Cq;-oJ&Ej7~Z6T}e zzgI+Qal8~|6Y*qRjt#&x6;F|(cZ(p1zM@P`^PLNJt;0F_uEZJelg3*J48ykGcJ1iA z-G;>uy$oBOR5IKdk0UfVu;|G;B{z1nb<=KkYMVAZ>sEt2acN?=Y8Y;Oh@sU&!wa_8 zT=y8cilMMJhN>}is}r?5k+efL4{?fxg$^6Sh1Xt-M3?|hXbZN=H(s=N>}Q^#6{y|+hb2u>~n57L|0O3 z9^w6*w;OdEkwnBfR_xc^-65B)i5PODjd%}*LM7q2SqO;6Bj3&A9$YlfW-S}>O&hcf z4-w_CFdKmMhU<4cNvF*g8l({+L@v1JhTe+(THRhlIO=@gT|@}G(Ikyxufq{U+{WcZ z>Vju%cLOmQ_>L5lSP}l+il_+ffxUV`n!3_C4!oNg7us$etR(_eyVN6bo9Ke@wqsDH)Y^>{JY3*>`VB|GUL6u60lNp#S^_8Q8I7AJCz)Ah`ra)r``dV|XY zl{?5GS}9GK9RF4HkW7gf50P`ae^QnO+1?2}Ltz7wQO3#Mmh0YMXwIl?%)nX1OyvZe z(Fy(;8jFCOWszA|MhM_uUg1&Q)W~?tewjNW%#fE*3ZI_hAmtbl6LE)aev)D*`c+w{ z@Dl=axJNKk_SCl#4G?CEdK(cHY6qvV`_{`amLX<@JQh;hhW3-vOSs|JI}L2t*f%%5 z078>wiy-A`9Gl9SUW^;(7?`UZ8OQ$+(?E4joM}>*iKA_zzNAy61a&`^P!C@6Gw`yB){WCH|d25 zv8ZWgmey%RoQ6qBlz~N{Mng_B=Npua(mnq+%KiygNT%^LJW@&IB`k%dcAq{P{r3{T zBP%c>0-nVvz-j7!g4q$iN^?4LW^8V$TL^Fgr;D+}c$wmPa^6>dxp|Lhs1YO-F$l@U91ZqEBoVyuNZr%I z{1=tCRpd00D~p`ApNrK#Ng5I5he=f3Q+oOL6u5kke3#x;-cy&92%Dn((z}SRysOo* zY07f9tjLH=S-S_B@shHq#`*WKcYXZ~_4%Gg@5y>1_pA;}k|(5-1GI0Y1aHAbvf_eR z2z`xT#~el+RHAlTY7u212XzAr{nktm@a?N|q_{sCJ<*82t6eAQPBaOBJH(AgNIuXK zZRPPqZG4cEVN!Kls%=5MXV4-kUi2GbL>AifbP_`NpQfT|%XiJY-7kFN@*Sd}2SJrV z*OdbQ1_AX=O8Vd>I*pwe+zoKzqDpmwAUk-%9+6N!hmj#U%DhX-uTe5amEWNRo{1!B(S#zRMI$emmp}kg9ZLm` z5D`kGO753rDw(2;$>-AXjLt$_A^{U z(3vI~M$j^MN;>J9PeC!Ca%v;!C9dzIm+VdvYA_MWH@6V+M5^O5HlWep*M+i4!w5`^ z^9}X3j^Kn5=N(c*vIYks(wYG`=GH2DvIu9Qfk+l}$U1cf9(Pg@b^wJh6TRt%+sVoC zsYqjA+P4d|IWTYw0@5nfW5p7F1)sw#_yr`%F%P>$V^%!ejGt%|Kp-zV);$I2>#OVT3Yw& zCyI{oX`}|zJ!pJ{CPc6u;0TucLp(^D=)N#uIw(TOUr6^n*53r*>6FrvH;3~ zF@jwmOl+vNp0<7v{bDC~D-WY2W9K5?n-K3oKhS&Ffy>7eB-2&#H(d8N6z$q8h=b>A zzUKy=_O*-##C|3sg4n4HBMM26#RWqgO2aGMNt=Yp>8x>TspFOL(7JTV&H^F)w~@e) zJyMSL$w4eQ;B$qG#=o4se5j@Ko2XqR8nE$T7HMbQ(kWI8osTw_Gd!(lEeIjJlvb2ui}GXv}alL^T5ZIwSN-q!ZD z+lnv)^Rc$Cr_UDle<)yU@SfQDQ6du-3+xH9aA2Gl5ui2qEehgJDBFZvb6bCr%>7bS zVAxCg9u9h9oZ|OzI1I4Ry%byGJ{eD>cN`T%c{whk^;C@6>=$nl1>rUqS=Glv9H`#hV4Qe0)n1t@*Y}^)AQRXdkdrdjCg5I={|Qf*Nz1Z4zu>U z<>Iw0sBpo4=(+9$!gOhn;kj;Z<*9ntu+QO~T6ckte#CaO5uCT15RORQ9s4=EtDU#I 
zMj?#Yb6rC;`H%rJy!&Lmd*UG(kPIp09nz7ue(xXN#jkt3-=~dVzt4&Dl%*d!FBQ(E zQ9uMJyQYMrJKgWv^$}+DSY?VY5x&TRgb(WO@IR1GjUbt2-g;+a5I;c_K-jj41-KoW zPBJNvcm=JX>+x1%v=Ie`!5bjD>I-ix(E^wgHNVkL3NVA2L+5EKWht|_Xk-z1p~(*c zGLvpi493R3o4bQBE$)qg?uM9Mk^?9(R|uATimhY+8^ z7|hxUguqDImo)54X{NTQE~+Sls?RR6{-(9!omtxP*DM$Uv-mru77;p|6Hg|3nWlhQ zu3P!k&<1oXI_<8PC(5$h0FZ&UDX+rVAJ z(Gg<_4rP?zg2-eB98VL+bS&fx6qsVlJELhnDnd9|Cn0R#(drOV5E%zr4cgFC)^Y>= zvjTe8Xa<1j#{wc!3qKa>!N8p$vH(#bg~&=FDjot6!2*$Sgcf@{pSk^+O#{^6Q^*)hdCs4ED^i^p%8Dv5k~DL zs+dUTaO^%BkB9M-sN~C(^kWu@7J;vDdKzJ5ax4?Q4rUVGFDFMV3j&StQoko0K=5uP z#?1!9?s#Fxp|{%}&XR9>jg8GHDePuNzSNKxT{qPC4V9PM>)fs1giS_So2z*jhZy>H!Ca6|-r#Bz9FXE3JH zo`gY6?Ww;-^jIk}G z>_;k~?}Yv(1{jNg=EowS;N>394fKjQdt?A_Imo8v*a#4+XIdR$D6#vE8ti@R;7oBM zKb|1d3r5@|qd=8Gr}X<}WrU&hcoKW(z&hT;5gqWh8BbA2>Mf@?9ZypLZJ`fNNa;;Su~A2W~$#~>r};BWICth10h^6A)Kt~ZBXj|cN&HfSBsFu+6YKqulo zm`-Td4`|Kxj$_!(Dr`Z{Y)vrh{akOJ6?zNt0{>A=P?h5O+ve9T`f8ub(lT%3&5%kS$qOBebQ+rd9n>jfRhBMoPeV6Lr(FiM2(VCc2c_z4B=o_Et#u1*iV&+r??4h zCPWdBZ-_6d`lo$UUryh_t~<5`D8(iL96ET_atATpr@{wOgTAm$D!W@0Mp7-pM?hm6 z0_=Cslt8$iyBtC#Dof_W3;FPhW4feEt*_l9M985g-lEpQVKCLb7(bI2bCAuZkm_ z$Eo%WO1?q~(JM~IiQ_Ds#JP+~+w|0+gn|e>poC0d{%uNVdqYH%IYmE36pT|Cgu9fG z&CJOf;G}x6^z_LfzCPP*G5!-OK0TV8q3nZ5=+lSr_mEf$&}t!)h9%J^th=<=td+$f zTAYwB-8nTqS1#wRDXpNX(?6$^(k0jw8f*$v*FG|h;?K2J4JA5C^nCHZtgR+As};aPi7 UE#)-rEbK?D@f`~m<81za09@5Hl>h($ From 850d1ee9845b6632e033e6fc723438ad3df30396 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 1 Mar 2023 18:24:18 -0500 Subject: [PATCH 08/19] move models and modules under invokeai/backend/ldm --- invokeai/backend/generator/base.py | 4 ++-- invokeai/backend/generator/diffusers_pipeline.py | 10 +++++----- invokeai/backend/generator/img2img.py | 2 +- invokeai/backend/generator/inpaint.py | 2 +- invokeai/backend/generator/txt2img.py | 2 +- .../invoke => invokeai/backend/ldm}/concepts_lib.py | 0 .../invoke => invokeai/backend/ldm}/conditioning.py | 2 +- {ldm => invokeai/backend/ldm}/data/__init__.py | 0 {ldm => invokeai/backend/ldm}/data/base.py | 0 {ldm => invokeai/backend/ldm}/data/imagenet.py | 0 {ldm => invokeai/backend/ldm}/data/lsun.py | 0 {ldm => invokeai/backend/ldm}/data/personalized.py | 0 .../backend/ldm}/data/personalized_style.py | 0 {ldm/invoke => invokeai/backend/ldm}/devices.py | 0 invokeai/backend/{ => ldm}/models/__init__.py | 0 invokeai/backend/{ => ldm}/models/__init__.py~ | 0 invokeai/backend/{ => ldm}/models/autoencoder.py | 6 +++--- .../backend/{ => ldm}/models/diffusion/__init__.py | 0 .../backend/{ => ldm}/models/diffusion/__init__.py~ | 0 .../{ => ldm}/models/diffusion/classifier.py | 0 .../models/diffusion/cross_attention_control.py | 2 +- .../models/diffusion/cross_attention_map_saving.py | 0 invokeai/backend/{ => ldm}/models/diffusion/ddim.py | 2 +- invokeai/backend/{ => ldm}/models/diffusion/ddpm.py | 10 +++++----- .../backend/{ => ldm}/models/diffusion/ksampler.py | 0 invokeai/backend/{ => ldm}/models/diffusion/plms.py | 4 ++-- .../backend/{ => ldm}/models/diffusion/sampler.py | 4 ++-- .../models/diffusion/shared_invokeai_diffusion.py | 0 invokeai/backend/{ => ldm}/models/model_manager.py | 6 +++--- {ldm => invokeai/backend/ldm}/modules/__init__.py | 0 {ldm => invokeai/backend/ldm}/modules/attention.py | 4 ++-- .../ldm}/modules/diffusionmodules/__init__.py | 0 .../backend/ldm}/modules/diffusionmodules/model.py | 4 ++-- .../ldm}/modules/diffusionmodules/openaimodel.py | 0 .../backend/ldm}/modules/diffusionmodules/util.py | 2 +- .../backend/ldm}/modules/distributions/__init__.py | 0 .../ldm}/modules/distributions/distributions.py | 0 
{ldm => invokeai/backend/ldm}/modules/ema.py | 0 .../backend/ldm}/modules/embedding_manager.py | 0 .../backend/ldm}/modules/encoders/__init__.py | 0 .../backend/ldm}/modules/encoders/modules.py | 0 .../ldm}/modules/image_degradation/__init__.py | 0 .../ldm}/modules/image_degradation/bsrgan.py | 0 .../ldm}/modules/image_degradation/bsrgan_light.py | 0 .../ldm}/modules/image_degradation/utils/test.png | Bin .../ldm}/modules/image_degradation/utils_image.py | 0 .../backend/ldm}/modules/losses/__init__.py | 0 .../backend/ldm}/modules/losses/contperceptual.py | 0 .../backend/ldm}/modules/losses/vqperceptual.py | 0 .../ldm}/modules/textual_inversion_manager.py | 2 +- .../backend/ldm}/modules/x_transformer.py | 0 {ldm/invoke => invokeai/backend/ldm}/offloading.py | 0 {ldm => invokeai/backend/ldm}/util.py | 2 +- invokeai/configs/stable-diffusion/v1-finetune.yaml | 12 ++++++------ .../configs/stable-diffusion/v1-finetune_style.yaml | 12 ++++++------ invokeai/configs/stable-diffusion/v1-inference.yaml | 10 +++++----- .../stable-diffusion/v1-inpainting-inference.yaml | 10 +++++----- .../configs/stable-diffusion/v1-m1-finetune.yaml | 12 ++++++------ .../configs/stable-diffusion/v2-inference-v.yaml | 6 +++--- ldm/generate.py | 10 +++++----- ldm/invoke/CLI.py | 4 ++-- ldm/invoke/args.py | 2 +- ldm/invoke/config/model_install.py | 2 +- ldm/invoke/config/model_install_backend.py | 2 +- ldm/invoke/merge_diffusers.py | 2 +- ldm/invoke/readline.py | 2 +- pyproject.toml | 2 +- 67 files changed, 79 insertions(+), 79 deletions(-) rename {ldm/invoke => invokeai/backend/ldm}/concepts_lib.py (100%) rename {ldm/invoke => invokeai/backend/ldm}/conditioning.py (99%) rename {ldm => invokeai/backend/ldm}/data/__init__.py (100%) rename {ldm => invokeai/backend/ldm}/data/base.py (100%) rename {ldm => invokeai/backend/ldm}/data/imagenet.py (100%) rename {ldm => invokeai/backend/ldm}/data/lsun.py (100%) rename {ldm => invokeai/backend/ldm}/data/personalized.py (100%) rename {ldm => invokeai/backend/ldm}/data/personalized_style.py (100%) rename {ldm/invoke => invokeai/backend/ldm}/devices.py (100%) rename invokeai/backend/{ => ldm}/models/__init__.py (100%) rename invokeai/backend/{ => ldm}/models/__init__.py~ (100%) rename invokeai/backend/{ => ldm}/models/autoencoder.py (99%) rename invokeai/backend/{ => ldm}/models/diffusion/__init__.py (100%) rename invokeai/backend/{ => ldm}/models/diffusion/__init__.py~ (100%) rename invokeai/backend/{ => ldm}/models/diffusion/classifier.py (100%) rename invokeai/backend/{ => ldm}/models/diffusion/cross_attention_control.py (99%) rename invokeai/backend/{ => ldm}/models/diffusion/cross_attention_map_saving.py (100%) rename invokeai/backend/{ => ldm}/models/diffusion/ddim.py (98%) rename invokeai/backend/{ => ldm}/models/diffusion/ddpm.py (99%) rename invokeai/backend/{ => ldm}/models/diffusion/ksampler.py (100%) rename invokeai/backend/{ => ldm}/models/diffusion/plms.py (98%) rename invokeai/backend/{ => ldm}/models/diffusion/sampler.py (99%) rename invokeai/backend/{ => ldm}/models/diffusion/shared_invokeai_diffusion.py (100%) rename invokeai/backend/{ => ldm}/models/model_manager.py (99%) rename {ldm => invokeai/backend/ldm}/modules/__init__.py (100%) rename {ldm => invokeai/backend/ldm}/modules/attention.py (98%) rename {ldm => invokeai/backend/ldm}/modules/diffusionmodules/__init__.py (100%) rename {ldm => invokeai/backend/ldm}/modules/diffusionmodules/model.py (99%) rename {ldm => invokeai/backend/ldm}/modules/diffusionmodules/openaimodel.py (100%) rename {ldm => 
invokeai/backend/ldm}/modules/diffusionmodules/util.py (99%) rename {ldm => invokeai/backend/ldm}/modules/distributions/__init__.py (100%) rename {ldm => invokeai/backend/ldm}/modules/distributions/distributions.py (100%) rename {ldm => invokeai/backend/ldm}/modules/ema.py (100%) rename {ldm => invokeai/backend/ldm}/modules/embedding_manager.py (100%) rename {ldm => invokeai/backend/ldm}/modules/encoders/__init__.py (100%) rename {ldm => invokeai/backend/ldm}/modules/encoders/modules.py (100%) rename {ldm => invokeai/backend/ldm}/modules/image_degradation/__init__.py (100%) rename {ldm => invokeai/backend/ldm}/modules/image_degradation/bsrgan.py (100%) rename {ldm => invokeai/backend/ldm}/modules/image_degradation/bsrgan_light.py (100%) rename {ldm => invokeai/backend/ldm}/modules/image_degradation/utils/test.png (100%) rename {ldm => invokeai/backend/ldm}/modules/image_degradation/utils_image.py (100%) rename {ldm => invokeai/backend/ldm}/modules/losses/__init__.py (100%) rename {ldm => invokeai/backend/ldm}/modules/losses/contperceptual.py (100%) rename {ldm => invokeai/backend/ldm}/modules/losses/vqperceptual.py (100%) rename {ldm => invokeai/backend/ldm}/modules/textual_inversion_manager.py (99%) rename {ldm => invokeai/backend/ldm}/modules/x_transformer.py (100%) rename {ldm/invoke => invokeai/backend/ldm}/offloading.py (100%) rename {ldm => invokeai/backend/ldm}/util.py (99%) diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index 7b7e1ff126..994a06f461 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -22,8 +22,8 @@ from pytorch_lightning import seed_everything from tqdm import trange import invokeai.assets.web as web_assets -from ..models.diffusion.ddpm import DiffusionWrapper -from ldm.util import rand_perlin_2d +from ..ldm.models.diffusion.ddpm import DiffusionWrapper +from ..ldm.util import rand_perlin_2d downsampling = 8 CAUTION_IMG = 'caution.png' diff --git a/invokeai/backend/generator/diffusers_pipeline.py b/invokeai/backend/generator/diffusers_pipeline.py index db86fbaf11..e9aa9fb86f 100644 --- a/invokeai/backend/generator/diffusers_pipeline.py +++ b/invokeai/backend/generator/diffusers_pipeline.py @@ -27,11 +27,11 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from typing_extensions import ParamSpec from ldm.invoke.globals import Globals -from ..models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings -from ldm.modules.textual_inversion_manager import TextualInversionManager -from ldm.invoke.devices import normalize_device, CPU_DEVICE -from ldm.invoke.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup -from ..models.diffusion import AttentionMapSaver +from ..ldm.models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings +from ..ldm.modules.textual_inversion_manager import TextualInversionManager +from ..ldm.devices import normalize_device, CPU_DEVICE +from ..ldm.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup +from ..ldm.models.diffusion import AttentionMapSaver from compel import EmbeddingsProvider @dataclass diff --git a/invokeai/backend/generator/img2img.py b/invokeai/backend/generator/img2img.py index 8cc2004745..c50120fc98 100644 --- a/invokeai/backend/generator/img2img.py +++ b/invokeai/backend/generator/img2img.py @@ -7,7 +7,7 @@ from diffusers import logging from .base import Generator from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData -from 
..models.diffusion import PostprocessingSettings +from ..ldm.models.diffusion import PostprocessingSettings class Img2Img(Generator): def __init__(self, model, precision): diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py index 02f3de4531..ec5e4087ea 100644 --- a/invokeai/backend/generator/inpaint.py +++ b/invokeai/backend/generator/inpaint.py @@ -15,7 +15,7 @@ from .diffusers_pipeline import image_resized_to_grid_as_tensor, StableDiffusion ConditioningData from .img2img import Img2Img from ldm.invoke.patchmatch import PatchMatch -from ldm.util import debug_image +from ..ldm.util import debug_image def infill_methods()->list[str]: diff --git a/invokeai/backend/generator/txt2img.py b/invokeai/backend/generator/txt2img.py index 7802e3a913..6ff4cedd6c 100644 --- a/invokeai/backend/generator/txt2img.py +++ b/invokeai/backend/generator/txt2img.py @@ -6,7 +6,7 @@ import torch from .base import Generator from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData -from ..models import PostprocessingSettings +from ..ldm.models import PostprocessingSettings class Txt2Img(Generator): def __init__(self, model, precision): diff --git a/ldm/invoke/concepts_lib.py b/invokeai/backend/ldm/concepts_lib.py similarity index 100% rename from ldm/invoke/concepts_lib.py rename to invokeai/backend/ldm/concepts_lib.py diff --git a/ldm/invoke/conditioning.py b/invokeai/backend/ldm/conditioning.py similarity index 99% rename from ldm/invoke/conditioning.py rename to invokeai/backend/ldm/conditioning.py index 17231ec4cb..918fa2def1 100644 --- a/ldm/invoke/conditioning.py +++ b/invokeai/backend/ldm/conditioning.py @@ -14,7 +14,7 @@ from transformers import CLIPTokenizer, CLIPTextModel from compel import Compel from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser from .devices import torch_dtype -from invokeai.backend.models import InvokeAIDiffuserComponent +from .models import InvokeAIDiffuserComponent from ldm.invoke.globals import Globals def get_tokenizer(model) -> CLIPTokenizer: diff --git a/ldm/data/__init__.py b/invokeai/backend/ldm/data/__init__.py similarity index 100% rename from ldm/data/__init__.py rename to invokeai/backend/ldm/data/__init__.py diff --git a/ldm/data/base.py b/invokeai/backend/ldm/data/base.py similarity index 100% rename from ldm/data/base.py rename to invokeai/backend/ldm/data/base.py diff --git a/ldm/data/imagenet.py b/invokeai/backend/ldm/data/imagenet.py similarity index 100% rename from ldm/data/imagenet.py rename to invokeai/backend/ldm/data/imagenet.py diff --git a/ldm/data/lsun.py b/invokeai/backend/ldm/data/lsun.py similarity index 100% rename from ldm/data/lsun.py rename to invokeai/backend/ldm/data/lsun.py diff --git a/ldm/data/personalized.py b/invokeai/backend/ldm/data/personalized.py similarity index 100% rename from ldm/data/personalized.py rename to invokeai/backend/ldm/data/personalized.py diff --git a/ldm/data/personalized_style.py b/invokeai/backend/ldm/data/personalized_style.py similarity index 100% rename from ldm/data/personalized_style.py rename to invokeai/backend/ldm/data/personalized_style.py diff --git a/ldm/invoke/devices.py b/invokeai/backend/ldm/devices.py similarity index 100% rename from ldm/invoke/devices.py rename to invokeai/backend/ldm/devices.py diff --git a/invokeai/backend/models/__init__.py b/invokeai/backend/ldm/models/__init__.py similarity index 100% rename from invokeai/backend/models/__init__.py rename to 
invokeai/backend/ldm/models/__init__.py diff --git a/invokeai/backend/models/__init__.py~ b/invokeai/backend/ldm/models/__init__.py~ similarity index 100% rename from invokeai/backend/models/__init__.py~ rename to invokeai/backend/ldm/models/__init__.py~ diff --git a/invokeai/backend/models/autoencoder.py b/invokeai/backend/ldm/models/autoencoder.py similarity index 99% rename from invokeai/backend/models/autoencoder.py rename to invokeai/backend/ldm/models/autoencoder.py index 3db7b6fd73..49c413a5fb 100644 --- a/invokeai/backend/models/autoencoder.py +++ b/invokeai/backend/ldm/models/autoencoder.py @@ -5,12 +5,12 @@ from contextlib import contextmanager from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.modules.distributions.distributions import ( +from ..modules.diffusionmodules.model import Encoder, Decoder +from ..modules.distributions.distributions import ( DiagonalGaussianDistribution, ) -from ldm.util import instantiate_from_config +from ..util import instantiate_from_config class VQModel(pl.LightningModule): diff --git a/invokeai/backend/models/diffusion/__init__.py b/invokeai/backend/ldm/models/diffusion/__init__.py similarity index 100% rename from invokeai/backend/models/diffusion/__init__.py rename to invokeai/backend/ldm/models/diffusion/__init__.py diff --git a/invokeai/backend/models/diffusion/__init__.py~ b/invokeai/backend/ldm/models/diffusion/__init__.py~ similarity index 100% rename from invokeai/backend/models/diffusion/__init__.py~ rename to invokeai/backend/ldm/models/diffusion/__init__.py~ diff --git a/invokeai/backend/models/diffusion/classifier.py b/invokeai/backend/ldm/models/diffusion/classifier.py similarity index 100% rename from invokeai/backend/models/diffusion/classifier.py rename to invokeai/backend/ldm/models/diffusion/classifier.py diff --git a/invokeai/backend/models/diffusion/cross_attention_control.py b/invokeai/backend/ldm/models/diffusion/cross_attention_control.py similarity index 99% rename from invokeai/backend/models/diffusion/cross_attention_control.py rename to invokeai/backend/ldm/models/diffusion/cross_attention_control.py index a34f22e683..6e91e5c868 100644 --- a/invokeai/backend/models/diffusion/cross_attention_control.py +++ b/invokeai/backend/ldm/models/diffusion/cross_attention_control.py @@ -15,7 +15,7 @@ from torch import nn from compel.cross_attention_control import Arguments from diffusers.models.unet_2d_condition import UNet2DConditionModel from diffusers.models.cross_attention import AttnProcessor -from ldm.invoke.devices import torch_dtype +from ...devices import torch_dtype class CrossAttentionType(enum.Enum): diff --git a/invokeai/backend/models/diffusion/cross_attention_map_saving.py b/invokeai/backend/ldm/models/diffusion/cross_attention_map_saving.py similarity index 100% rename from invokeai/backend/models/diffusion/cross_attention_map_saving.py rename to invokeai/backend/ldm/models/diffusion/cross_attention_map_saving.py diff --git a/invokeai/backend/models/diffusion/ddim.py b/invokeai/backend/ldm/models/diffusion/ddim.py similarity index 98% rename from invokeai/backend/models/diffusion/ddim.py rename to invokeai/backend/ldm/models/diffusion/ddim.py index f2c6f4c591..f36e970552 100644 --- a/invokeai/backend/models/diffusion/ddim.py +++ b/invokeai/backend/ldm/models/diffusion/ddim.py @@ -3,7 +3,7 @@ import torch from .shared_invokeai_diffusion import InvokeAIDiffuserComponent from .sampler import Sampler -from 
ldm.modules.diffusionmodules.util import noise_like +from ...modules.diffusionmodules.util import noise_like class DDIMSampler(Sampler): def __init__(self, model, schedule='linear', device=None, **kwargs): diff --git a/invokeai/backend/models/diffusion/ddpm.py b/invokeai/backend/ldm/models/diffusion/ddpm.py similarity index 99% rename from invokeai/backend/models/diffusion/ddpm.py rename to invokeai/backend/ldm/models/diffusion/ddpm.py index 1fe059cef4..f9e9485dd3 100644 --- a/invokeai/backend/models/diffusion/ddpm.py +++ b/invokeai/backend/ldm/models/diffusion/ddpm.py @@ -22,8 +22,8 @@ from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig import urllib -from ldm.modules.textual_inversion_manager import TextualInversionManager -from ldm.util import ( +from ...modules.textual_inversion_manager import TextualInversionManager +from ...util import ( log_txt_as_img, exists, default, @@ -33,8 +33,8 @@ from ldm.util import ( count_params, instantiate_from_config, ) -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import ( +from ...modules.ema import LitEma +from ...modules.distributions.distributions import ( normal_kl, DiagonalGaussianDistribution, ) @@ -43,7 +43,7 @@ from ..autoencoder import ( IdentityFirstStage, AutoencoderKL, ) -from ldm.modules.diffusionmodules.util import ( +from ...modules.diffusionmodules.util import ( make_beta_schedule, extract_into_tensor, noise_like, diff --git a/invokeai/backend/models/diffusion/ksampler.py b/invokeai/backend/ldm/models/diffusion/ksampler.py similarity index 100% rename from invokeai/backend/models/diffusion/ksampler.py rename to invokeai/backend/ldm/models/diffusion/ksampler.py diff --git a/invokeai/backend/models/diffusion/plms.py b/invokeai/backend/ldm/models/diffusion/plms.py similarity index 98% rename from invokeai/backend/models/diffusion/plms.py rename to invokeai/backend/ldm/models/diffusion/plms.py index 4df703bed5..e0a187e900 100644 --- a/invokeai/backend/models/diffusion/plms.py +++ b/invokeai/backend/ldm/models/diffusion/plms.py @@ -4,10 +4,10 @@ import torch import numpy as np from tqdm import tqdm from functools import partial -from ldm.invoke.devices import choose_torch_device +from ...devices import choose_torch_device from .shared_invokeai_diffusion import InvokeAIDiffuserComponent from .sampler import Sampler -from ldm.modules.diffusionmodules.util import noise_like +from ...modules.diffusionmodules.util import noise_like class PLMSSampler(Sampler): diff --git a/invokeai/backend/models/diffusion/sampler.py b/invokeai/backend/ldm/models/diffusion/sampler.py similarity index 99% rename from invokeai/backend/models/diffusion/sampler.py rename to invokeai/backend/ldm/models/diffusion/sampler.py index 29479ff15f..bd9d8c157b 100644 --- a/invokeai/backend/models/diffusion/sampler.py +++ b/invokeai/backend/ldm/models/diffusion/sampler.py @@ -7,10 +7,10 @@ import torch import numpy as np from tqdm import tqdm from functools import partial -from ldm.invoke.devices import choose_torch_device +from ...devices import choose_torch_device from .shared_invokeai_diffusion import InvokeAIDiffuserComponent -from ldm.modules.diffusionmodules.util import ( +from ...modules.diffusionmodules.util import ( make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, diff --git a/invokeai/backend/models/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/ldm/models/diffusion/shared_invokeai_diffusion.py similarity index 100% rename from 
invokeai/backend/models/diffusion/shared_invokeai_diffusion.py rename to invokeai/backend/ldm/models/diffusion/shared_invokeai_diffusion.py diff --git a/invokeai/backend/models/model_manager.py b/invokeai/backend/ldm/models/model_manager.py similarity index 99% rename from invokeai/backend/models/model_manager.py rename to invokeai/backend/ldm/models/model_manager.py index e4dc5ffe40..8edf220a4c 100644 --- a/invokeai/backend/models/model_manager.py +++ b/invokeai/backend/ldm/models/model_manager.py @@ -31,14 +31,14 @@ from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from picklescan.scanner import scan_file_path -from ldm.invoke.devices import CPU_DEVICE +from ..devices import CPU_DEVICE from ldm.invoke.globals import Globals, global_cache_dir -from ldm.util import ( +from ..util import ( ask_user, download_with_resume, url_attachment_name, ) -from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline +from ...generator.diffusers_pipeline import StableDiffusionGeneratorPipeline class SDLegacyType(Enum): diff --git a/ldm/modules/__init__.py b/invokeai/backend/ldm/modules/__init__.py similarity index 100% rename from ldm/modules/__init__.py rename to invokeai/backend/ldm/modules/__init__.py diff --git a/ldm/modules/attention.py b/invokeai/backend/ldm/modules/attention.py similarity index 98% rename from ldm/modules/attention.py rename to invokeai/backend/ldm/modules/attention.py index 11b2b45cff..4fb426130f 100644 --- a/ldm/modules/attention.py +++ b/invokeai/backend/ldm/modules/attention.py @@ -7,8 +7,8 @@ import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat -from invokeai.backend.models.diffusion import InvokeAICrossAttentionMixin -from ldm.modules.diffusionmodules.util import checkpoint +from ..models.diffusion import InvokeAICrossAttentionMixin +from .diffusionmodules.util import checkpoint def exists(val): return val is not None diff --git a/ldm/modules/diffusionmodules/__init__.py b/invokeai/backend/ldm/modules/diffusionmodules/__init__.py similarity index 100% rename from ldm/modules/diffusionmodules/__init__.py rename to invokeai/backend/ldm/modules/diffusionmodules/__init__.py diff --git a/ldm/modules/diffusionmodules/model.py b/invokeai/backend/ldm/modules/diffusionmodules/model.py similarity index 99% rename from ldm/modules/diffusionmodules/model.py rename to invokeai/backend/ldm/modules/diffusionmodules/model.py index c7288e7a7b..94b0dfc4c7 100644 --- a/ldm/modules/diffusionmodules/model.py +++ b/invokeai/backend/ldm/modules/diffusionmodules/model.py @@ -7,8 +7,8 @@ from torch.nn.functional import silu import numpy as np from einops import rearrange -from ldm.util import instantiate_from_config -from ldm.modules.attention import LinearAttention +from ...util import instantiate_from_config +from ..attention import LinearAttention import psutil diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/invokeai/backend/ldm/modules/diffusionmodules/openaimodel.py similarity index 100% rename from ldm/modules/diffusionmodules/openaimodel.py rename to invokeai/backend/ldm/modules/diffusionmodules/openaimodel.py diff --git a/ldm/modules/diffusionmodules/util.py b/invokeai/backend/ldm/modules/diffusionmodules/util.py similarity index 99% rename from ldm/modules/diffusionmodules/util.py rename to invokeai/backend/ldm/modules/diffusionmodules/util.py index e93cf49a4d..a943e51d9e 100644 --- a/ldm/modules/diffusionmodules/util.py +++ b/invokeai/backend/ldm/modules/diffusionmodules/util.py @@ -15,7 +15,7 
@@ import torch.nn as nn import numpy as np from einops import repeat -from ldm.util import instantiate_from_config +from ...util import instantiate_from_config def make_beta_schedule( diff --git a/ldm/modules/distributions/__init__.py b/invokeai/backend/ldm/modules/distributions/__init__.py similarity index 100% rename from ldm/modules/distributions/__init__.py rename to invokeai/backend/ldm/modules/distributions/__init__.py diff --git a/ldm/modules/distributions/distributions.py b/invokeai/backend/ldm/modules/distributions/distributions.py similarity index 100% rename from ldm/modules/distributions/distributions.py rename to invokeai/backend/ldm/modules/distributions/distributions.py diff --git a/ldm/modules/ema.py b/invokeai/backend/ldm/modules/ema.py similarity index 100% rename from ldm/modules/ema.py rename to invokeai/backend/ldm/modules/ema.py diff --git a/ldm/modules/embedding_manager.py b/invokeai/backend/ldm/modules/embedding_manager.py similarity index 100% rename from ldm/modules/embedding_manager.py rename to invokeai/backend/ldm/modules/embedding_manager.py diff --git a/ldm/modules/encoders/__init__.py b/invokeai/backend/ldm/modules/encoders/__init__.py similarity index 100% rename from ldm/modules/encoders/__init__.py rename to invokeai/backend/ldm/modules/encoders/__init__.py diff --git a/ldm/modules/encoders/modules.py b/invokeai/backend/ldm/modules/encoders/modules.py similarity index 100% rename from ldm/modules/encoders/modules.py rename to invokeai/backend/ldm/modules/encoders/modules.py diff --git a/ldm/modules/image_degradation/__init__.py b/invokeai/backend/ldm/modules/image_degradation/__init__.py similarity index 100% rename from ldm/modules/image_degradation/__init__.py rename to invokeai/backend/ldm/modules/image_degradation/__init__.py diff --git a/ldm/modules/image_degradation/bsrgan.py b/invokeai/backend/ldm/modules/image_degradation/bsrgan.py similarity index 100% rename from ldm/modules/image_degradation/bsrgan.py rename to invokeai/backend/ldm/modules/image_degradation/bsrgan.py diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/invokeai/backend/ldm/modules/image_degradation/bsrgan_light.py similarity index 100% rename from ldm/modules/image_degradation/bsrgan_light.py rename to invokeai/backend/ldm/modules/image_degradation/bsrgan_light.py diff --git a/ldm/modules/image_degradation/utils/test.png b/invokeai/backend/ldm/modules/image_degradation/utils/test.png similarity index 100% rename from ldm/modules/image_degradation/utils/test.png rename to invokeai/backend/ldm/modules/image_degradation/utils/test.png diff --git a/ldm/modules/image_degradation/utils_image.py b/invokeai/backend/ldm/modules/image_degradation/utils_image.py similarity index 100% rename from ldm/modules/image_degradation/utils_image.py rename to invokeai/backend/ldm/modules/image_degradation/utils_image.py diff --git a/ldm/modules/losses/__init__.py b/invokeai/backend/ldm/modules/losses/__init__.py similarity index 100% rename from ldm/modules/losses/__init__.py rename to invokeai/backend/ldm/modules/losses/__init__.py diff --git a/ldm/modules/losses/contperceptual.py b/invokeai/backend/ldm/modules/losses/contperceptual.py similarity index 100% rename from ldm/modules/losses/contperceptual.py rename to invokeai/backend/ldm/modules/losses/contperceptual.py diff --git a/ldm/modules/losses/vqperceptual.py b/invokeai/backend/ldm/modules/losses/vqperceptual.py similarity index 100% rename from ldm/modules/losses/vqperceptual.py rename to 
invokeai/backend/ldm/modules/losses/vqperceptual.py diff --git a/ldm/modules/textual_inversion_manager.py b/invokeai/backend/ldm/modules/textual_inversion_manager.py similarity index 99% rename from ldm/modules/textual_inversion_manager.py rename to invokeai/backend/ldm/modules/textual_inversion_manager.py index c3ca69e992..2c5721ea7b 100644 --- a/ldm/modules/textual_inversion_manager.py +++ b/invokeai/backend/ldm/modules/textual_inversion_manager.py @@ -9,7 +9,7 @@ from picklescan.scanner import scan_file_path from transformers import CLIPTextModel, CLIPTokenizer from compel.embeddings_provider import BaseTextualInversionManager -from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary +from ..concepts_lib import HuggingFaceConceptsLibrary @dataclass diff --git a/ldm/modules/x_transformer.py b/invokeai/backend/ldm/modules/x_transformer.py similarity index 100% rename from ldm/modules/x_transformer.py rename to invokeai/backend/ldm/modules/x_transformer.py diff --git a/ldm/invoke/offloading.py b/invokeai/backend/ldm/offloading.py similarity index 100% rename from ldm/invoke/offloading.py rename to invokeai/backend/ldm/offloading.py diff --git a/ldm/util.py b/invokeai/backend/ldm/util.py similarity index 99% rename from ldm/util.py rename to invokeai/backend/ldm/util.py index 34075613b6..0aac014ef9 100644 --- a/ldm/util.py +++ b/invokeai/backend/ldm/util.py @@ -15,7 +15,7 @@ import torch from PIL import Image, ImageDraw, ImageFont from tqdm import tqdm -from ldm.invoke.devices import torch_dtype +from .devices import torch_dtype def log_txt_as_img(wh, xc, size=10): diff --git a/invokeai/configs/stable-diffusion/v1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-finetune.yaml index 9fea4ae01f..96e7dd338a 100644 --- a/invokeai/configs/stable-diffusion/v1-finetune.yaml +++ b/invokeai/configs/stable-diffusion/v1-finetune.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 5.0e-03 - target: invokeai.models.diffusion.ddpm.LatentDiffusion + target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -19,7 +19,7 @@ model: embedding_reg_weight: 0.0 personalization_config: - target: ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ["sculpture"] @@ -28,7 +28,7 @@ model: progressive_words: False unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -68,7 +68,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder data: target: main.DataModuleFromConfig @@ -77,14 +77,14 @@ data: num_workers: 2 wrap: false train: - target: ldm.data.personalized.PersonalizedBase + target: invokeai.backend.ldm.data.personalized.PersonalizedBase params: size: 512 set: train per_image_tokens: false repeats: 100 validation: - target: ldm.data.personalized.PersonalizedBase + target: invokeai.backend.ldm.data.personalized.PersonalizedBase params: size: 512 set: val diff --git a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml index fdecca9b72..7433390ce3 100644 --- a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml +++ b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml @@ -1,6 +1,6 
@@ model: base_learning_rate: 5.0e-03 - target: invokeai.models.diffusion.ddpm.LatentDiffusion + target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -19,7 +19,7 @@ model: embedding_reg_weight: 0.0 personalization_config: - target: ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ["painting"] @@ -27,7 +27,7 @@ model: num_vectors_per_token: 1 unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -67,7 +67,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder data: target: main.DataModuleFromConfig @@ -76,14 +76,14 @@ data: num_workers: 16 wrap: false train: - target: ldm.data.personalized_style.PersonalizedBase + target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase params: size: 512 set: train per_image_tokens: false repeats: 100 validation: - target: ldm.data.personalized_style.PersonalizedBase + target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase params: size: 512 set: val diff --git a/invokeai/configs/stable-diffusion/v1-inference.yaml b/invokeai/configs/stable-diffusion/v1-inference.yaml index 913cbbf310..911c756003 100644 --- a/invokeai/configs/stable-diffusion/v1-inference.yaml +++ b/invokeai/configs/stable-diffusion/v1-inference.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 1.0e-04 - target: invokeai.models.diffusion.ddpm.LatentDiffusion + target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -18,7 +18,7 @@ model: use_ema: False scheduler_config: # 10000 warmup steps - target: ldm.lr_scheduler.LambdaLinearScheduler + target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler params: warm_up_steps: [ 10000 ] cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases @@ -27,7 +27,7 @@ model: f_min: [ 1. 
] personalization_config: - target: ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ['sculpture'] @@ -36,7 +36,7 @@ model: progressive_words: False unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -76,4 +76,4 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder + target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder diff --git a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml index 78458a7e54..76ef8d2750 100644 --- a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml +++ b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 7.5e-05 - target: invokeai.models.diffusion.ddpm.LatentInpaintDiffusion + target: invokeai.backend.models.diffusion.ddpm.LatentInpaintDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -18,7 +18,7 @@ model: finetune_keys: null scheduler_config: # 10000 warmup steps - target: ldm.lr_scheduler.LambdaLinearScheduler + target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler params: warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases @@ -27,7 +27,7 @@ model: f_min: [ 1. ] personalization_config: - target: ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ['sculpture'] @@ -36,7 +36,7 @@ model: progressive_words: False unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 9 # 4 data + 4 downscaled image + 1 mask @@ -76,4 +76,4 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder + target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder diff --git a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml index e6db3ac067..d9533d9b4b 100644 --- a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml +++ b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 5.0e-03 - target: invokeai.models.diffusion.ddpm.LatentDiffusion + target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -19,7 +19,7 @@ model: embedding_reg_weight: 0.0 personalization_config: - target: ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ['sculpture'] @@ -28,7 +28,7 @@ model: progressive_words: False unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -68,7 +68,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: 
invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder data: target: main.DataModuleFromConfig @@ -77,14 +77,14 @@ data: num_workers: 2 wrap: false train: - target: ldm.data.personalized.PersonalizedBase + target: invokeai.backend.ldm.data.personalized.PersonalizedBase params: size: 512 set: train per_image_tokens: false repeats: 100 validation: - target: ldm.data.personalized.PersonalizedBase + target: invokeai.backend.ldm.data.personalized.PersonalizedBase params: size: 512 set: val diff --git a/invokeai/configs/stable-diffusion/v2-inference-v.yaml b/invokeai/configs/stable-diffusion/v2-inference-v.yaml index 6b6828fbe7..2a426483eb 100644 --- a/invokeai/configs/stable-diffusion/v2-inference-v.yaml +++ b/invokeai/configs/stable-diffusion/v2-inference-v.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 1.0e-4 - target: invokeai.models.diffusion.ddpm.LatentDiffusion + target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion params: parameterization: "v" linear_start: 0.00085 @@ -19,7 +19,7 @@ model: use_ema: False # we set this to false because this is an inference only config unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel params: use_checkpoint: True use_fp16: True @@ -62,7 +62,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + target: invokeai.backend.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder params: freeze: True layer: "penultimate" diff --git a/ldm/generate.py b/ldm/generate.py index 00c94b6a83..536a7f2df8 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -25,14 +25,14 @@ from omegaconf import OmegaConf from PIL import Image, ImageOps from pytorch_lightning import logging, seed_everything -import ldm.invoke.conditioning +import invokeai.backend.ldm.conditioning -from invokeai.backend.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler) +from invokeai.backend.ldm.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler) from invokeai.backend.generator import infill_methods from ldm.invoke.args import metadata_from_png -from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary -from ldm.invoke.conditioning import get_uc_and_c_and_ec -from ldm.invoke.devices import choose_precision, choose_torch_device +from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary +from invokeai.backend.ldm.conditioning import get_uc_and_c_and_ec +from invokeai.backend.ldm.devices import choose_precision, choose_torch_device from ldm.invoke.globals import Globals, global_cache_dir from ldm.invoke.image_util import InitImageResizer from ldm.invoke.pngwriter import PngWriter diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 42fe6638aa..16f64e3bd1 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -25,10 +25,10 @@ from invokeai.backend.generator import PipelineIntermediateState from .globals import Globals from .image_util import make_grid from .log import write_log -from invokeai.backend.models import ModelManager +from invokeai.backend.ldm.models import ModelManager from .pngwriter import PngWriter, retrieve_metadata, write_metadata from .readline import Completer, get_completer -from ..util import url_attachment_name +from invokeai.backend.ldm.util import url_attachment_name # global used in multiple functions (fix) infile = None diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py index 1a5dbe334a..17bb8005cb 100644 --- a/ldm/invoke/args.py +++ 
b/ldm/invoke/args.py @@ -97,7 +97,7 @@ from typing import List import ldm.invoke import ldm.invoke.pngwriter -from ldm.invoke.conditioning import split_weighted_subprompts +from invokeai.backend.ldm.conditioning import split_weighted_subprompts from ldm.invoke.globals import Globals diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py index 287283ca27..9dd22d2ac7 100644 --- a/ldm/invoke/config/model_install.py +++ b/ldm/invoke/config/model_install.py @@ -22,7 +22,7 @@ from npyscreen import widget from omegaconf import OmegaConf from shutil import get_terminal_size -from ..devices import choose_precision, choose_torch_device +from invokeai.backend.ldm.devices import choose_precision, choose_torch_device from ..globals import Globals, global_config_dir from .model_install_backend import (Dataset_path, default_config_file, default_dataset, get_root, diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index 428a46b96b..c50e5ebc3c 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -20,7 +20,7 @@ from typing import List import invokeai.configs as configs from invokeai.backend.generator import StableDiffusionGeneratorPipeline from ..globals import Globals, global_cache_dir, global_config_dir -from invokeai.backend.models import ModelManager +from invokeai.backend.ldm.models import ModelManager warnings.filterwarnings("ignore") diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py index 16e5340e8f..28abb5d432 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/ldm/invoke/merge_diffusers.py @@ -23,7 +23,7 @@ from omegaconf import OmegaConf from ldm.invoke.config.widgets import FloatTitleSlider from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file, global_models_dir, global_set_root) -from invokeai.backend.models import ModelManager +from invokeai.backend.ldm.models import ModelManager DEST_MERGED_MODEL_DIR = "merged_models" diff --git a/ldm/invoke/readline.py b/ldm/invoke/readline.py index 542bdeeaed..a605358a9b 100644 --- a/ldm/invoke/readline.py +++ b/ldm/invoke/readline.py @@ -12,7 +12,7 @@ import os import re import atexit from ldm.invoke.args import Args -from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary +from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary from ldm.invoke.globals import Globals # ---------------readline utilities--------------------- diff --git a/pyproject.toml b/pyproject.toml index 4b5a5d5fda..22e6f9282c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -130,7 +130,7 @@ version = { attr = "ldm.invoke.__version__" } [tool.setuptools.packages.find] "where" = ["."] "include" = [ - "invokeai.assets.web*", "invokeai.models*", + "invokeai.assets.web*", "invokeai.generator*","invokeai.backend*", "invokeai.frontend.dist*", "invokeai.configs*", "ldm*" From 1a7371ea1793bee7872db1d1a95b2f0394f70541 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 1 Mar 2023 21:09:22 -0500 Subject: [PATCH 09/19] remove unused embeddings code --- invokeai/backend/invoke_ai_web_server.py | 2 +- .../backend/ldm/modules/embedding_manager.py | 377 ------------------ 2 files changed, 1 insertion(+), 378 deletions(-) delete mode 100644 invokeai/backend/ldm/modules/embedding_manager.py diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py index f624cb7710..f6e9446618 100644 --- a/invokeai/backend/invoke_ai_web_server.py +++ 
b/invokeai/backend/invoke_ai_web_server.py @@ -26,7 +26,7 @@ from invokeai.backend.modules.get_canvas_generation_mode import ( from invokeai.backend.modules.parameters import parameters_to_command from ldm.generate import Generate from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash -from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, get_tokenizer +from invokeai.backend.ldm.conditioning import get_tokens_for_prompt_object, get_prompt_structure, get_tokenizer from .generator import infill_methods, PipelineIntermediateState from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir, global_models_dir diff --git a/invokeai/backend/ldm/modules/embedding_manager.py b/invokeai/backend/ldm/modules/embedding_manager.py deleted file mode 100644 index 3d60e70eb7..0000000000 --- a/invokeai/backend/ldm/modules/embedding_manager.py +++ /dev/null @@ -1,377 +0,0 @@ -import os.path -from cmath import log -import torch -from attr import dataclass -from torch import nn - -import sys - -from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary -from ldm.data.personalized import per_img_token_list -from transformers import CLIPTokenizer -from functools import partial -from picklescan.scanner import scan_file_path - -PROGRESSIVE_SCALE = 2000 - - -def get_clip_token_id_for_string(tokenizer: CLIPTokenizer, token_str: str) -> int: - token_id = tokenizer.convert_tokens_to_ids(token_str) - return token_id - -def get_embedding_for_clip_token_id(embedder, token_id): - if type(token_id) is not torch.Tensor: - token_id = torch.tensor(token_id, dtype=torch.int) - return embedder(token_id.unsqueeze(0))[0, 0] - - -class EmbeddingManager(nn.Module): - def __init__( - self, - embedder, - placeholder_strings=None, - initializer_words=None, - per_image_tokens=False, - num_vectors_per_token=1, - progressive_words=False, - **kwargs, - ): - super().__init__() - - self.embedder = embedder - self.concepts_library=HuggingFaceConceptsLibrary() - - self.string_to_token_dict = {} - self.string_to_param_dict = nn.ParameterDict() - - self.initial_embeddings = ( - nn.ParameterDict() - ) # These should not be optimized - - self.progressive_words = progressive_words - self.progressive_counter = 0 - - self.max_vectors_per_token = num_vectors_per_token - - if hasattr( - embedder, 'tokenizer' - ): # using Stable Diffusion's CLIP encoder - self.is_clip = True - get_token_id_for_string = partial( - get_clip_token_id_for_string, embedder.tokenizer - ) - get_embedding_for_tkn_id = partial( - get_embedding_for_clip_token_id, - embedder.transformer.text_model.embeddings, - ) - # per bug report #572 - #token_dim = 1280 - token_dim = 768 - else: # using LDM's BERT encoder - self.is_clip = False - get_token_id_for_string = partial( - get_bert_token_id_for_string, embedder.tknz_fn - ) - get_embedding_for_tkn_id = embedder.transformer.token_emb - token_dim = 1280 - - if per_image_tokens: - placeholder_strings.extend(per_img_token_list) - - for idx, placeholder_string in enumerate(placeholder_strings): - - token_id = get_token_id_for_string(placeholder_string) - - if initializer_words and idx < len(initializer_words): - init_word_token_id = get_token_id_for_string(initializer_words[idx]) - - with torch.no_grad(): - init_word_embedding = get_embedding_for_tkn_id(init_word_token_id) - - token_params = torch.nn.Parameter( - init_word_embedding.unsqueeze(0).repeat( - num_vectors_per_token, 1 - ), - requires_grad=True, - ) - self.initial_embeddings[ - placeholder_string - ] = 
torch.nn.Parameter( - init_word_embedding.unsqueeze(0).repeat( - num_vectors_per_token, 1 - ), - requires_grad=False, - ) - else: - token_params = torch.nn.Parameter( - torch.rand( - size=(num_vectors_per_token, token_dim), - requires_grad=True, - ) - ) - - self.string_to_token_dict[placeholder_string] = token_id - self.string_to_param_dict[placeholder_string] = token_params - - def forward( - self, - tokenized_text, - embedded_text, - ): - # torch.save(embedded_text, '/tmp/embedding-manager-uglysonic-pre-rewrite.pt') - - b, n, device = *tokenized_text.shape, tokenized_text.device - - for ( - placeholder_string, - placeholder_token, - ) in self.string_to_token_dict.items(): - - placeholder_embedding = self.string_to_param_dict[ - placeholder_string - ].to(device) - - if self.progressive_words: - self.progressive_counter += 1 - max_step_tokens = ( - 1 + self.progressive_counter // PROGRESSIVE_SCALE - ) - else: - max_step_tokens = self.max_vectors_per_token - - num_vectors_for_token = min( - placeholder_embedding.shape[0], max_step_tokens - ) - - placeholder_rows, placeholder_cols = torch.where( - tokenized_text == placeholder_token - ) - - if placeholder_rows.nelement() == 0: - continue - - sorted_cols, sort_idx = torch.sort( - placeholder_cols, descending=True - ) - sorted_rows = placeholder_rows[sort_idx] - - for idx in range(sorted_rows.shape[0]): - row = sorted_rows[idx] - col = sorted_cols[idx] - - new_token_row = torch.cat( - [ - tokenized_text[row][:col], - torch.tensor([placeholder_token] * num_vectors_for_token, device=device), - tokenized_text[row][col + 1 :], - ], - axis=0, - )[:n] - new_embed_row = torch.cat( - [ - embedded_text[row][:col], - placeholder_embedding[:num_vectors_for_token], - embedded_text[row][col + 1 :], - ], - axis=0, - )[:n] - - embedded_text[row] = new_embed_row - tokenized_text[row] = new_token_row - - return embedded_text - - def save(self, ckpt_path): - torch.save( - { - 'string_to_token': self.string_to_token_dict, - 'string_to_param': self.string_to_param_dict, - }, - ckpt_path, - ) - - def load(self, ckpt_paths, full=True): - if len(ckpt_paths) == 0: - return - if type(ckpt_paths) != list: - ckpt_paths = [ckpt_paths] - ckpt_paths = self._expand_directories(ckpt_paths) - for c in ckpt_paths: - self._load(c,full) - # remember that we know this term and don't try to download it again from the concepts library - # note that if the concept name is also provided and different from the trigger term, they - # both will be stored in this dictionary - for term in self.string_to_param_dict.keys(): - term = term.strip('<').strip('>') - self.concepts_loaded[term] = True - print(f'>> Current embedding manager terms: {", ".join(self.string_to_param_dict.keys())}') - - def _expand_directories(self, paths:list[str]): - expanded_paths = list() - for path in paths: - if os.path.isfile(path): - expanded_paths.append(path) - elif os.path.isdir(path): - for root, _, files in os.walk(path): - for name in files: - expanded_paths.append(os.path.join(root,name)) - return [x for x in expanded_paths if os.path.splitext(x)[1] in ('.pt','.bin')] - - def _load(self, ckpt_path, full=True): - try: - scan_result = scan_file_path(ckpt_path) - if scan_result.infected_files == 1: - print(f'\n### Security Issues Found in Model: {scan_result.issues_count}') - print('### For your safety, InvokeAI will not load this embed.') - return - except Exception: - print(f"### WARNING::: Invalid or corrupt embeddings found. 
Ignoring: {ckpt_path}") - return - - embedding_info = self.parse_embedding(ckpt_path) - if embedding_info: - self.max_vectors_per_token = embedding_info['num_vectors_per_token'] - self.add_embedding(embedding_info['name'], embedding_info['embedding'], full) - else: - print(f'>> Failed to load embedding located at {ckpt_path}. Unsupported file.') - - def add_embedding(self, token_str, embedding, full): - if token_str in self.string_to_param_dict: - print(f">> Embedding manager refusing to overwrite already-loaded term '{token_str}'") - return - if not full: - embedding = embedding.half() - if len(embedding.shape) == 1: - embedding = embedding.unsqueeze(0) - - existing_token_id = get_clip_token_id_for_string(self.embedder.tokenizer, token_str) - if existing_token_id == self.embedder.tokenizer.unk_token_id: - num_tokens_added = self.embedder.tokenizer.add_tokens(token_str) - current_embeddings = self.embedder.transformer.resize_token_embeddings(None) - current_token_count = current_embeddings.num_embeddings - new_token_count = current_token_count + num_tokens_added - self.embedder.transformer.resize_token_embeddings(new_token_count) - - token_id = get_clip_token_id_for_string(self.embedder.tokenizer, token_str) - self.string_to_token_dict[token_str] = token_id - self.string_to_param_dict[token_str] = torch.nn.Parameter(embedding) - - def parse_embedding(self, embedding_file: str): - file_type = embedding_file.split('.')[-1] - if file_type == 'pt': - return self.parse_embedding_pt(embedding_file) - elif file_type == 'bin': - return self.parse_embedding_bin(embedding_file) - else: - print(f'>> Not a recognized embedding file: {embedding_file}') - - def parse_embedding_pt(self, embedding_file): - embedding_ckpt = torch.load(embedding_file, map_location='cpu') - embedding_info = {} - - # Check if valid embedding file - if 'string_to_token' and 'string_to_param' in embedding_ckpt: - - # Catch variants that do not have the expected keys or values. - try: - embedding_info['name'] = embedding_ckpt['name'] or os.path.basename(os.path.splitext(embedding_file)[0]) - - # Check num of embeddings and warn user only the first will be used - embedding_info['num_of_embeddings'] = len(embedding_ckpt["string_to_token"]) - if embedding_info['num_of_embeddings'] > 1: - print('>> More than 1 embedding found. Will use the first one') - - embedding = list(embedding_ckpt['string_to_param'].values())[0] - except (AttributeError,KeyError): - return self.handle_broken_pt_variants(embedding_ckpt, embedding_file) - - embedding_info['embedding'] = embedding - embedding_info['num_vectors_per_token'] = embedding.size()[0] - embedding_info['token_dim'] = embedding.size()[1] - - try: - embedding_info['trained_steps'] = embedding_ckpt['step'] - embedding_info['trained_model_name'] = embedding_ckpt['sd_checkpoint_name'] - embedding_info['trained_model_checksum'] = embedding_ckpt['sd_checkpoint'] - except AttributeError: - print(">> No Training Details Found. 
Passing ...") - - # .pt files found at https://cyberes.github.io/stable-diffusion-textual-inversion-models/ - # They are actually .bin files - elif len(embedding_ckpt.keys())==1: - print('>> Detected .bin file masquerading as .pt file') - embedding_info = self.parse_embedding_bin(embedding_file) - - else: - print('>> Invalid embedding format') - embedding_info = None - - return embedding_info - - def parse_embedding_bin(self, embedding_file): - embedding_ckpt = torch.load(embedding_file, map_location='cpu') - embedding_info = {} - - if list(embedding_ckpt.keys()) == 0: - print(">> Invalid concepts file") - embedding_info = None - else: - for token in list(embedding_ckpt.keys()): - embedding_info['name'] = token or os.path.basename(os.path.splitext(embedding_file)[0]) - embedding_info['embedding'] = embedding_ckpt[token] - embedding_info['num_vectors_per_token'] = 1 # All Concepts seem to default to 1 - embedding_info['token_dim'] = embedding_info['embedding'].size()[0] - - return embedding_info - - def handle_broken_pt_variants(self, embedding_ckpt:dict, embedding_file:str)->dict: - ''' - This handles the broken .pt file variants. We only know of one at present. - ''' - embedding_info = {} - if isinstance(list(embedding_ckpt['string_to_token'].values())[0],torch.Tensor): - print(f'>> Variant Embedding Detected. Parsing: {embedding_file}') # example at https://github.com/invoke-ai/InvokeAI/issues/1829 - token = list(embedding_ckpt['string_to_token'].keys())[0] - embedding_info['name'] = os.path.basename(os.path.splitext(embedding_file)[0]) - embedding_info['embedding'] = embedding_ckpt['string_to_param'].state_dict()[token] - embedding_info['num_vectors_per_token'] = embedding_info['embedding'].shape[0] - embedding_info['token_dim'] = embedding_info['embedding'].size()[0] - else: - print('>> Invalid embedding format') - embedding_info = None - - return embedding_info - - def has_embedding_for_token(self, token_str): - return token_str in self.string_to_token_dict - - def get_embedding_norms_squared(self): - all_params = torch.cat( - list(self.string_to_param_dict.values()), axis=0 - ) # num_placeholders x embedding_dim - param_norm_squared = (all_params * all_params).sum( - axis=-1 - ) # num_placeholders - - return param_norm_squared - - def embedding_parameters(self): - return self.string_to_param_dict.parameters() - - def embedding_to_coarse_loss(self): - - loss = 0.0 - num_embeddings = len(self.initial_embeddings) - - for key in self.initial_embeddings: - optimized = self.string_to_param_dict[key] - coarse = self.initial_embeddings[key].clone().to(optimized.device) - - loss = ( - loss - + (optimized - coarse) - @ (optimized - coarse).T - / num_embeddings - ) - - return loss From 3f0b0f3250fb909779334880346f4fd3c2c3e0be Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 2 Mar 2023 13:28:17 -0500 Subject: [PATCH 10/19] almost all of backend migrated; restoration next --- .gitignore | 5 ---- installer/create_installer.sh | 4 +-- invokeai/backend/__init__.py | 1 + invokeai/backend/{ldm => }/devices.py | 0 invokeai/backend/generator/__init__.py | 1 - invokeai/backend/generator/base.py | 4 +-- invokeai/backend/generator/img2img.py | 6 ++-- invokeai/backend/generator/inpaint.py | 9 +++--- invokeai/backend/generator/txt2img.py | 6 ++-- invokeai/backend/image_util/__init__.py | 28 ++++++++++++++++++ .../backend/image_util}/patchmatch.py | 0 .../backend/image_util}/pngwriter.py | 0 .../backend/image_util}/txt2mask.py | 0 .../backend/image_util/util.py | 0 
invokeai/backend/invoke_ai_web_server.py | 13 +++++--- invokeai/backend/ldm/models/__init__.py | 10 ------- invokeai/backend/ldm/models/__init__.py~ | 10 ------- .../backend/{ldm/models => }/model_manager.py | 7 ++--- invokeai/backend/prompting/__init__.py | 7 +++++ .../{ldm => prompting}/conditioning.py | 4 +-- invokeai/backend/stable_diffusion/__init__.py | 16 ++++++++++ .../modules => stable_diffusion}/attention.py | 2 +- .../autoencoder.py | 4 +-- .../{ldm => stable_diffusion}/concepts_lib.py | 0 .../data/__init__.py | 0 .../{ldm => stable_diffusion}/data/base.py | 0 .../data/imagenet.py | 0 .../{ldm => stable_diffusion}/data/lsun.py | 0 .../data/personalized.py | 0 .../data/personalized_style.py | 0 .../diffusers_pipeline.py | 9 +++--- .../diffusion/__init__.py | 0 .../diffusion/__init__.py~ | 0 .../diffusion/classifier.py | 0 .../diffusion/cross_attention_control.py | 0 .../diffusion/cross_attention_map_saving.py | 0 .../diffusion/ddim.py | 2 +- .../diffusion/ddpm.py | 8 ++--- .../diffusion/ksampler.py | 0 .../diffusion/plms.py | 2 +- .../diffusion/sampler.py | 2 +- .../diffusion/shared_invokeai_diffusion.py | 0 .../diffusionmodules}/__init__.py | 0 .../diffusionmodules/model.py | 0 .../diffusionmodules/openaimodel.py | 0 .../diffusionmodules/util.py | 0 .../distributions}/__init__.py | 0 .../distributions/distributions.py | 0 .../{ldm/modules => stable_diffusion}/ema.py | 0 .../encoders}/__init__.py | 0 .../encoders/modules.py | 0 .../image_degradation/__init__.py | 0 .../image_degradation/bsrgan.py | 0 .../image_degradation/bsrgan_light.py | 0 .../image_degradation/utils/test.png | Bin .../image_degradation/utils_image.py | 0 .../losses/__init__.py | 0 .../losses/contperceptual.py | 0 .../losses/vqperceptual.py | 0 .../modules}/__init__.py | 0 .../{ldm => stable_diffusion}/offloading.py | 0 .../textual_inversion_manager.py | 2 +- .../x_transformer.py | 0 invokeai/backend/{ldm => }/util.py | 17 ----------- {ldm/invoke => invokeai/version}/__init__.py | 11 +++++-- .../version/invokeai_version.py | 0 ldm/generate.py | 18 +++++------ ldm/invoke/CLI.py | 13 ++++---- ldm/invoke/app/services/image_storage.py | 2 +- ldm/invoke/args.py | 20 ++++++------- ldm/invoke/config/model_install.py | 2 +- ldm/invoke/config/model_install_backend.py | 4 +-- ldm/invoke/merge_diffusers.py | 2 +- ldm/invoke/readline.py | 2 +- pyproject.toml | 6 ++-- tests/test_textual_inversion.py | 2 +- 76 files changed, 141 insertions(+), 120 deletions(-) rename invokeai/backend/{ldm => }/devices.py (100%) create mode 100644 invokeai/backend/image_util/__init__.py rename {ldm/invoke => invokeai/backend/image_util}/patchmatch.py (100%) rename {ldm/invoke => invokeai/backend/image_util}/pngwriter.py (100%) rename {ldm/invoke => invokeai/backend/image_util}/txt2mask.py (100%) rename ldm/invoke/image_util.py => invokeai/backend/image_util/util.py (100%) delete mode 100644 invokeai/backend/ldm/models/__init__.py delete mode 100644 invokeai/backend/ldm/models/__init__.py~ rename invokeai/backend/{ldm/models => }/model_manager.py (99%) create mode 100644 invokeai/backend/prompting/__init__.py rename invokeai/backend/{ldm => prompting}/conditioning.py (99%) create mode 100644 invokeai/backend/stable_diffusion/__init__.py rename invokeai/backend/{ldm/modules => stable_diffusion}/attention.py (99%) rename invokeai/backend/{ldm/models => stable_diffusion}/autoencoder.py (99%) rename invokeai/backend/{ldm => stable_diffusion}/concepts_lib.py (100%) rename invokeai/backend/{ldm => stable_diffusion}/data/__init__.py (100%) rename 
invokeai/backend/{ldm => stable_diffusion}/data/base.py (100%) rename invokeai/backend/{ldm => stable_diffusion}/data/imagenet.py (100%) rename invokeai/backend/{ldm => stable_diffusion}/data/lsun.py (100%) rename invokeai/backend/{ldm => stable_diffusion}/data/personalized.py (100%) rename invokeai/backend/{ldm => stable_diffusion}/data/personalized_style.py (100%) rename invokeai/backend/{generator => stable_diffusion}/diffusers_pipeline.py (99%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/__init__.py (100%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/__init__.py~ (100%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/classifier.py (100%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/cross_attention_control.py (100%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/cross_attention_map_saving.py (100%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/ddim.py (98%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/ddpm.py (99%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/ksampler.py (100%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/plms.py (99%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/sampler.py (99%) rename invokeai/backend/{ldm/models => stable_diffusion}/diffusion/shared_invokeai_diffusion.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion/diffusionmodules}/__init__.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/diffusionmodules/model.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/diffusionmodules/openaimodel.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/diffusionmodules/util.py (100%) rename invokeai/backend/{ldm/modules/diffusionmodules => stable_diffusion/distributions}/__init__.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/distributions/distributions.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/ema.py (100%) rename invokeai/backend/{ldm/modules/distributions => stable_diffusion/encoders}/__init__.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/encoders/modules.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/image_degradation/__init__.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/image_degradation/bsrgan.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/image_degradation/bsrgan_light.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/image_degradation/utils/test.png (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/image_degradation/utils_image.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/losses/__init__.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/losses/contperceptual.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/losses/vqperceptual.py (100%) rename invokeai/backend/{ldm/modules/encoders => stable_diffusion/modules}/__init__.py (100%) rename invokeai/backend/{ldm => stable_diffusion}/offloading.py (100%) rename invokeai/backend/{ldm/modules => stable_diffusion}/textual_inversion_manager.py (99%) rename invokeai/backend/{ldm/modules => stable_diffusion}/x_transformer.py (100%) rename invokeai/backend/{ldm => }/util.py (96%) rename {ldm/invoke => invokeai/version}/__init__.py (58%) rename ldm/invoke/_version.py => invokeai/version/invokeai_version.py (100%) diff --git 
a/.gitignore b/.gitignore index e28b2c432c..c84ffb1e65 100644 --- a/.gitignore +++ b/.gitignore @@ -213,11 +213,6 @@ gfpgan/ # config file (will be created by installer) configs/models.yaml -# weights (will be created by installer) -# models/ldm/stable-diffusion-v1/*.ckpt -# models/clipseg -# models/gfpgan - # ignore initfile .invokeai diff --git a/installer/create_installer.sh b/installer/create_installer.sh index 8d01c2b331..eac0028493 100755 --- a/installer/create_installer.sh +++ b/installer/create_installer.sh @@ -11,10 +11,10 @@ if [[ -v "VIRTUAL_ENV" ]]; then exit -1 fi -VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)") +VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)") PATCH="" VERSION="v${VERSION}${PATCH}" -LATEST_TAG="v2.3-latest" +LATEST_TAG="v3.0-latest" echo Building installer for version $VERSION echo "Be certain that you're in the 'installer' directory before continuing." diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index 16ced6d9d3..5384b3cee0 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -3,5 +3,6 @@ Initialization file for invokeai.backend ''' # this is causing circular import issues # from .invoke_ai_web_server import InvokeAIWebServer +from .model_manager import ModelManager diff --git a/invokeai/backend/ldm/devices.py b/invokeai/backend/devices.py similarity index 100% rename from invokeai/backend/ldm/devices.py rename to invokeai/backend/devices.py diff --git a/invokeai/backend/generator/__init__.py b/invokeai/backend/generator/__init__.py index c50c9d387d..4d61779472 100644 --- a/invokeai/backend/generator/__init__.py +++ b/invokeai/backend/generator/__init__.py @@ -2,5 +2,4 @@ Initialization file for the invokeai.generator package ''' from .base import Generator -from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline from .inpaint import infill_methods diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index 994a06f461..c35cf58995 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -22,8 +22,8 @@ from pytorch_lightning import seed_everything from tqdm import trange import invokeai.assets.web as web_assets -from ..ldm.models.diffusion.ddpm import DiffusionWrapper -from ..ldm.util import rand_perlin_2d +from ..stable_diffusion.diffusion.ddpm import DiffusionWrapper +from ..util import rand_perlin_2d downsampling = 8 CAUTION_IMG = 'caution.png' diff --git a/invokeai/backend/generator/img2img.py b/invokeai/backend/generator/img2img.py index c50120fc98..edd78d6148 100644 --- a/invokeai/backend/generator/img2img.py +++ b/invokeai/backend/generator/img2img.py @@ -6,8 +6,10 @@ import torch from diffusers import logging from .base import Generator -from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData -from ..ldm.models.diffusion import PostprocessingSettings +from ..stable_diffusion import (StableDiffusionGeneratorPipeline, + ConditioningData, + PostprocessingSettings + ) class Img2Img(Generator): def __init__(self, model, precision): diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py index ec5e4087ea..a6e6fe4387 100644 --- a/invokeai/backend/generator/inpaint.py +++ b/invokeai/backend/generator/inpaint.py @@ -11,11 +11,12 @@ import numpy as np import torch from PIL import Image, ImageFilter, ImageOps, ImageChops -from .diffusers_pipeline import 
image_resized_to_grid_as_tensor, StableDiffusionGeneratorPipeline, \ - ConditioningData +from ..stable_diffusion.diffusers_pipeline import (image_resized_to_grid_as_tensor, + StableDiffusionGeneratorPipeline, + ConditioningData + ) from .img2img import Img2Img -from ldm.invoke.patchmatch import PatchMatch -from ..ldm.util import debug_image +from ..image_util import PatchMatch, debug_image def infill_methods()->list[str]: diff --git a/invokeai/backend/generator/txt2img.py b/invokeai/backend/generator/txt2img.py index 6ff4cedd6c..40094a3033 100644 --- a/invokeai/backend/generator/txt2img.py +++ b/invokeai/backend/generator/txt2img.py @@ -5,8 +5,10 @@ import PIL.Image import torch from .base import Generator -from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData -from ..ldm.models import PostprocessingSettings +from ..stable_diffusion import (PostprocessingSettings, + StableDiffusionGeneratorPipeline, + ConditioningData + ) class Txt2Img(Generator): def __init__(self, model, precision): diff --git a/invokeai/backend/image_util/__init__.py b/invokeai/backend/image_util/__init__.py new file mode 100644 index 0000000000..8cb3199d2d --- /dev/null +++ b/invokeai/backend/image_util/__init__.py @@ -0,0 +1,28 @@ +''' +Initialization file for invokeai.backend.image_util methods. +''' +from PIL import ImageDraw +from .patchmatch import PatchMatch +from .txt2mask import Txt2Mask +from .util import InitImageResizer, make_grid +from .pngwriter import (PngWriter, + PromptFormatter, + retrieve_metadata, + write_metadata, + ) + +def debug_image( + debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False +): + if not debug_status: + return + + image_copy = debug_image.copy().convert("RGBA") + ImageDraw.Draw(image_copy).text((5, 5), debug_text, (255, 0, 0)) + + if debug_show: + image_copy.show() + + if debug_result: + return image_copy + diff --git a/ldm/invoke/patchmatch.py b/invokeai/backend/image_util/patchmatch.py similarity index 100% rename from ldm/invoke/patchmatch.py rename to invokeai/backend/image_util/patchmatch.py diff --git a/ldm/invoke/pngwriter.py b/invokeai/backend/image_util/pngwriter.py similarity index 100% rename from ldm/invoke/pngwriter.py rename to invokeai/backend/image_util/pngwriter.py diff --git a/ldm/invoke/txt2mask.py b/invokeai/backend/image_util/txt2mask.py similarity index 100% rename from ldm/invoke/txt2mask.py rename to invokeai/backend/image_util/txt2mask.py diff --git a/ldm/invoke/image_util.py b/invokeai/backend/image_util/util.py similarity index 100% rename from ldm/invoke/image_util.py rename to invokeai/backend/image_util/util.py diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py index f6e9446618..7c6177803c 100644 --- a/invokeai/backend/invoke_ai_web_server.py +++ b/invokeai/backend/invoke_ai_web_server.py @@ -23,16 +23,21 @@ from werkzeug.utils import secure_filename from invokeai.backend.modules.get_canvas_generation_mode import ( get_canvas_generation_mode, ) -from invokeai.backend.modules.parameters import parameters_to_command +from .modules.parameters import parameters_to_command +from .prompting import (get_tokens_for_prompt_object, + get_prompt_structure, + get_tokenizer + ) +from .image_util import PngWriter, retrieve_metadata +from .generator import infill_methods +from .stable_diffusion import PipelineIntermediateState + from ldm.generate import Generate from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash -from invokeai.backend.ldm.conditioning import
get_tokens_for_prompt_object, get_prompt_structure, get_tokenizer -from .generator import infill_methods, PipelineIntermediateState from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir, global_models_dir ) from ldm.invoke.merge_diffusers import merge_diffusion_models -from ldm.invoke.pngwriter import PngWriter, retrieve_metadata # Loading Arguments opt = Args() diff --git a/invokeai/backend/ldm/models/__init__.py b/invokeai/backend/ldm/models/__init__.py deleted file mode 100644 index bd11ddb78c..0000000000 --- a/invokeai/backend/ldm/models/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -''' -Initialization file for the invokeai.backend.models package -''' -from .model_manager import ModelManager, SDLegacyType -from .diffusion import InvokeAIDiffuserComponent -from .diffusion.ddim import DDIMSampler -from .diffusion.ksampler import KSampler -from .diffusion.plms import PLMSSampler -from .diffusion.cross_attention_map_saving import AttentionMapSaver -from .diffusion.shared_invokeai_diffusion import PostprocessingSettings diff --git a/invokeai/backend/ldm/models/__init__.py~ b/invokeai/backend/ldm/models/__init__.py~ deleted file mode 100644 index 6e060d7fa5..0000000000 --- a/invokeai/backend/ldm/models/__init__.py~ +++ /dev/null @@ -1,10 +0,0 @@ -''' -Initialization file for the invokeai.models package -''' -from .model_manager import ModelManager, SDLegacyType -from .diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent -from .diffusion.ddim import DDIMSampler -from .diffusion.ksampler import KSampler -from .diffusion.plms import PLMSSampler -from .diffusion.cross_attention_map_saving import AttentionMapSaver -from .diffusion.shared_invokeai_diffusion import PostprocessingSettings diff --git a/invokeai/backend/ldm/models/model_manager.py b/invokeai/backend/model_manager.py similarity index 99% rename from invokeai/backend/ldm/models/model_manager.py rename to invokeai/backend/model_manager.py index 8edf220a4c..1f99654610 100644 --- a/invokeai/backend/ldm/models/model_manager.py +++ b/invokeai/backend/model_manager.py @@ -31,15 +31,14 @@ from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from picklescan.scanner import scan_file_path -from ..devices import CPU_DEVICE +from .devices import CPU_DEVICE from ldm.invoke.globals import Globals, global_cache_dir -from ..util import ( +from .util import ( ask_user, download_with_resume, url_attachment_name, ) -from ...generator.diffusers_pipeline import StableDiffusionGeneratorPipeline - +from .stable_diffusion import StableDiffusionGeneratorPipeline class SDLegacyType(Enum): V1 = 1 diff --git a/invokeai/backend/prompting/__init__.py b/invokeai/backend/prompting/__init__.py new file mode 100644 index 0000000000..29694c6538 --- /dev/null +++ b/invokeai/backend/prompting/__init__.py @@ -0,0 +1,7 @@ +''' +Initialization file for invokeai.backend.prompting +''' +from .conditioning import (get_uc_and_c_and_ec, + split_weighted_subprompts, + get_tokens_for_prompt_object, + get_prompt_structure, get_tokenizer) diff --git a/invokeai/backend/ldm/conditioning.py b/invokeai/backend/prompting/conditioning.py similarity index 99% rename from invokeai/backend/ldm/conditioning.py rename to invokeai/backend/prompting/conditioning.py index 918fa2def1..9ec8babf16 100644 --- a/invokeai/backend/ldm/conditioning.py +++ b/invokeai/backend/prompting/conditioning.py @@ -13,8 +13,8 @@ from transformers import CLIPTokenizer, CLIPTextModel from compel import Compel from compel.prompt_parser import FlattenedPrompt, Blend, 
Fragment, CrossAttentionControlSubstitute, PromptParser -from .devices import torch_dtype -from .models import InvokeAIDiffuserComponent +from ..devices import torch_dtype +from ..stable_diffusion import InvokeAIDiffuserComponent from ldm.invoke.globals import Globals def get_tokenizer(model) -> CLIPTokenizer: diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py new file mode 100644 index 0000000000..13e28ce22d --- /dev/null +++ b/invokeai/backend/stable_diffusion/__init__.py @@ -0,0 +1,16 @@ +''' +Initialization file for the invokeai.backend.stable_diffusion package +''' +from .diffusion import InvokeAIDiffuserComponent +from .diffusion.ddim import DDIMSampler +from .diffusion.ksampler import KSampler +from .diffusion.plms import PLMSSampler +from .diffusion.cross_attention_map_saving import AttentionMapSaver +from .diffusion.shared_invokeai_diffusion import PostprocessingSettings +from .textual_inversion_manager import TextualInversionManager +from .concepts_lib import HuggingFaceConceptsLibrary +from .diffusers_pipeline import (StableDiffusionGeneratorPipeline, + ConditioningData, + PipelineIntermediateState, + StableDiffusionGeneratorPipeline + ) diff --git a/invokeai/backend/ldm/modules/attention.py b/invokeai/backend/stable_diffusion/attention.py similarity index 99% rename from invokeai/backend/ldm/modules/attention.py rename to invokeai/backend/stable_diffusion/attention.py index 4fb426130f..27fc5cf4bd 100644 --- a/invokeai/backend/ldm/modules/attention.py +++ b/invokeai/backend/stable_diffusion/attention.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat -from ..models.diffusion import InvokeAICrossAttentionMixin +from .diffusion import InvokeAICrossAttentionMixin from .diffusionmodules.util import checkpoint def exists(val): diff --git a/invokeai/backend/ldm/models/autoencoder.py b/invokeai/backend/stable_diffusion/autoencoder.py similarity index 99% rename from invokeai/backend/ldm/models/autoencoder.py rename to invokeai/backend/stable_diffusion/autoencoder.py index 49c413a5fb..ba081bab28 100644 --- a/invokeai/backend/ldm/models/autoencoder.py +++ b/invokeai/backend/stable_diffusion/autoencoder.py @@ -5,8 +5,8 @@ from contextlib import contextmanager from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer -from ..modules.diffusionmodules.model import Encoder, Decoder -from ..modules.distributions.distributions import ( +from .diffusionmodules.model import Encoder, Decoder +from .distributions.distributions import ( DiagonalGaussianDistribution, ) diff --git a/invokeai/backend/ldm/concepts_lib.py b/invokeai/backend/stable_diffusion/concepts_lib.py similarity index 100% rename from invokeai/backend/ldm/concepts_lib.py rename to invokeai/backend/stable_diffusion/concepts_lib.py diff --git a/invokeai/backend/ldm/data/__init__.py b/invokeai/backend/stable_diffusion/data/__init__.py similarity index 100% rename from invokeai/backend/ldm/data/__init__.py rename to invokeai/backend/stable_diffusion/data/__init__.py diff --git a/invokeai/backend/ldm/data/base.py b/invokeai/backend/stable_diffusion/data/base.py similarity index 100% rename from invokeai/backend/ldm/data/base.py rename to invokeai/backend/stable_diffusion/data/base.py diff --git a/invokeai/backend/ldm/data/imagenet.py b/invokeai/backend/stable_diffusion/data/imagenet.py similarity index 100% rename from invokeai/backend/ldm/data/imagenet.py rename to 
invokeai/backend/stable_diffusion/data/imagenet.py diff --git a/invokeai/backend/ldm/data/lsun.py b/invokeai/backend/stable_diffusion/data/lsun.py similarity index 100% rename from invokeai/backend/ldm/data/lsun.py rename to invokeai/backend/stable_diffusion/data/lsun.py diff --git a/invokeai/backend/ldm/data/personalized.py b/invokeai/backend/stable_diffusion/data/personalized.py similarity index 100% rename from invokeai/backend/ldm/data/personalized.py rename to invokeai/backend/stable_diffusion/data/personalized.py diff --git a/invokeai/backend/ldm/data/personalized_style.py b/invokeai/backend/stable_diffusion/data/personalized_style.py similarity index 100% rename from invokeai/backend/ldm/data/personalized_style.py rename to invokeai/backend/stable_diffusion/data/personalized_style.py diff --git a/invokeai/backend/generator/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py similarity index 99% rename from invokeai/backend/generator/diffusers_pipeline.py rename to invokeai/backend/stable_diffusion/diffusers_pipeline.py index e9aa9fb86f..fcc958d61b 100644 --- a/invokeai/backend/generator/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -27,11 +27,10 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from typing_extensions import ParamSpec from ldm.invoke.globals import Globals -from ..ldm.models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings -from ..ldm.modules.textual_inversion_manager import TextualInversionManager -from ..ldm.devices import normalize_device, CPU_DEVICE -from ..ldm.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup -from ..ldm.models.diffusion import AttentionMapSaver +from ..stable_diffusion.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, AttentionMapSaver +from ..stable_diffusion.textual_inversion_manager import TextualInversionManager +from ..stable_diffusion.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup +from ..devices import normalize_device, CPU_DEVICE from compel import EmbeddingsProvider @dataclass diff --git a/invokeai/backend/ldm/models/diffusion/__init__.py b/invokeai/backend/stable_diffusion/diffusion/__init__.py similarity index 100% rename from invokeai/backend/ldm/models/diffusion/__init__.py rename to invokeai/backend/stable_diffusion/diffusion/__init__.py diff --git a/invokeai/backend/ldm/models/diffusion/__init__.py~ b/invokeai/backend/stable_diffusion/diffusion/__init__.py~ similarity index 100% rename from invokeai/backend/ldm/models/diffusion/__init__.py~ rename to invokeai/backend/stable_diffusion/diffusion/__init__.py~ diff --git a/invokeai/backend/ldm/models/diffusion/classifier.py b/invokeai/backend/stable_diffusion/diffusion/classifier.py similarity index 100% rename from invokeai/backend/ldm/models/diffusion/classifier.py rename to invokeai/backend/stable_diffusion/diffusion/classifier.py diff --git a/invokeai/backend/ldm/models/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py similarity index 100% rename from invokeai/backend/ldm/models/diffusion/cross_attention_control.py rename to invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py diff --git a/invokeai/backend/ldm/models/diffusion/cross_attention_map_saving.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py similarity index 100% rename from 
invokeai/backend/ldm/models/diffusion/cross_attention_map_saving.py rename to invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py diff --git a/invokeai/backend/ldm/models/diffusion/ddim.py b/invokeai/backend/stable_diffusion/diffusion/ddim.py similarity index 98% rename from invokeai/backend/ldm/models/diffusion/ddim.py rename to invokeai/backend/stable_diffusion/diffusion/ddim.py index f36e970552..41d6249fc5 100644 --- a/invokeai/backend/ldm/models/diffusion/ddim.py +++ b/invokeai/backend/stable_diffusion/diffusion/ddim.py @@ -3,7 +3,7 @@ import torch from .shared_invokeai_diffusion import InvokeAIDiffuserComponent from .sampler import Sampler -from ...modules.diffusionmodules.util import noise_like +from ..diffusionmodules.util import noise_like class DDIMSampler(Sampler): def __init__(self, model, schedule='linear', device=None, **kwargs): diff --git a/invokeai/backend/ldm/models/diffusion/ddpm.py b/invokeai/backend/stable_diffusion/diffusion/ddpm.py similarity index 99% rename from invokeai/backend/ldm/models/diffusion/ddpm.py rename to invokeai/backend/stable_diffusion/diffusion/ddpm.py index f9e9485dd3..deba4ebaf1 100644 --- a/invokeai/backend/ldm/models/diffusion/ddpm.py +++ b/invokeai/backend/stable_diffusion/diffusion/ddpm.py @@ -22,7 +22,7 @@ from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig import urllib -from ...modules.textual_inversion_manager import TextualInversionManager +from ..textual_inversion_manager import TextualInversionManager from ...util import ( log_txt_as_img, exists, @@ -33,8 +33,8 @@ from ...util import ( count_params, instantiate_from_config, ) -from ...modules.ema import LitEma -from ...modules.distributions.distributions import ( +from ..ema import LitEma +from ..distributions.distributions import ( normal_kl, DiagonalGaussianDistribution, ) @@ -43,7 +43,7 @@ from ..autoencoder import ( IdentityFirstStage, AutoencoderKL, ) -from ...modules.diffusionmodules.util import ( +from ..diffusionmodules.util import ( make_beta_schedule, extract_into_tensor, noise_like, diff --git a/invokeai/backend/ldm/models/diffusion/ksampler.py b/invokeai/backend/stable_diffusion/diffusion/ksampler.py similarity index 100% rename from invokeai/backend/ldm/models/diffusion/ksampler.py rename to invokeai/backend/stable_diffusion/diffusion/ksampler.py diff --git a/invokeai/backend/ldm/models/diffusion/plms.py b/invokeai/backend/stable_diffusion/diffusion/plms.py similarity index 99% rename from invokeai/backend/ldm/models/diffusion/plms.py rename to invokeai/backend/stable_diffusion/diffusion/plms.py index e0a187e900..2dfead482b 100644 --- a/invokeai/backend/ldm/models/diffusion/plms.py +++ b/invokeai/backend/stable_diffusion/diffusion/plms.py @@ -7,7 +7,7 @@ from functools import partial from ...devices import choose_torch_device from .shared_invokeai_diffusion import InvokeAIDiffuserComponent from .sampler import Sampler -from ...modules.diffusionmodules.util import noise_like +from ..diffusionmodules.util import noise_like class PLMSSampler(Sampler): diff --git a/invokeai/backend/ldm/models/diffusion/sampler.py b/invokeai/backend/stable_diffusion/diffusion/sampler.py similarity index 99% rename from invokeai/backend/ldm/models/diffusion/sampler.py rename to invokeai/backend/stable_diffusion/diffusion/sampler.py index bd9d8c157b..656897ee31 100644 --- a/invokeai/backend/ldm/models/diffusion/sampler.py +++ b/invokeai/backend/stable_diffusion/diffusion/sampler.py @@ -10,7 +10,7 @@ from functools import partial 
from ...devices import choose_torch_device from .shared_invokeai_diffusion import InvokeAIDiffuserComponent -from ...modules.diffusionmodules.util import ( +from ..diffusionmodules.util import ( make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, diff --git a/invokeai/backend/ldm/models/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py similarity index 100% rename from invokeai/backend/ldm/models/diffusion/shared_invokeai_diffusion.py rename to invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py diff --git a/invokeai/backend/ldm/modules/__init__.py b/invokeai/backend/stable_diffusion/diffusionmodules/__init__.py similarity index 100% rename from invokeai/backend/ldm/modules/__init__.py rename to invokeai/backend/stable_diffusion/diffusionmodules/__init__.py diff --git a/invokeai/backend/ldm/modules/diffusionmodules/model.py b/invokeai/backend/stable_diffusion/diffusionmodules/model.py similarity index 100% rename from invokeai/backend/ldm/modules/diffusionmodules/model.py rename to invokeai/backend/stable_diffusion/diffusionmodules/model.py diff --git a/invokeai/backend/ldm/modules/diffusionmodules/openaimodel.py b/invokeai/backend/stable_diffusion/diffusionmodules/openaimodel.py similarity index 100% rename from invokeai/backend/ldm/modules/diffusionmodules/openaimodel.py rename to invokeai/backend/stable_diffusion/diffusionmodules/openaimodel.py diff --git a/invokeai/backend/ldm/modules/diffusionmodules/util.py b/invokeai/backend/stable_diffusion/diffusionmodules/util.py similarity index 100% rename from invokeai/backend/ldm/modules/diffusionmodules/util.py rename to invokeai/backend/stable_diffusion/diffusionmodules/util.py diff --git a/invokeai/backend/ldm/modules/diffusionmodules/__init__.py b/invokeai/backend/stable_diffusion/distributions/__init__.py similarity index 100% rename from invokeai/backend/ldm/modules/diffusionmodules/__init__.py rename to invokeai/backend/stable_diffusion/distributions/__init__.py diff --git a/invokeai/backend/ldm/modules/distributions/distributions.py b/invokeai/backend/stable_diffusion/distributions/distributions.py similarity index 100% rename from invokeai/backend/ldm/modules/distributions/distributions.py rename to invokeai/backend/stable_diffusion/distributions/distributions.py diff --git a/invokeai/backend/ldm/modules/ema.py b/invokeai/backend/stable_diffusion/ema.py similarity index 100% rename from invokeai/backend/ldm/modules/ema.py rename to invokeai/backend/stable_diffusion/ema.py diff --git a/invokeai/backend/ldm/modules/distributions/__init__.py b/invokeai/backend/stable_diffusion/encoders/__init__.py similarity index 100% rename from invokeai/backend/ldm/modules/distributions/__init__.py rename to invokeai/backend/stable_diffusion/encoders/__init__.py diff --git a/invokeai/backend/ldm/modules/encoders/modules.py b/invokeai/backend/stable_diffusion/encoders/modules.py similarity index 100% rename from invokeai/backend/ldm/modules/encoders/modules.py rename to invokeai/backend/stable_diffusion/encoders/modules.py diff --git a/invokeai/backend/ldm/modules/image_degradation/__init__.py b/invokeai/backend/stable_diffusion/image_degradation/__init__.py similarity index 100% rename from invokeai/backend/ldm/modules/image_degradation/__init__.py rename to invokeai/backend/stable_diffusion/image_degradation/__init__.py diff --git a/invokeai/backend/ldm/modules/image_degradation/bsrgan.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py 
similarity index 100% rename from invokeai/backend/ldm/modules/image_degradation/bsrgan.py rename to invokeai/backend/stable_diffusion/image_degradation/bsrgan.py diff --git a/invokeai/backend/ldm/modules/image_degradation/bsrgan_light.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py similarity index 100% rename from invokeai/backend/ldm/modules/image_degradation/bsrgan_light.py rename to invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py diff --git a/invokeai/backend/ldm/modules/image_degradation/utils/test.png b/invokeai/backend/stable_diffusion/image_degradation/utils/test.png similarity index 100% rename from invokeai/backend/ldm/modules/image_degradation/utils/test.png rename to invokeai/backend/stable_diffusion/image_degradation/utils/test.png diff --git a/invokeai/backend/ldm/modules/image_degradation/utils_image.py b/invokeai/backend/stable_diffusion/image_degradation/utils_image.py similarity index 100% rename from invokeai/backend/ldm/modules/image_degradation/utils_image.py rename to invokeai/backend/stable_diffusion/image_degradation/utils_image.py diff --git a/invokeai/backend/ldm/modules/losses/__init__.py b/invokeai/backend/stable_diffusion/losses/__init__.py similarity index 100% rename from invokeai/backend/ldm/modules/losses/__init__.py rename to invokeai/backend/stable_diffusion/losses/__init__.py diff --git a/invokeai/backend/ldm/modules/losses/contperceptual.py b/invokeai/backend/stable_diffusion/losses/contperceptual.py similarity index 100% rename from invokeai/backend/ldm/modules/losses/contperceptual.py rename to invokeai/backend/stable_diffusion/losses/contperceptual.py diff --git a/invokeai/backend/ldm/modules/losses/vqperceptual.py b/invokeai/backend/stable_diffusion/losses/vqperceptual.py similarity index 100% rename from invokeai/backend/ldm/modules/losses/vqperceptual.py rename to invokeai/backend/stable_diffusion/losses/vqperceptual.py diff --git a/invokeai/backend/ldm/modules/encoders/__init__.py b/invokeai/backend/stable_diffusion/modules/__init__.py similarity index 100% rename from invokeai/backend/ldm/modules/encoders/__init__.py rename to invokeai/backend/stable_diffusion/modules/__init__.py diff --git a/invokeai/backend/ldm/offloading.py b/invokeai/backend/stable_diffusion/offloading.py similarity index 100% rename from invokeai/backend/ldm/offloading.py rename to invokeai/backend/stable_diffusion/offloading.py diff --git a/invokeai/backend/ldm/modules/textual_inversion_manager.py b/invokeai/backend/stable_diffusion/textual_inversion_manager.py similarity index 99% rename from invokeai/backend/ldm/modules/textual_inversion_manager.py rename to invokeai/backend/stable_diffusion/textual_inversion_manager.py index 2c5721ea7b..e45ea7362b 100644 --- a/invokeai/backend/ldm/modules/textual_inversion_manager.py +++ b/invokeai/backend/stable_diffusion/textual_inversion_manager.py @@ -9,7 +9,7 @@ from picklescan.scanner import scan_file_path from transformers import CLIPTextModel, CLIPTokenizer from compel.embeddings_provider import BaseTextualInversionManager -from ..concepts_lib import HuggingFaceConceptsLibrary +from .concepts_lib import HuggingFaceConceptsLibrary @dataclass diff --git a/invokeai/backend/ldm/modules/x_transformer.py b/invokeai/backend/stable_diffusion/x_transformer.py similarity index 100% rename from invokeai/backend/ldm/modules/x_transformer.py rename to invokeai/backend/stable_diffusion/x_transformer.py diff --git a/invokeai/backend/ldm/util.py b/invokeai/backend/util.py similarity index 96% rename 
from invokeai/backend/ldm/util.py rename to invokeai/backend/util.py index 0aac014ef9..2bd24d3ef0 100644 --- a/invokeai/backend/ldm/util.py +++ b/invokeai/backend/util.py @@ -17,7 +17,6 @@ from tqdm import tqdm from .devices import torch_dtype - def log_txt_as_img(wh, xc, size=10): # wh a tuple of (width, height) # xc a list of captions to plot @@ -280,22 +279,6 @@ def ask_user(question: str, answers: list): return valid_response -def debug_image( - debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False -): - if not debug_status: - return - - image_copy = debug_image.copy().convert("RGBA") - ImageDraw.Draw(image_copy).text((5, 5), debug_text, (255, 0, 0)) - - if debug_show: - image_copy.show() - - if debug_result: - return image_copy - - # ------------------------------------- def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path: ''' diff --git a/ldm/invoke/__init__.py b/invokeai/version/__init__.py similarity index 58% rename from ldm/invoke/__init__.py rename to invokeai/version/__init__.py index b643e80240..805834bb9a 100644 --- a/ldm/invoke/__init__.py +++ b/invokeai/version/__init__.py @@ -1,8 +1,15 @@ -from ._version import __version__ - +''' +initialization file for invokeai +''' +import invokeai +from .invokeai_version import __version__ __app_id__= 'invoke-ai/InvokeAI' __app_name__= 'InvokeAI' +# copy these attributes into the invokeai namespace +setattr(invokeai,'__version__',__version__) +setattr(invokeai,'__app_id__',__app_id__) +setattr(invokeai,'__app_name__',__app_name__) def _ignore_xformers_triton_message_on_windows(): import logging diff --git a/ldm/invoke/_version.py b/invokeai/version/invokeai_version.py similarity index 100% rename from ldm/invoke/_version.py rename to invokeai/version/invokeai_version.py diff --git a/ldm/generate.py b/ldm/generate.py index 536a7f2df8..4bce177258 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -25,19 +25,17 @@ from omegaconf import OmegaConf from PIL import Image, ImageOps from pytorch_lightning import logging, seed_everything -import invokeai.backend.ldm.conditioning - -from invokeai.backend.ldm.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler) +from invokeai.backend import ModelManager +from invokeai.backend.prompting import get_uc_and_c_and_ec +from invokeai.backend.stable_diffusion import (DDIMSampler, KSampler, PLMSSampler) from invokeai.backend.generator import infill_methods -from ldm.invoke.args import metadata_from_png -from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary -from invokeai.backend.ldm.conditioning import get_uc_and_c_and_ec -from invokeai.backend.ldm.devices import choose_precision, choose_torch_device +from invokeai.backend.stable_diffusion.concepts_lib import HuggingFaceConceptsLibrary +from invokeai.backend.devices import choose_precision, choose_torch_device +from invokeai.backend.image_util import InitImageResizer, PngWriter, Txt2Mask + from ldm.invoke.globals import Globals, global_cache_dir -from ldm.invoke.image_util import InitImageResizer -from ldm.invoke.pngwriter import PngWriter +from ldm.invoke.args import metadata_from_png from ldm.invoke.seamless import configure_model_padding -from ldm.invoke.txt2mask import Txt2Mask def fix_func(orig): if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 16f64e3bd1..80442cffd0 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -16,19 +16,18 @@ if sys.platform == "darwin": import 
pyparsing # type: ignore -import ldm.invoke +import invokeai.version from ..generate import Generate from .args import (Args, dream_cmd_from_png, metadata_dumps, metadata_from_png) -from invokeai.backend.generator import PipelineIntermediateState +from invokeai.backend.stable_diffusion import PipelineIntermediateState +from invokeai.backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata +from invokeai.backend import ModelManager from .globals import Globals -from .image_util import make_grid from .log import write_log -from invokeai.backend.ldm.models import ModelManager -from .pngwriter import PngWriter, retrieve_metadata, write_metadata from .readline import Completer, get_completer -from invokeai.backend.ldm.util import url_attachment_name +from invokeai.backend.util import url_attachment_name # global used in multiple functions (fix) infile = None @@ -75,7 +74,7 @@ def main(): opt, FileNotFoundError(f"The file {config_file} could not be found.") ) - print(f">> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}") + print(f">> {invokeai.__app_name__}, version {invokeai.__version__}") print(f'>> InvokeAI runtime directory is "{Globals.root}"') # loading here to avoid long delays on startup diff --git a/ldm/invoke/app/services/image_storage.py b/ldm/invoke/app/services/image_storage.py index 03227d870b..fa6a85dfbe 100644 --- a/ldm/invoke/app/services/image_storage.py +++ b/ldm/invoke/app/services/image_storage.py @@ -8,7 +8,7 @@ from pathlib import Path from queue import Queue from typing import Dict from PIL.Image import Image -from ...pngwriter import PngWriter +from invokeai.backend.image_util import PngWriter class ImageType(str, Enum): diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py index 17bb8005cb..d661409d18 100644 --- a/ldm/invoke/args.py +++ b/ldm/invoke/args.py @@ -95,15 +95,15 @@ from argparse import Namespace from pathlib import Path from typing import List -import ldm.invoke -import ldm.invoke.pngwriter -from invokeai.backend.ldm.conditioning import split_weighted_subprompts +from invokeai.backend.image_util import retrieve_metadata +import invokeai.version from ldm.invoke.globals import Globals +from invokeai.backend.prompting import split_weighted_subprompts -APP_ID = ldm.invoke.__app_id__ -APP_NAME = ldm.invoke.__app_name__ -APP_VERSION = ldm.invoke.__version__ +APP_ID = invokeai.version.__app_id__ +APP_NAME = invokeai.version.__app_name__ +APP_VERSION = invokeai.version.__version__ SAMPLER_CHOICES = [ 'ddim', @@ -182,7 +182,7 @@ class Args(object): # and intercept --version request switches = self._arg_parser.parse_args(sysargs) if switches.version: - print(f'{ldm.invoke.__app_name__} {ldm.invoke.__version__}') + print(f'{APP_NAME} {APP_VERSION}') sys.exit(0) print('* Initializing, be patient...') @@ -1170,8 +1170,8 @@ def metadata_dumps(opt, 'model' : 'stable diffusion', 'model_id' : opt.model, 'model_hash' : model_hash, - 'app_id' : ldm.invoke.__app_id__, - 'app_version' : ldm.invoke.__version__, + 'app_id' : APP_ID, + 'app_version' : APP_VERSION, } # # add some RFC266 fields that are generated internally, and not as @@ -1242,7 +1242,7 @@ def args_from_png(png_file_path) -> list[Args]: data. 
''' try: - meta = ldm.invoke.pngwriter.retrieve_metadata(png_file_path) + meta = retrieve_metadata(png_file_path) except AttributeError: return [legacy_metadata_load({},png_file_path)] diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py index 9dd22d2ac7..bdffe61a55 100644 --- a/ldm/invoke/config/model_install.py +++ b/ldm/invoke/config/model_install.py @@ -22,7 +22,7 @@ from npyscreen import widget from omegaconf import OmegaConf from shutil import get_terminal_size -from invokeai.backend.ldm.devices import choose_precision, choose_torch_device +from invokeai.backend.devices import choose_precision, choose_torch_device from ..globals import Globals, global_config_dir from .model_install_backend import (Dataset_path, default_config_file, default_dataset, get_root, diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index c50e5ebc3c..d281ffbd73 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -18,9 +18,9 @@ from tqdm import tqdm from typing import List import invokeai.configs as configs -from invokeai.backend.generator import StableDiffusionGeneratorPipeline +from invokeai.backend.stable_diffusion import StableDiffusionGeneratorPipeline from ..globals import Globals, global_cache_dir, global_config_dir -from invokeai.backend.ldm.models import ModelManager +from invokeai.backend import ModelManager warnings.filterwarnings("ignore") diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py index 28abb5d432..10496d5b57 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/ldm/invoke/merge_diffusers.py @@ -23,7 +23,7 @@ from omegaconf import OmegaConf from ldm.invoke.config.widgets import FloatTitleSlider from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file, global_models_dir, global_set_root) -from invokeai.backend.ldm.models import ModelManager +from invokeai.backend import ModelManager DEST_MERGED_MODEL_DIR = "merged_models" diff --git a/ldm/invoke/readline.py b/ldm/invoke/readline.py index a605358a9b..7fe74fc953 100644 --- a/ldm/invoke/readline.py +++ b/ldm/invoke/readline.py @@ -12,8 +12,8 @@ import os import re import atexit from ldm.invoke.args import Args -from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary from ldm.invoke.globals import Globals +from invokeai.backend.stable_diffusion import HuggingFaceConceptsLibrary # ---------------readline utilities--------------------- try: diff --git a/pyproject.toml b/pyproject.toml index 22e6f9282c..3e219172cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -125,15 +125,15 @@ dependencies = [ "Discord" = "https://discord.gg/ZmtBAhwWhy" [tool.setuptools.dynamic] -version = { attr = "ldm.invoke.__version__" } +version = { attr = "invokeai.version.__version__" } [tool.setuptools.packages.find] "where" = ["."] "include" = [ - "invokeai.assets.web*", + "invokeai.assets.web*","invokeai.version*", "invokeai.generator*","invokeai.backend*", "invokeai.frontend.dist*", "invokeai.configs*", - "ldm*" + "ldm*", ] [tool.setuptools.package-data] diff --git a/tests/test_textual_inversion.py b/tests/test_textual_inversion.py index 54b396489e..53d2f2bfe6 100644 --- a/tests/test_textual_inversion.py +++ b/tests/test_textual_inversion.py @@ -4,7 +4,7 @@ from typing import Union import torch -from ldm.modules.textual_inversion_manager import TextualInversionManager +from invokeai.backend.stable_diffusion import TextualInversionManager KNOWN_WORDS = ['a', 'b', 'c'] 
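Note on the resulting import layout: with this patch the frequently-used backend classes are imported from the reorganized invokeai.backend packages rather than the old ldm.* namespaces. Below is a minimal sketch of what calling code looks like at this point in the series, using only import paths that appear literally in the hunks above; the exact set of re-exported names is an assumption, and the next patch shuffles some of these modules again (e.g. model_manager.py into model_management/ and devices.py into util/).

    # Sketch only -- import paths copied from the hunks above, not a guaranteed public API.
    import invokeai.version
    from invokeai.backend import ModelManager
    from invokeai.backend.prompting import get_uc_and_c_and_ec
    from invokeai.backend.stable_diffusion import (
        DDIMSampler,
        KSampler,
        PLMSSampler,
        PipelineIntermediateState,
    )
    from invokeai.backend.image_util import PngWriter, retrieve_metadata
    from invokeai.backend.util import url_attachment_name

    # Version metadata now lives in invokeai.version and is also mirrored
    # onto the bare invokeai namespace by invokeai/version/__init__.py.
    APP_VERSION = invokeai.version.__version__
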
From 6a990565ff6e93bfe6939ea5f239612f66f258b7 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 00:02:15 -0500 Subject: [PATCH 11/19] all files migrated; tweaks needed --- .gitignore | 2 +- .../app/api/dependencies.py | 0 {ldm/invoke => invokeai}/app/api/events.py | 0 .../app/api/routers/images.py | 0 .../app/api/routers/sessions.py | 0 {ldm/invoke => invokeai}/app/api/sockets.py | 0 {ldm/invoke => invokeai}/app/api_app.py | 0 {ldm/invoke => invokeai}/app/cli_app.py | 0 .../app/invocations/__init__.py | 0 .../app/invocations/baseinvocation.py | 0 .../invoke => invokeai}/app/invocations/cv.py | 0 .../app/invocations/generate.py | 0 .../app/invocations/image.py | 0 .../app/invocations/prompt.py | 0 .../app/invocations/reconstruct.py | 0 .../app/invocations/upscale.py | 0 .../modules => app/services}/__init__.py | 0 .../app/services/events.py | 0 .../app/services/generate_initializer.py | 9 +- .../invoke => invokeai}/app/services/graph.py | 0 .../app/services/image_storage.py | 0 .../app/services/invocation_queue.py | 0 .../app/services/invocation_services.py | 3 +- .../app/services/invoker.py | 0 .../app/services/item_storage.py | 0 .../app/services/processor.py | 0 .../app/services/sqlite.py | 0 invokeai/backend/__init__.py | 6 +- {ldm/invoke => invokeai/backend}/args.py | 4 +- {ldm => invokeai/backend/config}/__init__.py | 0 .../backend}/config/invokeai_configure.py | 4 +- .../backend}/config/model_install_backend.py | 4 +- {ldm => invokeai/backend}/generate.py | 21 +- invokeai/backend/generator/base.py | 2 +- {ldm/invoke => invokeai/backend}/globals.py | 2 +- invokeai/backend/image_util/__init__.py | 1 + invokeai/backend/image_util/patchmatch.py | 2 +- .../backend/image_util}/seamless.py | 0 invokeai/backend/image_util/txt2mask.py | 2 +- invokeai/backend/model_management/__init__.py | 8 + .../convert_ckpt_to_diffusers.py | 8 +- .../{ => model_management}/model_manager.py | 160 ++++------ invokeai/backend/prompting/conditioning.py | 4 +- .../backend}/restoration/__init__.py | 0 .../backend}/restoration/base.py | 6 +- .../backend}/restoration/codeformer.py | 2 +- .../backend}/restoration/codeformer_arch.py | 3 +- .../backend}/restoration/gfpgan.py | 2 +- .../backend}/restoration/outcrop.py | 0 .../backend}/restoration/outpaint.py | 0 .../backend}/restoration/realesrgan.py | 2 +- .../backend}/restoration/vqgan_arch.py | 0 .../backend/stable_diffusion/concepts_lib.py | 2 +- .../stable_diffusion/diffusers_pipeline.py | 10 +- .../diffusion/cross_attention_control.py | 2 +- .../stable_diffusion/diffusion/ddpm.py | 2 +- .../stable_diffusion/diffusion/plms.py | 2 +- .../stable_diffusion/diffusion/sampler.py | 2 +- .../diffusion/shared_invokeai_diffusion.py | 2 +- .../stable_diffusion/diffusionmodules/util.py | 2 +- .../stable_diffusion/encoders/modules.py | 2 +- invokeai/backend/training/__init.py__ | 4 + .../training/textual_inversion_training.py | 2 +- invokeai/backend/util/__init__.py | 18 ++ invokeai/backend/{ => util}/devices.py | 4 +- {ldm/invoke => invokeai/backend/util}/log.py | 0 invokeai/backend/{ => util}/util.py | 0 invokeai/backend/web/__init__.py | 4 + .../backend/{ => web}/invoke_ai_web_server.py | 32 +- .../backend/web/modules}/__init__.py | 0 .../{ => web}/modules/create_cmd_parser.py | 0 .../modules/get_canvas_generation_mode.py | 0 .../backend/{ => web}/modules/parameters.py | 2 +- .../{ => web}/modules/parse_seed_weights.py | 0 .../init-img_full_transparency.png | Bin .../modules/test_images/init-img_opaque.png | Bin .../init-img_partial_transparency.png | Bin 
.../test_images/init-mask_has_mask.png | Bin .../modules/test_images/init-mask_no_mask.png | Bin {ldm/invoke => invokeai/frontend/CLI}/CLI.py | 25 +- invokeai/frontend/CLI/__init__.py | 4 + .../frontend/CLI}/readline.py | 6 +- invokeai/frontend/__init__.py | 3 + invokeai/frontend/config/__init__.py | 7 + .../frontend/config/invokeai_configure.py | 4 + .../frontend}/config/invokeai_update.py | 2 +- .../frontend}/config/model_install.py | 14 +- .../frontend}/config/widgets.py | 0 invokeai/frontend/merge/__init__.py | 4 + .../frontend/merge}/merge_diffusers.py | 14 +- invokeai/frontend/training/__init__.py | 5 + .../frontend}/training/textual_inversion.py | 4 +- invokeai/frontend/{ => web}/.babelrc | 0 invokeai/frontend/{ => web}/.eslintignore | 0 invokeai/frontend/{ => web}/.eslintrc.js | 0 invokeai/frontend/{ => web}/.gitignore | 0 invokeai/frontend/{ => web}/.husky/pre-commit | 0 invokeai/frontend/{ => web}/.prettierignore | 0 invokeai/frontend/{ => web}/.prettierrc.js | 0 .../{ => web}/.yarn/releases/yarn-1.22.19.cjs | 0 invokeai/frontend/{ => web}/.yarnrc | 0 invokeai/frontend/{ => web}/.yarnrc.yml | 0 invokeai/frontend/{ => web}/README.md | 0 invokeai/frontend/web/__init__.py | 3 + .../dist/assets/Inter-Bold-790c108b.ttf | Bin .../{ => web}/dist/assets/Inter-b9a8e5e2.ttf | Bin .../dist/assets/favicon-0d253ced.ico | Bin .../{ => web}/dist/assets/index-0e39fbc4.js | 0 .../{ => web}/dist/assets/index-14cb2922.css | 0 .../{ => web}/dist/assets/logo-13003d72.png | Bin invokeai/frontend/{ => web}/dist/index.html | 0 .../frontend/{ => web}/dist/locales/ar.json | 0 .../frontend/{ => web}/dist/locales/de.json | 0 .../frontend/{ => web}/dist/locales/en.json | 0 .../frontend/{ => web}/dist/locales/es.json | 0 .../frontend/{ => web}/dist/locales/fr.json | 0 .../frontend/{ => web}/dist/locales/it.json | 0 .../frontend/{ => web}/dist/locales/ja.json | 0 .../frontend/{ => web}/dist/locales/nl.json | 0 .../frontend/{ => web}/dist/locales/pl.json | 0 .../{ => web}/dist/locales/pt_BR.json | 0 .../frontend/{ => web}/dist/locales/ru.json | 0 .../frontend/{ => web}/dist/locales/uk.json | 0 .../{ => web}/dist/locales/zh_CN.json | 0 invokeai/frontend/{ => web}/favicon.ico | Bin invokeai/frontend/{ => web}/index.d.ts | 0 invokeai/frontend/{ => web}/index.html | 0 invokeai/frontend/{ => web}/package.json | 0 .../patches/redux-deep-persist+1.0.7.patch | 0 .../patches/redux-persist+6.0.0.patch | 0 .../frontend/{ => web}/public/locales/ar.json | 0 .../frontend/{ => web}/public/locales/de.json | 0 .../frontend/{ => web}/public/locales/en.json | 0 .../frontend/{ => web}/public/locales/es.json | 0 .../frontend/{ => web}/public/locales/fr.json | 0 .../frontend/{ => web}/public/locales/it.json | 0 .../frontend/{ => web}/public/locales/ja.json | 0 .../frontend/{ => web}/public/locales/nl.json | 0 .../frontend/{ => web}/public/locales/pl.json | 0 .../{ => web}/public/locales/pt_BR.json | 0 .../frontend/{ => web}/public/locales/ro.json | 0 .../frontend/{ => web}/public/locales/ru.json | 0 .../frontend/{ => web}/public/locales/uk.json | 0 .../{ => web}/public/locales/zh_CN.json | 0 invokeai/frontend/{ => web}/src/Loading.tsx | 0 invokeai/frontend/{ => web}/src/app/App.scss | 0 invokeai/frontend/{ => web}/src/app/App.tsx | 0 .../frontend/{ => web}/src/app/constants.ts | 0 .../contexts/ImageUploaderTriggerContext.ts | 0 .../frontend/{ => web}/src/app/features.ts | 0 .../frontend/{ => web}/src/app/invokeai.d.ts | 0 .../src/app/selectors/readinessSelector.ts | 0 .../{ => web}/src/app/socketio/actions.ts | 0 .../{ => 
web}/src/app/socketio/emitters.ts | 0 .../{ => web}/src/app/socketio/listeners.ts | 0 .../{ => web}/src/app/socketio/middleware.ts | 0 invokeai/frontend/{ => web}/src/app/store.ts | 0 .../frontend/{ => web}/src/app/storeHooks.ts | 0 invokeai/frontend/{ => web}/src/app/theme.ts | 0 invokeai/frontend/{ => web}/src/app/utils.ts | 0 .../src/assets/fonts/Inter/Inter-Bold.ttf | Bin .../src/assets/fonts/Inter/Inter.ttf | Bin .../{ => web}/src/assets/images/image2img.png | Bin .../{ => web}/src/assets/images/logo.png | Bin .../{ => web}/src/assets/images/mask.afdesign | Bin .../{ => web}/src/assets/images/mask.svg | 0 .../src/common/components/GuideIcon.tsx | 0 .../src/common/components/GuidePopover.scss | 0 .../src/common/components/GuidePopover.tsx | 0 .../src/common/components/IAIAlertDialog.tsx | 0 .../src/common/components/IAIButton.scss | 0 .../src/common/components/IAIButton.tsx | 0 .../src/common/components/IAICheckbox.scss | 0 .../src/common/components/IAICheckbox.tsx | 0 .../src/common/components/IAIColorPicker.scss | 0 .../src/common/components/IAIColorPicker.tsx | 0 .../src/common/components/IAIIconButton.scss | 0 .../src/common/components/IAIIconButton.tsx | 0 .../src/common/components/IAIInput.scss | 0 .../src/common/components/IAIInput.tsx | 0 .../src/common/components/IAINumberInput.scss | 0 .../src/common/components/IAINumberInput.tsx | 0 .../src/common/components/IAIPopover.scss | 0 .../src/common/components/IAIPopover.tsx | 0 .../src/common/components/IAISelect.scss | 0 .../src/common/components/IAISelect.tsx | 0 .../src/common/components/IAISimpleMenu.tsx | 0 .../src/common/components/IAISlider.scss | 0 .../src/common/components/IAISlider.tsx | 0 .../src/common/components/IAISwitch.scss | 0 .../src/common/components/IAISwitch.tsx | 0 .../common/components/ImageUploadOverlay.tsx | 0 .../src/common/components/ImageUploader.scss | 0 .../src/common/components/ImageUploader.tsx | 0 .../common/components/ImageUploaderButton.tsx | 0 .../components/ImageUploaderIconButton.tsx | 0 .../src/common/components/SubItemHook.tsx | 0 .../components/WorkInProgress/NodesWIP.tsx | 0 .../WorkInProgress/PostProcessingWIP.tsx | 0 .../components/WorkInProgress/Training.tsx | 0 .../WorkInProgress/WorkInProgress.scss | 0 .../common/components/radix-ui/IAISlider.scss | 0 .../common/components/radix-ui/IAISlider.tsx | 0 .../components/radix-ui/IAITooltip.scss | 0 .../common/components/radix-ui/IAITooltip.tsx | 0 .../common/hooks/useClickOutsideWatcher.ts | 0 .../src/common/hooks/useImageUploader.ts | 0 .../common/hooks/useSingleAndDoubleClick.ts | 0 .../src/common/hooks/useUpdateTranslations.ts | 0 .../src/common/icons/ImageToImageIcon.tsx | 0 .../src/common/icons/InpaintIcon.tsx | 0 .../{ => web}/src/common/icons/NodesIcon.tsx | 0 .../src/common/icons/OutpaintIcon.tsx | 0 .../src/common/icons/PostprocessingIcon.tsx | 0 .../src/common/icons/TextToImageIcon.tsx | 0 .../src/common/icons/TrainingIcon.tsx | 0 .../src/common/icons/UnifiedCanvas.afdesign | Bin .../src/common/icons/UnifiedCanvasIcon.tsx | 0 .../icons/design_files/BaseImage.afdesign | Bin .../icons/design_files/ImageToImage.afdesign | Bin .../icons/design_files/ImageToImage.svg | 0 .../icons/design_files/Inpaint.afdesign | Bin .../src/common/icons/design_files/Inpaint.svg | 0 .../common/icons/design_files/Nodes.afdesign | Bin .../src/common/icons/design_files/Nodes.svg | 0 .../icons/design_files/Outpaint.afdesign | Bin .../common/icons/design_files/Outpaint.svg | 0 .../design_files/Postprocessing.afdesign | Bin 
.../icons/design_files/Postprocessing.svg | 0 .../icons/design_files/TextToImage.afdesign | Bin .../common/icons/design_files/TextToImage.svg | 0 .../icons/design_files/Training.afdesign | Bin .../common/icons/design_files/Training.svg | 0 .../icons/design_files/UnifiedCanvas.afdesign | Bin .../icons/design_files/UnifiedCanvas.svg | 0 .../src/common/util/getPromptAndNegative.ts | 0 .../src/common/util/openBase64ImageInTab.ts | 0 .../src/common/util/parameterTranslation.ts | 0 .../src/common/util/promptToString.ts | 0 .../{ => web}/src/common/util/randomInt.ts | 0 .../src/common/util/roundDownToMultiple.ts | 0 .../src/common/util/seedWeightPairs.ts | 0 .../ClearCanvasHistoryButtonModal.tsx | 0 .../features/canvas/components/IAICanvas.tsx | 0 .../IAICanvasBoundingBoxOverlay.tsx | 0 .../canvas/components/IAICanvasGrid.tsx | 0 .../canvas/components/IAICanvasImage.tsx | 0 .../components/IAICanvasIntermediateImage.tsx | 0 .../components/IAICanvasMaskCompositer.tsx | 0 .../canvas/components/IAICanvasMaskLines.tsx | 0 .../components/IAICanvasObjectRenderer.tsx | 0 .../canvas/components/IAICanvasResizer.tsx | 0 .../components/IAICanvasStagingArea.tsx | 0 .../IAICanvasStagingAreaToolbar.tsx | 0 .../canvas/components/IAICanvasStatusText.tsx | 0 .../IAICanvasStatusTextCursorPos.tsx | 0 .../components/IAICanvasToolPreview.tsx | 0 .../IAICanvasToolbar/IAICanvasBoundingBox.tsx | 0 .../IAICanvasToolbar/IAICanvasMaskOptions.tsx | 0 .../IAICanvasToolbar/IAICanvasRedoButton.tsx | 0 .../IAICanvasSettingsButtonPopover.tsx | 0 .../IAICanvasToolChooserOptions.tsx | 0 .../IAICanvasToolbar/IAICanvasToolbar.tsx | 0 .../IAICanvasToolbar/IAICanvasUndoButton.tsx | 0 .../canvas/hooks/useCanvasDragMove.ts | 0 .../features/canvas/hooks/useCanvasHotkeys.ts | 0 .../canvas/hooks/useCanvasMouseDown.ts | 0 .../canvas/hooks/useCanvasMouseMove.ts | 0 .../canvas/hooks/useCanvasMouseOut.ts | 0 .../features/canvas/hooks/useCanvasMouseUp.ts | 0 .../features/canvas/hooks/useCanvasZoom.ts | 0 .../canvas/hooks/useColorUnderCursor.ts | 0 .../features/canvas/store/canvasSelectors.ts | 0 .../src/features/canvas/store/canvasSlice.ts | 0 .../src/features/canvas/store/canvasTypes.ts | 0 .../store/thunks/mergeAndUploadCanvas.ts | 0 .../canvas/util/calculateCoordinates.ts | 0 .../features/canvas/util/calculateScale.ts | 0 .../src/features/canvas/util/colorToString.ts | 0 .../src/features/canvas/util/constants.ts | 0 .../src/features/canvas/util/copyImage.ts | 0 .../src/features/canvas/util/downloadFile.ts | 0 .../features/canvas/util/floorCoordinates.ts | 0 .../src/features/canvas/util/generateMask.ts | 0 .../util/getScaledBoundingBoxDimensions.ts | 0 .../canvas/util/getScaledCursorPosition.ts | 0 .../canvas/util/konvaInstanceProvider.ts | 0 .../features/canvas/util/layerToDataURL.ts | 0 .../canvas/util/roundDimensionsTo64.ts | 0 .../features/canvas/util/roundToHundreth.ts | 0 .../components/CurrentImageButtons.scss | 0 .../components/CurrentImageButtons.tsx | 0 .../components/CurrentImageDisplay.scss | 0 .../components/CurrentImageDisplay.tsx | 0 .../components/CurrentImagePreview.tsx | 0 .../gallery/components/DeleteImageModal.tsx | 0 .../gallery/components/HoverableImage.scss | 0 .../gallery/components/HoverableImage.tsx | 0 .../gallery/components/ImageGallery.scss | 0 .../gallery/components/ImageGallery.tsx | 0 .../ImageMetadataViewer.scss | 0 .../ImageMetadataViewer.tsx | 0 .../gallery/hooks/useGetImageByUuid.ts | 0 .../gallery/store/gallerySelectors.ts | 0 .../features/gallery/store/gallerySlice.ts | 0 
.../gallery/store/thunks/uploadImage.ts | 0 .../lightbox/components/Lightbox.scss | 0 .../features/lightbox/components/Lightbox.tsx | 0 .../lightbox/components/ReactPanZoom.tsx | 0 .../lightbox/store/lightboxSelectors.ts | 0 .../features/lightbox/store/lightboxSlice.ts | 0 .../AccordionItems/AdvancedSettings.scss | 0 .../AccordionItems/InvokeAccordionItem.tsx | 0 .../BoundingBox/BoundingBoxSettings.scss | 0 .../BoundingBox/BoundingBoxSettings.tsx | 0 .../Canvas/InfillAndScalingSettings.tsx | 0 .../Canvas/SeamCorrection/SeamBlur.tsx | 0 .../SeamCorrection/SeamCorrectionSettings.tsx | 0 .../Canvas/SeamCorrection/SeamSize.tsx | 0 .../Canvas/SeamCorrection/SeamSteps.tsx | 0 .../Canvas/SeamCorrection/SeamStrength.tsx | 0 .../FaceRestore/CodeformerFidelity.tsx | 0 .../FaceRestore/FaceRestoreSettings.tsx | 0 .../FaceRestore/FaceRestoreStrength.tsx | 0 .../FaceRestore/FaceRestoreToggle.tsx | 0 .../FaceRestore/FaceRestoreType.tsx | 0 .../ImageToImage/ImageFit.tsx | 0 .../ImageToImage/ImageToImageStrength.tsx | 0 .../Output/HiresSettings.tsx | 0 .../Output/ImageToImageOutputSettings.tsx | 0 .../Output/OutputSettings.tsx | 0 .../Output/SeamlessSettings.tsx | 0 .../Output/SymmetrySettings.tsx | 0 .../Output/SymmetryToggle.tsx | 0 .../AdvancedParameters/Seed/Perlin.tsx | 0 .../AdvancedParameters/Seed/RandomizeSeed.tsx | 0 .../AdvancedParameters/Seed/Seed.tsx | 0 .../AdvancedParameters/Seed/SeedSettings.tsx | 0 .../AdvancedParameters/Seed/ShuffleSeed.tsx | 0 .../AdvancedParameters/Seed/Threshold.tsx | 0 .../Upscale/UpscaleDenoisingStrength.tsx | 0 .../Upscale/UpscaleScale.tsx | 0 .../Upscale/UpscaleSettings.tsx | 0 .../Upscale/UpscaleStrength.tsx | 0 .../Upscale/UpscaleToggle.tsx | 0 .../Variations/GenerateVariations.tsx | 0 .../Variations/SeedWeights.tsx | 0 .../Variations/VariationAmount.tsx | 0 .../Variations/VariationsSettings.tsx | 0 .../MainParameters/MainCFGScale.tsx | 0 .../components/MainParameters/MainHeight.tsx | 0 .../MainParameters/MainIterations.tsx | 0 .../MainParameters/MainParameters.scss | 0 .../MainParameters/MainParameters.tsx | 0 .../components/MainParameters/MainSampler.tsx | 0 .../components/MainParameters/MainSteps.tsx | 0 .../components/MainParameters/MainWidth.tsx | 0 .../components/ParametersAccordion.tsx | 0 .../ProcessButtons/CancelButton.tsx | 0 .../ProcessButtons/InvokeButton.tsx | 0 .../components/ProcessButtons/Loopback.tsx | 0 .../ProcessButtons/ProcessButtons.scss | 0 .../ProcessButtons/ProcessButtons.tsx | 0 .../PromptInput/NegativePromptInput.tsx | 0 .../components/PromptInput/PromptInput.scss | 0 .../components/PromptInput/PromptInput.tsx | 0 .../features/parameters/hooks/usePrompt.ts | 0 .../parameters/store/generationSelectors.ts | 0 .../parameters/store/generationSlice.ts | 0 .../store/postprocessingSelectors.ts | 0 .../parameters/store/postprocessingSlice.ts | 0 .../components/ClearTempFolderButtonModal.tsx | 0 .../features/system/components/Console.scss | 0 .../features/system/components/Console.tsx | 0 .../components/HotkeysModal/HotkeysModal.scss | 0 .../components/HotkeysModal/HotkeysModal.tsx | 0 .../HotkeysModal/HotkeysModalItem.tsx | 0 .../system/components/LanguagePicker.tsx | 0 .../src/features/system/components/Modal.scss | 0 .../ModelManager/AddCheckpointModel.tsx | 0 .../ModelManager/AddDiffusersModel.tsx | 0 .../components/ModelManager/AddModel.scss | 0 .../components/ModelManager/AddModel.tsx | 0 .../ModelManager/CheckpointModelEdit.tsx | 0 .../ModelManager/DiffusersModelEdit.tsx | 0 .../components/ModelManager/MergeModels.tsx | 0 
.../components/ModelManager/ModelConvert.tsx | 0 .../components/ModelManager/ModelList.tsx | 0 .../components/ModelManager/ModelListItem.tsx | 0 .../ModelManager/ModelManagerModal.tsx | 0 .../components/ModelManager/SearchModels.tsx | 0 .../system/components/ModelSelect.tsx | 0 .../system/components/ProgressBar.scss | 0 .../system/components/ProgressBar.tsx | 0 .../SettingsModal/SettingsModal.scss | 0 .../SettingsModal/SettingsModal.tsx | 0 .../system/components/SiteHeader.scss | 0 .../features/system/components/SiteHeader.tsx | 0 .../system/components/StatusIndicator.scss | 0 .../system/components/StatusIndicator.tsx | 0 .../system/components/ThemeChanger.tsx | 0 .../features/system/hooks/useToastWatcher.ts | 0 .../features/system/store/systemSelectors.ts | 0 .../src/features/system/store/systemSlice.ts | 0 .../ui/components/FloatingButton.scss | 0 .../ui/components/FloatingGalleryButton.tsx | 0 .../FloatingParametersPanelButtons.tsx | 0 .../components/ImageToImage/ImageToImage.scss | 0 .../ImageToImage/ImageToImageDisplay.tsx | 0 .../ImageToImage/ImageToImageOptions.tsx | 0 .../ImageToImage/ImageToImagePanel.tsx | 0 .../ImageToImage/InitImagePreview.tsx | 0 .../ImageToImage/InitialImageOverlay.tsx | 0 .../ui/components/ImageToImage/index.tsx | 0 .../ui/components/InvokeParametersPanel.scss | 0 .../ui/components/InvokeParametersPanel.tsx | 0 .../features/ui/components/InvokeTabs.scss | 0 .../src/features/ui/components/InvokeTabs.tsx | 0 .../ui/components/InvokeWorkarea.scss | 0 .../features/ui/components/InvokeWorkarea.tsx | 0 .../components/TextToImage/TextToImage.scss | 0 .../TextToImage/TextToImageDisplay.tsx | 0 .../TextToImage/TextToImagePanel.tsx | 0 .../ui/components/TextToImage/index.tsx | 0 .../UnifiedCanvas/CanvasWorkarea.scss | 0 .../UnifiedCanvasDisplayBeta.tsx | 0 .../UnifiedCanvasBaseBrushSettings.tsx | 0 .../UnifiedCanvasBrushSettings.tsx | 0 .../UnifiedCanvasBrushSize.tsx | 0 .../UnifiedCanvasClearMask.tsx | 0 .../UnifiedCanvasColorPicker.tsx | 0 .../UnifiedCanvasDarkenOutsideSelection.tsx | 0 .../UnifiedCanvasEnableMask.tsx | 0 .../UnifiedCanvasLimitStrokesToBox.tsx | 0 .../UnifiedCanvasMaskBrushSettings.tsx | 0 .../UnifiedCanvasMoveSettings.tsx | 0 .../UnifiedCanvasPreserveMask.tsx | 0 .../UnifiedCanvasSettings.tsx | 0 .../UnifiedCanvasShowGrid.tsx | 0 .../UnifiedCanvasSnapToGrid.tsx | 0 .../UnifiedCanvasToolSettingsBeta.tsx | 0 .../UnifiedCanvasCopyToClipboard.tsx | 0 .../UnifiedCanvasDownloadImage.tsx | 0 .../UnifiedCanvasFileUploader.tsx | 0 .../UnifiedCanvasLayerSelect.tsx | 0 .../UnifiedCanvasMergeVisible.tsx | 0 .../UnifiedCanvasMoveTool.tsx | 0 .../UnifiedCanvasProcessingButtons.tsx | 0 .../UnifiedCanvasResetCanvas.tsx | 0 .../UnifiedCanvasResetView.tsx | 0 .../UnifiedCanvasSaveToGallery.tsx | 0 .../UnifiedCanvasToolSelect.tsx | 0 .../UnifiedCanvasToolbarBeta.tsx | 0 .../UnifiedCanvas/UnifiedCanvasDisplay.tsx | 0 .../UnifiedCanvas/UnifiedCanvasPanel.tsx | 0 .../UnifiedCanvas/UnifiedCanvasWorkarea.tsx | 0 .../{ => web}/src/features/ui/store/tabMap.ts | 0 .../src/features/ui/store/uiSelectors.ts | 0 .../src/features/ui/store/uiSlice.ts | 0 .../src/features/ui/store/uiTypes.ts | 0 invokeai/frontend/{ => web}/src/i18.d.ts | 0 invokeai/frontend/{ => web}/src/i18n.ts | 0 invokeai/frontend/{ => web}/src/main.tsx | 0 invokeai/frontend/{ => web}/src/persistor.ts | 0 .../{ => web}/src/styles/Mixins/Buttons.scss | 0 .../src/styles/Mixins/_Responsive.scss | 0 .../{ => web}/src/styles/Mixins/_Shared.scss | 0 .../src/styles/Mixins/_Variables.scss | 0 .../{ => 
web}/src/styles/Mixins/index.scss | 0 .../src/styles/Themes/_Colors_Dark.scss | 0 .../src/styles/Themes/_Colors_Green.scss | 0 .../src/styles/Themes/_Colors_Light.scss | 0 .../{ => web}/src/styles/_Animations.scss | 0 .../frontend/{ => web}/src/styles/_Fonts.scss | 0 .../frontend/{ => web}/src/styles/_Misc.scss | 0 .../frontend/{ => web}/src/styles/index.scss | 0 invokeai/frontend/{ => web}/src/vite-env.d.ts | 0 invokeai/frontend/{ => web}/stats.html | 0 invokeai/frontend/{ => web}/tsconfig.json | 0 .../frontend/{ => web}/tsconfig.node.json | 0 invokeai/frontend/{ => web}/vite.config.ts | 0 invokeai/frontend/{ => web}/yarn.lock | 0 ldm/invoke/config/__init__.py | 0 .../restoration/codeformer/weights/README | 3 - ldm/invoke/server.py | 282 ------------------ ldm/invoke/server_legacy.py | 246 --------------- ldm/lr_scheduler.py | 143 --------- ldm/models/__init__.py | 0 ldm/simplet2i.py | 13 - pyproject.toml | 26 +- tests/nodes/test_graph_execution_state.py | 12 +- tests/nodes/test_invoker.py | 14 +- tests/nodes/test_node_graph.py | 8 +- tests/nodes/test_nodes.py | 12 +- tests/nodes/test_sqlite.py | 2 +- tests/test_path.py | 5 +- 496 files changed, 276 insertions(+), 934 deletions(-) rename {ldm/invoke => invokeai}/app/api/dependencies.py (100%) rename {ldm/invoke => invokeai}/app/api/events.py (100%) rename {ldm/invoke => invokeai}/app/api/routers/images.py (100%) rename {ldm/invoke => invokeai}/app/api/routers/sessions.py (100%) rename {ldm/invoke => invokeai}/app/api/sockets.py (100%) rename {ldm/invoke => invokeai}/app/api_app.py (100%) rename {ldm/invoke => invokeai}/app/cli_app.py (100%) rename {ldm/invoke => invokeai}/app/invocations/__init__.py (100%) rename {ldm/invoke => invokeai}/app/invocations/baseinvocation.py (100%) rename {ldm/invoke => invokeai}/app/invocations/cv.py (100%) rename {ldm/invoke => invokeai}/app/invocations/generate.py (100%) rename {ldm/invoke => invokeai}/app/invocations/image.py (100%) rename {ldm/invoke => invokeai}/app/invocations/prompt.py (100%) rename {ldm/invoke => invokeai}/app/invocations/reconstruct.py (100%) rename {ldm/invoke => invokeai}/app/invocations/upscale.py (100%) rename invokeai/{backend/modules => app/services}/__init__.py (100%) rename {ldm/invoke => invokeai}/app/services/events.py (100%) rename {ldm/invoke => invokeai}/app/services/generate_initializer.py (97%) rename {ldm/invoke => invokeai}/app/services/graph.py (100%) rename {ldm/invoke => invokeai}/app/services/image_storage.py (100%) rename {ldm/invoke => invokeai}/app/services/invocation_queue.py (100%) rename {ldm/invoke => invokeai}/app/services/invocation_services.py (96%) rename {ldm/invoke => invokeai}/app/services/invoker.py (100%) rename {ldm/invoke => invokeai}/app/services/item_storage.py (100%) rename {ldm/invoke => invokeai}/app/services/processor.py (100%) rename {ldm/invoke => invokeai}/app/services/sqlite.py (100%) rename {ldm/invoke => invokeai/backend}/args.py (99%) rename {ldm => invokeai/backend/config}/__init__.py (100%) rename {ldm/invoke => invokeai/backend}/config/invokeai_configure.py (99%) rename {ldm/invoke => invokeai/backend}/config/model_install_backend.py (99%) rename {ldm => invokeai/backend}/generate.py (98%) rename {ldm/invoke => invokeai/backend}/globals.py (98%) rename {ldm/invoke => invokeai/backend/image_util}/seamless.py (100%) create mode 100644 invokeai/backend/model_management/__init__.py rename ldm/invoke/ckpt_to_diffuser.py => invokeai/backend/model_management/convert_ckpt_to_diffusers.py (99%) rename invokeai/backend/{ => 
model_management}/model_manager.py (91%) rename {ldm/invoke => invokeai/backend}/restoration/__init__.py (100%) rename {ldm/invoke => invokeai/backend}/restoration/base.py (84%) rename {ldm/invoke => invokeai/backend}/restoration/codeformer.py (98%) rename {ldm/invoke => invokeai/backend}/restoration/codeformer_arch.py (99%) rename {ldm/invoke => invokeai/backend}/restoration/gfpgan.py (98%) rename {ldm/invoke => invokeai/backend}/restoration/outcrop.py (100%) rename {ldm/invoke => invokeai/backend}/restoration/outpaint.py (100%) rename {ldm/invoke => invokeai/backend}/restoration/realesrgan.py (98%) rename {ldm/invoke => invokeai/backend}/restoration/vqgan_arch.py (100%) create mode 100644 invokeai/backend/training/__init.py__ rename {ldm/invoke => invokeai/backend}/training/textual_inversion_training.py (99%) create mode 100644 invokeai/backend/util/__init__.py rename invokeai/backend/{ => util}/devices.py (94%) rename {ldm/invoke => invokeai/backend/util}/log.py (100%) rename invokeai/backend/{ => util}/util.py (100%) create mode 100644 invokeai/backend/web/__init__.py rename invokeai/backend/{ => web}/invoke_ai_web_server.py (98%) rename {ldm/invoke/app/services => invokeai/backend/web/modules}/__init__.py (100%) rename invokeai/backend/{ => web}/modules/create_cmd_parser.py (100%) rename invokeai/backend/{ => web}/modules/get_canvas_generation_mode.py (100%) rename invokeai/backend/{ => web}/modules/parameters.py (97%) rename invokeai/backend/{ => web}/modules/parse_seed_weights.py (100%) rename invokeai/backend/{ => web}/modules/test_images/init-img_full_transparency.png (100%) rename invokeai/backend/{ => web}/modules/test_images/init-img_opaque.png (100%) rename invokeai/backend/{ => web}/modules/test_images/init-img_partial_transparency.png (100%) rename invokeai/backend/{ => web}/modules/test_images/init-mask_has_mask.png (100%) rename invokeai/backend/{ => web}/modules/test_images/init-mask_no_mask.png (100%) rename {ldm/invoke => invokeai/frontend/CLI}/CLI.py (98%) create mode 100644 invokeai/frontend/CLI/__init__.py rename {ldm/invoke => invokeai/frontend/CLI}/readline.py (99%) create mode 100644 invokeai/frontend/__init__.py create mode 100644 invokeai/frontend/config/__init__.py create mode 100644 invokeai/frontend/config/invokeai_configure.py rename {ldm/invoke => invokeai/frontend}/config/invokeai_update.py (98%) rename {ldm/invoke => invokeai/frontend}/config/model_install.py (97%) rename {ldm/invoke => invokeai/frontend}/config/widgets.py (100%) create mode 100644 invokeai/frontend/merge/__init__.py rename {ldm/invoke => invokeai/frontend/merge}/merge_diffusers.py (97%) create mode 100644 invokeai/frontend/training/__init__.py rename {ldm/invoke => invokeai/frontend}/training/textual_inversion.py (99%) rename invokeai/frontend/{ => web}/.babelrc (100%) rename invokeai/frontend/{ => web}/.eslintignore (100%) rename invokeai/frontend/{ => web}/.eslintrc.js (100%) rename invokeai/frontend/{ => web}/.gitignore (100%) rename invokeai/frontend/{ => web}/.husky/pre-commit (100%) rename invokeai/frontend/{ => web}/.prettierignore (100%) rename invokeai/frontend/{ => web}/.prettierrc.js (100%) rename invokeai/frontend/{ => web}/.yarn/releases/yarn-1.22.19.cjs (100%) rename invokeai/frontend/{ => web}/.yarnrc (100%) rename invokeai/frontend/{ => web}/.yarnrc.yml (100%) rename invokeai/frontend/{ => web}/README.md (100%) create mode 100644 invokeai/frontend/web/__init__.py rename invokeai/frontend/{ => web}/dist/assets/Inter-Bold-790c108b.ttf (100%) rename invokeai/frontend/{ => 
web}/dist/assets/Inter-b9a8e5e2.ttf (100%) rename invokeai/frontend/{ => web}/dist/assets/favicon-0d253ced.ico (100%) rename invokeai/frontend/{ => web}/dist/assets/index-0e39fbc4.js (100%) rename invokeai/frontend/{ => web}/dist/assets/index-14cb2922.css (100%) rename invokeai/frontend/{ => web}/dist/assets/logo-13003d72.png (100%) rename invokeai/frontend/{ => web}/dist/index.html (100%) rename invokeai/frontend/{ => web}/dist/locales/ar.json (100%) rename invokeai/frontend/{ => web}/dist/locales/de.json (100%) rename invokeai/frontend/{ => web}/dist/locales/en.json (100%) rename invokeai/frontend/{ => web}/dist/locales/es.json (100%) rename invokeai/frontend/{ => web}/dist/locales/fr.json (100%) rename invokeai/frontend/{ => web}/dist/locales/it.json (100%) rename invokeai/frontend/{ => web}/dist/locales/ja.json (100%) rename invokeai/frontend/{ => web}/dist/locales/nl.json (100%) rename invokeai/frontend/{ => web}/dist/locales/pl.json (100%) rename invokeai/frontend/{ => web}/dist/locales/pt_BR.json (100%) rename invokeai/frontend/{ => web}/dist/locales/ru.json (100%) rename invokeai/frontend/{ => web}/dist/locales/uk.json (100%) rename invokeai/frontend/{ => web}/dist/locales/zh_CN.json (100%) rename invokeai/frontend/{ => web}/favicon.ico (100%) rename invokeai/frontend/{ => web}/index.d.ts (100%) rename invokeai/frontend/{ => web}/index.html (100%) rename invokeai/frontend/{ => web}/package.json (100%) rename invokeai/frontend/{ => web}/patches/redux-deep-persist+1.0.7.patch (100%) rename invokeai/frontend/{ => web}/patches/redux-persist+6.0.0.patch (100%) rename invokeai/frontend/{ => web}/public/locales/ar.json (100%) rename invokeai/frontend/{ => web}/public/locales/de.json (100%) rename invokeai/frontend/{ => web}/public/locales/en.json (100%) rename invokeai/frontend/{ => web}/public/locales/es.json (100%) rename invokeai/frontend/{ => web}/public/locales/fr.json (100%) rename invokeai/frontend/{ => web}/public/locales/it.json (100%) rename invokeai/frontend/{ => web}/public/locales/ja.json (100%) rename invokeai/frontend/{ => web}/public/locales/nl.json (100%) rename invokeai/frontend/{ => web}/public/locales/pl.json (100%) rename invokeai/frontend/{ => web}/public/locales/pt_BR.json (100%) rename invokeai/frontend/{ => web}/public/locales/ro.json (100%) rename invokeai/frontend/{ => web}/public/locales/ru.json (100%) rename invokeai/frontend/{ => web}/public/locales/uk.json (100%) rename invokeai/frontend/{ => web}/public/locales/zh_CN.json (100%) rename invokeai/frontend/{ => web}/src/Loading.tsx (100%) rename invokeai/frontend/{ => web}/src/app/App.scss (100%) rename invokeai/frontend/{ => web}/src/app/App.tsx (100%) rename invokeai/frontend/{ => web}/src/app/constants.ts (100%) rename invokeai/frontend/{ => web}/src/app/contexts/ImageUploaderTriggerContext.ts (100%) rename invokeai/frontend/{ => web}/src/app/features.ts (100%) rename invokeai/frontend/{ => web}/src/app/invokeai.d.ts (100%) rename invokeai/frontend/{ => web}/src/app/selectors/readinessSelector.ts (100%) rename invokeai/frontend/{ => web}/src/app/socketio/actions.ts (100%) rename invokeai/frontend/{ => web}/src/app/socketio/emitters.ts (100%) rename invokeai/frontend/{ => web}/src/app/socketio/listeners.ts (100%) rename invokeai/frontend/{ => web}/src/app/socketio/middleware.ts (100%) rename invokeai/frontend/{ => web}/src/app/store.ts (100%) rename invokeai/frontend/{ => web}/src/app/storeHooks.ts (100%) rename invokeai/frontend/{ => web}/src/app/theme.ts (100%) rename invokeai/frontend/{ => 
web}/src/app/utils.ts (100%) rename invokeai/frontend/{ => web}/src/assets/fonts/Inter/Inter-Bold.ttf (100%) rename invokeai/frontend/{ => web}/src/assets/fonts/Inter/Inter.ttf (100%) rename invokeai/frontend/{ => web}/src/assets/images/image2img.png (100%) rename invokeai/frontend/{ => web}/src/assets/images/logo.png (100%) rename invokeai/frontend/{ => web}/src/assets/images/mask.afdesign (100%) rename invokeai/frontend/{ => web}/src/assets/images/mask.svg (100%) rename invokeai/frontend/{ => web}/src/common/components/GuideIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/GuidePopover.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/GuidePopover.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIAlertDialog.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIButton.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIButton.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAICheckbox.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAICheckbox.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIColorPicker.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIColorPicker.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIIconButton.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIIconButton.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIInput.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIInput.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAINumberInput.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAINumberInput.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIPopover.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAIPopover.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAISelect.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAISelect.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAISimpleMenu.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAISlider.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAISlider.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/IAISwitch.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/IAISwitch.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/ImageUploadOverlay.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/ImageUploader.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/ImageUploader.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/ImageUploaderButton.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/ImageUploaderIconButton.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/SubItemHook.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/WorkInProgress/NodesWIP.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/WorkInProgress/PostProcessingWIP.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/WorkInProgress/Training.tsx (100%) rename invokeai/frontend/{ => web}/src/common/components/WorkInProgress/WorkInProgress.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/radix-ui/IAISlider.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/radix-ui/IAISlider.tsx (100%) 
rename invokeai/frontend/{ => web}/src/common/components/radix-ui/IAITooltip.scss (100%) rename invokeai/frontend/{ => web}/src/common/components/radix-ui/IAITooltip.tsx (100%) rename invokeai/frontend/{ => web}/src/common/hooks/useClickOutsideWatcher.ts (100%) rename invokeai/frontend/{ => web}/src/common/hooks/useImageUploader.ts (100%) rename invokeai/frontend/{ => web}/src/common/hooks/useSingleAndDoubleClick.ts (100%) rename invokeai/frontend/{ => web}/src/common/hooks/useUpdateTranslations.ts (100%) rename invokeai/frontend/{ => web}/src/common/icons/ImageToImageIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/InpaintIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/NodesIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/OutpaintIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/PostprocessingIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/TextToImageIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/TrainingIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/UnifiedCanvas.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/UnifiedCanvasIcon.tsx (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/BaseImage.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/ImageToImage.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/ImageToImage.svg (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Inpaint.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Inpaint.svg (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Nodes.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Nodes.svg (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Outpaint.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Outpaint.svg (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Postprocessing.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Postprocessing.svg (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/TextToImage.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/TextToImage.svg (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Training.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/Training.svg (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/UnifiedCanvas.afdesign (100%) rename invokeai/frontend/{ => web}/src/common/icons/design_files/UnifiedCanvas.svg (100%) rename invokeai/frontend/{ => web}/src/common/util/getPromptAndNegative.ts (100%) rename invokeai/frontend/{ => web}/src/common/util/openBase64ImageInTab.ts (100%) rename invokeai/frontend/{ => web}/src/common/util/parameterTranslation.ts (100%) rename invokeai/frontend/{ => web}/src/common/util/promptToString.ts (100%) rename invokeai/frontend/{ => web}/src/common/util/randomInt.ts (100%) rename invokeai/frontend/{ => web}/src/common/util/roundDownToMultiple.ts (100%) rename invokeai/frontend/{ => web}/src/common/util/seedWeightPairs.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/ClearCanvasHistoryButtonModal.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvas.tsx (100%) rename invokeai/frontend/{ => 
web}/src/features/canvas/components/IAICanvasBoundingBoxOverlay.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasGrid.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasImage.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasIntermediateImage.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasMaskCompositer.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasMaskLines.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasObjectRenderer.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasResizer.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasStagingArea.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasStatusText.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasStatusText/IAICanvasStatusTextCursorPos.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolPreview.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolbar/IAICanvasRedoButton.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolbar.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/components/IAICanvasToolbar/IAICanvasUndoButton.tsx (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useCanvasDragMove.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useCanvasHotkeys.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useCanvasMouseDown.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useCanvasMouseMove.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useCanvasMouseOut.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useCanvasMouseUp.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useCanvasZoom.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/hooks/useColorUnderCursor.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/store/canvasSelectors.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/store/canvasSlice.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/store/canvasTypes.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/store/thunks/mergeAndUploadCanvas.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/calculateCoordinates.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/calculateScale.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/colorToString.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/constants.ts (100%) rename invokeai/frontend/{ => 
web}/src/features/canvas/util/copyImage.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/downloadFile.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/floorCoordinates.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/generateMask.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/getScaledBoundingBoxDimensions.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/getScaledCursorPosition.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/konvaInstanceProvider.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/layerToDataURL.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/roundDimensionsTo64.ts (100%) rename invokeai/frontend/{ => web}/src/features/canvas/util/roundToHundreth.ts (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/CurrentImageButtons.scss (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/CurrentImageButtons.tsx (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/CurrentImageDisplay.scss (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/CurrentImageDisplay.tsx (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/CurrentImagePreview.tsx (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/DeleteImageModal.tsx (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/HoverableImage.scss (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/HoverableImage.tsx (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/ImageGallery.scss (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/ImageGallery.tsx (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.scss (100%) rename invokeai/frontend/{ => web}/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.tsx (100%) rename invokeai/frontend/{ => web}/src/features/gallery/hooks/useGetImageByUuid.ts (100%) rename invokeai/frontend/{ => web}/src/features/gallery/store/gallerySelectors.ts (100%) rename invokeai/frontend/{ => web}/src/features/gallery/store/gallerySlice.ts (100%) rename invokeai/frontend/{ => web}/src/features/gallery/store/thunks/uploadImage.ts (100%) rename invokeai/frontend/{ => web}/src/features/lightbox/components/Lightbox.scss (100%) rename invokeai/frontend/{ => web}/src/features/lightbox/components/Lightbox.tsx (100%) rename invokeai/frontend/{ => web}/src/features/lightbox/components/ReactPanZoom.tsx (100%) rename invokeai/frontend/{ => web}/src/features/lightbox/store/lightboxSelectors.ts (100%) rename invokeai/frontend/{ => web}/src/features/lightbox/store/lightboxSlice.ts (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AccordionItems/AdvancedSettings.scss (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AccordionItems/InvokeAccordionItem.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.scss (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings.tsx (100%) rename invokeai/frontend/{ => 
web}/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamBlur.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSize.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSteps.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamStrength.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/FaceRestore/CodeformerFidelity.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreStrength.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreType.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageFit.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageToImageStrength.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Output/HiresSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Output/OutputSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Output/SeamlessSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Output/SymmetrySettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Output/SymmetryToggle.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Seed/Perlin.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Seed/RandomizeSeed.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Seed/Seed.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Seed/SeedSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Seed/ShuffleSeed.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Seed/Threshold.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleDenoisingStrength.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleScale.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleStrength.tsx (100%) rename invokeai/frontend/{ => 
web}/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Variations/GenerateVariations.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Variations/SeedWeights.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Variations/VariationAmount.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/AdvancedParameters/Variations/VariationsSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainCFGScale.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainHeight.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainIterations.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainParameters.scss (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainParameters.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainSampler.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainSteps.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/MainParameters/MainWidth.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/ParametersAccordion.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/ProcessButtons/CancelButton.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/ProcessButtons/InvokeButton.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/ProcessButtons/Loopback.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/ProcessButtons/ProcessButtons.scss (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/ProcessButtons/ProcessButtons.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/PromptInput/NegativePromptInput.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/PromptInput/PromptInput.scss (100%) rename invokeai/frontend/{ => web}/src/features/parameters/components/PromptInput/PromptInput.tsx (100%) rename invokeai/frontend/{ => web}/src/features/parameters/hooks/usePrompt.ts (100%) rename invokeai/frontend/{ => web}/src/features/parameters/store/generationSelectors.ts (100%) rename invokeai/frontend/{ => web}/src/features/parameters/store/generationSlice.ts (100%) rename invokeai/frontend/{ => web}/src/features/parameters/store/postprocessingSelectors.ts (100%) rename invokeai/frontend/{ => web}/src/features/parameters/store/postprocessingSlice.ts (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ClearTempFolderButtonModal.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/Console.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/Console.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/HotkeysModal/HotkeysModal.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/HotkeysModal/HotkeysModal.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/HotkeysModal/HotkeysModalItem.tsx (100%) rename invokeai/frontend/{ => 
web}/src/features/system/components/LanguagePicker.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/Modal.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/AddCheckpointModel.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/AddDiffusersModel.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/AddModel.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/AddModel.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/CheckpointModelEdit.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/DiffusersModelEdit.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/MergeModels.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/ModelConvert.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/ModelList.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/ModelListItem.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/ModelManagerModal.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelManager/SearchModels.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ModelSelect.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ProgressBar.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ProgressBar.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/SettingsModal/SettingsModal.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/SettingsModal/SettingsModal.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/SiteHeader.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/SiteHeader.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/StatusIndicator.scss (100%) rename invokeai/frontend/{ => web}/src/features/system/components/StatusIndicator.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/components/ThemeChanger.tsx (100%) rename invokeai/frontend/{ => web}/src/features/system/hooks/useToastWatcher.ts (100%) rename invokeai/frontend/{ => web}/src/features/system/store/systemSelectors.ts (100%) rename invokeai/frontend/{ => web}/src/features/system/store/systemSlice.ts (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/FloatingButton.scss (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/FloatingGalleryButton.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/FloatingParametersPanelButtons.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/ImageToImage/ImageToImage.scss (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/ImageToImage/ImageToImageDisplay.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/ImageToImage/ImageToImageOptions.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/ImageToImage/ImageToImagePanel.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/ImageToImage/InitImagePreview.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/ImageToImage/InitialImageOverlay.tsx (100%) rename invokeai/frontend/{ => 
web}/src/features/ui/components/ImageToImage/index.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/InvokeParametersPanel.scss (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/InvokeParametersPanel.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/InvokeTabs.scss (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/InvokeTabs.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/InvokeWorkarea.scss (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/InvokeWorkarea.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/TextToImage/TextToImage.scss (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/TextToImage/TextToImageDisplay.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/TextToImage/TextToImagePanel.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/TextToImage/index.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/CanvasWorkarea.scss (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasDisplayBeta.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBaseBrushSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSize.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasClearMask.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasLimitStrokesToBox.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMaskBrushSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMoveSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx (100%) rename invokeai/frontend/{ => 
web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettingsBeta.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasCopyToClipboard.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasDownloadImage.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasFileUploader.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasLayerSelect.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMergeVisible.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMoveTool.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasProcessingButtons.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetCanvas.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetView.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasSaveToGallery.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasToolSelect.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbarBeta.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasDisplay.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasPanel.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/components/UnifiedCanvas/UnifiedCanvasWorkarea.tsx (100%) rename invokeai/frontend/{ => web}/src/features/ui/store/tabMap.ts (100%) rename invokeai/frontend/{ => web}/src/features/ui/store/uiSelectors.ts (100%) rename invokeai/frontend/{ => web}/src/features/ui/store/uiSlice.ts (100%) rename invokeai/frontend/{ => web}/src/features/ui/store/uiTypes.ts (100%) rename invokeai/frontend/{ => web}/src/i18.d.ts (100%) rename invokeai/frontend/{ => web}/src/i18n.ts (100%) rename invokeai/frontend/{ => web}/src/main.tsx (100%) rename invokeai/frontend/{ => web}/src/persistor.ts (100%) rename invokeai/frontend/{ => web}/src/styles/Mixins/Buttons.scss (100%) rename invokeai/frontend/{ => web}/src/styles/Mixins/_Responsive.scss (100%) rename invokeai/frontend/{ => web}/src/styles/Mixins/_Shared.scss (100%) rename invokeai/frontend/{ => web}/src/styles/Mixins/_Variables.scss (100%) rename invokeai/frontend/{ => web}/src/styles/Mixins/index.scss (100%) rename invokeai/frontend/{ => web}/src/styles/Themes/_Colors_Dark.scss (100%) rename invokeai/frontend/{ => web}/src/styles/Themes/_Colors_Green.scss (100%) rename invokeai/frontend/{ => web}/src/styles/Themes/_Colors_Light.scss (100%) rename invokeai/frontend/{ => web}/src/styles/_Animations.scss (100%) rename invokeai/frontend/{ => web}/src/styles/_Fonts.scss (100%) rename invokeai/frontend/{ => web}/src/styles/_Misc.scss 
(100%) rename invokeai/frontend/{ => web}/src/styles/index.scss (100%) rename invokeai/frontend/{ => web}/src/vite-env.d.ts (100%) rename invokeai/frontend/{ => web}/stats.html (100%) rename invokeai/frontend/{ => web}/tsconfig.json (100%) rename invokeai/frontend/{ => web}/tsconfig.node.json (100%) rename invokeai/frontend/{ => web}/vite.config.ts (100%) rename invokeai/frontend/{ => web}/yarn.lock (100%) delete mode 100644 ldm/invoke/config/__init__.py delete mode 100644 ldm/invoke/restoration/codeformer/weights/README delete mode 100644 ldm/invoke/server.py delete mode 100644 ldm/invoke/server_legacy.py delete mode 100644 ldm/lr_scheduler.py delete mode 100644 ldm/models/__init__.py delete mode 100644 ldm/simplet2i.py diff --git a/.gitignore b/.gitignore index c84ffb1e65..031c102165 100644 --- a/.gitignore +++ b/.gitignore @@ -198,7 +198,7 @@ checkpoints .DS_Store # Let the frontend manage its own gitignore -!invokeai/frontend/* +!invokeai/frontend/web/* # Scratch folder .scratch/ diff --git a/ldm/invoke/app/api/dependencies.py b/invokeai/app/api/dependencies.py similarity index 100% rename from ldm/invoke/app/api/dependencies.py rename to invokeai/app/api/dependencies.py diff --git a/ldm/invoke/app/api/events.py b/invokeai/app/api/events.py similarity index 100% rename from ldm/invoke/app/api/events.py rename to invokeai/app/api/events.py diff --git a/ldm/invoke/app/api/routers/images.py b/invokeai/app/api/routers/images.py similarity index 100% rename from ldm/invoke/app/api/routers/images.py rename to invokeai/app/api/routers/images.py diff --git a/ldm/invoke/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py similarity index 100% rename from ldm/invoke/app/api/routers/sessions.py rename to invokeai/app/api/routers/sessions.py diff --git a/ldm/invoke/app/api/sockets.py b/invokeai/app/api/sockets.py similarity index 100% rename from ldm/invoke/app/api/sockets.py rename to invokeai/app/api/sockets.py diff --git a/ldm/invoke/app/api_app.py b/invokeai/app/api_app.py similarity index 100% rename from ldm/invoke/app/api_app.py rename to invokeai/app/api_app.py diff --git a/ldm/invoke/app/cli_app.py b/invokeai/app/cli_app.py similarity index 100% rename from ldm/invoke/app/cli_app.py rename to invokeai/app/cli_app.py diff --git a/ldm/invoke/app/invocations/__init__.py b/invokeai/app/invocations/__init__.py similarity index 100% rename from ldm/invoke/app/invocations/__init__.py rename to invokeai/app/invocations/__init__.py diff --git a/ldm/invoke/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py similarity index 100% rename from ldm/invoke/app/invocations/baseinvocation.py rename to invokeai/app/invocations/baseinvocation.py diff --git a/ldm/invoke/app/invocations/cv.py b/invokeai/app/invocations/cv.py similarity index 100% rename from ldm/invoke/app/invocations/cv.py rename to invokeai/app/invocations/cv.py diff --git a/ldm/invoke/app/invocations/generate.py b/invokeai/app/invocations/generate.py similarity index 100% rename from ldm/invoke/app/invocations/generate.py rename to invokeai/app/invocations/generate.py diff --git a/ldm/invoke/app/invocations/image.py b/invokeai/app/invocations/image.py similarity index 100% rename from ldm/invoke/app/invocations/image.py rename to invokeai/app/invocations/image.py diff --git a/ldm/invoke/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py similarity index 100% rename from ldm/invoke/app/invocations/prompt.py rename to invokeai/app/invocations/prompt.py diff --git 
a/ldm/invoke/app/invocations/reconstruct.py b/invokeai/app/invocations/reconstruct.py similarity index 100% rename from ldm/invoke/app/invocations/reconstruct.py rename to invokeai/app/invocations/reconstruct.py diff --git a/ldm/invoke/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py similarity index 100% rename from ldm/invoke/app/invocations/upscale.py rename to invokeai/app/invocations/upscale.py diff --git a/invokeai/backend/modules/__init__.py b/invokeai/app/services/__init__.py similarity index 100% rename from invokeai/backend/modules/__init__.py rename to invokeai/app/services/__init__.py diff --git a/ldm/invoke/app/services/events.py b/invokeai/app/services/events.py similarity index 100% rename from ldm/invoke/app/services/events.py rename to invokeai/app/services/events.py diff --git a/ldm/invoke/app/services/generate_initializer.py b/invokeai/app/services/generate_initializer.py similarity index 97% rename from ldm/invoke/app/services/generate_initializer.py rename to invokeai/app/services/generate_initializer.py index e1162a1b9a..c6e81a2403 100644 --- a/ldm/invoke/app/services/generate_initializer.py +++ b/invokeai/app/services/generate_initializer.py @@ -3,11 +3,10 @@ import os import sys import traceback -from invokeai.backend.models import ModelManager +from invokeai.backend import ModelManager, Generate from ...globals import Globals -from ....generate import Generate -import ldm.invoke +import invokeai.version # TODO: most of this code should be split into individual services as the Generate.py code is deprecated def get_generate(args, config) -> Generate: @@ -16,7 +15,7 @@ def get_generate(args, config) -> Generate: if not os.path.exists(config_file): report_model_error(args, FileNotFoundError(f"The file {config_file} could not be found.")) - print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}') + print(f'>> {invokeai.version.__app_name__}, version {invokeai.version.__version__}') print(f'>> InvokeAI runtime directory is "{Globals.root}"') # these two lines prevent a horrible warning message from appearing @@ -160,7 +159,7 @@ def report_model_error(opt:Namespace, e:Exception): # Temporary initializer for Generate until we migrate off of it def old_get_generate(args, config) -> Generate: # TODO: Remove the need for globals - from ldm.invoke.globals import Globals + from invokeai.backend.globals import Globals # alert - setting globals here Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.')) diff --git a/ldm/invoke/app/services/graph.py b/invokeai/app/services/graph.py similarity index 100% rename from ldm/invoke/app/services/graph.py rename to invokeai/app/services/graph.py diff --git a/ldm/invoke/app/services/image_storage.py b/invokeai/app/services/image_storage.py similarity index 100% rename from ldm/invoke/app/services/image_storage.py rename to invokeai/app/services/image_storage.py diff --git a/ldm/invoke/app/services/invocation_queue.py b/invokeai/app/services/invocation_queue.py similarity index 100% rename from ldm/invoke/app/services/invocation_queue.py rename to invokeai/app/services/invocation_queue.py diff --git a/ldm/invoke/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py similarity index 96% rename from ldm/invoke/app/services/invocation_services.py rename to invokeai/app/services/invocation_services.py index 40a64e64e5..93d60bb230 100644 --- a/ldm/invoke/app/services/invocation_services.py +++ 
b/invokeai/app/services/invocation_services.py @@ -3,8 +3,7 @@ from .invocation_queue import InvocationQueueABC from .item_storage import ItemStorageABC from .image_storage import ImageStorageBase from .events import EventServiceBase -from ....generate import Generate - +from invokeai.backend import Generate class InvocationServices(): """Services that can be used by invocations""" diff --git a/ldm/invoke/app/services/invoker.py b/invokeai/app/services/invoker.py similarity index 100% rename from ldm/invoke/app/services/invoker.py rename to invokeai/app/services/invoker.py diff --git a/ldm/invoke/app/services/item_storage.py b/invokeai/app/services/item_storage.py similarity index 100% rename from ldm/invoke/app/services/item_storage.py rename to invokeai/app/services/item_storage.py diff --git a/ldm/invoke/app/services/processor.py b/invokeai/app/services/processor.py similarity index 100% rename from ldm/invoke/app/services/processor.py rename to invokeai/app/services/processor.py diff --git a/ldm/invoke/app/services/sqlite.py b/invokeai/app/services/sqlite.py similarity index 100% rename from ldm/invoke/app/services/sqlite.py rename to invokeai/app/services/sqlite.py diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index 5384b3cee0..7bdc6e7270 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -1,8 +1,8 @@ ''' Initialization file for invokeai.backend ''' -# this is causing circular import issues -# from .invoke_ai_web_server import InvokeAIWebServer -from .model_manager import ModelManager +from .model_management import ModelManager +from .generate import Generate + diff --git a/ldm/invoke/args.py b/invokeai/backend/args.py similarity index 99% rename from ldm/invoke/args.py rename to invokeai/backend/args.py index d661409d18..7de6b35edb 100644 --- a/ldm/invoke/args.py +++ b/invokeai/backend/args.py @@ -98,8 +98,8 @@ from typing import List from invokeai.backend.image_util import retrieve_metadata import invokeai.version -from ldm.invoke.globals import Globals -from invokeai.backend.prompting import split_weighted_subprompts +from .globals import Globals +from .prompting import split_weighted_subprompts APP_ID = invokeai.version.__app_id__ APP_NAME = invokeai.version.__app_name__ diff --git a/ldm/__init__.py b/invokeai/backend/config/__init__.py similarity index 100% rename from ldm/__init__.py rename to invokeai/backend/config/__init__.py diff --git a/ldm/invoke/config/invokeai_configure.py b/invokeai/backend/config/invokeai_configure.py similarity index 99% rename from ldm/invoke/config/invokeai_configure.py rename to invokeai/backend/config/invokeai_configure.py index bb967fba37..9b1cc5affa 100755 --- a/ldm/invoke/config/invokeai_configure.py +++ b/invokeai/backend/config/invokeai_configure.py @@ -39,14 +39,14 @@ import invokeai.configs as configs from ..args import PRECISION_CHOICES, Args from ..globals import Globals, global_config_dir, global_config_file, global_cache_dir -from .model_install import addModelsForm, process_and_execute +from ...frontend.config.model_install import addModelsForm, process_and_execute from .model_install_backend import ( default_dataset, download_from_hf, recommended_datasets, hf_download_with_resume, ) -from .widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size +from ...frontend.config.widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size warnings.filterwarnings("ignore") diff --git a/ldm/invoke/config/model_install_backend.py 
b/invokeai/backend/config/model_install_backend.py similarity index 99% rename from ldm/invoke/config/model_install_backend.py rename to invokeai/backend/config/model_install_backend.py index d281ffbd73..6157c2b2b4 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/invokeai/backend/config/model_install_backend.py @@ -18,9 +18,9 @@ from tqdm import tqdm from typing import List import invokeai.configs as configs -from invokeai.backend.stable_diffusion import StableDiffusionGeneratorPipeline +from ..stable_diffusion import StableDiffusionGeneratorPipeline from ..globals import Globals, global_cache_dir, global_config_dir -from invokeai.backend import ModelManager +from ..model_management import ModelManager warnings.filterwarnings("ignore") diff --git a/ldm/generate.py b/invokeai/backend/generate.py similarity index 98% rename from ldm/generate.py rename to invokeai/backend/generate.py index 4bce177258..329e1b50bd 100644 --- a/ldm/generate.py +++ b/invokeai/backend/generate.py @@ -25,17 +25,18 @@ from omegaconf import OmegaConf from PIL import Image, ImageOps from pytorch_lightning import logging, seed_everything -from invokeai.backend import ModelManager -from invokeai.backend.prompting import get_uc_and_c_and_ec -from invokeai.backend.stable_diffusion import (DDIMSampler, KSampler, PLMSSampler) -from invokeai.backend.generator import infill_methods -from invokeai.backend.stable_diffusion.concepts_lib import HuggingFaceConceptsLibrary -from invokeai.backend.devices import choose_precision, choose_torch_device -from invokeai.backend.image_util import InitImageResizer, PngWriter, Txt2Mask +from . import ModelManager +from .prompting import get_uc_and_c_and_ec +from .stable_diffusion import (DDIMSampler, KSampler, PLMSSampler, HuggingFaceConceptsLibrary) +from .generator import infill_methods +from .util import choose_precision, choose_torch_device +from .image_util import (InitImageResizer, + PngWriter, + Txt2Mask, + configure_model_padding) -from ldm.invoke.globals import Globals, global_cache_dir -from ldm.invoke.args import metadata_from_png -from ldm.invoke.seamless import configure_model_padding +from .globals import Globals, global_cache_dir +from .args import metadata_from_png def fix_func(orig): if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index c35cf58995..767b632103 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -23,7 +23,7 @@ from tqdm import trange import invokeai.assets.web as web_assets from ..stable_diffusion.diffusion.ddpm import DiffusionWrapper -from ..util import rand_perlin_2d +from ..util.util import rand_perlin_2d downsampling = 8 CAUTION_IMG = 'caution.png' diff --git a/ldm/invoke/globals.py b/invokeai/backend/globals.py similarity index 98% rename from ldm/invoke/globals.py rename to invokeai/backend/globals.py index c6ee0bbc54..f41160a39b 100644 --- a/ldm/invoke/globals.py +++ b/invokeai/backend/globals.py @@ -1,5 +1,5 @@ ''' -ldm.invoke.globals defines a small number of global variables that would +invokeai.backend.globals defines a small number of global variables that would otherwise have to be passed through long and complex call chains. 
It defines a Namespace object named "Globals" that contains diff --git a/invokeai/backend/image_util/__init__.py b/invokeai/backend/image_util/__init__.py index 8cb3199d2d..3b55f094d7 100644 --- a/invokeai/backend/image_util/__init__.py +++ b/invokeai/backend/image_util/__init__.py @@ -9,6 +9,7 @@ from .pngwriter import (PngWriter, retrieve_metadata, write_metadata, ) +from .seamless import configure_model_padding def debug_image( debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False diff --git a/invokeai/backend/image_util/patchmatch.py b/invokeai/backend/image_util/patchmatch.py index 925fabe1bf..1c27a1d67d 100644 --- a/invokeai/backend/image_util/patchmatch.py +++ b/invokeai/backend/image_util/patchmatch.py @@ -4,7 +4,7 @@ wraps the actual patchmatch object. It respects the global "try_patchmatch" attribute, so that patchmatch loading can be suppressed or deferred ''' -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals import numpy as np class PatchMatch: diff --git a/ldm/invoke/seamless.py b/invokeai/backend/image_util/seamless.py similarity index 100% rename from ldm/invoke/seamless.py rename to invokeai/backend/image_util/seamless.py diff --git a/invokeai/backend/image_util/txt2mask.py b/invokeai/backend/image_util/txt2mask.py index ab9f94549b..6e64df3f31 100644 --- a/invokeai/backend/image_util/txt2mask.py +++ b/invokeai/backend/image_util/txt2mask.py @@ -32,7 +32,7 @@ import numpy as np from transformers import AutoProcessor, CLIPSegForImageSegmentation from PIL import Image, ImageOps from torchvision import transforms -from ldm.invoke.globals import global_cache_dir +from invokeai.backend.globals import global_cache_dir CLIPSEG_MODEL = 'CIDAS/clipseg-rd64-refined' CLIPSEG_SIZE = 352 diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py new file mode 100644 index 0000000000..e83527123f --- /dev/null +++ b/invokeai/backend/model_management/__init__.py @@ -0,0 +1,8 @@ +''' +Initialization file for invokeai.backend.model_management +''' +from .model_manager import ModelManager +from .convert_ckpt_to_diffusers import (load_pipeline_from_original_stable_diffusion_ckpt, + convert_ckpt_to_diffusers) +from ...frontend.merge.merge_diffusers import (merge_diffusion_models, + merge_diffusion_models_and_commit) diff --git a/ldm/invoke/ckpt_to_diffuser.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py similarity index 99% rename from ldm/invoke/ckpt_to_diffuser.py rename to invokeai/backend/model_management/convert_ckpt_to_diffusers.py index f6cac0b814..395432c1e6 100644 --- a/ldm/invoke/ckpt_to_diffuser.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -21,11 +21,11 @@ import re import torch import warnings from pathlib import Path -from ldm.invoke.globals import ( +from invokeai.backend.globals import ( global_cache_dir, global_config_dir, ) -from invokeai.models import ModelManager, SDLegacyType +from .model_manager import ModelManager, SDLegacyType from safetensors.torch import load_file from typing import Union @@ -56,7 +56,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS from diffusers.utils import is_safetensors_available from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig -from invokeai.generator import StableDiffusionGeneratorPipeline +from ..stable_diffusion import StableDiffusionGeneratorPipeline def shave_segments(path, 
n_shave_prefix_segments=1): """ @@ -1014,7 +1014,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt( return pipe -def convert_ckpt_to_diffuser( +def convert_ckpt_to_diffusers( checkpoint_path:Union[str,Path], dump_path:Union[str,Path], **kwargs, diff --git a/invokeai/backend/model_manager.py b/invokeai/backend/model_management/model_manager.py similarity index 91% rename from invokeai/backend/model_manager.py rename to invokeai/backend/model_management/model_manager.py index 1f99654610..34a10969cd 100644 --- a/invokeai/backend/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -31,14 +31,13 @@ from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from picklescan.scanner import scan_file_path -from .devices import CPU_DEVICE -from ldm.invoke.globals import Globals, global_cache_dir -from .util import ( +from ..util import CPU_DEVICE +from invokeai.backend.globals import Globals, global_cache_dir +from ..util import ( ask_user, download_with_resume, - url_attachment_name, ) -from .stable_diffusion import StableDiffusionGeneratorPipeline +from ..stable_diffusion import StableDiffusionGeneratorPipeline class SDLegacyType(Enum): V1 = 1 @@ -416,6 +415,51 @@ class ModelManager(object): return pipeline, width, height, model_hash + def _load_ckpt_model(self, model_name, mconfig): + config = mconfig.config + weights = mconfig.weights + vae = mconfig.get("vae") + width = mconfig.width + height = mconfig.height + + if not os.path.isabs(config): + config = os.path.join(Globals.root, config) + if not os.path.isabs(weights): + weights = os.path.normpath(os.path.join(Globals.root, weights)) + + # Convert to diffusers and return a diffusers pipeline + print( + f">> Converting legacy checkpoint {model_name} into a diffusers model..." + ) + + from . import load_pipeline_from_original_stable_diffusion_ckpt + + self.offload_model(self.current_model) + if vae_config := self._choose_diffusers_vae(model_name): + vae = self._load_vae(vae_config) + if self._has_cuda(): + torch.cuda.empty_cache() + pipeline = load_pipeline_from_original_stable_diffusion_ckpt( + checkpoint_path=weights, + original_config_file=config, + vae=vae, + return_generator_pipeline=True, + precision=torch.float16 + if self.precision == "float16" + else torch.float32, + ) + if self.sequential_offload: + pipeline.enable_offload_submodels(self.device) + else: + pipeline.to(self.device) + + return ( + pipeline, + width, + height, + "NOHASH", + ) + def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path: if isinstance(model_name, DictConfig) or isinstance(model_name, dict): mconfig = model_name @@ -519,66 +563,6 @@ class ModelManager(object): self.commit(commit_to_conf) return model_name - def import_ckpt_model( - self, - weights: Union[str, Path], - config: Union[str, Path] = "configs/stable-diffusion/v1-inference.yaml", - vae: Union[str, Path] = None, - model_name: str = None, - model_description: str = None, - commit_to_conf: Path = None, - ) -> str: - """ - Attempts to install the indicated ckpt file and returns True if successful. - - "weights" can be either a path-like object corresponding to a local .ckpt file - or a http/https URL pointing to a remote model. - - "vae" is a Path or str object pointing to a ckpt or safetensors file to be used - as the VAE for this model. - - "config" is the model config file to use with this ckpt file. It defaults to - v1-inference.yaml. If a URL is provided, the config will be downloaded. 
- - You can optionally provide a model name and/or description. If not provided, - then these will be derived from the weight file name. If you provide a commit_to_conf - path to the configuration file, then the new entry will be committed to the - models.yaml file. - - Return value is the name of the imported file, or None if an error occurred. - """ - if str(weights).startswith(("http:", "https:")): - model_name = model_name or url_attachment_name(weights) - - weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1") - config_path = self._resolve_path(config, "configs/stable-diffusion") - - if weights_path is None or not weights_path.exists(): - return - if config_path is None or not config_path.exists(): - return - - model_name = ( - model_name or Path(weights).stem - ) # note this gives ugly pathnames if used on a URL without a Content-Disposition header - model_description = ( - model_description or f"Imported stable diffusion weights file {model_name}" - ) - new_config = dict( - weights=str(weights_path), - config=str(config_path), - description=model_description, - format="ckpt", - width=512, - height=512, - ) - if vae: - new_config["vae"] = vae - self.add_model(model_name, new_config, True) - if commit_to_conf: - self.commit(commit_to_conf) - return model_name - @classmethod def probe_model_type(self, checkpoint: dict) -> SDLegacyType: """ @@ -746,36 +730,18 @@ class ModelManager(object): ) return - if convert: - diffuser_path = Path( - Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem - ) - model_name = self.convert_and_import( - model_path, - diffusers_path=diffuser_path, - vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), - model_name=model_name, - model_description=description, - original_config_file=model_config_file, - commit_to_conf=commit_to_conf, - ) - else: - model_name = self.import_ckpt_model( - model_path, - config=model_config_file, - model_name=model_name, - model_description=description, - vae=str( - Path( - Globals.root, - "models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt", - ) - ), - commit_to_conf=commit_to_conf, - ) - - if commit_to_conf: - self.commit(commit_to_conf) + diffuser_path = Path( + Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem + ) + model_name = self.convert_and_import( + model_path, + diffusers_path=diffuser_path, + vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), + model_name=model_name, + model_description=description, + original_config_file=model_config_file, + commit_to_conf=commit_to_conf, + ) return model_name def convert_and_import( @@ -800,7 +766,7 @@ class ModelManager(object): new_config = None - from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser + from . 
import convert_ckpt_to_diffusers if diffusers_path.exists(): print( @@ -815,7 +781,7 @@ class ModelManager(object): # By passing the specified VAE to the conversion function, the autoencoder # will be built into the model rather than tacked on afterward via the config file vae_model = self._load_vae(vae) if vae else None - convert_ckpt_to_diffuser( + convert_ckpt_to_diffusers ( ckpt_path, diffusers_path, extract_ema=True, diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py index 9ec8babf16..b9378cf5ed 100644 --- a/invokeai/backend/prompting/conditioning.py +++ b/invokeai/backend/prompting/conditioning.py @@ -13,9 +13,9 @@ from transformers import CLIPTokenizer, CLIPTextModel from compel import Compel from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser -from ..devices import torch_dtype +from ..util import torch_dtype from ..stable_diffusion import InvokeAIDiffuserComponent -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals def get_tokenizer(model) -> CLIPTokenizer: # TODO remove legacy ckpt fallback handling diff --git a/ldm/invoke/restoration/__init__.py b/invokeai/backend/restoration/__init__.py similarity index 100% rename from ldm/invoke/restoration/__init__.py rename to invokeai/backend/restoration/__init__.py diff --git a/ldm/invoke/restoration/base.py b/invokeai/backend/restoration/base.py similarity index 84% rename from ldm/invoke/restoration/base.py rename to invokeai/backend/restoration/base.py index 5b4bc483c2..036d56a271 100644 --- a/ldm/invoke/restoration/base.py +++ b/invokeai/backend/restoration/base.py @@ -23,16 +23,16 @@ class Restoration(): # Face Restore Models def load_gfpgan(self, gfpgan_model_path): - from ldm.invoke.restoration.gfpgan import GFPGAN + from .gfpgan import GFPGAN return GFPGAN(gfpgan_model_path) def load_codeformer(self): - from ldm.invoke.restoration.codeformer import CodeFormerRestoration + from .codeformer import CodeFormerRestoration return CodeFormerRestoration() # Upscale Models def load_esrgan(self, esrgan_bg_tile=400): - from ldm.invoke.restoration.realesrgan import ESRGAN + from .realesrgan import ESRGAN esrgan = ESRGAN(esrgan_bg_tile) print('>> ESRGAN Initialized') return esrgan; diff --git a/ldm/invoke/restoration/codeformer.py b/invokeai/backend/restoration/codeformer.py similarity index 98% rename from ldm/invoke/restoration/codeformer.py rename to invokeai/backend/restoration/codeformer.py index cac0d839bc..ab4ce82376 100644 --- a/ldm/invoke/restoration/codeformer.py +++ b/invokeai/backend/restoration/codeformer.py @@ -3,7 +3,7 @@ import torch import numpy as np import warnings import sys -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals pretrained_model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' diff --git a/ldm/invoke/restoration/codeformer_arch.py b/invokeai/backend/restoration/codeformer_arch.py similarity index 99% rename from ldm/invoke/restoration/codeformer_arch.py rename to invokeai/backend/restoration/codeformer_arch.py index eb7d122aad..dc71c86b33 100644 --- a/ldm/invoke/restoration/codeformer_arch.py +++ b/invokeai/backend/restoration/codeformer_arch.py @@ -5,7 +5,7 @@ from torch import nn, Tensor import torch.nn.functional as F from typing import Optional, List -from ldm.invoke.restoration.vqgan_arch import * +from .vqgan_arch import * from basicsr.utils import get_root_logger from basicsr.utils.registry 
import ARCH_REGISTRY @@ -25,7 +25,6 @@ def calc_mean_std(feat, eps=1e-5): feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) return feat_mean, feat_std - def adaptive_instance_normalization(content_feat, style_feat): """Adaptive instance normalization. diff --git a/ldm/invoke/restoration/gfpgan.py b/invokeai/backend/restoration/gfpgan.py similarity index 98% rename from ldm/invoke/restoration/gfpgan.py rename to invokeai/backend/restoration/gfpgan.py index 748924937d..5fff52583f 100644 --- a/ldm/invoke/restoration/gfpgan.py +++ b/invokeai/backend/restoration/gfpgan.py @@ -3,7 +3,7 @@ import warnings import os import sys import numpy as np -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals from PIL import Image diff --git a/ldm/invoke/restoration/outcrop.py b/invokeai/backend/restoration/outcrop.py similarity index 100% rename from ldm/invoke/restoration/outcrop.py rename to invokeai/backend/restoration/outcrop.py diff --git a/ldm/invoke/restoration/outpaint.py b/invokeai/backend/restoration/outpaint.py similarity index 100% rename from ldm/invoke/restoration/outpaint.py rename to invokeai/backend/restoration/outpaint.py diff --git a/ldm/invoke/restoration/realesrgan.py b/invokeai/backend/restoration/realesrgan.py similarity index 98% rename from ldm/invoke/restoration/realesrgan.py rename to invokeai/backend/restoration/realesrgan.py index a8c64c2548..0b99f8bbb6 100644 --- a/ldm/invoke/restoration/realesrgan.py +++ b/invokeai/backend/restoration/realesrgan.py @@ -3,7 +3,7 @@ import warnings import numpy as np import os -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals from PIL import Image from PIL.Image import Image as ImageType diff --git a/ldm/invoke/restoration/vqgan_arch.py b/invokeai/backend/restoration/vqgan_arch.py similarity index 100% rename from ldm/invoke/restoration/vqgan_arch.py rename to invokeai/backend/restoration/vqgan_arch.py diff --git a/invokeai/backend/stable_diffusion/concepts_lib.py b/invokeai/backend/stable_diffusion/concepts_lib.py index c774f29674..63d0a660a0 100644 --- a/invokeai/backend/stable_diffusion/concepts_lib.py +++ b/invokeai/backend/stable_diffusion/concepts_lib.py @@ -10,7 +10,7 @@ import traceback from typing import Callable from urllib import request, error as ul_error from huggingface_hub import HfFolder, hf_hub_url, ModelSearchArguments, ModelFilter, HfApi -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals class HuggingFaceConceptsLibrary(object): def __init__(self, root=None): diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index fcc958d61b..c4ac77aaf6 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -26,11 +26,11 @@ from torchvision.transforms.functional import resize as tv_resize from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from typing_extensions import ParamSpec -from ldm.invoke.globals import Globals -from ..stable_diffusion.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, AttentionMapSaver -from ..stable_diffusion.textual_inversion_manager import TextualInversionManager -from ..stable_diffusion.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup -from ..devices import normalize_device, CPU_DEVICE +from invokeai.backend.globals import Globals +from .diffusion import InvokeAIDiffuserComponent, 
PostprocessingSettings, AttentionMapSaver +from .textual_inversion_manager import TextualInversionManager +from .offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup +from ..util import normalize_device, CPU_DEVICE from compel import EmbeddingsProvider @dataclass diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py index 6e91e5c868..3373bf0e61 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py @@ -15,7 +15,7 @@ from torch import nn from compel.cross_attention_control import Arguments from diffusers.models.unet_2d_condition import UNet2DConditionModel from diffusers.models.cross_attention import AttnProcessor -from ...devices import torch_dtype +from ...util import torch_dtype class CrossAttentionType(enum.Enum): diff --git a/invokeai/backend/stable_diffusion/diffusion/ddpm.py b/invokeai/backend/stable_diffusion/diffusion/ddpm.py index deba4ebaf1..36251a0940 100644 --- a/invokeai/backend/stable_diffusion/diffusion/ddpm.py +++ b/invokeai/backend/stable_diffusion/diffusion/ddpm.py @@ -23,7 +23,7 @@ from omegaconf import ListConfig import urllib from ..textual_inversion_manager import TextualInversionManager -from ...util import ( +from ...util.util import ( log_txt_as_img, exists, default, diff --git a/invokeai/backend/stable_diffusion/diffusion/plms.py b/invokeai/backend/stable_diffusion/diffusion/plms.py index 2dfead482b..f4aa9bb12d 100644 --- a/invokeai/backend/stable_diffusion/diffusion/plms.py +++ b/invokeai/backend/stable_diffusion/diffusion/plms.py @@ -4,7 +4,7 @@ import torch import numpy as np from tqdm import tqdm from functools import partial -from ...devices import choose_torch_device +from ...util import choose_torch_device from .shared_invokeai_diffusion import InvokeAIDiffuserComponent from .sampler import Sampler from ..diffusionmodules.util import noise_like diff --git a/invokeai/backend/stable_diffusion/diffusion/sampler.py b/invokeai/backend/stable_diffusion/diffusion/sampler.py index 656897ee31..5f9ee2da29 100644 --- a/invokeai/backend/stable_diffusion/diffusion/sampler.py +++ b/invokeai/backend/stable_diffusion/diffusion/sampler.py @@ -7,7 +7,7 @@ import torch import numpy as np from tqdm import tqdm from functools import partial -from ...devices import choose_torch_device +from ...util import choose_torch_device from .shared_invokeai_diffusion import InvokeAIDiffuserComponent from ..diffusionmodules.util import ( diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 32b978f704..b02a381d70 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -8,7 +8,7 @@ import torch from diffusers.models.cross_attention import AttnProcessor from typing_extensions import TypeAlias -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals from .cross_attention_control import Arguments, \ restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \ CrossAttentionType, SwapCrossAttnContext diff --git a/invokeai/backend/stable_diffusion/diffusionmodules/util.py b/invokeai/backend/stable_diffusion/diffusionmodules/util.py index a943e51d9e..a23f1abba4 100644 --- 
a/invokeai/backend/stable_diffusion/diffusionmodules/util.py +++ b/invokeai/backend/stable_diffusion/diffusionmodules/util.py @@ -15,7 +15,7 @@ import torch.nn as nn import numpy as np from einops import repeat -from ...util import instantiate_from_config +from ...util.util import instantiate_from_config def make_beta_schedule( diff --git a/invokeai/backend/stable_diffusion/encoders/modules.py b/invokeai/backend/stable_diffusion/encoders/modules.py index 32ac0de7a1..3c20b8d119 100644 --- a/invokeai/backend/stable_diffusion/encoders/modules.py +++ b/invokeai/backend/stable_diffusion/encoders/modules.py @@ -10,7 +10,7 @@ from einops import repeat from transformers import CLIPTokenizer, CLIPTextModel from ldm.invoke.devices import choose_torch_device -from ldm.invoke.globals import global_cache_dir +from invokeai.backend.globals import global_cache_dir from ldm.modules.x_transformer import ( Encoder, TransformerWrapper, diff --git a/invokeai/backend/training/__init__.py b/invokeai/backend/training/__init__.py new file mode 100644 index 0000000000..16bde19390 --- /dev/null +++ b/invokeai/backend/training/__init__.py @@ -0,0 +1,4 @@ +''' +Initialization file for invokeai.backend.training +''' +from .textual_inversion_training import do_textual_inversion_training, parse_args diff --git a/ldm/invoke/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py similarity index 99% rename from ldm/invoke/training/textual_inversion_training.py rename to invokeai/backend/training/textual_inversion_training.py index 58c67b2ca8..4e9bd36eef 100644 --- a/ldm/invoke/training/textual_inversion_training.py +++ b/invokeai/backend/training/textual_inversion_training.py @@ -48,7 +48,7 @@ from transformers import CLIPTextModel, CLIPTokenizer # invokeai stuff from ldm.invoke.args import ArgFormatter, PagingArgumentParser -from ldm.invoke.globals import Globals, global_cache_dir +from invokeai.backend.globals import Globals, global_cache_dir if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py new file mode 100644 index 0000000000..434e03c273 --- /dev/null +++ b/invokeai/backend/util/__init__.py @@ -0,0 +1,18 @@ +''' +Initialization file for invokeai.backend.util +''' +from .devices import (choose_torch_device, + choose_precision, + normalize_device, + torch_dtype, + CPU_DEVICE, + CUDA_DEVICE, + MPS_DEVICE, + ) +from .util import (ask_user, + download_with_resume, + instantiate_from_config, + url_attachment_name, + ) +from .log import write_log + diff --git a/invokeai/backend/devices.py b/invokeai/backend/util/devices.py similarity index 94% rename from invokeai/backend/devices.py rename to invokeai/backend/util/devices.py index 8d8132e878..167847eae0 100644 --- a/invokeai/backend/devices.py +++ b/invokeai/backend/util/devices.py @@ -5,9 +5,11 @@ from contextlib import nullcontext import torch from torch import autocast -from ldm.invoke.globals import Globals +from invokeai.backend.globals import Globals CPU_DEVICE = torch.device("cpu") +CUDA_DEVICE = torch.device("cuda") +MPS_DEVICE = torch.device("mps") def choose_torch_device() -> torch.device: '''Convenience routine for guessing which GPU device to run model on''' diff --git a/ldm/invoke/log.py b/invokeai/backend/util/log.py similarity index 100% rename from ldm/invoke/log.py rename to invokeai/backend/util/log.py diff --git a/invokeai/backend/util.py b/invokeai/backend/util/util.py
similarity index 100% rename from invokeai/backend/util.py rename to invokeai/backend/util/util.py diff --git a/invokeai/backend/web/__init__.py b/invokeai/backend/web/__init__.py new file mode 100644 index 0000000000..ef771f61be --- /dev/null +++ b/invokeai/backend/web/__init__.py @@ -0,0 +1,4 @@ +''' +Initialization file for the web backend. +''' +from .invoke_ai_web_server import InvokeAIWebServer diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/web/invoke_ai_web_server.py similarity index 98% rename from invokeai/backend/invoke_ai_web_server.py rename to invokeai/backend/web/invoke_ai_web_server.py index 7c6177803c..c05de2f831 100644 --- a/invokeai/backend/invoke_ai_web_server.py +++ b/invokeai/backend/web/invoke_ai_web_server.py @@ -12,7 +12,7 @@ from threading import Event from uuid import uuid4 import eventlet -import invokeai.frontend.dist as frontend +import invokeai.frontend.web.dist as frontend from PIL import Image from PIL.Image import Image as ImageType from compel.prompt_parser import Blend @@ -20,24 +20,24 @@ from flask import Flask, redirect, send_from_directory, request, make_response from flask_socketio import SocketIO from werkzeug.utils import secure_filename -from invokeai.backend.modules.get_canvas_generation_mode import ( +from .modules.get_canvas_generation_mode import ( get_canvas_generation_mode, ) from .modules.parameters import parameters_to_command -from .prompting import (get_tokens_for_prompt_object, - get_prompt_structure, - get_tokenizer - ) -from .image_util import PngWriter, retrieve_metadata -from .generator import infill_methods -from .stable_diffusion import PipelineIntermediateState +from ..prompting import (get_tokens_for_prompt_object, + get_prompt_structure, + get_tokenizer + ) +from ..image_util import PngWriter, retrieve_metadata +from ..generator import infill_methods +from ..stable_diffusion import PipelineIntermediateState -from ldm.generate import Generate -from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash -from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir, - global_models_dir - ) -from ldm.invoke.merge_diffusers import merge_diffusion_models +from .. import Generate +from ..args import Args, APP_ID, APP_VERSION, calculate_init_img_hash +from ..globals import ( Globals, global_converted_ckpts_dir, + global_models_dir + ) +from ..model_management import merge_diffusion_models # Loading Arguments opt = Args() @@ -236,7 +236,7 @@ class InvokeAIWebServer: sys.exit(0) else: useSSL = args.certfile or args.keyfile - print(">> Started Invoke AI Web Server!") + print(">> Started Invoke AI Web Server") if self.host == "0.0.0.0": print( f"Point your browser at http{'s' if useSSL else ''}://localhost:{self.port} or use the host's DNS name or IP address." 
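Taken together, the hunks above consolidate the public Python API under invokeai.backend. The following is a minimal, illustrative sketch only (not part of the patch; based solely on the re-exports and renames shown above) of how a downstream caller would import from the restructured package:

    # illustrative sketch: import surface implied by the re-exports above
    from invokeai.backend import Generate, ModelManager            # re-exported by invokeai/backend/__init__.py
    from invokeai.backend.globals import Globals, global_cache_dir # formerly ldm.invoke.globals
    from invokeai.backend.util import choose_torch_device, torch_dtype
    from invokeai.backend.web import InvokeAIWebServer             # web server now lives under backend/web
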
diff --git a/ldm/invoke/app/services/__init__.py b/invokeai/backend/web/modules/__init__.py similarity index 100% rename from ldm/invoke/app/services/__init__.py rename to invokeai/backend/web/modules/__init__.py diff --git a/invokeai/backend/modules/create_cmd_parser.py b/invokeai/backend/web/modules/create_cmd_parser.py similarity index 100% rename from invokeai/backend/modules/create_cmd_parser.py rename to invokeai/backend/web/modules/create_cmd_parser.py diff --git a/invokeai/backend/modules/get_canvas_generation_mode.py b/invokeai/backend/web/modules/get_canvas_generation_mode.py similarity index 100% rename from invokeai/backend/modules/get_canvas_generation_mode.py rename to invokeai/backend/web/modules/get_canvas_generation_mode.py diff --git a/invokeai/backend/modules/parameters.py b/invokeai/backend/web/modules/parameters.py similarity index 97% rename from invokeai/backend/modules/parameters.py rename to invokeai/backend/web/modules/parameters.py index 98799c284b..a650fa37f9 100644 --- a/invokeai/backend/modules/parameters.py +++ b/invokeai/backend/web/modules/parameters.py @@ -1,4 +1,4 @@ -from invokeai.backend.modules.parse_seed_weights import parse_seed_weights +from .parse_seed_weights import parse_seed_weights import argparse SAMPLER_CHOICES = [ diff --git a/invokeai/backend/modules/parse_seed_weights.py b/invokeai/backend/web/modules/parse_seed_weights.py similarity index 100% rename from invokeai/backend/modules/parse_seed_weights.py rename to invokeai/backend/web/modules/parse_seed_weights.py diff --git a/invokeai/backend/modules/test_images/init-img_full_transparency.png b/invokeai/backend/web/modules/test_images/init-img_full_transparency.png similarity index 100% rename from invokeai/backend/modules/test_images/init-img_full_transparency.png rename to invokeai/backend/web/modules/test_images/init-img_full_transparency.png diff --git a/invokeai/backend/modules/test_images/init-img_opaque.png b/invokeai/backend/web/modules/test_images/init-img_opaque.png similarity index 100% rename from invokeai/backend/modules/test_images/init-img_opaque.png rename to invokeai/backend/web/modules/test_images/init-img_opaque.png diff --git a/invokeai/backend/modules/test_images/init-img_partial_transparency.png b/invokeai/backend/web/modules/test_images/init-img_partial_transparency.png similarity index 100% rename from invokeai/backend/modules/test_images/init-img_partial_transparency.png rename to invokeai/backend/web/modules/test_images/init-img_partial_transparency.png diff --git a/invokeai/backend/modules/test_images/init-mask_has_mask.png b/invokeai/backend/web/modules/test_images/init-mask_has_mask.png similarity index 100% rename from invokeai/backend/modules/test_images/init-mask_has_mask.png rename to invokeai/backend/web/modules/test_images/init-mask_has_mask.png diff --git a/invokeai/backend/modules/test_images/init-mask_no_mask.png b/invokeai/backend/web/modules/test_images/init-mask_no_mask.png similarity index 100% rename from invokeai/backend/modules/test_images/init-mask_no_mask.png rename to invokeai/backend/web/modules/test_images/init-mask_no_mask.png diff --git a/ldm/invoke/CLI.py b/invokeai/frontend/CLI/CLI.py similarity index 98% rename from ldm/invoke/CLI.py rename to invokeai/frontend/CLI/CLI.py index 80442cffd0..0e74e4bff5 100644 --- a/ldm/invoke/CLI.py +++ b/invokeai/frontend/CLI/CLI.py @@ -18,21 +18,22 @@ import pyparsing # type: ignore import invokeai.version -from ..generate import Generate -from .args import (Args, dream_cmd_from_png, metadata_dumps, 
+from ...backend import Generate +from ...backend.args import (Args, + dream_cmd_from_png, + metadata_dumps, metadata_from_png) -from invokeai.backend.stable_diffusion import PipelineIntermediateState -from invokeai.backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata -from invokeai.backend import ModelManager -from .globals import Globals -from .log import write_log +from ...backend.stable_diffusion import PipelineIntermediateState +from ...backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata +from ...backend import ModelManager +from ...backend.globals import Globals +from ...backend.util import write_log from .readline import Completer, get_completer -from invokeai.backend.util import url_attachment_name +from ...backend.util import url_attachment_name # global used in multiple functions (fix) infile = None - def main(): """Initialize command-line parsers and the diffusion model""" global infile @@ -82,8 +83,6 @@ def main(): # when the frozen CLIP tokenizer is imported import transformers # type: ignore - from ldm.generate import Generate - transformers.logging.set_verbosity_error() import diffusers @@ -1021,7 +1020,7 @@ def get_next_command(infile=None, model_name="no model") -> str: # command stri def invoke_ai_web_server_loop(gen: Generate, gfpgan, codeformer, esrgan): print("\n* --web was specified, starting web server...") - from invokeai.backend.invoke_ai_web_server import InvokeAIWebServer + from invokeai.backend.web import InvokeAIWebServer # Change working directory to the stable-diffusion directory os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) @@ -1075,7 +1074,7 @@ def load_face_restoration(opt): try: gfpgan, codeformer, esrgan = None, None, None if opt.restore or opt.esrgan: - from ldm.invoke.restoration import Restoration + from invokeai.backend.restoration import Restoration restoration = Restoration() if opt.restore: diff --git a/invokeai/frontend/CLI/__init__.py b/invokeai/frontend/CLI/__init__.py new file mode 100644 index 0000000000..4db5e778ff --- /dev/null +++ b/invokeai/frontend/CLI/__init__.py @@ -0,0 +1,4 @@ +''' +Initialization file for invokeai.frontend.CLI +''' +from .CLI import main as invokeai_command_line_interface diff --git a/ldm/invoke/readline.py b/invokeai/frontend/CLI/readline.py similarity index 99% rename from ldm/invoke/readline.py rename to invokeai/frontend/CLI/readline.py index 7fe74fc953..21e064662b 100644 --- a/ldm/invoke/readline.py +++ b/invokeai/frontend/CLI/readline.py @@ -11,9 +11,9 @@ seeds: import os import re import atexit -from ldm.invoke.args import Args -from ldm.invoke.globals import Globals -from invokeai.backend.stable_diffusion import HuggingFaceConceptsLibrary +from ...backend.args import Args +from ...backend.globals import Globals +from ...backend.stable_diffusion import HuggingFaceConceptsLibrary # ---------------readline utilities--------------------- try: diff --git a/invokeai/frontend/__init__.py b/invokeai/frontend/__init__.py new file mode 100644 index 0000000000..98fdf870e9 --- /dev/null +++ b/invokeai/frontend/__init__.py @@ -0,0 +1,3 @@ +''' +Initialization file for invokeai.frontend +''' diff --git a/invokeai/frontend/config/__init__.py b/invokeai/frontend/config/__init__.py new file mode 100644 index 0000000000..0d1d2aec75 --- /dev/null +++ b/invokeai/frontend/config/__init__.py @@ -0,0 +1,7 @@ +''' +Initialization file for invokeai.frontend.config +''' +from .model_install import main as invokeai_model_install +from 
.invokeai_configure import main as invokeai_configure +from .invokeai_update import main as invokeai_update + diff --git a/invokeai/frontend/config/invokeai_configure.py b/invokeai/frontend/config/invokeai_configure.py new file mode 100644 index 0000000000..748d7bb8ca --- /dev/null +++ b/invokeai/frontend/config/invokeai_configure.py @@ -0,0 +1,4 @@ +''' +Wrapper for invokeai.backend.configure.invokeai_configure +''' +from ...backend.config.invokeai_configure import main diff --git a/ldm/invoke/config/invokeai_update.py b/invokeai/frontend/config/invokeai_update.py similarity index 98% rename from ldm/invoke/config/invokeai_update.py rename to invokeai/frontend/config/invokeai_update.py index 8ad7290136..d3a532c627 100644 --- a/ldm/invoke/config/invokeai_update.py +++ b/invokeai/frontend/config/invokeai_update.py @@ -13,7 +13,7 @@ from rich.style import Style from rich.syntax import Syntax from rich.text import Text -from ldm.invoke import __version__ +from invokeai.version import __version__ INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive" INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases" diff --git a/ldm/invoke/config/model_install.py b/invokeai/frontend/config/model_install.py similarity index 97% rename from ldm/invoke/config/model_install.py rename to invokeai/frontend/config/model_install.py index bdffe61a55..f64a656211 100644 --- a/ldm/invoke/config/model_install.py +++ b/invokeai/frontend/config/model_install.py @@ -22,13 +22,13 @@ from npyscreen import widget from omegaconf import OmegaConf from shutil import get_terminal_size -from invokeai.backend.devices import choose_precision, choose_torch_device -from ..globals import Globals, global_config_dir -from .model_install_backend import (Dataset_path, default_config_file, - default_dataset, get_root, - install_requested_models, - recommended_datasets, - ) +from ...backend.util import choose_precision, choose_torch_device +from invokeai.backend.globals import Globals, global_config_dir +from ...backend.config.model_install_backend import (Dataset_path, default_config_file, + default_dataset, get_root, + install_requested_models, + recommended_datasets, + ) from .widgets import (MultiSelectColumns, TextBox, OffsetButtonPress, CenteredTitleText, set_min_terminal_size, diff --git a/ldm/invoke/config/widgets.py b/invokeai/frontend/config/widgets.py similarity index 100% rename from ldm/invoke/config/widgets.py rename to invokeai/frontend/config/widgets.py diff --git a/invokeai/frontend/merge/__init__.py b/invokeai/frontend/merge/__init__.py new file mode 100644 index 0000000000..8e46a0621b --- /dev/null +++ b/invokeai/frontend/merge/__init__.py @@ -0,0 +1,4 @@ +''' +Initialization file for invokeai.frontend.merge +''' +from .merge_diffusers import main as invokeai_merge_diffusers diff --git a/ldm/invoke/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py similarity index 97% rename from ldm/invoke/merge_diffusers.py rename to invokeai/frontend/merge/merge_diffusers.py index 10496d5b57..82765af96d 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/invokeai/frontend/merge/merge_diffusers.py @@ -20,10 +20,10 @@ from diffusers import logging as dlogging from npyscreen import widget from omegaconf import OmegaConf -from ldm.invoke.config.widgets import FloatTitleSlider -from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file, +from ...frontend.config.widgets import FloatTitleSlider +from ...backend.globals import (Globals, global_cache_dir, global_config_file, 
global_models_dir, global_set_root) -from invokeai.backend import ModelManager +from ...backend.model_management import ModelManager DEST_MERGED_MODEL_DIR = "merged_models" @@ -199,13 +199,13 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): self.add_widget_intelligent( npyscreen.FixedText, color="CONTROL", - value=f"Select two models to merge and optionally a third.", + value="Select two models to merge and optionally a third.", editable=False, ) self.add_widget_intelligent( npyscreen.FixedText, color="CONTROL", - value=f"Use up and down arrows to move, to select an item, and to move from one field to the next.", + value="Use up and down arrows to move, to select an item, and to move from one field to the next.", editable=False, ) self.add_widget_intelligent( @@ -453,9 +453,9 @@ def main(): "** You need to have at least two diffusers models defined in models.yaml in order to merge" ) else: - print(f"** Not enough room for the user interface. Try making this window larger.") + print("** Not enough room for the user interface. Try making this window larger.") sys.exit(-1) - except Exception as e: + except Exception: print(">> An error occurred:") traceback.print_exc() sys.exit(-1) diff --git a/invokeai/frontend/training/__init__.py b/invokeai/frontend/training/__init__.py new file mode 100644 index 0000000000..1aeece6b5f --- /dev/null +++ b/invokeai/frontend/training/__init__.py @@ -0,0 +1,5 @@ +''' +Initialization file for invokeai.frontend.training +''' +from .textual_inversion import main as invokeai_textual_inversion + diff --git a/ldm/invoke/training/textual_inversion.py b/invokeai/frontend/training/textual_inversion.py similarity index 99% rename from ldm/invoke/training/textual_inversion.py rename to invokeai/frontend/training/textual_inversion.py index 2961e4d99c..5cd5d71909 100755 --- a/ldm/invoke/training/textual_inversion.py +++ b/invokeai/frontend/training/textual_inversion.py @@ -20,8 +20,8 @@ import npyscreen from npyscreen import widget from omegaconf import OmegaConf -from ldm.invoke.globals import Globals, global_set_root -from ldm.invoke.training.textual_inversion_training import ( +from invokeai.backend.globals import Globals, global_set_root +from ...backend.training import ( do_textual_inversion_training, parse_args, ) diff --git a/invokeai/frontend/.babelrc b/invokeai/frontend/web/.babelrc similarity index 100% rename from invokeai/frontend/.babelrc rename to invokeai/frontend/web/.babelrc diff --git a/invokeai/frontend/.eslintignore b/invokeai/frontend/web/.eslintignore similarity index 100% rename from invokeai/frontend/.eslintignore rename to invokeai/frontend/web/.eslintignore diff --git a/invokeai/frontend/.eslintrc.js b/invokeai/frontend/web/.eslintrc.js similarity index 100% rename from invokeai/frontend/.eslintrc.js rename to invokeai/frontend/web/.eslintrc.js diff --git a/invokeai/frontend/.gitignore b/invokeai/frontend/web/.gitignore similarity index 100% rename from invokeai/frontend/.gitignore rename to invokeai/frontend/web/.gitignore diff --git a/invokeai/frontend/.husky/pre-commit b/invokeai/frontend/web/.husky/pre-commit similarity index 100% rename from invokeai/frontend/.husky/pre-commit rename to invokeai/frontend/web/.husky/pre-commit diff --git a/invokeai/frontend/.prettierignore b/invokeai/frontend/web/.prettierignore similarity index 100% rename from invokeai/frontend/.prettierignore rename to invokeai/frontend/web/.prettierignore diff --git a/invokeai/frontend/.prettierrc.js b/invokeai/frontend/web/.prettierrc.js similarity index 100% 
rename from invokeai/frontend/.prettierrc.js rename to invokeai/frontend/web/.prettierrc.js diff --git a/invokeai/frontend/.yarn/releases/yarn-1.22.19.cjs b/invokeai/frontend/web/.yarn/releases/yarn-1.22.19.cjs similarity index 100% rename from invokeai/frontend/.yarn/releases/yarn-1.22.19.cjs rename to invokeai/frontend/web/.yarn/releases/yarn-1.22.19.cjs diff --git a/invokeai/frontend/.yarnrc b/invokeai/frontend/web/.yarnrc similarity index 100% rename from invokeai/frontend/.yarnrc rename to invokeai/frontend/web/.yarnrc diff --git a/invokeai/frontend/.yarnrc.yml b/invokeai/frontend/web/.yarnrc.yml similarity index 100% rename from invokeai/frontend/.yarnrc.yml rename to invokeai/frontend/web/.yarnrc.yml diff --git a/invokeai/frontend/README.md b/invokeai/frontend/web/README.md similarity index 100% rename from invokeai/frontend/README.md rename to invokeai/frontend/web/README.md diff --git a/invokeai/frontend/web/__init__.py b/invokeai/frontend/web/__init__.py new file mode 100644 index 0000000000..010129ece2 --- /dev/null +++ b/invokeai/frontend/web/__init__.py @@ -0,0 +1,3 @@ +''' +Initialization file for invokeai.frontend.web +''' diff --git a/invokeai/frontend/dist/assets/Inter-Bold-790c108b.ttf b/invokeai/frontend/web/dist/assets/Inter-Bold-790c108b.ttf similarity index 100% rename from invokeai/frontend/dist/assets/Inter-Bold-790c108b.ttf rename to invokeai/frontend/web/dist/assets/Inter-Bold-790c108b.ttf diff --git a/invokeai/frontend/dist/assets/Inter-b9a8e5e2.ttf b/invokeai/frontend/web/dist/assets/Inter-b9a8e5e2.ttf similarity index 100% rename from invokeai/frontend/dist/assets/Inter-b9a8e5e2.ttf rename to invokeai/frontend/web/dist/assets/Inter-b9a8e5e2.ttf diff --git a/invokeai/frontend/dist/assets/favicon-0d253ced.ico b/invokeai/frontend/web/dist/assets/favicon-0d253ced.ico similarity index 100% rename from invokeai/frontend/dist/assets/favicon-0d253ced.ico rename to invokeai/frontend/web/dist/assets/favicon-0d253ced.ico diff --git a/invokeai/frontend/dist/assets/index-0e39fbc4.js b/invokeai/frontend/web/dist/assets/index-0e39fbc4.js similarity index 100% rename from invokeai/frontend/dist/assets/index-0e39fbc4.js rename to invokeai/frontend/web/dist/assets/index-0e39fbc4.js diff --git a/invokeai/frontend/dist/assets/index-14cb2922.css b/invokeai/frontend/web/dist/assets/index-14cb2922.css similarity index 100% rename from invokeai/frontend/dist/assets/index-14cb2922.css rename to invokeai/frontend/web/dist/assets/index-14cb2922.css diff --git a/invokeai/frontend/dist/assets/logo-13003d72.png b/invokeai/frontend/web/dist/assets/logo-13003d72.png similarity index 100% rename from invokeai/frontend/dist/assets/logo-13003d72.png rename to invokeai/frontend/web/dist/assets/logo-13003d72.png diff --git a/invokeai/frontend/dist/index.html b/invokeai/frontend/web/dist/index.html similarity index 100% rename from invokeai/frontend/dist/index.html rename to invokeai/frontend/web/dist/index.html diff --git a/invokeai/frontend/dist/locales/ar.json b/invokeai/frontend/web/dist/locales/ar.json similarity index 100% rename from invokeai/frontend/dist/locales/ar.json rename to invokeai/frontend/web/dist/locales/ar.json diff --git a/invokeai/frontend/dist/locales/de.json b/invokeai/frontend/web/dist/locales/de.json similarity index 100% rename from invokeai/frontend/dist/locales/de.json rename to invokeai/frontend/web/dist/locales/de.json diff --git a/invokeai/frontend/dist/locales/en.json b/invokeai/frontend/web/dist/locales/en.json similarity index 100% rename from 
invokeai/frontend/dist/locales/en.json rename to invokeai/frontend/web/dist/locales/en.json diff --git a/invokeai/frontend/dist/locales/es.json b/invokeai/frontend/web/dist/locales/es.json similarity index 100% rename from invokeai/frontend/dist/locales/es.json rename to invokeai/frontend/web/dist/locales/es.json diff --git a/invokeai/frontend/dist/locales/fr.json b/invokeai/frontend/web/dist/locales/fr.json similarity index 100% rename from invokeai/frontend/dist/locales/fr.json rename to invokeai/frontend/web/dist/locales/fr.json diff --git a/invokeai/frontend/dist/locales/it.json b/invokeai/frontend/web/dist/locales/it.json similarity index 100% rename from invokeai/frontend/dist/locales/it.json rename to invokeai/frontend/web/dist/locales/it.json diff --git a/invokeai/frontend/dist/locales/ja.json b/invokeai/frontend/web/dist/locales/ja.json similarity index 100% rename from invokeai/frontend/dist/locales/ja.json rename to invokeai/frontend/web/dist/locales/ja.json diff --git a/invokeai/frontend/dist/locales/nl.json b/invokeai/frontend/web/dist/locales/nl.json similarity index 100% rename from invokeai/frontend/dist/locales/nl.json rename to invokeai/frontend/web/dist/locales/nl.json diff --git a/invokeai/frontend/dist/locales/pl.json b/invokeai/frontend/web/dist/locales/pl.json similarity index 100% rename from invokeai/frontend/dist/locales/pl.json rename to invokeai/frontend/web/dist/locales/pl.json diff --git a/invokeai/frontend/dist/locales/pt_BR.json b/invokeai/frontend/web/dist/locales/pt_BR.json similarity index 100% rename from invokeai/frontend/dist/locales/pt_BR.json rename to invokeai/frontend/web/dist/locales/pt_BR.json diff --git a/invokeai/frontend/dist/locales/ru.json b/invokeai/frontend/web/dist/locales/ru.json similarity index 100% rename from invokeai/frontend/dist/locales/ru.json rename to invokeai/frontend/web/dist/locales/ru.json diff --git a/invokeai/frontend/dist/locales/uk.json b/invokeai/frontend/web/dist/locales/uk.json similarity index 100% rename from invokeai/frontend/dist/locales/uk.json rename to invokeai/frontend/web/dist/locales/uk.json diff --git a/invokeai/frontend/dist/locales/zh_CN.json b/invokeai/frontend/web/dist/locales/zh_CN.json similarity index 100% rename from invokeai/frontend/dist/locales/zh_CN.json rename to invokeai/frontend/web/dist/locales/zh_CN.json diff --git a/invokeai/frontend/favicon.ico b/invokeai/frontend/web/favicon.ico similarity index 100% rename from invokeai/frontend/favicon.ico rename to invokeai/frontend/web/favicon.ico diff --git a/invokeai/frontend/index.d.ts b/invokeai/frontend/web/index.d.ts similarity index 100% rename from invokeai/frontend/index.d.ts rename to invokeai/frontend/web/index.d.ts diff --git a/invokeai/frontend/index.html b/invokeai/frontend/web/index.html similarity index 100% rename from invokeai/frontend/index.html rename to invokeai/frontend/web/index.html diff --git a/invokeai/frontend/package.json b/invokeai/frontend/web/package.json similarity index 100% rename from invokeai/frontend/package.json rename to invokeai/frontend/web/package.json diff --git a/invokeai/frontend/patches/redux-deep-persist+1.0.7.patch b/invokeai/frontend/web/patches/redux-deep-persist+1.0.7.patch similarity index 100% rename from invokeai/frontend/patches/redux-deep-persist+1.0.7.patch rename to invokeai/frontend/web/patches/redux-deep-persist+1.0.7.patch diff --git a/invokeai/frontend/patches/redux-persist+6.0.0.patch b/invokeai/frontend/web/patches/redux-persist+6.0.0.patch similarity index 100% rename from 
invokeai/frontend/patches/redux-persist+6.0.0.patch rename to invokeai/frontend/web/patches/redux-persist+6.0.0.patch diff --git a/invokeai/frontend/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json similarity index 100% rename from invokeai/frontend/public/locales/ar.json rename to invokeai/frontend/web/public/locales/ar.json diff --git a/invokeai/frontend/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json similarity index 100% rename from invokeai/frontend/public/locales/de.json rename to invokeai/frontend/web/public/locales/de.json diff --git a/invokeai/frontend/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json similarity index 100% rename from invokeai/frontend/public/locales/en.json rename to invokeai/frontend/web/public/locales/en.json diff --git a/invokeai/frontend/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json similarity index 100% rename from invokeai/frontend/public/locales/es.json rename to invokeai/frontend/web/public/locales/es.json diff --git a/invokeai/frontend/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json similarity index 100% rename from invokeai/frontend/public/locales/fr.json rename to invokeai/frontend/web/public/locales/fr.json diff --git a/invokeai/frontend/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json similarity index 100% rename from invokeai/frontend/public/locales/it.json rename to invokeai/frontend/web/public/locales/it.json diff --git a/invokeai/frontend/public/locales/ja.json b/invokeai/frontend/web/public/locales/ja.json similarity index 100% rename from invokeai/frontend/public/locales/ja.json rename to invokeai/frontend/web/public/locales/ja.json diff --git a/invokeai/frontend/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json similarity index 100% rename from invokeai/frontend/public/locales/nl.json rename to invokeai/frontend/web/public/locales/nl.json diff --git a/invokeai/frontend/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json similarity index 100% rename from invokeai/frontend/public/locales/pl.json rename to invokeai/frontend/web/public/locales/pl.json diff --git a/invokeai/frontend/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json similarity index 100% rename from invokeai/frontend/public/locales/pt_BR.json rename to invokeai/frontend/web/public/locales/pt_BR.json diff --git a/invokeai/frontend/public/locales/ro.json b/invokeai/frontend/web/public/locales/ro.json similarity index 100% rename from invokeai/frontend/public/locales/ro.json rename to invokeai/frontend/web/public/locales/ro.json diff --git a/invokeai/frontend/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json similarity index 100% rename from invokeai/frontend/public/locales/ru.json rename to invokeai/frontend/web/public/locales/ru.json diff --git a/invokeai/frontend/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json similarity index 100% rename from invokeai/frontend/public/locales/uk.json rename to invokeai/frontend/web/public/locales/uk.json diff --git a/invokeai/frontend/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json similarity index 100% rename from invokeai/frontend/public/locales/zh_CN.json rename to invokeai/frontend/web/public/locales/zh_CN.json diff --git a/invokeai/frontend/src/Loading.tsx b/invokeai/frontend/web/src/Loading.tsx similarity index 100% rename from invokeai/frontend/src/Loading.tsx rename to 
invokeai/frontend/web/src/Loading.tsx diff --git a/invokeai/frontend/src/app/App.scss b/invokeai/frontend/web/src/app/App.scss similarity index 100% rename from invokeai/frontend/src/app/App.scss rename to invokeai/frontend/web/src/app/App.scss diff --git a/invokeai/frontend/src/app/App.tsx b/invokeai/frontend/web/src/app/App.tsx similarity index 100% rename from invokeai/frontend/src/app/App.tsx rename to invokeai/frontend/web/src/app/App.tsx diff --git a/invokeai/frontend/src/app/constants.ts b/invokeai/frontend/web/src/app/constants.ts similarity index 100% rename from invokeai/frontend/src/app/constants.ts rename to invokeai/frontend/web/src/app/constants.ts diff --git a/invokeai/frontend/src/app/contexts/ImageUploaderTriggerContext.ts b/invokeai/frontend/web/src/app/contexts/ImageUploaderTriggerContext.ts similarity index 100% rename from invokeai/frontend/src/app/contexts/ImageUploaderTriggerContext.ts rename to invokeai/frontend/web/src/app/contexts/ImageUploaderTriggerContext.ts diff --git a/invokeai/frontend/src/app/features.ts b/invokeai/frontend/web/src/app/features.ts similarity index 100% rename from invokeai/frontend/src/app/features.ts rename to invokeai/frontend/web/src/app/features.ts diff --git a/invokeai/frontend/src/app/invokeai.d.ts b/invokeai/frontend/web/src/app/invokeai.d.ts similarity index 100% rename from invokeai/frontend/src/app/invokeai.d.ts rename to invokeai/frontend/web/src/app/invokeai.d.ts diff --git a/invokeai/frontend/src/app/selectors/readinessSelector.ts b/invokeai/frontend/web/src/app/selectors/readinessSelector.ts similarity index 100% rename from invokeai/frontend/src/app/selectors/readinessSelector.ts rename to invokeai/frontend/web/src/app/selectors/readinessSelector.ts diff --git a/invokeai/frontend/src/app/socketio/actions.ts b/invokeai/frontend/web/src/app/socketio/actions.ts similarity index 100% rename from invokeai/frontend/src/app/socketio/actions.ts rename to invokeai/frontend/web/src/app/socketio/actions.ts diff --git a/invokeai/frontend/src/app/socketio/emitters.ts b/invokeai/frontend/web/src/app/socketio/emitters.ts similarity index 100% rename from invokeai/frontend/src/app/socketio/emitters.ts rename to invokeai/frontend/web/src/app/socketio/emitters.ts diff --git a/invokeai/frontend/src/app/socketio/listeners.ts b/invokeai/frontend/web/src/app/socketio/listeners.ts similarity index 100% rename from invokeai/frontend/src/app/socketio/listeners.ts rename to invokeai/frontend/web/src/app/socketio/listeners.ts diff --git a/invokeai/frontend/src/app/socketio/middleware.ts b/invokeai/frontend/web/src/app/socketio/middleware.ts similarity index 100% rename from invokeai/frontend/src/app/socketio/middleware.ts rename to invokeai/frontend/web/src/app/socketio/middleware.ts diff --git a/invokeai/frontend/src/app/store.ts b/invokeai/frontend/web/src/app/store.ts similarity index 100% rename from invokeai/frontend/src/app/store.ts rename to invokeai/frontend/web/src/app/store.ts diff --git a/invokeai/frontend/src/app/storeHooks.ts b/invokeai/frontend/web/src/app/storeHooks.ts similarity index 100% rename from invokeai/frontend/src/app/storeHooks.ts rename to invokeai/frontend/web/src/app/storeHooks.ts diff --git a/invokeai/frontend/src/app/theme.ts b/invokeai/frontend/web/src/app/theme.ts similarity index 100% rename from invokeai/frontend/src/app/theme.ts rename to invokeai/frontend/web/src/app/theme.ts diff --git a/invokeai/frontend/src/app/utils.ts b/invokeai/frontend/web/src/app/utils.ts similarity index 100% rename from 
invokeai/frontend/src/app/utils.ts rename to invokeai/frontend/web/src/app/utils.ts diff --git a/invokeai/frontend/src/assets/fonts/Inter/Inter-Bold.ttf b/invokeai/frontend/web/src/assets/fonts/Inter/Inter-Bold.ttf similarity index 100% rename from invokeai/frontend/src/assets/fonts/Inter/Inter-Bold.ttf rename to invokeai/frontend/web/src/assets/fonts/Inter/Inter-Bold.ttf diff --git a/invokeai/frontend/src/assets/fonts/Inter/Inter.ttf b/invokeai/frontend/web/src/assets/fonts/Inter/Inter.ttf similarity index 100% rename from invokeai/frontend/src/assets/fonts/Inter/Inter.ttf rename to invokeai/frontend/web/src/assets/fonts/Inter/Inter.ttf diff --git a/invokeai/frontend/src/assets/images/image2img.png b/invokeai/frontend/web/src/assets/images/image2img.png similarity index 100% rename from invokeai/frontend/src/assets/images/image2img.png rename to invokeai/frontend/web/src/assets/images/image2img.png diff --git a/invokeai/frontend/src/assets/images/logo.png b/invokeai/frontend/web/src/assets/images/logo.png similarity index 100% rename from invokeai/frontend/src/assets/images/logo.png rename to invokeai/frontend/web/src/assets/images/logo.png diff --git a/invokeai/frontend/src/assets/images/mask.afdesign b/invokeai/frontend/web/src/assets/images/mask.afdesign similarity index 100% rename from invokeai/frontend/src/assets/images/mask.afdesign rename to invokeai/frontend/web/src/assets/images/mask.afdesign diff --git a/invokeai/frontend/src/assets/images/mask.svg b/invokeai/frontend/web/src/assets/images/mask.svg similarity index 100% rename from invokeai/frontend/src/assets/images/mask.svg rename to invokeai/frontend/web/src/assets/images/mask.svg diff --git a/invokeai/frontend/src/common/components/GuideIcon.tsx b/invokeai/frontend/web/src/common/components/GuideIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/components/GuideIcon.tsx rename to invokeai/frontend/web/src/common/components/GuideIcon.tsx diff --git a/invokeai/frontend/src/common/components/GuidePopover.scss b/invokeai/frontend/web/src/common/components/GuidePopover.scss similarity index 100% rename from invokeai/frontend/src/common/components/GuidePopover.scss rename to invokeai/frontend/web/src/common/components/GuidePopover.scss diff --git a/invokeai/frontend/src/common/components/GuidePopover.tsx b/invokeai/frontend/web/src/common/components/GuidePopover.tsx similarity index 100% rename from invokeai/frontend/src/common/components/GuidePopover.tsx rename to invokeai/frontend/web/src/common/components/GuidePopover.tsx diff --git a/invokeai/frontend/src/common/components/IAIAlertDialog.tsx b/invokeai/frontend/web/src/common/components/IAIAlertDialog.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAIAlertDialog.tsx rename to invokeai/frontend/web/src/common/components/IAIAlertDialog.tsx diff --git a/invokeai/frontend/src/common/components/IAIButton.scss b/invokeai/frontend/web/src/common/components/IAIButton.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAIButton.scss rename to invokeai/frontend/web/src/common/components/IAIButton.scss diff --git a/invokeai/frontend/src/common/components/IAIButton.tsx b/invokeai/frontend/web/src/common/components/IAIButton.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAIButton.tsx rename to invokeai/frontend/web/src/common/components/IAIButton.tsx diff --git a/invokeai/frontend/src/common/components/IAICheckbox.scss 
b/invokeai/frontend/web/src/common/components/IAICheckbox.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAICheckbox.scss rename to invokeai/frontend/web/src/common/components/IAICheckbox.scss diff --git a/invokeai/frontend/src/common/components/IAICheckbox.tsx b/invokeai/frontend/web/src/common/components/IAICheckbox.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAICheckbox.tsx rename to invokeai/frontend/web/src/common/components/IAICheckbox.tsx diff --git a/invokeai/frontend/src/common/components/IAIColorPicker.scss b/invokeai/frontend/web/src/common/components/IAIColorPicker.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAIColorPicker.scss rename to invokeai/frontend/web/src/common/components/IAIColorPicker.scss diff --git a/invokeai/frontend/src/common/components/IAIColorPicker.tsx b/invokeai/frontend/web/src/common/components/IAIColorPicker.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAIColorPicker.tsx rename to invokeai/frontend/web/src/common/components/IAIColorPicker.tsx diff --git a/invokeai/frontend/src/common/components/IAIIconButton.scss b/invokeai/frontend/web/src/common/components/IAIIconButton.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAIIconButton.scss rename to invokeai/frontend/web/src/common/components/IAIIconButton.scss diff --git a/invokeai/frontend/src/common/components/IAIIconButton.tsx b/invokeai/frontend/web/src/common/components/IAIIconButton.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAIIconButton.tsx rename to invokeai/frontend/web/src/common/components/IAIIconButton.tsx diff --git a/invokeai/frontend/src/common/components/IAIInput.scss b/invokeai/frontend/web/src/common/components/IAIInput.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAIInput.scss rename to invokeai/frontend/web/src/common/components/IAIInput.scss diff --git a/invokeai/frontend/src/common/components/IAIInput.tsx b/invokeai/frontend/web/src/common/components/IAIInput.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAIInput.tsx rename to invokeai/frontend/web/src/common/components/IAIInput.tsx diff --git a/invokeai/frontend/src/common/components/IAINumberInput.scss b/invokeai/frontend/web/src/common/components/IAINumberInput.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAINumberInput.scss rename to invokeai/frontend/web/src/common/components/IAINumberInput.scss diff --git a/invokeai/frontend/src/common/components/IAINumberInput.tsx b/invokeai/frontend/web/src/common/components/IAINumberInput.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAINumberInput.tsx rename to invokeai/frontend/web/src/common/components/IAINumberInput.tsx diff --git a/invokeai/frontend/src/common/components/IAIPopover.scss b/invokeai/frontend/web/src/common/components/IAIPopover.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAIPopover.scss rename to invokeai/frontend/web/src/common/components/IAIPopover.scss diff --git a/invokeai/frontend/src/common/components/IAIPopover.tsx b/invokeai/frontend/web/src/common/components/IAIPopover.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAIPopover.tsx rename to invokeai/frontend/web/src/common/components/IAIPopover.tsx diff --git a/invokeai/frontend/src/common/components/IAISelect.scss 
b/invokeai/frontend/web/src/common/components/IAISelect.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAISelect.scss rename to invokeai/frontend/web/src/common/components/IAISelect.scss diff --git a/invokeai/frontend/src/common/components/IAISelect.tsx b/invokeai/frontend/web/src/common/components/IAISelect.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAISelect.tsx rename to invokeai/frontend/web/src/common/components/IAISelect.tsx diff --git a/invokeai/frontend/src/common/components/IAISimpleMenu.tsx b/invokeai/frontend/web/src/common/components/IAISimpleMenu.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAISimpleMenu.tsx rename to invokeai/frontend/web/src/common/components/IAISimpleMenu.tsx diff --git a/invokeai/frontend/src/common/components/IAISlider.scss b/invokeai/frontend/web/src/common/components/IAISlider.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAISlider.scss rename to invokeai/frontend/web/src/common/components/IAISlider.scss diff --git a/invokeai/frontend/src/common/components/IAISlider.tsx b/invokeai/frontend/web/src/common/components/IAISlider.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAISlider.tsx rename to invokeai/frontend/web/src/common/components/IAISlider.tsx diff --git a/invokeai/frontend/src/common/components/IAISwitch.scss b/invokeai/frontend/web/src/common/components/IAISwitch.scss similarity index 100% rename from invokeai/frontend/src/common/components/IAISwitch.scss rename to invokeai/frontend/web/src/common/components/IAISwitch.scss diff --git a/invokeai/frontend/src/common/components/IAISwitch.tsx b/invokeai/frontend/web/src/common/components/IAISwitch.tsx similarity index 100% rename from invokeai/frontend/src/common/components/IAISwitch.tsx rename to invokeai/frontend/web/src/common/components/IAISwitch.tsx diff --git a/invokeai/frontend/src/common/components/ImageUploadOverlay.tsx b/invokeai/frontend/web/src/common/components/ImageUploadOverlay.tsx similarity index 100% rename from invokeai/frontend/src/common/components/ImageUploadOverlay.tsx rename to invokeai/frontend/web/src/common/components/ImageUploadOverlay.tsx diff --git a/invokeai/frontend/src/common/components/ImageUploader.scss b/invokeai/frontend/web/src/common/components/ImageUploader.scss similarity index 100% rename from invokeai/frontend/src/common/components/ImageUploader.scss rename to invokeai/frontend/web/src/common/components/ImageUploader.scss diff --git a/invokeai/frontend/src/common/components/ImageUploader.tsx b/invokeai/frontend/web/src/common/components/ImageUploader.tsx similarity index 100% rename from invokeai/frontend/src/common/components/ImageUploader.tsx rename to invokeai/frontend/web/src/common/components/ImageUploader.tsx diff --git a/invokeai/frontend/src/common/components/ImageUploaderButton.tsx b/invokeai/frontend/web/src/common/components/ImageUploaderButton.tsx similarity index 100% rename from invokeai/frontend/src/common/components/ImageUploaderButton.tsx rename to invokeai/frontend/web/src/common/components/ImageUploaderButton.tsx diff --git a/invokeai/frontend/src/common/components/ImageUploaderIconButton.tsx b/invokeai/frontend/web/src/common/components/ImageUploaderIconButton.tsx similarity index 100% rename from invokeai/frontend/src/common/components/ImageUploaderIconButton.tsx rename to invokeai/frontend/web/src/common/components/ImageUploaderIconButton.tsx diff --git 
a/invokeai/frontend/src/common/components/SubItemHook.tsx b/invokeai/frontend/web/src/common/components/SubItemHook.tsx similarity index 100% rename from invokeai/frontend/src/common/components/SubItemHook.tsx rename to invokeai/frontend/web/src/common/components/SubItemHook.tsx diff --git a/invokeai/frontend/src/common/components/WorkInProgress/NodesWIP.tsx b/invokeai/frontend/web/src/common/components/WorkInProgress/NodesWIP.tsx similarity index 100% rename from invokeai/frontend/src/common/components/WorkInProgress/NodesWIP.tsx rename to invokeai/frontend/web/src/common/components/WorkInProgress/NodesWIP.tsx diff --git a/invokeai/frontend/src/common/components/WorkInProgress/PostProcessingWIP.tsx b/invokeai/frontend/web/src/common/components/WorkInProgress/PostProcessingWIP.tsx similarity index 100% rename from invokeai/frontend/src/common/components/WorkInProgress/PostProcessingWIP.tsx rename to invokeai/frontend/web/src/common/components/WorkInProgress/PostProcessingWIP.tsx diff --git a/invokeai/frontend/src/common/components/WorkInProgress/Training.tsx b/invokeai/frontend/web/src/common/components/WorkInProgress/Training.tsx similarity index 100% rename from invokeai/frontend/src/common/components/WorkInProgress/Training.tsx rename to invokeai/frontend/web/src/common/components/WorkInProgress/Training.tsx diff --git a/invokeai/frontend/src/common/components/WorkInProgress/WorkInProgress.scss b/invokeai/frontend/web/src/common/components/WorkInProgress/WorkInProgress.scss similarity index 100% rename from invokeai/frontend/src/common/components/WorkInProgress/WorkInProgress.scss rename to invokeai/frontend/web/src/common/components/WorkInProgress/WorkInProgress.scss diff --git a/invokeai/frontend/src/common/components/radix-ui/IAISlider.scss b/invokeai/frontend/web/src/common/components/radix-ui/IAISlider.scss similarity index 100% rename from invokeai/frontend/src/common/components/radix-ui/IAISlider.scss rename to invokeai/frontend/web/src/common/components/radix-ui/IAISlider.scss diff --git a/invokeai/frontend/src/common/components/radix-ui/IAISlider.tsx b/invokeai/frontend/web/src/common/components/radix-ui/IAISlider.tsx similarity index 100% rename from invokeai/frontend/src/common/components/radix-ui/IAISlider.tsx rename to invokeai/frontend/web/src/common/components/radix-ui/IAISlider.tsx diff --git a/invokeai/frontend/src/common/components/radix-ui/IAITooltip.scss b/invokeai/frontend/web/src/common/components/radix-ui/IAITooltip.scss similarity index 100% rename from invokeai/frontend/src/common/components/radix-ui/IAITooltip.scss rename to invokeai/frontend/web/src/common/components/radix-ui/IAITooltip.scss diff --git a/invokeai/frontend/src/common/components/radix-ui/IAITooltip.tsx b/invokeai/frontend/web/src/common/components/radix-ui/IAITooltip.tsx similarity index 100% rename from invokeai/frontend/src/common/components/radix-ui/IAITooltip.tsx rename to invokeai/frontend/web/src/common/components/radix-ui/IAITooltip.tsx diff --git a/invokeai/frontend/src/common/hooks/useClickOutsideWatcher.ts b/invokeai/frontend/web/src/common/hooks/useClickOutsideWatcher.ts similarity index 100% rename from invokeai/frontend/src/common/hooks/useClickOutsideWatcher.ts rename to invokeai/frontend/web/src/common/hooks/useClickOutsideWatcher.ts diff --git a/invokeai/frontend/src/common/hooks/useImageUploader.ts b/invokeai/frontend/web/src/common/hooks/useImageUploader.ts similarity index 100% rename from invokeai/frontend/src/common/hooks/useImageUploader.ts rename to 
invokeai/frontend/web/src/common/hooks/useImageUploader.ts diff --git a/invokeai/frontend/src/common/hooks/useSingleAndDoubleClick.ts b/invokeai/frontend/web/src/common/hooks/useSingleAndDoubleClick.ts similarity index 100% rename from invokeai/frontend/src/common/hooks/useSingleAndDoubleClick.ts rename to invokeai/frontend/web/src/common/hooks/useSingleAndDoubleClick.ts diff --git a/invokeai/frontend/src/common/hooks/useUpdateTranslations.ts b/invokeai/frontend/web/src/common/hooks/useUpdateTranslations.ts similarity index 100% rename from invokeai/frontend/src/common/hooks/useUpdateTranslations.ts rename to invokeai/frontend/web/src/common/hooks/useUpdateTranslations.ts diff --git a/invokeai/frontend/src/common/icons/ImageToImageIcon.tsx b/invokeai/frontend/web/src/common/icons/ImageToImageIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/ImageToImageIcon.tsx rename to invokeai/frontend/web/src/common/icons/ImageToImageIcon.tsx diff --git a/invokeai/frontend/src/common/icons/InpaintIcon.tsx b/invokeai/frontend/web/src/common/icons/InpaintIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/InpaintIcon.tsx rename to invokeai/frontend/web/src/common/icons/InpaintIcon.tsx diff --git a/invokeai/frontend/src/common/icons/NodesIcon.tsx b/invokeai/frontend/web/src/common/icons/NodesIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/NodesIcon.tsx rename to invokeai/frontend/web/src/common/icons/NodesIcon.tsx diff --git a/invokeai/frontend/src/common/icons/OutpaintIcon.tsx b/invokeai/frontend/web/src/common/icons/OutpaintIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/OutpaintIcon.tsx rename to invokeai/frontend/web/src/common/icons/OutpaintIcon.tsx diff --git a/invokeai/frontend/src/common/icons/PostprocessingIcon.tsx b/invokeai/frontend/web/src/common/icons/PostprocessingIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/PostprocessingIcon.tsx rename to invokeai/frontend/web/src/common/icons/PostprocessingIcon.tsx diff --git a/invokeai/frontend/src/common/icons/TextToImageIcon.tsx b/invokeai/frontend/web/src/common/icons/TextToImageIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/TextToImageIcon.tsx rename to invokeai/frontend/web/src/common/icons/TextToImageIcon.tsx diff --git a/invokeai/frontend/src/common/icons/TrainingIcon.tsx b/invokeai/frontend/web/src/common/icons/TrainingIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/TrainingIcon.tsx rename to invokeai/frontend/web/src/common/icons/TrainingIcon.tsx diff --git a/invokeai/frontend/src/common/icons/UnifiedCanvas.afdesign b/invokeai/frontend/web/src/common/icons/UnifiedCanvas.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/UnifiedCanvas.afdesign rename to invokeai/frontend/web/src/common/icons/UnifiedCanvas.afdesign diff --git a/invokeai/frontend/src/common/icons/UnifiedCanvasIcon.tsx b/invokeai/frontend/web/src/common/icons/UnifiedCanvasIcon.tsx similarity index 100% rename from invokeai/frontend/src/common/icons/UnifiedCanvasIcon.tsx rename to invokeai/frontend/web/src/common/icons/UnifiedCanvasIcon.tsx diff --git a/invokeai/frontend/src/common/icons/design_files/BaseImage.afdesign b/invokeai/frontend/web/src/common/icons/design_files/BaseImage.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/BaseImage.afdesign rename to 
invokeai/frontend/web/src/common/icons/design_files/BaseImage.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/ImageToImage.afdesign b/invokeai/frontend/web/src/common/icons/design_files/ImageToImage.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/ImageToImage.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/ImageToImage.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/ImageToImage.svg b/invokeai/frontend/web/src/common/icons/design_files/ImageToImage.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/ImageToImage.svg rename to invokeai/frontend/web/src/common/icons/design_files/ImageToImage.svg diff --git a/invokeai/frontend/src/common/icons/design_files/Inpaint.afdesign b/invokeai/frontend/web/src/common/icons/design_files/Inpaint.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Inpaint.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/Inpaint.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/Inpaint.svg b/invokeai/frontend/web/src/common/icons/design_files/Inpaint.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Inpaint.svg rename to invokeai/frontend/web/src/common/icons/design_files/Inpaint.svg diff --git a/invokeai/frontend/src/common/icons/design_files/Nodes.afdesign b/invokeai/frontend/web/src/common/icons/design_files/Nodes.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Nodes.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/Nodes.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/Nodes.svg b/invokeai/frontend/web/src/common/icons/design_files/Nodes.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Nodes.svg rename to invokeai/frontend/web/src/common/icons/design_files/Nodes.svg diff --git a/invokeai/frontend/src/common/icons/design_files/Outpaint.afdesign b/invokeai/frontend/web/src/common/icons/design_files/Outpaint.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Outpaint.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/Outpaint.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/Outpaint.svg b/invokeai/frontend/web/src/common/icons/design_files/Outpaint.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Outpaint.svg rename to invokeai/frontend/web/src/common/icons/design_files/Outpaint.svg diff --git a/invokeai/frontend/src/common/icons/design_files/Postprocessing.afdesign b/invokeai/frontend/web/src/common/icons/design_files/Postprocessing.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Postprocessing.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/Postprocessing.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/Postprocessing.svg b/invokeai/frontend/web/src/common/icons/design_files/Postprocessing.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Postprocessing.svg rename to invokeai/frontend/web/src/common/icons/design_files/Postprocessing.svg diff --git a/invokeai/frontend/src/common/icons/design_files/TextToImage.afdesign b/invokeai/frontend/web/src/common/icons/design_files/TextToImage.afdesign similarity index 100% rename from 
invokeai/frontend/src/common/icons/design_files/TextToImage.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/TextToImage.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/TextToImage.svg b/invokeai/frontend/web/src/common/icons/design_files/TextToImage.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/TextToImage.svg rename to invokeai/frontend/web/src/common/icons/design_files/TextToImage.svg diff --git a/invokeai/frontend/src/common/icons/design_files/Training.afdesign b/invokeai/frontend/web/src/common/icons/design_files/Training.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Training.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/Training.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/Training.svg b/invokeai/frontend/web/src/common/icons/design_files/Training.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/Training.svg rename to invokeai/frontend/web/src/common/icons/design_files/Training.svg diff --git a/invokeai/frontend/src/common/icons/design_files/UnifiedCanvas.afdesign b/invokeai/frontend/web/src/common/icons/design_files/UnifiedCanvas.afdesign similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/UnifiedCanvas.afdesign rename to invokeai/frontend/web/src/common/icons/design_files/UnifiedCanvas.afdesign diff --git a/invokeai/frontend/src/common/icons/design_files/UnifiedCanvas.svg b/invokeai/frontend/web/src/common/icons/design_files/UnifiedCanvas.svg similarity index 100% rename from invokeai/frontend/src/common/icons/design_files/UnifiedCanvas.svg rename to invokeai/frontend/web/src/common/icons/design_files/UnifiedCanvas.svg diff --git a/invokeai/frontend/src/common/util/getPromptAndNegative.ts b/invokeai/frontend/web/src/common/util/getPromptAndNegative.ts similarity index 100% rename from invokeai/frontend/src/common/util/getPromptAndNegative.ts rename to invokeai/frontend/web/src/common/util/getPromptAndNegative.ts diff --git a/invokeai/frontend/src/common/util/openBase64ImageInTab.ts b/invokeai/frontend/web/src/common/util/openBase64ImageInTab.ts similarity index 100% rename from invokeai/frontend/src/common/util/openBase64ImageInTab.ts rename to invokeai/frontend/web/src/common/util/openBase64ImageInTab.ts diff --git a/invokeai/frontend/src/common/util/parameterTranslation.ts b/invokeai/frontend/web/src/common/util/parameterTranslation.ts similarity index 100% rename from invokeai/frontend/src/common/util/parameterTranslation.ts rename to invokeai/frontend/web/src/common/util/parameterTranslation.ts diff --git a/invokeai/frontend/src/common/util/promptToString.ts b/invokeai/frontend/web/src/common/util/promptToString.ts similarity index 100% rename from invokeai/frontend/src/common/util/promptToString.ts rename to invokeai/frontend/web/src/common/util/promptToString.ts diff --git a/invokeai/frontend/src/common/util/randomInt.ts b/invokeai/frontend/web/src/common/util/randomInt.ts similarity index 100% rename from invokeai/frontend/src/common/util/randomInt.ts rename to invokeai/frontend/web/src/common/util/randomInt.ts diff --git a/invokeai/frontend/src/common/util/roundDownToMultiple.ts b/invokeai/frontend/web/src/common/util/roundDownToMultiple.ts similarity index 100% rename from invokeai/frontend/src/common/util/roundDownToMultiple.ts rename to invokeai/frontend/web/src/common/util/roundDownToMultiple.ts diff --git 
a/invokeai/frontend/src/common/util/seedWeightPairs.ts b/invokeai/frontend/web/src/common/util/seedWeightPairs.ts similarity index 100% rename from invokeai/frontend/src/common/util/seedWeightPairs.ts rename to invokeai/frontend/web/src/common/util/seedWeightPairs.ts diff --git a/invokeai/frontend/src/features/canvas/components/ClearCanvasHistoryButtonModal.tsx b/invokeai/frontend/web/src/features/canvas/components/ClearCanvasHistoryButtonModal.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/ClearCanvasHistoryButtonModal.tsx rename to invokeai/frontend/web/src/features/canvas/components/ClearCanvasHistoryButtonModal.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvas.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvas.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvas.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasBoundingBoxOverlay.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasBoundingBoxOverlay.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasBoundingBoxOverlay.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasBoundingBoxOverlay.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasGrid.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasGrid.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasGrid.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasGrid.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasImage.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasImage.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasImage.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasImage.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasIntermediateImage.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasIntermediateImage.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasIntermediateImage.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasIntermediateImage.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasMaskCompositer.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskCompositer.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasMaskCompositer.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskCompositer.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasMaskLines.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskLines.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasMaskLines.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskLines.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasObjectRenderer.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasObjectRenderer.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasObjectRenderer.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasObjectRenderer.tsx diff --git 
a/invokeai/frontend/src/features/canvas/components/IAICanvasResizer.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasResizer.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasResizer.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasResizer.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasStagingArea.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingArea.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasStagingArea.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingArea.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasStagingAreaToolbar.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasStatusText.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStatusText.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasStatusText.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasStatusText.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasStatusText/IAICanvasStatusTextCursorPos.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasStatusText/IAICanvasStatusTextCursorPos.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasStatusText/IAICanvasStatusTextCursorPos.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasStatusText/IAICanvasStatusTextCursorPos.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolPreview.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolPreview.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolPreview.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasToolPreview.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasBoundingBox.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasMaskOptions.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasRedoButton.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasRedoButton.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasRedoButton.tsx rename to 
invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasRedoButton.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasSettingsButtonPopover.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolChooserOptions.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolbar.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolbar.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolbar.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasToolbar.tsx diff --git a/invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasUndoButton.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasUndoButton.tsx similarity index 100% rename from invokeai/frontend/src/features/canvas/components/IAICanvasToolbar/IAICanvasUndoButton.tsx rename to invokeai/frontend/web/src/features/canvas/components/IAICanvasToolbar/IAICanvasUndoButton.tsx diff --git a/invokeai/frontend/src/features/canvas/hooks/useCanvasDragMove.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasDragMove.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useCanvasDragMove.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useCanvasDragMove.ts diff --git a/invokeai/frontend/src/features/canvas/hooks/useCanvasHotkeys.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasHotkeys.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useCanvasHotkeys.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useCanvasHotkeys.ts diff --git a/invokeai/frontend/src/features/canvas/hooks/useCanvasMouseDown.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseDown.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useCanvasMouseDown.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseDown.ts diff --git a/invokeai/frontend/src/features/canvas/hooks/useCanvasMouseMove.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseMove.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useCanvasMouseMove.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseMove.ts diff --git a/invokeai/frontend/src/features/canvas/hooks/useCanvasMouseOut.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseOut.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useCanvasMouseOut.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseOut.ts diff --git 
a/invokeai/frontend/src/features/canvas/hooks/useCanvasMouseUp.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseUp.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useCanvasMouseUp.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useCanvasMouseUp.ts diff --git a/invokeai/frontend/src/features/canvas/hooks/useCanvasZoom.ts b/invokeai/frontend/web/src/features/canvas/hooks/useCanvasZoom.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useCanvasZoom.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useCanvasZoom.ts diff --git a/invokeai/frontend/src/features/canvas/hooks/useColorUnderCursor.ts b/invokeai/frontend/web/src/features/canvas/hooks/useColorUnderCursor.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/hooks/useColorUnderCursor.ts rename to invokeai/frontend/web/src/features/canvas/hooks/useColorUnderCursor.ts diff --git a/invokeai/frontend/src/features/canvas/store/canvasSelectors.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/store/canvasSelectors.ts rename to invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts diff --git a/invokeai/frontend/src/features/canvas/store/canvasSlice.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/store/canvasSlice.ts rename to invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts diff --git a/invokeai/frontend/src/features/canvas/store/canvasTypes.ts b/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/store/canvasTypes.ts rename to invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts diff --git a/invokeai/frontend/src/features/canvas/store/thunks/mergeAndUploadCanvas.ts b/invokeai/frontend/web/src/features/canvas/store/thunks/mergeAndUploadCanvas.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/store/thunks/mergeAndUploadCanvas.ts rename to invokeai/frontend/web/src/features/canvas/store/thunks/mergeAndUploadCanvas.ts diff --git a/invokeai/frontend/src/features/canvas/util/calculateCoordinates.ts b/invokeai/frontend/web/src/features/canvas/util/calculateCoordinates.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/calculateCoordinates.ts rename to invokeai/frontend/web/src/features/canvas/util/calculateCoordinates.ts diff --git a/invokeai/frontend/src/features/canvas/util/calculateScale.ts b/invokeai/frontend/web/src/features/canvas/util/calculateScale.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/calculateScale.ts rename to invokeai/frontend/web/src/features/canvas/util/calculateScale.ts diff --git a/invokeai/frontend/src/features/canvas/util/colorToString.ts b/invokeai/frontend/web/src/features/canvas/util/colorToString.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/colorToString.ts rename to invokeai/frontend/web/src/features/canvas/util/colorToString.ts diff --git a/invokeai/frontend/src/features/canvas/util/constants.ts b/invokeai/frontend/web/src/features/canvas/util/constants.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/constants.ts rename to invokeai/frontend/web/src/features/canvas/util/constants.ts diff --git 
a/invokeai/frontend/src/features/canvas/util/copyImage.ts b/invokeai/frontend/web/src/features/canvas/util/copyImage.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/copyImage.ts rename to invokeai/frontend/web/src/features/canvas/util/copyImage.ts diff --git a/invokeai/frontend/src/features/canvas/util/downloadFile.ts b/invokeai/frontend/web/src/features/canvas/util/downloadFile.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/downloadFile.ts rename to invokeai/frontend/web/src/features/canvas/util/downloadFile.ts diff --git a/invokeai/frontend/src/features/canvas/util/floorCoordinates.ts b/invokeai/frontend/web/src/features/canvas/util/floorCoordinates.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/floorCoordinates.ts rename to invokeai/frontend/web/src/features/canvas/util/floorCoordinates.ts diff --git a/invokeai/frontend/src/features/canvas/util/generateMask.ts b/invokeai/frontend/web/src/features/canvas/util/generateMask.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/generateMask.ts rename to invokeai/frontend/web/src/features/canvas/util/generateMask.ts diff --git a/invokeai/frontend/src/features/canvas/util/getScaledBoundingBoxDimensions.ts b/invokeai/frontend/web/src/features/canvas/util/getScaledBoundingBoxDimensions.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/getScaledBoundingBoxDimensions.ts rename to invokeai/frontend/web/src/features/canvas/util/getScaledBoundingBoxDimensions.ts diff --git a/invokeai/frontend/src/features/canvas/util/getScaledCursorPosition.ts b/invokeai/frontend/web/src/features/canvas/util/getScaledCursorPosition.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/getScaledCursorPosition.ts rename to invokeai/frontend/web/src/features/canvas/util/getScaledCursorPosition.ts diff --git a/invokeai/frontend/src/features/canvas/util/konvaInstanceProvider.ts b/invokeai/frontend/web/src/features/canvas/util/konvaInstanceProvider.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/konvaInstanceProvider.ts rename to invokeai/frontend/web/src/features/canvas/util/konvaInstanceProvider.ts diff --git a/invokeai/frontend/src/features/canvas/util/layerToDataURL.ts b/invokeai/frontend/web/src/features/canvas/util/layerToDataURL.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/layerToDataURL.ts rename to invokeai/frontend/web/src/features/canvas/util/layerToDataURL.ts diff --git a/invokeai/frontend/src/features/canvas/util/roundDimensionsTo64.ts b/invokeai/frontend/web/src/features/canvas/util/roundDimensionsTo64.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/roundDimensionsTo64.ts rename to invokeai/frontend/web/src/features/canvas/util/roundDimensionsTo64.ts diff --git a/invokeai/frontend/src/features/canvas/util/roundToHundreth.ts b/invokeai/frontend/web/src/features/canvas/util/roundToHundreth.ts similarity index 100% rename from invokeai/frontend/src/features/canvas/util/roundToHundreth.ts rename to invokeai/frontend/web/src/features/canvas/util/roundToHundreth.ts diff --git a/invokeai/frontend/src/features/gallery/components/CurrentImageButtons.scss b/invokeai/frontend/web/src/features/gallery/components/CurrentImageButtons.scss similarity index 100% rename from invokeai/frontend/src/features/gallery/components/CurrentImageButtons.scss rename to 
invokeai/frontend/web/src/features/gallery/components/CurrentImageButtons.scss diff --git a/invokeai/frontend/src/features/gallery/components/CurrentImageButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImageButtons.tsx similarity index 100% rename from invokeai/frontend/src/features/gallery/components/CurrentImageButtons.tsx rename to invokeai/frontend/web/src/features/gallery/components/CurrentImageButtons.tsx diff --git a/invokeai/frontend/src/features/gallery/components/CurrentImageDisplay.scss b/invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.scss similarity index 100% rename from invokeai/frontend/src/features/gallery/components/CurrentImageDisplay.scss rename to invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.scss diff --git a/invokeai/frontend/src/features/gallery/components/CurrentImageDisplay.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.tsx similarity index 100% rename from invokeai/frontend/src/features/gallery/components/CurrentImageDisplay.tsx rename to invokeai/frontend/web/src/features/gallery/components/CurrentImageDisplay.tsx diff --git a/invokeai/frontend/src/features/gallery/components/CurrentImagePreview.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx similarity index 100% rename from invokeai/frontend/src/features/gallery/components/CurrentImagePreview.tsx rename to invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx diff --git a/invokeai/frontend/src/features/gallery/components/DeleteImageModal.tsx b/invokeai/frontend/web/src/features/gallery/components/DeleteImageModal.tsx similarity index 100% rename from invokeai/frontend/src/features/gallery/components/DeleteImageModal.tsx rename to invokeai/frontend/web/src/features/gallery/components/DeleteImageModal.tsx diff --git a/invokeai/frontend/src/features/gallery/components/HoverableImage.scss b/invokeai/frontend/web/src/features/gallery/components/HoverableImage.scss similarity index 100% rename from invokeai/frontend/src/features/gallery/components/HoverableImage.scss rename to invokeai/frontend/web/src/features/gallery/components/HoverableImage.scss diff --git a/invokeai/frontend/src/features/gallery/components/HoverableImage.tsx b/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx similarity index 100% rename from invokeai/frontend/src/features/gallery/components/HoverableImage.tsx rename to invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx diff --git a/invokeai/frontend/src/features/gallery/components/ImageGallery.scss b/invokeai/frontend/web/src/features/gallery/components/ImageGallery.scss similarity index 100% rename from invokeai/frontend/src/features/gallery/components/ImageGallery.scss rename to invokeai/frontend/web/src/features/gallery/components/ImageGallery.scss diff --git a/invokeai/frontend/src/features/gallery/components/ImageGallery.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGallery.tsx similarity index 100% rename from invokeai/frontend/src/features/gallery/components/ImageGallery.tsx rename to invokeai/frontend/web/src/features/gallery/components/ImageGallery.tsx diff --git a/invokeai/frontend/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.scss b/invokeai/frontend/web/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.scss similarity index 100% rename from 
invokeai/frontend/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.scss rename to invokeai/frontend/web/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.scss diff --git a/invokeai/frontend/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.tsx similarity index 100% rename from invokeai/frontend/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.tsx rename to invokeai/frontend/web/src/features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer.tsx diff --git a/invokeai/frontend/src/features/gallery/hooks/useGetImageByUuid.ts b/invokeai/frontend/web/src/features/gallery/hooks/useGetImageByUuid.ts similarity index 100% rename from invokeai/frontend/src/features/gallery/hooks/useGetImageByUuid.ts rename to invokeai/frontend/web/src/features/gallery/hooks/useGetImageByUuid.ts diff --git a/invokeai/frontend/src/features/gallery/store/gallerySelectors.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts similarity index 100% rename from invokeai/frontend/src/features/gallery/store/gallerySelectors.ts rename to invokeai/frontend/web/src/features/gallery/store/gallerySelectors.ts diff --git a/invokeai/frontend/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts similarity index 100% rename from invokeai/frontend/src/features/gallery/store/gallerySlice.ts rename to invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts diff --git a/invokeai/frontend/src/features/gallery/store/thunks/uploadImage.ts b/invokeai/frontend/web/src/features/gallery/store/thunks/uploadImage.ts similarity index 100% rename from invokeai/frontend/src/features/gallery/store/thunks/uploadImage.ts rename to invokeai/frontend/web/src/features/gallery/store/thunks/uploadImage.ts diff --git a/invokeai/frontend/src/features/lightbox/components/Lightbox.scss b/invokeai/frontend/web/src/features/lightbox/components/Lightbox.scss similarity index 100% rename from invokeai/frontend/src/features/lightbox/components/Lightbox.scss rename to invokeai/frontend/web/src/features/lightbox/components/Lightbox.scss diff --git a/invokeai/frontend/src/features/lightbox/components/Lightbox.tsx b/invokeai/frontend/web/src/features/lightbox/components/Lightbox.tsx similarity index 100% rename from invokeai/frontend/src/features/lightbox/components/Lightbox.tsx rename to invokeai/frontend/web/src/features/lightbox/components/Lightbox.tsx diff --git a/invokeai/frontend/src/features/lightbox/components/ReactPanZoom.tsx b/invokeai/frontend/web/src/features/lightbox/components/ReactPanZoom.tsx similarity index 100% rename from invokeai/frontend/src/features/lightbox/components/ReactPanZoom.tsx rename to invokeai/frontend/web/src/features/lightbox/components/ReactPanZoom.tsx diff --git a/invokeai/frontend/src/features/lightbox/store/lightboxSelectors.ts b/invokeai/frontend/web/src/features/lightbox/store/lightboxSelectors.ts similarity index 100% rename from invokeai/frontend/src/features/lightbox/store/lightboxSelectors.ts rename to invokeai/frontend/web/src/features/lightbox/store/lightboxSelectors.ts diff --git a/invokeai/frontend/src/features/lightbox/store/lightboxSlice.ts b/invokeai/frontend/web/src/features/lightbox/store/lightboxSlice.ts similarity index 100% rename from invokeai/frontend/src/features/lightbox/store/lightboxSlice.ts rename to 
invokeai/frontend/web/src/features/lightbox/store/lightboxSlice.ts diff --git a/invokeai/frontend/src/features/parameters/components/AccordionItems/AdvancedSettings.scss b/invokeai/frontend/web/src/features/parameters/components/AccordionItems/AdvancedSettings.scss similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AccordionItems/AdvancedSettings.scss rename to invokeai/frontend/web/src/features/parameters/components/AccordionItems/AdvancedSettings.scss diff --git a/invokeai/frontend/src/features/parameters/components/AccordionItems/InvokeAccordionItem.tsx b/invokeai/frontend/web/src/features/parameters/components/AccordionItems/InvokeAccordionItem.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AccordionItems/InvokeAccordionItem.tsx rename to invokeai/frontend/web/src/features/parameters/components/AccordionItems/InvokeAccordionItem.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.scss b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.scss similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.scss rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.scss diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamBlur.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamBlur.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamBlur.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamBlur.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings.tsx diff --git 
a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSize.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSize.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSize.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSize.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSteps.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSteps.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSteps.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamSteps.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamStrength.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamStrength.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamStrength.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamStrength.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/CodeformerFidelity.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/CodeformerFidelity.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/CodeformerFidelity.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/CodeformerFidelity.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreStrength.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreStrength.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreStrength.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreStrength.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreToggle.tsx diff --git 
a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreType.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreType.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreType.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/FaceRestore/FaceRestoreType.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageFit.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageFit.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageFit.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageFit.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageToImageStrength.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageToImageStrength.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageToImageStrength.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/ImageToImage/ImageToImageStrength.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/HiresSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/HiresSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/HiresSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/HiresSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/OutputSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/OutputSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/OutputSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/OutputSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/SeamlessSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/SeamlessSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/SeamlessSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/SeamlessSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/SymmetrySettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/SymmetrySettings.tsx 
similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/SymmetrySettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/SymmetrySettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/SymmetryToggle.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/SymmetryToggle.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/SymmetryToggle.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Output/SymmetryToggle.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/Perlin.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/Perlin.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/Perlin.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/Perlin.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/RandomizeSeed.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/RandomizeSeed.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/RandomizeSeed.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/RandomizeSeed.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/Seed.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/Seed.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/Seed.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/Seed.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/SeedSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/SeedSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/SeedSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/SeedSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/ShuffleSeed.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/ShuffleSeed.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/ShuffleSeed.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/ShuffleSeed.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/Threshold.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/Threshold.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Seed/Threshold.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Seed/Threshold.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleDenoisingStrength.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleDenoisingStrength.tsx similarity index 
100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleDenoisingStrength.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleDenoisingStrength.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleScale.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleScale.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleScale.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleScale.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleStrength.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleStrength.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleStrength.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleStrength.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/GenerateVariations.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/GenerateVariations.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/GenerateVariations.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/GenerateVariations.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/SeedWeights.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/SeedWeights.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/SeedWeights.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/SeedWeights.tsx diff --git a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/VariationAmount.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/VariationAmount.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/VariationAmount.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/VariationAmount.tsx diff --git 
a/invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/VariationsSettings.tsx b/invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/VariationsSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/AdvancedParameters/Variations/VariationsSettings.tsx rename to invokeai/frontend/web/src/features/parameters/components/AdvancedParameters/Variations/VariationsSettings.tsx diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainCFGScale.tsx b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainCFGScale.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/MainParameters/MainCFGScale.tsx rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainCFGScale.tsx diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainHeight.tsx b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainHeight.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/MainParameters/MainHeight.tsx rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainHeight.tsx diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainIterations.tsx b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainIterations.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/MainParameters/MainIterations.tsx rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainIterations.tsx diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainParameters.scss b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainParameters.scss similarity index 100% rename from invokeai/frontend/src/features/parameters/components/MainParameters/MainParameters.scss rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainParameters.scss diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainParameters.tsx b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainParameters.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/MainParameters/MainParameters.tsx rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainParameters.tsx diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainSampler.tsx b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainSampler.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/MainParameters/MainSampler.tsx rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainSampler.tsx diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainSteps.tsx b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainSteps.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/MainParameters/MainSteps.tsx rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainSteps.tsx diff --git a/invokeai/frontend/src/features/parameters/components/MainParameters/MainWidth.tsx b/invokeai/frontend/web/src/features/parameters/components/MainParameters/MainWidth.tsx similarity index 100% rename from 
invokeai/frontend/src/features/parameters/components/MainParameters/MainWidth.tsx rename to invokeai/frontend/web/src/features/parameters/components/MainParameters/MainWidth.tsx diff --git a/invokeai/frontend/src/features/parameters/components/ParametersAccordion.tsx b/invokeai/frontend/web/src/features/parameters/components/ParametersAccordion.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/ParametersAccordion.tsx rename to invokeai/frontend/web/src/features/parameters/components/ParametersAccordion.tsx diff --git a/invokeai/frontend/src/features/parameters/components/ProcessButtons/CancelButton.tsx b/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/CancelButton.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/ProcessButtons/CancelButton.tsx rename to invokeai/frontend/web/src/features/parameters/components/ProcessButtons/CancelButton.tsx diff --git a/invokeai/frontend/src/features/parameters/components/ProcessButtons/InvokeButton.tsx b/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/InvokeButton.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/ProcessButtons/InvokeButton.tsx rename to invokeai/frontend/web/src/features/parameters/components/ProcessButtons/InvokeButton.tsx diff --git a/invokeai/frontend/src/features/parameters/components/ProcessButtons/Loopback.tsx b/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/Loopback.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/ProcessButtons/Loopback.tsx rename to invokeai/frontend/web/src/features/parameters/components/ProcessButtons/Loopback.tsx diff --git a/invokeai/frontend/src/features/parameters/components/ProcessButtons/ProcessButtons.scss b/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/ProcessButtons.scss similarity index 100% rename from invokeai/frontend/src/features/parameters/components/ProcessButtons/ProcessButtons.scss rename to invokeai/frontend/web/src/features/parameters/components/ProcessButtons/ProcessButtons.scss diff --git a/invokeai/frontend/src/features/parameters/components/ProcessButtons/ProcessButtons.tsx b/invokeai/frontend/web/src/features/parameters/components/ProcessButtons/ProcessButtons.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/ProcessButtons/ProcessButtons.tsx rename to invokeai/frontend/web/src/features/parameters/components/ProcessButtons/ProcessButtons.tsx diff --git a/invokeai/frontend/src/features/parameters/components/PromptInput/NegativePromptInput.tsx b/invokeai/frontend/web/src/features/parameters/components/PromptInput/NegativePromptInput.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/PromptInput/NegativePromptInput.tsx rename to invokeai/frontend/web/src/features/parameters/components/PromptInput/NegativePromptInput.tsx diff --git a/invokeai/frontend/src/features/parameters/components/PromptInput/PromptInput.scss b/invokeai/frontend/web/src/features/parameters/components/PromptInput/PromptInput.scss similarity index 100% rename from invokeai/frontend/src/features/parameters/components/PromptInput/PromptInput.scss rename to invokeai/frontend/web/src/features/parameters/components/PromptInput/PromptInput.scss diff --git a/invokeai/frontend/src/features/parameters/components/PromptInput/PromptInput.tsx 
b/invokeai/frontend/web/src/features/parameters/components/PromptInput/PromptInput.tsx similarity index 100% rename from invokeai/frontend/src/features/parameters/components/PromptInput/PromptInput.tsx rename to invokeai/frontend/web/src/features/parameters/components/PromptInput/PromptInput.tsx diff --git a/invokeai/frontend/src/features/parameters/hooks/usePrompt.ts b/invokeai/frontend/web/src/features/parameters/hooks/usePrompt.ts similarity index 100% rename from invokeai/frontend/src/features/parameters/hooks/usePrompt.ts rename to invokeai/frontend/web/src/features/parameters/hooks/usePrompt.ts diff --git a/invokeai/frontend/src/features/parameters/store/generationSelectors.ts b/invokeai/frontend/web/src/features/parameters/store/generationSelectors.ts similarity index 100% rename from invokeai/frontend/src/features/parameters/store/generationSelectors.ts rename to invokeai/frontend/web/src/features/parameters/store/generationSelectors.ts diff --git a/invokeai/frontend/src/features/parameters/store/generationSlice.ts b/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts similarity index 100% rename from invokeai/frontend/src/features/parameters/store/generationSlice.ts rename to invokeai/frontend/web/src/features/parameters/store/generationSlice.ts diff --git a/invokeai/frontend/src/features/parameters/store/postprocessingSelectors.ts b/invokeai/frontend/web/src/features/parameters/store/postprocessingSelectors.ts similarity index 100% rename from invokeai/frontend/src/features/parameters/store/postprocessingSelectors.ts rename to invokeai/frontend/web/src/features/parameters/store/postprocessingSelectors.ts diff --git a/invokeai/frontend/src/features/parameters/store/postprocessingSlice.ts b/invokeai/frontend/web/src/features/parameters/store/postprocessingSlice.ts similarity index 100% rename from invokeai/frontend/src/features/parameters/store/postprocessingSlice.ts rename to invokeai/frontend/web/src/features/parameters/store/postprocessingSlice.ts diff --git a/invokeai/frontend/src/features/system/components/ClearTempFolderButtonModal.tsx b/invokeai/frontend/web/src/features/system/components/ClearTempFolderButtonModal.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ClearTempFolderButtonModal.tsx rename to invokeai/frontend/web/src/features/system/components/ClearTempFolderButtonModal.tsx diff --git a/invokeai/frontend/src/features/system/components/Console.scss b/invokeai/frontend/web/src/features/system/components/Console.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/Console.scss rename to invokeai/frontend/web/src/features/system/components/Console.scss diff --git a/invokeai/frontend/src/features/system/components/Console.tsx b/invokeai/frontend/web/src/features/system/components/Console.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/Console.tsx rename to invokeai/frontend/web/src/features/system/components/Console.tsx diff --git a/invokeai/frontend/src/features/system/components/HotkeysModal/HotkeysModal.scss b/invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeysModal.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/HotkeysModal/HotkeysModal.scss rename to invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeysModal.scss diff --git a/invokeai/frontend/src/features/system/components/HotkeysModal/HotkeysModal.tsx 
b/invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeysModal.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/HotkeysModal/HotkeysModal.tsx rename to invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeysModal.tsx diff --git a/invokeai/frontend/src/features/system/components/HotkeysModal/HotkeysModalItem.tsx b/invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeysModalItem.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/HotkeysModal/HotkeysModalItem.tsx rename to invokeai/frontend/web/src/features/system/components/HotkeysModal/HotkeysModalItem.tsx diff --git a/invokeai/frontend/src/features/system/components/LanguagePicker.tsx b/invokeai/frontend/web/src/features/system/components/LanguagePicker.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/LanguagePicker.tsx rename to invokeai/frontend/web/src/features/system/components/LanguagePicker.tsx diff --git a/invokeai/frontend/src/features/system/components/Modal.scss b/invokeai/frontend/web/src/features/system/components/Modal.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/Modal.scss rename to invokeai/frontend/web/src/features/system/components/Modal.scss diff --git a/invokeai/frontend/src/features/system/components/ModelManager/AddCheckpointModel.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/AddCheckpointModel.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/AddCheckpointModel.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/AddCheckpointModel.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/AddDiffusersModel.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/AddDiffusersModel.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/AddDiffusersModel.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/AddDiffusersModel.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/AddModel.scss b/invokeai/frontend/web/src/features/system/components/ModelManager/AddModel.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/AddModel.scss rename to invokeai/frontend/web/src/features/system/components/ModelManager/AddModel.scss diff --git a/invokeai/frontend/src/features/system/components/ModelManager/AddModel.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/AddModel.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/AddModel.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/AddModel.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/CheckpointModelEdit.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/CheckpointModelEdit.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/CheckpointModelEdit.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/CheckpointModelEdit.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/DiffusersModelEdit.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/DiffusersModelEdit.tsx similarity index 100% rename from 
invokeai/frontend/src/features/system/components/ModelManager/DiffusersModelEdit.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/DiffusersModelEdit.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/MergeModels.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/MergeModels.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/MergeModels.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/MergeModels.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/ModelConvert.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/ModelConvert.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/ModelConvert.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/ModelConvert.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/ModelList.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/ModelList.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/ModelList.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/ModelList.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/ModelListItem.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/ModelListItem.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/ModelListItem.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/ModelListItem.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/ModelManagerModal.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/ModelManagerModal.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/ModelManagerModal.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/ModelManagerModal.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelManager/SearchModels.tsx b/invokeai/frontend/web/src/features/system/components/ModelManager/SearchModels.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelManager/SearchModels.tsx rename to invokeai/frontend/web/src/features/system/components/ModelManager/SearchModels.tsx diff --git a/invokeai/frontend/src/features/system/components/ModelSelect.tsx b/invokeai/frontend/web/src/features/system/components/ModelSelect.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ModelSelect.tsx rename to invokeai/frontend/web/src/features/system/components/ModelSelect.tsx diff --git a/invokeai/frontend/src/features/system/components/ProgressBar.scss b/invokeai/frontend/web/src/features/system/components/ProgressBar.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/ProgressBar.scss rename to invokeai/frontend/web/src/features/system/components/ProgressBar.scss diff --git a/invokeai/frontend/src/features/system/components/ProgressBar.tsx b/invokeai/frontend/web/src/features/system/components/ProgressBar.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ProgressBar.tsx rename to invokeai/frontend/web/src/features/system/components/ProgressBar.tsx diff --git 
a/invokeai/frontend/src/features/system/components/SettingsModal/SettingsModal.scss b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/SettingsModal/SettingsModal.scss rename to invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.scss diff --git a/invokeai/frontend/src/features/system/components/SettingsModal/SettingsModal.tsx b/invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/SettingsModal/SettingsModal.tsx rename to invokeai/frontend/web/src/features/system/components/SettingsModal/SettingsModal.tsx diff --git a/invokeai/frontend/src/features/system/components/SiteHeader.scss b/invokeai/frontend/web/src/features/system/components/SiteHeader.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/SiteHeader.scss rename to invokeai/frontend/web/src/features/system/components/SiteHeader.scss diff --git a/invokeai/frontend/src/features/system/components/SiteHeader.tsx b/invokeai/frontend/web/src/features/system/components/SiteHeader.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/SiteHeader.tsx rename to invokeai/frontend/web/src/features/system/components/SiteHeader.tsx diff --git a/invokeai/frontend/src/features/system/components/StatusIndicator.scss b/invokeai/frontend/web/src/features/system/components/StatusIndicator.scss similarity index 100% rename from invokeai/frontend/src/features/system/components/StatusIndicator.scss rename to invokeai/frontend/web/src/features/system/components/StatusIndicator.scss diff --git a/invokeai/frontend/src/features/system/components/StatusIndicator.tsx b/invokeai/frontend/web/src/features/system/components/StatusIndicator.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/StatusIndicator.tsx rename to invokeai/frontend/web/src/features/system/components/StatusIndicator.tsx diff --git a/invokeai/frontend/src/features/system/components/ThemeChanger.tsx b/invokeai/frontend/web/src/features/system/components/ThemeChanger.tsx similarity index 100% rename from invokeai/frontend/src/features/system/components/ThemeChanger.tsx rename to invokeai/frontend/web/src/features/system/components/ThemeChanger.tsx diff --git a/invokeai/frontend/src/features/system/hooks/useToastWatcher.ts b/invokeai/frontend/web/src/features/system/hooks/useToastWatcher.ts similarity index 100% rename from invokeai/frontend/src/features/system/hooks/useToastWatcher.ts rename to invokeai/frontend/web/src/features/system/hooks/useToastWatcher.ts diff --git a/invokeai/frontend/src/features/system/store/systemSelectors.ts b/invokeai/frontend/web/src/features/system/store/systemSelectors.ts similarity index 100% rename from invokeai/frontend/src/features/system/store/systemSelectors.ts rename to invokeai/frontend/web/src/features/system/store/systemSelectors.ts diff --git a/invokeai/frontend/src/features/system/store/systemSlice.ts b/invokeai/frontend/web/src/features/system/store/systemSlice.ts similarity index 100% rename from invokeai/frontend/src/features/system/store/systemSlice.ts rename to invokeai/frontend/web/src/features/system/store/systemSlice.ts diff --git a/invokeai/frontend/src/features/ui/components/FloatingButton.scss b/invokeai/frontend/web/src/features/ui/components/FloatingButton.scss similarity index 
100% rename from invokeai/frontend/src/features/ui/components/FloatingButton.scss rename to invokeai/frontend/web/src/features/ui/components/FloatingButton.scss diff --git a/invokeai/frontend/src/features/ui/components/FloatingGalleryButton.tsx b/invokeai/frontend/web/src/features/ui/components/FloatingGalleryButton.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/FloatingGalleryButton.tsx rename to invokeai/frontend/web/src/features/ui/components/FloatingGalleryButton.tsx diff --git a/invokeai/frontend/src/features/ui/components/FloatingParametersPanelButtons.tsx b/invokeai/frontend/web/src/features/ui/components/FloatingParametersPanelButtons.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/FloatingParametersPanelButtons.tsx rename to invokeai/frontend/web/src/features/ui/components/FloatingParametersPanelButtons.tsx diff --git a/invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImage.scss b/invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImage.scss similarity index 100% rename from invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImage.scss rename to invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImage.scss diff --git a/invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImageDisplay.tsx b/invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImageDisplay.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImageDisplay.tsx rename to invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImageDisplay.tsx diff --git a/invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImageOptions.tsx b/invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImageOptions.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImageOptions.tsx rename to invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImageOptions.tsx diff --git a/invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImagePanel.tsx b/invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImagePanel.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImagePanel.tsx rename to invokeai/frontend/web/src/features/ui/components/ImageToImage/ImageToImagePanel.tsx diff --git a/invokeai/frontend/src/features/ui/components/ImageToImage/InitImagePreview.tsx b/invokeai/frontend/web/src/features/ui/components/ImageToImage/InitImagePreview.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/ImageToImage/InitImagePreview.tsx rename to invokeai/frontend/web/src/features/ui/components/ImageToImage/InitImagePreview.tsx diff --git a/invokeai/frontend/src/features/ui/components/ImageToImage/InitialImageOverlay.tsx b/invokeai/frontend/web/src/features/ui/components/ImageToImage/InitialImageOverlay.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/ImageToImage/InitialImageOverlay.tsx rename to invokeai/frontend/web/src/features/ui/components/ImageToImage/InitialImageOverlay.tsx diff --git a/invokeai/frontend/src/features/ui/components/ImageToImage/index.tsx b/invokeai/frontend/web/src/features/ui/components/ImageToImage/index.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/ImageToImage/index.tsx rename to 
invokeai/frontend/web/src/features/ui/components/ImageToImage/index.tsx diff --git a/invokeai/frontend/src/features/ui/components/InvokeParametersPanel.scss b/invokeai/frontend/web/src/features/ui/components/InvokeParametersPanel.scss similarity index 100% rename from invokeai/frontend/src/features/ui/components/InvokeParametersPanel.scss rename to invokeai/frontend/web/src/features/ui/components/InvokeParametersPanel.scss diff --git a/invokeai/frontend/src/features/ui/components/InvokeParametersPanel.tsx b/invokeai/frontend/web/src/features/ui/components/InvokeParametersPanel.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/InvokeParametersPanel.tsx rename to invokeai/frontend/web/src/features/ui/components/InvokeParametersPanel.tsx diff --git a/invokeai/frontend/src/features/ui/components/InvokeTabs.scss b/invokeai/frontend/web/src/features/ui/components/InvokeTabs.scss similarity index 100% rename from invokeai/frontend/src/features/ui/components/InvokeTabs.scss rename to invokeai/frontend/web/src/features/ui/components/InvokeTabs.scss diff --git a/invokeai/frontend/src/features/ui/components/InvokeTabs.tsx b/invokeai/frontend/web/src/features/ui/components/InvokeTabs.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/InvokeTabs.tsx rename to invokeai/frontend/web/src/features/ui/components/InvokeTabs.tsx diff --git a/invokeai/frontend/src/features/ui/components/InvokeWorkarea.scss b/invokeai/frontend/web/src/features/ui/components/InvokeWorkarea.scss similarity index 100% rename from invokeai/frontend/src/features/ui/components/InvokeWorkarea.scss rename to invokeai/frontend/web/src/features/ui/components/InvokeWorkarea.scss diff --git a/invokeai/frontend/src/features/ui/components/InvokeWorkarea.tsx b/invokeai/frontend/web/src/features/ui/components/InvokeWorkarea.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/InvokeWorkarea.tsx rename to invokeai/frontend/web/src/features/ui/components/InvokeWorkarea.tsx diff --git a/invokeai/frontend/src/features/ui/components/TextToImage/TextToImage.scss b/invokeai/frontend/web/src/features/ui/components/TextToImage/TextToImage.scss similarity index 100% rename from invokeai/frontend/src/features/ui/components/TextToImage/TextToImage.scss rename to invokeai/frontend/web/src/features/ui/components/TextToImage/TextToImage.scss diff --git a/invokeai/frontend/src/features/ui/components/TextToImage/TextToImageDisplay.tsx b/invokeai/frontend/web/src/features/ui/components/TextToImage/TextToImageDisplay.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/TextToImage/TextToImageDisplay.tsx rename to invokeai/frontend/web/src/features/ui/components/TextToImage/TextToImageDisplay.tsx diff --git a/invokeai/frontend/src/features/ui/components/TextToImage/TextToImagePanel.tsx b/invokeai/frontend/web/src/features/ui/components/TextToImage/TextToImagePanel.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/TextToImage/TextToImagePanel.tsx rename to invokeai/frontend/web/src/features/ui/components/TextToImage/TextToImagePanel.tsx diff --git a/invokeai/frontend/src/features/ui/components/TextToImage/index.tsx b/invokeai/frontend/web/src/features/ui/components/TextToImage/index.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/TextToImage/index.tsx rename to invokeai/frontend/web/src/features/ui/components/TextToImage/index.tsx diff --git 
a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/CanvasWorkarea.scss b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/CanvasWorkarea.scss similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/CanvasWorkarea.scss rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/CanvasWorkarea.scss diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasDisplayBeta.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasDisplayBeta.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasDisplayBeta.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasDisplayBeta.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBaseBrushSettings.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBaseBrushSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBaseBrushSettings.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBaseBrushSettings.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSettings.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSettings.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSettings.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSize.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSize.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSize.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSize.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasClearMask.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasClearMask.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasClearMask.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasClearMask.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx similarity 
index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasLimitStrokesToBox.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasLimitStrokesToBox.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasLimitStrokesToBox.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasLimitStrokesToBox.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMaskBrushSettings.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMaskBrushSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMaskBrushSettings.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMaskBrushSettings.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMoveSettings.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMoveSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMoveSettings.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMoveSettings.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx 
b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettingsBeta.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettingsBeta.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettingsBeta.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettingsBeta.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasCopyToClipboard.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasCopyToClipboard.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasCopyToClipboard.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasCopyToClipboard.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasDownloadImage.tsx 
b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasDownloadImage.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasDownloadImage.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasDownloadImage.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasFileUploader.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasFileUploader.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasFileUploader.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasFileUploader.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasLayerSelect.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasLayerSelect.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasLayerSelect.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasLayerSelect.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMergeVisible.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMergeVisible.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMergeVisible.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMergeVisible.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMoveTool.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMoveTool.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMoveTool.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMoveTool.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasProcessingButtons.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasProcessingButtons.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasProcessingButtons.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasProcessingButtons.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetCanvas.tsx 
b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetCanvas.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetCanvas.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetCanvas.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetView.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetView.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetView.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetView.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasSaveToGallery.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasSaveToGallery.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasSaveToGallery.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasSaveToGallery.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasToolSelect.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasToolSelect.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasToolSelect.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasToolSelect.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbarBeta.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbarBeta.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbarBeta.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbarBeta.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasDisplay.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasDisplay.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasDisplay.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasDisplay.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasPanel.tsx b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasPanel.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasPanel.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasPanel.tsx diff --git a/invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasWorkarea.tsx 
b/invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasWorkarea.tsx similarity index 100% rename from invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasWorkarea.tsx rename to invokeai/frontend/web/src/features/ui/components/UnifiedCanvas/UnifiedCanvasWorkarea.tsx diff --git a/invokeai/frontend/src/features/ui/store/tabMap.ts b/invokeai/frontend/web/src/features/ui/store/tabMap.ts similarity index 100% rename from invokeai/frontend/src/features/ui/store/tabMap.ts rename to invokeai/frontend/web/src/features/ui/store/tabMap.ts diff --git a/invokeai/frontend/src/features/ui/store/uiSelectors.ts b/invokeai/frontend/web/src/features/ui/store/uiSelectors.ts similarity index 100% rename from invokeai/frontend/src/features/ui/store/uiSelectors.ts rename to invokeai/frontend/web/src/features/ui/store/uiSelectors.ts diff --git a/invokeai/frontend/src/features/ui/store/uiSlice.ts b/invokeai/frontend/web/src/features/ui/store/uiSlice.ts similarity index 100% rename from invokeai/frontend/src/features/ui/store/uiSlice.ts rename to invokeai/frontend/web/src/features/ui/store/uiSlice.ts diff --git a/invokeai/frontend/src/features/ui/store/uiTypes.ts b/invokeai/frontend/web/src/features/ui/store/uiTypes.ts similarity index 100% rename from invokeai/frontend/src/features/ui/store/uiTypes.ts rename to invokeai/frontend/web/src/features/ui/store/uiTypes.ts diff --git a/invokeai/frontend/src/i18.d.ts b/invokeai/frontend/web/src/i18.d.ts similarity index 100% rename from invokeai/frontend/src/i18.d.ts rename to invokeai/frontend/web/src/i18.d.ts diff --git a/invokeai/frontend/src/i18n.ts b/invokeai/frontend/web/src/i18n.ts similarity index 100% rename from invokeai/frontend/src/i18n.ts rename to invokeai/frontend/web/src/i18n.ts diff --git a/invokeai/frontend/src/main.tsx b/invokeai/frontend/web/src/main.tsx similarity index 100% rename from invokeai/frontend/src/main.tsx rename to invokeai/frontend/web/src/main.tsx diff --git a/invokeai/frontend/src/persistor.ts b/invokeai/frontend/web/src/persistor.ts similarity index 100% rename from invokeai/frontend/src/persistor.ts rename to invokeai/frontend/web/src/persistor.ts diff --git a/invokeai/frontend/src/styles/Mixins/Buttons.scss b/invokeai/frontend/web/src/styles/Mixins/Buttons.scss similarity index 100% rename from invokeai/frontend/src/styles/Mixins/Buttons.scss rename to invokeai/frontend/web/src/styles/Mixins/Buttons.scss diff --git a/invokeai/frontend/src/styles/Mixins/_Responsive.scss b/invokeai/frontend/web/src/styles/Mixins/_Responsive.scss similarity index 100% rename from invokeai/frontend/src/styles/Mixins/_Responsive.scss rename to invokeai/frontend/web/src/styles/Mixins/_Responsive.scss diff --git a/invokeai/frontend/src/styles/Mixins/_Shared.scss b/invokeai/frontend/web/src/styles/Mixins/_Shared.scss similarity index 100% rename from invokeai/frontend/src/styles/Mixins/_Shared.scss rename to invokeai/frontend/web/src/styles/Mixins/_Shared.scss diff --git a/invokeai/frontend/src/styles/Mixins/_Variables.scss b/invokeai/frontend/web/src/styles/Mixins/_Variables.scss similarity index 100% rename from invokeai/frontend/src/styles/Mixins/_Variables.scss rename to invokeai/frontend/web/src/styles/Mixins/_Variables.scss diff --git a/invokeai/frontend/src/styles/Mixins/index.scss b/invokeai/frontend/web/src/styles/Mixins/index.scss similarity index 100% rename from invokeai/frontend/src/styles/Mixins/index.scss rename to invokeai/frontend/web/src/styles/Mixins/index.scss diff --git 
a/invokeai/frontend/src/styles/Themes/_Colors_Dark.scss b/invokeai/frontend/web/src/styles/Themes/_Colors_Dark.scss similarity index 100% rename from invokeai/frontend/src/styles/Themes/_Colors_Dark.scss rename to invokeai/frontend/web/src/styles/Themes/_Colors_Dark.scss diff --git a/invokeai/frontend/src/styles/Themes/_Colors_Green.scss b/invokeai/frontend/web/src/styles/Themes/_Colors_Green.scss similarity index 100% rename from invokeai/frontend/src/styles/Themes/_Colors_Green.scss rename to invokeai/frontend/web/src/styles/Themes/_Colors_Green.scss diff --git a/invokeai/frontend/src/styles/Themes/_Colors_Light.scss b/invokeai/frontend/web/src/styles/Themes/_Colors_Light.scss similarity index 100% rename from invokeai/frontend/src/styles/Themes/_Colors_Light.scss rename to invokeai/frontend/web/src/styles/Themes/_Colors_Light.scss diff --git a/invokeai/frontend/src/styles/_Animations.scss b/invokeai/frontend/web/src/styles/_Animations.scss similarity index 100% rename from invokeai/frontend/src/styles/_Animations.scss rename to invokeai/frontend/web/src/styles/_Animations.scss diff --git a/invokeai/frontend/src/styles/_Fonts.scss b/invokeai/frontend/web/src/styles/_Fonts.scss similarity index 100% rename from invokeai/frontend/src/styles/_Fonts.scss rename to invokeai/frontend/web/src/styles/_Fonts.scss diff --git a/invokeai/frontend/src/styles/_Misc.scss b/invokeai/frontend/web/src/styles/_Misc.scss similarity index 100% rename from invokeai/frontend/src/styles/_Misc.scss rename to invokeai/frontend/web/src/styles/_Misc.scss diff --git a/invokeai/frontend/src/styles/index.scss b/invokeai/frontend/web/src/styles/index.scss similarity index 100% rename from invokeai/frontend/src/styles/index.scss rename to invokeai/frontend/web/src/styles/index.scss diff --git a/invokeai/frontend/src/vite-env.d.ts b/invokeai/frontend/web/src/vite-env.d.ts similarity index 100% rename from invokeai/frontend/src/vite-env.d.ts rename to invokeai/frontend/web/src/vite-env.d.ts diff --git a/invokeai/frontend/stats.html b/invokeai/frontend/web/stats.html similarity index 100% rename from invokeai/frontend/stats.html rename to invokeai/frontend/web/stats.html diff --git a/invokeai/frontend/tsconfig.json b/invokeai/frontend/web/tsconfig.json similarity index 100% rename from invokeai/frontend/tsconfig.json rename to invokeai/frontend/web/tsconfig.json diff --git a/invokeai/frontend/tsconfig.node.json b/invokeai/frontend/web/tsconfig.node.json similarity index 100% rename from invokeai/frontend/tsconfig.node.json rename to invokeai/frontend/web/tsconfig.node.json diff --git a/invokeai/frontend/vite.config.ts b/invokeai/frontend/web/vite.config.ts similarity index 100% rename from invokeai/frontend/vite.config.ts rename to invokeai/frontend/web/vite.config.ts diff --git a/invokeai/frontend/yarn.lock b/invokeai/frontend/web/yarn.lock similarity index 100% rename from invokeai/frontend/yarn.lock rename to invokeai/frontend/web/yarn.lock diff --git a/ldm/invoke/config/__init__.py b/ldm/invoke/config/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ldm/invoke/restoration/codeformer/weights/README b/ldm/invoke/restoration/codeformer/weights/README deleted file mode 100644 index 1cc40dc026..0000000000 --- a/ldm/invoke/restoration/codeformer/weights/README +++ /dev/null @@ -1,3 +0,0 @@ -To use codeformer face reconstruction, you will need to copy -https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth -into this directory. 
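Note on the hunk above: the README that documented the manual CodeFormer weight download is deleted here along with the rest of the legacy tree, and the frontend renames earlier in this patch only relocate the web client root from invokeai/frontend/ to invokeai/frontend/web/. A minimal sketch of the step the deleted README described follows, assuming a plain HTTP download. The URL is copied verbatim from the removed file; the destination directory and the helper name are illustrative placeholders, since this patch does not show where the relocated restoration code expects the weights to live.

    # Sketch only: fetch the CodeFormer face-restoration weights that the deleted
    # README pointed at. The URL comes from the removed README; the destination
    # directory supplied by the caller is a placeholder, not a path defined by
    # this patch.
    from pathlib import Path
    from urllib.request import urlretrieve

    CODEFORMER_URL = (
        "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
    )

    def fetch_codeformer_weights(dest_dir: str) -> Path:
        """Download codeformer.pth into dest_dir if it is not already present."""
        dest = Path(dest_dir)
        dest.mkdir(parents=True, exist_ok=True)
        target = dest / "codeformer.pth"
        if not target.exists():
            # Single file, no authentication required.
            urlretrieve(CODEFORMER_URL, str(target))
        return target

    # Example call with a placeholder path: fetch_codeformer_weights("models/codeformer")

The helper is idempotent, so it can be run again safely if a partial tree already contains the file.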
diff --git a/ldm/invoke/server.py b/ldm/invoke/server.py deleted file mode 100644 index 9bcf056477..0000000000 --- a/ldm/invoke/server.py +++ /dev/null @@ -1,282 +0,0 @@ -import argparse -import json -import copy -import base64 -import mimetypes -import os -from ldm.invoke.args import Args, metadata_dumps -from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer -from ldm.invoke.pngwriter import PngWriter -from threading import Event - -def build_opt(post_data, seed, gfpgan_model_exists): - opt = Args() - opt.parse_args() # initialize defaults - setattr(opt, 'prompt', post_data['prompt']) - setattr(opt, 'init_img', post_data['initimg']) - setattr(opt, 'strength', float(post_data['strength'])) - setattr(opt, 'iterations', int(post_data['iterations'])) - setattr(opt, 'steps', int(post_data['steps'])) - setattr(opt, 'width', int(post_data['width'])) - setattr(opt, 'height', int(post_data['height'])) - setattr(opt, 'seamless', 'seamless' in post_data) - setattr(opt, 'fit', 'fit' in post_data) - setattr(opt, 'mask', 'mask' in post_data) - setattr(opt, 'invert_mask', 'invert_mask' in post_data) - setattr(opt, 'cfg_scale', float(post_data['cfg_scale'])) - setattr(opt, 'sampler_name', post_data['sampler_name']) - - # embiggen not practical at this point because we have no way of feeding images back into img2img - # however, this code is here against that eventuality - setattr(opt, 'embiggen', None) - setattr(opt, 'embiggen_tiles', None) - setattr(opt, 'embiggen_strength', None) - - setattr(opt, 'facetool_strength', float(post_data['facetool_strength']) if gfpgan_model_exists else 0) - setattr(opt, 'upscale', [int(post_data['upscale_level']), float(post_data['upscale_strength'])] if post_data['upscale_level'] != '' else None) - setattr(opt, 'progress_images', 'progress_images' in post_data) - setattr(opt, 'progress_latents', 'progress_latents' in post_data) - setattr(opt, 'seed', None if int(post_data['seed']) == -1 else int(post_data['seed'])) - setattr(opt, 'threshold', float(post_data['threshold'])) - setattr(opt, 'perlin', float(post_data['perlin'])) - setattr(opt, 'hires_fix', 'hires_fix' in post_data) - setattr(opt, 'variation_amount', float(post_data['variation_amount']) if int(post_data['seed']) != -1 else 0) - setattr(opt, 'with_variations', []) - setattr(opt, 'embiggen', None) - setattr(opt, 'embiggen_tiles', None) - - broken = False - if int(post_data['seed']) != -1 and post_data['with_variations'] != '': - for part in post_data['with_variations'].split(','): - seed_and_weight = part.split(':') - if len(seed_and_weight) != 2: - print(f'could not parse WITH_variation part "{part}"') - broken = True - break - try: - seed = int(seed_and_weight[0]) - weight = float(seed_and_weight[1]) - except ValueError: - print(f'could not parse with_variation part "{part}"') - broken = True - break - opt.with_variations.append([seed, weight]) - - if broken: - raise CanceledException - - if len(opt.with_variations) == 0: - opt.with_variations = None - - return opt - -class CanceledException(Exception): - pass - -class DreamServer(BaseHTTPRequestHandler): - model = None - outdir = None - canceled = Event() - - def do_GET(self): - if self.path == "/": - self.send_response(200) - self.send_header("Content-type", "text/html") - self.end_headers() - with open("./static/legacy_web/index.html", "rb") as content: - self.wfile.write(content.read()) - elif self.path == "/config.js": - # unfortunately this import can't be at the top level, since that would cause a circular import - self.send_response(200) 
- self.send_header("Content-type", "application/javascript") - self.end_headers() - config = { - 'gfpgan_model_exists': self.gfpgan_model_exists - } - self.wfile.write(bytes("let config = " + json.dumps(config) + ";\n", "utf-8")) - elif self.path == "/run_log.json": - self.send_response(200) - self.send_header("Content-type", "application/json") - self.end_headers() - output = [] - - log_file = os.path.join(self.outdir, "legacy_web_log.txt") - if os.path.exists(log_file): - with open(log_file, "r") as log: - for line in log: - url, config = line.split(": {", maxsplit=1) - config = json.loads("{" + config) - config["url"] = url.lstrip(".") - if os.path.exists(url): - output.append(config) - - self.wfile.write(bytes(json.dumps({"run_log": output}), "utf-8")) - elif self.path == "/cancel": - self.canceled.set() - self.send_response(200) - self.send_header("Content-type", "application/json") - self.end_headers() - self.wfile.write(bytes('{}', 'utf8')) - else: - path_dir = os.path.dirname(self.path) - out_dir = os.path.realpath(self.outdir.rstrip('/')) - if self.path.startswith('/static/legacy_web/'): - path = '.' + self.path - elif out_dir.replace('\\', '/').endswith(path_dir): - file = os.path.basename(self.path) - path = os.path.join(self.outdir,file) - else: - self.send_response(404) - return - mime_type = mimetypes.guess_type(path)[0] - if mime_type is not None: - self.send_response(200) - self.send_header("Content-type", mime_type) - self.end_headers() - with open(path, "rb") as content: - self.wfile.write(content.read()) - else: - self.send_response(404) - - def do_POST(self): - self.send_response(200) - self.send_header("Content-type", "application/json") - self.end_headers() - - # unfortunately this import can't be at the top level, since that would cause a circular import - - content_length = int(self.headers['Content-Length']) - post_data = json.loads(self.rfile.read(content_length)) - opt = build_opt(post_data, self.model.seed, self.gfpgan_model_exists) - - self.canceled.clear() - # In order to handle upscaled images, the PngWriter needs to maintain state - # across images generated by each call to prompt2img(), so we define it in - # the outer scope of image_done() - config = post_data.copy() # Shallow copy - config['initimg'] = config.pop('initimg_name', '') - - images_generated = 0 # helps keep track of when upscaling is started - images_upscaled = 0 # helps keep track of when upscaling is completed - pngwriter = PngWriter(self.outdir) - - prefix = pngwriter.unique_prefix() - # if upscaling is requested, then this will be called twice, once when - # the images are first generated, and then again when after upscaling - # is complete. The upscaling replaces the original file, so the second - # entry should not be inserted into the image list. 
- # LS: This repeats code in dream.py - def image_done(image, seed, upscaled=False, first_seed=None): - name = f'{prefix}.{seed}.png' - iter_opt = copy.copy(opt) - if opt.variation_amount > 0: - this_variation = [[seed, opt.variation_amount]] - if opt.with_variations is None: - iter_opt.with_variations = this_variation - else: - iter_opt.with_variations = opt.with_variations + this_variation - iter_opt.variation_amount = 0 - formatted_prompt = opt.dream_prompt_str(seed=seed) - path = pngwriter.save_image_and_prompt_to_png( - image, - dream_prompt = formatted_prompt, - metadata = metadata_dumps(iter_opt, - seeds = [seed], - model_hash = self.model.model_hash - ), - name = name, - ) - - if int(config['seed']) == -1: - config['seed'] = seed - # Append post_data to log, but only once! - if not upscaled: - with open(os.path.join(self.outdir, "legacy_web_log.txt"), "a") as log: - log.write(f"{path}: {json.dumps(config)}\n") - - self.wfile.write(bytes(json.dumps( - {'event': 'result', 'url': path, 'seed': seed, 'config': config} - ) + '\n',"utf-8")) - - # control state of the "postprocessing..." message - upscaling_requested = opt.upscale or opt.facetool_strength > 0 - nonlocal images_generated # NB: Is this bad python style? It is typical usage in a perl closure. - nonlocal images_upscaled # NB: Is this bad python style? It is typical usage in a perl closure. - if upscaled: - images_upscaled += 1 - else: - images_generated += 1 - if upscaling_requested: - action = None - if images_generated >= opt.iterations: - if images_upscaled < opt.iterations: - action = 'upscaling-started' - else: - action = 'upscaling-done' - if action: - x = images_upscaled + 1 - self.wfile.write(bytes(json.dumps( - {'event': action, 'processed_file_cnt': f'{x}/{opt.iterations}'} - ) + '\n',"utf-8")) - - step_writer = PngWriter(os.path.join(self.outdir, "intermediates")) - step_index = 1 - def image_progress(sample, step): - if self.canceled.is_set(): - self.wfile.write(bytes(json.dumps({'event':'canceled'}) + '\n', 'utf-8')) - raise CanceledException - path = None - # since rendering images is moderately expensive, only render every 5th image - # and don't bother with the last one, since it'll render anyway - nonlocal step_index - - wants_progress_latents = opt.progress_latents - wants_progress_image = opt.progress_image and step % 5 == 0 - - if (wants_progress_image | wants_progress_latents) and step < opt.steps - 1: - image = self.model.sample_to_image(sample) if wants_progress_image \ - else self.model.sample_to_lowres_estimated_image(sample) - step_index_padded = str(step_index).rjust(len(str(opt.steps)), '0') - name = f'{prefix}.{opt.seed}.{step_index_padded}.png' - metadata = f'{opt.prompt} -S{opt.seed} [intermediate]' - path = step_writer.save_image_and_prompt_to_png(image, dream_prompt=metadata, name=name) - step_index += 1 - self.wfile.write(bytes(json.dumps( - {'event': 'step', 'step': step + 1, 'url': path} - ) + '\n',"utf-8")) - - try: - if opt.init_img is None: - # Run txt2img - self.model.prompt2image(**vars(opt), step_callback=image_progress, image_callback=image_done) - else: - # Decode initimg as base64 to temp file - with open("./img2img-tmp.png", "wb") as f: - initimg = opt.init_img.split(",")[1] # Ignore mime type - f.write(base64.b64decode(initimg)) - opt1 = argparse.Namespace(**vars(opt)) - opt1.init_img = "./img2img-tmp.png" - - try: - # Run img2img - self.model.prompt2image(**vars(opt1), step_callback=image_progress, image_callback=image_done) - finally: - # Remove the temp file - 
os.remove("./img2img-tmp.png") - except CanceledException: - print(f"Canceled.") - return - except Exception as e: - print("Error happened") - print(e) - self.wfile.write(bytes(json.dumps( - {'event': 'error', - 'message': str(e), - 'type': e.__class__.__name__} - ) + '\n',"utf-8")) - raise e - - -class ThreadingDreamServer(ThreadingHTTPServer): - def __init__(self, server_address): - super(ThreadingDreamServer, self).__init__(server_address, DreamServer) diff --git a/ldm/invoke/server_legacy.py b/ldm/invoke/server_legacy.py deleted file mode 100644 index 7497d64519..0000000000 --- a/ldm/invoke/server_legacy.py +++ /dev/null @@ -1,246 +0,0 @@ -import argparse -import json -import base64 -import mimetypes -import os -from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer -from ldm.invoke.pngwriter import PngWriter, PromptFormatter -from threading import Event - -def build_opt(post_data, seed, gfpgan_model_exists): - opt = argparse.Namespace() - setattr(opt, 'prompt', post_data['prompt']) - setattr(opt, 'init_img', post_data['initimg']) - setattr(opt, 'strength', float(post_data['strength'])) - setattr(opt, 'iterations', int(post_data['iterations'])) - setattr(opt, 'steps', int(post_data['steps'])) - setattr(opt, 'width', int(post_data['width'])) - setattr(opt, 'height', int(post_data['height'])) - setattr(opt, 'seamless', 'seamless' in post_data) - setattr(opt, 'fit', 'fit' in post_data) - setattr(opt, 'mask', 'mask' in post_data) - setattr(opt, 'invert_mask', 'invert_mask' in post_data) - setattr(opt, 'cfg_scale', float(post_data['cfg_scale'])) - setattr(opt, 'sampler_name', post_data['sampler_name']) - setattr(opt, 'gfpgan_strength', float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0) - setattr(opt, 'upscale', [int(post_data['upscale_level']), float(post_data['upscale_strength'])] if post_data['upscale_level'] != '' else None) - setattr(opt, 'progress_images', 'progress_images' in post_data) - setattr(opt, 'seed', None if int(post_data['seed']) == -1 else int(post_data['seed'])) - setattr(opt, 'variation_amount', float(post_data['variation_amount']) if int(post_data['seed']) != -1 else 0) - setattr(opt, 'with_variations', []) - - broken = False - if int(post_data['seed']) != -1 and post_data['with_variations'] != '': - for part in post_data['with_variations'].split(','): - seed_and_weight = part.split(':') - if len(seed_and_weight) != 2: - print(f'could not parse with_variation part "{part}"') - broken = True - break - try: - seed = int(seed_and_weight[0]) - weight = float(seed_and_weight[1]) - except ValueError: - print(f'could not parse with_variation part "{part}"') - broken = True - break - opt.with_variations.append([seed, weight]) - - if broken: - raise CanceledException - - if len(opt.with_variations) == 0: - opt.with_variations = None - - return opt - -class CanceledException(Exception): - pass - -class DreamServer(BaseHTTPRequestHandler): - model = None - outdir = None - canceled = Event() - - def do_GET(self): - if self.path == "/": - self.send_response(200) - self.send_header("Content-type", "text/html") - self.end_headers() - with open("./static/dream_web/index.html", "rb") as content: - self.wfile.write(content.read()) - elif self.path == "/config.js": - # unfortunately this import can't be at the top level, since that would cause a circular import - from ldm.gfpgan.gfpgan_tools import gfpgan_model_exists - self.send_response(200) - self.send_header("Content-type", "application/javascript") - self.end_headers() - config = { - 'gfpgan_model_exists': 
gfpgan_model_exists - } - self.wfile.write(bytes("let config = " + json.dumps(config) + ";\n", "utf-8")) - elif self.path == "/run_log.json": - self.send_response(200) - self.send_header("Content-type", "application/json") - self.end_headers() - output = [] - - log_file = os.path.join(self.outdir, "dream_web_log.txt") - if os.path.exists(log_file): - with open(log_file, "r") as log: - for line in log: - url, config = line.split(": {", maxsplit=1) - config = json.loads("{" + config) - config["url"] = url.lstrip(".") - if os.path.exists(url): - output.append(config) - - self.wfile.write(bytes(json.dumps({"run_log": output}), "utf-8")) - elif self.path == "/cancel": - self.canceled.set() - self.send_response(200) - self.send_header("Content-type", "application/json") - self.end_headers() - self.wfile.write(bytes('{}', 'utf8')) - else: - path = "." + self.path - cwd = os.path.realpath(os.getcwd()) - is_in_cwd = os.path.commonprefix((os.path.realpath(path), cwd)) == cwd - if not (is_in_cwd and os.path.exists(path)): - self.send_response(404) - return - mime_type = mimetypes.guess_type(path)[0] - if mime_type is not None: - self.send_response(200) - self.send_header("Content-type", mime_type) - self.end_headers() - with open("." + self.path, "rb") as content: - self.wfile.write(content.read()) - else: - self.send_response(404) - - def do_POST(self): - self.send_response(200) - self.send_header("Content-type", "application/json") - self.end_headers() - - # unfortunately this import can't be at the top level, since that would cause a circular import - # TODO temporarily commented out, import fails for some reason - # from ldm.gfpgan.gfpgan_tools import gfpgan_model_exists - gfpgan_model_exists = False - - content_length = int(self.headers['Content-Length']) - post_data = json.loads(self.rfile.read(content_length)) - opt = build_opt(post_data, self.model.seed, gfpgan_model_exists) - - self.canceled.clear() - print(f">> Request to generate with prompt: {opt.prompt}") - # In order to handle upscaled images, the PngWriter needs to maintain state - # across images generated by each call to prompt2img(), so we define it in - # the outer scope of image_done() - config = post_data.copy() # Shallow copy - config['initimg'] = config.pop('initimg_name', '') - - images_generated = 0 # helps keep track of when upscaling is started - images_upscaled = 0 # helps keep track of when upscaling is completed - pngwriter = PngWriter(self.outdir) - - prefix = pngwriter.unique_prefix() - # if upscaling is requested, then this will be called twice, once when - # the images are first generated, and then again when after upscaling - # is complete. The upscaling replaces the original file, so the second - # entry should not be inserted into the image list. 
- def image_done(image, seed, upscaled=False, first_seed=-1, use_prefix=None): - print(f'First seed: {first_seed}') - name = f'{prefix}.{seed}.png' - iter_opt = argparse.Namespace(**vars(opt)) # copy - if opt.variation_amount > 0: - this_variation = [[seed, opt.variation_amount]] - if opt.with_variations is None: - iter_opt.with_variations = this_variation - else: - iter_opt.with_variations = opt.with_variations + this_variation - iter_opt.variation_amount = 0 - elif opt.with_variations is None: - iter_opt.seed = seed - normalized_prompt = PromptFormatter(self.model, iter_opt).normalize_prompt() - path = pngwriter.save_image_and_prompt_to_png(image, f'{normalized_prompt} -S{iter_opt.seed}', name) - - if int(config['seed']) == -1: - config['seed'] = seed - # Append post_data to log, but only once! - if not upscaled: - with open(os.path.join(self.outdir, "dream_web_log.txt"), "a") as log: - log.write(f"{path}: {json.dumps(config)}\n") - - self.wfile.write(bytes(json.dumps( - {'event': 'result', 'url': path, 'seed': seed, 'config': config} - ) + '\n',"utf-8")) - - # control state of the "postprocessing..." message - upscaling_requested = opt.upscale or opt.gfpgan_strength > 0 - nonlocal images_generated # NB: Is this bad python style? It is typical usage in a perl closure. - nonlocal images_upscaled # NB: Is this bad python style? It is typical usage in a perl closure. - if upscaled: - images_upscaled += 1 - else: - images_generated += 1 - if upscaling_requested: - action = None - if images_generated >= opt.iterations: - if images_upscaled < opt.iterations: - action = 'upscaling-started' - else: - action = 'upscaling-done' - if action: - x = images_upscaled + 1 - self.wfile.write(bytes(json.dumps( - {'event': action, 'processed_file_cnt': f'{x}/{opt.iterations}'} - ) + '\n',"utf-8")) - - step_writer = PngWriter(os.path.join(self.outdir, "intermediates")) - step_index = 1 - def image_progress(sample, step): - if self.canceled.is_set(): - self.wfile.write(bytes(json.dumps({'event':'canceled'}) + '\n', 'utf-8')) - raise CanceledException - path = None - # since rendering images is moderately expensive, only render every 5th image - # and don't bother with the last one, since it'll render anyway - nonlocal step_index - if opt.progress_images and step % 5 == 0 and step < opt.steps - 1: - image = self.model.sample_to_image(sample) - name = f'{prefix}.{opt.seed}.{step_index}.png' - metadata = f'{opt.prompt} -S{opt.seed} [intermediate]' - path = step_writer.save_image_and_prompt_to_png(image, metadata, name) - step_index += 1 - self.wfile.write(bytes(json.dumps( - {'event': 'step', 'step': step + 1, 'url': path} - ) + '\n',"utf-8")) - - try: - if opt.init_img is None: - # Run txt2img - self.model.prompt2image(**vars(opt), step_callback=image_progress, image_callback=image_done) - else: - # Decode initimg as base64 to temp file - with open("./img2img-tmp.png", "wb") as f: - initimg = opt.init_img.split(",")[1] # Ignore mime type - f.write(base64.b64decode(initimg)) - opt1 = argparse.Namespace(**vars(opt)) - opt1.init_img = "./img2img-tmp.png" - - try: - # Run img2img - self.model.prompt2image(**vars(opt1), step_callback=image_progress, image_callback=image_done) - finally: - # Remove the temp file - os.remove("./img2img-tmp.png") - except CanceledException: - print(f"Canceled.") - return - - -class ThreadingDreamServer(ThreadingHTTPServer): - def __init__(self, server_address): - super(ThreadingDreamServer, self).__init__(server_address, DreamServer) diff --git a/ldm/lr_scheduler.py 
b/ldm/lr_scheduler.py deleted file mode 100644 index 79c1d1978e..0000000000 --- a/ldm/lr_scheduler.py +++ /dev/null @@ -1,143 +0,0 @@ -import numpy as np - - -class LambdaWarmUpCosineScheduler: - """ - note: use with a base_lr of 1.0 - """ - - def __init__( - self, - warm_up_steps, - lr_min, - lr_max, - lr_start, - max_decay_steps, - verbosity_interval=0, - ): - self.lr_warm_up_steps = warm_up_steps - self.lr_start = lr_start - self.lr_min = lr_min - self.lr_max = lr_max - self.lr_max_decay_steps = max_decay_steps - self.last_lr = 0.0 - self.verbosity_interval = verbosity_interval - - def schedule(self, n, **kwargs): - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: - print( - f'current step: {n}, recent lr-multiplier: {self.last_lr}' - ) - if n < self.lr_warm_up_steps: - lr = ( - self.lr_max - self.lr_start - ) / self.lr_warm_up_steps * n + self.lr_start - self.last_lr = lr - return lr - else: - t = (n - self.lr_warm_up_steps) / ( - self.lr_max_decay_steps - self.lr_warm_up_steps - ) - t = min(t, 1.0) - lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( - 1 + np.cos(t * np.pi) - ) - self.last_lr = lr - return lr - - def __call__(self, n, **kwargs): - return self.schedule(n, **kwargs) - - -class LambdaWarmUpCosineScheduler2: - """ - supports repeated iterations, configurable via lists - note: use with a base_lr of 1.0. - """ - - def __init__( - self, - warm_up_steps, - f_min, - f_max, - f_start, - cycle_lengths, - verbosity_interval=0, - ): - assert ( - len(warm_up_steps) - == len(f_min) - == len(f_max) - == len(f_start) - == len(cycle_lengths) - ) - self.lr_warm_up_steps = warm_up_steps - self.f_start = f_start - self.f_min = f_min - self.f_max = f_max - self.cycle_lengths = cycle_lengths - self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) - self.last_f = 0.0 - self.verbosity_interval = verbosity_interval - - def find_in_interval(self, n): - interval = 0 - for cl in self.cum_cycles[1:]: - if n <= cl: - return interval - interval += 1 - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: - print( - f'current step: {n}, recent lr-multiplier: {self.last_f}, ' - f'current cycle {cycle}' - ) - if n < self.lr_warm_up_steps[cycle]: - f = ( - self.f_max[cycle] - self.f_start[cycle] - ) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - t = (n - self.lr_warm_up_steps[cycle]) / ( - self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle] - ) - t = min(t, 1.0) - f = self.f_min[cycle] + 0.5 * ( - self.f_max[cycle] - self.f_min[cycle] - ) * (1 + np.cos(t * np.pi)) - self.last_f = f - return f - - def __call__(self, n, **kwargs): - return self.schedule(n, **kwargs) - - -class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: - print( - f'current step: {n}, recent lr-multiplier: {self.last_f}, ' - f'current cycle {cycle}' - ) - - if n < self.lr_warm_up_steps[cycle]: - f = ( - self.f_max[cycle] - self.f_start[cycle] - ) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * ( - self.cycle_lengths[cycle] - n - ) / (self.cycle_lengths[cycle]) - self.last_f = f - return f diff --git a/ldm/models/__init__.py 
b/ldm/models/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py
deleted file mode 100644
index 548c44fa49..0000000000
--- a/ldm/simplet2i.py
+++ /dev/null
@@ -1,13 +0,0 @@
-'''
-This module is provided for backward compatibility with the
-original (hasty) API.
-
-Please use ldm.generate instead.
-'''
-
-from ldm.generate import Generate
-
-class T2I(Generate):
-    def __init__(self,**kwargs):
-        print(f'>> The ldm.simplet2i module is deprecated. Use ldm.generate instead. It is a drop-in replacement.')
-        super().__init__(kwargs)
diff --git a/pyproject.toml b/pyproject.toml
index 3e219172cc..45fe8ef327 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -104,18 +104,17 @@ dependencies = [
 [project.scripts]
 
 # legacy entrypoints; provided for backwards compatibility
-"invoke.py" = "ldm.invoke.CLI:main"
-"configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
-"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
-"merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
+"invoke.py" = "invokeai.frontend.CLI:command_line_interface"
+"configure_invokeai.py" = "invokeai.frontend.config:invokeai_configure"
+"textual_inversion.py" = "invokeai.frontend.training:textual_inversion"
 
 # modern entrypoints
-"invokeai" = "ldm.invoke.CLI:main"
-"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
-"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
-"invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
-"invokeai-model-install" = "ldm.invoke.config.model_install:main"
-"invokeai-update" = "ldm.invoke.config.invokeai_update:main"
+"invokeai" = "invokeai.frontend.CLI:invokeai_command_line_interface"
+"invokeai-configure" = "invokeai.frontend.config:invokeai_configure"
+"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
+"invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
+"invokeai-model-install" = "invokeai.frontend.config:invokeai_model_install"
+"invokeai-update" = "invokeai.frontend.config:invokeai_update"
 
 [project.urls]
 "Homepage" = "https://invoke-ai.github.io/InvokeAI/"
@@ -132,15 +131,16 @@ version = { attr = "invokeai.version.__version__" }
 "include" = [
     "invokeai.assets.web*","invokeai.version*",
     "invokeai.generator*","invokeai.backend*",
-    "invokeai.frontend.dist*", "invokeai.configs*",
-    "ldm*",
+    "invokeai.frontend*", "invokeai.frontend.web.dist*",
+    "invokeai.configs*",
+    "invokeai.app*","ldm*",
 ]
 
 [tool.setuptools.package-data]
 "invokeai.assets.web" = ["**.png"]
 "invokeai.backend" = ["**.png"]
 "invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
-"invokeai.frontend.dist" = ["**"]
+"invokeai.frontend.web.dist" = ["**"]
 
 [tool.pytest.ini_options]
 addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch"
diff --git a/tests/nodes/test_graph_execution_state.py b/tests/nodes/test_graph_execution_state.py
index 980c262501..4c22507098 100644
--- a/tests/nodes/test_graph_execution_state.py
+++ b/tests/nodes/test_graph_execution_state.py
@@ -1,11 +1,11 @@
 from .test_invoker import create_edge
 from .test_nodes import ImageTestInvocation, ListPassThroughInvocation, PromptTestInvocation, PromptCollectionTestInvocation
-from ldm.invoke.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
-from ldm.invoke.app.services.processor import DefaultInvocationProcessor
-from ldm.invoke.app.services.sqlite import SqliteItemStorage, sqlite_memory
-from ldm.invoke.app.services.invocation_queue import MemoryInvocationQueue
-from ldm.invoke.app.services.invocation_services import InvocationServices
-from ldm.invoke.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation, GraphExecutionState
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
+from invokeai.app.services.processor import DefaultInvocationProcessor
+from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory
+from invokeai.app.services.invocation_queue import MemoryInvocationQueue
+from invokeai.app.services.invocation_services import InvocationServices
+from invokeai.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation, GraphExecutionState
 
 import pytest
diff --git a/tests/nodes/test_invoker.py b/tests/nodes/test_invoker.py
index 8ca2931841..6a7867bffe 100644
--- a/tests/nodes/test_invoker.py
+++ b/tests/nodes/test_invoker.py
@@ -1,11 +1,11 @@
 from .test_nodes import ErrorInvocation, ImageTestInvocation, ListPassThroughInvocation, PromptTestInvocation, PromptCollectionTestInvocation, TestEventService, create_edge, wait_until
-from ldm.invoke.app.services.processor import DefaultInvocationProcessor
-from ldm.invoke.app.services.sqlite import SqliteItemStorage, sqlite_memory
-from ldm.invoke.app.services.invocation_queue import MemoryInvocationQueue
-from ldm.invoke.app.services.invoker import Invoker
-from ldm.invoke.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
-from ldm.invoke.app.services.invocation_services import InvocationServices
-from ldm.invoke.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation, GraphExecutionState
+from invokeai.app.services.processor import DefaultInvocationProcessor
+from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory
+from invokeai.app.services.invocation_queue import MemoryInvocationQueue
+from invokeai.app.services.invoker import Invoker
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
+from invokeai.app.services.invocation_services import InvocationServices
+from invokeai.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation, GraphExecutionState
 
 import pytest
diff --git a/tests/nodes/test_node_graph.py b/tests/nodes/test_node_graph.py
index 1b5b341192..d432234aec 100644
--- a/tests/nodes/test_node_graph.py
+++ b/tests/nodes/test_node_graph.py
@@ -1,9 +1,9 @@
-from ldm.invoke.app.invocations.image import *
+from invokeai.app.invocations.image import *
 from .test_nodes import ListPassThroughInvocation, PromptTestInvocation
-from ldm.invoke.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation
-from ldm.invoke.app.invocations.generate import ImageToImageInvocation, TextToImageInvocation
-from ldm.invoke.app.invocations.upscale import UpscaleInvocation
+from invokeai.app.services.graph import Graph, GraphInvocation, InvalidEdgeError, NodeAlreadyInGraphError, NodeNotFoundError, are_connections_compatible, EdgeConnection, CollectInvocation, IterateInvocation
+from invokeai.app.invocations.generate import ImageToImageInvocation, TextToImageInvocation
+from invokeai.app.invocations.upscale import UpscaleInvocation
 
 import pytest
diff --git a/tests/nodes/test_nodes.py b/tests/nodes/test_nodes.py
index e07dcb8594..c3427ac03b 100644
--- a/tests/nodes/test_nodes.py
+++ b/tests/nodes/test_nodes.py
@@ -1,7 +1,7 @@
 from typing import Any, Callable, Literal
-from ldm.invoke.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
-from ldm.invoke.app.invocations.image import ImageField
-from ldm.invoke.app.services.invocation_services import InvocationServices
+from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
+from invokeai.app.invocations.image import ImageField
+from invokeai.app.services.invocation_services import InvocationServices
 from pydantic import Field
 
 import pytest
@@ -63,8 +63,8 @@ class PromptCollectionTestInvocation(BaseInvocation):
         return PromptCollectionTestInvocationOutput(collection=self.collection.copy())
 
-from ldm.invoke.app.services.events import EventServiceBase
-from ldm.invoke.app.services.graph import EdgeConnection
+from invokeai.app.services.events import EventServiceBase
+from invokeai.app.services.graph import EdgeConnection
 
 def create_edge(from_id: str, from_field: str, to_id: str, to_field: str) -> tuple[EdgeConnection, EdgeConnection]:
     return (EdgeConnection(node_id = from_id, field = from_field), EdgeConnection(node_id = to_id, field = to_field))
@@ -95,4 +95,4 @@ def wait_until(condition: Callable[[], bool], timeout: int = 10, interval: float
         if condition():
             return
         time.sleep(interval)
-    raise TimeoutError("Condition not met")
\ No newline at end of file
+    raise TimeoutError("Condition not met")
diff --git a/tests/nodes/test_sqlite.py b/tests/nodes/test_sqlite.py
index e499bbce12..a803af5635 100644
--- a/tests/nodes/test_sqlite.py
+++ b/tests/nodes/test_sqlite.py
@@ -1,4 +1,4 @@
-from ldm.invoke.app.services.sqlite import SqliteItemStorage, sqlite_memory
+from invokeai.app.services.sqlite import SqliteItemStorage, sqlite_memory
 
 from pydantic import BaseModel, Field
 
diff --git a/tests/test_path.py b/tests/test_path.py
index e99420d8ee..6076c6554f 100644
--- a/tests/test_path.py
+++ b/tests/test_path.py
@@ -7,11 +7,10 @@ import pathlib
 from os import path as osp
 from PIL import Image
 
-import invokeai.frontend.dist as frontend
+import invokeai.frontend.web.dist as frontend
 import invokeai.configs as configs
 import invokeai.assets.web as assets_web
-
 
 class ConfigsTestCase(unittest.TestCase):
     """Test the configuration related imports and objects"""
 
@@ -32,7 +31,7 @@ class ConfigsTestCase(unittest.TestCase):
     def test_frontend_path(self):
         """Test that the frontend path is correct"""
         FRONTEND_PATH = str(self.get_frontend_path())
-        assert FRONTEND_PATH.endswith(osp.join("invokeai", "frontend", "dist"))
+        assert FRONTEND_PATH.endswith(osp.join("invokeai", "frontend", "web", "dist"))
 
     def test_caution_img(self):
         """Verify the caution image"""

From 60a98cacef241a7671ed3a7f33395cc26799387e Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Fri, 3 Mar 2023 01:02:00 -0500
Subject: [PATCH 12/19] all vestiges of ldm.invoke removed

---
 invokeai/app/api/dependencies.py | 55 +-
 invokeai/app/api/events.py | 28 +-
invokeai/app/api/routers/images.py | 59 +- invokeai/app/api/routers/sessions.py | 269 ++-- invokeai/app/api/sockets.py | 32 +- invokeai/app/api_app.py | 105 +- invokeai/app/cli_app.py | 248 ++-- invokeai/app/invocations/__init__.py | 6 +- invokeai/app/invocations/baseinvocation.py | 2 + invokeai/app/invocations/cv.py | 31 +- invokeai/app/invocations/generate.py | 186 ++- invokeai/app/invocations/image.py | 186 ++- invokeai/app/invocations/prompt.py | 6 +- invokeai/app/invocations/reconstruct.py | 38 +- invokeai/app/invocations/upscale.py | 35 +- invokeai/app/services/events.py | 92 +- invokeai/app/services/generate_initializer.py | 164 ++- invokeai/app/services/graph.py | 680 +++++++--- invokeai/app/services/image_storage.py | 33 +- invokeai/app/services/invocation_queue.py | 23 +- invokeai/app/services/invocation_services.py | 26 +- invokeai/app/services/invoker.py | 47 +- invokeai/app/services/item_storage.py | 25 +- invokeai/app/services/processor.py | 72 +- invokeai/app/services/sqlite.py | 73 +- invokeai/backend/__init__.py | 9 +- invokeai/backend/args.py | 1123 +++++++++-------- invokeai/backend/config/invokeai_configure.py | 40 +- .../backend/config/model_install_backend.py | 79 +- invokeai/backend/generate.py | 38 +- invokeai/backend/generator/__init__.py | 4 +- invokeai/backend/generator/base.py | 309 +++-- invokeai/backend/generator/embiggen.py | 325 +++-- invokeai/backend/generator/img2img.py | 88 +- invokeai/backend/generator/inpaint.py | 289 +++-- invokeai/backend/generator/omnibus.py | 173 --- invokeai/backend/generator/txt2img.py | 71 +- invokeai/backend/generator/txt2img2img.py | 135 +- invokeai/backend/globals.py | 64 +- invokeai/backend/image_util/__init__.py | 15 +- invokeai/backend/image_util/patchmatch.py | 29 +- invokeai/backend/image_util/pngwriter.py | 92 +- invokeai/backend/image_util/seamless.py | 46 +- invokeai/backend/image_util/txt2mask.py | 113 +- invokeai/backend/image_util/util.py | 47 +- invokeai/backend/model_management/__init__.py | 12 +- .../convert_ckpt_to_diffusers.py | 625 ++++++--- .../backend/model_management/model_manager.py | 30 +- invokeai/backend/prompting/__init__.py | 15 +- invokeai/backend/prompting/conditioning.py | 223 ++-- invokeai/backend/restoration/__init__.py | 6 +- invokeai/backend/restoration/base.py | 21 +- invokeai/backend/restoration/codeformer.py | 75 +- .../backend/restoration/codeformer_arch.py | 156 ++- invokeai/backend/restoration/gfpgan.py | 48 +- invokeai/backend/restoration/outcrop.py | 123 +- invokeai/backend/restoration/outpaint.py | 72 +- invokeai/backend/restoration/realesrgan.py | 59 +- invokeai/backend/restoration/vqgan_arch.py | 267 ++-- invokeai/backend/stable_diffusion/__init__.py | 18 +- .../backend/stable_diffusion/attention.py | 181 +-- .../backend/stable_diffusion/autoencoder.py | 169 +-- .../backend/stable_diffusion/concepts_lib.py | 197 +-- .../backend/stable_diffusion/data/base.py | 12 +- .../backend/stable_diffusion/data/imagenet.py | 229 ++-- .../backend/stable_diffusion/data/lsun.py | 70 +- .../stable_diffusion/data/personalized.py | 203 ++- .../data/personalized_style.py | 157 +-- .../stable_diffusion/diffusers_pipeline.py | 543 +++++--- .../stable_diffusion/diffusion/__init__.py | 6 +- .../stable_diffusion/diffusion/__init__.py~ | 4 - .../stable_diffusion/diffusion/classifier.py | 161 +-- .../diffusion/cross_attention_control.py | 422 ++++--- .../diffusion/cross_attention_map_saving.py | 47 +- .../stable_diffusion/diffusion/ddim.py | 92 +- .../stable_diffusion/diffusion/ddpm.py | 732 +++++------ 
.../stable_diffusion/diffusion/ksampler.py | 229 ++-- .../stable_diffusion/diffusion/plms.py | 103 +- .../stable_diffusion/diffusion/sampler.py | 286 ++--- .../diffusion/shared_invokeai_diffusion.py | 436 ++++--- .../diffusionmodules/model.py | 803 +++++++----- .../diffusionmodules/openaimodel.py | 99 +- .../stable_diffusion/diffusionmodules/util.py | 69 +- .../distributions/distributions.py | 8 +- invokeai/backend/stable_diffusion/ema.py | 20 +- .../stable_diffusion/encoders/modules.py | 337 ++--- .../image_degradation/bsrgan.py | 122 +- .../image_degradation/bsrgan_light.py | 112 +- .../image_degradation/utils_image.py | 123 +- .../stable_diffusion/losses/contperceptual.py | 58 +- .../stable_diffusion/losses/vqperceptual.py | 88 +- .../backend/stable_diffusion/offloading.py | 42 +- .../textual_inversion_manager.py | 2 +- .../backend/stable_diffusion/x_transformer.py | 147 +-- .../training/{__init.py__ => __init__.py} | 4 +- .../training/textual_inversion_training.py | 14 +- invokeai/backend/util/__init__.py | 33 +- invokeai/backend/util/devices.py | 31 +- invokeai/backend/util/log.py | 11 +- invokeai/backend/util/util.py | 29 +- invokeai/backend/web/__init__.py | 4 +- invokeai/backend/web/invoke_ai_web_server.py | 295 ++--- .../backend/web/modules/create_cmd_parser.py | 11 +- .../web/modules/get_canvas_generation_mode.py | 12 +- invokeai/backend/web/modules/parameters.py | 3 +- .../configs/stable-diffusion/v1-finetune.yaml | 14 +- .../stable-diffusion/v1-finetune_style.yaml | 12 +- .../stable-diffusion/v1-inference.yaml | 10 +- .../v1-inpainting-inference.yaml | 10 +- .../stable-diffusion/v1-m1-finetune.yaml | 12 +- .../stable-diffusion/v2-inference-v.yaml | 6 +- invokeai/frontend/CLI/CLI.py | 58 +- invokeai/frontend/CLI/__init__.py | 4 +- invokeai/frontend/CLI/readline.py | 424 ++++--- .../frontend/{config => install}/__init__.py | 7 +- .../{config => install}/invokeai_configure.py | 4 +- .../{config => install}/invokeai_update.py | 63 +- .../{config => install}/model_install.py | 65 +- .../frontend/{config => install}/widgets.py | 148 ++- invokeai/frontend/merge/__init__.py | 4 +- invokeai/frontend/merge/merge_diffusers.py | 26 +- invokeai/frontend/training/__init__.py | 5 +- .../frontend/training/textual_inversion.py | 10 +- invokeai/version/__init__.py | 22 +- invokeai/version/invokeai_version.py | 2 +- pyproject.toml | 12 +- 126 files changed, 8514 insertions(+), 6520 deletions(-) delete mode 100644 invokeai/backend/generator/omnibus.py delete mode 100644 invokeai/backend/stable_diffusion/diffusion/__init__.py~ rename invokeai/backend/training/{__init.py__ => __init__.py} (94%) rename invokeai/frontend/{config => install}/__init__.py (96%) rename invokeai/frontend/{config => install}/invokeai_configure.py (93%) rename invokeai/frontend/{config => install}/invokeai_update.py (56%) rename invokeai/frontend/{config => install}/model_install.py (93%) rename invokeai/frontend/{config => install}/widgets.py (61%) diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index 08f362133e..5ff8f9eef5 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -1,33 +1,31 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from argparse import Namespace import os - -from ..services.processor import DefaultInvocationProcessor - -from ..services.graph import GraphExecutionState -from ..services.sqlite import SqliteItemStorage +from argparse import Namespace from ...globals import Globals - +from 
..services.generate_initializer import get_generate +from ..services.graph import GraphExecutionState from ..services.image_storage import DiskImageStorage from ..services.invocation_queue import MemoryInvocationQueue from ..services.invocation_services import InvocationServices from ..services.invoker import Invoker -from ..services.generate_initializer import get_generate +from ..services.processor import DefaultInvocationProcessor +from ..services.sqlite import SqliteItemStorage from .events import FastAPIEventService # TODO: is there a better way to achieve this? -def check_internet()->bool: - ''' +def check_internet() -> bool: + """ Return true if the internet is reachable. It does this by pinging huggingface.co. - ''' + """ import urllib.request - host = 'http://huggingface.co' + + host = "http://huggingface.co" try: - urllib.request.urlopen(host,timeout=1) + urllib.request.urlopen(host, timeout=1) return True except: return False @@ -35,14 +33,11 @@ def check_internet()->bool: class ApiDependencies: """Contains and initializes all dependencies for the API""" + invoker: Invoker = None @staticmethod - def initialize( - args, - config, - event_handler_id: int - ): + def initialize(args, config, event_handler_id: int): Globals.try_patchmatch = args.patchmatch Globals.always_use_cpu = args.always_use_cpu Globals.internet_available = args.internet_available and check_internet() @@ -50,30 +45,34 @@ class ApiDependencies: Globals.ckpt_convert = args.ckpt_convert # TODO: Use a logger - print(f'>> Internet connectivity is {Globals.internet_available}') + print(f">> Internet connectivity is {Globals.internet_available}") generate = get_generate(args, config) events = FastAPIEventService(event_handler_id) - output_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../outputs')) + output_folder = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../../../outputs") + ) images = DiskImageStorage(output_folder) # TODO: build a file/path manager? 
- db_location = os.path.join(output_folder, 'invokeai.db') + db_location = os.path.join(output_folder, "invokeai.db") services = InvocationServices( - generate = generate, - events = events, - images = images, - queue = MemoryInvocationQueue(), - graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = db_location, table_name = 'graph_executions'), - processor = DefaultInvocationProcessor() + generate=generate, + events=events, + images=images, + queue=MemoryInvocationQueue(), + graph_execution_manager=SqliteItemStorage[GraphExecutionState]( + filename=db_location, table_name="graph_executions" + ), + processor=DefaultInvocationProcessor(), ) ApiDependencies.invoker = Invoker(services) - + @staticmethod def shutdown(): if ApiDependencies.invoker: diff --git a/invokeai/app/api/events.py b/invokeai/app/api/events.py index 701b48a316..bcfaad35a1 100644 --- a/invokeai/app/api/events.py +++ b/invokeai/app/api/events.py @@ -1,11 +1,14 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) import asyncio +import threading from queue import Empty, Queue from typing import Any + from fastapi_events.dispatcher import dispatch + from ..services.events import EventServiceBase -import threading + class FastAPIEventService(EventServiceBase): event_handler_id: int @@ -16,39 +19,34 @@ class FastAPIEventService(EventServiceBase): self.event_handler_id = event_handler_id self.__queue = Queue() self.__stop_event = threading.Event() - asyncio.create_task(self.__dispatch_from_queue(stop_event = self.__stop_event)) + asyncio.create_task(self.__dispatch_from_queue(stop_event=self.__stop_event)) super().__init__() - def stop(self, *args, **kwargs): self.__stop_event.set() self.__queue.put(None) - def dispatch(self, event_name: str, payload: Any) -> None: - self.__queue.put(dict( - event_name = event_name, - payload = payload - )) - + self.__queue.put(dict(event_name=event_name, payload=payload)) async def __dispatch_from_queue(self, stop_event: threading.Event): """Get events on from the queue and dispatch them, from the correct thread""" while not stop_event.is_set(): try: - event = self.__queue.get(block = False) - if not event: # Probably stopping + event = self.__queue.get(block=False) + if not event: # Probably stopping continue dispatch( - event.get('event_name'), - payload = event.get('payload'), - middleware_id = self.event_handler_id) + event.get("event_name"), + payload=event.get("payload"), + middleware_id=self.event_handler_id, + ) except Empty: await asyncio.sleep(0.001) pass except asyncio.CancelledError as e: - raise e # Raise a proper error + raise e # Raise a proper error diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 1ae116e49d..55f1a2f036 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -1,57 +1,56 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) from datetime import datetime, timezone -from fastapi import Path, UploadFile, Request -from fastapi.routing import APIRouter + +from fastapi import Path, Request, UploadFile from fastapi.responses import FileResponse, Response +from fastapi.routing import APIRouter from PIL import Image + from ...services.image_storage import ImageType from ..dependencies import ApiDependencies -images_router = APIRouter( - prefix = '/v1/images', - tags = ['images'] -) +images_router = APIRouter(prefix="/v1/images", tags=["images"]) -@images_router.get('/{image_type}/{image_name}', - operation_id = 'get_image' - ) 
+@images_router.get("/{image_type}/{image_name}", operation_id="get_image") async def get_image( - image_type: ImageType = Path(description = "The type of image to get"), - image_name: str = Path(description = "The name of the image to get") + image_type: ImageType = Path(description="The type of image to get"), + image_name: str = Path(description="The name of the image to get"), ): """Gets a result""" # TODO: This is not really secure at all. At least make sure only output results are served filename = ApiDependencies.invoker.services.images.get_path(image_type, image_name) return FileResponse(filename) -@images_router.post('/uploads/', - operation_id = 'upload_image', - responses = { - 201: {'description': 'The image was uploaded successfully'}, - 404: {'description': 'Session not found'} - }) -async def upload_image( - file: UploadFile, - request: Request -): - if not file.content_type.startswith('image'): - return Response(status_code = 415) - + +@images_router.post( + "/uploads/", + operation_id="upload_image", + responses={ + 201: {"description": "The image was uploaded successfully"}, + 404: {"description": "Session not found"}, + }, +) +async def upload_image(file: UploadFile, request: Request): + if not file.content_type.startswith("image"): + return Response(status_code=415) + contents = await file.read() try: im = Image.open(contents) except: # Error opening the image - return Response(status_code = 415) - - filename = f'{str(int(datetime.now(timezone.utc).timestamp()))}.png' + return Response(status_code=415) + + filename = f"{str(int(datetime.now(timezone.utc).timestamp()))}.png" ApiDependencies.invoker.services.images.save(ImageType.UPLOAD, filename, im) return Response( status_code=201, - headers = { - 'Location': request.url_for('get_image', image_type=ImageType.UPLOAD, image_name=filename) - } + headers={ + "Location": request.url_for( + "get_image", image_type=ImageType.UPLOAD, image_name=filename + ) + }, ) diff --git a/invokeai/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py index beb13736c6..713b212294 100644 --- a/invokeai/app/api/routers/sessions.py +++ b/invokeai/app/api/routers/sessions.py @@ -1,232 +1,271 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from typing import List, Optional, Union, Annotated -from fastapi import Query, Path, Body -from fastapi.routing import APIRouter +from typing import Annotated, List, Optional, Union + +from fastapi import Body, Path, Query from fastapi.responses import Response +from fastapi.routing import APIRouter from pydantic.fields import Field +from ...invocations import * +from ...invocations.baseinvocation import BaseInvocation +from ...services.graph import ( + EdgeConnection, + Graph, + GraphExecutionState, + NodeAlreadyExecutedError, +) from ...services.item_storage import PaginatedResults from ..dependencies import ApiDependencies -from ...invocations.baseinvocation import BaseInvocation -from ...services.graph import EdgeConnection, Graph, GraphExecutionState, NodeAlreadyExecutedError -from ...invocations import * -session_router = APIRouter( - prefix = '/v1/sessions', - tags = ['sessions'] -) +session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"]) -@session_router.post('/', - operation_id = 'create_session', - responses = { +@session_router.post( + "/", + operation_id="create_session", + responses={ 200: {"model": GraphExecutionState}, - 400: {'description': 'Invalid json'} - }) + 400: {"description": "Invalid json"}, + }, +) async def create_session( - graph: 
Optional[Graph] = Body(default = None, description = "The graph to initialize the session with") + graph: Optional[Graph] = Body( + default=None, description="The graph to initialize the session with" + ) ) -> GraphExecutionState: """Creates a new session, optionally initializing it with an invocation graph""" session = ApiDependencies.invoker.create_execution_state(graph) return session -@session_router.get('/', - operation_id = 'list_sessions', - responses = { - 200: {"model": PaginatedResults[GraphExecutionState]} - }) +@session_router.get( + "/", + operation_id="list_sessions", + responses={200: {"model": PaginatedResults[GraphExecutionState]}}, +) async def list_sessions( - page: int = Query(default = 0, description = "The page of results to get"), - per_page: int = Query(default = 10, description = "The number of results per page"), - query: str = Query(default = '', description = "The query string to search for") + page: int = Query(default=0, description="The page of results to get"), + per_page: int = Query(default=10, description="The number of results per page"), + query: str = Query(default="", description="The query string to search for"), ) -> PaginatedResults[GraphExecutionState]: """Gets a list of sessions, optionally searching""" - if filter == '': - result = ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page) + if filter == "": + result = ApiDependencies.invoker.services.graph_execution_manager.list( + page, per_page + ) else: - result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page) + result = ApiDependencies.invoker.services.graph_execution_manager.search( + query, page, per_page + ) return result -@session_router.get('/{session_id}', - operation_id = 'get_session', - responses = { +@session_router.get( + "/{session_id}", + operation_id="get_session", + responses={ 200: {"model": GraphExecutionState}, - 404: {'description': 'Session not found'} - }) + 404: {"description": "Session not found"}, + }, +) async def get_session( - session_id: str = Path(description = "The id of the session to get") + session_id: str = Path(description="The id of the session to get"), ) -> GraphExecutionState: """Gets a session""" session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) if session is None: - return Response(status_code = 404) + return Response(status_code=404) else: return session -@session_router.post('/{session_id}/nodes', - operation_id = 'add_node', - responses = { +@session_router.post( + "/{session_id}/nodes", + operation_id="add_node", + responses={ 200: {"model": str}, - 400: {'description': 'Invalid node or link'}, - 404: {'description': 'Session not found'} - } + 400: {"description": "Invalid node or link"}, + 404: {"description": "Session not found"}, + }, ) async def add_node( - session_id: str = Path(description = "The id of the session"), - node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body(description = "The node to add") + session_id: str = Path(description="The id of the session"), + node: Annotated[ + Union[BaseInvocation.get_invocations()], Field(discriminator="type") + ] = Body(description="The node to add"), ) -> str: """Adds a node to the graph""" session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) if session is None: - return Response(status_code = 404) + return Response(status_code=404) try: session.add_node(node) - ApiDependencies.invoker.services.graph_execution_manager.set(session) # 
TODO: can this be done automatically, or add node through an API? + ApiDependencies.invoker.services.graph_execution_manager.set( + session + ) # TODO: can this be done automatically, or add node through an API? return session.id except NodeAlreadyExecutedError: - return Response(status_code = 400) + return Response(status_code=400) except IndexError: - return Response(status_code = 400) - + return Response(status_code=400) -@session_router.put('/{session_id}/nodes/{node_path}', - operation_id = 'update_node', - responses = { + +@session_router.put( + "/{session_id}/nodes/{node_path}", + operation_id="update_node", + responses={ 200: {"model": GraphExecutionState}, - 400: {'description': 'Invalid node or link'}, - 404: {'description': 'Session not found'} - } + 400: {"description": "Invalid node or link"}, + 404: {"description": "Session not found"}, + }, ) async def update_node( - session_id: str = Path(description = "The id of the session"), - node_path: str = Path(description = "The path to the node in the graph"), - node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body(description = "The new node") + session_id: str = Path(description="The id of the session"), + node_path: str = Path(description="The path to the node in the graph"), + node: Annotated[ + Union[BaseInvocation.get_invocations()], Field(discriminator="type") + ] = Body(description="The new node"), ) -> GraphExecutionState: """Updates a node in the graph and removes all linked edges""" session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) if session is None: - return Response(status_code = 404) + return Response(status_code=404) try: session.update_node(node_path, node) - ApiDependencies.invoker.services.graph_execution_manager.set(session) # TODO: can this be done automatically, or add node through an API? + ApiDependencies.invoker.services.graph_execution_manager.set( + session + ) # TODO: can this be done automatically, or add node through an API? return session except NodeAlreadyExecutedError: - return Response(status_code = 400) + return Response(status_code=400) except IndexError: - return Response(status_code = 400) + return Response(status_code=400) -@session_router.delete('/{session_id}/nodes/{node_path}', - operation_id = 'delete_node', - responses = { +@session_router.delete( + "/{session_id}/nodes/{node_path}", + operation_id="delete_node", + responses={ 200: {"model": GraphExecutionState}, - 400: {'description': 'Invalid node or link'}, - 404: {'description': 'Session not found'} - } + 400: {"description": "Invalid node or link"}, + 404: {"description": "Session not found"}, + }, ) async def delete_node( - session_id: str = Path(description = "The id of the session"), - node_path: str = Path(description = "The path to the node to delete") + session_id: str = Path(description="The id of the session"), + node_path: str = Path(description="The path to the node to delete"), ) -> GraphExecutionState: """Deletes a node in the graph and removes all linked edges""" session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) if session is None: - return Response(status_code = 404) + return Response(status_code=404) try: session.delete_node(node_path) - ApiDependencies.invoker.services.graph_execution_manager.set(session) # TODO: can this be done automatically, or add node through an API? + ApiDependencies.invoker.services.graph_execution_manager.set( + session + ) # TODO: can this be done automatically, or add node through an API? 
return session except NodeAlreadyExecutedError: - return Response(status_code = 400) + return Response(status_code=400) except IndexError: - return Response(status_code = 400) + return Response(status_code=400) -@session_router.post('/{session_id}/edges', - operation_id = 'add_edge', - responses = { +@session_router.post( + "/{session_id}/edges", + operation_id="add_edge", + responses={ 200: {"model": GraphExecutionState}, - 400: {'description': 'Invalid node or link'}, - 404: {'description': 'Session not found'} - } + 400: {"description": "Invalid node or link"}, + 404: {"description": "Session not found"}, + }, ) async def add_edge( - session_id: str = Path(description = "The id of the session"), - edge: tuple[EdgeConnection, EdgeConnection] = Body(description = "The edge to add") + session_id: str = Path(description="The id of the session"), + edge: tuple[EdgeConnection, EdgeConnection] = Body(description="The edge to add"), ) -> GraphExecutionState: """Adds an edge to the graph""" session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) if session is None: - return Response(status_code = 404) + return Response(status_code=404) try: session.add_edge(edge) - ApiDependencies.invoker.services.graph_execution_manager.set(session) # TODO: can this be done automatically, or add node through an API? + ApiDependencies.invoker.services.graph_execution_manager.set( + session + ) # TODO: can this be done automatically, or add node through an API? return session except NodeAlreadyExecutedError: - return Response(status_code = 400) + return Response(status_code=400) except IndexError: - return Response(status_code = 400) + return Response(status_code=400) # TODO: the edge being in the path here is really ugly, find a better solution -@session_router.delete('/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}', - operation_id = 'delete_edge', - responses = { +@session_router.delete( + "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}", + operation_id="delete_edge", + responses={ 200: {"model": GraphExecutionState}, - 400: {'description': 'Invalid node or link'}, - 404: {'description': 'Session not found'} - } + 400: {"description": "Invalid node or link"}, + 404: {"description": "Session not found"}, + }, ) async def delete_edge( - session_id: str = Path(description = "The id of the session"), - from_node_id: str = Path(description = "The id of the node the edge is coming from"), - from_field: str = Path(description = "The field of the node the edge is coming from"), - to_node_id: str = Path(description = "The id of the node the edge is going to"), - to_field: str = Path(description = "The field of the node the edge is going to") + session_id: str = Path(description="The id of the session"), + from_node_id: str = Path(description="The id of the node the edge is coming from"), + from_field: str = Path(description="The field of the node the edge is coming from"), + to_node_id: str = Path(description="The id of the node the edge is going to"), + to_field: str = Path(description="The field of the node the edge is going to"), ) -> GraphExecutionState: """Deletes an edge from the graph""" session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) if session is None: - return Response(status_code = 404) + return Response(status_code=404) try: - edge = (EdgeConnection(node_id = from_node_id, field = from_field), EdgeConnection(node_id = to_node_id, field = to_field)) + edge = ( + EdgeConnection(node_id=from_node_id, 
field=from_field), + EdgeConnection(node_id=to_node_id, field=to_field), + ) session.delete_edge(edge) - ApiDependencies.invoker.services.graph_execution_manager.set(session) # TODO: can this be done automatically, or add node through an API? + ApiDependencies.invoker.services.graph_execution_manager.set( + session + ) # TODO: can this be done automatically, or add node through an API? return session except NodeAlreadyExecutedError: - return Response(status_code = 400) + return Response(status_code=400) except IndexError: - return Response(status_code = 400) - + return Response(status_code=400) -@session_router.put('/{session_id}/invoke', - operation_id = 'invoke_session', - responses = { + +@session_router.put( + "/{session_id}/invoke", + operation_id="invoke_session", + responses={ 200: {"model": None}, - 202: {'description': 'The invocation is queued'}, - 400: {'description': 'The session has no invocations ready to invoke'}, - 404: {'description': 'Session not found'} - }) + 202: {"description": "The invocation is queued"}, + 400: {"description": "The session has no invocations ready to invoke"}, + 404: {"description": "Session not found"}, + }, +) async def invoke_session( - session_id: str = Path(description = "The id of the session to invoke"), - all: bool = Query(default = False, description = "Whether or not to invoke all remaining invocations") + session_id: str = Path(description="The id of the session to invoke"), + all: bool = Query( + default=False, description="Whether or not to invoke all remaining invocations" + ), ) -> None: """Invokes a session""" session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) if session is None: - return Response(status_code = 404) - + return Response(status_code=404) + if session.is_complete(): - return Response(status_code = 400) - - ApiDependencies.invoker.invoke(session, invoke_all = all) + return Response(status_code=400) + + ApiDependencies.invoker.invoke(session, invoke_all=all) return Response(status_code=202) diff --git a/invokeai/app/api/sockets.py b/invokeai/app/api/sockets.py index eb4d5403c0..f70d7a6609 100644 --- a/invokeai/app/api/sockets.py +++ b/invokeai/app/api/sockets.py @@ -1,36 +1,38 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) from fastapi import FastAPI -from fastapi_socketio import SocketManager from fastapi_events.handlers.local import local_handler from fastapi_events.typing import Event +from fastapi_socketio import SocketManager + from ..services.events import EventServiceBase + class SocketIO: __sio: SocketManager def __init__(self, app: FastAPI): - self.__sio = SocketManager(app = app) - self.__sio.on('subscribe', handler=self._handle_sub) - self.__sio.on('unsubscribe', handler=self._handle_unsub) - + self.__sio = SocketManager(app=app) + self.__sio.on("subscribe", handler=self._handle_sub) + self.__sio.on("unsubscribe", handler=self._handle_unsub) + local_handler.register( - event_name = EventServiceBase.session_event, - _func=self._handle_session_event + event_name=EventServiceBase.session_event, _func=self._handle_session_event ) async def _handle_session_event(self, event: Event): await self.__sio.emit( - event = event[1]['event'], - data = event[1]['data'], - room = event[1]['data']['graph_execution_state_id'] + event=event[1]["event"], + data=event[1]["data"], + room=event[1]["data"]["graph_execution_state_id"], ) async def _handle_sub(self, sid, data, *args, **kwargs): - if 'session' in data: - self.__sio.enter_room(sid, data['session']) - + if "session" 
in data: + self.__sio.enter_room(sid, data["session"]) + # @app.sio.on('unsubscribe') + async def _handle_unsub(self, sid, data, *args, **kwargs): - if 'session' in data: - self.__sio.leave_room(sid, data['session']) + if "session" in data: + self.__sio.leave_room(sid, data["session"]) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index db79b0d7e8..79ba126f8e 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -2,36 +2,37 @@ import asyncio from inspect import signature -from fastapi import FastAPI -from fastapi.openapi.utils import get_openapi -from fastapi.openapi.docs import get_swagger_ui_html, get_redoc_html -from fastapi.staticfiles import StaticFiles -from fastapi_events.middleware import EventHandlerASGIMiddleware -from fastapi_events.handlers.local import local_handler -from fastapi.middleware.cors import CORSMiddleware -from pydantic.schema import schema + import uvicorn +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html +from fastapi.openapi.utils import get_openapi +from fastapi.staticfiles import StaticFiles +from fastapi_events.handlers.local import local_handler +from fastapi_events.middleware import EventHandlerASGIMiddleware +from pydantic.schema import schema + +from ..args import Args +from .api.dependencies import ApiDependencies +from .api.routers import images, sessions from .api.sockets import SocketIO from .invocations import * from .invocations.baseinvocation import BaseInvocation -from .api.routers import images, sessions -from .api.dependencies import ApiDependencies -from ..args import Args # Create the app # TODO: create this all in a method so configuration/etc. can be passed in? -app = FastAPI( - title = "Invoke AI", - docs_url = None, - redoc_url = None -) +app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None) # Add event handler event_handler_id: int = id(app) app.add_middleware( EventHandlerASGIMiddleware, - handlers = [local_handler], # TODO: consider doing this in services to support different configurations - middleware_id = event_handler_id) + handlers=[ + local_handler + ], # TODO: consider doing this in services to support different configurations + middleware_id=event_handler_id, +) # Add CORS # TODO: use configuration for this @@ -48,38 +49,34 @@ socket_io = SocketIO(app) config = {} + # Add startup event to load dependencies -@app.on_event('startup') +@app.on_event("startup") async def startup_event(): args = Args() config = args.parse_args() ApiDependencies.initialize( - args = args, - config = config, - event_handler_id = event_handler_id + args=args, config=config, event_handler_id=event_handler_id ) + # Shut down threads -@app.on_event('shutdown') +@app.on_event("shutdown") async def shutdown_event(): ApiDependencies.shutdown() + # Include all routers # TODO: REMOVE # app.include_router( # invocation.invocation_router, # prefix = '/api') -app.include_router( - sessions.session_router, - prefix = '/api' -) +app.include_router(sessions.session_router, prefix="/api") + +app.include_router(images.images_router, prefix="/api") -app.include_router( - images.images_router, - prefix = '/api' -) # Build a custom OpenAPI to include all outputs # TODO: can outputs be included on metadata of invocation schemas somehow? 
@@ -87,10 +84,10 @@ def custom_openapi(): if app.openapi_schema: return app.openapi_schema openapi_schema = get_openapi( - title = app.title, - description = "An API for invoking AI image operations", - version = "1.0.0", - routes = app.routes + title=app.title, + description="An API for invoking AI image operations", + version="1.0.0", + routes=app.routes, ) # Add all outputs @@ -102,12 +99,12 @@ def custom_openapi(): output_types.add(output_type) output_schemas = schema(output_types, ref_prefix="#/components/schemas/") - for schema_key, output_schema in output_schemas['definitions'].items(): + for schema_key, output_schema in output_schemas["definitions"].items(): openapi_schema["components"]["schemas"][schema_key] = output_schema # TODO: note that we assume the schema_key here is the TYPE.__name__ # This could break in some cases, figure out a better way to do it - output_type_titles[schema_key] = output_schema['title'] + output_type_titles[schema_key] = output_schema["title"] # Add a reference to the output type to additionalProperties of the invoker schema for invoker in all_invocations: @@ -115,47 +112,47 @@ def custom_openapi(): output_type = signature(invoker.invoke).return_annotation output_type_title = output_type_titles[output_type.__name__] invoker_schema = openapi_schema["components"]["schemas"][invoker_name] - outputs_ref = { '$ref': f'#/components/schemas/{output_type_title}' } - if 'additionalProperties' not in invoker_schema: - invoker_schema['additionalProperties'] = {} + outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"} + if "additionalProperties" not in invoker_schema: + invoker_schema["additionalProperties"] = {} + + invoker_schema["additionalProperties"]["outputs"] = outputs_ref - invoker_schema['additionalProperties']['outputs'] = outputs_ref - app.openapi_schema = openapi_schema return app.openapi_schema + app.openapi = custom_openapi # Override API doc favicons -app.mount('/static', StaticFiles(directory='static/dream_web'), name='static') +app.mount("/static", StaticFiles(directory="static/dream_web"), name="static") + @app.get("/docs", include_in_schema=False) def overridden_swagger(): - return get_swagger_ui_html( + return get_swagger_ui_html( openapi_url=app.openapi_url, title=app.title, - swagger_favicon_url="/static/favicon.ico" + swagger_favicon_url="/static/favicon.ico", ) + @app.get("/redoc", include_in_schema=False) def overridden_redoc(): - return get_redoc_html( + return get_redoc_html( openapi_url=app.openapi_url, title=app.title, - redoc_favicon_url="/static/favicon.ico" + redoc_favicon_url="/static/favicon.ico", ) + def invoke_api(): # Start our own event loop for eventing usage # TODO: determine if there's a better way to do this loop = asyncio.new_event_loop() - config = uvicorn.Config( - app = app, - host = "0.0.0.0", - port = 9090, - loop = loop) - # Use access_log to turn off logging - + config = uvicorn.Config(app=app, host="0.0.0.0", port=9090, loop=loop) + # Use access_log to turn off logging + server = uvicorn.Server(config) loop.run_until_complete(server.serve()) diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py index 51ccb9d41e..178efc0274 100644 --- a/invokeai/app/cli_app.py +++ b/invokeai/app/cli_app.py @@ -1,33 +1,40 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) import argparse -import shlex import os +import shlex import time -from typing import Any, Dict, Iterable, Literal, Union, get_args, get_origin, get_type_hints +from typing import ( + Any, + Dict, + Iterable, + Literal, + 
Union, + get_args, + get_origin, + get_type_hints, +) + from pydantic import BaseModel from pydantic.fields import Field -from .services.processor import DefaultInvocationProcessor - -from .services.graph import EdgeConnection, GraphExecutionState - -from .services.sqlite import SqliteItemStorage - +from ..args import Args +from .invocations import * +from .invocations.baseinvocation import BaseInvocation from .invocations.image import ImageField +from .services.events import EventServiceBase from .services.generate_initializer import get_generate +from .services.graph import EdgeConnection, GraphExecutionState from .services.image_storage import DiskImageStorage from .services.invocation_queue import MemoryInvocationQueue -from .invocations.baseinvocation import BaseInvocation from .services.invocation_services import InvocationServices -from .services.invoker import Invoker -from .invocations import * -from ..args import Args -from .services.events import EventServiceBase +from .services.invoker import Invoker +from .services.processor import DefaultInvocationProcessor +from .services.sqlite import SqliteItemStorage class InvocationCommand(BaseModel): - invocation: Union[BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore + invocation: Union[BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore class InvalidArgs(Exception): @@ -35,72 +42,94 @@ class InvalidArgs(Exception): def get_invocation_parser() -> argparse.ArgumentParser: - # Create invocation parser parser = argparse.ArgumentParser() + def exit(*args, **kwargs): raise InvalidArgs + parser.exit = exit - subparsers = parser.add_subparsers(dest='type') + subparsers = parser.add_subparsers(dest="type") invocation_parsers = dict() # Add history parser - history_parser = subparsers.add_parser('history', help="Shows the invocation history") - history_parser.add_argument('count', nargs='?', default=5, type=int, help="The number of history entries to show") + history_parser = subparsers.add_parser( + "history", help="Shows the invocation history" + ) + history_parser.add_argument( + "count", + nargs="?", + default=5, + type=int, + help="The number of history entries to show", + ) # Add default parser - default_parser = subparsers.add_parser('default', help="Define a default value for all inputs with a specified name") - default_parser.add_argument('input', type=str, help="The input field") - default_parser.add_argument('value', help="The default value") - - default_parser = subparsers.add_parser('reset_default', help="Resets a default value") - default_parser.add_argument('input', type=str, help="The input field") + default_parser = subparsers.add_parser( + "default", help="Define a default value for all inputs with a specified name" + ) + default_parser.add_argument("input", type=str, help="The input field") + default_parser.add_argument("value", help="The default value") + + default_parser = subparsers.add_parser( + "reset_default", help="Resets a default value" + ) + default_parser.add_argument("input", type=str, help="The input field") # Create subparsers for each invocation invocations = BaseInvocation.get_all_subclasses() for invocation in invocations: hints = get_type_hints(invocation) - cmd_name = get_args(hints['type'])[0] + cmd_name = get_args(hints["type"])[0] command_parser = subparsers.add_parser(cmd_name, help=invocation.__doc__) invocation_parsers[cmd_name] = command_parser # Add linking capability - command_parser.add_argument('--link', '-l', action='append', nargs=3, - 
help="A link in the format 'dest_field source_node source_field'. source_node can be relative to history (e.g. -1)") + command_parser.add_argument( + "--link", + "-l", + action="append", + nargs=3, + help="A link in the format 'dest_field source_node source_field'. source_node can be relative to history (e.g. -1)", + ) - command_parser.add_argument('--link_node', '-ln', action='append', - help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)") + command_parser.add_argument( + "--link_node", + "-ln", + action="append", + help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)", + ) # Convert all fields to arguments fields = invocation.__fields__ for name, field in fields.items(): - if name in ['id', 'type']: + if name in ["id", "type"]: continue - + if get_origin(field.type_) == Literal: allowed_values = get_args(field.type_) allowed_types = set() for val in allowed_values: allowed_types.add(type(val)) allowed_types_list = list(allowed_types) - field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore + field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore command_parser.add_argument( f"--{name}", dest=name, type=field_type, default=field.default, - choices = allowed_values, - help=field.field_info.description + choices=allowed_values, + help=field.field_info.description, ) else: command_parser.add_argument( f"--{name}", dest=name, type=field.type_, - default=field.default, - help=field.field_info.description + default=field.default, + help=field.field_info.description, ) return parser @@ -110,8 +139,8 @@ def get_invocation_command(invocation) -> str: fields = invocation.__fields__.items() type_hints = get_type_hints(type(invocation)) command = [invocation.type] - for name,field in fields: - if name in ['id', 'type']: + for name, field in fields: + if name in ["id", "type"]: continue # TODO: add links @@ -127,17 +156,25 @@ def get_invocation_command(invocation) -> str: if type_hint is str or str in get_args(type_hint): command.append(f'--{name} "{field_value}"') else: - command.append(f'--{name} {field_value}') - - return ' '.join(command) + command.append(f"--{name} {field_value}") + + return " ".join(command) -def get_graph_execution_history(graph_execution_state: GraphExecutionState) -> Iterable[str]: +def get_graph_execution_history( + graph_execution_state: GraphExecutionState, +) -> Iterable[str]: """Gets the history of fully-executed invocations for a graph execution""" - return (n for n in reversed(graph_execution_state.executed_history) if n in graph_execution_state.graph.nodes) + return ( + n + for n in reversed(graph_execution_state.executed_history) + if n in graph_execution_state.graph.nodes + ) -def generate_matching_edges(a: BaseInvocation, b: BaseInvocation) -> list[tuple[EdgeConnection, EdgeConnection]]: +def generate_matching_edges( + a: BaseInvocation, b: BaseInvocation +) -> list[tuple[EdgeConnection, EdgeConnection]]: """Generates all possible edges between two invocations""" atype = type(a) btype = type(b) @@ -148,12 +185,18 @@ def generate_matching_edges(a: BaseInvocation, b: BaseInvocation) -> list[tuple[ bfields = get_type_hints(btype) matching_fields = set(afields.keys()).intersection(bfields.keys()) - + # Remove invalid fields - invalid_fields = set(['type', 'id']) + invalid_fields = set(["type", "id"]) matching_fields = matching_fields.difference(invalid_fields) - edges = 
[(EdgeConnection(node_id = a.id, field = field), EdgeConnection(node_id = b.id, field = field)) for field in matching_fields] + edges = [ + ( + EdgeConnection(node_id=a.id, field=field), + EdgeConnection(node_id=b.id, field=field), + ) + for field in matching_fields + ] return edges @@ -165,27 +208,31 @@ def invoke_cli(): # NOTE: load model on first use, uncomment to load at startup # TODO: Make this a config option? - #generate.load_model() + # generate.load_model() events = EventServiceBase() - output_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../outputs')) + output_folder = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../../outputs") + ) # TODO: build a file/path manager? - db_location = os.path.join(output_folder, 'invokeai.db') + db_location = os.path.join(output_folder, "invokeai.db") services = InvocationServices( - generate = generate, - events = events, - images = DiskImageStorage(output_folder), - queue = MemoryInvocationQueue(), - graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = db_location, table_name = 'graph_executions'), - processor = DefaultInvocationProcessor() + generate=generate, + events=events, + images=DiskImageStorage(output_folder), + queue=MemoryInvocationQueue(), + graph_execution_manager=SqliteItemStorage[GraphExecutionState]( + filename=db_location, table_name="graph_executions" + ), + processor=DefaultInvocationProcessor(), ) invoker = Invoker(services) session: GraphExecutionState = invoker.create_execution_state() - + parser = get_invocation_parser() # Uncomment to print out previous sessions at startup @@ -201,10 +248,10 @@ def invoke_cli(): # Ctrl-c exits break - if cmd_input in ['exit','q']: - break; + if cmd_input in ["exit", "q"]: + break - if cmd_input in ['--help','help','h','?']: + if cmd_input in ["--help", "help", "h", "?"]: parser.print_help() continue @@ -214,65 +261,82 @@ def invoke_cli(): history = list(get_graph_execution_history(session)) # Split the command for piping - cmds = cmd_input.split('|') + cmds = cmd_input.split("|") start_id = len(history) current_id = start_id new_invocations = list() for cmd in cmds: - if cmd is None or cmd.strip() == '': - raise InvalidArgs('Empty command') + if cmd is None or cmd.strip() == "": + raise InvalidArgs("Empty command") # Parse args to create invocation args = vars(parser.parse_args(shlex.split(cmd.strip()))) # Check for special commands # TODO: These might be better as Pydantic models, similar to the invocations - if args['type'] == 'history': - history_count = args['count'] or 5 + if args["type"] == "history": + history_count = args["count"] or 5 for i in range(min(history_count, len(history))): entry_id = history[-1 - i] entry = session.graph.get_node(entry_id) - print(f'{entry_id}: {get_invocation_command(entry.invocation)}') + print(f"{entry_id}: {get_invocation_command(entry.invocation)}") continue - if args['type'] == 'reset_default': - if args['input'] in defaults: - del defaults[args['input']] + if args["type"] == "reset_default": + if args["input"] in defaults: + del defaults[args["input"]] continue - if args['type'] == 'default': - field = args['input'] - field_value = args['value'] + if args["type"] == "default": + field = args["input"] + field_value = args["value"] defaults[field] = field_value continue # Override defaults - for field_name,field_default in defaults.items(): + for field_name, field_default in defaults.items(): if field_name in args: args[field_name] = field_default # Parse invocation - args['id'] = 
current_id - command = InvocationCommand(invocation = args) + args["id"] = current_id + command = InvocationCommand(invocation=args) # Pipe previous command output (if there was a previous command) edges = [] if len(history) > 0 or current_id != start_id: - from_id = history[0] if current_id == start_id else str(current_id - 1) - from_node = next(filter(lambda n: n[0].id == from_id, new_invocations))[0] if current_id != start_id else session.graph.get_node(from_id) - matching_edges = generate_matching_edges(from_node, command.invocation) + from_id = ( + history[0] if current_id == start_id else str(current_id - 1) + ) + from_node = ( + next(filter(lambda n: n[0].id == from_id, new_invocations))[0] + if current_id != start_id + else session.graph.get_node(from_id) + ) + matching_edges = generate_matching_edges( + from_node, command.invocation + ) edges.extend(matching_edges) - + # Parse provided links - if 'link_node' in args and args['link_node']: - for link in args['link_node']: + if "link_node" in args and args["link_node"]: + for link in args["link_node"]: link_node = session.graph.get_node(link) - matching_edges = generate_matching_edges(link_node, command.invocation) + matching_edges = generate_matching_edges( + link_node, command.invocation + ) edges.extend(matching_edges) - - if 'link' in args and args['link']: - for link in args['link']: - edges.append((EdgeConnection(node_id = link[1], field = link[0]), EdgeConnection(node_id = command.invocation.id, field = link[2]))) + + if "link" in args and args["link"]: + for link in args["link"]: + edges.append( + ( + EdgeConnection(node_id=link[1], field=link[0]), + EdgeConnection( + node_id=command.invocation.id, field=link[2] + ), + ) + ) new_invocations.append((command.invocation, edges)) @@ -286,17 +350,19 @@ def invoke_cli(): session.add_edge(edge) # Execute all available invocations - invoker.invoke(session, invoke_all = True) + invoker.invoke(session, invoke_all=True) while not session.is_complete(): # Wait some time session = invoker.services.graph_execution_manager.get(session.id) time.sleep(0.1) - + # Print any errors if session.has_error(): for n in session.errors: - print(f'Error in node {n} (source node {session.prepared_source_mapping[n]}): {session.errors[n]}') - + print( + f"Error in node {n} (source node {session.prepared_source_mapping[n]}): {session.errors[n]}" + ) + # Start a new session print("Creating a new session") session = invoker.create_execution_state() @@ -307,7 +373,7 @@ def invoke_cli(): except SystemExit: continue - + invoker.stop() diff --git a/invokeai/app/invocations/__init__.py b/invokeai/app/invocations/__init__.py index 6407a1cdee..0a451ff618 100644 --- a/invokeai/app/invocations/__init__.py +++ b/invokeai/app/invocations/__init__.py @@ -4,5 +4,9 @@ __all__ = [] dirname = os.path.dirname(os.path.abspath(__file__)) for f in os.listdir(dirname): - if f != "__init__.py" and os.path.isfile("%s/%s" % (dirname, f)) and f[-3:] == ".py": + if ( + f != "__init__.py" + and os.path.isfile("%s/%s" % (dirname, f)) + and f[-3:] == ".py" + ): __all__.append(f[:-3]) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 1ad2d99112..82b6743c27 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -3,7 +3,9 @@ from abc import ABC, abstractmethod from inspect import signature from typing import get_args, get_type_hints + from pydantic import BaseModel, Field + from ..services.invocation_services import 
InvocationServices diff --git a/invokeai/app/invocations/cv.py b/invokeai/app/invocations/cv.py index f950669736..ea39a8d8cd 100644 --- a/invokeai/app/invocations/cv.py +++ b/invokeai/app/invocations/cv.py @@ -1,30 +1,37 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) from typing import Literal -import numpy -from pydantic import Field -from PIL import Image, ImageOps + import cv2 as cv -from .image import ImageField, ImageOutput -from .baseinvocation import BaseInvocation, InvocationContext +import numpy +from PIL import Image, ImageOps +from pydantic import Field + from ..services.image_storage import ImageType +from .baseinvocation import BaseInvocation, InvocationContext +from .image import ImageField, ImageOutput class CvInpaintInvocation(BaseInvocation): """Simple inpaint using opencv.""" - type: Literal['cv_inpaint'] = 'cv_inpaint' + + type: Literal["cv_inpaint"] = "cv_inpaint" # Inputs image: ImageField = Field(default=None, description="The image to inpaint") - mask: ImageField = Field(default=None, description="The mask to use when inpainting") + mask: ImageField = Field( + default=None, description="The mask to use when inpainting" + ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) mask = context.services.images.get(self.mask.image_type, self.mask.image_name) # Convert to cv image/mask # TODO: consider making these utility functions - cv_image = cv.cvtColor(numpy.array(image.convert('RGB')), cv.COLOR_RGB2BGR) + cv_image = cv.cvtColor(numpy.array(image.convert("RGB")), cv.COLOR_RGB2BGR) cv_mask = numpy.array(ImageOps.invert(mask)) # Inpaint @@ -35,8 +42,10 @@ class CvInpaintInvocation(BaseInvocation): image_inpainted = Image.fromarray(cv.cvtColor(cv_inpainted, cv.COLOR_BGR2RGB)) image_type = ImageType.INTERMEDIATE - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, image_inpainted) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py index 60b656bf0c..83ad09a3f7 100644 --- a/invokeai/app/invocations/generate.py +++ b/invokeai/app/invocations/generate.py @@ -2,104 +2,160 @@ from datetime import datetime, timezone from typing import Any, Literal, Optional, Union + import numpy as np -from pydantic import Field from PIL import Image +from pydantic import Field from skimage.exposure.histogram_matching import match_histograms -from .image import ImageField, ImageOutput -from .baseinvocation import BaseInvocation, InvocationContext + from ..services.image_storage import ImageType from ..services.invocation_services import InvocationServices +from .baseinvocation import BaseInvocation, InvocationContext +from .image import ImageField, ImageOutput +SAMPLER_NAME_VALUES = Literal[ + "ddim", "plms", "k_lms", "k_dpm_2", "k_dpm_2_a", "k_euler", "k_euler_a", "k_heun" +] -SAMPLER_NAME_VALUES = Literal["ddim","plms","k_lms","k_dpm_2","k_dpm_2_a","k_euler","k_euler_a","k_heun"] # Text to image class TextToImageInvocation(BaseInvocation): """Generates an image using text2img.""" - type: Literal['txt2img'] = 
'txt2img' + + type: Literal["txt2img"] = "txt2img" # Inputs # TODO: consider making prompt optional to enable providing prompt through a link - prompt: Optional[str] = Field(description="The prompt to generate an image from") - seed: int = Field(default=-1, ge=-1, le=np.iinfo(np.uint32).max, description="The seed to use (-1 for a random seed)") - steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image") - width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting image") - height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting image") - cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt") - sampler_name: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The sampler to use") - seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams") - model: str = Field(default='', description="The model to use (currently ignored)") - progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation") + prompt: Optional[str] = Field(description="The prompt to generate an image from") + seed: int = Field( + default=-1, + ge=-1, + le=np.iinfo(np.uint32).max, + description="The seed to use (-1 for a random seed)", + ) + steps: int = Field( + default=10, gt=0, description="The number of steps to use to generate the image" + ) + width: int = Field( + default=512, + multiple_of=64, + gt=0, + description="The width of the resulting image", + ) + height: int = Field( + default=512, + multiple_of=64, + gt=0, + description="The height of the resulting image", + ) + cfg_scale: float = Field( + default=7.5, + gt=0, + description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", + ) + sampler_name: SAMPLER_NAME_VALUES = Field( + default="k_lms", description="The sampler to use" + ) + seamless: bool = Field( + default=False, + description="Whether or not to generate an image that can tile without seams", + ) + model: str = Field(default="", description="The model to use (currently ignored)") + progress_images: bool = Field( + default=False, + description="Whether or not to produce progress images during generation", + ) # TODO: pass this an emitter method or something? or a session for dispatching? - def dispatch_progress(self, context: InvocationContext, sample: Any = None, step: int = 0) -> None: + def dispatch_progress( + self, context: InvocationContext, sample: Any = None, step: int = 0 + ) -> None: context.services.events.emit_generator_progress( - context.graph_execution_state_id, self.id, step, float(step) / float(self.steps) + context.graph_execution_state_id, + self.id, + step, + float(step) / float(self.steps), ) def invoke(self, context: InvocationContext) -> ImageOutput: - - def step_callback(sample, step = 0): + def step_callback(sample, step=0): self.dispatch_progress(context, sample, step) # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? 
- if self.model is None or self.model == '': + if self.model is None or self.model == "": self.model = context.services.generate.model_name # Set the model (if already cached, this does nothing) context.services.generate.set_model(self.model) results = context.services.generate.prompt2image( - prompt = self.prompt, - step_callback = step_callback, - **self.dict(exclude = {'prompt'}) # Shorthand for passing all of the parameters above manually + prompt=self.prompt, + step_callback=step_callback, + **self.dict( + exclude={"prompt"} + ), # Shorthand for passing all of the parameters above manually ) # Results are image and seed, unwrap for now and ignore the seed # TODO: pre-seed? # TODO: can this return multiple results? Should it? image_type = ImageType.RESULT - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, results[0][0]) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) class ImageToImageInvocation(TextToImageInvocation): """Generates an image using img2img.""" - type: Literal['img2img'] = 'img2img' + + type: Literal["img2img"] = "img2img" # Inputs - image: Union[ImageField,None] = Field(description="The input image") - strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the original image") - fit: bool = Field(default=True, description="Whether or not the result should be fit to the aspect ratio of the input image") + image: Union[ImageField, None] = Field(description="The input image") + strength: float = Field( + default=0.75, gt=0, le=1, description="The strength of the original image" + ) + fit: bool = Field( + default=True, + description="Whether or not the result should be fit to the aspect ratio of the input image", + ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = None if self.image is None else context.services.images.get(self.image.image_type, self.image.image_name) - mask = None + image = ( + None + if self.image is None + else context.services.images.get( + self.image.image_type, self.image.image_name + ) + ) + mask = None - def step_callback(sample, step = 0): + def step_callback(sample, step=0): self.dispatch_progress(context, sample, step) # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? - if self.model is None or self.model == '': + if self.model is None or self.model == "": self.model = context.services.generate.model_name # Set the model (if already cached, this does nothing) context.services.generate.set_model(self.model) results = context.services.generate.prompt2image( - prompt = self.prompt, - init_img = image, - init_mask = mask, - step_callback = step_callback, - **self.dict(exclude = {'prompt','image','mask'}) # Shorthand for passing all of the parameters above manually + prompt=self.prompt, + init_img=image, + init_mask=mask, + step_callback=step_callback, + **self.dict( + exclude={"prompt", "image", "mask"} + ), # Shorthand for passing all of the parameters above manually ) result_image = results[0][0] @@ -108,43 +164,63 @@ class ImageToImageInvocation(TextToImageInvocation): # TODO: pre-seed? # TODO: can this return multiple results? Should it? 
image_type = ImageType.RESULT - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, result_image) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) class InpaintInvocation(ImageToImageInvocation): """Generates an image using inpaint.""" - type: Literal['inpaint'] = 'inpaint' + + type: Literal["inpaint"] = "inpaint" # Inputs - mask: Union[ImageField,None] = Field(description="The mask") - inpaint_replace: float = Field(default=0.0, ge=0.0, le=1.0, description="The amount by which to replace masked areas with latent noise") + mask: Union[ImageField, None] = Field(description="The mask") + inpaint_replace: float = Field( + default=0.0, + ge=0.0, + le=1.0, + description="The amount by which to replace masked areas with latent noise", + ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = None if self.image is None else context.services.images.get(self.image.image_type, self.image.image_name) - mask = None if self.mask is None else context.services.images.get(self.mask.image_type, self.mask.image_name) + image = ( + None + if self.image is None + else context.services.images.get( + self.image.image_type, self.image.image_name + ) + ) + mask = ( + None + if self.mask is None + else context.services.images.get(self.mask.image_type, self.mask.image_name) + ) - def step_callback(sample, step = 0): + def step_callback(sample, step=0): self.dispatch_progress(context, sample, step) # Handle invalid model parameter # TODO: figure out if this can be done via a validator that uses the model_cache # TODO: How to get the default model name now? - if self.model is None or self.model == '': + if self.model is None or self.model == "": self.model = context.services.generate.model_name # Set the model (if already cached, this does nothing) context.services.generate.set_model(self.model) results = context.services.generate.prompt2image( - prompt = self.prompt, - init_img = image, - init_mask = mask, - step_callback = step_callback, - **self.dict(exclude = {'prompt','image','mask'}) # Shorthand for passing all of the parameters above manually + prompt=self.prompt, + init_img=image, + init_mask=mask, + step_callback=step_callback, + **self.dict( + exclude={"prompt", "image", "mask"} + ), # Shorthand for passing all of the parameters above manually ) result_image = results[0][0] @@ -153,8 +229,10 @@ class InpaintInvocation(ImageToImageInvocation): # TODO: pre-seed? # TODO: can this return multiple results? Should it? 
image_type = ImageType.RESULT - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, result_image) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index cb326b1bb7..e0a302c24c 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -2,30 +2,37 @@ from datetime import datetime, timezone from typing import Literal, Optional + import numpy -from pydantic import Field, BaseModel -from PIL import Image, ImageOps, ImageFilter -from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext +from PIL import Image, ImageFilter, ImageOps +from pydantic import BaseModel, Field + from ..services.image_storage import ImageType from ..services.invocation_services import InvocationServices +from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext class ImageField(BaseModel): """An image field used for passing image objects between invocations""" - image_type: str = Field(default=ImageType.RESULT, description="The type of the image") + + image_type: str = Field( + default=ImageType.RESULT, description="The type of the image" + ) image_name: Optional[str] = Field(default=None, description="The name of the image") class ImageOutput(BaseInvocationOutput): """Base class for invocations that output an image""" - type: Literal['image'] = 'image' + + type: Literal["image"] = "image" image: ImageField = Field(default=None, description="The output image") class MaskOutput(BaseInvocationOutput): """Base class for invocations that output a mask""" - type: Literal['mask'] = 'mask' + + type: Literal["mask"] = "mask" mask: ImageField = Field(default=None, description="The output mask") @@ -33,7 +40,8 @@ class MaskOutput(BaseInvocationOutput): # TODO: this isn't really necessary anymore class LoadImageInvocation(BaseInvocation): """Load an image from a filename and provide it as output.""" - type: Literal['load_image'] = 'load_image' + + type: Literal["load_image"] = "load_image" # Inputs image_type: ImageType = Field(description="The type of the image") @@ -41,69 +49,100 @@ class LoadImageInvocation(BaseInvocation): def invoke(self, context: InvocationContext) -> ImageOutput: return ImageOutput( - image = ImageField(image_type = self.image_type, image_name = self.image_name) + image=ImageField(image_type=self.image_type, image_name=self.image_name) ) class ShowImageInvocation(BaseInvocation): """Displays a provided image, and passes it forward in the pipeline.""" - type: Literal['show_image'] = 'show_image' + + type: Literal["show_image"] = "show_image" # Inputs image: ImageField = Field(default=None, description="The image to show") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) if image: image.show() # TODO: how to handle failure? 
return ImageOutput( - image = ImageField(image_type = self.image.image_type, image_name = self.image.image_name) + image=ImageField( + image_type=self.image.image_type, image_name=self.image.image_name + ) ) class CropImageInvocation(BaseInvocation): """Crops an image to a specified box. The box can be outside of the image.""" - type: Literal['crop'] = 'crop' + + type: Literal["crop"] = "crop" # Inputs image: ImageField = Field(default=None, description="The image to crop") - x: int = Field(default=0, description="The left x coordinate of the crop rectangle") - y: int = Field(default=0, description="The top y coordinate of the crop rectangle") - width: int = Field(default=512, gt=0, description="The width of the crop rectangle") - height: int = Field(default=512, gt=0, description="The height of the crop rectangle") + x: int = Field(default=0, description="The left x coordinate of the crop rectangle") + y: int = Field(default=0, description="The top y coordinate of the crop rectangle") + width: int = Field(default=512, gt=0, description="The width of the crop rectangle") + height: int = Field( + default=512, gt=0, description="The height of the crop rectangle" + ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) - image_crop = Image.new(mode = 'RGBA', size = (self.width, self.height), color = (0, 0, 0, 0)) + image_crop = Image.new( + mode="RGBA", size=(self.width, self.height), color=(0, 0, 0, 0) + ) image_crop.paste(image, (-self.x, -self.y)) image_type = ImageType.INTERMEDIATE - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, image_crop) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) class PasteImageInvocation(BaseInvocation): """Pastes an image into another image.""" - type: Literal['paste'] = 'paste' + + type: Literal["paste"] = "paste" # Inputs - base_image: ImageField = Field(default=None, description="The base image") - image: ImageField = Field(default=None, description="The image to paste") - mask: Optional[ImageField] = Field(default=None, description="The mask to use when pasting") - x: int = Field(default=0, description="The left x coordinate at which to paste the image") - y: int = Field(default=0, description="The top y coordinate at which to paste the image") + base_image: ImageField = Field(default=None, description="The base image") + image: ImageField = Field(default=None, description="The image to paste") + mask: Optional[ImageField] = Field( + default=None, description="The mask to use when pasting" + ) + x: int = Field( + default=0, description="The left x coordinate at which to paste the image" + ) + y: int = Field( + default=0, description="The top y coordinate at which to paste the image" + ) def invoke(self, context: InvocationContext) -> ImageOutput: - base_image = context.services.images.get(self.base_image.image_type, self.base_image.image_name) - image = context.services.images.get(self.image.image_type, self.image.image_name) - mask = None if self.mask is None else ImageOps.invert(services.images.get(self.mask.image_type, self.mask.image_name)) + base_image = 
context.services.images.get( + self.base_image.image_type, self.base_image.image_name + ) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) + mask = ( + None + if self.mask is None + else ImageOps.invert( + services.images.get(self.mask.image_type, self.mask.image_name) + ) + ) # TODO: probably shouldn't invert mask here... should user be required to do it? min_x = min(0, self.x) @@ -111,67 +150,88 @@ class PasteImageInvocation(BaseInvocation): max_x = max(base_image.width, image.width + self.x) max_y = max(base_image.height, image.height + self.y) - new_image = Image.new(mode = 'RGBA', size = (max_x - min_x, max_y - min_y), color = (0, 0, 0, 0)) + new_image = Image.new( + mode="RGBA", size=(max_x - min_x, max_y - min_y), color=(0, 0, 0, 0) + ) new_image.paste(base_image, (abs(min_x), abs(min_y))) - new_image.paste(image, (max(0, self.x), max(0, self.y)), mask = mask) + new_image.paste(image, (max(0, self.x), max(0, self.y)), mask=mask) image_type = ImageType.RESULT - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, new_image) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) class MaskFromAlphaInvocation(BaseInvocation): """Extracts the alpha channel of an image as a mask.""" - type: Literal['tomask'] = 'tomask' + + type: Literal["tomask"] = "tomask" # Inputs - image: ImageField = Field(default=None, description="The image to create the mask from") + image: ImageField = Field( + default=None, description="The image to create the mask from" + ) invert: bool = Field(default=False, description="Whether or not to invert the mask") def invoke(self, context: InvocationContext) -> MaskOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) image_mask = image.split()[-1] if self.invert: image_mask = ImageOps.invert(image_mask) image_type = ImageType.INTERMEDIATE - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) - context.services.images.save(image_type, image_name, image_mask) - return MaskOutput( - mask = ImageField(image_type = image_type, image_name = image_name) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id ) + context.services.images.save(image_type, image_name, image_mask) + return MaskOutput(mask=ImageField(image_type=image_type, image_name=image_name)) class BlurInvocation(BaseInvocation): """Blurs an image""" - type: Literal['blur'] = 'blur' + + type: Literal["blur"] = "blur" # Inputs image: ImageField = Field(default=None, description="The image to blur") - radius: float = Field(default=8.0, ge=0, description="The blur radius") - blur_type: Literal['gaussian', 'box'] = Field(default='gaussian', description="The type of blur") + radius: float = Field(default=8.0, ge=0, description="The blur radius") + blur_type: Literal["gaussian", "box"] = Field( + default="gaussian", description="The type of blur" + ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) 
- blur = ImageFilter.GaussianBlur(self.radius) if self.blur_type == 'gaussian' else ImageFilter.BoxBlur(self.radius) + blur = ( + ImageFilter.GaussianBlur(self.radius) + if self.blur_type == "gaussian" + else ImageFilter.BoxBlur(self.radius) + ) blur_image = image.filter(blur) image_type = ImageType.INTERMEDIATE - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, blur_image) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) class LerpInvocation(BaseInvocation): """Linear interpolation of all pixels of an image""" - type: Literal['lerp'] = 'lerp' + + type: Literal["lerp"] = "lerp" # Inputs image: ImageField = Field(default=None, description="The image to lerp") @@ -179,7 +239,9 @@ class LerpInvocation(BaseInvocation): max: int = Field(default=255, ge=0, le=255, description="The maximum output value") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) image_arr = numpy.asarray(image, dtype=numpy.float32) / 255 image_arr = image_arr * (self.max - self.min) + self.max @@ -187,16 +249,19 @@ class LerpInvocation(BaseInvocation): lerp_image = Image.fromarray(numpy.uint8(image_arr)) image_type = ImageType.INTERMEDIATE - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, lerp_image) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) class InverseLerpInvocation(BaseInvocation): """Inverse linear interpolation of all pixels of an image""" - type: Literal['ilerp'] = 'ilerp' + + type: Literal["ilerp"] = "ilerp" # Inputs image: ImageField = Field(default=None, description="The image to lerp") @@ -204,16 +269,25 @@ class InverseLerpInvocation(BaseInvocation): max: int = Field(default=255, ge=0, le=255, description="The maximum input value") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) image_arr = numpy.asarray(image, dtype=numpy.float32) - image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 + image_arr = ( + numpy.minimum( + numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1 + ) + * 255 + ) ilerp_image = Image.fromarray(numpy.uint8(image_arr)) image_type = ImageType.INTERMEDIATE - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, ilerp_image) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py 
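The invocation classes reformatted above (in generate.py, cv.py, and image.py) all share one contract: resolve each input ImageField through context.services.images, do the work on the PIL image, save the result under a service-generated name, and return an ImageOutput wrapping a new ImageField. For orientation, a node written in that style might look like the sketch below; the FlipImageInvocation class and its "flip" type tag are hypothetical illustrations and are not part of this patch.

    from typing import Literal

    from PIL import ImageOps
    from pydantic import Field

    from ..services.image_storage import ImageType
    from .baseinvocation import BaseInvocation, InvocationContext
    from .image import ImageField, ImageOutput


    class FlipImageInvocation(BaseInvocation):
        """Hypothetical example node: flips an image top-to-bottom."""

        type: Literal["flip"] = "flip"

        # Inputs
        image: ImageField = Field(default=None, description="The image to flip")

        def invoke(self, context: InvocationContext) -> ImageOutput:
            # Resolve the input ImageField to a PIL image via the image service
            image = context.services.images.get(
                self.image.image_type, self.image.image_name
            )
            flipped = ImageOps.flip(image)

            # Save under a service-generated name and hand back an ImageField
            image_type = ImageType.INTERMEDIATE
            image_name = context.services.images.create_name(
                context.graph_execution_state_id, self.id
            )
            context.services.images.save(image_type, image_name, flipped)
            return ImageOutput(
                image=ImageField(image_type=image_type, image_name=image_name)
            )

The Literal "type" field is what pydantic uses as the discriminator when a Graph's nodes are deserialized (see the Field(discriminator="type") declaration in graph.py later in this patch), so each concrete invocation needs a unique value there.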
index 029cad9660..2c8a1c4989 100644 --- a/invokeai/app/invocations/prompt.py +++ b/invokeai/app/invocations/prompt.py @@ -1,9 +1,13 @@ from typing import Literal + from pydantic.fields import Field + from .baseinvocation import BaseInvocationOutput + class PromptOutput(BaseInvocationOutput): """Base class for invocations that output a prompt""" - type: Literal['prompt'] = 'prompt' + + type: Literal["prompt"] = "prompt" prompt: str = Field(default=None, description="The output prompt") diff --git a/invokeai/app/invocations/reconstruct.py b/invokeai/app/invocations/reconstruct.py index 98201ce837..71a15c57e9 100644 --- a/invokeai/app/invocations/reconstruct.py +++ b/invokeai/app/invocations/reconstruct.py @@ -1,36 +1,44 @@ from datetime import datetime, timezone from typing import Literal, Union + from pydantic import Field -from .image import ImageField, ImageOutput -from .baseinvocation import BaseInvocation, InvocationContext + from ..services.image_storage import ImageType from ..services.invocation_services import InvocationServices +from .baseinvocation import BaseInvocation, InvocationContext +from .image import ImageField, ImageOutput class RestoreFaceInvocation(BaseInvocation): """Restores faces in an image.""" - type: Literal['restore_face'] = 'restore_face' + + type: Literal["restore_face"] = "restore_face" # Inputs - image: Union[ImageField,None] = Field(description="The input image") - strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the restoration") + image: Union[ImageField, None] = Field(description="The input image") + strength: float = Field( + default=0.75, gt=0, le=1, description="The strength of the restoration" + ) - - def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + def invoke(self, context: InvocationContext) -> ImageOutput: + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) results = context.services.generate.upscale_and_reconstruct( - image_list = [[image, 0]], - upscale = None, - strength = self.strength, # GFPGAN strength - save_original = False, - image_callback = None, + image_list=[[image, 0]], + upscale=None, + strength=self.strength, # GFPGAN strength + save_original=False, + image_callback=None, ) # Results are image and seed, unwrap for now # TODO: can this return multiple results? 
image_type = ImageType.RESULT - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, results[0][0]) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index 1df8c44ea8..d1d8e4e2d4 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -2,37 +2,44 @@ from datetime import datetime, timezone from typing import Literal, Union + from pydantic import Field -from .image import ImageField, ImageOutput -from .baseinvocation import BaseInvocation, InvocationContext + from ..services.image_storage import ImageType from ..services.invocation_services import InvocationServices +from .baseinvocation import BaseInvocation, InvocationContext +from .image import ImageField, ImageOutput class UpscaleInvocation(BaseInvocation): """Upscales an image.""" - type: Literal['upscale'] = 'upscale' + + type: Literal["upscale"] = "upscale" # Inputs - image: Union[ImageField,None] = Field(description="The input image", default=None) - strength: float = Field(default=0.75, gt=0, le=1, description="The strength") - level: Literal[2,4] = Field(default=2, description = "The upscale level") + image: Union[ImageField, None] = Field(description="The input image", default=None) + strength: float = Field(default=0.75, gt=0, le=1, description="The strength") + level: Literal[2, 4] = Field(default=2, description="The upscale level") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get(self.image.image_type, self.image.image_name) + image = context.services.images.get( + self.image.image_type, self.image.image_name + ) results = context.services.generate.upscale_and_reconstruct( - image_list = [[image, 0]], - upscale = (self.level, self.strength), - strength = 0.0, # GFPGAN strength - save_original = False, - image_callback = None, + image_list=[[image, 0]], + upscale=(self.level, self.strength), + strength=0.0, # GFPGAN strength + save_original=False, + image_callback=None, ) # Results are image and seed, unwrap for now # TODO: can this return multiple results? 
image_type = ImageType.RESULT - image_name = context.services.images.create_name(context.graph_execution_state_id, self.id) + image_name = context.services.images.create_name( + context.graph_execution_state_id, self.id + ) context.services.images.save(image_type, image_name, results[0][0]) return ImageOutput( - image = ImageField(image_type = image_type, image_name = image_name) + image=ImageField(image_type=image_type, image_name=image_name) ) diff --git a/invokeai/app/services/events.py b/invokeai/app/services/events.py index c7b6367d68..e2ab4e61e3 100644 --- a/invokeai/app/services/events.py +++ b/invokeai/app/services/events.py @@ -4,90 +4,80 @@ from typing import Any, Dict class EventServiceBase: - session_event: str = 'session_event' + session_event: str = "session_event" """Basic event bus, to have an empty stand-in when not needed""" + def dispatch(self, event_name: str, payload: Any) -> None: pass - def __emit_session_event(self, - event_name: str, - payload: Dict) -> None: + def __emit_session_event(self, event_name: str, payload: Dict) -> None: self.dispatch( - event_name = EventServiceBase.session_event, - payload = dict( - event = event_name, - data = payload - ) + event_name=EventServiceBase.session_event, + payload=dict(event=event_name, data=payload), ) # Define events here for every event in the system. # This will make them easier to integrate until we find a schema generator. - def emit_generator_progress(self, + def emit_generator_progress( + self, graph_execution_state_id: str, invocation_id: str, step: int, - percent: float + percent: float, ) -> None: """Emitted when there is generation progress""" self.__emit_session_event( - event_name = 'generator_progress', - payload = dict( - graph_execution_state_id = graph_execution_state_id, - invocation_id = invocation_id, - step = step, - percent = percent - ) + event_name="generator_progress", + payload=dict( + graph_execution_state_id=graph_execution_state_id, + invocation_id=invocation_id, + step=step, + percent=percent, + ), ) - def emit_invocation_complete(self, - graph_execution_state_id: str, - invocation_id: str, - result: Dict + def emit_invocation_complete( + self, graph_execution_state_id: str, invocation_id: str, result: Dict ) -> None: """Emitted when an invocation has completed""" self.__emit_session_event( - event_name = 'invocation_complete', - payload = dict( - graph_execution_state_id = graph_execution_state_id, - invocation_id = invocation_id, - result = result - ) + event_name="invocation_complete", + payload=dict( + graph_execution_state_id=graph_execution_state_id, + invocation_id=invocation_id, + result=result, + ), ) - - def emit_invocation_error(self, - graph_execution_state_id: str, - invocation_id: str, - error: str + + def emit_invocation_error( + self, graph_execution_state_id: str, invocation_id: str, error: str ) -> None: """Emitted when an invocation has completed""" self.__emit_session_event( - event_name = 'invocation_error', - payload = dict( - graph_execution_state_id = graph_execution_state_id, - invocation_id = invocation_id, - error = error - ) + event_name="invocation_error", + payload=dict( + graph_execution_state_id=graph_execution_state_id, + invocation_id=invocation_id, + error=error, + ), ) - def emit_invocation_started(self, - graph_execution_state_id: str, - invocation_id: str + def emit_invocation_started( + self, graph_execution_state_id: str, invocation_id: str ) -> None: """Emitted when an invocation has started""" self.__emit_session_event( - event_name = 
'invocation_started', - payload = dict( - graph_execution_state_id = graph_execution_state_id, - invocation_id = invocation_id - ) + event_name="invocation_started", + payload=dict( + graph_execution_state_id=graph_execution_state_id, + invocation_id=invocation_id, + ), ) def emit_graph_execution_complete(self, graph_execution_state_id: str) -> None: """Emitted when a session has completed all invocations""" self.__emit_session_event( - event_name = 'graph_execution_state_complete', - payload = dict( - graph_execution_state_id = graph_execution_state_id - ) + event_name="graph_execution_state_complete", + payload=dict(graph_execution_state_id=graph_execution_state_id), ) diff --git a/invokeai/app/services/generate_initializer.py b/invokeai/app/services/generate_initializer.py index c6e81a2403..4a1ce3ea16 100644 --- a/invokeai/app/services/generate_initializer.py +++ b/invokeai/app/services/generate_initializer.py @@ -1,40 +1,47 @@ -from argparse import Namespace import os import sys import traceback +from argparse import Namespace -from invokeai.backend import ModelManager, Generate +import invokeai.version +from invokeai.backend import Generate, ModelManager from ...globals import Globals -import invokeai.version + # TODO: most of this code should be split into individual services as the Generate.py code is deprecated def get_generate(args, config) -> Generate: if not args.conf: - config_file = os.path.join(Globals.root,'configs','models.yaml') + config_file = os.path.join(Globals.root, "configs", "models.yaml") if not os.path.exists(config_file): - report_model_error(args, FileNotFoundError(f"The file {config_file} could not be found.")) + report_model_error( + args, FileNotFoundError(f"The file {config_file} could not be found.") + ) - print(f'>> {invokeai.version.__app_name__}, version {invokeai.version.__version__}') + print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}") print(f'>> InvokeAI runtime directory is "{Globals.root}"') # these two lines prevent a horrible warning message from appearing # when the frozen CLIP tokenizer is imported import transformers # type: ignore + transformers.logging.set_verbosity_error() import diffusers + diffusers.logging.set_verbosity_error() # Loading Face Restoration and ESRGAN Modules - gfpgan,codeformer,esrgan = load_face_restoration(args) + gfpgan, codeformer, esrgan = load_face_restoration(args) # normalize the config directory relative to root if not os.path.isabs(args.conf): - args.conf = os.path.normpath(os.path.join(Globals.root,args.conf)) + args.conf = os.path.normpath(os.path.join(Globals.root, args.conf)) if args.embeddings: if not os.path.isabs(args.embedding_path): - embedding_path = os.path.normpath(os.path.join(Globals.root,args.embedding_path)) + embedding_path = os.path.normpath( + os.path.join(Globals.root, args.embedding_path) + ) else: embedding_path = args.embedding_path else: @@ -47,35 +54,35 @@ def get_generate(args, config) -> Generate: if args.infile: try: if os.path.isfile(args.infile): - infile = open(args.infile, 'r', encoding='utf-8') - elif args.infile == '-': # stdin + infile = open(args.infile, "r", encoding="utf-8") + elif args.infile == "-": # stdin infile = sys.stdin else: - raise FileNotFoundError(f'{args.infile} not found.') + raise FileNotFoundError(f"{args.infile} not found.") except (FileNotFoundError, IOError) as e: - print(f'{e}. Aborting.') + print(f"{e}. 
Aborting.") sys.exit(-1) # creating a Generate object: try: gen = Generate( - conf = args.conf, - model = args.model, - sampler_name = args.sampler_name, - embedding_path = embedding_path, - full_precision = args.full_precision, - precision = args.precision, - gfpgan = gfpgan, - codeformer = codeformer, - esrgan = esrgan, - free_gpu_mem = args.free_gpu_mem, - safety_checker = args.safety_checker, - max_loaded_models = args.max_loaded_models, - ) + conf=args.conf, + model=args.model, + sampler_name=args.sampler_name, + embedding_path=embedding_path, + full_precision=args.full_precision, + precision=args.precision, + gfpgan=gfpgan, + codeformer=codeformer, + esrgan=esrgan, + free_gpu_mem=args.free_gpu_mem, + safety_checker=args.safety_checker, + max_loaded_models=args.max_loaded_models, + ) except (FileNotFoundError, TypeError, AssertionError) as e: - report_model_error(opt,e) + report_model_error(opt, e) except (IOError, KeyError) as e: - print(f'{e}. Aborting.') + print(f"{e}. Aborting.") sys.exit(-1) if args.seamless: @@ -96,7 +103,7 @@ def get_generate(args, config) -> Generate: conf_path=args.conf, weights_directory=path, ) - + return gen @@ -104,51 +111,61 @@ def load_face_restoration(opt): try: gfpgan, codeformer, esrgan = None, None, None if opt.restore or opt.esrgan: - from ldm.invoke.restoration import Restoration + from invokeai.backend.restoration import Restoration + restoration = Restoration() if opt.restore: - gfpgan, codeformer = restoration.load_face_restore_models(opt.gfpgan_model_path) + gfpgan, codeformer = restoration.load_face_restore_models( + opt.gfpgan_model_path + ) else: - print('>> Face restoration disabled') + print(">> Face restoration disabled") if opt.esrgan: esrgan = restoration.load_esrgan(opt.esrgan_bg_tile) else: - print('>> Upscaling disabled') + print(">> Upscaling disabled") else: - print('>> Face restoration and upscaling disabled') + print(">> Face restoration and upscaling disabled") except (ModuleNotFoundError, ImportError): print(traceback.format_exc(), file=sys.stderr) - print('>> You may need to install the ESRGAN and/or GFPGAN modules') - return gfpgan,codeformer,esrgan + print(">> You may need to install the ESRGAN and/or GFPGAN modules") + return gfpgan, codeformer, esrgan -def report_model_error(opt:Namespace, e:Exception): +def report_model_error(opt: Namespace, e: Exception): print(f'** An error occurred while attempting to initialize the model: "{str(e)}"') - print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.') - yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE') + print( + "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models." + ) + yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE") if yes_to_all: - print('** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE') + print( + "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE" + ) else: - response = input('Do you want to run invokeai-configure script to select and/or reinstall models? [y] ') - if response.startswith(('n', 'N')): + response = input( + "Do you want to run invokeai-configure script to select and/or reinstall models? 
[y] " + ) + if response.startswith(("n", "N")): return - print('invokeai-configure is launching....\n') + print("invokeai-configure is launching....\n") # Match arguments that were set on the CLI # only the arguments accepted by the configuration script are parsed root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else [] config = ["--config", opt.conf] if opt.conf is not None else [] previous_args = sys.argv - sys.argv = [ 'invokeai-configure' ] + sys.argv = ["invokeai-configure"] sys.argv.extend(root_dir) sys.argv.extend(config) if yes_to_all is not None: for arg in yes_to_all.split(): sys.argv.append(arg) - from ldm.invoke.config import invokeai_configure - invokeai_configure.main() + from invokeai.frontend.install import invokeai_configure + + invokeai_configure() # TODO: Figure out how to restart # print('** InvokeAI will now restart') # sys.argv = previous_args @@ -162,14 +179,17 @@ def old_get_generate(args, config) -> Generate: from invokeai.backend.globals import Globals # alert - setting globals here - Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.')) + Globals.root = os.path.expanduser( + args.root_dir or os.environ.get("INVOKEAI_ROOT") or os.path.abspath(".") + ) Globals.try_patchmatch = args.patchmatch - + print(f'>> InvokeAI runtime directory is "{Globals.root}"') # these two lines prevent a horrible warning message from appearing # when the frozen CLIP tokenizer is imported import transformers + transformers.logging.set_verbosity_error() # Loading Face Restoration and ESRGAN Modules @@ -177,53 +197,57 @@ def old_get_generate(args, config) -> Generate: try: if config.restore or config.esrgan: from ldm.invoke.restoration import Restoration + restoration = Restoration() if config.restore: - gfpgan, codeformer = restoration.load_face_restore_models(config.gfpgan_model_path) + gfpgan, codeformer = restoration.load_face_restore_models( + config.gfpgan_model_path + ) else: - print('>> Face restoration disabled') + print(">> Face restoration disabled") if config.esrgan: esrgan = restoration.load_esrgan(config.esrgan_bg_tile) else: - print('>> Upscaling disabled') + print(">> Upscaling disabled") else: - print('>> Face restoration and upscaling disabled') + print(">> Face restoration and upscaling disabled") except (ModuleNotFoundError, ImportError): print(traceback.format_exc(), file=sys.stderr) - print('>> You may need to install the ESRGAN and/or GFPGAN modules') + print(">> You may need to install the ESRGAN and/or GFPGAN modules") # normalize the config directory relative to root if not os.path.isabs(config.conf): - config.conf = os.path.normpath(os.path.join(Globals.root,config.conf)) + config.conf = os.path.normpath(os.path.join(Globals.root, config.conf)) if config.embeddings: if not os.path.isabs(config.embedding_path): - embedding_path = os.path.normpath(os.path.join(Globals.root,config.embedding_path)) + embedding_path = os.path.normpath( + os.path.join(Globals.root, config.embedding_path) + ) else: embedding_path = None - # TODO: lazy-initialize this by wrapping it try: generate = Generate( - conf = config.conf, - model = config.model, - sampler_name = config.sampler_name, - embedding_path = embedding_path, - full_precision = config.full_precision, - precision = config.precision, - gfpgan = gfpgan, - codeformer = codeformer, - esrgan = esrgan, - free_gpu_mem = config.free_gpu_mem, - safety_checker = config.safety_checker, - max_loaded_models = config.max_loaded_models, + conf=config.conf, + 
model=config.model, + sampler_name=config.sampler_name, + embedding_path=embedding_path, + full_precision=config.full_precision, + precision=config.precision, + gfpgan=gfpgan, + codeformer=codeformer, + esrgan=esrgan, + free_gpu_mem=config.free_gpu_mem, + safety_checker=config.safety_checker, + max_loaded_models=config.max_loaded_models, ) except (FileNotFoundError, TypeError, AssertionError): - #emergency_model_reconfigure() # TODO? + # emergency_model_reconfigure() # TODO? sys.exit(-1) except (IOError, KeyError) as e: - print(f'{e}. Aborting.') + print(f"{e}. Aborting.") sys.exit(-1) generate.free_gpu_mem = config.free_gpu_mem diff --git a/invokeai/app/services/graph.py b/invokeai/app/services/graph.py index 059ebca2d4..aa462ab170 100644 --- a/invokeai/app/services/graph.py +++ b/invokeai/app/services/graph.py @@ -3,29 +3,45 @@ import copy import itertools import traceback -from types import NoneType import uuid +from types import NoneType +from typing import ( + Annotated, + Any, + Literal, + Optional, + Union, + get_args, + get_origin, + get_type_hints, +) + import networkx as nx from pydantic import BaseModel, validator from pydantic.fields import Field -from typing import Any, Literal, Optional, Union, get_args, get_origin, get_type_hints, Annotated -from .invocation_services import InvocationServices -from ..invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext from ..invocations import * +from ..invocations.baseinvocation import ( + BaseInvocation, + BaseInvocationOutput, + InvocationContext, +) +from .invocation_services import InvocationServices class EdgeConnection(BaseModel): - node_id: str = Field(description="The id of the node for this edge connection") + node_id: str = Field(description="The id of the node for this edge connection") field: str = Field(description="The field for this connection") def __eq__(self, other): - return (isinstance(other, self.__class__) and - getattr(other, 'node_id', None) == self.node_id and - getattr(other, 'field', None) == self.field) - + return ( + isinstance(other, self.__class__) + and getattr(other, "node_id", None) == self.node_id + and getattr(other, "field", None) == self.field + ) + def __hash__(self): - return hash(f'{self.node_id}.{self.field}') + return hash(f"{self.node_id}.{self.field}") def get_output_field(node: BaseInvocation, field: str) -> Any: @@ -34,7 +50,7 @@ def get_output_field(node: BaseInvocation, field: str) -> Any: node_output_field = node_outputs.get(field) or None return node_output_field - + def get_input_field(node: BaseInvocation, field: str) -> Any: node_type = type(node) node_inputs = get_type_hints(node_type) @@ -47,15 +63,17 @@ def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool: return False if not to_type: return False - + # TODO: this is pretty forgiving on generic types. 
Clean that up (need to handle optionals and such) if from_type and to_type: # Ports are compatible - if (from_type == to_type or - from_type == Any or - to_type == Any or - Any in get_args(from_type) or - Any in get_args(to_type)): + if ( + from_type == to_type + or from_type == Any + or to_type == Any + or Any in get_args(from_type) + or Any in get_args(to_type) + ): return True if from_type in get_args(to_type): @@ -68,15 +86,13 @@ def are_connection_types_compatible(from_type: Any, to_type: Any) -> bool: return False else: return False - + return True def are_connections_compatible( - from_node: BaseInvocation, - from_field: str, - to_node: BaseInvocation, - to_field: str) -> bool: + from_node: BaseInvocation, from_field: str, to_node: BaseInvocation, to_field: str +) -> bool: """Determines if a connection between fields of two nodes is compatible.""" # TODO: handle iterators and collectors @@ -93,24 +109,26 @@ class NodeAlreadyInGraphError(Exception): class InvalidEdgeError(Exception): pass + class NodeNotFoundError(Exception): pass + class NodeAlreadyExecutedError(Exception): pass # TODO: Create and use an Empty output? class GraphInvocationOutput(BaseInvocationOutput): - type: Literal['graph_output'] = 'graph_output' + type: Literal["graph_output"] = "graph_output" # TODO: Fill this out and move to invocations class GraphInvocation(BaseInvocation): - type: Literal['graph'] = 'graph' + type: Literal["graph"] = "graph" # TODO: figure out how to create a default here - graph: 'Graph' = Field(description="The graph to run", default=None) + graph: "Graph" = Field(description="The graph to run", default=None) def invoke(self, context: InvocationContext) -> GraphInvocationOutput: """Invoke with provided services and return outputs.""" @@ -119,54 +137,71 @@ class GraphInvocation(BaseInvocation): class IterateInvocationOutput(BaseInvocationOutput): """Used to connect iteration outputs. 
Will be expanded to a specific output.""" - type: Literal['iterate_output'] = 'iterate_output' + + type: Literal["iterate_output"] = "iterate_output" item: Any = Field(description="The item being iterated over") # TODO: Fill this out and move to invocations class IterateInvocation(BaseInvocation): - type: Literal['iterate'] = 'iterate' + type: Literal["iterate"] = "iterate" - collection: list[Any] = Field(description="The list of items to iterate over", default_factory=list) - index: int = Field(description="The index, will be provided on executed iterators", default=0) + collection: list[Any] = Field( + description="The list of items to iterate over", default_factory=list + ) + index: int = Field( + description="The index, will be provided on executed iterators", default=0 + ) def invoke(self, context: InvocationContext) -> IterateInvocationOutput: """Produces the outputs as values""" - return IterateInvocationOutput(item = self.collection[self.index]) + return IterateInvocationOutput(item=self.collection[self.index]) class CollectInvocationOutput(BaseInvocationOutput): - type: Literal['collect_output'] = 'collect_output' + type: Literal["collect_output"] = "collect_output" collection: list[Any] = Field(description="The collection of input items") class CollectInvocation(BaseInvocation): """Collects values into a collection""" - type: Literal['collect'] = 'collect' - item: Any = Field(description="The item to collect (all inputs must be of the same type)", default=None) - collection: list[Any] = Field(description="The collection, will be provided on execution", default_factory=list) + type: Literal["collect"] = "collect" + + item: Any = Field( + description="The item to collect (all inputs must be of the same type)", + default=None, + ) + collection: list[Any] = Field( + description="The collection, will be provided on execution", + default_factory=list, + ) def invoke(self, context: InvocationContext) -> CollectInvocationOutput: """Invoke with provided services and return outputs.""" - return CollectInvocationOutput(collection = copy.copy(self.collection)) + return CollectInvocationOutput(collection=copy.copy(self.collection)) -InvocationsUnion = Union[BaseInvocation.get_invocations()] # type: ignore -InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()] # type: ignore +InvocationsUnion = Union[BaseInvocation.get_invocations()] # type: ignore +InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()] # type: ignore class Graph(BaseModel): id: str = Field(description="The id of this graph", default_factory=uuid.uuid4) # TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me - nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field(description="The nodes in this graph", default_factory=dict) - edges: list[tuple[EdgeConnection,EdgeConnection]] = Field(description="The connections between nodes and their fields in this graph", default_factory=list) + nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field( + description="The nodes in this graph", default_factory=dict + ) + edges: list[tuple[EdgeConnection, EdgeConnection]] = Field( + description="The connections between nodes and their fields in this graph", + default_factory=list, + ) def add_node(self, node: BaseInvocation) -> None: """Adds a node to a graph - + :raises NodeAlreadyInGraphError: the node is already present in the graph. 
""" @@ -174,26 +209,26 @@ class Graph(BaseModel): raise NodeAlreadyInGraphError() self.nodes[node.id] = node - - def _get_graph_and_node(self, node_path: str) -> tuple['Graph', str]: + def _get_graph_and_node(self, node_path: str) -> tuple["Graph", str]: """Returns the graph and node id for a node path.""" # Materialized graphs may have nodes at the top level if node_path in self.nodes: return (self, node_path) - node_id = node_path if '.' not in node_path else node_path[:node_path.index('.')] + node_id = ( + node_path if "." not in node_path else node_path[: node_path.index(".")] + ) if node_id not in self.nodes: - raise NodeNotFoundError(f'Node {node_path} not found in graph') + raise NodeNotFoundError(f"Node {node_path} not found in graph") node = self.nodes[node_id] if not isinstance(node, GraphInvocation): # There's more node path left but this isn't a graph - failure - raise NodeNotFoundError('Node path terminated early at a non-graph node') - - return node.graph._get_graph_and_node(node_path[node_path.index('.')+1:]) + raise NodeNotFoundError("Node path terminated early at a non-graph node") + return node.graph._get_graph_and_node(node_path[node_path.index(".") + 1 :]) def delete_node(self, node_path: str) -> None: """Deletes a node from a graph""" @@ -205,21 +240,20 @@ class Graph(BaseModel): input_edges = self._get_input_edges_and_graphs(node_path) output_edges = self._get_output_edges_and_graphs(node_path) - for edge_graph,_,edge in input_edges: + for edge_graph, _, edge in input_edges: edge_graph.delete_edge(edge) - for edge_graph,_,edge in output_edges: + for edge_graph, _, edge in output_edges: edge_graph.delete_edge(edge) del graph.nodes[node_id] except NodeNotFoundError: - pass # Ignore, not doesn't exist (should this throw?) - + pass # Ignore, not doesn't exist (should this throw?) def add_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None: """Adds an edge to a graph - + :raises InvalidEdgeError: the provided edge is invalid. 
""" @@ -227,8 +261,7 @@ class Graph(BaseModel): self.edges.append(edge) else: raise InvalidEdgeError() - - + def delete_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None: """Deletes an edge from a graph""" @@ -237,7 +270,6 @@ class Graph(BaseModel): except KeyError: pass - def is_valid(self) -> bool: """Validates the graph.""" @@ -247,7 +279,9 @@ class Graph(BaseModel): return False # Validate all edges reference nodes in the graph - node_ids = set([e[0].node_id for e in self.edges]+[e[1].node_id for e in self.edges]) + node_ids = set( + [e[0].node_id for e in self.edges] + [e[1].node_id for e in self.edges] + ) if not all((self.has_node(node_id) for node_id in node_ids)): return False @@ -255,26 +289,45 @@ class Graph(BaseModel): g = self.nx_graph_flat() if not nx.is_directed_acyclic_graph(g): return False - + # Validate all edge connections are valid - if not all((are_connections_compatible( - self.get_node(e[0].node_id), e[0].field, - self.get_node(e[1].node_id), e[1].field - ) for e in self.edges)): + if not all( + ( + are_connections_compatible( + self.get_node(e[0].node_id), + e[0].field, + self.get_node(e[1].node_id), + e[1].field, + ) + for e in self.edges + ) + ): return False - + # Validate all iterators # TODO: may need to validate all iterators in subgraphs so edge connections in parent graphs will be available - if not all((self._is_iterator_connection_valid(n.id) for n in self.nodes.values() if isinstance(n, IterateInvocation))): + if not all( + ( + self._is_iterator_connection_valid(n.id) + for n in self.nodes.values() + if isinstance(n, IterateInvocation) + ) + ): return False # Validate all collectors # TODO: may need to validate all collectors in subgraphs so edge connections in parent graphs will be available - if not all((self._is_collector_connection_valid(n.id) for n in self.nodes.values() if isinstance(n, CollectInvocation))): + if not all( + ( + self._is_collector_connection_valid(n.id) + for n in self.nodes.values() + if isinstance(n, CollectInvocation) + ) + ): return False - + return True - + def _is_edge_valid(self, edge: tuple[EdgeConnection, EdgeConnection]) -> bool: """Validates that a new edge doesn't create a cycle in the graph""" @@ -295,29 +348,39 @@ class Graph(BaseModel): g.add_edge(edge[0].node_id, edge[1].node_id) if not nx.is_directed_acyclic_graph(g): return False - + # Validate that the field types are compatible - if not are_connections_compatible(from_node, edge[0].field, to_node, edge[1].field): + if not are_connections_compatible( + from_node, edge[0].field, to_node, edge[1].field + ): return False # Validate if iterator output type matches iterator input type (if this edge results in both being set) - if isinstance(to_node, IterateInvocation) and edge[1].field == 'collection': - if not self._is_iterator_connection_valid(edge[1].node_id, new_input = edge[0]): + if isinstance(to_node, IterateInvocation) and edge[1].field == "collection": + if not self._is_iterator_connection_valid( + edge[1].node_id, new_input=edge[0] + ): return False # Validate if iterator input type matches output type (if this edge results in both being set) - if isinstance(from_node, IterateInvocation) and edge[0].field == 'item': - if not self._is_iterator_connection_valid(edge[0].node_id, new_output = edge[1]): + if isinstance(from_node, IterateInvocation) and edge[0].field == "item": + if not self._is_iterator_connection_valid( + edge[0].node_id, new_output=edge[1] + ): return False # Validate if collector input type matches output type (if this edge 
results in both being set) - if isinstance(to_node, CollectInvocation) and edge[1].field == 'item': - if not self._is_collector_connection_valid(edge[1].node_id, new_input = edge[0]): + if isinstance(to_node, CollectInvocation) and edge[1].field == "item": + if not self._is_collector_connection_valid( + edge[1].node_id, new_input=edge[0] + ): return False # Validate if collector output type matches input type (if this edge results in both being set) - if isinstance(from_node, CollectInvocation) and edge[0].field == 'collection': - if not self._is_collector_connection_valid(edge[0].node_id, new_output = edge[1]): + if isinstance(from_node, CollectInvocation) and edge[0].field == "collection": + if not self._is_collector_connection_valid( + edge[0].node_id, new_output=edge[1] + ): return False return True @@ -339,25 +402,27 @@ class Graph(BaseModel): graph, node_id = self._get_graph_and_node(node_path) return graph.nodes[node_id] - def _get_node_path(self, node_id: str, prefix: Optional[str] = None) -> str: - return node_id if prefix is None or prefix == '' else f'{prefix}.{node_id}' - + return node_id if prefix is None or prefix == "" else f"{prefix}.{node_id}" def update_node(self, node_path: str, new_node: BaseInvocation) -> None: """Updates a node in the graph.""" graph, node_id = self._get_graph_and_node(node_path) node = graph.nodes[node_id] - + # Ensure the node type matches the new node if type(node) != type(new_node): - raise TypeError(f'Node {node_path} is type {type(node)} but new node is type {type(new_node)}') + raise TypeError( + f"Node {node_path} is type {type(node)} but new node is type {type(new_node)}" + ) # Ensure the new id is either the same or is not in the graph - prefix = None if '.' not in node_path else node_path[:node_path.rindex('.')] - new_path = self._get_node_path(new_node.id, prefix = prefix) + prefix = None if "." not in node_path else node_path[: node_path.rindex(".")] + new_path = self._get_node_path(new_node.id, prefix=prefix) if new_node.id != node.id and self.has_node(new_path): - raise NodeAlreadyInGraphError('Node with id {new_node.id} already exists in graph') + raise NodeAlreadyInGraphError( + "Node with id {new_node.id} already exists in graph" + ) # Set the new node in the graph graph.nodes[new_node.id] = new_node @@ -369,136 +434,247 @@ class Graph(BaseModel): graph.delete_node(node_path) # Create new edges for each input and output - for graph,_,edge in input_edges: + for graph, _, edge in input_edges: # Remove the graph prefix from the node path - new_graph_node_path = new_node.id if '.' not in edge[1].node_id else f'{edge[1].node_id[edge[1].node_id.rindex("."):]}.{new_node.id}' - graph.add_edge((edge[0], EdgeConnection(node_id = new_graph_node_path, field = edge[1].field))) + new_graph_node_path = ( + new_node.id + if "." not in edge[1].node_id + else f'{edge[1].node_id[edge[1].node_id.rindex("."):]}.{new_node.id}' + ) + graph.add_edge( + ( + edge[0], + EdgeConnection( + node_id=new_graph_node_path, field=edge[1].field + ), + ) + ) - for graph,_,edge in output_edges: + for graph, _, edge in output_edges: # Remove the graph prefix from the node path - new_graph_node_path = new_node.id if '.' not in edge[0].node_id else f'{edge[0].node_id[edge[0].node_id.rindex("."):]}.{new_node.id}' - graph.add_edge((EdgeConnection(node_id = new_graph_node_path, field = edge[0].field), edge[1])) - + new_graph_node_path = ( + new_node.id + if "." 
not in edge[0].node_id + else f'{edge[0].node_id[edge[0].node_id.rindex("."):]}.{new_node.id}' + ) + graph.add_edge( + ( + EdgeConnection( + node_id=new_graph_node_path, field=edge[0].field + ), + edge[1], + ) + ) - def _get_input_edges(self, node_path: str, field: Optional[str] = None) -> list[tuple[EdgeConnection,EdgeConnection]]: + def _get_input_edges( + self, node_path: str, field: Optional[str] = None + ) -> list[tuple[EdgeConnection, EdgeConnection]]: """Gets all input edges for a node""" edges = self._get_input_edges_and_graphs(node_path) - + # Filter to edges that match the field filtered_edges = (e for e in edges if field is None or e[2][1].field == field) # Create full node paths for each edge - return [(EdgeConnection(node_id = self._get_node_path(e[0].node_id, prefix = prefix), field=e[0].field), EdgeConnection(node_id = self._get_node_path(e[1].node_id, prefix = prefix), field=e[1].field)) for _,prefix,e in filtered_edges] + return [ + ( + EdgeConnection( + node_id=self._get_node_path(e[0].node_id, prefix=prefix), + field=e[0].field, + ), + EdgeConnection( + node_id=self._get_node_path(e[1].node_id, prefix=prefix), + field=e[1].field, + ), + ) + for _, prefix, e in filtered_edges + ] - - def _get_input_edges_and_graphs(self, node_path: str, prefix: Optional[str] = None) -> list[tuple['Graph', str, tuple[EdgeConnection,EdgeConnection]]]: + def _get_input_edges_and_graphs( + self, node_path: str, prefix: Optional[str] = None + ) -> list[tuple["Graph", str, tuple[EdgeConnection, EdgeConnection]]]: """Gets all input edges for a node along with the graph they are in and the graph's path""" edges = list() # Return any input edges that appear in this graph - edges.extend([(self, prefix, e) for e in self.edges if e[1].node_id == node_path]) + edges.extend( + [(self, prefix, e) for e in self.edges if e[1].node_id == node_path] + ) - node_id = node_path if '.' not in node_path else node_path[:node_path.index('.')] + node_id = ( + node_path if "." 
not in node_path else node_path[: node_path.index(".")] + ) node = self.nodes[node_id] if isinstance(node, GraphInvocation): graph = node.graph - graph_path = node.id if prefix is None or prefix == '' else self._get_node_path(node.id, prefix = prefix) - graph_edges = graph._get_input_edges_and_graphs(node_path[(len(node_id)+1):], prefix=graph_path) + graph_path = ( + node.id + if prefix is None or prefix == "" + else self._get_node_path(node.id, prefix=prefix) + ) + graph_edges = graph._get_input_edges_and_graphs( + node_path[(len(node_id) + 1) :], prefix=graph_path + ) edges.extend(graph_edges) - + return edges - - def _get_output_edges(self, node_path: str, field: str) -> list[tuple[EdgeConnection,EdgeConnection]]: + def _get_output_edges( + self, node_path: str, field: str + ) -> list[tuple[EdgeConnection, EdgeConnection]]: """Gets all output edges for a node""" edges = self._get_output_edges_and_graphs(node_path) - + # Filter to edges that match the field filtered_edges = (e for e in edges if e[2][0].field == field) # Create full node paths for each edge - return [(EdgeConnection(node_id = self._get_node_path(e[0].node_id, prefix = prefix), field=e[0].field), EdgeConnection(node_id = self._get_node_path(e[1].node_id, prefix = prefix), field=e[1].field)) for _,prefix,e in filtered_edges] + return [ + ( + EdgeConnection( + node_id=self._get_node_path(e[0].node_id, prefix=prefix), + field=e[0].field, + ), + EdgeConnection( + node_id=self._get_node_path(e[1].node_id, prefix=prefix), + field=e[1].field, + ), + ) + for _, prefix, e in filtered_edges + ] - - def _get_output_edges_and_graphs(self, node_path: str, prefix: Optional[str] = None) -> list[tuple['Graph', str, tuple[EdgeConnection,EdgeConnection]]]: + def _get_output_edges_and_graphs( + self, node_path: str, prefix: Optional[str] = None + ) -> list[tuple["Graph", str, tuple[EdgeConnection, EdgeConnection]]]: """Gets all output edges for a node along with the graph they are in and the graph's path""" edges = list() # Return any input edges that appear in this graph - edges.extend([(self, prefix, e) for e in self.edges if e[0].node_id == node_path]) + edges.extend( + [(self, prefix, e) for e in self.edges if e[0].node_id == node_path] + ) - node_id = node_path if '.' not in node_path else node_path[:node_path.index('.')] + node_id = ( + node_path if "." 
not in node_path else node_path[: node_path.index(".")] + ) node = self.nodes[node_id] if isinstance(node, GraphInvocation): graph = node.graph - graph_path = node.id if prefix is None or prefix == '' else self._get_node_path(node.id, prefix = prefix) - graph_edges = graph._get_output_edges_and_graphs(node_path[(len(node_id)+1):], prefix=graph_path) + graph_path = ( + node.id + if prefix is None or prefix == "" + else self._get_node_path(node.id, prefix=prefix) + ) + graph_edges = graph._get_output_edges_and_graphs( + node_path[(len(node_id) + 1) :], prefix=graph_path + ) edges.extend(graph_edges) - + return edges - - def _is_iterator_connection_valid(self, node_path: str, new_input: Optional[EdgeConnection] = None, new_output: Optional[EdgeConnection] = None) -> bool: - inputs = list([e[0] for e in self._get_input_edges(node_path, 'collection')]) - outputs = list([e[1] for e in self._get_output_edges(node_path, 'item')]) + def _is_iterator_connection_valid( + self, + node_path: str, + new_input: Optional[EdgeConnection] = None, + new_output: Optional[EdgeConnection] = None, + ) -> bool: + inputs = list([e[0] for e in self._get_input_edges(node_path, "collection")]) + outputs = list([e[1] for e in self._get_output_edges(node_path, "item")]) if new_input is not None: inputs.append(new_input) if new_output is not None: outputs.append(new_output) - + # Only one input is allowed for iterators if len(inputs) > 1: return False # Get input and output fields (the fields linked to the iterator's input/output) - input_field = get_output_field(self.get_node(inputs[0].node_id), inputs[0].field) - output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs]) + input_field = get_output_field( + self.get_node(inputs[0].node_id), inputs[0].field + ) + output_fields = list( + [get_input_field(self.get_node(e.node_id), e.field) for e in outputs] + ) # Input type must be a list if get_origin(input_field) != list: return False - + # Validate that all outputs match the input type input_field_item_type = get_args(input_field)[0] - if not all((are_connection_types_compatible(input_field_item_type, f) for f in output_fields)): + if not all( + ( + are_connection_types_compatible(input_field_item_type, f) + for f in output_fields + ) + ): return False return True - def _is_collector_connection_valid(self, node_path: str, new_input: Optional[EdgeConnection] = None, new_output: Optional[EdgeConnection] = None) -> bool: - inputs = list([e[0] for e in self._get_input_edges(node_path, 'item')]) - outputs = list([e[1] for e in self._get_output_edges(node_path, 'collection')]) + def _is_collector_connection_valid( + self, + node_path: str, + new_input: Optional[EdgeConnection] = None, + new_output: Optional[EdgeConnection] = None, + ) -> bool: + inputs = list([e[0] for e in self._get_input_edges(node_path, "item")]) + outputs = list([e[1] for e in self._get_output_edges(node_path, "collection")]) if new_input is not None: inputs.append(new_input) if new_output is not None: outputs.append(new_output) - + # Get input and output fields (the fields linked to the iterator's input/output) - input_fields = list([get_output_field(self.get_node(e.node_id), e.field) for e in inputs]) - output_fields = list([get_input_field(self.get_node(e.node_id), e.field) for e in outputs]) - + input_fields = list( + [get_output_field(self.get_node(e.node_id), e.field) for e in inputs] + ) + output_fields = list( + [get_input_field(self.get_node(e.node_id), e.field) for e in outputs] + ) + # Validate that all inputs 
are derived from or match a single type - input_field_types = set([t for input_field in input_fields for t in ([input_field] if get_origin(input_field) == None else get_args(input_field)) if t != NoneType]) # Get unique types + input_field_types = set( + [ + t + for input_field in input_fields + for t in ( + [input_field] + if get_origin(input_field) == None + else get_args(input_field) + ) + if t != NoneType + ] + ) # Get unique types type_tree = nx.DiGraph() type_tree.add_nodes_from(input_field_types) - type_tree.add_edges_from([e for e in itertools.permutations(input_field_types, 2) if issubclass(e[1], e[0])]) + type_tree.add_edges_from( + [ + e + for e in itertools.permutations(input_field_types, 2) + if issubclass(e[1], e[0]) + ] + ) type_degrees = type_tree.in_degree(type_tree.nodes) - if sum((t[1] == 0 for t in type_degrees)) != 1: # type: ignore - return False # There is more than one root type + if sum((t[1] == 0 for t in type_degrees)) != 1: # type: ignore + return False # There is more than one root type # Get the input root type - input_root_type = next(t[0] for t in type_degrees if t[1] == 0) # type: ignore + input_root_type = next(t[0] for t in type_degrees if t[1] == 0) # type: ignore # Verify that all outputs are lists if not all((get_origin(f) == list for f in output_fields)): return False # Verify that all outputs match the input type (are a base class or the same class) - if not all((issubclass(input_root_type, get_args(f)[0]) for f in output_fields)): + if not all( + (issubclass(input_root_type, get_args(f)[0]) for f in output_fields) + ): return False return True @@ -511,49 +687,86 @@ class Graph(BaseModel): g.add_edges_from(set([(e[0].node_id, e[1].node_id) for e in self.edges])) return g - def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None) -> nx.DiGraph: + def nx_graph_flat( + self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None + ) -> nx.DiGraph: """Returns a flattened NetworkX DiGraph, including all subgraphs (but not with iterations expanded)""" g = nx_graph or nx.DiGraph() # Add all nodes from this graph except graph/iteration nodes - g.add_nodes_from([self._get_node_path(n.id, prefix) for n in self.nodes.values() if not isinstance(n, GraphInvocation) and not isinstance(n, IterateInvocation)]) + g.add_nodes_from( + [ + self._get_node_path(n.id, prefix) + for n in self.nodes.values() + if not isinstance(n, GraphInvocation) + and not isinstance(n, IterateInvocation) + ] + ) # Expand graph nodes - for sgn in (gn for gn in self.nodes.values() if isinstance(gn, GraphInvocation)): + for sgn in ( + gn for gn in self.nodes.values() if isinstance(gn, GraphInvocation) + ): sgn.graph.nx_graph_flat(g, self._get_node_path(sgn.id, prefix)) # TODO: figure out if iteration nodes need to be expanded unique_edges = set([(e[0].node_id, e[1].node_id) for e in self.edges]) - g.add_edges_from([(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) for e in unique_edges]) + g.add_edges_from( + [ + (self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) + for e in unique_edges + ] + ) return g class GraphExecutionState(BaseModel): """Tracks the state of a graph execution""" - id: str = Field(description="The id of the execution state", default_factory=uuid.uuid4) + + id: str = Field( + description="The id of the execution state", default_factory=uuid.uuid4 + ) # TODO: Store a reference to the graph instead of the actual graph? 
graph: Graph = Field(description="The graph being executed") # The graph of materialized nodes - execution_graph: Graph = Field(description="The expanded graph of activated and executed nodes", default_factory=Graph) + execution_graph: Graph = Field( + description="The expanded graph of activated and executed nodes", + default_factory=Graph, + ) # Nodes that have been executed - executed: set[str] = Field(description="The set of node ids that have been executed", default_factory=set) - executed_history: list[str] = Field(description="The list of node ids that have been executed, in order of execution", default_factory=list) + executed: set[str] = Field( + description="The set of node ids that have been executed", default_factory=set + ) + executed_history: list[str] = Field( + description="The list of node ids that have been executed, in order of execution", + default_factory=list, + ) # The results of executed nodes - results: dict[str, Annotated[InvocationOutputsUnion, Field(discriminator="type")]] = Field(description="The results of node executions", default_factory=dict) + results: dict[ + str, Annotated[InvocationOutputsUnion, Field(discriminator="type")] + ] = Field(description="The results of node executions", default_factory=dict) # Errors raised when executing nodes - errors: dict[str, str] = Field(description="Errors raised when executing nodes", default_factory=dict) + errors: dict[str, str] = Field( + description="Errors raised when executing nodes", default_factory=dict + ) # Map of prepared/executed nodes to their original nodes - prepared_source_mapping: dict[str, str] = Field(description="The map of prepared nodes to original graph nodes", default_factory=dict) + prepared_source_mapping: dict[str, str] = Field( + description="The map of prepared nodes to original graph nodes", + default_factory=dict, + ) # Map of original nodes to prepared nodes - source_prepared_mapping: dict[str, set[str]] = Field(description="The map of original graph nodes to prepared nodes", default_factory=dict) + source_prepared_mapping: dict[str, set[str]] = Field( + description="The map of original graph nodes to prepared nodes", + default_factory=dict, + ) def next(self) -> BaseInvocation | None: """Gets the next node ready to execute.""" @@ -569,10 +782,10 @@ class GraphExecutionState(BaseModel): # TODO: prepare multiple nodes at once? # while prepared_id is not None and not isinstance(self.graph.nodes[prepared_id], IterateInvocation): # prepared_id = self._prepare() - + if prepared_id is not None: next_node = self._get_next_node() - + # Get values from edges if next_node is not None: self._prepare_inputs(next_node) @@ -584,7 +797,7 @@ class GraphExecutionState(BaseModel): """Marks a node as complete""" if node_id not in self.execution_graph.nodes: - return # TODO: log error? + return # TODO: log error? 
# Mark node as executed self.executed.add(node_id) @@ -597,7 +810,7 @@ class GraphExecutionState(BaseModel): if all([n in self.executed for n in prepared_nodes]): self.executed.add(source_node) self.executed_history.append(source_node) - + def set_node_error(self, node_id: str, error: str): """Marks a node as errored""" self.errors[node_id] = error @@ -605,12 +818,14 @@ class GraphExecutionState(BaseModel): def is_complete(self) -> bool: """Returns true if the graph is complete""" return self.has_error() or all((k in self.executed for k in self.graph.nodes)) - + def has_error(self) -> bool: """Returns true if the graph has any errors""" return len(self.errors) > 0 - def _create_execution_node(self, node_path: str, iteration_node_map: list[tuple[str, str]]) -> list[str]: + def _create_execution_node( + self, node_path: str, iteration_node_map: list[tuple[str, str]] + ) -> list[str]: """Prepares an iteration node and connects all edges, returning the new node id""" node = self.graph.get_node(node_path) @@ -620,10 +835,20 @@ class GraphExecutionState(BaseModel): # If this is an iterator node, we must create a copy for each iteration if isinstance(node, IterateInvocation): # Get input collection edge (should error if there are no inputs) - input_collection_edge = next(iter(self.graph._get_input_edges(node_path, 'collection'))) - input_collection_prepared_node_id = next(n[1] for n in iteration_node_map if n[0] == input_collection_edge[0].node_id) - input_collection_prepared_node_output = self.results[input_collection_prepared_node_id] - input_collection = getattr(input_collection_prepared_node_output, input_collection_edge[0].field) + input_collection_edge = next( + iter(self.graph._get_input_edges(node_path, "collection")) + ) + input_collection_prepared_node_id = next( + n[1] + for n in iteration_node_map + if n[0] == input_collection_edge[0].node_id + ) + input_collection_prepared_node_output = self.results[ + input_collection_prepared_node_id + ] + input_collection = getattr( + input_collection_prepared_node_output, input_collection_edge[0].field + ) self_iteration_count = len(input_collection) new_nodes = list() @@ -638,12 +863,17 @@ class GraphExecutionState(BaseModel): # For collect nodes, this may contain multiple inputs to the same field new_edges = list() for edge in input_edges: - for input_node_id in (n[1] for n in iteration_node_map if n[0] == edge[0].node_id): - new_edge = (EdgeConnection(node_id = input_node_id, field = edge[0].field), EdgeConnection(node_id = '', field = edge[1].field)) + for input_node_id in ( + n[1] for n in iteration_node_map if n[0] == edge[0].node_id + ): + new_edge = ( + EdgeConnection(node_id=input_node_id, field=edge[0].field), + EdgeConnection(node_id="", field=edge[1].field), + ) new_edges.append(new_edge) - + # Create a new node (or one for each iteration of this iterator) - for i in (range(self_iteration_count) if self_iteration_count > 0 else [-1]): + for i in range(self_iteration_count) if self_iteration_count > 0 else [-1]: # Create a new node new_node = copy.deepcopy(node) @@ -663,7 +893,10 @@ class GraphExecutionState(BaseModel): # Add new edges to execution graph for edge in new_edges: - new_edge = (edge[0], EdgeConnection(node_id = new_node.id, field = edge[1].field)) + new_edge = ( + edge[0], + EdgeConnection(node_id=new_node.id, field=edge[1].field), + ) self.execution_graph.add_edge(new_edge) new_nodes.append(new_node.id) @@ -673,26 +906,40 @@ class GraphExecutionState(BaseModel): def _iterator_graph(self) -> nx.DiGraph: """Gets a DiGraph 
with edges to collectors removed so an ancestor search produces all active iterators for any node""" g = self.graph.nx_graph() - collectors = (n for n in self.graph.nodes if isinstance(self.graph.nodes[n], CollectInvocation)) + collectors = ( + n + for n in self.graph.nodes + if isinstance(self.graph.nodes[n], CollectInvocation) + ) for c in collectors: g.remove_edges_from(list(g.in_edges(c))) return g - def _get_node_iterators(self, node_id: str) -> list[str]: """Gets iterators for a node""" g = self._iterator_graph() - iterators = [n for n in nx.ancestors(g, node_id) if isinstance(self.graph.nodes[n], IterateInvocation)] + iterators = [ + n + for n in nx.ancestors(g, node_id) + if isinstance(self.graph.nodes[n], IterateInvocation) + ] return iterators - def _prepare(self) -> Optional[str]: # Get flattened source graph g = self.graph.nx_graph_flat() # Find next unprepared node where all source nodes are executed sorted_nodes = nx.topological_sort(g) - next_node_id = next((n for n in sorted_nodes if n not in self.source_prepared_mapping and all((e[0] in self.executed for e in g.in_edges(n)))), None) + next_node_id = next( + ( + n + for n in sorted_nodes + if n not in self.source_prepared_mapping + and all((e[0] in self.executed for e in g.in_edges(n))) + ), + None, + ) if next_node_id == None: return None @@ -705,48 +952,86 @@ class GraphExecutionState(BaseModel): new_node_ids = list() if isinstance(next_node, CollectInvocation): # Collapse all iterator input mappings and create a single execution node for the collect invocation - all_iteration_mappings = list(itertools.chain(*(((s,p) for p in self.source_prepared_mapping[s]) for s in next_node_parents))) - #all_iteration_mappings = list(set(itertools.chain(*prepared_parent_mappings))) - create_results = self._create_execution_node(next_node_id, all_iteration_mappings) + all_iteration_mappings = list( + itertools.chain( + *( + ((s, p) for p in self.source_prepared_mapping[s]) + for s in next_node_parents + ) + ) + ) + # all_iteration_mappings = list(set(itertools.chain(*prepared_parent_mappings))) + create_results = self._create_execution_node( + next_node_id, all_iteration_mappings + ) if create_results is not None: new_node_ids.extend(create_results) - else: # Iterators or normal nodes + else: # Iterators or normal nodes # Get all iterator combinations for this node # Will produce a list of lists of prepared iterator nodes, from which results can be iterated iterator_nodes = self._get_node_iterators(next_node_id) - iterator_nodes_prepared = [list(self.source_prepared_mapping[n]) for n in iterator_nodes] - iterator_node_prepared_combinations = list(itertools.product(*iterator_nodes_prepared)) + iterator_nodes_prepared = [ + list(self.source_prepared_mapping[n]) for n in iterator_nodes + ] + iterator_node_prepared_combinations = list( + itertools.product(*iterator_nodes_prepared) + ) # Select the correct prepared parents for each iteration # For every iterator, the parent must either not be a child of that iterator, or must match the prepared iteration for that iterator # TODO: Handle a node mapping to none eg = self.execution_graph.nx_graph_flat() - prepared_parent_mappings = [[(n,self._get_iteration_node(n, g, eg, it)) for n in next_node_parents] for it in iterator_node_prepared_combinations] # type: ignore - + prepared_parent_mappings = [[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents] for it in iterator_node_prepared_combinations] # type: ignore + # Create execution node for each iteration for iteration_mappings 
in prepared_parent_mappings: - create_results = self._create_execution_node(next_node_id, iteration_mappings) # type: ignore + create_results = self._create_execution_node(next_node_id, iteration_mappings) # type: ignore if create_results is not None: new_node_ids.extend(create_results) return next(iter(new_node_ids), None) - - def _get_iteration_node(self, source_node_path: str, graph: nx.DiGraph, execution_graph: nx.DiGraph, prepared_iterator_nodes: list[str]) -> Optional[str]: + + def _get_iteration_node( + self, + source_node_path: str, + graph: nx.DiGraph, + execution_graph: nx.DiGraph, + prepared_iterator_nodes: list[str], + ) -> Optional[str]: """Gets the prepared version of the specified source node that matches every iteration specified""" prepared_nodes = self.source_prepared_mapping[source_node_path] if len(prepared_nodes) == 1: return next(iter(prepared_nodes)) - + # Check if the requested node is an iterator - prepared_iterator = next((n for n in prepared_nodes if n in prepared_iterator_nodes), None) + prepared_iterator = next( + (n for n in prepared_nodes if n in prepared_iterator_nodes), None + ) if prepared_iterator is not None: return prepared_iterator # Filter to only iterator nodes that are a parent of the specified node, in tuple format (prepared, source) - iterator_source_node_mapping = [(n, self.prepared_source_mapping[n]) for n in prepared_iterator_nodes] - parent_iterators = [itn for itn in iterator_source_node_mapping if nx.has_path(graph, itn[1], source_node_path)] + iterator_source_node_mapping = [ + (n, self.prepared_source_mapping[n]) for n in prepared_iterator_nodes + ] + parent_iterators = [ + itn + for itn in iterator_source_node_mapping + if nx.has_path(graph, itn[1], source_node_path) + ] - return next((n for n in prepared_nodes if all(pit for pit in parent_iterators if nx.has_path(execution_graph, pit[0], n))), None) + return next( + ( + n + for n in prepared_nodes + if all( + pit + for pit in parent_iterators + if nx.has_path(execution_graph, pit[0], n) + ) + ), + None, + ) def _get_next_node(self) -> Optional[BaseInvocation]: g = self.execution_graph.nx_graph() @@ -760,8 +1045,12 @@ class GraphExecutionState(BaseModel): def _prepare_inputs(self, node: BaseInvocation): input_edges = [e for e in self.execution_graph.edges if e[1].node_id == node.id] if isinstance(node, CollectInvocation): - output_collection = [getattr(self.results[edge[0].node_id], edge[0].field) for edge in input_edges if edge[1].field == 'item'] - setattr(node, 'collection', output_collection) + output_collection = [ + getattr(self.results[edge[0].node_id], edge[0].field) + for edge in input_edges + if edge[1].field == "item" + ] + setattr(node, "collection", output_collection) else: for edge in input_edges: output_value = getattr(self.results[edge[0].node_id], edge[0].field) @@ -771,7 +1060,7 @@ class GraphExecutionState(BaseModel): def _is_edge_valid(self, edge: tuple[EdgeConnection, EdgeConnection]) -> bool: if not self._is_edge_valid(edge): return False - + # Invalid if destination has already been prepared or executed if edge[1].node_id in self.source_prepared_mapping: return False @@ -785,25 +1074,34 @@ class GraphExecutionState(BaseModel): def add_node(self, node: BaseInvocation) -> None: self.graph.add_node(node) - + def update_node(self, node_path: str, new_node: BaseInvocation) -> None: if not self._is_node_updatable(node_path): - raise NodeAlreadyExecutedError(f'Node {node_path} has already been prepared or executed and cannot be updated') + raise NodeAlreadyExecutedError( + 
f"Node {node_path} has already been prepared or executed and cannot be updated" + ) self.graph.update_node(node_path, new_node) def delete_node(self, node_path: str) -> None: if not self._is_node_updatable(node_path): - raise NodeAlreadyExecutedError(f'Node {node_path} has already been prepared or executed and cannot be deleted') + raise NodeAlreadyExecutedError( + f"Node {node_path} has already been prepared or executed and cannot be deleted" + ) self.graph.delete_node(node_path) def add_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None: if not self._is_node_updatable(edge[1].node_id): - raise NodeAlreadyExecutedError(f'Destination node {edge[1].node_id} has already been prepared or executed and cannot be linked to') + raise NodeAlreadyExecutedError( + f"Destination node {edge[1].node_id} has already been prepared or executed and cannot be linked to" + ) self.graph.add_edge(edge) - + def delete_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None: if not self._is_node_updatable(edge[1].node_id): - raise NodeAlreadyExecutedError(f'Destination node {edge[1].node_id} has already been prepared or executed and cannot have a source edge deleted') + raise NodeAlreadyExecutedError( + f"Destination node {edge[1].node_id} has already been prepared or executed and cannot have a source edge deleted" + ) self.graph.delete_edge(edge) + GraphInvocation.update_forward_refs() diff --git a/invokeai/app/services/image_storage.py b/invokeai/app/services/image_storage.py index fa6a85dfbe..ad0ff23f14 100644 --- a/invokeai/app/services/image_storage.py +++ b/invokeai/app/services/image_storage.py @@ -1,20 +1,22 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from abc import ABC, abstractmethod -from enum import Enum import datetime import os +from abc import ABC, abstractmethod +from enum import Enum from pathlib import Path from queue import Queue from typing import Dict + from PIL.Image import Image + from invokeai.backend.image_util import PngWriter class ImageType(str, Enum): - RESULT = 'results' - INTERMEDIATE = 'intermediates' - UPLOAD = 'uploads' + RESULT = "results" + INTERMEDIATE = "intermediates" + UPLOAD = "uploads" class ImageStorageBase(ABC): @@ -38,14 +40,15 @@ class ImageStorageBase(ABC): pass def create_name(self, context_id: str, node_id: str) -> str: - return f'{context_id}_{node_id}_{str(int(datetime.datetime.now(datetime.timezone.utc).timestamp()))}.png' + return f"{context_id}_{node_id}_{str(int(datetime.datetime.now(datetime.timezone.utc).timestamp()))}.png" class DiskImageStorage(ImageStorageBase): """Stores images on disk""" + __output_folder: str __pngWriter: PngWriter - __cache_ids: Queue # TODO: this is an incredibly naive cache + __cache_ids: Queue # TODO: this is an incredibly naive cache __cache: Dict[str, Image] __max_cache_size: int @@ -54,13 +57,15 @@ class DiskImageStorage(ImageStorageBase): self.__pngWriter = PngWriter(output_folder) self.__cache = dict() self.__cache_ids = Queue() - self.__max_cache_size = 10 # TODO: get this from config + self.__max_cache_size = 10 # TODO: get this from config Path(output_folder).mkdir(parents=True, exist_ok=True) # TODO: don't hard-code. get/save/delete should maybe take subpath? 
for image_type in ImageType: - Path(os.path.join(output_folder, image_type)).mkdir(parents=True, exist_ok=True) + Path(os.path.join(output_folder, image_type)).mkdir( + parents=True, exist_ok=True + ) def get(self, image_type: ImageType, image_name: str) -> Image: image_path = self.get_path(image_type, image_name) @@ -79,7 +84,9 @@ class DiskImageStorage(ImageStorageBase): def save(self, image_type: ImageType, image_name: str, image: Image) -> None: image_subpath = os.path.join(image_type, image_name) - self.__pngWriter.save_image_and_prompt_to_png(image, "", image_subpath, None) # TODO: just pass full path to png writer + self.__pngWriter.save_image_and_prompt_to_png( + image, "", image_subpath, None + ) # TODO: just pass full path to png writer image_path = self.get_path(image_type, image_name) self.__set_cache(image_path, image) @@ -88,7 +95,7 @@ class DiskImageStorage(ImageStorageBase): image_path = self.get_path(image_type, image_name) if os.path.exists(image_path): os.remove(image_path) - + if image_path in self.__cache: del self.__cache[image_path] @@ -98,7 +105,9 @@ class DiskImageStorage(ImageStorageBase): def __set_cache(self, image_name: str, image: Image): if not image_name in self.__cache: self.__cache[image_name] = image - self.__cache_ids.put(image_name) # TODO: this should refresh position for LRU cache + self.__cache_ids.put( + image_name + ) # TODO: this should refresh position for LRU cache if len(self.__cache) > self.__max_cache_size: cache_id = self.__cache_ids.get() del self.__cache[cache_id] diff --git a/invokeai/app/services/invocation_queue.py b/invokeai/app/services/invocation_queue.py index 0a5b5ae3bb..88a4f8708d 100644 --- a/invokeai/app/services/invocation_queue.py +++ b/invokeai/app/services/invocation_queue.py @@ -6,17 +6,19 @@ from queue import Queue # TODO: make this serializable class InvocationQueueItem: - #session_id: str + # session_id: str graph_execution_state_id: str invocation_id: str invoke_all: bool - def __init__(self, - #session_id: str, + def __init__( + self, + # session_id: str, graph_execution_state_id: str, invocation_id: str, - invoke_all: bool = False): - #self.session_id = session_id + invoke_all: bool = False, + ): + # self.session_id = session_id self.graph_execution_state_id = graph_execution_state_id self.invocation_id = invocation_id self.invoke_all = invoke_all @@ -24,12 +26,13 @@ class InvocationQueueItem: class InvocationQueueABC(ABC): """Abstract base class for all invocation queues""" + @abstractmethod def get(self) -> InvocationQueueItem: pass - + @abstractmethod - def put(self, item: InvocationQueueItem|None) -> None: + def put(self, item: InvocationQueueItem | None) -> None: pass @@ -38,9 +41,9 @@ class MemoryInvocationQueue(InvocationQueueABC): def __init__(self): self.__queue = Queue() - + def get(self) -> InvocationQueueItem: return self.__queue.get() - - def put(self, item: InvocationQueueItem|None) -> None: + + def put(self, item: InvocationQueueItem | None) -> None: self.__queue.put(item) diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index 93d60bb230..42cbd6c271 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -1,28 +1,32 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from .invocation_queue import InvocationQueueABC -from .item_storage import ItemStorageABC -from .image_storage import ImageStorageBase -from .events import EventServiceBase from invokeai.backend import 
Generate -class InvocationServices(): +from .events import EventServiceBase +from .image_storage import ImageStorageBase +from .invocation_queue import InvocationQueueABC +from .item_storage import ItemStorageABC + + +class InvocationServices: """Services that can be used by invocations""" - generate: Generate # TODO: wrap Generate, or split it up from model? + + generate: Generate # TODO: wrap Generate, or split it up from model? events: EventServiceBase images: ImageStorageBase queue: InvocationQueueABC # NOTE: we must forward-declare any types that include invocations, since invocations can use services - graph_execution_manager: ItemStorageABC['GraphExecutionState'] - processor: 'InvocationProcessorABC' + graph_execution_manager: ItemStorageABC["GraphExecutionState"] + processor: "InvocationProcessorABC" - def __init__(self, + def __init__( + self, generate: Generate, events: EventServiceBase, images: ImageStorageBase, queue: InvocationQueueABC, - graph_execution_manager: ItemStorageABC['GraphExecutionState'], - processor: 'InvocationProcessorABC' + graph_execution_manager: ItemStorageABC["GraphExecutionState"], + processor: "InvocationProcessorABC", ): self.generate = generate self.events = events diff --git a/invokeai/app/services/invoker.py b/invokeai/app/services/invoker.py index 4397a75021..f234cd827b 100644 --- a/invokeai/app/services/invoker.py +++ b/invokeai/app/services/invoker.py @@ -2,11 +2,12 @@ from abc import ABC from threading import Event, Thread -from .graph import Graph, GraphExecutionState -from .item_storage import ItemStorageABC + from ..invocations.baseinvocation import InvocationContext -from .invocation_services import InvocationServices +from .graph import Graph, GraphExecutionState from .invocation_queue import InvocationQueueABC, InvocationQueueItem +from .invocation_services import InvocationServices +from .item_storage import ItemStorageABC class Invoker: @@ -14,14 +15,13 @@ class Invoker: services: InvocationServices - def __init__(self, - services: InvocationServices - ): + def __init__(self, services: InvocationServices): self.services = services self._start() - - def invoke(self, graph_execution_state: GraphExecutionState, invoke_all: bool = False) -> str|None: + def invoke( + self, graph_execution_state: GraphExecutionState, invoke_all: bool = False + ) -> str | None: """Determines the next node to invoke and returns the id of the invoked node, or None if there are no nodes to execute""" # Get the next invocation @@ -33,38 +33,36 @@ class Invoker: self.services.graph_execution_manager.set(graph_execution_state) # Queue the invocation - print(f'queueing item {invocation.id}') - self.services.queue.put(InvocationQueueItem( - #session_id = session.id, - graph_execution_state_id = graph_execution_state.id, - invocation_id = invocation.id, - invoke_all = invoke_all - )) + print(f"queueing item {invocation.id}") + self.services.queue.put( + InvocationQueueItem( + # session_id = session.id, + graph_execution_state_id=graph_execution_state.id, + invocation_id=invocation.id, + invoke_all=invoke_all, + ) + ) return invocation.id - - def create_execution_state(self, graph: Graph|None = None) -> GraphExecutionState: + def create_execution_state(self, graph: Graph | None = None) -> GraphExecutionState: """Creates a new execution state for the given graph""" - new_state = GraphExecutionState(graph = Graph() if graph is None else graph) + new_state = GraphExecutionState(graph=Graph() if graph is None else graph) self.services.graph_execution_manager.set(new_state) 
return new_state - def __start_service(self, service) -> None: # Call start() method on any services that have it - start_op = getattr(service, 'start', None) + start_op = getattr(service, "start", None) if callable(start_op): start_op(self) - def __stop_service(self, service) -> None: # Call stop() method on any services that have it - stop_op = getattr(service, 'stop', None) + stop_op = getattr(service, "stop", None) if callable(stop_op): stop_op(self) - def _start(self) -> None: """Starts the invoker. This is called automatically when the invoker is created.""" for service in vars(self.services): @@ -73,7 +71,6 @@ class Invoker: for service in vars(self.services): self.__start_service(getattr(self.services, service)) - def stop(self) -> None: """Stops the invoker. A new invoker will have to be created to execute further.""" # First stop all services @@ -87,4 +84,4 @@ class Invoker: class InvocationProcessorABC(ABC): - pass \ No newline at end of file + pass diff --git a/invokeai/app/services/item_storage.py b/invokeai/app/services/item_storage.py index 738f06cb7e..83044e5a52 100644 --- a/invokeai/app/services/item_storage.py +++ b/invokeai/app/services/item_storage.py @@ -1,18 +1,20 @@ +from abc import ABC, abstractmethod +from typing import Callable, Generic, TypeVar -from typing import Callable, TypeVar, Generic from pydantic import BaseModel, Field from pydantic.generics import GenericModel -from abc import ABC, abstractmethod -T = TypeVar('T', bound=BaseModel) +T = TypeVar("T", bound=BaseModel) + class PaginatedResults(GenericModel, Generic[T]): """Paginated results""" - items: list[T] = Field(description = "Items") - page: int = Field(description = "Current Page") - pages: int = Field(description = "Total number of pages") - per_page: int = Field(description = "Number of items per page") - total: int = Field(description = "Total number of items in result") + + items: list[T] = Field(description="Items") + page: int = Field(description="Current Page") + pages: int = Field(description="Total number of pages") + per_page: int = Field(description="Number of items per page") + total: int = Field(description="Total number of items in result") class ItemStorageABC(ABC, Generic[T]): @@ -24,6 +26,7 @@ class ItemStorageABC(ABC, Generic[T]): self._on_deleted_callbacks = list() """Base item storage class""" + @abstractmethod def get(self, item_id: str) -> T: pass @@ -37,7 +40,9 @@ class ItemStorageABC(ABC, Generic[T]): pass @abstractmethod - def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]: + def search( + self, query: str, page: int = 0, per_page: int = 10 + ) -> PaginatedResults[T]: pass def on_changed(self, on_changed: Callable[[T], None]) -> None: @@ -51,7 +56,7 @@ class ItemStorageABC(ABC, Generic[T]): def _on_changed(self, item: T) -> None: for callback in self._on_changed_callbacks: callback(item) - + def _on_deleted(self, item_id: str) -> None: for callback in self._on_deleted_callbacks: callback(item_id) diff --git a/invokeai/app/services/processor.py b/invokeai/app/services/processor.py index 1825d404e5..5baa64503c 100644 --- a/invokeai/app/services/processor.py +++ b/invokeai/app/services/processor.py @@ -1,5 +1,6 @@ -from threading import Event, Thread import traceback +from threading import Event, Thread + from ..invocations.baseinvocation import InvocationContext from .invocation_queue import InvocationQueueItem from .invoker import InvocationProcessorABC, Invoker @@ -14,52 +15,62 @@ class DefaultInvocationProcessor(InvocationProcessorABC): 
self.__invoker = invoker self.__stop_event = Event() self.__invoker_thread = Thread( - name = "invoker_processor", - target = self.__process, - kwargs = dict(stop_event = self.__stop_event) + name="invoker_processor", + target=self.__process, + kwargs=dict(stop_event=self.__stop_event), + ) + self.__invoker_thread.daemon = ( + True # TODO: probably better to just not use threads? ) - self.__invoker_thread.daemon = True # TODO: probably better to just not use threads? self.__invoker_thread.start() - def stop(self, *args, **kwargs) -> None: self.__stop_event.set() - def __process(self, stop_event: Event): try: while not stop_event.is_set(): queue_item: InvocationQueueItem = self.__invoker.services.queue.get() - if not queue_item: # Probably stopping + if not queue_item: # Probably stopping continue - graph_execution_state = self.__invoker.services.graph_execution_manager.get(queue_item.graph_execution_state_id) - invocation = graph_execution_state.execution_graph.get_node(queue_item.invocation_id) + graph_execution_state = ( + self.__invoker.services.graph_execution_manager.get( + queue_item.graph_execution_state_id + ) + ) + invocation = graph_execution_state.execution_graph.get_node( + queue_item.invocation_id + ) # Send starting event self.__invoker.services.events.emit_invocation_started( - graph_execution_state_id = graph_execution_state.id, - invocation_id = invocation.id + graph_execution_state_id=graph_execution_state.id, + invocation_id=invocation.id, ) # Invoke try: - outputs = invocation.invoke(InvocationContext( - services = self.__invoker.services, - graph_execution_state_id = graph_execution_state.id - )) + outputs = invocation.invoke( + InvocationContext( + services=self.__invoker.services, + graph_execution_state_id=graph_execution_state.id, + ) + ) # Save outputs and history graph_execution_state.complete(invocation.id, outputs) # Save the state changes - self.__invoker.services.graph_execution_manager.set(graph_execution_state) + self.__invoker.services.graph_execution_manager.set( + graph_execution_state + ) # Send complete event self.__invoker.services.events.emit_invocation_complete( - graph_execution_state_id = graph_execution_state.id, - invocation_id = invocation.id, - result = outputs.dict() + graph_execution_state_id=graph_execution_state.id, + invocation_id=invocation.id, + result=outputs.dict(), ) except KeyboardInterrupt: @@ -72,24 +83,27 @@ class DefaultInvocationProcessor(InvocationProcessorABC): graph_execution_state.set_node_error(invocation.id, error) # Save the state changes - self.__invoker.services.graph_execution_manager.set(graph_execution_state) + self.__invoker.services.graph_execution_manager.set( + graph_execution_state + ) # Send error event self.__invoker.services.events.emit_invocation_error( - graph_execution_state_id = graph_execution_state.id, - invocation_id = invocation.id, - error = error - ) + graph_execution_state_id=graph_execution_state.id, + invocation_id=invocation.id, + error=error, + ) pass - # Queue any further commands if invoking all is_complete = graph_execution_state.is_complete() if queue_item.invoke_all and not is_complete: - self.__invoker.invoke(graph_execution_state, invoke_all = True) + self.__invoker.invoke(graph_execution_state, invoke_all=True) elif is_complete: - self.__invoker.services.events.emit_graph_execution_complete(graph_execution_state.id) + self.__invoker.services.events.emit_graph_execution_complete( + graph_execution_state.id + ) except KeyboardInterrupt: - ... # Log something? + ... # Log something? 
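As a minimal sketch of the queue/processor flow reformatted above: the invoker enqueues an InvocationQueueItem and the processor's worker thread consumes it with get(), looks up the graph execution state, and invokes the node. This sketch is illustrative only and is not part of the patch; it assumes the invokeai.app.services.invocation_queue module path introduced in this commit, and the id strings are placeholder values.

    from invokeai.app.services.invocation_queue import (
        InvocationQueueItem,
        MemoryInvocationQueue,
    )

    # The invoker enqueues one work item per node it wants executed.
    queue = MemoryInvocationQueue()
    queue.put(
        InvocationQueueItem(
            graph_execution_state_id="example-state-id",  # placeholder id, not from the patch
            invocation_id="example-node-id",              # placeholder id, not from the patch
            invoke_all=False,
        )
    )

    # DefaultInvocationProcessor's worker thread blocks on get(), then executes
    # the referenced node and re-queues further work when invoke_all is True.
    item = queue.get()
    print(item.graph_execution_state_id, item.invocation_id)
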
diff --git a/invokeai/app/services/sqlite.py b/invokeai/app/services/sqlite.py index 8858bbd874..e5bba4ad31 100644 --- a/invokeai/app/services/sqlite.py +++ b/invokeai/app/services/sqlite.py @@ -1,12 +1,15 @@ import sqlite3 from threading import Lock from typing import Generic, TypeVar, Union, get_args + from pydantic import BaseModel, parse_raw_as + from .item_storage import ItemStorageABC, PaginatedResults -T = TypeVar('T', bound=BaseModel) +T = TypeVar("T", bound=BaseModel) + +sqlite_memory = ":memory:" -sqlite_memory = ':memory:' class SqliteItemStorage(ItemStorageABC, Generic[T]): _filename: str @@ -16,15 +19,17 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): _id_field: str _lock: Lock - def __init__(self, filename: str, table_name: str, id_field: str = 'id'): + def __init__(self, filename: str, table_name: str, id_field: str = "id"): super().__init__() self._filename = filename self._table_name = table_name - self._id_field = id_field # TODO: validate that T has this field + self._id_field = id_field # TODO: validate that T has this field self._lock = Lock() - self._conn = sqlite3.connect(self._filename, check_same_thread=False) # TODO: figure out a better threading solution + self._conn = sqlite3.connect( + self._filename, check_same_thread=False + ) # TODO: figure out a better threading solution self._cursor = self._conn.cursor() self._create_table() @@ -32,10 +37,14 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): def _create_table(self): try: self._lock.acquire() - self._cursor.execute(f'''CREATE TABLE IF NOT EXISTS {self._table_name} ( + self._cursor.execute( + f"""CREATE TABLE IF NOT EXISTS {self._table_name} ( item TEXT, - id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);''') - self._cursor.execute(f'''CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);''') + id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);""" + ) + self._cursor.execute( + f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);""" + ) finally: self._lock.release() @@ -46,7 +55,10 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): def set(self, item: T): try: self._lock.acquire() - self._cursor.execute(f'''INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);''', (item.json(),)) + self._cursor.execute( + f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""", + (item.json(),), + ) finally: self._lock.release() self._on_changed(item) @@ -54,7 +66,9 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): def get(self, id: str) -> Union[T, None]: try: self._lock.acquire() - self._cursor.execute(f'''SELECT item FROM {self._table_name} WHERE id = ?;''', (str(id),)) + self._cursor.execute( + f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),) + ) result = self._cursor.fetchone() finally: self._lock.release() @@ -67,7 +81,9 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): def delete(self, id: str): try: self._lock.acquire() - self._cursor.execute(f'''DELETE FROM {self._table_name} WHERE id = ?;''', (str(id),)) + self._cursor.execute( + f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),) + ) finally: self._lock.release() self._on_deleted(id) @@ -75,12 +91,15 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]: try: self._lock.acquire() - self._cursor.execute(f'''SELECT item FROM {self._table_name} LIMIT ? 
OFFSET ?;''', (per_page, page * per_page)) + self._cursor.execute( + f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""", + (per_page, page * per_page), + ) result = self._cursor.fetchall() items = list(map(lambda r: self._parse_item(r[0]), result)) - self._cursor.execute(f'''SELECT count(*) FROM {self._table_name};''') + self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""") count = self._cursor.fetchone()[0] finally: self._lock.release() @@ -88,22 +107,26 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): pageCount = int(count / per_page) + 1 return PaginatedResults[T]( - items = items, - page = page, - pages = pageCount, - per_page = per_page, - total = count + items=items, page=page, pages=pageCount, per_page=per_page, total=count ) - - def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]: + + def search( + self, query: str, page: int = 0, per_page: int = 10 + ) -> PaginatedResults[T]: try: self._lock.acquire() - self._cursor.execute(f'''SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;''', (f'%{query}%', per_page, page * per_page)) + self._cursor.execute( + f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""", + (f"%{query}%", per_page, page * per_page), + ) result = self._cursor.fetchall() items = list(map(lambda r: self._parse_item(r[0]), result)) - self._cursor.execute(f'''SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;''', (f'%{query}%',)) + self._cursor.execute( + f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""", + (f"%{query}%",), + ) count = self._cursor.fetchone()[0] finally: self._lock.release() @@ -111,9 +134,5 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): pageCount = int(count / per_page) + 1 return PaginatedResults[T]( - items = items, - page = page, - pages = pageCount, - per_page = per_page, - total = count + items=items, page=page, pages=pageCount, per_page=per_page, total=count ) diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index 7bdc6e7270..9062fb7d52 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -1,8 +1,5 @@ -''' +""" Initialization file for invokeai.backend -''' -from .model_management import ModelManager +""" from .generate import Generate - - - +from .model_management import ModelManager diff --git a/invokeai/backend/args.py b/invokeai/backend/args.py index 7de6b35edb..b961658fde 100644 --- a/invokeai/backend/args.py +++ b/invokeai/backend/args.py @@ -95,8 +95,8 @@ from argparse import Namespace from pathlib import Path from typing import List -from invokeai.backend.image_util import retrieve_metadata import invokeai.version +from invokeai.backend.image_util import retrieve_metadata from .globals import Globals from .prompting import split_weighted_subprompts @@ -106,32 +106,33 @@ APP_NAME = invokeai.version.__app_name__ APP_VERSION = invokeai.version.__version__ SAMPLER_CHOICES = [ - 'ddim', - 'k_dpm_2_a', - 'k_dpm_2', - 'k_dpmpp_2_a', - 'k_dpmpp_2', - 'k_euler_a', - 'k_euler', - 'k_heun', - 'k_lms', - 'plms', + "ddim", + "k_dpm_2_a", + "k_dpm_2", + "k_dpmpp_2_a", + "k_dpmpp_2", + "k_euler_a", + "k_euler", + "k_heun", + "k_lms", + "plms", # diffusers: "pndm", ] PRECISION_CHOICES = [ - 'auto', - 'float32', - 'autocast', - 'float16', + "auto", + "float32", + "autocast", + "float16", ] + class ArgFormatter(argparse.RawTextHelpFormatter): - # use defined argument order to display usage + # use defined argument order to display usage def _format_usage(self, 
usage, actions, groups, prefix): if prefix is None: - prefix = 'usage: ' + prefix = "usage: " # if usage is specified, use that if usage is not None: @@ -139,109 +140,121 @@ class ArgFormatter(argparse.RawTextHelpFormatter): # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: - usage = 'invoke>' + usage = "invoke>" elif usage is None: - prog='invoke>' + prog = "invoke>" # build full usage string - action_usage = self._format_actions_usage(actions, groups) # NEW - usage = ' '.join([s for s in [prog, action_usage] if s]) + action_usage = self._format_actions_usage(actions, groups) # NEW + usage = " ".join([s for s in [prog, action_usage] if s]) # omit the long line wrapping code # prefix with 'usage:' - return '%s%s\n\n' % (prefix, usage) + return "%s%s\n\n" % (prefix, usage) + class PagingArgumentParser(argparse.ArgumentParser): - ''' + """ A custom ArgumentParser that uses pydoc to page its output. It also supports reading defaults from an init file. - ''' + """ + def print_help(self, file=None): text = self.format_help() pydoc.pager(text) def convert_arg_line_to_args(self, arg_line): - return shlex.split(arg_line,comments=True) + return shlex.split(arg_line, comments=True) + class Args(object): - def __init__(self,arg_parser=None,cmd_parser=None): - ''' + def __init__(self, arg_parser=None, cmd_parser=None): + """ Initialize new Args class. It takes two optional arguments, an argparse parser for switches given on the shell command line, and an argparse parser for switches given on the invoke> CLI line. If one or both are missing, it creates appropriate parsers internally. - ''' - self._arg_parser = arg_parser or self._create_arg_parser() - self._cmd_parser = cmd_parser or self._create_dream_cmd_parser() - self._arg_switches = self.parse_cmd('') # fill in defaults - self._cmd_switches = self.parse_cmd('') # fill in defaults + """ + self._arg_parser = arg_parser or self._create_arg_parser() + self._cmd_parser = cmd_parser or self._create_dream_cmd_parser() + self._arg_switches = self.parse_cmd("") # fill in defaults + self._cmd_switches = self.parse_cmd("") # fill in defaults - def parse_args(self, args: List[str]=None): - '''Parse the shell switches and store.''' + def parse_args(self, args: List[str] = None): + """Parse the shell switches and store.""" sysargs = args if args is not None else sys.argv[1:] try: # pre-parse before we do any initialization to get root directory # and intercept --version request switches = self._arg_parser.parse_args(sysargs) if switches.version: - print(f'{APP_NAME} {APP_VERSION}') + print(f"{APP_NAME} {APP_VERSION}") sys.exit(0) - print('* Initializing, be patient...') + print("* Initializing, be patient...") Globals.root = Path(os.path.abspath(switches.root_dir or Globals.root)) Globals.try_patchmatch = switches.patchmatch # now use root directory to find the init file - initfile = os.path.expanduser(os.path.join(Globals.root,Globals.initfile)) - legacyinit = os.path.expanduser('~/.invokeai') + initfile = os.path.expanduser(os.path.join(Globals.root, Globals.initfile)) + legacyinit = os.path.expanduser("~/.invokeai") if os.path.exists(initfile): - print(f'>> Initialization file {initfile} found. Loading...',file=sys.stderr) - sysargs.insert(0,f'@{initfile}') + print( + f">> Initialization file {initfile} found. Loading...", + file=sys.stderr, + ) + sysargs.insert(0, f"@{initfile}") elif os.path.exists(legacyinit): - print(f'>> WARNING: Old initialization file found at {legacyinit}. 
This location is deprecated. Please move it to {Globals.root}/invokeai.init.') - sysargs.insert(0,f'@{legacyinit}') - Globals.log_tokenization = self._arg_parser.parse_args(sysargs).log_tokenization + print( + f">> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init." + ) + sysargs.insert(0, f"@{legacyinit}") + Globals.log_tokenization = self._arg_parser.parse_args( + sysargs + ).log_tokenization self._arg_switches = self._arg_parser.parse_args(sysargs) return self._arg_switches except Exception as e: - print(f'An exception has occurred: {e}') + print(f"An exception has occurred: {e}") return None - def parse_cmd(self,cmd_string): - '''Parse a invoke>-style command string ''' + def parse_cmd(self, cmd_string): + """Parse a invoke>-style command string""" # handle the case in which the first token is a switch - if cmd_string.startswith('-'): - prompt = '' + if cmd_string.startswith("-"): + prompt = "" switches = cmd_string # handle the case in which the prompt is enclosed by quotes elif cmd_string.startswith('"'): - a = shlex.split(cmd_string,comments=True) + a = shlex.split(cmd_string, comments=True) prompt = a[0] switches = shlex.join(a[1:]) else: # no initial quote, so get everything up to the first thing # that looks like a switch - if cmd_string.startswith('-'): - prompt = '' + if cmd_string.startswith("-"): + prompt = "" switches = cmd_string else: - match = re.match('^(.+?)\s(--?[a-zA-Z].+)',cmd_string) + match = re.match("^(.+?)\s(--?[a-zA-Z].+)", cmd_string) if match: - prompt,switches = match.groups() + prompt, switches = match.groups() else: prompt = cmd_string - switches = '' + switches = "" try: - self._cmd_switches = self._cmd_parser.parse_args(shlex.split(switches,comments=True)) - if not getattr(self._cmd_switches,'prompt'): - setattr(self._cmd_switches,'prompt',prompt) + self._cmd_switches = self._cmd_parser.parse_args( + shlex.split(switches, comments=True) + ) + if not getattr(self._cmd_switches, "prompt"): + setattr(self._cmd_switches, "prompt", prompt) return self._cmd_switches except: return None - def json(self,**kwargs): + def json(self, **kwargs): return json.dumps(self.to_dict(**kwargs)) - def to_dict(self,**kwargs): + def to_dict(self, **kwargs): a = vars(self) a.update(kwargs) return a @@ -249,125 +262,129 @@ class Args(object): # Isn't there a more automated way of doing this? # Ideally we get the switch strings out of the argparse objects, # but I don't see a documented API for this. 
- def dream_prompt_str(self,**kwargs): + def dream_prompt_str(self, **kwargs): """Normalized dream_prompt.""" a = vars(self) a.update(kwargs) switches = list() - prompt = a['prompt'] - prompt.replace('"','\\"') + prompt = a["prompt"] + prompt.replace('"', '\\"') switches.append(prompt) switches.append(f'-s {a["steps"]}') switches.append(f'-S {a["seed"]}') switches.append(f'-W {a["width"]}') switches.append(f'-H {a["height"]}') switches.append(f'-C {a["cfg_scale"]}') - if a['karras_max'] is not None: + if a["karras_max"] is not None: switches.append(f'--karras_max {a["karras_max"]}') - if a['perlin'] > 0: + if a["perlin"] > 0: switches.append(f'--perlin {a["perlin"]}') - if a['threshold'] > 0: + if a["threshold"] > 0: switches.append(f'--threshold {a["threshold"]}') - if a['grid']: - switches.append('--grid') - if a['seamless']: - switches.append('--seamless') - if a['hires_fix']: - switches.append('--hires_fix') - if a['h_symmetry_time_pct']: + if a["grid"]: + switches.append("--grid") + if a["seamless"]: + switches.append("--seamless") + if a["hires_fix"]: + switches.append("--hires_fix") + if a["h_symmetry_time_pct"]: switches.append(f'--h_symmetry_time_pct {a["h_symmetry_time_pct"]}') - if a['v_symmetry_time_pct']: + if a["v_symmetry_time_pct"]: switches.append(f'--v_symmetry_time_pct {a["v_symmetry_time_pct"]}') # img2img generations have parameters relevant only to them and have special handling - if a['init_img'] and len(a['init_img'])>0: + if a["init_img"] and len(a["init_img"]) > 0: switches.append(f'-I {a["init_img"]}') switches.append(f'-A {a["sampler_name"]}') - if a['fit']: - switches.append('--fit') - if a['init_mask'] and len(a['init_mask'])>0: + if a["fit"]: + switches.append("--fit") + if a["init_mask"] and len(a["init_mask"]) > 0: switches.append(f'-M {a["init_mask"]}') - if a['init_color'] and len(a['init_color'])>0: + if a["init_color"] and len(a["init_color"]) > 0: switches.append(f'--init_color {a["init_color"]}') - if a['strength'] and a['strength']>0: + if a["strength"] and a["strength"] > 0: switches.append(f'-f {a["strength"]}') - if a['inpaint_replace']: - switches.append('--inpaint_replace') - if a['text_mask']: + if a["inpaint_replace"]: + switches.append("--inpaint_replace") + if a["text_mask"]: switches.append(f'-tm {" ".join([str(u) for u in a["text_mask"]])}') else: switches.append(f'-A {a["sampler_name"]}') # facetool-specific parameters, only print if running facetool - if a['facetool_strength']: + if a["facetool_strength"]: switches.append(f'-G {a["facetool_strength"]}') switches.append(f'-ft {a["facetool"]}') if a["facetool"] == "codeformer": switches.append(f'-cf {a["codeformer_fidelity"]}') - if a['outcrop']: + if a["outcrop"]: switches.append(f'-c {" ".join([str(u) for u in a["outcrop"]])}') # esrgan-specific parameters - if a['upscale']: + if a["upscale"]: switches.append(f'-U {" ".join([str(u) for u in a["upscale"]])}') # embiggen parameters - if a['embiggen']: + if a["embiggen"]: switches.append(f'--embiggen {" ".join([str(u) for u in a["embiggen"]])}') - if a['embiggen_tiles']: - switches.append(f'--embiggen_tiles {" ".join([str(u) for u in a["embiggen_tiles"]])}') - if a['embiggen_strength']: + if a["embiggen_tiles"]: + switches.append( + f'--embiggen_tiles {" ".join([str(u) for u in a["embiggen_tiles"]])}' + ) + if a["embiggen_strength"]: switches.append(f'--embiggen_strength {a["embiggen_strength"]}') # outpainting parameters - if a['out_direction']: + if a["out_direction"]: switches.append(f'-D {" ".join([str(u) for u in 
a["out_direction"]])}') # LS: slight semantic drift which needs addressing in the future: # 1. Variations come out of the stored metadata as a packed string with the keyword "variations" # 2. However, they come out of the CLI (and probably web) with the keyword "with_variations" and # in broken-out form. Variation (1) should be changed to comply with (2) - if a['with_variations'] and len(a['with_variations'])>0: - formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in (a["with_variations"])) - switches.append(f'-V {formatted_variations}') - if 'variations' in a and len(a['variations'])>0: + if a["with_variations"] and len(a["with_variations"]) > 0: + formatted_variations = ",".join( + f"{seed}:{weight}" for seed, weight in (a["with_variations"]) + ) + switches.append(f"-V {formatted_variations}") + if "variations" in a and len(a["variations"]) > 0: switches.append(f'-V {a["variations"]}') - return ' '.join(switches) + return " ".join(switches) - def __getattribute__(self,name): - ''' + def __getattribute__(self, name): + """ Returns union of command-line arguments and dream_prompt arguments, with the latter superseding the former. - ''' + """ cmd_switches = None arg_switches = None try: - cmd_switches = object.__getattribute__(self,'_cmd_switches') - arg_switches = object.__getattribute__(self,'_arg_switches') + cmd_switches = object.__getattribute__(self, "_cmd_switches") + arg_switches = object.__getattribute__(self, "_arg_switches") except AttributeError: pass - if cmd_switches and arg_switches and name=='__dict__': + if cmd_switches and arg_switches and name == "__dict__": return self._merge_dict( arg_switches.__dict__, cmd_switches.__dict__, ) try: - return object.__getattribute__(self,name) + return object.__getattribute__(self, name) except AttributeError: pass - if not hasattr(cmd_switches,name) and not hasattr(arg_switches,name): + if not hasattr(cmd_switches, name) and not hasattr(arg_switches, name): raise AttributeError - value_arg,value_cmd = (None,None) + value_arg, value_cmd = (None, None) try: - value_cmd = getattr(cmd_switches,name) + value_cmd = getattr(cmd_switches, name) except AttributeError: pass try: - value_arg = getattr(arg_switches,name) + value_arg = getattr(arg_switches, name) except AttributeError: pass @@ -375,46 +392,46 @@ class Args(object): # default behavior is to choose the dream_command value over # the arg value. For example, the --grid and --individual options are a little # funny because of their push/pull relationship. This is how to handle it. 
- if name=='grid': + if name == "grid": if cmd_switches.individual: return False else: return value_cmd or value_arg return value_cmd if value_cmd is not None else value_arg - def __setattr__(self,name,value): - if name.startswith('_'): - object.__setattr__(self,name,value) + def __setattr__(self, name, value): + if name.startswith("_"): + object.__setattr__(self, name, value) else: self._cmd_switches.__dict__[name] = value - def _merge_dict(self,dict1,dict2): - new_dict = {} - for k in set(list(dict1.keys())+list(dict2.keys())): - value1 = dict1.get(k,None) - value2 = dict2.get(k,None) + def _merge_dict(self, dict1, dict2): + new_dict = {} + for k in set(list(dict1.keys()) + list(dict2.keys())): + value1 = dict1.get(k, None) + value2 = dict2.get(k, None) new_dict[k] = value2 if value2 is not None else value1 return new_dict - def _create_init_file(self,initfile:str): - with open(initfile, mode='w', encoding='utf-8') as f: - f.write('''# InvokeAI initialization file + def _create_init_file(self, initfile: str): + with open(initfile, mode="w", encoding="utf-8") as f: + f.write( + """# InvokeAI initialization file # Put frequently-used startup commands here, one or more per line # Examples: # --web --host=0.0.0.0 # --steps 20 # -Ak_euler_a -C10.0 -''' +""" ) def _create_arg_parser(self): - ''' + """ This defines all the arguments used on the command line when you launch the CLI or web backend. - ''' + """ parser = PagingArgumentParser( - description= - """ + description=""" Generate images using Stable Diffusion. Use --web to launch the web interface. Use --from_file to load prompts from a file path or standard input ("-"). @@ -422,298 +439,293 @@ class Args(object): Other command-line arguments are defaults that can usually be overridden prompt the command prompt. 
""", - fromfile_prefix_chars='@', + fromfile_prefix_chars="@", ) - general_group = parser.add_argument_group('General') - model_group = parser.add_argument_group('Model selection') - file_group = parser.add_argument_group('Input/output') - web_server_group = parser.add_argument_group('Web server') - render_group = parser.add_argument_group('Rendering') - postprocessing_group = parser.add_argument_group('Postprocessing') - deprecated_group = parser.add_argument_group('Deprecated options') + general_group = parser.add_argument_group("General") + model_group = parser.add_argument_group("Model selection") + file_group = parser.add_argument_group("Input/output") + web_server_group = parser.add_argument_group("Web server") + render_group = parser.add_argument_group("Rendering") + postprocessing_group = parser.add_argument_group("Postprocessing") + deprecated_group = parser.add_argument_group("Deprecated options") - deprecated_group.add_argument('--laion400m') - deprecated_group.add_argument('--weights') # deprecated + deprecated_group.add_argument("--laion400m") + deprecated_group.add_argument("--weights") # deprecated deprecated_group.add_argument( - '--ckpt_convert', + "--ckpt_convert", action=argparse.BooleanOptionalAction, - dest='ckpt_convert', + dest="ckpt_convert", default=True, - help='Load legacy ckpt files as diffusers (deprecated; always true now).', + help="Load legacy ckpt files as diffusers (deprecated; always true now).", ) general_group.add_argument( - '--version','-V', - action='store_true', - help='Print InvokeAI version number' + "--version", "-V", action="store_true", help="Print InvokeAI version number" ) model_group.add_argument( - '--root_dir', + "--root_dir", default=None, help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai.', ) model_group.add_argument( - '--config', - '-c', - '-config', - dest='conf', - default='./configs/models.yaml', - help='Path to configuration file for alternate models.', + "--config", + "-c", + "-config", + dest="conf", + default="./configs/models.yaml", + help="Path to configuration file for alternate models.", ) model_group.add_argument( - '--model', + "--model", help='Indicates which diffusion model to load (defaults to "default" stanza in configs/models.yaml)', ) model_group.add_argument( - '--weight_dirs', - nargs='+', + "--weight_dirs", + nargs="+", type=str, - help='List of one or more directories that will be auto-scanned for new model weights to import', + help="List of one or more directories that will be auto-scanned for new model weights to import", ) model_group.add_argument( - '--png_compression','-z', + "--png_compression", + "-z", type=int, default=6, - choices=range(0,9), - dest='png_compression', - help='level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.' + choices=range(0, 9), + dest="png_compression", + help="level of PNG compression, from 0 (none) to 9 (maximum). 
Default is 6.", ) model_group.add_argument( - '-F', - '--full_precision', - dest='full_precision', - action='store_true', - help='Deprecated way to set --precision=float32', + "-F", + "--full_precision", + dest="full_precision", + action="store_true", + help="Deprecated way to set --precision=float32", ) model_group.add_argument( - '--max_loaded_models', - dest='max_loaded_models', + "--max_loaded_models", + dest="max_loaded_models", type=int, default=2, - help='Maximum number of models to keep in memory for fast switching, including the one in GPU', + help="Maximum number of models to keep in memory for fast switching, including the one in GPU", ) model_group.add_argument( - '--free_gpu_mem', - dest='free_gpu_mem', - action='store_true', - help='Force free gpu memory before final decoding', + "--free_gpu_mem", + dest="free_gpu_mem", + action="store_true", + help="Force free gpu memory before final decoding", ) model_group.add_argument( - '--sequential_guidance', - dest='sequential_guidance', - action='store_true', + "--sequential_guidance", + dest="sequential_guidance", + action="store_true", help="Calculate guidance in serial instead of in parallel, lowering memory requirement " - "at the expense of speed", + "at the expense of speed", ) model_group.add_argument( - '--xformers', + "--xformers", action=argparse.BooleanOptionalAction, default=True, - help='Enable/disable xformers support (default enabled if installed)', + help="Enable/disable xformers support (default enabled if installed)", ) model_group.add_argument( "--always_use_cpu", dest="always_use_cpu", action="store_true", - help="Force use of CPU even if GPU is available" + help="Force use of CPU even if GPU is available", ) model_group.add_argument( - '--precision', - dest='precision', + "--precision", + dest="precision", type=str, choices=PRECISION_CHOICES, - metavar='PRECISION', + metavar="PRECISION", help=f'Set model precision. Defaults to auto selected based on device. Options: {", ".join(PRECISION_CHOICES)}', - default='auto', + default="auto", ) model_group.add_argument( - '--internet', + "--internet", action=argparse.BooleanOptionalAction, - dest='internet_available', + dest="internet_available", default=True, - help='Indicate whether internet is available for just-in-time model downloading (default: probe automatically).', + help="Indicate whether internet is available for just-in-time model downloading (default: probe automatically).", ) model_group.add_argument( - '--nsfw_checker', - '--safety_checker', + "--nsfw_checker", + "--safety_checker", action=argparse.BooleanOptionalAction, - dest='safety_checker', + dest="safety_checker", default=False, - help='Check for and blur potentially NSFW images. Use --no-nsfw_checker to disable.', + help="Check for and blur potentially NSFW images. 
Use --no-nsfw_checker to disable.", ) model_group.add_argument( - '--autoimport', + "--autoimport", default=None, type=str, - help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly', + help="Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly", ) model_group.add_argument( - '--autoconvert', + "--autoconvert", default=None, type=str, - help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import as optimized diffuser models', + help="Check the indicated directory for .ckpt/.safetensors weights files at startup and import as optimized diffuser models", ) model_group.add_argument( - '--patchmatch', + "--patchmatch", action=argparse.BooleanOptionalAction, default=True, - help='Load the patchmatch extension for outpainting. Use --no-patchmatch to disable.', + help="Load the patchmatch extension for outpainting. Use --no-patchmatch to disable.", ) file_group.add_argument( - '--from_file', - dest='infile', + "--from_file", + dest="infile", type=str, - help='If specified, load prompts from this file', + help="If specified, load prompts from this file", ) file_group.add_argument( - '--outdir', - '-o', + "--outdir", + "-o", type=str, - help='Directory to save generated images and a log of prompts and seeds. Default: ROOTDIR/outputs', - default='outputs', + help="Directory to save generated images and a log of prompts and seeds. Default: ROOTDIR/outputs", + default="outputs", ) file_group.add_argument( - '--prompt_as_dir', - '-p', - action='store_true', - help='Place images in subdirectories named after the prompt.', + "--prompt_as_dir", + "-p", + action="store_true", + help="Place images in subdirectories named after the prompt.", ) render_group.add_argument( - '--fnformat', - default='{prefix}.{seed}.png', + "--fnformat", + default="{prefix}.{seed}.png", type=str, - help='Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png', + help="Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png", ) render_group.add_argument( - '-s', - '--steps', + "-s", "--steps", type=int, default=50, help="Number of steps" + ) + render_group.add_argument( + "-W", + "--width", type=int, - default=50, - help='Number of steps' + help="Image width, multiple of 64", ) render_group.add_argument( - '-W', - '--width', + "-H", + "--height", type=int, - help='Image width, multiple of 64', + help="Image height, multiple of 64", ) render_group.add_argument( - '-H', - '--height', - type=int, - help='Image height, multiple of 64', - ) - render_group.add_argument( - '-C', - '--cfg_scale', + "-C", + "--cfg_scale", default=7.5, type=float, help='Classifier free guidance (CFG) scale - higher numbers cause generator to "try" harder.', ) render_group.add_argument( - '--sampler', - '-A', - '-m', - dest='sampler_name', + "--sampler", + "-A", + "-m", + dest="sampler_name", type=str, choices=SAMPLER_CHOICES, - metavar='SAMPLER_NAME', + metavar="SAMPLER_NAME", help=f'Set the default sampler. 
Supported samplers: {", ".join(SAMPLER_CHOICES)}', - default='k_lms', + default="k_lms", ) render_group.add_argument( - '--log_tokenization', - '-t', - action='store_true', - help='shows how the prompt is split into tokens' + "--log_tokenization", + "-t", + action="store_true", + help="shows how the prompt is split into tokens", ) render_group.add_argument( - '-f', - '--strength', + "-f", + "--strength", type=float, - help='img2img strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely', + help="img2img strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely", ) render_group.add_argument( - '-T', - '-fit', - '--fit', + "-T", + "-fit", + "--fit", action=argparse.BooleanOptionalAction, - help='If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)', + help="If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)", ) render_group.add_argument( - '--grid', - '-g', + "--grid", + "-g", action=argparse.BooleanOptionalAction, - help='generate a grid' + help="generate a grid", ) render_group.add_argument( - '--embedding_directory', - '--embedding_path', - dest='embedding_path', - default='embeddings', + "--embedding_directory", + "--embedding_path", + dest="embedding_path", + default="embeddings", type=str, - help='Path to a directory containing .bin and/or .pt files, or a single .bin/.pt file. You may use subdirectories. (default is ROOTDIR/embeddings)' + help="Path to a directory containing .bin and/or .pt files, or a single .bin/.pt file. You may use subdirectories. (default is ROOTDIR/embeddings)", ) render_group.add_argument( - '--embeddings', + "--embeddings", action=argparse.BooleanOptionalAction, default=True, - help='Enable embedding directory (default). Use --no-embeddings to disable.', + help="Enable embedding directory (default). Use --no-embeddings to disable.", ) render_group.add_argument( - '--enable_image_debugging', - action='store_true', - help='Generates debugging image to display' + "--enable_image_debugging", + action="store_true", + help="Generates debugging image to display", ) render_group.add_argument( - '--karras_max', + "--karras_max", type=int, default=None, - help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29]." + help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. 
[29].", ) # Restoration related args postprocessing_group.add_argument( - '--no_restore', - dest='restore', - action='store_false', - help='Disable face restoration with GFPGAN or codeformer', + "--no_restore", + dest="restore", + action="store_false", + help="Disable face restoration with GFPGAN or codeformer", ) postprocessing_group.add_argument( - '--no_upscale', - dest='esrgan', - action='store_false', - help='Disable upscaling with ESRGAN', + "--no_upscale", + dest="esrgan", + action="store_false", + help="Disable upscaling with ESRGAN", ) postprocessing_group.add_argument( - '--esrgan_bg_tile', + "--esrgan_bg_tile", type=int, default=400, - help='Tile size for background sampler, 0 for no tile during testing. Default: 400.', + help="Tile size for background sampler, 0 for no tile during testing. Default: 400.", ) postprocessing_group.add_argument( - '--esrgan_denoise_str', + "--esrgan_denoise_str", type=float, default=0.75, - help='esrgan denoise str. 0 is no denoise, 1 is max denoise. Default: 0.75', + help="esrgan denoise str. 0 is no denoise, 1 is max denoise. Default: 0.75", ) postprocessing_group.add_argument( - '--gfpgan_model_path', + "--gfpgan_model_path", type=str, - default='./models/gfpgan/GFPGANv1.4.pth', - help='Indicates the path to the GFPGAN model', + default="./models/gfpgan/GFPGANv1.4.pth", + help="Indicates the path to the GFPGAN model", ) web_server_group.add_argument( - '--web', - dest='web', - action='store_true', - help='Start in web server mode.', + "--web", + dest="web", + action="store_true", + help="Start in web server mode.", ) web_server_group.add_argument( - '--web_develop', - dest='web_develop', - action='store_true', - help='Start in web server development mode.', + "--web_develop", + dest="web_develop", + action="store_true", + help="Start in web server development mode.", ) web_server_group.add_argument( "--web_verbose", @@ -727,34 +739,31 @@ class Args(object): help="Additional allowed origins, comma-separated", ) web_server_group.add_argument( - '--host', + "--host", type=str, - default='127.0.0.1', - help='Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.' + default="127.0.0.1", + help="Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.", ) web_server_group.add_argument( - '--port', - type=int, - default='9090', - help='Web server: Port to listen on' + "--port", type=int, default="9090", help="Web server: Port to listen on" ) web_server_group.add_argument( - '--certfile', + "--certfile", type=str, default=None, - help='Web server: Path to certificate file to use for SSL. Use together with --keyfile' + help="Web server: Path to certificate file to use for SSL. Use together with --keyfile", ) web_server_group.add_argument( - '--keyfile', + "--keyfile", type=str, default=None, - help='Web server: Path to private key file to use for SSL. Use together with --certfile' + help="Web server: Path to private key file to use for SSL. 
Use together with --certfile", ) web_server_group.add_argument( - '--gui', - dest='gui', - action='store_true', - help='Start InvokeAI GUI', + "--gui", + dest="gui", + action="store_true", + help="Start InvokeAI GUI", ) return parser @@ -762,8 +771,7 @@ class Args(object): def _create_dream_cmd_parser(self): parser = PagingArgumentParser( formatter_class=ArgFormatter, - description= - """ + description=""" *Image generation* invoke> a fantastic alien landscape -W576 -H512 -s60 -n4 @@ -800,517 +808,549 @@ class Args(object): !convert_model /path/to/weights/file.ckpt -- converts a .ckpt file path to a diffusers model !edit_model -- edit a model's description !del_model -- delete a model - """ + """, ) - render_group = parser.add_argument_group('General rendering') - img2img_group = parser.add_argument_group('Image-to-image and inpainting') - inpainting_group = parser.add_argument_group('Inpainting') - outpainting_group = parser.add_argument_group('Outpainting and outcropping') - variation_group = parser.add_argument_group('Creating and combining variations') - postprocessing_group = parser.add_argument_group('Post-processing') - special_effects_group = parser.add_argument_group('Special effects') - deprecated_group = parser.add_argument_group('Deprecated options') + render_group = parser.add_argument_group("General rendering") + img2img_group = parser.add_argument_group("Image-to-image and inpainting") + inpainting_group = parser.add_argument_group("Inpainting") + outpainting_group = parser.add_argument_group("Outpainting and outcropping") + variation_group = parser.add_argument_group("Creating and combining variations") + postprocessing_group = parser.add_argument_group("Post-processing") + special_effects_group = parser.add_argument_group("Special effects") + deprecated_group = parser.add_argument_group("Deprecated options") render_group.add_argument( - '--prompt', - default='', - help='prompt string', + "--prompt", + default="", + help="prompt string", ) + render_group.add_argument("-s", "--steps", type=int, help="Number of steps") render_group.add_argument( - '-s', - '--steps', - type=int, - help='Number of steps' - ) - render_group.add_argument( - '-S', - '--seed', + "-S", + "--seed", type=int, default=None, - help='Image seed; a +ve integer, or use -1 for the previous seed, -2 for the one before that, etc', + help="Image seed; a +ve integer, or use -1 for the previous seed, -2 for the one before that, etc", ) render_group.add_argument( - '-n', - '--iterations', + "-n", + "--iterations", type=int, default=1, - help='Number of samplings to perform (slower, but will provide seeds for individual images)', + help="Number of samplings to perform (slower, but will provide seeds for individual images)", ) render_group.add_argument( - '-W', - '--width', + "-W", + "--width", type=int, - help='Image width, multiple of 64', + help="Image width, multiple of 64", ) render_group.add_argument( - '-H', - '--height', + "-H", + "--height", type=int, - help='Image height, multiple of 64', + help="Image height, multiple of 64", ) render_group.add_argument( - '-C', - '--cfg_scale', + "-C", + "--cfg_scale", type=float, help='Classifier free guidance (CFG) scale - higher numbers cause generator to "try" harder.', ) render_group.add_argument( - '--threshold', + "--threshold", default=0.0, type=float, help='Latent threshold for classifier free guidance (CFG) - prevent generator from "trying" too hard. 
Use positive values, 0 disables.', ) render_group.add_argument( - '--perlin', + "--perlin", default=0.0, type=float, - help='Perlin noise scale (0.0 - 1.0) - add perlin noise to the initialization instead of the usual gaussian noise.', + help="Perlin noise scale (0.0 - 1.0) - add perlin noise to the initialization instead of the usual gaussian noise.", ) render_group.add_argument( - '--h_symmetry_time_pct', + "--h_symmetry_time_pct", default=None, type=float, - help='Horizontal symmetry point (0.0 - 1.0) - apply horizontal symmetry at this point in image generation.', + help="Horizontal symmetry point (0.0 - 1.0) - apply horizontal symmetry at this point in image generation.", ) render_group.add_argument( - '--v_symmetry_time_pct', + "--v_symmetry_time_pct", default=None, type=float, - help='Vertical symmetry point (0.0 - 1.0) - apply vertical symmetry at this point in image generation.', + help="Vertical symmetry point (0.0 - 1.0) - apply vertical symmetry at this point in image generation.", ) render_group.add_argument( - '--fnformat', - default='{prefix}.{seed}.png', + "--fnformat", + default="{prefix}.{seed}.png", type=str, - help='Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png', + help="Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png", ) render_group.add_argument( - '--grid', - '-g', + "--grid", + "-g", action=argparse.BooleanOptionalAction, - help='generate a grid' + help="generate a grid", ) render_group.add_argument( - '-i', - '--individual', - action='store_true', - help='override command-line --grid setting and generate individual images' + "-i", + "--individual", + action="store_true", + help="override command-line --grid setting and generate individual images", ) render_group.add_argument( - '-x', - '--skip_normalize', - action='store_true', - help='Skip subprompt weight normalization', + "-x", + "--skip_normalize", + action="store_true", + help="Skip subprompt weight normalization", ) render_group.add_argument( - '-A', - '-m', - '--sampler', - dest='sampler_name', + "-A", + "-m", + "--sampler", + dest="sampler_name", type=str, choices=SAMPLER_CHOICES, - metavar='SAMPLER_NAME', + metavar="SAMPLER_NAME", help=f'Switch to a different sampler. 
Supported samplers: {", ".join(SAMPLER_CHOICES)}', ) render_group.add_argument( - '-t', - '--log_tokenization', - action='store_true', - help='shows how the prompt is split into tokens' + "-t", + "--log_tokenization", + action="store_true", + help="shows how the prompt is split into tokens", ) render_group.add_argument( - '--outdir', - '-o', + "--outdir", + "-o", type=str, - help='Directory to save generated images and a log of prompts and seeds', + help="Directory to save generated images and a log of prompts and seeds", ) render_group.add_argument( - '--hires_fix', - action='store_true', - dest='hires_fix', - help='Create hires image using img2img to prevent duplicated objects' + "--hires_fix", + action="store_true", + dest="hires_fix", + help="Create hires image using img2img to prevent duplicated objects", ) render_group.add_argument( - '--save_intermediates', + "--save_intermediates", type=int, default=0, - dest='save_intermediates', - help='Save every nth intermediate image into an "intermediates" directory within the output directory' + dest="save_intermediates", + help='Save every nth intermediate image into an "intermediates" directory within the output directory', ) render_group.add_argument( - '--png_compression','-z', + "--png_compression", + "-z", type=int, default=6, - choices=range(0,10), - dest='png_compression', - help='level of PNG compression, from 0 (none) to 9 (maximum). [6]' + choices=range(0, 10), + dest="png_compression", + help="level of PNG compression, from 0 (none) to 9 (maximum). [6]", ) render_group.add_argument( - '--karras_max', + "--karras_max", type=int, default=None, - help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29]." + help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29].", ) img2img_group.add_argument( - '-I', - '--init_img', + "-I", + "--init_img", type=str, - help='Path to input image for img2img mode (supersedes width and height)', + help="Path to input image for img2img mode (supersedes width and height)", ) img2img_group.add_argument( - '-tm', - '--text_mask', - nargs='+', + "-tm", + "--text_mask", + nargs="+", type=str, help='Use the clipseg classifier to generate the mask area for inpainting. 
Provide a description of the area to mask ("a mug"), optionally followed by the confidence level threshold (0-1.0; defaults to 0.5).', default=None, ) img2img_group.add_argument( - '--init_color', + "--init_color", type=str, - help='Path to reference image for color correction (used for repeated img2img and inpainting)' + help="Path to reference image for color correction (used for repeated img2img and inpainting)", ) img2img_group.add_argument( - '-T', - '-fit', - '--fit', - action='store_true', - help='If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)', + "-T", + "-fit", + "--fit", + action="store_true", + help="If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)", ) img2img_group.add_argument( - '-f', - '--strength', + "-f", + "--strength", type=float, - help='img2img strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely', + help="img2img strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely", ) inpainting_group.add_argument( - '-M', - '--init_mask', + "-M", + "--init_mask", type=str, - help='Path to input mask for inpainting mode (supersedes width and height)', + help="Path to input mask for inpainting mode (supersedes width and height)", ) inpainting_group.add_argument( - '--invert_mask', - action='store_true', - help='Invert the mask', + "--invert_mask", + action="store_true", + help="Invert the mask", ) inpainting_group.add_argument( - '-r', - '--inpaint_replace', + "-r", + "--inpaint_replace", type=float, default=0.0, - help='when inpainting, adjust how aggressively to replace the part of the picture under the mask, from 0.0 (a gentle merge) to 1.0 (replace entirely)', + help="when inpainting, adjust how aggressively to replace the part of the picture under the mask, from 0.0 (a gentle merge) to 1.0 (replace entirely)", ) outpainting_group.add_argument( - '-c', - '--outcrop', - nargs='+', + "-c", + "--outcrop", + nargs="+", type=str, - metavar=('direction','pixels'), - help='Outcrop the image with one or more direction/pixel pairs: e.g. -c top 64 bottom 128 left 64 right 64', + metavar=("direction", "pixels"), + help="Outcrop the image with one or more direction/pixel pairs: e.g. -c top 64 bottom 128 left 64 right 64", ) outpainting_group.add_argument( - '--force_outpaint', - action='store_true', + "--force_outpaint", + action="store_true", default=False, - help='Force outpainting if you have no inpainting mask to pass', + help="Force outpainting if you have no inpainting mask to pass", ) outpainting_group.add_argument( - '--seam_size', + "--seam_size", type=int, default=0, - help='When outpainting, size of the mask around the seam between original and outpainted image', + help="When outpainting, size of the mask around the seam between original and outpainted image", ) outpainting_group.add_argument( - '--seam_blur', + "--seam_blur", type=int, default=0, - help='When outpainting, the amount to blur the seam inwards', + help="When outpainting, the amount to blur the seam inwards", ) outpainting_group.add_argument( - '--seam_strength', + "--seam_strength", type=float, default=0.7, - help='When outpainting, the img2img strength to use when filling the seam. Values around 0.7 work well', + help="When outpainting, the img2img strength to use when filling the seam. 
Values around 0.7 work well", ) outpainting_group.add_argument( - '--seam_steps', + "--seam_steps", type=int, default=10, - help='When outpainting, the number of steps to use to fill the seam. Low values (~10) work well', + help="When outpainting, the number of steps to use to fill the seam. Low values (~10) work well", ) outpainting_group.add_argument( - '--tile_size', + "--tile_size", type=int, default=32, - help='When outpainting, the tile size to use for filling outpaint areas', + help="When outpainting, the tile size to use for filling outpaint areas", ) postprocessing_group.add_argument( - '--new_prompt', + "--new_prompt", type=str, - help='Change the text prompt applied during postprocessing (default, use original generation prompt)', + help="Change the text prompt applied during postprocessing (default, use original generation prompt)", ) postprocessing_group.add_argument( - '-ft', - '--facetool', + "-ft", + "--facetool", type=str, - default='gfpgan', - help='Select the face restoration AI to use: gfpgan, codeformer', + default="gfpgan", + help="Select the face restoration AI to use: gfpgan, codeformer", ) postprocessing_group.add_argument( - '-G', - '--facetool_strength', - '--gfpgan_strength', + "-G", + "--facetool_strength", + "--gfpgan_strength", type=float, - help='The strength at which to apply the face restoration to the result.', + help="The strength at which to apply the face restoration to the result.", default=0.0, ) postprocessing_group.add_argument( - '-cf', - '--codeformer_fidelity', + "-cf", + "--codeformer_fidelity", type=float, - help='Used along with CodeFormer. Takes values between 0 and 1. 0 produces high quality but low accuracy. 1 produces high accuracy but low quality.', - default=0.75 + help="Used along with CodeFormer. Takes values between 0 and 1. 0 produces high quality but low accuracy. 1 produces high accuracy but low quality.", + default=0.75, ) postprocessing_group.add_argument( - '-U', - '--upscale', - nargs='+', + "-U", + "--upscale", + nargs="+", type=float, - help='Scale factor (1, 2, 3, 4, etc..) for upscaling final output followed by upscaling strength (0-1.0). If strength not specified, defaults to 0.75', + help="Scale factor (1, 2, 3, 4, etc..) for upscaling final output followed by upscaling strength (0-1.0). If strength not specified, defaults to 0.75", default=None, ) postprocessing_group.add_argument( - '--save_original', - '-save_orig', - action='store_true', - help='Save original. Use it when upscaling to save both versions.', + "--save_original", + "-save_orig", + action="store_true", + help="Save original. Use it when upscaling to save both versions.", ) postprocessing_group.add_argument( - '--embiggen', - '-embiggen', - nargs='+', + "--embiggen", + "-embiggen", + nargs="+", type=float, - help='Arbitrary upscaling using img2img. Provide scale factor (0.75), optionally followed by strength (0.75) and tile overlap proportion (0.25).', + help="Arbitrary upscaling using img2img. Provide scale factor (0.75), optionally followed by strength (0.75) and tile overlap proportion (0.25).", default=None, ) postprocessing_group.add_argument( - '--embiggen_tiles', - '-embiggen_tiles', - nargs='+', + "--embiggen_tiles", + "-embiggen_tiles", + nargs="+", type=int, - help='For embiggen, provide list of tiles to process and replace onto the image e.g. `1 3 5`.', + help="For embiggen, provide list of tiles to process and replace onto the image e.g. 
`1 3 5`.", default=None, ) postprocessing_group.add_argument( - '--embiggen_strength', - '-embiggen_strength', + "--embiggen_strength", + "-embiggen_strength", type=float, - help='The strength of the embiggen img2img step, defaults to 0.4', + help="The strength of the embiggen img2img step, defaults to 0.4", default=None, ) special_effects_group.add_argument( - '--seamless', - action='store_true', - help='Change the model to seamless tiling (circular) mode', + "--seamless", + action="store_true", + help="Change the model to seamless tiling (circular) mode", ) special_effects_group.add_argument( - '--seamless_axes', - default=['x', 'y'], + "--seamless_axes", + default=["x", "y"], type=list[str], - help='Specify which axes to use circular convolution on.', + help="Specify which axes to use circular convolution on.", ) variation_group.add_argument( - '-v', - '--variation_amount', + "-v", + "--variation_amount", default=0.0, type=float, - help='If > 0, generates variations on the initial seed instead of random seeds per iteration. Must be between 0 and 1. Higher values will be more different.' + help="If > 0, generates variations on the initial seed instead of random seeds per iteration. Must be between 0 and 1. Higher values will be more different.", ) variation_group.add_argument( - '-V', - '--with_variations', + "-V", + "--with_variations", default=None, type=str, - help='list of variations to apply, in the format `seed:weight,seed:weight,...' + help="list of variations to apply, in the format `seed:weight,seed:weight,...", ) render_group.add_argument( - '--use_mps_noise', - action='store_true', - dest='use_mps_noise', - help='Simulate noise on M1 systems to get the same results' + "--use_mps_noise", + action="store_true", + dest="use_mps_noise", + help="Simulate noise on M1 systems to get the same results", ) deprecated_group.add_argument( - '-D', - '--out_direction', - nargs='+', + "-D", + "--out_direction", + nargs="+", type=str, - metavar=('direction', 'pixels'), - help='Older outcropping system. Direction to extend the given image (left|right|top|bottom). If a distance pixel value is not specified it defaults to half the image size' + metavar=("direction", "pixels"), + help="Older outcropping system. Direction to extend the given image (left|right|top|bottom). If a distance pixel value is not specified it defaults to half the image size", ) return parser + def format_metadata(**kwargs): - print('format_metadata() is deprecated. Please use metadata_dumps()') + print("format_metadata() is deprecated. 
Please use metadata_dumps()") return metadata_dumps(kwargs) -def metadata_dumps(opt, - seeds=[], - model_hash=None, - postprocessing=None): - ''' + +def metadata_dumps(opt, seeds=[], model_hash=None, postprocessing=None): + """ Given an Args object, returns a dict containing the keys and structure of the proposed stable diffusion metadata standard https://github.com/lstein/stable-diffusion/discussions/392 This is intended to be turned into JSON and stored in the "sd - ''' + """ # top-level metadata minus `image` or `images` metadata = { - 'model' : 'stable diffusion', - 'model_id' : opt.model, - 'model_hash' : model_hash, - 'app_id' : APP_ID, - 'app_version' : APP_VERSION, + "model": "stable diffusion", + "model_id": opt.model, + "model_hash": model_hash, + "app_id": APP_ID, + "app_version": APP_VERSION, } # # add some RFC266 fields that are generated internally, and not as # # user args - image_dict = opt.to_dict( - postprocessing=postprocessing - ) + image_dict = opt.to_dict(postprocessing=postprocessing) # remove any image keys not mentioned in RFC #266 - rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps', - 'cfg_scale','threshold','perlin','step_number','width','height','extra','strength','seamless' - 'init_img','init_mask','facetool','facetool_strength','upscale','h_symmetry_time_pct', - 'v_symmetry_time_pct'] - rfc_dict ={} + rfc266_img_fields = [ + "type", + "postprocessing", + "sampler", + "prompt", + "seed", + "variations", + "steps", + "cfg_scale", + "threshold", + "perlin", + "step_number", + "width", + "height", + "extra", + "strength", + "seamless" "init_img", + "init_mask", + "facetool", + "facetool_strength", + "upscale", + "h_symmetry_time_pct", + "v_symmetry_time_pct", + ] + rfc_dict = {} for item in image_dict.items(): - key,value = item + key, value = item if key in rfc266_img_fields: rfc_dict[key] = value # semantic drift - rfc_dict['sampler'] = image_dict.get('sampler_name',None) + rfc_dict["sampler"] = image_dict.get("sampler_name", None) # display weighted subprompts (liable to change) if opt.prompt: subprompts = split_weighted_subprompts(opt.prompt) - subprompts = [{'prompt':x[0],'weight':x[1]} for x in subprompts] - rfc_dict['prompt'] = subprompts + subprompts = [{"prompt": x[0], "weight": x[1]} for x in subprompts] + rfc_dict["prompt"] = subprompts # 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs - rfc_dict['variations'] = [{'seed':x[0],'weight':x[1]} for x in opt.with_variations] if opt.with_variations else [] + rfc_dict["variations"] = ( + [{"seed": x[0], "weight": x[1]} for x in opt.with_variations] + if opt.with_variations + else [] + ) # if variations are present then we need to replace 'seed' with 'orig_seed' - if hasattr(opt,'first_seed'): - rfc_dict['seed'] = opt.first_seed + if hasattr(opt, "first_seed"): + rfc_dict["seed"] = opt.first_seed if opt.init_img: - rfc_dict['type'] = 'img2img' - rfc_dict['strength_steps'] = rfc_dict.pop('strength') - rfc_dict['orig_hash'] = calculate_init_img_hash(opt.init_img) - rfc_dict['inpaint_replace'] = opt.inpaint_replace + rfc_dict["type"] = "img2img" + rfc_dict["strength_steps"] = rfc_dict.pop("strength") + rfc_dict["orig_hash"] = calculate_init_img_hash(opt.init_img) + rfc_dict["inpaint_replace"] = opt.inpaint_replace else: - rfc_dict['type'] = 'txt2img' - rfc_dict.pop('strength') + rfc_dict["type"] = "txt2img" + rfc_dict.pop("strength") - if len(seeds)==0 and opt.seed: - seeds=[opt.seed] + if len(seeds) == 0 and 
opt.seed: + seeds = [opt.seed] if opt.grid: images = [] for seed in seeds: - rfc_dict['seed'] = seed + rfc_dict["seed"] = seed images.append(copy.copy(rfc_dict)) - metadata['images'] = images + metadata["images"] = images else: # there should only ever be a single seed if we did not generate a grid - assert len(seeds) == 1, 'Expected a single seed' - rfc_dict['seed'] = seeds[0] - metadata['image'] = rfc_dict + assert len(seeds) == 1, "Expected a single seed" + rfc_dict["seed"] = seeds[0] + metadata["image"] = rfc_dict return metadata + @functools.lru_cache(maxsize=50) def args_from_png(png_file_path) -> list[Args]: - ''' + """ Given the path to a PNG file created by invoke.py, retrieves a list of Args objects containing the image data. - ''' + """ try: meta = retrieve_metadata(png_file_path) except AttributeError: - return [legacy_metadata_load({},png_file_path)] + return [legacy_metadata_load({}, png_file_path)] try: return metadata_loads(meta) except: - return [legacy_metadata_load(meta,png_file_path)] + return [legacy_metadata_load(meta, png_file_path)] + @functools.lru_cache(maxsize=50) def metadata_from_png(png_file_path) -> Args: - ''' + """ Given the path to a PNG file created by dream.py, retrieves an Args object containing the image metadata. Note that this returns a single Args object, not multiple. - ''' + """ args_list = args_from_png(png_file_path) - return args_list[0] if len(args_list)>0 else Args() # empty args + return args_list[0] if len(args_list) > 0 else Args() # empty args + def dream_cmd_from_png(png_file_path): opt = metadata_from_png(png_file_path) return opt.dream_prompt_str() + def metadata_loads(metadata) -> list: - ''' + """ Takes the dictionary corresponding to RFC266 (https://github.com/lstein/stable-diffusion/issues/266) and returns a series of opt objects for each of the images described in the dictionary. Note that this returns a list, and not a single object. See metadata_from_png() for a more convenient function for files that contain a single image. 
- ''' + """ results = [] try: - if 'images' in metadata['sd-metadata']: - images = metadata['sd-metadata']['images'] + if "images" in metadata["sd-metadata"]: + images = metadata["sd-metadata"]["images"] else: - images = [metadata['sd-metadata']['image']] + images = [metadata["sd-metadata"]["image"]] for image in images: # repack the prompt and variations - if 'prompt' in image: - image['prompt'] = repack_prompt(image['prompt']) - if 'variations' in image: - image['variations'] = ','.join([':'.join([str(x['seed']),str(x['weight'])]) for x in image['variations']]) + if "prompt" in image: + image["prompt"] = repack_prompt(image["prompt"]) + if "variations" in image: + image["variations"] = ",".join( + [ + ":".join([str(x["seed"]), str(x["weight"])]) + for x in image["variations"] + ] + ) # fix a bit of semantic drift here - image['sampler_name']=image.pop('sampler') + image["sampler_name"] = image.pop("sampler") opt = Args() opt._cmd_switches = Namespace(**image) results.append(opt) except Exception: - import sys, traceback - print('>> could not read metadata',file=sys.stderr) + import sys + import traceback + + print(">> could not read metadata", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) return results -def repack_prompt(prompt_list:list)->str: + +def repack_prompt(prompt_list: list) -> str: # in the common case of no weighting syntax, just return the prompt as is if len(prompt_list) > 1: - return ','.join([':'.join([x['prompt'], str(x['weight'])]) for x in prompt_list]) + return ",".join( + [":".join([x["prompt"], str(x["weight"])]) for x in prompt_list] + ) else: - return prompt_list[0]['prompt'] + return prompt_list[0]["prompt"] + # image can either be a file path on disk or a base64-encoded # representation of the file's contents def calculate_init_img_hash(image_string): - prefix = 'data:image/png;base64,' - hash = None + prefix = "data:image/png;base64," + hash = None if image_string.startswith(prefix): - imagebase64 = image_string[len(prefix):] - imagedata = base64.b64decode(imagebase64) - with open('outputs/test.png','wb') as file: + imagebase64 = image_string[len(prefix) :] + imagedata = base64.b64decode(imagebase64) + with open("outputs/test.png", "wb") as file: file.write(imagedata) sha = hashlib.sha256() sha.update(imagedata) @@ -1319,10 +1359,11 @@ def calculate_init_img_hash(image_string): hash = sha256(image_string) return hash + # Bah. This should be moved somewhere else... 
def sha256(path): sha = hashlib.sha256() - with open(path,'rb') as f: + with open(path, "rb") as f: while True: data = f.read(65536) if not data: @@ -1330,18 +1371,18 @@ def sha256(path): sha.update(data) return sha.hexdigest() -def legacy_metadata_load(meta,pathname) -> Args: + +def legacy_metadata_load(meta, pathname) -> Args: opt = Args() - if 'Dream' in meta and len(meta['Dream']) > 0: - dream_prompt = meta['Dream'] + if "Dream" in meta and len(meta["Dream"]) > 0: + dream_prompt = meta["Dream"] opt.parse_cmd(dream_prompt) - else: # if nothing else, we can get the seed - match = re.search('\d+\.(\d+)',pathname) + else: # if nothing else, we can get the seed + match = re.search("\d+\.(\d+)", pathname) if match: seed = match.groups()[0] opt.seed = seed else: - opt.prompt = '' + opt.prompt = "" opt.seed = 0 return opt - diff --git a/invokeai/backend/config/invokeai_configure.py b/invokeai/backend/config/invokeai_configure.py index 9b1cc5affa..6f0a218dc1 100755 --- a/invokeai/backend/config/invokeai_configure.py +++ b/invokeai/backend/config/invokeai_configure.py @@ -17,8 +17,8 @@ import traceback import warnings from argparse import Namespace from pathlib import Path -from urllib import request from shutil import get_terminal_size +from urllib import request import npyscreen import torch @@ -37,17 +37,20 @@ from transformers import ( import invokeai.configs as configs +from ...frontend.install.model_install import addModelsForm, process_and_execute +from ...frontend.install.widgets import ( + CenteredButtonPress, + IntTitleSlider, + set_min_terminal_size, +) from ..args import PRECISION_CHOICES, Args -from ..globals import Globals, global_config_dir, global_config_file, global_cache_dir -from ...frontend.config.model_install import addModelsForm, process_and_execute +from ..globals import Globals, global_cache_dir, global_config_dir, global_config_file from .model_install_backend import ( default_dataset, download_from_hf, - recommended_datasets, hf_download_with_resume, + recommended_datasets, ) -from ...frontend.config.widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size - warnings.filterwarnings("ignore") @@ -82,6 +85,7 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file # -Ak_euler_a -C10.0 """ + # -------------------------------------------- def postscript(errors: None): if not any(errors): @@ -180,13 +184,11 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th # --------------------------------------------- # this will preload the Bert tokenizer fles def download_bert(): - print( - "Installing bert tokenizer...", - file=sys.stderr - ) + print("Installing bert tokenizer...", file=sys.stderr) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) from transformers import BertTokenizerFast + download_from_hf(BertTokenizerFast, "bert-base-uncased") @@ -197,12 +199,14 @@ def download_sd1_clip(): download_from_hf(CLIPTokenizer, version) download_from_hf(CLIPTextModel, version) + # --------------------------------------------- def download_sd2_clip(): - version = 'stabilityai/stable-diffusion-2' + version = "stabilityai/stable-diffusion-2" print("Installing SD2 clip model...", file=sys.stderr) - download_from_hf(CLIPTokenizer, version, subfolder='tokenizer') - download_from_hf(CLIPTextModel, version, subfolder='text_encoder') + download_from_hf(CLIPTokenizer, version, subfolder="tokenizer") + download_from_hf(CLIPTextModel, version, subfolder="text_encoder") + # 
--------------------------------------------- def download_realesrgan(): @@ -323,13 +327,13 @@ def get_root(root: str = None) -> str: class editOptsForm(npyscreen.FormMultiPage): # for responsive resizing - disabled # FIX_MINIMUM_SIZE_WHEN_CREATED = False - + def create(self): program_opts = self.parentApp.program_opts old_opts = self.parentApp.invokeai_opts first_time = not (Globals.root / Globals.initfile).exists() access_token = HfFolder.get_token() - window_width,window_height = get_terminal_size() + window_width, window_height = get_terminal_size() for i in [ "Configure startup settings. You can come back and change these later.", "Use ctrl-N and ctrl-P to move to the ext and
revious fields.", @@ -681,6 +685,7 @@ def run_console_ui( else: return (editApp.new_opts, editApp.user_selections) + # ------------------------------------- def write_opts(opts: Namespace, init_file: Path): """ @@ -701,8 +706,8 @@ def write_opts(opts: Namespace, init_file: Path): "^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)" ) # fix windows paths - opts.outdir = opts.outdir.replace('\\','/') - opts.embedding_path = opts.embedding_path.replace('\\','/') + opts.outdir = opts.outdir.replace("\\", "/") + opts.embedding_path = opts.embedding_path.replace("\\", "/") new_file = f"{init_file}.new" try: lines = [x.strip() for x in open(init_file, "r").readlines()] @@ -855,6 +860,7 @@ def main(): except KeyboardInterrupt: print("\nGoodbye! Come back soon.") + # ------------------------------------- if __name__ == "__main__": main() diff --git a/invokeai/backend/config/model_install_backend.py b/invokeai/backend/config/model_install_backend.py index 6157c2b2b4..1180991d06 100644 --- a/invokeai/backend/config/model_install_backend.py +++ b/invokeai/backend/config/model_install_backend.py @@ -8,6 +8,7 @@ import sys import warnings from pathlib import Path from tempfile import TemporaryFile +from typing import List import requests from diffusers import AutoencoderKL @@ -15,12 +16,12 @@ from huggingface_hub import hf_hub_url from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from tqdm import tqdm -from typing import List import invokeai.configs as configs -from ..stable_diffusion import StableDiffusionGeneratorPipeline + from ..globals import Globals, global_cache_dir, global_config_dir from ..model_management import ModelManager +from ..stable_diffusion import StableDiffusionGeneratorPipeline warnings.filterwarnings("ignore") @@ -44,45 +45,49 @@ Config_preamble = """ # was trained on. """ + def default_config_file(): return Path(global_config_dir()) / "models.yaml" + def sd_configs(): return Path(global_config_dir()) / "stable-diffusion" + def initial_models(): global Datasets if Datasets: return Datasets return (Datasets := OmegaConf.load(Dataset_path)) + def install_requested_models( - install_initial_models: List[str] = None, - remove_models: List[str] = None, - scan_directory: Path = None, - external_models: List[str] = None, - scan_at_startup: bool = False, - convert_to_diffusers: bool = False, - precision: str = "float16", - purge_deleted: bool = False, - config_file_path: Path = None, + install_initial_models: List[str] = None, + remove_models: List[str] = None, + scan_directory: Path = None, + external_models: List[str] = None, + scan_at_startup: bool = False, + convert_to_diffusers: bool = False, + precision: str = "float16", + purge_deleted: bool = False, + config_file_path: Path = None, ): - ''' + """ Entry point for installing/deleting starter models, or installing external models. 
- ''' - config_file_path=config_file_path or default_config_file() + """ + config_file_path = config_file_path or default_config_file() if not config_file_path.exists(): - open(config_file_path,'w') - - model_manager= ModelManager(OmegaConf.load(config_file_path),precision=precision) - + open(config_file_path, "w") + + model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision) + if remove_models and len(remove_models) > 0: print("== DELETING UNCHECKED STARTER MODELS ==") for model in remove_models: - print(f'{model}...') + print(f"{model}...") model_manager.del_model(model, delete_files=purge_deleted) model_manager.commit(config_file_path) - + if install_initial_models and len(install_initial_models) > 0: print("== INSTALLING SELECTED STARTER MODELS ==") successfully_downloaded = download_weight_datasets( @@ -96,20 +101,20 @@ def install_requested_models( # due to above, we have to reload the model manager because conf file # was changed behind its back - model_manager= ModelManager(OmegaConf.load(config_file_path),precision=precision) + model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision) external_models = external_models or list() if scan_directory: external_models.append(str(scan_directory)) - if len(external_models)>0: + if len(external_models) > 0: print("== INSTALLING EXTERNAL MODELS ==") for path_url_or_repo in external_models: try: model_manager.heuristic_import( path_url_or_repo, convert=convert_to_diffusers, - commit_to_conf=config_file_path + commit_to_conf=config_file_path, ) except KeyboardInterrupt: sys.exit(-1) @@ -117,17 +122,18 @@ def install_requested_models( pass if scan_at_startup and scan_directory.is_dir(): - argument = '--autoconvert' if convert_to_diffusers else '--autoimport' + argument = "--autoconvert" if convert_to_diffusers else "--autoimport" initfile = Path(Globals.root, Globals.initfile) - replacement = Path(Globals.root, f'{Globals.initfile}.new') - directory = str(scan_directory).replace('\\','/') - with open(initfile,'r') as input: - with open(replacement,'w') as output: + replacement = Path(Globals.root, f"{Globals.initfile}.new") + directory = str(scan_directory).replace("\\", "/") + with open(initfile, "r") as input: + with open(replacement, "w") as output: while line := input.readline(): if not line.startswith(argument): output.writelines([line]) - output.writelines([f'{argument} {directory}']) - os.replace(replacement,initfile) + output.writelines([f"{argument} {directory}"]) + os.replace(replacement, initfile) + # ------------------------------------- def yes_or_no(prompt: str, default_yes=True): @@ -183,7 +189,9 @@ def migrate_models_ckpt(): if not os.path.exists(os.path.join(model_path, "model.ckpt")): return new_name = initial_models()["stable-diffusion-1.4"]["file"] - print('The Stable Diffusion v4.1 "model.ckpt" is already installed. The name will be changed to {new_name} to avoid confusion.') + print( + 'The Stable Diffusion v4.1 "model.ckpt" is already installed. The name will be changed to {new_name} to avoid confusion.' 
+ ) print(f"model.ckpt => {new_name}") os.replace( os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name) @@ -383,7 +391,8 @@ def update_config_file(successfully_downloaded: dict, config_file: Path): # --------------------------------------------- def new_config_file_contents( - successfully_downloaded: dict, config_file: Path, + successfully_downloaded: dict, + config_file: Path, ) -> str: if config_file.exists(): conf = OmegaConf.load(str(config_file.expanduser().resolve())) @@ -413,7 +422,9 @@ def new_config_file_contents( stanza["weights"] = os.path.relpath( successfully_downloaded[model], start=Globals.root ) - stanza["config"] = os.path.normpath(os.path.join(sd_configs(), mod["config"])) + stanza["config"] = os.path.normpath( + os.path.join(sd_configs(), mod["config"]) + ) if "vae" in mod: if "file" in mod["vae"]: stanza["vae"] = os.path.normpath( @@ -445,7 +456,7 @@ def delete_weights(model_name: str, conf_stanza: dict): print( f"\n** The checkpoint version of {model_name} is superseded by the diffusers version. Deleting the original file {weights}?" ) - + weights = Path(weights) if not weights.is_absolute(): weights = Path(Globals.root) / weights diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py index 329e1b50bd..8f2992db0c 100644 --- a/invokeai/backend/generate.py +++ b/invokeai/backend/generate.py @@ -25,18 +25,20 @@ from omegaconf import OmegaConf from PIL import Image, ImageOps from pytorch_lightning import logging, seed_everything -from . import ModelManager -from .prompting import get_uc_and_c_and_ec -from .stable_diffusion import (DDIMSampler, KSampler, PLMSSampler, HuggingFaceConceptsLibrary) -from .generator import infill_methods -from .util import choose_precision, choose_torch_device -from .image_util import (InitImageResizer, - PngWriter, - Txt2Mask, - configure_model_padding) - -from .globals import Globals, global_cache_dir +from .model_management import ModelManager from .args import metadata_from_png +from .generator import infill_methods +from .globals import Globals, global_cache_dir +from .image_util import InitImageResizer, PngWriter, Txt2Mask, configure_model_padding +from .prompting import get_uc_and_c_and_ec +from .stable_diffusion import ( + DDIMSampler, + HuggingFaceConceptsLibrary, + KSampler, + PLMSSampler, +) +from .util import choose_precision, choose_torch_device + def fix_func(orig): if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): @@ -324,8 +326,8 @@ class Generate: variation_amount=0.0, threshold=0.0, perlin=0.0, - h_symmetry_time_pct = None, - v_symmetry_time_pct = None, + h_symmetry_time_pct=None, + v_symmetry_time_pct=None, karras_max=None, outdir=None, # these are specific to img2img and inpaint @@ -713,7 +715,7 @@ class Generate: prompt, model=self.model, skip_normalize_legacy_blend=opt.skip_normalize, - log_tokens=ldm.invoke.conditioning.log_tokenization, + log_tokens=invokeai.backend.prompting.conditioning.log_tokenization, ) if tool in ("gfpgan", "codeformer", "upscale"): @@ -737,7 +739,7 @@ class Generate: ) elif tool == "outcrop": - from ldm.invoke.restoration.outcrop import Outcrop + from .restoration.outcrop import Outcrop extend_instructions = {} for direction, pixels in _pairwise(opt.outcrop): @@ -790,7 +792,7 @@ class Generate: clear_cuda_cache=self.clear_cuda_cache, ) elif tool == "outpaint": - from ldm.invoke.restoration.outpaint import Outpaint + from .restoration.outpaint import Outpaint restorer = Outpaint(image, self) return restorer.process(opt, args, 
image_callback=callback, prefix=prefix) @@ -812,7 +814,6 @@ class Generate: hires_fix: bool = False, force_outpaint: bool = False, ): - if hires_fix: return self._make_txt2img2img() @@ -960,7 +961,7 @@ class Generate: seed_everything(random.randrange(0, np.iinfo(np.uint32).max)) if self.embedding_path is not None: - print(f'>> Loading embeddings from {self.embedding_path}') + print(f">> Loading embeddings from {self.embedding_path}") for root, _, files in os.walk(self.embedding_path): for name in files: ti_path = os.path.join(root, name) @@ -1015,7 +1016,6 @@ class Generate: image_callback=None, prefix=None, ): - results = [] for r in image_list: image, seed = r diff --git a/invokeai/backend/generator/__init__.py b/invokeai/backend/generator/__init__.py index 4d61779472..b01e93ad81 100644 --- a/invokeai/backend/generator/__init__.py +++ b/invokeai/backend/generator/__init__.py @@ -1,5 +1,5 @@ -''' +""" Initialization file for the invokeai.generator package -''' +""" from .base import Generator from .inpaint import infill_methods diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py index 767b632103..831c941ff4 100644 --- a/invokeai/backend/generator/base.py +++ b/invokeai/backend/generator/base.py @@ -1,7 +1,7 @@ -''' +""" Base class for invokeai.backend.generator.* including img2img, txt2img, and inpaint -''' +""" from __future__ import annotations import os @@ -9,24 +9,25 @@ import os.path as osp import random import traceback from contextlib import nullcontext +from pathlib import Path import cv2 import numpy as np import torch - -from PIL import Image, ImageFilter, ImageChops from diffusers import DiffusionPipeline from einops import rearrange -from pathlib import Path +from PIL import Image, ImageChops, ImageFilter from pytorch_lightning import seed_everything from tqdm import trange import invokeai.assets.web as web_assets + from ..stable_diffusion.diffusion.ddpm import DiffusionWrapper from ..util.util import rand_perlin_2d downsampling = 8 -CAUTION_IMG = 'caution.png' +CAUTION_IMG = "caution.png" + class Generator: downsampling_factor: int @@ -39,7 +40,7 @@ class Generator: self.precision = precision self.seed = None self.latent_channels = model.channels - self.downsampling_factor = downsampling # BUG: should come from model or config + self.downsampling_factor = downsampling # BUG: should come from model or config self.safety_checker = None self.perlin = 0.0 self.threshold = 0 @@ -50,56 +51,73 @@ class Generator: self.caution_img = None # this is going to be overridden in img2img.py, txt2img.py and inpaint.py - def get_make_image(self,prompt,**kwargs): + def get_make_image(self, prompt, **kwargs): """ Returns a function returning an image derived from the prompt and the initial image Return value depends on the seed at the time you call it """ - raise NotImplementedError("image_iterator() must be implemented in a descendent class") + raise NotImplementedError( + "image_iterator() must be implemented in a descendent class" + ) def set_variation(self, seed, variation_amount, with_variations): - self.seed = seed + self.seed = seed self.variation_amount = variation_amount - self.with_variations = with_variations + self.with_variations = with_variations - def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None, - image_callback=None, step_callback=None, threshold=0.0, perlin=0.0, - h_symmetry_time_pct=None, v_symmetry_time_pct=None, - safety_checker:dict=None, - free_gpu_mem: bool=False, - **kwargs): + def generate( + self, + 
prompt, + init_image, + width, + height, + sampler, + iterations=1, + seed=None, + image_callback=None, + step_callback=None, + threshold=0.0, + perlin=0.0, + h_symmetry_time_pct=None, + v_symmetry_time_pct=None, + safety_checker: dict = None, + free_gpu_mem: bool = False, + **kwargs, + ): scope = nullcontext self.safety_checker = safety_checker self.free_gpu_mem = free_gpu_mem attention_maps_images = [] - attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image()) + attention_maps_callback = lambda saver: attention_maps_images.append( + saver.get_stacked_maps_image() + ) make_image = self.get_make_image( prompt, - sampler = sampler, - init_image = init_image, - width = width, - height = height, - step_callback = step_callback, - threshold = threshold, - perlin = perlin, - h_symmetry_time_pct = h_symmetry_time_pct, - v_symmetry_time_pct = v_symmetry_time_pct, - attention_maps_callback = attention_maps_callback, - **kwargs + sampler=sampler, + init_image=init_image, + width=width, + height=height, + step_callback=step_callback, + threshold=threshold, + perlin=perlin, + h_symmetry_time_pct=h_symmetry_time_pct, + v_symmetry_time_pct=v_symmetry_time_pct, + attention_maps_callback=attention_maps_callback, + **kwargs, ) - results = [] - seed = seed if seed is not None and seed >= 0 else self.new_seed() - first_seed = seed + results = [] + seed = seed if seed is not None and seed >= 0 else self.new_seed() + first_seed = seed seed, initial_noise = self.generate_initial_noise(seed, width, height) # There used to be an additional self.model.ema_scope() here, but it breaks # the inpaint-1.5 model. Not sure what it did.... ? with scope(self.model.device.type): - for n in trange(iterations, desc='Generating'): + for n in trange(iterations, desc="Generating"): x_T = None if self.variation_amount > 0: seed_everything(seed) - target_noise = self.get_noise(width,height) + target_noise = self.get_noise(width, height) x_T = self.slerp(self.variation_amount, initial_noise, target_noise) elif initial_noise is not None: # i.e. we specified particular variations @@ -107,9 +125,9 @@ class Generator: else: seed_everything(seed) try: - x_T = self.get_noise(width,height) + x_T = self.get_noise(width, height) except: - print('** An error occurred while getting initial noise **') + print("** An error occurred while getting initial noise **") print(traceback.format_exc()) image = make_image(x_T) @@ -120,19 +138,30 @@ class Generator: results.append([image, seed]) if image_callback is not None: - attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1] - image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image) + attention_maps_image = ( + None + if len(attention_maps_images) == 0 + else attention_maps_images[-1] + ) + image_callback( + image, + seed, + first_seed=first_seed, + attention_maps_image=attention_maps_image, + ) seed = self.new_seed() # Free up memory from the last generation. 
- clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None + clear_cuda_cache = ( + kwargs["clear_cuda_cache"] if "clear_cuda_cache" in kwargs else None + ) if clear_cuda_cache is not None: clear_cuda_cache() return results - def sample_to_image(self,samples)->Image.Image: + def sample_to_image(self, samples) -> Image.Image: """ Given samples returned from a sampler, converts it into a PIL Image @@ -141,18 +170,30 @@ class Generator: image = self.model.decode_latents(samples) return self.model.numpy_to_pil(image)[0] - def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image: + def repaste_and_color_correct( + self, + result: Image.Image, + init_image: Image.Image, + init_mask: Image.Image, + mask_blur_radius: int = 8, + ) -> Image.Image: if init_image is None or init_mask is None: return result # Get the original alpha channel of the mask if there is one. # Otherwise it is some other black/white image format ('1', 'L' or 'RGB') - pil_init_mask = init_mask.getchannel('A') if init_mask.mode == 'RGBA' else init_mask.convert('L') - pil_init_image = init_image.convert('RGBA') # Add an alpha channel if one doesn't exist + pil_init_mask = ( + init_mask.getchannel("A") + if init_mask.mode == "RGBA" + else init_mask.convert("L") + ) + pil_init_image = init_image.convert( + "RGBA" + ) # Add an alpha channel if one doesn't exist # Build an image with only visible pixels from source to use as reference for color-matching. - init_rgb_pixels = np.asarray(init_image.convert('RGB'), dtype=np.uint8) - init_a_pixels = np.asarray(pil_init_image.getchannel('A'), dtype=np.uint8) + init_rgb_pixels = np.asarray(init_image.convert("RGB"), dtype=np.uint8) + init_a_pixels = np.asarray(pil_init_image.getchannel("A"), dtype=np.uint8) init_mask_pixels = np.asarray(pil_init_mask, dtype=np.uint8) # Get numpy version of result @@ -171,44 +212,70 @@ class Generator: # Color correct np_matched_result = np_image.copy() - np_matched_result[:,:,:] = (((np_matched_result[:,:,:].astype(np.float32) - gen_means[None,None,:]) / gen_std[None,None,:]) * init_std[None,None,:] + init_means[None,None,:]).clip(0, 255).astype(np.uint8) - matched_result = Image.fromarray(np_matched_result, mode='RGB') + np_matched_result[:, :, :] = ( + ( + ( + ( + np_matched_result[:, :, :].astype(np.float32) + - gen_means[None, None, :] + ) + / gen_std[None, None, :] + ) + * init_std[None, None, :] + + init_means[None, None, :] + ) + .clip(0, 255) + .astype(np.uint8) + ) + matched_result = Image.fromarray(np_matched_result, mode="RGB") else: - matched_result = Image.fromarray(np_image, mode='RGB') + matched_result = Image.fromarray(np_image, mode="RGB") # Blur the mask out (into init image) by specified amount if mask_blur_radius > 0: nm = np.asarray(pil_init_mask, dtype=np.uint8) - nmd = cv2.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2)) - pmd = Image.fromarray(nmd, mode='L') + nmd = cv2.erode( + nm, + kernel=np.ones((3, 3), dtype=np.uint8), + iterations=int(mask_blur_radius / 2), + ) + pmd = Image.fromarray(nmd, mode="L") blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius)) else: blurred_init_mask = pil_init_mask - multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1]) + multiplied_blurred_init_mask = ImageChops.multiply( + blurred_init_mask, self.pil_image.split()[-1] + ) # Paste original on color-corrected generation (using blurred 
mask) - matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask) + matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask) return matched_result - def sample_to_lowres_estimated_image(self,samples): + def sample_to_lowres_estimated_image(self, samples): # origingally adapted from code by @erucipe and @keturn here: # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 # these updated numbers for v1.5 are from @torridgristle - v1_5_latent_rgb_factors = torch.tensor([ - # R G B - [ 0.3444, 0.1385, 0.0670], # L1 - [ 0.1247, 0.4027, 0.1494], # L2 - [-0.3192, 0.2513, 0.2103], # L3 - [-0.1307, -0.1874, -0.7445] # L4 - ], dtype=samples.dtype, device=samples.device) + v1_5_latent_rgb_factors = torch.tensor( + [ + # R G B + [0.3444, 0.1385, 0.0670], # L1 + [0.1247, 0.4027, 0.1494], # L2 + [-0.3192, 0.2513, 0.2103], # L3 + [-0.1307, -0.1874, -0.7445], # L4 + ], + dtype=samples.dtype, + device=samples.device, + ) latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors - latents_ubyte = (((latent_image + 1) / 2) - .clamp(0, 1) # change scale from -1..1 to 0..1 - .mul(0xFF) # to 0..255 - .byte()).cpu() + latents_ubyte = ( + ((latent_image + 1) / 2) + .clamp(0, 1) # change scale from -1..1 to 0..1 + .mul(0xFF) # to 0..255 + .byte() + ).cpu() return Image.fromarray(latents_ubyte.numpy()) @@ -217,38 +284,45 @@ class Generator: if self.variation_amount > 0 or len(self.with_variations) > 0: # use fixed initial noise plus random noise per iteration seed_everything(seed) - initial_noise = self.get_noise(width,height) + initial_noise = self.get_noise(width, height) for v_seed, v_weight in self.with_variations: seed = v_seed seed_everything(seed) - next_noise = self.get_noise(width,height) + next_noise = self.get_noise(width, height) initial_noise = self.slerp(v_weight, initial_noise, next_noise) if self.variation_amount > 0: - random.seed() # reset RNG to an actually random state, so we can get a random seed for variations - seed = random.randrange(0,np.iinfo(np.uint32).max) + random.seed() # reset RNG to an actually random state, so we can get a random seed for variations + seed = random.randrange(0, np.iinfo(np.uint32).max) return (seed, initial_noise) else: return (seed, None) # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height): + def get_noise(self, width, height): """ Returns a tensor filled with random numbers, either form a normal distribution (txt2img) or from the latent image (img2img, inpaint) """ - raise NotImplementedError("get_noise() must be implemented in a descendent class") + raise NotImplementedError( + "get_noise() must be implemented in a descendent class" + ) - def get_perlin_noise(self,width,height): - fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device + def get_perlin_noise(self, width, height): + fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device # limit noise to only the diffusion image channels, not the mask channels input_channels = min(self.latent_channels, 4) # round up to the nearest block of 8 temp_width = int((width + 7) / 8) * 8 temp_height = int((height + 7) / 8) * 8 - noise = torch.stack([ - rand_perlin_2d((temp_height, temp_width), - (8, 8), - device = self.model.device).to(fixdevice) for _ in range(input_channels)], dim=0).to(self.model.device) + noise = torch.stack( + [ + rand_perlin_2d( + (temp_height, temp_width), (8, 8), device=self.model.device + ).to(fixdevice) + for _ in 
range(input_channels) + ], + dim=0, + ).to(self.model.device) return noise[0:4, 0:height, 0:width] def new_seed(self): @@ -256,7 +330,7 @@ class Generator: return self.seed def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995): - ''' + """ Spherical linear interpolation Args: t (float/np.ndarray): Float value between 0.0 and 1.0 @@ -266,7 +340,7 @@ class Generator: colineal. Not recommended to alter this. Returns: v2 (np.ndarray): Interpolation vector between v0 and v1 - ''' + """ inputs_are_torch = False if not isinstance(v0, np.ndarray): inputs_are_torch = True @@ -292,15 +366,15 @@ class Generator: return v2 - def safety_check(self,image:Image.Image): - ''' + def safety_check(self, image: Image.Image): + """ If the CompViz safety checker flags an NSFW image, we blur it out. - ''' + """ import diffusers - checker = self.safety_checker['checker'] - extractor = self.safety_checker['extractor'] + checker = self.safety_checker["checker"] + extractor = self.safety_checker["extractor"] features = extractor([image], return_tensors="pt") features.to(self.model.device) @@ -309,19 +383,23 @@ class Generator: x_image = x_image[None].transpose(0, 3, 1, 2) diffusers.logging.set_verbosity_error() - checked_image, has_nsfw_concept = checker(images=x_image, clip_input=features.pixel_values) + checked_image, has_nsfw_concept = checker( + images=x_image, clip_input=features.pixel_values + ) if has_nsfw_concept[0]: - print('** An image with potential non-safe content has been detected. A blurred image will be returned. **') + print( + "** An image with potential non-safe content has been detected. A blurred image will be returned. **" + ) return self.blur(image) else: return image - def blur(self,input): + def blur(self, input): blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32)) try: caution = self.get_caution_img() if caution: - blurry.paste(caution,(0,0),caution) + blurry.paste(caution, (0, 0), caution) except FileNotFoundError: pass return blurry @@ -332,43 +410,52 @@ class Generator: return self.caution_img path = Path(web_assets.__path__[0]) / CAUTION_IMG caution = Image.open(path) - self.caution_img = caution.resize((caution.width // 2, caution.height //2)) + self.caution_img = caution.resize((caution.width // 2, caution.height // 2)) return self.caution_img # this is a handy routine for debugging use. Given a generated sample, # convert it into a PNG image and store it at the indicated path def save_sample(self, sample, filepath): image = self.sample_to_image(sample) - dirname = os.path.dirname(filepath) or '.' + dirname = os.path.dirname(filepath) or "." 
if not os.path.exists(dirname): - print(f'** creating directory {dirname}') + print(f"** creating directory {dirname}") os.makedirs(dirname, exist_ok=True) - image.save(filepath,'PNG') + image.save(filepath, "PNG") - - def torch_dtype(self)->torch.dtype: - return torch.float16 if self.precision == 'float16' else torch.float32 + def torch_dtype(self) -> torch.dtype: + return torch.float16 if self.precision == "float16" else torch.float32 # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height): - device = self.model.device + def get_noise(self, width, height): + device = self.model.device # limit noise to only the diffusion image channels, not the mask channels input_channels = min(self.latent_channels, 4) - if self.use_mps_noise or device.type == 'mps': - x = torch.randn([1, - input_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - dtype=self.torch_dtype(), - device='cpu').to(device) + if self.use_mps_noise or device.type == "mps": + x = torch.randn( + [ + 1, + input_channels, + height // self.downsampling_factor, + width // self.downsampling_factor, + ], + dtype=self.torch_dtype(), + device="cpu", + ).to(device) else: - x = torch.randn([1, - input_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - dtype=self.torch_dtype(), - device=device) + x = torch.randn( + [ + 1, + input_channels, + height // self.downsampling_factor, + width // self.downsampling_factor, + ], + dtype=self.torch_dtype(), + device=device, + ) if self.perlin > 0.0: - perlin_noise = self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor) - x = (1-self.perlin)*x + self.perlin*perlin_noise + perlin_noise = self.get_perlin_noise( + width // self.downsampling_factor, height // self.downsampling_factor + ) + x = (1 - self.perlin) * x + self.perlin * perlin_noise return x diff --git a/invokeai/backend/generator/embiggen.py b/invokeai/backend/generator/embiggen.py index 23447d59e3..ce9ef4d1b6 100644 --- a/invokeai/backend/generator/embiggen.py +++ b/invokeai/backend/generator/embiggen.py @@ -1,7 +1,7 @@ -''' -invokeai.backend.generator.embiggen descends from ldm.invoke.generator -and generates with invokeai.backend.generator.img2img -''' +""" +invokeai.backend.generator.embiggen descends from .generator +and generates with .generator.img2img +""" import numpy as np import torch @@ -15,23 +15,24 @@ from .img2img import Img2Img class Embiggen(Generator): def __init__(self, model, precision): super().__init__(model, precision) - self.init_latent = None + self.init_latent = None # Replace generate because Embiggen doesn't need/use most of what it does normallly - def generate(self,prompt,iterations=1,seed=None, - image_callback=None, step_callback=None, - **kwargs): - - make_image = self.get_make_image( - prompt, - step_callback = step_callback, - **kwargs - ) - results = [] - seed = seed if seed else self.new_seed() + def generate( + self, + prompt, + iterations=1, + seed=None, + image_callback=None, + step_callback=None, + **kwargs, + ): + make_image = self.get_make_image(prompt, step_callback=step_callback, **kwargs) + results = [] + seed = seed if seed else self.new_seed() # Noise will be generated by the Img2Img generator when called - for _ in trange(iterations, desc='Generating'): + for _ in trange(iterations, desc="Generating"): # make_image will call Img2Img which will do the equivalent of get_noise itself image = make_image() results.append([image, seed]) @@ 
-56,13 +57,15 @@ class Embiggen(Generator): embiggen, embiggen_tiles, step_callback=None, - **kwargs + **kwargs, ): """ Returns a function returning an image derived from the prompt and multi-stage twice-baked potato layering over the img2img on the initial image Return value depends on the seed at the time you call it """ - assert not sampler.uses_inpainting_model(), "--embiggen is not supported by inpainting models" + assert ( + not sampler.uses_inpainting_model() + ), "--embiggen is not supported by inpainting models" # Construct embiggen arg array, and sanity check arguments if embiggen == None: # embiggen can also be called with just embiggen_tiles @@ -70,48 +73,57 @@ class Embiggen(Generator): elif embiggen[0] < 0: embiggen[0] = 1.0 print( - '>> Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !') + ">> Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !" + ) if len(embiggen) < 2: embiggen.append(0.75) elif embiggen[1] > 1.0 or embiggen[1] < 0: embiggen[1] = 0.75 - print('>> Embiggen upscaling strength for ESRGAN must be between 0 and 1, fell back to the default of 0.75 !') + print( + ">> Embiggen upscaling strength for ESRGAN must be between 0 and 1, fell back to the default of 0.75 !" + ) if len(embiggen) < 3: embiggen.append(0.25) elif embiggen[2] < 0: embiggen[2] = 0.25 - print('>> Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !') + print( + ">> Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !" + ) # Convert tiles from their user-freindly count-from-one to count-from-zero, because we need to do modulo math # and then sort them, because... people. if embiggen_tiles: - embiggen_tiles = list(map(lambda n: n-1, embiggen_tiles)) + embiggen_tiles = list(map(lambda n: n - 1, embiggen_tiles)) embiggen_tiles.sort() if strength >= 0.5: - print(f'* WARNING: Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45.') + print( + f"* WARNING: Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45." 
+ ) # Prep img2img generator, since we wrap over it - gen_img2img = Img2Img(self.model,self.precision) + gen_img2img = Img2Img(self.model, self.precision) # Open original init image (not a tensor) to manipulate initsuperimage = Image.open(init_img) with Image.open(init_img) as img: - initsuperimage = img.convert('RGB') + initsuperimage = img.convert("RGB") # Size of the target super init image in pixels initsuperwidth, initsuperheight = initsuperimage.size # Increase by scaling factor if not already resized, using ESRGAN as able if embiggen[0] != 1.0: - initsuperwidth = round(initsuperwidth*embiggen[0]) - initsuperheight = round(initsuperheight*embiggen[0]) + initsuperwidth = round(initsuperwidth * embiggen[0]) + initsuperheight = round(initsuperheight * embiggen[0]) if embiggen[1] > 0: # No point in ESRGAN upscaling if strength is set zero - from ldm.invoke.restoration.realesrgan import ESRGAN + from ..restoration.realesrgan import ESRGAN + esrgan = ESRGAN() print( - f'>> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}') + f">> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}" + ) if embiggen[0] > 2: initsuperimage = esrgan.process( initsuperimage, @@ -130,7 +142,8 @@ class Embiggen(Generator): # but from personal experiance it doesn't greatly improve anything after 4x # Resize to target scaling factor resolution initsuperimage = initsuperimage.resize( - (initsuperwidth, initsuperheight), Image.Resampling.LANCZOS) + (initsuperwidth, initsuperheight), Image.Resampling.LANCZOS + ) # Use width and height as tile widths and height # Determine buffer size in pixels @@ -153,23 +166,24 @@ class Embiggen(Generator): emb_tiles_x = 1 emb_tiles_y = 1 if (initsuperwidth - width) > 0: - emb_tiles_x = ceildiv(initsuperwidth - width, - width - overlap_size_x) + 1 + emb_tiles_x = ceildiv(initsuperwidth - width, width - overlap_size_x) + 1 if (initsuperheight - height) > 0: - emb_tiles_y = ceildiv(initsuperheight - height, - height - overlap_size_y) + 1 + emb_tiles_y = ceildiv(initsuperheight - height, height - overlap_size_y) + 1 # Sanity - assert emb_tiles_x > 1 or emb_tiles_y > 1, f'ERROR: Based on the requested dimensions of {initsuperwidth}x{initsuperheight} and tiles of {width}x{height} you don\'t need to Embiggen! Check your arguments.' + assert ( + emb_tiles_x > 1 or emb_tiles_y > 1 + ), f"ERROR: Based on the requested dimensions of {initsuperwidth}x{initsuperheight} and tiles of {width}x{height} you don't need to Embiggen! Check your arguments." 
# Prep alpha layers -------------- # https://stackoverflow.com/questions/69321734/how-to-create-different-transparency-like-gradient-with-python-pil # agradientL is Left-side transparent - agradientL = Image.linear_gradient('L').rotate( - 90).resize((overlap_size_x, height)) + agradientL = ( + Image.linear_gradient("L").rotate(90).resize((overlap_size_x, height)) + ) # agradientT is Top-side transparent - agradientT = Image.linear_gradient('L').resize((width, overlap_size_y)) + agradientT = Image.linear_gradient("L").resize((width, overlap_size_y)) # radial corner is the left-top corner, made full circle then cut to just the left-top quadrant - agradientC = Image.new('L', (256, 256)) + agradientC = Image.new("L", (256, 256)) for y in range(256): for x in range(256): # Find distance to lower right corner (numpy takes arrays) @@ -177,16 +191,16 @@ class Embiggen(Generator): # Clamp values to max 255 if distanceToLR > 255: distanceToLR = 255 - #Place the pixel as invert of distance + # Place the pixel as invert of distance agradientC.putpixel((x, y), round(255 - distanceToLR)) # Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges # Fits for a left-fading gradient on the bottom side and full opacity on the right side. - agradientAsymC = Image.new('L', (256, 256)) + agradientAsymC = Image.new("L", (256, 256)) for y in range(256): for x in range(256): - value = round(max(0, x-(255-y)) * (255 / max(1,y))) - #Clamp values + value = round(max(0, x - (255 - y)) * (255 / max(1, y))) + # Clamp values value = max(0, value) value = min(255, value) agradientAsymC.putpixel((x, y), value) @@ -204,80 +218,91 @@ class Embiggen(Generator): # make masks with an asymmetric upper-right corner so when the curved transparent corner of the next tile # to its right is placed it doesn't reveal a hard trailing semi-transparent edge in the overlapping space alphaLayerTaC = alphaLayerT.copy() - alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) + alphaLayerTaC.paste( + agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), + (width - overlap_size_x, 0), + ) alphaLayerLTaC = alphaLayerLTC.copy() - alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) + alphaLayerLTaC.paste( + agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), + (width - overlap_size_x, 0), + ) if embiggen_tiles: # Individual unconnected sides alphaLayerR = Image.new("L", (width, height), 255) - alphaLayerR.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) + alphaLayerR.paste(agradientL.rotate(180), (width - overlap_size_x, 0)) alphaLayerB = Image.new("L", (width, height), 255) - alphaLayerB.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) + alphaLayerB.paste(agradientT.rotate(180), (0, height - overlap_size_y)) alphaLayerTB = Image.new("L", (width, height), 255) alphaLayerTB.paste(agradientT, (0, 0)) - alphaLayerTB.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) + alphaLayerTB.paste(agradientT.rotate(180), (0, height - overlap_size_y)) alphaLayerLR = Image.new("L", (width, height), 255) alphaLayerLR.paste(agradientL, (0, 0)) - alphaLayerLR.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) + alphaLayerLR.paste(agradientL.rotate(180), (width - overlap_size_x, 0)) # Sides and corner Layers alphaLayerRBC = Image.new("L", (width, height), 255) - alphaLayerRBC.paste(agradientL.rotate( - 180), 
(width - overlap_size_x, 0)) - alphaLayerRBC.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerRBC.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) + alphaLayerRBC.paste(agradientL.rotate(180), (width - overlap_size_x, 0)) + alphaLayerRBC.paste(agradientT.rotate(180), (0, height - overlap_size_y)) + alphaLayerRBC.paste( + agradientC.rotate(180).resize((overlap_size_x, overlap_size_y)), + (width - overlap_size_x, height - overlap_size_y), + ) alphaLayerLBC = Image.new("L", (width, height), 255) alphaLayerLBC.paste(agradientL, (0, 0)) - alphaLayerLBC.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerLBC.paste(agradientC.rotate(90).resize( - (overlap_size_x, overlap_size_y)), (0, height - overlap_size_y)) + alphaLayerLBC.paste(agradientT.rotate(180), (0, height - overlap_size_y)) + alphaLayerLBC.paste( + agradientC.rotate(90).resize((overlap_size_x, overlap_size_y)), + (0, height - overlap_size_y), + ) alphaLayerRTC = Image.new("L", (width, height), 255) - alphaLayerRTC.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) + alphaLayerRTC.paste(agradientL.rotate(180), (width - overlap_size_x, 0)) alphaLayerRTC.paste(agradientT, (0, 0)) - alphaLayerRTC.paste(agradientC.rotate(270).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0)) + alphaLayerRTC.paste( + agradientC.rotate(270).resize((overlap_size_x, overlap_size_y)), + (width - overlap_size_x, 0), + ) # All but X layers alphaLayerABT = Image.new("L", (width, height), 255) alphaLayerABT.paste(alphaLayerLBC, (0, 0)) - alphaLayerABT.paste(agradientL.rotate( - 180), (width - overlap_size_x, 0)) - alphaLayerABT.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) + alphaLayerABT.paste(agradientL.rotate(180), (width - overlap_size_x, 0)) + alphaLayerABT.paste( + agradientC.rotate(180).resize((overlap_size_x, overlap_size_y)), + (width - overlap_size_x, height - overlap_size_y), + ) alphaLayerABL = Image.new("L", (width, height), 255) alphaLayerABL.paste(alphaLayerRTC, (0, 0)) - alphaLayerABL.paste(agradientT.rotate( - 180), (0, height - overlap_size_y)) - alphaLayerABL.paste(agradientC.rotate(180).resize( - (overlap_size_x, overlap_size_y)), (width - overlap_size_x, height - overlap_size_y)) + alphaLayerABL.paste(agradientT.rotate(180), (0, height - overlap_size_y)) + alphaLayerABL.paste( + agradientC.rotate(180).resize((overlap_size_x, overlap_size_y)), + (width - overlap_size_x, height - overlap_size_y), + ) alphaLayerABR = Image.new("L", (width, height), 255) alphaLayerABR.paste(alphaLayerLBC, (0, 0)) alphaLayerABR.paste(agradientT, (0, 0)) - alphaLayerABR.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) + alphaLayerABR.paste( + agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0) + ) alphaLayerABB = Image.new("L", (width, height), 255) alphaLayerABB.paste(alphaLayerRTC, (0, 0)) alphaLayerABB.paste(agradientL, (0, 0)) - alphaLayerABB.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) + alphaLayerABB.paste( + agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0) + ) # All-around layer alphaLayerAA = Image.new("L", (width, height), 255) alphaLayerAA.paste(alphaLayerABT, (0, 0)) alphaLayerAA.paste(agradientT, (0, 0)) - alphaLayerAA.paste(agradientC.resize( - (overlap_size_x, overlap_size_y)), (0, 0)) - alphaLayerAA.paste(agradientC.rotate(270).resize( - (overlap_size_x, 
overlap_size_y)), (width - overlap_size_x, 0)) + alphaLayerAA.paste( + agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0) + ) + alphaLayerAA.paste( + agradientC.rotate(270).resize((overlap_size_x, overlap_size_y)), + (width - overlap_size_x, 0), + ) # Clean up temporary gradients del agradientL @@ -287,17 +312,20 @@ class Embiggen(Generator): def make_image(): # Make main tiles ------------------------------------------------- if embiggen_tiles: - print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...') + print(f">> Making {len(embiggen_tiles)} Embiggen tiles...") else: print( - f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...') + f">> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})..." + ) emb_tile_store = [] # Although we could use the same seed for every tile for determinism, at higher strengths this may # produce duplicated structures for each tile and make the tiling effect more obvious # instead track and iterate a local seed we pass to Img2Img seed = self.seed - seedintlimit = np.iinfo(np.uint32).max - 1 # only retreive this one from numpy + seedintlimit = ( + np.iinfo(np.uint32).max - 1 + ) # only retreive this one from numpy for tile in range(emb_tiles_x * emb_tiles_y): # Don't iterate on first tile @@ -334,37 +362,38 @@ class Embiggen(Generator): if embiggen_tiles: print( - f'Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)') + f"Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)" + ) else: - print( - f'Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles') + print(f"Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles") # create a torch tensor from an Image - newinitimage = np.array( - newinitimage).astype(np.float32) / 255.0 + newinitimage = np.array(newinitimage).astype(np.float32) / 255.0 newinitimage = newinitimage[None].transpose(0, 3, 1, 2) newinitimage = torch.from_numpy(newinitimage) newinitimage = 2.0 * newinitimage - 1.0 newinitimage = newinitimage.to(self.model.device) - clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None + clear_cuda_cache = ( + kwargs["clear_cuda_cache"] if "clear_cuda_cache" in kwargs else None + ) tile_results = gen_img2img.generate( prompt, - iterations = 1, - seed = seed, - sampler = sampler, - steps = steps, - cfg_scale = cfg_scale, - conditioning = conditioning, - ddim_eta = ddim_eta, - image_callback = None, # called only after the final image is generated - step_callback = step_callback, # called after each intermediate image is generated - width = width, - height = height, - init_image = newinitimage, # notice that init_image is different from init_img - mask_image = None, - strength = strength, - clear_cuda_cache = clear_cuda_cache + iterations=1, + seed=seed, + sampler=sampler, + steps=steps, + cfg_scale=cfg_scale, + conditioning=conditioning, + ddim_eta=ddim_eta, + image_callback=None, # called only after the final image is generated + step_callback=step_callback, # called after each intermediate image is generated + width=width, + height=height, + init_image=newinitimage, # notice that init_image is different from init_img + mask_image=None, + strength=strength, + clear_cuda_cache=clear_cuda_cache, ) emb_tile_store.append(tile_results[0][0]) @@ -373,12 +402,14 @@ class Embiggen(Generator): del newinitimage # Sanity check we have them all - if len(emb_tile_store) == (emb_tiles_x * emb_tiles_y) or (embiggen_tiles != [] 
and len(emb_tile_store) == len(embiggen_tiles)): - outputsuperimage = Image.new( - "RGBA", (initsuperwidth, initsuperheight)) + if len(emb_tile_store) == (emb_tiles_x * emb_tiles_y) or ( + embiggen_tiles != [] and len(emb_tile_store) == len(embiggen_tiles) + ): + outputsuperimage = Image.new("RGBA", (initsuperwidth, initsuperheight)) if embiggen_tiles: outputsuperimage.alpha_composite( - initsuperimage.convert('RGBA'), (0, 0)) + initsuperimage.convert("RGBA"), (0, 0) + ) for tile in range(emb_tiles_x * emb_tiles_y): if embiggen_tiles: if tile in embiggen_tiles: @@ -387,7 +418,7 @@ class Embiggen(Generator): continue else: intileimage = emb_tile_store[tile] - intileimage = intileimage.convert('RGBA') + intileimage = intileimage.convert("RGBA") # Get row and column entries emb_row_i = tile // emb_tiles_x emb_column_i = tile % emb_tiles_x @@ -399,8 +430,7 @@ class Embiggen(Generator): if emb_column_i + 1 == emb_tiles_x: left = initsuperwidth - width else: - left = round(emb_column_i * - (width - overlap_size_x)) + left = round(emb_column_i * (width - overlap_size_x)) if emb_row_i + 1 == emb_tiles_y: top = initsuperheight - height else: @@ -411,33 +441,43 @@ class Embiggen(Generator): # top of image if emb_row_i == 0: if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) not in embiggen_tiles: # Look-ahead down + if (tile + 1) in embiggen_tiles: # Look-ahead right + if ( + tile + emb_tiles_x + ) not in embiggen_tiles: # Look-ahead down intileimage.putalpha(alphaLayerB) # Otherwise do nothing on this tile - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only + elif ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down only intileimage.putalpha(alphaLayerR) else: intileimage.putalpha(alphaLayerRBC) elif emb_column_i == emb_tiles_x - 1: - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + if ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down intileimage.putalpha(alphaLayerL) else: intileimage.putalpha(alphaLayerLBC) else: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + if (tile + 1) in embiggen_tiles: # Look-ahead right + if ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down intileimage.putalpha(alphaLayerL) else: intileimage.putalpha(alphaLayerLBC) - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only + elif ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down only intileimage.putalpha(alphaLayerLR) else: intileimage.putalpha(alphaLayerABT) # bottom of image elif emb_row_i == emb_tiles_y - 1: if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right + if (tile + 1) in embiggen_tiles: # Look-ahead right intileimage.putalpha(alphaLayerTaC) else: intileimage.putalpha(alphaLayerRTC) @@ -445,34 +485,44 @@ class Embiggen(Generator): # No tiles to look ahead to intileimage.putalpha(alphaLayerLTC) else: - if (tile+1) in embiggen_tiles: # Look-ahead right + if (tile + 1) in embiggen_tiles: # Look-ahead right intileimage.putalpha(alphaLayerLTaC) else: intileimage.putalpha(alphaLayerABB) # vertical middle of image else: if emb_column_i == 0: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + if (tile + 1) in embiggen_tiles: # Look-ahead right + if ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down intileimage.putalpha(alphaLayerTaC) else: intileimage.putalpha(alphaLayerTB) - elif (tile+emb_tiles_x) in embiggen_tiles: # 
Look-ahead down only + elif ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down only intileimage.putalpha(alphaLayerRTC) else: intileimage.putalpha(alphaLayerABL) elif emb_column_i == emb_tiles_x - 1: - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + if ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down intileimage.putalpha(alphaLayerLTC) else: intileimage.putalpha(alphaLayerABR) else: - if (tile+1) in embiggen_tiles: # Look-ahead right - if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down + if (tile + 1) in embiggen_tiles: # Look-ahead right + if ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down intileimage.putalpha(alphaLayerLTaC) else: intileimage.putalpha(alphaLayerABR) - elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only + elif ( + tile + emb_tiles_x + ) in embiggen_tiles: # Look-ahead down only intileimage.putalpha(alphaLayerABB) else: intileimage.putalpha(alphaLayerAA) @@ -481,21 +531,28 @@ class Embiggen(Generator): if emb_row_i == 0 and emb_column_i >= 1: intileimage.putalpha(alphaLayerL) elif emb_row_i >= 1 and emb_column_i == 0: - if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right + if ( + emb_column_i + 1 == emb_tiles_x + ): # If we don't have anything that can be placed to the right intileimage.putalpha(alphaLayerT) else: intileimage.putalpha(alphaLayerTaC) else: - if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right + if ( + emb_column_i + 1 == emb_tiles_x + ): # If we don't have anything that can be placed to the right intileimage.putalpha(alphaLayerLTC) else: intileimage.putalpha(alphaLayerLTaC) # Layer tile onto final image outputsuperimage.alpha_composite(intileimage, (left, top)) else: - print('Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.') + print( + "Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation." 
+ ) # after internal loops and patching up return Embiggen image return outputsuperimage + # end of function declaration return make_image diff --git a/invokeai/backend/generator/img2img.py b/invokeai/backend/generator/img2img.py index edd78d6148..f9692b9cc5 100644 --- a/invokeai/backend/generator/img2img.py +++ b/invokeai/backend/generator/img2img.py @@ -1,25 +1,42 @@ -''' -invokeai.backend.generator.img2img descends from ldm.invoke.generator -''' +""" +invokeai.backend.generator.img2img descends from .generator +""" import torch from diffusers import logging +from ..stable_diffusion import ( + ConditioningData, + PostprocessingSettings, + StableDiffusionGeneratorPipeline, +) from .base import Generator -from ..stable_diffusion import (StableDiffusionGeneratorPipeline, - ConditioningData, - PostprocessingSettings - ) + class Img2Img(Generator): def __init__(self, model, precision): super().__init__(model, precision) - self.init_latent = None # by get_noise() + self.init_latent = None # by get_noise() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,init_image,strength,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0, - h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None, - **kwargs): + def get_make_image( + self, + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + init_image, + strength, + step_callback=None, + threshold=0.0, + warmup=0.2, + perlin=0.0, + h_symmetry_time_pct=None, + v_symmetry_time_pct=None, + attention_maps_callback=None, + **kwargs, + ): """ Returns a function returning an image derived from the prompt and the initial image Return value depends on the seed at the time you call it. @@ -30,30 +47,37 @@ class Img2Img(Generator): pipeline: StableDiffusionGeneratorPipeline = self.model pipeline.scheduler = sampler - uc, c, extra_conditioning_info = conditioning - conditioning_data = ( - ConditioningData( - uc, c, cfg_scale, extra_conditioning_info, - postprocessing_settings=PostprocessingSettings( - threshold=threshold, - warmup=warmup, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct - ) - ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) - + uc, c, extra_conditioning_info = conditioning + conditioning_data = ConditioningData( + uc, + c, + cfg_scale, + extra_conditioning_info, + postprocessing_settings=PostprocessingSettings( + threshold=threshold, + warmup=warmup, + h_symmetry_time_pct=h_symmetry_time_pct, + v_symmetry_time_pct=v_symmetry_time_pct, + ), + ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) def make_image(x_T): # FIXME: use x_T for initial seeded noise # We're not at the moment because the pipeline automatically resizes init_image if # necessary, which the x_T input might not match. 
- logging.set_verbosity_error() # quench safety check warnings + logging.set_verbosity_error() # quench safety check warnings pipeline_output = pipeline.img2img_from_embeddings( - init_image, strength, steps, conditioning_data, + init_image, + strength, + steps, + conditioning_data, noise_func=self.get_noise_like, - callback=step_callback + callback=step_callback, ) - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + if ( + pipeline_output.attention_map_saver is not None + and attention_maps_callback is not None + ): attention_maps_callback(pipeline_output.attention_map_saver) return pipeline.numpy_to_pil(pipeline_output.images)[0] @@ -61,11 +85,13 @@ class Img2Img(Generator): def get_noise_like(self, like: torch.Tensor): device = like.device - if device.type == 'mps': - x = torch.randn_like(like, device='cpu').to(device) + if device.type == "mps": + x = torch.randn_like(like, device="cpu").to(device) else: x = torch.randn_like(like, device=device) if self.perlin > 0.0: shape = like.shape - x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) + x = (1 - self.perlin) * x + self.perlin * self.get_perlin_noise( + shape[3], shape[2] + ) return x diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py index a6e6fe4387..f7f21b8906 100644 --- a/invokeai/backend/generator/inpaint.py +++ b/invokeai/backend/generator/inpaint.py @@ -1,33 +1,35 @@ -''' -invokeai.backend.generator.inpaint descends from ldm.invoke.generator -''' +""" +invokeai.backend.generator.inpaint descends from .generator +""" from __future__ import annotations import math -import PIL import cv2 import numpy as np +import PIL import torch -from PIL import Image, ImageFilter, ImageOps, ImageChops +from PIL import Image, ImageChops, ImageFilter, ImageOps -from ..stable_diffusion.diffusers_pipeline import (image_resized_to_grid_as_tensor, - StableDiffusionGeneratorPipeline, - ConditioningData - ) -from .img2img import Img2Img from ..image_util import PatchMatch, debug_image +from ..stable_diffusion.diffusers_pipeline import ( + ConditioningData, + StableDiffusionGeneratorPipeline, + image_resized_to_grid_as_tensor, +) +from .img2img import Img2Img -def infill_methods()->list[str]: +def infill_methods() -> list[str]: methods = [ "tile", "solid", ] if PatchMatch.patchmatch_available(): - methods.insert(0, 'patchmatch') + methods.insert(0, "patchmatch") return methods + class Inpaint(Img2Img): def __init__(self, model, precision): self.inpaint_height = 0 @@ -54,11 +56,11 @@ class Inpaint(Img2Img): np.ravel(image), shape=(nrows, ncols, height, width, depth), strides=(height * _strides[0], width * _strides[1], *_strides), - writeable=False + writeable=False, ) def infill_patchmatch(self, im: Image.Image) -> Image: - if im.mode != 'RGBA': + if im.mode != "RGBA": return im # Skip patchmatch if patchmatch isn't available @@ -66,13 +68,17 @@ class Inpaint(Img2Img): return im # Patchmatch (note, we may want to expose patch_size? 
Increasing it significantly impacts performance though) - im_patched_np = PatchMatch.inpaint(im.convert('RGB'), ImageOps.invert(im.split()[-1]), patch_size = 3) - im_patched = Image.fromarray(im_patched_np, mode = 'RGB') + im_patched_np = PatchMatch.inpaint( + im.convert("RGB"), ImageOps.invert(im.split()[-1]), patch_size=3 + ) + im_patched = Image.fromarray(im_patched_np, mode="RGB") return im_patched - def tile_fill_missing(self, im: Image.Image, tile_size: int = 16, seed: int = None) -> Image: + def tile_fill_missing( + self, im: Image.Image, tile_size: int = 16, seed: int = None + ) -> Image: # Only fill if there's an alpha layer - if im.mode != 'RGBA': + if im.mode != "RGBA": return im a = np.asarray(im, dtype=np.uint8) @@ -80,21 +86,21 @@ class Inpaint(Img2Img): tile_size = (tile_size, tile_size) # Get the image as tiles of a specified size - tiles = self.get_tile_images(a,*tile_size).copy() + tiles = self.get_tile_images(a, *tile_size).copy() # Get the mask as tiles - tiles_mask = tiles[:,:,:,:,3] + tiles_mask = tiles[:, :, :, :, 3] # Find any mask tiles with any fully transparent pixels (we will be replacing these later) tmask_shape = tiles_mask.shape tiles_mask = tiles_mask.reshape(math.prod(tiles_mask.shape)) - n,ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:]) - tiles_mask = (tiles_mask > 0) - tiles_mask = tiles_mask.reshape((n,ny)).all(axis = 1) + n, ny = (math.prod(tmask_shape[0:2])), math.prod(tmask_shape[2:]) + tiles_mask = tiles_mask > 0 + tiles_mask = tiles_mask.reshape((n, ny)).all(axis=1) # Get RGB tiles in single array and filter by the mask tshape = tiles.shape - tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), * tiles.shape[2:])) + tiles_all = tiles.reshape((math.prod(tiles.shape[0:2]), *tiles.shape[2:])) filtered_tiles = tiles_all[tiles_mask] if len(filtered_tiles) == 0: @@ -102,23 +108,32 @@ class Inpaint(Img2Img): # Find all invalid tiles and replace with a random valid tile replace_count = (tiles_mask == False).sum() - rng = np.random.default_rng(seed = seed) - tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[rng.choice(filtered_tiles.shape[0], replace_count),:,:,:] + rng = np.random.default_rng(seed=seed) + tiles_all[np.logical_not(tiles_mask)] = filtered_tiles[ + rng.choice(filtered_tiles.shape[0], replace_count), :, :, : + ] # Convert back to an image tiles_all = tiles_all.reshape(tshape) - tiles_all = tiles_all.swapaxes(1,2) - st = tiles_all.reshape((math.prod(tiles_all.shape[0:2]), math.prod(tiles_all.shape[2:4]), tiles_all.shape[4])) - si = Image.fromarray(st, mode='RGBA') + tiles_all = tiles_all.swapaxes(1, 2) + st = tiles_all.reshape( + ( + math.prod(tiles_all.shape[0:2]), + math.prod(tiles_all.shape[2:4]), + tiles_all.shape[4], + ) + ) + si = Image.fromarray(st, mode="RGBA") return si - def mask_edge(self, mask: Image, edge_size: int, edge_blur: int) -> Image: npimg = np.asarray(mask, dtype=np.uint8) # Detect any partially transparent regions - npgradient = np.uint8(255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0))) + npgradient = np.uint8( + 255 * (1.0 - np.floor(np.abs(0.5 - np.float32(npimg) / 255.0) * 2.0)) + ) # Detect hard edges npedge = cv2.Canny(npimg, threshold1=100, threshold2=200) @@ -127,7 +142,9 @@ class Inpaint(Img2Img): npmask = npgradient + npedge # Expand - npmask = cv2.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2)) + npmask = cv2.dilate( + npmask, np.ones((3, 3), np.uint8), iterations=int(edge_size / 2) + ) new_mask = Image.fromarray(npmask) @@ -136,9 +153,22 @@ 
class Inpaint(Img2Img): return ImageOps.invert(new_mask) - - def seam_paint(self, im: Image.Image, seam_size: int, seam_blur: int, prompt, sampler, steps, cfg_scale, ddim_eta, - conditioning, strength, noise, infill_method, step_callback) -> Image.Image: + def seam_paint( + self, + im: Image.Image, + seam_size: int, + seam_blur: int, + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + strength, + noise, + infill_method, + step_callback, + ) -> Image.Image: hard_mask = self.pil_image.split()[-1].copy() mask = self.mask_edge(hard_mask, seam_size, seam_blur) @@ -149,15 +179,15 @@ class Inpaint(Img2Img): cfg_scale, ddim_eta, conditioning, - init_image = im.copy().convert('RGBA'), - mask_image = mask, - strength = strength, - mask_blur_radius = 0, - seam_size = 0, - step_callback = step_callback, - inpaint_width = im.width, - inpaint_height = im.height, - infill_method = infill_method + init_image=im.copy().convert("RGBA"), + mask_image=mask, + strength=strength, + mask_blur_radius=0, + seam_size=0, + step_callback=step_callback, + inpaint_width=im.width, + inpaint_height=im.height, + infill_method=infill_method, ) seam_noise = self.get_noise(im.width, im.height) @@ -166,28 +196,35 @@ class Inpaint(Img2Img): return result - @torch.no_grad() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning, - init_image: PIL.Image.Image | torch.FloatTensor, - mask_image: PIL.Image.Image | torch.FloatTensor, - strength: float, - mask_blur_radius: int = 8, - # Seam settings - when 0, doesn't fill seam - seam_size: int = 0, - seam_blur: int = 0, - seam_strength: float = 0.7, - seam_steps: int = 10, - tile_size: int = 32, - step_callback=None, - inpaint_replace=False, enable_image_debugging=False, - infill_method = None, - inpaint_width=None, - inpaint_height=None, - inpaint_fill:tuple(int)=(0x7F, 0x7F, 0x7F, 0xFF), - attention_maps_callback=None, - **kwargs): + def get_make_image( + self, + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + init_image: PIL.Image.Image | torch.FloatTensor, + mask_image: PIL.Image.Image | torch.FloatTensor, + strength: float, + mask_blur_radius: int = 8, + # Seam settings - when 0, doesn't fill seam + seam_size: int = 0, + seam_blur: int = 0, + seam_strength: float = 0.7, + seam_steps: int = 10, + tile_size: int = 32, + step_callback=None, + inpaint_replace=False, + enable_image_debugging=False, + infill_method=None, + inpaint_width=None, + inpaint_height=None, + inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF), + attention_maps_callback=None, + **kwargs, + ): """ Returns a function returning an image derived from the prompt and the initial image + mask. 
Return value depends on the seed at @@ -205,33 +242,39 @@ class Inpaint(Img2Img): self.pil_image = init_image.copy() # Do infill - if infill_method == 'patchmatch' and PatchMatch.patchmatch_available(): + if infill_method == "patchmatch" and PatchMatch.patchmatch_available(): init_filled = self.infill_patchmatch(self.pil_image.copy()) - elif infill_method == 'tile': + elif infill_method == "tile": init_filled = self.tile_fill_missing( - self.pil_image.copy(), - seed = self.seed, - tile_size = tile_size + self.pil_image.copy(), seed=self.seed, tile_size=tile_size ) - elif infill_method == 'solid': + elif infill_method == "solid": solid_bg = PIL.Image.new("RGBA", init_image.size, inpaint_fill) init_filled = PIL.Image.alpha_composite(solid_bg, init_image) else: - raise ValueError(f"Non-supported infill type {infill_method}", infill_method) - init_filled.paste(init_image, (0,0), init_image.split()[-1]) + raise ValueError( + f"Non-supported infill type {infill_method}", infill_method + ) + init_filled.paste(init_image, (0, 0), init_image.split()[-1]) # Resize if requested for inpainting if inpaint_width and inpaint_height: init_filled = init_filled.resize((inpaint_width, inpaint_height)) - debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging) + debug_image( + init_filled, "init_filled", debug_status=self.enable_image_debugging + ) # Create init tensor - init_image = image_resized_to_grid_as_tensor(init_filled.convert('RGB')) + init_image = image_resized_to_grid_as_tensor(init_filled.convert("RGB")) if isinstance(mask_image, PIL.Image.Image): self.pil_mask = mask_image.copy() - debug_image(mask_image, "mask_image BEFORE multiply with pil_image", debug_status=self.enable_image_debugging) + debug_image( + mask_image, + "mask_image BEFORE multiply with pil_image", + debug_status=self.enable_image_debugging, + ) init_alpha = self.pil_image.getchannel("A") if mask_image.mode != "L": @@ -244,8 +287,14 @@ class Inpaint(Img2Img): if inpaint_width and inpaint_height: mask_image = mask_image.resize((inpaint_width, inpaint_height)) - debug_image(mask_image, "mask_image AFTER multiply with pil_image", debug_status=self.enable_image_debugging) - mask: torch.FloatTensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) + debug_image( + mask_image, + "mask_image AFTER multiply with pil_image", + debug_status=self.enable_image_debugging, + ) + mask: torch.FloatTensor = image_resized_to_grid_as_tensor( + mask_image, normalize=False + ) else: mask: torch.FloatTensor = mask_image @@ -257,9 +306,9 @@ class Inpaint(Img2Img): # todo: support cross-attention control uc, c, _ = conditioning - conditioning_data = (ConditioningData(uc, c, cfg_scale) - .add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) - + conditioning_data = ConditioningData( + uc, c, cfg_scale + ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) def make_image(x_T): pipeline_output = pipeline.inpaint_from_embeddings( @@ -272,43 +321,71 @@ class Inpaint(Img2Img): callback=step_callback, ) - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + if ( + pipeline_output.attention_map_saver is not None + and attention_maps_callback is not None + ): attention_maps_callback(pipeline_output.attention_map_saver) - result = self.postprocess_size_and_mask(pipeline.numpy_to_pil(pipeline_output.images)[0]) + result = self.postprocess_size_and_mask( + pipeline.numpy_to_pil(pipeline_output.images)[0] + ) # Seam paint if this is our first pass (seam_size 
set to 0 during seam painting) if seam_size > 0: old_image = self.pil_image or init_image old_mask = self.pil_mask or mask_image - result = self.seam_paint(result, seam_size, seam_blur, prompt, sampler, seam_steps, cfg_scale, ddim_eta, - conditioning, seam_strength, x_T, infill_method, step_callback) + result = self.seam_paint( + result, + seam_size, + seam_blur, + prompt, + sampler, + seam_steps, + cfg_scale, + ddim_eta, + conditioning, + seam_strength, + x_T, + infill_method, + step_callback, + ) # Restore original settings - self.get_make_image(prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning, - old_image, - old_mask, - strength, - mask_blur_radius, seam_size, seam_blur, seam_strength, - seam_steps, tile_size, step_callback, - inpaint_replace, enable_image_debugging, - inpaint_width = inpaint_width, - inpaint_height = inpaint_height, - infill_method = infill_method, - **kwargs) + self.get_make_image( + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + old_image, + old_mask, + strength, + mask_blur_radius, + seam_size, + seam_blur, + seam_strength, + seam_steps, + tile_size, + step_callback, + inpaint_replace, + enable_image_debugging, + inpaint_width=inpaint_width, + inpaint_height=inpaint_height, + infill_method=infill_method, + **kwargs, + ) return result return make_image - - def sample_to_image(self, samples)->Image.Image: - gen_result = super().sample_to_image(samples).convert('RGB') + def sample_to_image(self, samples) -> Image.Image: + gen_result = super().sample_to_image(samples).convert("RGB") return self.postprocess_size_and_mask(gen_result) - def postprocess_size_and_mask(self, gen_result: Image.Image) -> Image.Image: debug_image(gen_result, "gen_result", debug_status=self.enable_image_debugging) @@ -319,7 +396,13 @@ class Inpaint(Img2Img): if self.pil_image is None or self.pil_mask is None: return gen_result - corrected_result = self.repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) - debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging) + corrected_result = self.repaste_and_color_correct( + gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius + ) + debug_image( + corrected_result, + "corrected_result", + debug_status=self.enable_image_debugging, + ) return corrected_result diff --git a/invokeai/backend/generator/omnibus.py b/invokeai/backend/generator/omnibus.py deleted file mode 100644 index a6fae3e567..0000000000 --- a/invokeai/backend/generator/omnibus.py +++ /dev/null @@ -1,173 +0,0 @@ -"""omnibus module to be used with the runwayml 9-channel custom inpainting model""" - -import torch -from PIL import Image, ImageOps -from einops import repeat - -from ldm.invoke.devices import choose_autocast -from ldm.invoke.generator.img2img import Img2Img -from ldm.invoke.generator.txt2img import Txt2Img - - -class Omnibus(Img2Img,Txt2Img): - def __init__(self, model, precision): - super().__init__(model, precision) - self.pil_mask = None - self.pil_image = None - - def get_make_image( - self, - prompt, - sampler, - steps, - cfg_scale, - ddim_eta, - conditioning, - width, - height, - init_image = None, - mask_image = None, - strength = None, - step_callback=None, - threshold=0.0, - perlin=0.0, - mask_blur_radius: int = 8, - **kwargs): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it. 
- """ - self.perlin = perlin - num_samples = 1 - - sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - if isinstance(init_image, Image.Image): - self.pil_image = init_image - if init_image.mode != 'RGB': - init_image = init_image.convert('RGB') - init_image = self._image_to_tensor(init_image) - - if isinstance(mask_image, Image.Image): - self.pil_mask = mask_image - - mask_image = ImageChops.multiply(mask_image.convert('L'), self.pil_image.split()[-1]) - mask_image = self._image_to_tensor(ImageOps.invert(mask_image), normalize=False) - - self.mask_blur_radius = mask_blur_radius - - if init_image is not None and mask_image is not None: # inpainting - masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero - - elif init_image is not None: # img2img - scope = choose_autocast(self.precision) - - with scope(self.model.device.type): - self.init_latent = self.model.get_first_stage_encoding( - self.model.encode_first_stage(init_image) - ) # move to latent space - - # create a completely black mask (1s) - mask_image = torch.ones(1, 1, init_image.shape[2], init_image.shape[3], device=self.model.device) - # and the masked image is just a copy of the original - masked_image = init_image - - else: # txt2img - init_image = torch.zeros(1, 3, height, width, device=self.model.device) - mask_image = torch.ones(1, 1, height, width, device=self.model.device) - masked_image = init_image - - self.init_latent = init_image - height = init_image.shape[2] - width = init_image.shape[3] - model = self.model - - def make_image(x_T): - with torch.no_grad(): - scope = choose_autocast(self.precision) - with scope(self.model.device.type): - - batch = self.make_batch_sd( - init_image, - mask_image, - masked_image, - prompt=prompt, - device=model.device, - num_samples=num_samples, - ) - - c = model.cond_stage_model.encode(batch["txt"]) - c_cat = list() - for ck in model.concat_keys: - cc = batch[ck].float() - if ck != model.masked_image_key: - bchw = [num_samples, 4, height//8, width//8] - cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) - else: - cc = model.get_first_stage_encoding(model.encode_first_stage(cc)) - c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - - # cond - cond={"c_concat": [c_cat], "c_crossattn": [c]} - - # uncond cond - uc_cross = model.get_unconditional_conditioning(num_samples, "") - uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]} - shape = [model.channels, height//8, width//8] - - samples, _ = sampler.sample( - batch_size = 1, - S = steps, - x_T = x_T, - conditioning = cond, - shape = shape, - verbose = False, - unconditional_guidance_scale = cfg_scale, - unconditional_conditioning = uc_full, - eta = 1.0, - img_callback = step_callback, - threshold = threshold, - ) - if self.free_gpu_mem: - self.model.model.to("cpu") - return self.sample_to_image(samples) - - return make_image - - def make_batch_sd( - self, - image, - mask, - masked_image, - prompt, - device, - num_samples=1): - batch = { - "image": repeat(image.to(device=device), "1 ... -> n ...", n=num_samples), - "txt": num_samples * [prompt], - "mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples), - "masked_image": repeat(masked_image.to(device=device), "1 ... 
-> n ...", n=num_samples), - } - return batch - - def get_noise(self, width:int, height:int): - if self.init_latent is not None: - height = self.init_latent.shape[2] - width = self.init_latent.shape[3] - return Txt2Img.get_noise(self,width,height) - - - def sample_to_image(self, samples)->Image.Image: - gen_result = super().sample_to_image(samples).convert('RGB') - - if self.pil_image is None or self.pil_mask is None: - return gen_result - if self.pil_image.size != self.pil_mask.size: - return gen_result - - corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) - - return corrected_result diff --git a/invokeai/backend/generator/txt2img.py b/invokeai/backend/generator/txt2img.py index 40094a3033..a9918f81ce 100644 --- a/invokeai/backend/generator/txt2img.py +++ b/invokeai/backend/generator/txt2img.py @@ -1,24 +1,41 @@ -''' +""" invokeai.backend.generator.txt2img inherits from invokeai.backend.generator -''' +""" import PIL.Image import torch +from ..stable_diffusion import ( + ConditioningData, + PostprocessingSettings, + StableDiffusionGeneratorPipeline, +) from .base import Generator -from ..stable_diffusion import (PostprocessingSettings, - StableDiffusionGeneratorPipeline, - ConditioningData - ) + class Txt2Img(Generator): def __init__(self, model, precision): super().__init__(model, precision) @torch.no_grad() - def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, - conditioning,width,height,step_callback=None,threshold=0.0,warmup=0.2,perlin=0.0, - h_symmetry_time_pct=None,v_symmetry_time_pct=None,attention_maps_callback=None, - **kwargs): + def get_make_image( + self, + prompt, + sampler, + steps, + cfg_scale, + ddim_eta, + conditioning, + width, + height, + step_callback=None, + threshold=0.0, + warmup=0.2, + perlin=0.0, + h_symmetry_time_pct=None, + v_symmetry_time_pct=None, + attention_maps_callback=None, + **kwargs, + ): """ Returns a function returning an image derived from the prompt and the initial image Return value depends on the seed at the time you call it @@ -30,33 +47,35 @@ class Txt2Img(Generator): pipeline: StableDiffusionGeneratorPipeline = self.model pipeline.scheduler = sampler - uc, c, extra_conditioning_info = conditioning - conditioning_data = ( - ConditioningData( - uc, c, cfg_scale, extra_conditioning_info, - postprocessing_settings=PostprocessingSettings( - threshold=threshold, - warmup=warmup, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct - ) - ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) + uc, c, extra_conditioning_info = conditioning + conditioning_data = ConditioningData( + uc, + c, + cfg_scale, + extra_conditioning_info, + postprocessing_settings=PostprocessingSettings( + threshold=threshold, + warmup=warmup, + h_symmetry_time_pct=h_symmetry_time_pct, + v_symmetry_time_pct=v_symmetry_time_pct, + ), + ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) def make_image(x_T) -> PIL.Image.Image: pipeline_output = pipeline.image_from_embeddings( - latents=torch.zeros_like(x_T,dtype=self.torch_dtype()), + latents=torch.zeros_like(x_T, dtype=self.torch_dtype()), noise=x_T, num_inference_steps=steps, conditioning_data=conditioning_data, callback=step_callback, ) - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + if ( + pipeline_output.attention_map_saver is not None + and attention_maps_callback is not None + ): 
attention_maps_callback(pipeline_output.attention_map_saver) return pipeline.numpy_to_pil(pipeline_output.images)[0] return make_image - - - diff --git a/invokeai/backend/generator/txt2img2img.py b/invokeai/backend/generator/txt2img2img.py index 67de74fecf..76573b689d 100644 --- a/invokeai/backend/generator/txt2img2img.py +++ b/invokeai/backend/generator/txt2img2img.py @@ -1,6 +1,6 @@ -''' +""" invokeai.backend.generator.txt2img inherits from invokeai.backend.generator -''' +""" import math from typing import Callable, Optional @@ -8,21 +8,40 @@ from typing import Callable, Optional import torch from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error -from .base import Generator -from .diffusers_pipeline import trim_to_multiple_of, StableDiffusionGeneratorPipeline, \ - ConditioningData from ..models import PostprocessingSettings +from .base import Generator +from .diffusers_pipeline import ( + ConditioningData, + StableDiffusionGeneratorPipeline, + trim_to_multiple_of, +) class Txt2Img2Img(Generator): def __init__(self, model, precision): super().__init__(model, precision) - self.init_latent = None # for get_noise() + self.init_latent = None # for get_noise() - def get_make_image(self, prompt:str, sampler, steps:int, cfg_scale:float, ddim_eta, - conditioning, width:int, height:int, strength:float, - step_callback:Optional[Callable]=None, threshold=0.0, warmup=0.2, perlin=0.0, - h_symmetry_time_pct=None, v_symmetry_time_pct=None, attention_maps_callback=None, **kwargs): + def get_make_image( + self, + prompt: str, + sampler, + steps: int, + cfg_scale: float, + ddim_eta, + conditioning, + width: int, + height: int, + strength: float, + step_callback: Optional[Callable] = None, + threshold=0.0, + warmup=0.2, + perlin=0.0, + h_symmetry_time_pct=None, + v_symmetry_time_pct=None, + attention_maps_callback=None, + **kwargs, + ): """ Returns a function returning an image derived from the prompt and the initial image Return value depends on the seed at the time you call it @@ -35,19 +54,20 @@ class Txt2Img2Img(Generator): pipeline.scheduler = sampler uc, c, extra_conditioning_info = conditioning - conditioning_data = ( - ConditioningData( - uc, c, cfg_scale, extra_conditioning_info, - postprocessing_settings = PostprocessingSettings( - threshold=threshold, - warmup=0.2, - h_symmetry_time_pct=h_symmetry_time_pct, - v_symmetry_time_pct=v_symmetry_time_pct - ) - ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)) + conditioning_data = ConditioningData( + uc, + c, + cfg_scale, + extra_conditioning_info, + postprocessing_settings=PostprocessingSettings( + threshold=threshold, + warmup=0.2, + h_symmetry_time_pct=h_symmetry_time_pct, + v_symmetry_time_pct=v_symmetry_time_pct, + ), + ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) def make_image(x_T): - first_pass_latent_output, _ = pipeline.latents_from_embeddings( latents=torch.zeros_like(x_T), num_inference_steps=steps, @@ -61,28 +81,40 @@ class Txt2Img2Img(Generator): init_width = first_pass_latent_output.size()[3] * self.downsampling_factor init_height = first_pass_latent_output.size()[2] * self.downsampling_factor print( - f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling" - ) + f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling" + ) # resizing resized_latents = torch.nn.functional.interpolate( first_pass_latent_output, - size=(height // self.downsampling_factor, width // self.downsampling_factor), 
- mode="bilinear" + size=( + height // self.downsampling_factor, + width // self.downsampling_factor, + ), + mode="bilinear", ) # Free up memory from the last generation. - clear_cuda_cache = kwargs['clear_cuda_cache'] or None + clear_cuda_cache = kwargs["clear_cuda_cache"] or None if clear_cuda_cache is not None: clear_cuda_cache() - second_pass_noise = self.get_noise_like(resized_latents, override_perlin=True) + second_pass_noise = self.get_noise_like( + resized_latents, override_perlin=True + ) # Clear symmetry for the second pass from dataclasses import replace - new_postprocessing_settings = replace(conditioning_data.postprocessing_settings, h_symmetry_time_pct=None) - new_postprocessing_settings = replace(new_postprocessing_settings, v_symmetry_time_pct=None) - new_conditioning_data = replace(conditioning_data, postprocessing_settings=new_postprocessing_settings) + + new_postprocessing_settings = replace( + conditioning_data.postprocessing_settings, h_symmetry_time_pct=None + ) + new_postprocessing_settings = replace( + new_postprocessing_settings, v_symmetry_time_pct=None + ) + new_conditioning_data = replace( + conditioning_data, postprocessing_settings=new_postprocessing_settings + ) verbosity = get_verbosity() set_verbosity_error() @@ -92,15 +124,18 @@ class Txt2Img2Img(Generator): conditioning_data=new_conditioning_data, strength=strength, noise=second_pass_noise, - callback=step_callback) + callback=step_callback, + ) set_verbosity(verbosity) - if pipeline_output.attention_map_saver is not None and attention_maps_callback is not None: + if ( + pipeline_output.attention_map_saver is not None + and attention_maps_callback is not None + ): attention_maps_callback(pipeline_output.attention_map_saver) return pipeline.numpy_to_pil(pipeline_output.images)[0] - # FIXME: do we really need something entirely different for the inpainting model? 
# in the case of the inpainting model being loaded, the trick of @@ -111,19 +146,23 @@ class Txt2Img2Img(Generator): return make_image - def get_noise_like(self, like: torch.Tensor, override_perlin: bool=False): + def get_noise_like(self, like: torch.Tensor, override_perlin: bool = False): device = like.device - if device.type == 'mps': - x = torch.randn_like(like, device='cpu', dtype=self.torch_dtype()).to(device) + if device.type == "mps": + x = torch.randn_like(like, device="cpu", dtype=self.torch_dtype()).to( + device + ) else: x = torch.randn_like(like, device=device, dtype=self.torch_dtype()) if self.perlin > 0.0 and override_perlin == False: shape = like.shape - x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2]) + x = (1 - self.perlin) * x + self.perlin * self.get_perlin_noise( + shape[3], shape[2] + ) return x # returns a tensor filled with random numbers from a normal distribution - def get_noise(self,width,height,scale = True): + def get_noise(self, width, height, scale=True): # print(f"Get noise: {width}x{height}") if scale: # Scale the input width and height for the initial generation @@ -133,7 +172,9 @@ class Txt2Img2Img(Generator): aspect = width / height dimension = self.model.unet.config.sample_size * self.model.vae_scale_factor min_dimension = math.floor(dimension * 0.5) - model_area = dimension * dimension # hardcoded for now since all models are trained on square images + model_area = ( + dimension * dimension + ) # hardcoded for now since all models are trained on square images if aspect > 1.0: init_height = max(min_dimension, math.sqrt(model_area / aspect)) @@ -142,7 +183,9 @@ class Txt2Img2Img(Generator): init_width = max(min_dimension, math.sqrt(model_area * aspect)) init_height = init_width / aspect - scaled_width, scaled_height = trim_to_multiple_of(math.floor(init_width), math.floor(init_height)) + scaled_width, scaled_height = trim_to_multiple_of( + math.floor(init_width), math.floor(init_height) + ) else: scaled_width = width @@ -152,10 +195,14 @@ class Txt2Img2Img(Generator): channels = self.latent_channels if channels == 9: channels = 4 # we don't really want noise for all the mask channels - shape = (1, channels, - scaled_height // self.downsampling_factor, scaled_width // self.downsampling_factor) - if self.use_mps_noise or device.type == 'mps': - tensor = torch.empty(size=shape, device='cpu') + shape = ( + 1, + channels, + scaled_height // self.downsampling_factor, + scaled_width // self.downsampling_factor, + ) + if self.use_mps_noise or device.type == "mps": + tensor = torch.empty(size=shape, device="cpu") tensor = self.get_noise_like(like=tensor).to(device) else: tensor = torch.empty(size=shape, device=device) diff --git a/invokeai/backend/globals.py b/invokeai/backend/globals.py index f41160a39b..5ccc3a6a1a 100644 --- a/invokeai/backend/globals.py +++ b/invokeai/backend/globals.py @@ -1,4 +1,4 @@ -''' +""" invokeai.backend.globals defines a small number of global variables that would otherwise have to be passed through long and complex call chains. 
@@ -9,7 +9,7 @@ the attributes: - initfile - path to the initialization file - try_patchmatch - option to globally disable loading of 'patchmatch' module - always_use_cpu - force use of CPU even if GPU is available -''' +""" import os import os.path as osp @@ -20,12 +20,12 @@ from typing import Union Globals = Namespace() # Where to look for the initialization file and other key components -Globals.initfile = 'invokeai.init' -Globals.models_file = 'models.yaml' -Globals.models_dir = 'models' -Globals.config_dir = 'configs' -Globals.autoscan_dir = 'weights' -Globals.converted_ckpts_dir = 'converted_ckpts' +Globals.initfile = "invokeai.init" +Globals.models_file = "models.yaml" +Globals.models_dir = "models" +Globals.config_dir = "configs" +Globals.autoscan_dir = "weights" +Globals.converted_ckpts_dir = "converted_ckpts" # Set the default root directory. This can be overwritten by explicitly # passing the `--root ` argument on the command line. @@ -34,12 +34,15 @@ Globals.converted_ckpts_dir = 'converted_ckpts' # 2) use VIRTUAL_ENV environment variable, with a check for initfile being there # 3) use ~/invokeai -if os.environ.get('INVOKEAI_ROOT'): - Globals.root = osp.abspath(os.environ.get('INVOKEAI_ROOT')) -elif os.environ.get('VIRTUAL_ENV') and Path(os.environ.get('VIRTUAL_ENV'),'..',Globals.initfile).exists(): - Globals.root = osp.abspath(osp.join(os.environ.get('VIRTUAL_ENV'), '..')) +if os.environ.get("INVOKEAI_ROOT"): + Globals.root = osp.abspath(os.environ.get("INVOKEAI_ROOT")) +elif ( + os.environ.get("VIRTUAL_ENV") + and Path(os.environ.get("VIRTUAL_ENV"), "..", Globals.initfile).exists() +): + Globals.root = osp.abspath(osp.join(os.environ.get("VIRTUAL_ENV"), "..")) else: - Globals.root = osp.abspath(osp.expanduser('~/invokeai')) + Globals.root = osp.abspath(osp.expanduser("~/invokeai")) # Try loading patchmatch Globals.try_patchmatch = True @@ -66,26 +69,33 @@ Globals.ckpt_convert = True # logging tokenization everywhere Globals.log_tokenization = False -def global_config_file()->Path: + +def global_config_file() -> Path: return Path(Globals.root, Globals.config_dir, Globals.models_file) -def global_config_dir()->Path: + +def global_config_dir() -> Path: return Path(Globals.root, Globals.config_dir) -def global_models_dir()->Path: + +def global_models_dir() -> Path: return Path(Globals.root, Globals.models_dir) -def global_autoscan_dir()->Path: + +def global_autoscan_dir() -> Path: return Path(Globals.root, Globals.autoscan_dir) -def global_converted_ckpts_dir()->Path: + +def global_converted_ckpts_dir() -> Path: return Path(global_models_dir(), Globals.converted_ckpts_dir) -def global_set_root(root_dir:Union[str,Path]): + +def global_set_root(root_dir: Union[str, Path]): Globals.root = root_dir -def global_cache_dir(subdir:Union[str,Path]='')->Path: - ''' + +def global_cache_dir(subdir: Union[str, Path] = "") -> Path: + """ Returns Path to the model cache directory. If a subdirectory is provided, it will be appended to the end of the path, allowing for huggingface-style conventions: @@ -98,18 +108,18 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path: One other caveat is that HuggingFace is moving some diffusers models into the "hub" subdirectory as well, so this will need to be revisited from time to time. 
- ''' - home: str = os.getenv('HF_HOME') + """ + home: str = os.getenv("HF_HOME") if home is None: - home = os.getenv('XDG_CACHE_HOME') + home = os.getenv("XDG_CACHE_HOME") if home is not None: # Set `home` to $XDG_CACHE_HOME/huggingface, which is the default location mentioned in HuggingFace Hub Client Library. # See: https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome - home += os.sep + 'huggingface' + home += os.sep + "huggingface" if home is not None: - return Path(home,subdir) + return Path(home, subdir) else: - return Path(Globals.root,'models',subdir) + return Path(Globals.root, "models", subdir) diff --git a/invokeai/backend/image_util/__init__.py b/invokeai/backend/image_util/__init__.py index 3b55f094d7..410f003f6a 100644 --- a/invokeai/backend/image_util/__init__.py +++ b/invokeai/backend/image_util/__init__.py @@ -1,15 +1,12 @@ -''' +""" Initialization file for invokeai.backend.image_util methods. -''' +""" from .patchmatch import PatchMatch +from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata +from .seamless import configure_model_padding from .txt2mask import Txt2Mask from .util import InitImageResizer, make_grid -from .pngwriter import (PngWriter, - PromptFormatter, - retrieve_metadata, - write_metadata, - ) -from .seamless import configure_model_padding + def debug_image( debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False @@ -25,5 +22,3 @@ def debug_image( if debug_result: return image_copy - - diff --git a/invokeai/backend/image_util/patchmatch.py b/invokeai/backend/image_util/patchmatch.py index 1c27a1d67d..8753298f51 100644 --- a/invokeai/backend/image_util/patchmatch.py +++ b/invokeai/backend/image_util/patchmatch.py @@ -1,20 +1,22 @@ -''' +""" This module defines a singleton object, "patchmatch" that wraps the actual patchmatch object. It respects the global "try_patchmatch" attribute, so that patchmatch loading can be suppressed or deferred -''' +""" +import numpy as np + from invokeai.backend.globals import Globals -import numpy as np + class PatchMatch: - ''' + """ Thin class wrapper around the patchmatch function. 
- ''' + """ patch_match = None - tried_load:bool = False - + tried_load: bool = False + def __init__(self): super().__init__() @@ -24,21 +26,22 @@ class PatchMatch: return if Globals.try_patchmatch: from patchmatch import patch_match as pm + if pm.patchmatch_available: - print('>> Patchmatch initialized') + print(">> Patchmatch initialized") else: - print('>> Patchmatch not loaded (nonfatal)') + print(">> Patchmatch not loaded (nonfatal)") self.patch_match = pm else: - print('>> Patchmatch loading disabled') + print(">> Patchmatch loading disabled") self.tried_load = True @classmethod - def patchmatch_available(self)->bool: + def patchmatch_available(self) -> bool: self._load_patch_match() return self.patch_match and self.patch_match.patchmatch_available @classmethod - def inpaint(self,*args,**kwargs)->np.ndarray: + def inpaint(self, *args, **kwargs) -> np.ndarray: if self.patchmatch_available(): - return self.patch_match.inpaint(*args,**kwargs) + return self.patch_match.inpaint(*args, **kwargs) diff --git a/invokeai/backend/image_util/pngwriter.py b/invokeai/backend/image_util/pngwriter.py index c022c62870..452bbfc783 100644 --- a/invokeai/backend/image_util/pngwriter.py +++ b/invokeai/backend/image_util/pngwriter.py @@ -6,10 +6,11 @@ PngWriter -- Converts Images generated by T2I into PNGs, finds Exports function retrieve_metadata(path) """ +import json import os import re -import json -from PIL import PngImagePlugin, Image + +from PIL import Image, PngImagePlugin # -------------------image generation utils----- @@ -25,52 +26,57 @@ class PngWriter: dirlist = sorted(os.listdir(self.outdir), reverse=True) # find the first filename that matches our pattern or return 000000.0.png existing_name = next( - (f for f in dirlist if re.match('^(\d+)\..*\.png', f)), - '0000000.0.png', + (f for f in dirlist if re.match("^(\d+)\..*\.png", f)), + "0000000.0.png", ) - basecount = int(existing_name.split('.', 1)[0]) + 1 - return f'{basecount:06}' + basecount = int(existing_name.split(".", 1)[0]) + 1 + return f"{basecount:06}" # saves image named _image_ to outdir/name, writing metadata from prompt # returns full path of output - def save_image_and_prompt_to_png(self, image, dream_prompt, name, metadata=None, compress_level=6): + def save_image_and_prompt_to_png( + self, image, dream_prompt, name, metadata=None, compress_level=6 + ): path = os.path.join(self.outdir, name) info = PngImagePlugin.PngInfo() - info.add_text('Dream', dream_prompt) + info.add_text("Dream", dream_prompt) if metadata: - info.add_text('sd-metadata', json.dumps(metadata)) - image.save(path, 'PNG', pnginfo=info, compress_level=compress_level) + info.add_text("sd-metadata", json.dumps(metadata)) + image.save(path, "PNG", pnginfo=info, compress_level=compress_level) return path - def retrieve_metadata(self,img_basename): - ''' + def retrieve_metadata(self, img_basename): + """ Given a PNG filename stored in outdir, returns the "sd-metadata" metadata stored there, as a dict - ''' - path = os.path.join(self.outdir,img_basename) + """ + path = os.path.join(self.outdir, img_basename) all_metadata = retrieve_metadata(path) - return all_metadata['sd-metadata'] + return all_metadata["sd-metadata"] + def retrieve_metadata(img_path): - ''' + """ Given a path to a PNG image, returns the "sd-metadata" metadata stored there, as a dict - ''' + """ im = Image.open(img_path) - if hasattr(im, 'text'): - md = im.text.get('sd-metadata', '{}') - dream_prompt = im.text.get('Dream', '') + if hasattr(im, "text"): + md = im.text.get("sd-metadata", "{}") + 
dream_prompt = im.text.get("Dream", "") else: # When trying to retrieve metadata from images without a 'text' payload, such as JPG images. - md = '{}' - dream_prompt = '' - return {'sd-metadata': json.loads(md), 'Dream': dream_prompt} + md = "{}" + dream_prompt = "" + return {"sd-metadata": json.loads(md), "Dream": dream_prompt} -def write_metadata(img_path:str, meta:dict): + +def write_metadata(img_path: str, meta: dict): im = Image.open(img_path) info = PngImagePlugin.PngInfo() - info.add_text('sd-metadata', json.dumps(meta)) - im.save(img_path,'PNG',pnginfo=info) + info.add_text("sd-metadata", json.dumps(meta)) + im.save(img_path, "PNG", pnginfo=info) + class PromptFormatter: def __init__(self, t2i, opt): @@ -86,28 +92,30 @@ class PromptFormatter: switches = list() switches.append(f'"{opt.prompt}"') - switches.append(f'-s{opt.steps or t2i.steps}') - switches.append(f'-W{opt.width or t2i.width}') - switches.append(f'-H{opt.height or t2i.height}') - switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}') - switches.append(f'-A{opt.sampler_name or t2i.sampler_name}') -# to do: put model name into the t2i object -# switches.append(f'--model{t2i.model_name}') + switches.append(f"-s{opt.steps or t2i.steps}") + switches.append(f"-W{opt.width or t2i.width}") + switches.append(f"-H{opt.height or t2i.height}") + switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}") + switches.append(f"-A{opt.sampler_name or t2i.sampler_name}") + # to do: put model name into the t2i object + # switches.append(f'--model{t2i.model_name}') if opt.seamless or t2i.seamless: - switches.append(f'--seamless') + switches.append(f"--seamless") if opt.init_img: - switches.append(f'-I{opt.init_img}') + switches.append(f"-I{opt.init_img}") if opt.fit: - switches.append(f'--fit') + switches.append(f"--fit") if opt.strength and opt.init_img is not None: - switches.append(f'-f{opt.strength or t2i.strength}') + switches.append(f"-f{opt.strength or t2i.strength}") if opt.gfpgan_strength: - switches.append(f'-G{opt.gfpgan_strength}') + switches.append(f"-G{opt.gfpgan_strength}") if opt.upscale: switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}') if opt.variation_amount > 0: - switches.append(f'-v{opt.variation_amount}') + switches.append(f"-v{opt.variation_amount}") if opt.with_variations: - formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in opt.with_variations) - switches.append(f'-V{formatted_variations}') - return ' '.join(switches) + formatted_variations = ",".join( + f"{seed}:{weight}" for seed, weight in opt.with_variations + ) + switches.append(f"-V{formatted_variations}") + return " ".join(switches) diff --git a/invokeai/backend/image_util/seamless.py b/invokeai/backend/image_util/seamless.py index fda363eb7b..4fbc0cd78e 100644 --- a/invokeai/backend/image_util/seamless.py +++ b/invokeai/backend/image_util/seamless.py @@ -1,12 +1,26 @@ import torch.nn as nn + def _conv_forward_asymmetric(self, input, weight, bias): """ Patch for Conv2d._conv_forward that supports asymmetric padding """ - working = nn.functional.pad(input, self.asymmetric_padding['x'], mode=self.asymmetric_padding_mode['x']) - working = nn.functional.pad(working, self.asymmetric_padding['y'], mode=self.asymmetric_padding_mode['y']) - return nn.functional.conv2d(working, weight, bias, self.stride, nn.modules.utils._pair(0), self.dilation, self.groups) + working = nn.functional.pad( + input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"] + ) + working = nn.functional.pad( + working, 
self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"] + ) + return nn.functional.conv2d( + working, + weight, + bias, + self.stride, + nn.modules.utils._pair(0), + self.dilation, + self.groups, + ) + def configure_model_padding(model, seamless, seamless_axes): """ @@ -18,14 +32,28 @@ def configure_model_padding(model, seamless, seamless_axes): if seamless: m.asymmetric_padding_mode = {} m.asymmetric_padding = {} - m.asymmetric_padding_mode['x'] = 'circular' if ('x' in seamless_axes) else 'constant' - m.asymmetric_padding['x'] = (m._reversed_padding_repeated_twice[0], m._reversed_padding_repeated_twice[1], 0, 0) - m.asymmetric_padding_mode['y'] = 'circular' if ('y' in seamless_axes) else 'constant' - m.asymmetric_padding['y'] = (0, 0, m._reversed_padding_repeated_twice[2], m._reversed_padding_repeated_twice[3]) + m.asymmetric_padding_mode["x"] = ( + "circular" if ("x" in seamless_axes) else "constant" + ) + m.asymmetric_padding["x"] = ( + m._reversed_padding_repeated_twice[0], + m._reversed_padding_repeated_twice[1], + 0, + 0, + ) + m.asymmetric_padding_mode["y"] = ( + "circular" if ("y" in seamless_axes) else "constant" + ) + m.asymmetric_padding["y"] = ( + 0, + 0, + m._reversed_padding_repeated_twice[2], + m._reversed_padding_repeated_twice[3], + ) m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d) else: m._conv_forward = nn.Conv2d._conv_forward.__get__(m, nn.Conv2d) - if hasattr(m, 'asymmetric_padding_mode'): + if hasattr(m, "asymmetric_padding_mode"): del m.asymmetric_padding_mode - if hasattr(m, 'asymmetric_padding'): + if hasattr(m, "asymmetric_padding"): del m.asymmetric_padding diff --git a/invokeai/backend/image_util/txt2mask.py b/invokeai/backend/image_util/txt2mask.py index 6e64df3f31..bc7e56d397 100644 --- a/invokeai/backend/image_util/txt2mask.py +++ b/invokeai/backend/image_util/txt2mask.py @@ -1,9 +1,9 @@ -'''Makes available the Txt2Mask class, which assists in the automatic +"""Makes available the Txt2Mask class, which assists in the automatic assignment of masks via text prompt using clipseg. Here is typical usage: - from ldm.invoke.txt2mask import Txt2Mask, SegmentedGrayscale + from invokeai.backend.image_util.txt2mask import Txt2Mask, SegmentedGrayscale from PIL import Image txt2mask = Txt2Mask(self.device) @@ -25,31 +25,39 @@ the mask that exceed the indicated confidence threshold. Values range from 0.0 to 1.0. The higher the threshold, the more confident the algorithm is. In limited testing, I have found that values around 0.5 work fine. 
-''' +""" +import numpy as np import torch -import numpy as np -from transformers import AutoProcessor, CLIPSegForImageSegmentation from PIL import Image, ImageOps from torchvision import transforms +from transformers import AutoProcessor, CLIPSegForImageSegmentation + from invokeai.backend.globals import global_cache_dir -CLIPSEG_MODEL = 'CIDAS/clipseg-rd64-refined' +CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined" CLIPSEG_SIZE = 352 + class SegmentedGrayscale(object): - def __init__(self, image:Image, heatmap:torch.Tensor): + def __init__(self, image: Image, heatmap: torch.Tensor): self.heatmap = heatmap self.image = image - def to_grayscale(self,invert:bool=False)->Image: - return self._rescale(Image.fromarray(np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255))) + def to_grayscale(self, invert: bool = False) -> Image: + return self._rescale( + Image.fromarray( + np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255) + ) + ) - def to_mask(self,threshold:float=0.5)->Image: + def to_mask(self, threshold: float = 0.5) -> Image: discrete_heatmap = self.heatmap.lt(threshold).int() - return self._rescale(Image.fromarray(np.uint8(discrete_heatmap*255),mode='L')) + return self._rescale( + Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L") + ) - def to_transparent(self,invert:bool=False)->Image: + def to_transparent(self, invert: bool = False) -> Image: transparent_image = self.image.copy() # For img2img, we want the selected regions to be transparent, # but to_grayscale() returns the opposite. Thus invert. @@ -58,70 +66,77 @@ class SegmentedGrayscale(object): return transparent_image # unscales and uncrops the 352x352 heatmap so that it matches the image again - def _rescale(self, heatmap:Image)->Image: - size = self.image.width if (self.image.width > self.image.height) else self.image.height - resized_image = heatmap.resize( - (size,size), - resample=Image.Resampling.LANCZOS + def _rescale(self, heatmap: Image) -> Image: + size = ( + self.image.width + if (self.image.width > self.image.height) + else self.image.height ) - return resized_image.crop((0,0,self.image.width,self.image.height)) + resized_image = heatmap.resize((size, size), resample=Image.Resampling.LANCZOS) + return resized_image.crop((0, 0, self.image.width, self.image.height)) + class Txt2Mask(object): - ''' + """ Create new Txt2Mask object. The optional device argument can be one of 'cuda', 'mps' or 'cpu'. 
- ''' - def __init__(self,device='cpu',refined=False): - print('>> Initializing clipseg model for text to mask inference') + """ + + def __init__(self, device="cpu", refined=False): + print(">> Initializing clipseg model for text to mask inference") # BUG: we are not doing anything with the device option at this time self.device = device - self.processor = AutoProcessor.from_pretrained(CLIPSEG_MODEL, - cache_dir=global_cache_dir('hub') - ) - self.model = CLIPSegForImageSegmentation.from_pretrained(CLIPSEG_MODEL, - cache_dir=global_cache_dir('hub') - ) + self.processor = AutoProcessor.from_pretrained( + CLIPSEG_MODEL, cache_dir=global_cache_dir("hub") + ) + self.model = CLIPSegForImageSegmentation.from_pretrained( + CLIPSEG_MODEL, cache_dir=global_cache_dir("hub") + ) @torch.no_grad() - def segment(self, image, prompt:str) -> SegmentedGrayscale: - ''' + def segment(self, image, prompt: str) -> SegmentedGrayscale: + """ Given a prompt string such as "a bagel", tries to identify the object in the provided image and returns a SegmentedGrayscale object in which the brighter pixels indicate where the object is inferred to be. - ''' - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - transforms.Resize((CLIPSEG_SIZE, CLIPSEG_SIZE)), # must be multiple of 64... - ]) + """ + transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ), + transforms.Resize( + (CLIPSEG_SIZE, CLIPSEG_SIZE) + ), # must be multiple of 64... + ] + ) if type(image) is str: - image = Image.open(image).convert('RGB') + image = Image.open(image).convert("RGB") image = ImageOps.exif_transpose(image) img = self._scale_and_crop(image) - inputs = self.processor(text=[prompt], - images=[img], - padding=True, - return_tensors='pt') + inputs = self.processor( + text=[prompt], images=[img], padding=True, return_tensors="pt" + ) outputs = self.model(**inputs) heatmap = torch.sigmoid(outputs.logits) return SegmentedGrayscale(image, heatmap) - def _scale_and_crop(self, image:Image)->Image: - scaled_image = Image.new('RGB',(CLIPSEG_SIZE,CLIPSEG_SIZE)) - if image.width > image.height: # width is constraint + def _scale_and_crop(self, image: Image) -> Image: + scaled_image = Image.new("RGB", (CLIPSEG_SIZE, CLIPSEG_SIZE)) + if image.width > image.height: # width is constraint scale = CLIPSEG_SIZE / image.width else: scale = CLIPSEG_SIZE / image.height scaled_image.paste( image.resize( - (int(scale * image.width), - int(scale * image.height) - ), - resample=Image.Resampling.LANCZOS - ),box=(0,0) + (int(scale * image.width), int(scale * image.height)), + resample=Image.Resampling.LANCZOS, + ), + box=(0, 0), ) return scaled_image diff --git a/invokeai/backend/image_util/util.py b/invokeai/backend/image_util/util.py index 3fb509b3cd..bc7fa01e3b 100644 --- a/invokeai/backend/image_util/util.py +++ b/invokeai/backend/image_util/util.py @@ -1,12 +1,15 @@ -from math import sqrt, floor, ceil +from math import ceil, floor, sqrt + from PIL import Image -class InitImageResizer(): + +class InitImageResizer: """Simple class to create resized copies of an Image while preserving the aspect ratio.""" - def __init__(self,Image): + + def __init__(self, Image): self.image = Image - def resize(self,width=None,height=None) -> Image: + def resize(self, width=None, height=None) -> Image: """ Return a copy of the image resized to fit within a box width x height. 
The aspect ratio is @@ -18,37 +21,36 @@ class InitImageResizer(): Everything is floored to the nearest multiple of 64 so that it can be passed to img2img() """ - im = self.image + im = self.image - ar = im.width/float(im.height) + ar = im.width / float(im.height) # Infer missing values from aspect ratio - if not(width or height): # both missing - width = im.width + if not (width or height): # both missing + width = im.width height = im.height - elif not height: # height missing - height = int(width/ar) - elif not width: # width missing - width = int(height*ar) + elif not height: # height missing + height = int(width / ar) + elif not width: # width missing + width = int(height * ar) - w_scale = width/im.width - h_scale = height/im.height - scale = min(w_scale,h_scale) - (rw,rh) = (int(scale*im.width),int(scale*im.height)) + w_scale = width / im.width + h_scale = height / im.height + scale = min(w_scale, h_scale) + (rw, rh) = (int(scale * im.width), int(scale * im.height)) - #round everything to multiples of 64 - width,height,rw,rh = map( - lambda x: x-x%64, (width,height,rw,rh) - ) + # round everything to multiples of 64 + width, height, rw, rh = map(lambda x: x - x % 64, (width, height, rw, rh)) # no resize necessary, but return a copy if im.width == width and im.height == height: return im.copy() # otherwise resize the original image so that it fits inside the bounding box - resized_image = self.image.resize((rw,rh),resample=Image.Resampling.LANCZOS) + resized_image = self.image.resize((rw, rh), resample=Image.Resampling.LANCZOS) return resized_image + def make_grid(image_list, rows=None, cols=None): image_cnt = len(image_list) if None in (rows, cols): @@ -57,7 +59,7 @@ def make_grid(image_list, rows=None, cols=None): width = image_list[0].width height = image_list[0].height - grid_img = Image.new('RGB', (width * cols, height * rows)) + grid_img = Image.new("RGB", (width * cols, height * rows)) i = 0 for r in range(0, rows): for c in range(0, cols): @@ -67,4 +69,3 @@ def make_grid(image_list, rows=None, cols=None): i = i + 1 return grid_img - diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py index e83527123f..39411a853a 100644 --- a/invokeai/backend/model_management/__init__.py +++ b/invokeai/backend/model_management/__init__.py @@ -1,8 +1,8 @@ -''' +""" Initialization file for invokeai.backend.model_management -''' +""" +from .convert_ckpt_to_diffusers import ( + convert_ckpt_to_diffusers, + load_pipeline_from_original_stable_diffusion_ckpt, +) from .model_manager import ModelManager -from .convert_ckpt_to_diffusers import (load_pipeline_from_original_stable_diffusion_ckpt, - convert_ckpt_to_diffusers) -from ...frontend.merge.merge_diffusers import (merge_diffusion_models, - merge_diffusion_models_and_commit) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 395432c1e6..c0c1cca3c4 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -18,17 +18,17 @@ """ Conversion script for the LDM checkpoints. 
""" import re -import torch import warnings from pathlib import Path -from invokeai.backend.globals import ( - global_cache_dir, - global_config_dir, - ) -from .model_manager import ModelManager, SDLegacyType -from safetensors.torch import load_file from typing import Union +import torch +from safetensors.torch import load_file + +from invokeai.backend.globals import global_cache_dir, global_config_dir + +from .model_manager import ModelManager, SDLegacyType + try: from omegaconf import OmegaConf except ImportError: @@ -48,16 +48,31 @@ from diffusers import ( PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, - logging as dlogging, ) -from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel -from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder, PaintByExamplePipeline -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from diffusers import logging as dlogging +from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import ( + LDMBertConfig, + LDMBertModel, +) +from diffusers.pipelines.paint_by_example import ( + PaintByExampleImageEncoder, + PaintByExamplePipeline, +) +from diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, +) from diffusers.utils import is_safetensors_available -from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig +from transformers import ( + AutoFeatureExtractor, + BertTokenizerFast, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionConfig, +) from ..stable_diffusion import StableDiffusionGeneratorPipeline + def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
@@ -83,7 +98,9 @@ def renew_resnet_paths(old_list, n_shave_prefix_segments=0): new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") - new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + new_item = shave_segments( + new_item, n_shave_prefix_segments=n_shave_prefix_segments + ) mapping.append({"old": old_item, "new": new_item}) @@ -99,7 +116,9 @@ def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): new_item = old_item new_item = new_item.replace("nin_shortcut", "conv_shortcut") - new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + new_item = shave_segments( + new_item, n_shave_prefix_segments=n_shave_prefix_segments + ) mapping.append({"old": old_item, "new": new_item}) @@ -150,7 +169,9 @@ def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): new_item = new_item.replace("proj_out.weight", "proj_attn.weight") new_item = new_item.replace("proj_out.bias", "proj_attn.bias") - new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + new_item = shave_segments( + new_item, n_shave_prefix_segments=n_shave_prefix_segments + ) mapping.append({"old": old_item, "new": new_item}) @@ -158,7 +179,12 @@ def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): def assign_to_checkpoint( - paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None + paths, + checkpoint, + old_checkpoint, + attention_paths_to_split=None, + additional_replacements=None, + config=None, ): """ This does the final conversion step: take locally converted weights and apply a global renaming @@ -167,7 +193,9 @@ def assign_to_checkpoint( Assigns the weights to the new checkpoint. """ - assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." + assert isinstance( + paths, list + ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: @@ -179,7 +207,9 @@ def assign_to_checkpoint( num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 - old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) + old_tensor = old_tensor.reshape( + (num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] + ) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) @@ -190,7 +220,10 @@ def assign_to_checkpoint( new_path = path["new"] # These have already been assigned - if attention_paths_to_split is not None and new_path in attention_paths_to_split: + if ( + attention_paths_to_split is not None + and new_path in attention_paths_to_split + ): continue # Global renaming happens here @@ -228,19 +261,29 @@ def create_unet_diffusers_config(original_config, image_size: int): unet_params = original_config.model.params.unet_config.params vae_params = original_config.model.params.first_stage_config.params.ddconfig - block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult] + block_out_channels = [ + unet_params.model_channels * mult for mult in unet_params.channel_mult + ] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): - block_type = "CrossAttnDownBlock2D" if resolution in unet_params.attention_resolutions else "DownBlock2D" + block_type = ( + "CrossAttnDownBlock2D" + if resolution in unet_params.attention_resolutions + else "DownBlock2D" + ) down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): - block_type = "CrossAttnUpBlock2D" if resolution in unet_params.attention_resolutions else "UpBlock2D" + block_type = ( + "CrossAttnUpBlock2D" + if resolution in unet_params.attention_resolutions + else "UpBlock2D" + ) up_block_types.append(block_type) resolution //= 2 @@ -248,7 +291,9 @@ def create_unet_diffusers_config(original_config, image_size: int): head_dim = unet_params.num_heads if "num_heads" in unet_params else None use_linear_projection = ( - unet_params.use_linear_in_transformer if "use_linear_in_transformer" in unet_params else False + unet_params.use_linear_in_transformer + if "use_linear_in_transformer" in unet_params + else False ) if use_linear_projection: # stable diffusion 2-base-512 and 2-768 @@ -329,16 +374,16 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False if sum(k.startswith("model_ema") for k in keys) > 100: print(f" | Checkpoint {path} has both EMA and non-EMA weights.") if extract_ema: - print( - ' | Extracting EMA weights (usually better for inference)' - ) + print(" | Extracting EMA weights (usually better for inference)") for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." 
+ "".join(key.split(".")[1:]) - unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop( + flat_ema_key + ) else: print( - ' | Extracting only the non-EMA weights (usually better for fine-tuning)' + " | Extracting only the non-EMA weights (usually better for fine-tuning)" ) for key in keys: @@ -347,10 +392,18 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False new_checkpoint = {} - new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] - new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] - new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] - new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] + new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict[ + "time_embed.0.weight" + ] + new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict[ + "time_embed.0.bias" + ] + new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict[ + "time_embed.2.weight" + ] + new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict[ + "time_embed.2.bias" + ] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] @@ -361,21 +414,39 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only - num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) + num_input_blocks = len( + { + ".".join(layer.split(".")[:2]) + for layer in unet_state_dict + if "input_blocks" in layer + } + ) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only - num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) + num_middle_blocks = len( + { + ".".join(layer.split(".")[:2]) + for layer in unet_state_dict + if "middle_block" in layer + } + ) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only - num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) + num_output_blocks = len( + { + ".".join(layer.split(".")[:2]) + for layer in unet_state_dict + if "output_blocks" in layer + } + ) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) @@ -386,29 +457,45 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ - key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key + key + for key in input_blocks[i] + if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: - new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( - f"input_blocks.{i}.0.op.weight" - ) - 
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( - f"input_blocks.{i}.0.op.bias" - ) + new_checkpoint[ + f"down_blocks.{block_id}.downsamplers.0.conv.weight" + ] = unet_state_dict.pop(f"input_blocks.{i}.0.op.weight") + new_checkpoint[ + f"down_blocks.{block_id}.downsamplers.0.conv.bias" + ] = unet_state_dict.pop(f"input_blocks.{i}.0.op.bias") paths = renew_resnet_paths(resnets) - meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} + meta_path = { + "old": f"input_blocks.{i}.0", + "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}", + } assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + paths, + new_checkpoint, + unet_state_dict, + additional_replacements=[meta_path], + config=config, ) if len(attentions): paths = renew_attention_paths(attentions) - meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} + meta_path = { + "old": f"input_blocks.{i}.1", + "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}", + } assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + paths, + new_checkpoint, + unet_state_dict, + additional_replacements=[meta_path], + config=config, ) resnet_0 = middle_blocks[0] @@ -424,7 +511,11 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( - attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + attentions_paths, + new_checkpoint, + unet_state_dict, + additional_replacements=[meta_path], + config=config, ) for i in range(num_output_blocks): @@ -442,25 +533,36 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] - attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] + attentions = [ + key for key in output_blocks[i] if f"output_blocks.{i}.1" in key + ] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) - meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} + meta_path = { + "old": f"output_blocks.{i}.0", + "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}", + } assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + paths, + new_checkpoint, + unet_state_dict, + additional_replacements=[meta_path], + config=config, ) output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): - index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) - new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ - f"output_blocks.{i}.{index}.conv.weight" - ] - new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ - f"output_blocks.{i}.{index}.conv.bias" - ] + index = list(output_block_list.values()).index( + ["conv.bias", "conv.weight"] + ) + new_checkpoint[ + f"up_blocks.{block_id}.upsamplers.0.conv.weight" + ] = unet_state_dict[f"output_blocks.{i}.{index}.conv.weight"] + new_checkpoint[ + 
f"up_blocks.{block_id}.upsamplers.0.conv.bias" + ] = unet_state_dict[f"output_blocks.{i}.{index}.conv.bias"] # Clear attentions as they have been attributed above. if len(attentions) == 2: @@ -473,13 +575,27 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + paths, + new_checkpoint, + unet_state_dict, + additional_replacements=[meta_path], + config=config, ) else: - resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) + resnet_0_paths = renew_resnet_paths( + output_block_layers, n_shave_prefix_segments=1 + ) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) - new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) + new_path = ".".join( + [ + "up_blocks", + str(block_id), + "resnets", + str(layer_in_block_id), + path["new"], + ] + ) new_checkpoint[new_path] = unet_state_dict[old_path] @@ -499,17 +615,29 @@ def convert_ldm_vae_checkpoint(checkpoint, config): new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] - new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] + new_checkpoint["encoder.conv_out.weight"] = vae_state_dict[ + "encoder.conv_out.weight" + ] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] - new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] - new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] + new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict[ + "encoder.norm_out.weight" + ] + new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict[ + "encoder.norm_out.bias" + ] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] - new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] + new_checkpoint["decoder.conv_out.weight"] = vae_state_dict[ + "decoder.conv_out.weight" + ] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] - new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] - new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] + new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict[ + "decoder.norm_out.weight" + ] + new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict[ + "decoder.norm_out.bias" + ] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] @@ -517,31 +645,55 @@ def convert_ldm_vae_checkpoint(checkpoint, config): new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only - num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) + num_down_blocks = len( + { + ".".join(layer.split(".")[:3]) + for layer in vae_state_dict + if "encoder.down" in layer + } + ) down_blocks = { - layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) + layer_id: [key for key in 
vae_state_dict if f"down.{layer_id}" in key] + for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only - num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) + num_up_blocks = len( + { + ".".join(layer.split(".")[:3]) + for layer in vae_state_dict + if "decoder.up" in layer + } + ) up_blocks = { - layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) + layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] + for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): - resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] + resnets = [ + key + for key in down_blocks[i] + if f"down.{i}" in key and f"down.{i}.downsample" not in key + ] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: - new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( - f"encoder.down.{i}.downsample.conv.weight" - ) - new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( - f"encoder.down.{i}.downsample.conv.bias" - ) + new_checkpoint[ + f"encoder.down_blocks.{i}.downsamplers.0.conv.weight" + ] = vae_state_dict.pop(f"encoder.down.{i}.downsample.conv.weight") + new_checkpoint[ + f"encoder.down_blocks.{i}.downsamplers.0.conv.bias" + ] = vae_state_dict.pop(f"encoder.down.{i}.downsample.conv.bias") paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + assign_to_checkpoint( + paths, + new_checkpoint, + vae_state_dict, + additional_replacements=[meta_path], + config=config, + ) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 @@ -550,31 +702,51 @@ def convert_ldm_vae_checkpoint(checkpoint, config): paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + assign_to_checkpoint( + paths, + new_checkpoint, + vae_state_dict, + additional_replacements=[meta_path], + config=config, + ) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + assign_to_checkpoint( + paths, + new_checkpoint, + vae_state_dict, + additional_replacements=[meta_path], + config=config, + ) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ - key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key + key + for key in up_blocks[block_id] + if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: - new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ - f"decoder.up.{block_id}.upsample.conv.weight" - ] - new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ - f"decoder.up.{block_id}.upsample.conv.bias" - ] + new_checkpoint[ + f"decoder.up_blocks.{i}.upsamplers.0.conv.weight" + 
] = vae_state_dict[f"decoder.up.{block_id}.upsample.conv.weight"] + new_checkpoint[ + f"decoder.up_blocks.{i}.upsamplers.0.conv.bias" + ] = vae_state_dict[f"decoder.up.{block_id}.upsample.conv.bias"] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + assign_to_checkpoint( + paths, + new_checkpoint, + vae_state_dict, + additional_replacements=[meta_path], + config=config, + ) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 @@ -583,12 +755,24 @@ def convert_ldm_vae_checkpoint(checkpoint, config): paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + assign_to_checkpoint( + paths, + new_checkpoint, + vae_state_dict, + additional_replacements=[meta_path], + config=config, + ) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + assign_to_checkpoint( + paths, + new_checkpoint, + vae_state_dict, + additional_replacements=[meta_path], + config=config, + ) conv_attn_to_linear(new_checkpoint) return new_checkpoint @@ -630,7 +814,9 @@ def convert_ldm_bert_checkpoint(checkpoint, config): # copy embeds hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight - hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight + hf_model.model.embed_positions.weight.data = ( + checkpoint.transformer.pos_emb.emb.weight + ) # copy layer norm _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm) @@ -644,7 +830,9 @@ def convert_ldm_bert_checkpoint(checkpoint, config): def convert_ldm_clip_checkpoint(checkpoint): - text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14",cache_dir=global_cache_dir('hub')) + text_model = CLIPTextModel.from_pretrained( + "openai/clip-vit-large-patch14", cache_dir=global_cache_dir("hub") + ) keys = list(checkpoint.keys()) @@ -652,7 +840,9 @@ def convert_ldm_clip_checkpoint(checkpoint): for key in keys: if key.startswith("cond_stage_model.transformer"): - text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key] + text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[ + key + ] text_model.load_state_dict(text_model_dict) @@ -660,8 +850,14 @@ def convert_ldm_clip_checkpoint(checkpoint): textenc_conversion_lst = [ - ("cond_stage_model.model.positional_embedding", "text_model.embeddings.position_embedding.weight"), - ("cond_stage_model.model.token_embedding.weight", "text_model.embeddings.token_embedding.weight"), + ( + "cond_stage_model.model.positional_embedding", + "text_model.embeddings.position_embedding.weight", + ), + ( + "cond_stage_model.model.token_embedding.weight", + "text_model.embeddings.token_embedding.weight", + ), ("cond_stage_model.model.ln_final.weight", "text_model.final_layer_norm.weight"), ("cond_stage_model.model.ln_final.bias", "text_model.final_layer_norm.bias"), ] @@ -676,16 +872,24 @@ textenc_transformer_conversion_lst = [ (".c_proj.", ".fc2."), (".attn", ".self_attn"), ("ln_final.", 
"transformer.text_model.final_layer_norm."), - ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), - ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), + ( + "token_embedding.weight", + "transformer.text_model.embeddings.token_embedding.weight", + ), + ( + "positional_embedding", + "transformer.text_model.embeddings.position_embedding.weight", + ), ] protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst} textenc_pattern = re.compile("|".join(protected.keys())) def convert_paint_by_example_checkpoint(checkpoint): - cache_dir = global_cache_dir('hub') - config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14",cache_dir=cache_dir) + cache_dir = global_cache_dir("hub") + config = CLIPVisionConfig.from_pretrained( + "openai/clip-vit-large-patch14", cache_dir=cache_dir + ) model = PaintByExampleImageEncoder(config) keys = list(checkpoint.keys()) @@ -694,7 +898,9 @@ def convert_paint_by_example_checkpoint(checkpoint): for key in keys: if key.startswith("cond_stage_model.transformer"): - text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key] + text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[ + key + ] # load clip vision model.model.load_state_dict(text_model_dict) @@ -752,24 +958,32 @@ def convert_paint_by_example_checkpoint(checkpoint): def convert_open_clip_checkpoint(checkpoint): - cache_dir=global_cache_dir('hub') - text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir) + cache_dir = global_cache_dir("hub") + text_model = CLIPTextModel.from_pretrained( + "stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir + ) keys = list(checkpoint.keys()) text_model_dict = {} - if 'cond_stage_model.model.text_projection' in keys: + if "cond_stage_model.model.text_projection" in keys: d_model = int(checkpoint["cond_stage_model.model.text_projection"].shape[0]) - elif 'cond_stage_model.model.ln_final.bias' in keys: - d_model = int(checkpoint['cond_stage_model.model.ln_final.bias'].shape[0]) + elif "cond_stage_model.model.ln_final.bias" in keys: + d_model = int(checkpoint["cond_stage_model.model.ln_final.bias"].shape[0]) else: - raise KeyError('Expected key "cond_stage_model.model.text_projection" not found in model') + raise KeyError( + 'Expected key "cond_stage_model.model.text_projection" not found in model' + ) - text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids") + text_model_dict[ + "text_model.embeddings.position_ids" + ] = text_model.text_model.embeddings.get_buffer("position_ids") for key in keys: - if "resblocks.23" in key: # Diffusers drops the final layer and only uses the penultimate layer + if ( + "resblocks.23" in key + ): # Diffusers drops the final layer and only uses the penultimate layer continue if key in textenc_conversion_map: text_model_dict[textenc_conversion_map[key]] = checkpoint[key] @@ -777,18 +991,34 @@ def convert_open_clip_checkpoint(checkpoint): new_key = key[len("cond_stage_model.model.transformer.") :] if new_key.endswith(".in_proj_weight"): new_key = new_key[: -len(".in_proj_weight")] - new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) - text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :] - text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :] - text_model_dict[new_key 
+ ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :] + new_key = textenc_pattern.sub( + lambda m: protected[re.escape(m.group(0))], new_key + ) + text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][ + :d_model, : + ] + text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][ + d_model : d_model * 2, : + ] + text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][ + d_model * 2 :, : + ] elif new_key.endswith(".in_proj_bias"): new_key = new_key[: -len(".in_proj_bias")] - new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + new_key = textenc_pattern.sub( + lambda m: protected[re.escape(m.group(0))], new_key + ) text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model] - text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2] - text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :] + text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][ + d_model : d_model * 2 + ] + text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][ + d_model * 2 : + ] else: - new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + new_key = textenc_pattern.sub( + lambda m: protected[re.escape(m.group(0))], new_key + ) text_model_dict[new_key] = checkpoint[key] @@ -796,21 +1026,22 @@ def convert_open_clip_checkpoint(checkpoint): return text_model + def load_pipeline_from_original_stable_diffusion_ckpt( - checkpoint_path:str, - original_config_file:str=None, - num_in_channels:int=None, - scheduler_type:str='pndm', - pipeline_type:str=None, - image_size:int=None, - prediction_type:str=None, - extract_ema:bool=True, - upcast_attn:bool=False, - vae:AutoencoderKL=None, - precision:torch.dtype=torch.float32, - return_generator_pipeline:bool=False, -)->Union[StableDiffusionPipeline,StableDiffusionGeneratorPipeline]: - ''' + checkpoint_path: str, + original_config_file: str = None, + num_in_channels: int = None, + scheduler_type: str = "pndm", + pipeline_type: str = None, + image_size: int = None, + prediction_type: str = None, + extract_ema: bool = True, + upcast_attn: bool = False, + vae: AutoencoderKL = None, + precision: torch.dtype = torch.float32, + return_generator_pipeline: bool = False, +) -> Union[StableDiffusionPipeline, StableDiffusionGeneratorPipeline]: + """ Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml` config file. @@ -818,15 +1049,15 @@ def load_pipeline_from_original_stable_diffusion_ckpt( global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is recommended that you override the default values and/or supply an `original_config_file` wherever possible. - :param checkpoint_path: Path to `.ckpt` file. - :param original_config_file: Path to `.yaml` config file corresponding to the original architecture. + :param checkpoint_path: Path to `.ckpt` file. + :param original_config_file: Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically inferred by looking for a key that only exists in SD2.0 models. :param image_size: The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2 Base. Use 768 for Stable Diffusion v2. :param prediction_type: The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable Diffusion v2 Base. Use `'v-prediction'` for Stable Diffusion v2. 
:param num_in_channels: The number of input channels. If `None` number of input channels will be automatically - inferred. + inferred. :param scheduler_type: Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", "ddim"]`. :param model_type: The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder", "FrozenCLIPEmbedder", "PaintByExample"]`. :param extract_ema: Only relevant for @@ -836,16 +1067,24 @@ def load_pipeline_from_original_stable_diffusion_ckpt( :param precision: precision to use - torch.float16, torch.float32 or torch.autocast :param upcast_attention: Whether the attention computation should always be upcasted. This is necessary when running stable diffusion 2.1. - ''' + """ with warnings.catch_warnings(): - warnings.simplefilter('ignore') + warnings.simplefilter("ignore") verbosity = dlogging.get_verbosity() dlogging.set_verbosity_error() - checkpoint = load_file(checkpoint_path) if Path(checkpoint_path).suffix == '.safetensors' else torch.load(checkpoint_path) - cache_dir = global_cache_dir('hub') - pipeline_class = StableDiffusionGeneratorPipeline if return_generator_pipeline else StableDiffusionPipeline + checkpoint = ( + load_file(checkpoint_path) + if Path(checkpoint_path).suffix == ".safetensors" + else torch.load(checkpoint_path) + ) + cache_dir = global_cache_dir("hub") + pipeline_class = ( + StableDiffusionGeneratorPipeline + if return_generator_pipeline + else StableDiffusionPipeline + ) # Sometimes models don't have the global_step item if "global_step" in checkpoint: @@ -855,32 +1094,42 @@ def load_pipeline_from_original_stable_diffusion_ckpt( global_step = None # sometimes there is a state_dict key and sometimes not - if 'state_dict' in checkpoint: + if "state_dict" in checkpoint: checkpoint = checkpoint["state_dict"] upcast_attention = False if original_config_file is None: model_type = ModelManager.probe_model_type(checkpoint) - + if model_type == SDLegacyType.V2: - original_config_file = global_config_dir() / 'stable-diffusion' / 'v2-inference-v.yaml' + original_config_file = ( + global_config_dir() / "stable-diffusion" / "v2-inference-v.yaml" + ) if global_step == 110000: # v2.1 needs to upcast attention upcast_attention = True - + elif model_type == SDLegacyType.V1_INPAINT: - original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inpainting-inference.yaml' - + original_config_file = ( + global_config_dir() + / "stable-diffusion" + / "v1-inpainting-inference.yaml" + ) + elif model_type == SDLegacyType.V1: - original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inference.yaml' + original_config_file = ( + global_config_dir() / "stable-diffusion" / "v1-inference.yaml" + ) else: - raise Exception('Unknown checkpoint type') + raise Exception("Unknown checkpoint type") original_config = OmegaConf.load(original_config_file) if num_in_channels is not None: - original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels + original_config["model"]["params"]["unet_config"]["params"][ + "in_channels" + ] = num_in_channels if ( "parameterization" in original_config["model"]["params"] @@ -937,7 +1186,9 @@ def load_pipeline_from_original_stable_diffusion_ckpt( raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") # Convert the UNet2DConditionModel model. 
- unet_config = create_unet_diffusers_config(original_config, image_size=image_size) + unet_config = create_unet_diffusers_config( + original_config, image_size=image_size + ) unet_config["upcast_attention"] = upcast_attention unet = UNet2DConditionModel(**unet_config) @@ -949,26 +1200,33 @@ def load_pipeline_from_original_stable_diffusion_ckpt( # Convert the VAE model, or use the one passed if not vae: - print(' | Using checkpoint model\'s original VAE') - vae_config = create_vae_diffusers_config(original_config, image_size=image_size) - converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) + print(" | Using checkpoint model's original VAE") + vae_config = create_vae_diffusers_config( + original_config, image_size=image_size + ) + converted_vae_checkpoint = convert_ldm_vae_checkpoint( + checkpoint, vae_config + ) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) else: - print(' | Using external VAE specified in config') + print(" | Using external VAE specified in config") # Convert the text model. model_type = pipeline_type if model_type is None: - model_type = original_config.model.params.cond_stage_config.target.split(".")[-1] + model_type = original_config.model.params.cond_stage_config.target.split( + "." + )[-1] if model_type == "FrozenOpenCLIPEmbedder": text_model = convert_open_clip_checkpoint(checkpoint) - tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2", - subfolder="tokenizer", - cache_dir=cache_dir, - ) + tokenizer = CLIPTokenizer.from_pretrained( + "stabilityai/stable-diffusion-2", + subfolder="tokenizer", + cache_dir=cache_dir, + ) pipe = pipeline_class( vae=vae, text_encoder=text_model, @@ -981,8 +1239,12 @@ def load_pipeline_from_original_stable_diffusion_ckpt( ) elif model_type == "PaintByExample": vision_model = convert_paint_by_example_checkpoint(checkpoint) - tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14",cache_dir=cache_dir) - feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker",cache_dir=cache_dir) + tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", cache_dir=cache_dir + ) + feature_extractor = AutoFeatureExtractor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", cache_dir=cache_dir + ) pipe = PaintByExamplePipeline( vae=vae, image_encoder=vision_model, @@ -991,11 +1253,18 @@ def load_pipeline_from_original_stable_diffusion_ckpt( safety_checker=None, feature_extractor=feature_extractor, ) - elif model_type in ['FrozenCLIPEmbedder','WeightedFrozenCLIPEmbedder']: + elif model_type in ["FrozenCLIPEmbedder", "WeightedFrozenCLIPEmbedder"]: text_model = convert_ldm_clip_checkpoint(checkpoint) - tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14",cache_dir=cache_dir) - safety_checker = StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker',cache_dir=global_cache_dir("hub")) - feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker",cache_dir=cache_dir) + tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", cache_dir=cache_dir + ) + safety_checker = StableDiffusionSafetyChecker.from_pretrained( + "CompVis/stable-diffusion-safety-checker", + cache_dir=global_cache_dir("hub"), + ) + feature_extractor = AutoFeatureExtractor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", cache_dir=cache_dir + ) pipe = pipeline_class( vae=vae.to(precision), 
text_encoder=text_model.to(precision), @@ -1008,27 +1277,33 @@ def load_pipeline_from_original_stable_diffusion_ckpt( else: text_config = create_ldm_bert_config(original_config) text_model = convert_ldm_bert_checkpoint(checkpoint, text_config) - tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased",cache_dir=cache_dir) - pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler) + tokenizer = BertTokenizerFast.from_pretrained( + "bert-base-uncased", cache_dir=cache_dir + ) + pipe = LDMTextToImagePipeline( + vqvae=vae, + bert=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) dlogging.set_verbosity(verbosity) return pipe + def convert_ckpt_to_diffusers( - checkpoint_path:Union[str,Path], - dump_path:Union[str,Path], - **kwargs, + checkpoint_path: Union[str, Path], + dump_path: Union[str, Path], + **kwargs, ): - ''' + """ Takes all the arguments of load_pipeline_from_original_stable_diffusion_ckpt(), and in addition a path-like object indicating the location of the desired diffusers model to be written. - ''' - pipe = load_pipeline_from_original_stable_diffusion_ckpt( - checkpoint_path, - **kwargs - ) - + """ + pipe = load_pipeline_from_original_stable_diffusion_ckpt(checkpoint_path, **kwargs) + pipe.save_pretrained( dump_path, safe_serialization=is_safetensors_available(), diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 34a10969cd..450043d369 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -31,13 +31,11 @@ from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from picklescan.scanner import scan_file_path -from ..util import CPU_DEVICE from invokeai.backend.globals import Globals, global_cache_dir -from ..util import ( - ask_user, - download_with_resume, -) + from ..stable_diffusion import StableDiffusionGeneratorPipeline +from ..util import CPU_DEVICE, ask_user, download_with_resume + class SDLegacyType(Enum): V1 = 1 @@ -45,11 +43,13 @@ class SDLegacyType(Enum): V2 = 3 UNKNOWN = 99 + DEFAULT_MAX_MODELS = 2 VAE_TO_REPO_ID = { # hack, see note in convert_and_import() "vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse", } + class ModelManager(object): def __init__( self, @@ -428,11 +428,9 @@ class ModelManager(object): weights = os.path.normpath(os.path.join(Globals.root, weights)) # Convert to diffusers and return a diffusers pipeline - print( - f">> Converting legacy checkpoint {model_name} into a diffusers model..." - ) - - from . import load_pipeline_from_original_stable_diffusion_ckpt + print(f">> Converting legacy checkpoint {model_name} into a diffusers model...") + + from . import load_pipeline_from_original_stable_diffusion_ckpt self.offload_model(self.current_model) if vae_config := self._choose_diffusers_vae(model_name): @@ -444,9 +442,7 @@ class ModelManager(object): original_config_file=config, vae=vae, return_generator_pipeline=True, - precision=torch.float16 - if self.precision == "float16" - else torch.float32, + precision=torch.float16 if self.precision == "float16" else torch.float32, ) if self.sequential_offload: pipeline.enable_offload_submodels(self.device) @@ -547,7 +543,9 @@ class ModelManager(object): models.yaml file. 
""" model_name = model_name or Path(repo_or_path).stem - model_description = model_description or f"Imported diffusers model {model_name}" + model_description = ( + model_description or f"Imported diffusers model {model_name}" + ) new_config = dict( description=model_description, vae=vae, @@ -729,7 +727,7 @@ class ModelManager(object): f"** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. Skipping import" ) return - + diffuser_path = Path( Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem ) @@ -781,7 +779,7 @@ class ModelManager(object): # By passing the specified VAE to the conversion function, the autoencoder # will be built into the model rather than tacked on afterward via the config file vae_model = self._load_vae(vae) if vae else None - convert_ckpt_to_diffusers ( + convert_ckpt_to_diffusers( ckpt_path, diffusers_path, extract_ema=True, diff --git a/invokeai/backend/prompting/__init__.py b/invokeai/backend/prompting/__init__.py index 29694c6538..152edf646b 100644 --- a/invokeai/backend/prompting/__init__.py +++ b/invokeai/backend/prompting/__init__.py @@ -1,7 +1,10 @@ -''' +""" Initialization file for invokeai.backend.prompting -''' -from .conditioning import (get_uc_and_c_and_ec, - split_weighted_subprompts, - get_tokens_for_prompt_object, - get_prompt_structure, get_tokenizer) +""" +from .conditioning import ( + get_prompt_structure, + get_tokenizer, + get_tokens_for_prompt_object, + get_uc_and_c_and_ec, + split_weighted_subprompts, +) diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py index b9378cf5ed..04051084c0 100644 --- a/invokeai/backend/prompting/conditioning.py +++ b/invokeai/backend/prompting/conditioning.py @@ -1,31 +1,46 @@ -''' +""" This module handles the generation of the conditioning tensors. 
Useful function exports: get_uc_and_c_and_ec() get the conditioned and unconditioned latent, and edited conditioning if we're doing cross-attention control -''' +""" import re -from typing import Union, Optional, Any - -from transformers import CLIPTokenizer, CLIPTextModel +from typing import Any, Optional, Union from compel import Compel -from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser -from ..util import torch_dtype -from ..stable_diffusion import InvokeAIDiffuserComponent +from compel.prompt_parser import ( + Blend, + CrossAttentionControlSubstitute, + FlattenedPrompt, + Fragment, + PromptParser, +) +from transformers import CLIPTextModel, CLIPTokenizer + from invokeai.backend.globals import Globals +from ..stable_diffusion import InvokeAIDiffuserComponent +from ..util import torch_dtype + + def get_tokenizer(model) -> CLIPTokenizer: # TODO remove legacy ckpt fallback handling - return (getattr(model, 'tokenizer', None) # diffusers - or model.cond_stage_model.tokenizer) # ldm + return ( + getattr(model, "tokenizer", None) # diffusers + or model.cond_stage_model.tokenizer + ) # ldm + def get_text_encoder(model) -> Any: # TODO remove legacy ckpt fallback handling - return (getattr(model, 'text_encoder', None) # diffusers - or UnsqueezingLDMTransformer(model.cond_stage_model.transformer)) # ldm + return getattr( + model, "text_encoder", None + ) or UnsqueezingLDMTransformer( # diffusers + model.cond_stage_model.transformer + ) # ldm + class UnsqueezingLDMTransformer: def __init__(self, ldm_transformer): @@ -40,28 +55,41 @@ class UnsqueezingLDMTransformer: return insufficiently_unsqueezed_tensor.unsqueeze(0) -def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False): +def get_uc_and_c_and_ec( + prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False +): # lazy-load any deferred textual inversions. # this might take a couple of seconds the first time a textual inversion is used. 
- model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(prompt_string) + model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms( + prompt_string + ) tokenizer = get_tokenizer(model) text_encoder = get_text_encoder(model) - compel = Compel(tokenizer=tokenizer, - text_encoder=text_encoder, - textual_inversion_manager=model.textual_inversion_manager, - dtype_for_device_getter=torch_dtype) + compel = Compel( + tokenizer=tokenizer, + text_encoder=text_encoder, + textual_inversion_manager=model.textual_inversion_manager, + dtype_for_device_getter=torch_dtype, + ) # get rid of any newline characters prompt_string = prompt_string.replace("\n", " ") - positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string) - legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend) - positive_prompt: FlattenedPrompt|Blend + ( + positive_prompt_string, + negative_prompt_string, + ) = split_prompt_to_positive_and_negative(prompt_string) + legacy_blend = try_parse_legacy_blend( + positive_prompt_string, skip_normalize_legacy_blend + ) + positive_prompt: FlattenedPrompt | Blend if legacy_blend is not None: positive_prompt = legacy_blend else: positive_prompt = Compel.parse_prompt_string(positive_prompt_string) - negative_prompt: FlattenedPrompt|Blend = Compel.parse_prompt_string(negative_prompt_string) + negative_prompt: FlattenedPrompt | Blend = Compel.parse_prompt_string( + negative_prompt_string + ) if log_tokens or getattr(Globals, "log_tokenization", False): log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer) @@ -71,42 +99,70 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l tokens_count = get_max_token_count(tokenizer, positive_prompt) - ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count, - cross_attention_control_args=options.get( - 'cross_attention_control', None)) + ec = InvokeAIDiffuserComponent.ExtraConditioningInfo( + tokens_count_including_eos_bos=tokens_count, + cross_attention_control_args=options.get("cross_attention_control", None), + ) return uc, c, ec -def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = False) -> ( - Union[FlattenedPrompt, Blend], FlattenedPrompt): - positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string) - legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend) - positive_prompt: FlattenedPrompt|Blend +def get_prompt_structure( + prompt_string, skip_normalize_legacy_blend: bool = False +) -> (Union[FlattenedPrompt, Blend], FlattenedPrompt): + ( + positive_prompt_string, + negative_prompt_string, + ) = split_prompt_to_positive_and_negative(prompt_string) + legacy_blend = try_parse_legacy_blend( + positive_prompt_string, skip_normalize_legacy_blend + ) + positive_prompt: FlattenedPrompt | Blend if legacy_blend is not None: positive_prompt = legacy_blend else: positive_prompt = Compel.parse_prompt_string(positive_prompt_string) - negative_prompt: FlattenedPrompt|Blend = Compel.parse_prompt_string(negative_prompt_string) + negative_prompt: FlattenedPrompt | Blend = Compel.parse_prompt_string( + negative_prompt_string + ) return positive_prompt, negative_prompt -def get_max_token_count(tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=True) -> int: + +def get_max_token_count( + tokenizer, prompt: Union[FlattenedPrompt, Blend], 
truncate_if_too_long=True +) -> int: if type(prompt) is Blend: blend: Blend = prompt - return max([get_max_token_count(tokenizer, c, truncate_if_too_long) for c in blend.prompts]) + return max( + [ + get_max_token_count(tokenizer, c, truncate_if_too_long) + for c in blend.prompts + ] + ) else: - return len(get_tokens_for_prompt_object(tokenizer, prompt, truncate_if_too_long)) + return len( + get_tokens_for_prompt_object(tokenizer, prompt, truncate_if_too_long) + ) -def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, truncate_if_too_long=True) -> [str]: - +def get_tokens_for_prompt_object( + tokenizer, parsed_prompt: FlattenedPrompt, truncate_if_too_long=True +) -> [str]: if type(parsed_prompt) is Blend: - raise ValueError("Blend is not supported here - you need to get tokens for each of its .children") + raise ValueError( + "Blend is not supported here - you need to get tokens for each of its .children" + ) - text_fragments = [x.text if type(x) is Fragment else - (" ".join([f.text for f in x.original]) if type(x) is CrossAttentionControlSubstitute else - str(x)) - for x in parsed_prompt.children] + text_fragments = [ + x.text + if type(x) is Fragment + else ( + " ".join([f.text for f in x.original]) + if type(x) is CrossAttentionControlSubstitute + else str(x) + ) + for x in parsed_prompt.children + ] text = " ".join(text_fragments) tokens = tokenizer.tokenize(text) if truncate_if_too_long: @@ -116,39 +172,47 @@ def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, trun def split_prompt_to_positive_and_negative(prompt_string_uncleaned: str): - unconditioned_words = '' - unconditional_regex = r'\[(.*?)\]' + unconditioned_words = "" + unconditional_regex = r"\[(.*?)\]" unconditionals = re.findall(unconditional_regex, prompt_string_uncleaned) if len(unconditionals) > 0: - unconditioned_words = ' '.join(unconditionals) + unconditioned_words = " ".join(unconditionals) # Remove Unconditioned Words From Prompt unconditional_regex_compile = re.compile(unconditional_regex) - clean_prompt = unconditional_regex_compile.sub(' ', prompt_string_uncleaned) - prompt_string_cleaned = re.sub(' +', ' ', clean_prompt) + clean_prompt = unconditional_regex_compile.sub(" ", prompt_string_uncleaned) + prompt_string_cleaned = re.sub(" +", " ", clean_prompt) else: prompt_string_cleaned = prompt_string_uncleaned return prompt_string_cleaned, unconditioned_words -def log_tokenization(positive_prompt: Union[Blend, FlattenedPrompt], - negative_prompt: Union[Blend, FlattenedPrompt], - tokenizer): +def log_tokenization( + positive_prompt: Union[Blend, FlattenedPrompt], + negative_prompt: Union[Blend, FlattenedPrompt], + tokenizer, +): print(f"\n>> [TOKENLOG] Parsed Prompt: {positive_prompt}") print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {negative_prompt}") log_tokenization_for_prompt_object(positive_prompt, tokenizer) - log_tokenization_for_prompt_object(negative_prompt, tokenizer, display_label_prefix="(negative prompt)") + log_tokenization_for_prompt_object( + negative_prompt, tokenizer, display_label_prefix="(negative prompt)" + ) -def log_tokenization_for_prompt_object(p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None): +def log_tokenization_for_prompt_object( + p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None +): display_label_prefix = display_label_prefix or "" if type(p) is Blend: blend: Blend = p for i, c in enumerate(blend.prompts): log_tokenization_for_prompt_object( - c, tokenizer, - 
display_label_prefix=f"{display_label_prefix}(blend part {i + 1}, weight={blend.weights[i]})") + c, + tokenizer, + display_label_prefix=f"{display_label_prefix}(blend part {i + 1}, weight={blend.weights[i]})", + ) elif type(p) is FlattenedPrompt: flattened_prompt: FlattenedPrompt = p if flattened_prompt.wants_cross_attention_control: @@ -163,18 +227,26 @@ def log_tokenization_for_prompt_object(p: Union[Blend, FlattenedPrompt], tokeniz edited_fragments.append(f) original_text = " ".join([x.text for x in original_fragments]) - log_tokenization_for_text(original_text, tokenizer, - display_label=f"{display_label_prefix}(.swap originals)") + log_tokenization_for_text( + original_text, + tokenizer, + display_label=f"{display_label_prefix}(.swap originals)", + ) edited_text = " ".join([x.text for x in edited_fragments]) - log_tokenization_for_text(edited_text, tokenizer, - display_label=f"{display_label_prefix}(.swap replacements)") + log_tokenization_for_text( + edited_text, + tokenizer, + display_label=f"{display_label_prefix}(.swap replacements)", + ) else: text = " ".join([x.text for x in flattened_prompt.children]) - log_tokenization_for_text(text, tokenizer, display_label=display_label_prefix) + log_tokenization_for_text( + text, tokenizer, display_label=display_label_prefix + ) def log_tokenization_for_text(text, tokenizer, display_label=None): - """ shows how the prompt is tokenized + """shows how the prompt is tokenized # usually tokens have '' to indicate end-of-word, # but for readability it has been replaced with ' ' """ @@ -185,7 +257,7 @@ def log_tokenization_for_text(text, tokenizer, display_label=None): totalTokens = len(tokens) for i in range(0, totalTokens): - token = tokens[i].replace('', ' ') + token = tokens[i].replace("", " ") # alternate color s = (usedTokens % 6) + 1 if i < tokenizer.model_max_length: @@ -196,14 +268,14 @@ def log_tokenization_for_text(text, tokenizer, display_label=None): if usedTokens > 0: print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):') - print(f'{tokenized}\x1b[0m') + print(f"{tokenized}\x1b[0m") if discarded != "": - print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):') - print(f'{discarded}\x1b[0m') + print(f"\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):") + print(f"{discarded}\x1b[0m") -def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]: +def try_parse_legacy_blend(text: str, skip_normalize: bool = False) -> Optional[Blend]: weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize) if len(weighted_subprompts) <= 1: return None @@ -214,10 +286,12 @@ def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Bl parsed_conjunctions = [pp.parse_conjunction(x) for x in strings] flattened_prompts = [x.prompts[0] for x in parsed_conjunctions] - return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize) + return Blend( + prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize + ) -def split_weighted_subprompts(text, skip_normalize=False)->list: +def split_weighted_subprompts(text, skip_normalize=False) -> list: """ Legacy blend parsing. 
@@ -226,7 +300,8 @@ def split_weighted_subprompts(text, skip_normalize=False)->list: if ':' has no value defined, defaults to 1.0 repeats until no text remaining """ - prompt_parser = re.compile(""" + prompt_parser = re.compile( + """ (?P # capture group for 'prompt' (?:\\\:|[^:])+ # match one or more non ':' characters or escaped colons '\:' ) # end 'prompt' @@ -239,16 +314,20 @@ def split_weighted_subprompts(text, skip_normalize=False)->list: | # OR $ # else, if no ':' then match end of line ) # end non-capture group - """, re.VERBOSE) - parsed_prompts = [(match.group("prompt").replace("\\:", ":"), float( - match.group("weight") or 1)) for match in re.finditer(prompt_parser, text)] + """, + re.VERBOSE, + ) + parsed_prompts = [ + (match.group("prompt").replace("\\:", ":"), float(match.group("weight") or 1)) + for match in re.finditer(prompt_parser, text) + ] if skip_normalize: return parsed_prompts weight_sum = sum(map(lambda x: x[1], parsed_prompts)) if weight_sum == 0: print( - "* Warning: Subprompt weights add up to zero. Discarding and using even weights instead.") + "* Warning: Subprompt weights add up to zero. Discarding and using even weights instead." + ) equal_weight = 1 / max(len(parsed_prompts), 1) return [(x[0], equal_weight) for x in parsed_prompts] return [(x[0], x[1] / weight_sum) for x in parsed_prompts] - diff --git a/invokeai/backend/restoration/__init__.py b/invokeai/backend/restoration/__init__.py index e784ea52b8..165ef080b3 100644 --- a/invokeai/backend/restoration/__init__.py +++ b/invokeai/backend/restoration/__init__.py @@ -1,4 +1,4 @@ -''' -Initialization file for the ldm.invoke.restoration package -''' +""" +Initialization file for the invokeai.backend.restoration package +""" from .base import Restoration diff --git a/invokeai/backend/restoration/base.py b/invokeai/backend/restoration/base.py index 036d56a271..0957811fc3 100644 --- a/invokeai/backend/restoration/base.py +++ b/invokeai/backend/restoration/base.py @@ -1,22 +1,24 @@ -class Restoration(): +class Restoration: def __init__(self) -> None: pass - def load_face_restore_models(self, gfpgan_model_path='./models/gfpgan/GFPGANv1.4.pth'): + def load_face_restore_models( + self, gfpgan_model_path="./models/gfpgan/GFPGANv1.4.pth" + ): # Load GFPGAN gfpgan = self.load_gfpgan(gfpgan_model_path) if gfpgan.gfpgan_model_exists: - print('>> GFPGAN Initialized') + print(">> GFPGAN Initialized") else: - print('>> GFPGAN Disabled') + print(">> GFPGAN Disabled") gfpgan = None # Load CodeFormer codeformer = self.load_codeformer() if codeformer.codeformer_model_exists: - print('>> CodeFormer Initialized') + print(">> CodeFormer Initialized") else: - print('>> CodeFormer Disabled') + print(">> CodeFormer Disabled") codeformer = None return gfpgan, codeformer @@ -24,15 +26,18 @@ class Restoration(): # Face Restore Models def load_gfpgan(self, gfpgan_model_path): from .gfpgan import GFPGAN + return GFPGAN(gfpgan_model_path) def load_codeformer(self): from .codeformer import CodeFormerRestoration + return CodeFormerRestoration() # Upscale Models def load_esrgan(self, esrgan_bg_tile=400): from .realesrgan import ESRGAN + esrgan = ESRGAN(esrgan_bg_tile) - print('>> ESRGAN Initialized') - return esrgan; + print(">> ESRGAN Initialized") + return esrgan diff --git a/invokeai/backend/restoration/codeformer.py b/invokeai/backend/restoration/codeformer.py index ab4ce82376..94add72b00 100644 --- a/invokeai/backend/restoration/codeformer.py +++ b/invokeai/backend/restoration/codeformer.py @@ -1,17 +1,21 @@ import os -import torch 
-import numpy as np -import warnings import sys -from invokeai.backend.globals import Globals +import warnings -pretrained_model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' +import numpy as np +import torch -class CodeFormerRestoration(): - def __init__(self, - codeformer_dir='models/codeformer', - codeformer_model_path='codeformer.pth') -> None: +from ..globals import Globals +pretrained_model_url = ( + "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth" +) + + +class CodeFormerRestoration: + def __init__( + self, codeformer_dir="models/codeformer", codeformer_model_path="codeformer.pth" + ) -> None: if not os.path.isabs(codeformer_dir): codeformer_dir = os.path.join(Globals.root, codeformer_dir) @@ -19,22 +23,23 @@ class CodeFormerRestoration(): self.codeformer_model_exists = os.path.isfile(self.model_path) if not self.codeformer_model_exists: - print('## NOT FOUND: CodeFormer model not found at ' + self.model_path) + print("## NOT FOUND: CodeFormer model not found at " + self.model_path) sys.path.append(os.path.abspath(codeformer_dir)) def process(self, image, strength, device, seed=None, fidelity=0.75): if seed is not None: - print(f'>> CodeFormer - Restoring Faces for image seed:{seed}') + print(f">> CodeFormer - Restoring Faces for image seed:{seed}") with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=DeprecationWarning) - warnings.filterwarnings('ignore', category=UserWarning) + warnings.filterwarnings("ignore", category=DeprecationWarning) + warnings.filterwarnings("ignore", category=UserWarning) - from basicsr.utils.download_util import load_file_from_url from basicsr.utils import img2tensor, tensor2img + from basicsr.utils.download_util import load_file_from_url from facexlib.utils.face_restoration_helper import FaceRestoreHelper - from ldm.invoke.restoration.codeformer_arch import CodeFormer - from torchvision.transforms.functional import normalize from PIL import Image + from torchvision.transforms.functional import normalize + + from .codeformer_arch import CodeFormer cf_class = CodeFormer @@ -43,28 +48,31 @@ class CodeFormerRestoration(): codebook_size=1024, n_head=8, n_layers=9, - connect_list=['32', '64', '128', '256'] + connect_list=["32", "64", "128", "256"], ).to(device) # note that this file should already be downloaded and cached at # this point - checkpoint_path = load_file_from_url(url=pretrained_model_url, - model_dir=os.path.abspath(os.path.dirname(self.model_path)), - progress=True + checkpoint_path = load_file_from_url( + url=pretrained_model_url, + model_dir=os.path.abspath(os.path.dirname(self.model_path)), + progress=True, ) - checkpoint = torch.load(checkpoint_path)['params_ema'] + checkpoint = torch.load(checkpoint_path)["params_ema"] cf.load_state_dict(checkpoint) cf.eval() - image = image.convert('RGB') + image = image.convert("RGB") # Codeformer expects a BGR np array; make array and flip channels - bgr_image_array = np.array(image, dtype=np.uint8)[...,::-1] + bgr_image_array = np.array(image, dtype=np.uint8)[..., ::-1] face_helper = FaceRestoreHelper( upscale_factor=1, use_parse=True, device=device, - model_rootpath=os.path.join(Globals.root,'models','gfpgan','weights'), + model_rootpath=os.path.join( + Globals.root, "models", "gfpgan", "weights" + ), ) face_helper.clean_all() face_helper.read_image(bgr_image_array) @@ -72,30 +80,35 @@ class CodeFormerRestoration(): face_helper.align_warp_face() for idx, cropped_face in enumerate(face_helper.cropped_faces): - 
cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) - normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) + cropped_face_t = img2tensor( + cropped_face / 255.0, bgr2rgb=True, float32=True + ) + normalize( + cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True + ) cropped_face_t = cropped_face_t.unsqueeze(0).to(device) try: with torch.no_grad(): output = cf(cropped_face_t, w=fidelity, adain=True)[0] - restored_face = tensor2img(output.squeeze(0), rgb2bgr=True, min_max=(-1, 1)) + restored_face = tensor2img( + output.squeeze(0), rgb2bgr=True, min_max=(-1, 1) + ) del output torch.cuda.empty_cache() except RuntimeError as error: - print(f'\tFailed inference for CodeFormer: {error}.') + print(f"\tFailed inference for CodeFormer: {error}.") restored_face = cropped_face - restored_face = restored_face.astype('uint8') + restored_face = restored_face.astype("uint8") face_helper.add_restored_face(restored_face) - face_helper.get_inverse_affine(None) restored_img = face_helper.paste_faces_to_input_image() # Flip the channels back to RGB - res = Image.fromarray(restored_img[...,::-1]) + res = Image.fromarray(restored_img[..., ::-1]) if strength < 1.0: # Resize the image to the new image if the sizes have changed diff --git a/invokeai/backend/restoration/codeformer_arch.py b/invokeai/backend/restoration/codeformer_arch.py index dc71c86b33..0f6b881020 100644 --- a/invokeai/backend/restoration/codeformer_arch.py +++ b/invokeai/backend/restoration/codeformer_arch.py @@ -1,13 +1,15 @@ import math +from typing import List, Optional + import numpy as np import torch -from torch import nn, Tensor import torch.nn.functional as F -from typing import Optional, List - -from .vqgan_arch import * from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY +from torch import Tensor, nn + +from .vqgan_arch import * + def calc_mean_std(feat, eps=1e-5): """Calculate mean and std for adaptive_instance_normalization. @@ -18,13 +20,14 @@ def calc_mean_std(feat, eps=1e-5): divide-by-zero. Default: 1e-5. """ size = feat.size() - assert len(size) == 4, 'The input feature should be 4D tensor.' + assert len(size) == 4, "The input feature should be 4D tensor." b, c = size[:2] feat_var = feat.view(b, c, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(b, c, 1, 1) feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) return feat_mean, feat_std + def adaptive_instance_normalization(content_feat, style_feat): """Adaptive instance normalization. @@ -38,7 +41,9 @@ def adaptive_instance_normalization(content_feat, style_feat): size = content_feat.size() style_mean, style_std = calc_mean_std(style_feat) content_mean, content_std = calc_mean_std(content_feat) - normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) + normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand( + size + ) return normalized_feat * style_std.expand(size) + style_mean.expand(size) @@ -48,7 +53,9 @@ class PositionEmbeddingSine(nn.Module): used by the Attention is all you need paper, generalized to work on images. 
""" - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + def __init__( + self, num_pos_feats=64, temperature=10000, normalize=False, scale=None + ): super().__init__() self.num_pos_feats = num_pos_feats self.temperature = temperature @@ -61,7 +68,9 @@ class PositionEmbeddingSine(nn.Module): def forward(self, x, mask=None): if mask is None: - mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) + mask = torch.zeros( + (x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool + ) not_mask = ~mask y_embed = not_mask.cumsum(1, dtype=torch.float32) x_embed = not_mask.cumsum(2, dtype=torch.float32) @@ -84,6 +93,7 @@ class PositionEmbeddingSine(nn.Module): pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos + def _get_activation_fn(activation): """Return an activation function given a string""" if activation == "relu": @@ -92,11 +102,13 @@ def _get_activation_fn(activation): return F.gelu if activation == "glu": return F.glu - raise RuntimeError(F"activation should be relu/gelu, not {activation}.") + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") class TransformerSALayer(nn.Module): - def __init__(self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu"): + def __init__( + self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu" + ): super().__init__() self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout) # Implementation of Feedforward model - MLP @@ -114,16 +126,19 @@ class TransformerSALayer(nn.Module): def with_pos_embed(self, tensor, pos: Optional[Tensor]): return tensor if pos is None else tensor + pos - def forward(self, tgt, - tgt_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - + def forward( + self, + tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): # self attention tgt2 = self.norm1(tgt) q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] + tgt2 = self.self_attn( + q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask + )[0] tgt = tgt + self.dropout1(tgt2) # ffn @@ -132,20 +147,23 @@ class TransformerSALayer(nn.Module): tgt = tgt + self.dropout2(tgt2) return tgt + class Fuse_sft_block(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() - self.encode_enc = ResBlock(2*in_ch, out_ch) + self.encode_enc = ResBlock(2 * in_ch, out_ch) self.scale = nn.Sequential( - nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), - nn.LeakyReLU(0.2, True), - nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1)) + nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), + nn.LeakyReLU(0.2, True), + nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1), + ) self.shift = nn.Sequential( - nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), - nn.LeakyReLU(0.2, True), - nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1)) + nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), + nn.LeakyReLU(0.2, True), + nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1), + ) def forward(self, enc_feat, dec_feat, w=1): enc_feat = self.encode_enc(torch.cat([enc_feat, dec_feat], dim=1)) @@ -158,11 +176,19 @@ class Fuse_sft_block(nn.Module): @ARCH_REGISTRY.register() class CodeFormer(VQAutoEncoder): - def __init__(self, dim_embd=512, n_head=8, n_layers=9, - codebook_size=1024, 
latent_size=256, - connect_list=['32', '64', '128', '256'], - fix_modules=['quantize','generator']): - super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size) + def __init__( + self, + dim_embd=512, + n_head=8, + n_layers=9, + codebook_size=1024, + latent_size=256, + connect_list=["32", "64", "128", "256"], + fix_modules=["quantize", "generator"], + ): + super(CodeFormer, self).__init__( + 512, 64, [1, 2, 2, 4, 4, 8], "nearest", 2, [16], codebook_size + ) if fix_modules is not None: for module in fix_modules: @@ -172,33 +198,53 @@ class CodeFormer(VQAutoEncoder): self.connect_list = connect_list self.n_layers = n_layers self.dim_embd = dim_embd - self.dim_mlp = dim_embd*2 + self.dim_mlp = dim_embd * 2 self.position_emb = nn.Parameter(torch.zeros(latent_size, self.dim_embd)) self.feat_emb = nn.Linear(256, self.dim_embd) # transformer - self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) - for _ in range(self.n_layers)]) + self.ft_layers = nn.Sequential( + *[ + TransformerSALayer( + embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0 + ) + for _ in range(self.n_layers) + ] + ) # logits_predict head self.idx_pred_layer = nn.Sequential( - nn.LayerNorm(dim_embd), - nn.Linear(dim_embd, codebook_size, bias=False)) + nn.LayerNorm(dim_embd), nn.Linear(dim_embd, codebook_size, bias=False) + ) self.channels = { - '16': 512, - '32': 256, - '64': 256, - '128': 128, - '256': 128, - '512': 64, + "16": 512, + "32": 256, + "64": 256, + "128": 128, + "256": 128, + "512": 64, } # after second residual block for > 16, before attn layer for ==16 - self.fuse_encoder_block = {'512':2, '256':5, '128':8, '64':11, '32':14, '16':18} + self.fuse_encoder_block = { + "512": 2, + "256": 5, + "128": 8, + "64": 11, + "32": 14, + "16": 18, + } # after first residual block for > 16, before attn layer for ==16 - self.fuse_generator_block = {'16':6, '32': 9, '64':12, '128':15, '256':18, '512':21} + self.fuse_generator_block = { + "16": 6, + "32": 9, + "64": 12, + "128": 15, + "256": 18, + "512": 21, + } # fuse_convs_dict self.fuse_convs_dict = nn.ModuleDict() @@ -227,20 +273,20 @@ class CodeFormer(VQAutoEncoder): lq_feat = x # ################# Transformer ################### # quant_feat, codebook_loss, quant_stats = self.quantize(lq_feat) - pos_emb = self.position_emb.unsqueeze(1).repeat(1,x.shape[0],1) + pos_emb = self.position_emb.unsqueeze(1).repeat(1, x.shape[0], 1) # BCHW -> BC(HW) -> (HW)BC - feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2,0,1)) + feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2, 0, 1)) query_emb = feat_emb # Transformer encoder for layer in self.ft_layers: query_emb = layer(query_emb, query_pos=pos_emb) # output logits - logits = self.idx_pred_layer(query_emb) # (hw)bn - logits = logits.permute(1,0,2) # (hw)bn -> b(hw)n + logits = self.idx_pred_layer(query_emb) # (hw)bn + logits = logits.permute(1, 0, 2) # (hw)bn -> b(hw)n - if code_only: # for training stage II - # logits doesn't need softmax before cross_entropy loss + if code_only: # for training stage II + # logits doesn't need softmax before cross_entropy loss return logits, lq_feat # ################# Quantization ################### @@ -251,12 +297,14 @@ class CodeFormer(VQAutoEncoder): # ------------ soft_one_hot = F.softmax(logits, dim=2) _, top_idx = torch.topk(soft_one_hot, 1, dim=2) - quant_feat = self.quantize.get_codebook_feat(top_idx, shape=[x.shape[0],16,16,256]) + quant_feat = 
self.quantize.get_codebook_feat( + top_idx, shape=[x.shape[0], 16, 16, 256] + ) # preserve gradients # quant_feat = lq_feat + (quant_feat - lq_feat).detach() if detach_16: - quant_feat = quant_feat.detach() # for training stage III + quant_feat = quant_feat.detach() # for training stage III if adain: quant_feat = adaptive_instance_normalization(quant_feat, lq_feat) @@ -266,10 +314,12 @@ class CodeFormer(VQAutoEncoder): for i, block in enumerate(self.generator.blocks): x = block(x) - if i in fuse_list: # fuse after i-th block + if i in fuse_list: # fuse after i-th block f_size = str(x.shape[-1]) - if w>0: - x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w) + if w > 0: + x = self.fuse_convs_dict[f_size]( + enc_feat_dict[f_size].detach(), x, w + ) out = x # logits doesn't need softmax before cross_entropy loss return out, logits, lq_feat diff --git a/invokeai/backend/restoration/gfpgan.py b/invokeai/backend/restoration/gfpgan.py index 5fff52583f..d13745d0c6 100644 --- a/invokeai/backend/restoration/gfpgan.py +++ b/invokeai/backend/restoration/gfpgan.py @@ -1,26 +1,25 @@ -import torch -import warnings import os import sys -import numpy as np -from invokeai.backend.globals import Globals +import warnings +import numpy as np +import torch from PIL import Image +from invokeai.backend.globals import Globals -class GFPGAN(): - def __init__( - self, - gfpgan_model_path='models/gfpgan/GFPGANv1.4.pth' - ) -> None: +class GFPGAN: + def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None: if not os.path.isabs(gfpgan_model_path): - gfpgan_model_path=os.path.abspath(os.path.join(Globals.root,gfpgan_model_path)) + gfpgan_model_path = os.path.abspath( + os.path.join(Globals.root, gfpgan_model_path) + ) self.model_path = gfpgan_model_path self.gfpgan_model_exists = os.path.isfile(self.model_path) if not self.gfpgan_model_exists: - print('## NOT FOUND: GFPGAN model not found at ' + self.model_path) + print("## NOT FOUND: GFPGAN model not found at " + self.model_path) return None def model_exists(self): @@ -28,40 +27,40 @@ class GFPGAN(): def process(self, image, strength: float, seed: str = None): if seed is not None: - print(f'>> GFPGAN - Restoring Faces for image seed:{seed}') + print(f">> GFPGAN - Restoring Faces for image seed:{seed}") with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=DeprecationWarning) - warnings.filterwarnings('ignore', category=UserWarning) + warnings.filterwarnings("ignore", category=DeprecationWarning) + warnings.filterwarnings("ignore", category=UserWarning) cwd = os.getcwd() - os.chdir(os.path.join(Globals.root,'models')) + os.chdir(os.path.join(Globals.root, "models")) try: from gfpgan import GFPGANer + self.gfpgan = GFPGANer( model_path=self.model_path, upscale=1, - arch='clean', + arch="clean", channel_multiplier=2, bg_upsampler=None, ) except Exception: import traceback - print('>> Error loading GFPGAN:', file=sys.stderr) + + print(">> Error loading GFPGAN:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) os.chdir(cwd) if self.gfpgan is None: + print(f">> WARNING: GFPGAN not initialized.") print( - f'>> WARNING: GFPGAN not initialized.' 
- ) - print( - f'>> Download https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth to {self.model_path}' + f">> Download https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth to {self.model_path}" ) - image = image.convert('RGB') + image = image.convert("RGB") # GFPGAN expects a BGR np array; make array and flip channels - bgr_image_array = np.array(image, dtype=np.uint8)[...,::-1] + bgr_image_array = np.array(image, dtype=np.uint8)[..., ::-1] _, _, restored_img = self.gfpgan.enhance( bgr_image_array, @@ -71,7 +70,7 @@ class GFPGAN(): ) # Flip the channels back to RGB - res = Image.fromarray(restored_img[...,::-1]) + res = Image.fromarray(restored_img[..., ::-1]) if strength < 1.0: # Resize the image to the new image if the sizes have changed @@ -79,7 +78,6 @@ class GFPGAN(): image = image.resize(res.size) res = Image.blend(image, res, strength) - if torch.cuda.is_available(): torch.cuda.empty_cache() self.gfpgan = None diff --git a/invokeai/backend/restoration/outcrop.py b/invokeai/backend/restoration/outcrop.py index 60bedea98c..0778d7cc8f 100644 --- a/invokeai/backend/restoration/outcrop.py +++ b/invokeai/backend/restoration/outcrop.py @@ -1,23 +1,25 @@ -import warnings import math +import warnings + from PIL import Image, ImageFilter + class Outcrop(object): def __init__( - self, - image, - generate, # current generate object + self, + image, + generate, # current generate object ): - self.image = image - self.generate = generate + self.image = image + self.generate = generate - def process ( - self, - extents:dict, - opt, # current options - orig_opt, # ones originally used to generate the image - image_callback = None, - prefix = None + def process( + self, + extents: dict, + opt, # current options + orig_opt, # ones originally used to generate the image + image_callback=None, + prefix=None, ): # grow and mask the image extended_image = self._extend_all(extents) @@ -27,29 +29,33 @@ class Outcrop(object): self.generate.sampler_name = opt.sampler_name self.generate._set_sampler() - def wrapped_callback(img,seed,**kwargs): - preferred_seed = orig_opt.seed if orig_opt.seed is not None and orig_opt.seed >= 0 else seed - image_callback(img,preferred_seed,use_prefix=prefix,**kwargs) + def wrapped_callback(img, seed, **kwargs): + preferred_seed = ( + orig_opt.seed + if orig_opt.seed is not None and orig_opt.seed >= 0 + else seed + ) + image_callback(img, preferred_seed, use_prefix=prefix, **kwargs) - result= self.generate.prompt2image( + result = self.generate.prompt2image( opt.prompt, - seed = opt.seed or orig_opt.seed, - sampler = self.generate.sampler, - steps = opt.steps, - cfg_scale = opt.cfg_scale, - ddim_eta = self.generate.ddim_eta, - width = extended_image.width, - height = extended_image.height, - init_img = extended_image, - strength = 0.90, - image_callback = wrapped_callback if image_callback else None, - seam_size = opt.seam_size or 96, - seam_blur = opt.seam_blur or 16, - seam_strength = opt.seam_strength or 0.7, - seam_steps = 20, - tile_size = 32, - color_match = True, - force_outpaint = True, # this just stops the warning about erased regions + seed=opt.seed or orig_opt.seed, + sampler=self.generate.sampler, + steps=opt.steps, + cfg_scale=opt.cfg_scale, + ddim_eta=self.generate.ddim_eta, + width=extended_image.width, + height=extended_image.height, + init_img=extended_image, + strength=0.90, + image_callback=wrapped_callback if image_callback else None, + seam_size=opt.seam_size or 96, + seam_blur=opt.seam_blur or 16, + 
seam_strength=opt.seam_strength or 0.7, + seam_steps=20, + tile_size=32, + color_match=True, + force_outpaint=True, # this just stops the warning about erased regions ) # swap sampler back @@ -57,52 +63,57 @@ class Outcrop(object): return result def _extend_all( - self, - extents:dict, + self, + extents: dict, ) -> Image: - ''' + """ Extend the image in direction ('top','bottom','left','right') by the indicated value. The image canvas is extended, and the empty rectangular section will be filled with a blurred copy of the adjacent image. - ''' + """ image = self.image for direction in extents: - assert direction in ['top', 'left', 'bottom', 'right'],'Direction must be one of "top", "left", "bottom", "right"' + assert direction in [ + "top", + "left", + "bottom", + "right", + ], 'Direction must be one of "top", "left", "bottom", "right"' pixels = extents[direction] # round pixels up to the nearest 64 - pixels = math.ceil(pixels/64) * 64 - print(f'>> extending image {direction}ward by {pixels} pixels') - image = self._rotate(image,direction) - image = self._extend(image,pixels) - image = self._rotate(image,direction,reverse=True) + pixels = math.ceil(pixels / 64) * 64 + print(f">> extending image {direction}ward by {pixels} pixels") + image = self._rotate(image, direction) + image = self._extend(image, pixels) + image = self._rotate(image, direction, reverse=True) return image - def _rotate(self,image:Image,direction:str,reverse=False) -> Image: - ''' + def _rotate(self, image: Image, direction: str, reverse=False) -> Image: + """ Rotates image so that the area to extend is always at the top top. Simplifies logic later. The reverse argument, if true, will undo the previous transpose. - ''' + """ transposes = { - 'right': ['ROTATE_90','ROTATE_270'], - 'bottom': ['ROTATE_180','ROTATE_180'], - 'left': ['ROTATE_270','ROTATE_90'] + "right": ["ROTATE_90", "ROTATE_270"], + "bottom": ["ROTATE_180", "ROTATE_180"], + "left": ["ROTATE_270", "ROTATE_90"], } if direction not in transposes: return image transpose = transposes[direction][1 if reverse else 0] return image.transpose(Image.Transpose.__dict__[transpose]) - def _extend(self,image:Image,pixels:int)-> Image: - extended_img = Image.new('RGBA',(image.width,image.height+pixels)) + def _extend(self, image: Image, pixels: int) -> Image: + extended_img = Image.new("RGBA", (image.width, image.height + pixels)) - extended_img.paste((0,0,0),[0,0,image.width,image.height+pixels]) - extended_img.paste(image,box=(0,pixels)) + extended_img.paste((0, 0, 0), [0, 0, image.width, image.height + pixels]) + extended_img.paste(image, box=(0, pixels)) # now make the top part transparent to use as a mask - alpha = extended_img.getchannel('A') - alpha.paste(0,(0,0,extended_img.width,pixels)) + alpha = extended_img.getchannel("A") + alpha.paste(0, (0, 0, extended_img.width, pixels)) extended_img.putalpha(alpha) return extended_img diff --git a/invokeai/backend/restoration/outpaint.py b/invokeai/backend/restoration/outpaint.py index 06b69188a5..184db10fa3 100644 --- a/invokeai/backend/restoration/outpaint.py +++ b/invokeai/backend/restoration/outpaint.py @@ -1,39 +1,43 @@ -import warnings import math +import warnings + from PIL import Image, ImageFilter + class Outpaint(object): def __init__(self, image, generate): - self.image = image - self.generate = generate + self.image = image + self.generate = generate - def process(self, opt, old_opt, image_callback = None, prefix = None): + def process(self, opt, old_opt, image_callback=None, prefix=None): image = 
self._create_outpaint_image(self.image, opt.out_direction) - seed = old_opt.seed + seed = old_opt.seed prompt = old_opt.prompt - def wrapped_callback(img,seed,**kwargs): - image_callback(img,seed,use_prefix=prefix,**kwargs) - + def wrapped_callback(img, seed, **kwargs): + image_callback(img, seed, use_prefix=prefix, **kwargs) return self.generate.prompt2image( prompt, - seed = seed, - sampler = self.generate.sampler, - steps = opt.steps, - cfg_scale = opt.cfg_scale, - ddim_eta = self.generate.ddim_eta, - width = opt.width, - height = opt.height, - init_img = image, - strength = 0.83, - image_callback = wrapped_callback, - prefix = prefix, + seed=seed, + sampler=self.generate.sampler, + steps=opt.steps, + cfg_scale=opt.cfg_scale, + ddim_eta=self.generate.ddim_eta, + width=opt.width, + height=opt.height, + init_img=image, + strength=0.83, + image_callback=wrapped_callback, + prefix=prefix, ) def _create_outpaint_image(self, image, direction_args): - assert len(direction_args) in [1, 2], 'Direction (-D) must have exactly one or two arguments.' + assert len(direction_args) in [ + 1, + 2, + ], "Direction (-D) must have exactly one or two arguments." if len(direction_args) == 1: direction = direction_args[0] @@ -42,19 +46,26 @@ class Outpaint(object): direction = direction_args[0] pixels = int(direction_args[1]) - assert direction in ['top', 'left', 'bottom', 'right'], 'Direction (-D) must be one of "top", "left", "bottom", "right"' + assert direction in [ + "top", + "left", + "bottom", + "right", + ], 'Direction (-D) must be one of "top", "left", "bottom", "right"' image = image.convert("RGBA") # we always extend top, but rotate to extend along the requested side - if direction == 'left': + if direction == "left": image = image.transpose(Image.Transpose.ROTATE_270) - elif direction == 'bottom': + elif direction == "bottom": image = image.transpose(Image.Transpose.ROTATE_180) - elif direction == 'right': + elif direction == "right": image = image.transpose(Image.Transpose.ROTATE_90) - pixels = image.height//2 if pixels is None else int(pixels) - assert 0 < pixels < image.height, 'Direction (-D) pixels length must be in the range 0 - image.size' + pixels = image.height // 2 if pixels is None else int(pixels) + assert ( + 0 < pixels < image.height + ), "Direction (-D) pixels length must be in the range 0 - image.size" # the top part of the image is taken from the source image mirrored # coordinates (0,0) are the upper left corner of an image @@ -74,19 +85,18 @@ class Outpaint(object): new_img.paste(bottom, (0, pixels)) # create a 10% dither in the middle - dither = min(image.height//10, pixels) + dither = min(image.height // 10, pixels) for x in range(0, image.width, 2): for y in range(pixels - dither, pixels + dither): (r, g, b, a) = new_img.getpixel((x, y)) new_img.putpixel((x, y), (r, g, b, 0)) # let's rotate back again - if direction == 'left': + if direction == "left": new_img = new_img.transpose(Image.Transpose.ROTATE_90) - elif direction == 'bottom': + elif direction == "bottom": new_img = new_img.transpose(Image.Transpose.ROTATE_180) - elif direction == 'right': + elif direction == "right": new_img = new_img.transpose(Image.Transpose.ROTATE_270) return new_img - diff --git a/invokeai/backend/restoration/realesrgan.py b/invokeai/backend/restoration/realesrgan.py index 0b99f8bbb6..ad6ad556f1 100644 --- a/invokeai/backend/restoration/realesrgan.py +++ b/invokeai/backend/restoration/realesrgan.py @@ -1,13 +1,15 @@ -import torch -import warnings -import numpy as np import os +import warnings 
-from invokeai.backend.globals import Globals +import numpy as np +import torch from PIL import Image from PIL.Image import Image as ImageType -class ESRGAN(): +from invokeai.backend.globals import Globals + + +class ESRGAN: def __init__(self, bg_tile_size=400) -> None: self.bg_tile_size = bg_tile_size @@ -22,12 +24,23 @@ class ESRGAN(): else: use_half_precision = True - from realesrgan.archs.srvgg_arch import SRVGGNetCompact from realesrgan import RealESRGANer + from realesrgan.archs.srvgg_arch import SRVGGNetCompact - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') - model_path = os.path.join(Globals.root, 'models/realesrgan/realesr-general-x4v3.pth') - wdn_model_path = os.path.join(Globals.root, 'models/realesrgan/realesr-general-wdn-x4v3.pth') + model = SRVGGNetCompact( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_conv=32, + upscale=4, + act_type="prelu", + ) + model_path = os.path.join( + Globals.root, "models/realesrgan/realesr-general-x4v3.pth" + ) + wdn_model_path = os.path.join( + Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth" + ) scale = 4 bg_upsampler = RealESRGANer( @@ -43,41 +56,49 @@ class ESRGAN(): return bg_upsampler - def process(self, image: ImageType, strength: float, seed: str = None, upsampler_scale: int = 2, denoise_str: float = 0.75): + def process( + self, + image: ImageType, + strength: float, + seed: str = None, + upsampler_scale: int = 2, + denoise_str: float = 0.75, + ): with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=DeprecationWarning) - warnings.filterwarnings('ignore', category=UserWarning) + warnings.filterwarnings("ignore", category=DeprecationWarning) + warnings.filterwarnings("ignore", category=UserWarning) try: upsampler = self.load_esrgan_bg_upsampler(denoise_str) except Exception: - import traceback import sys - print('>> Error loading Real-ESRGAN:', file=sys.stderr) + import traceback + + print(">> Error loading Real-ESRGAN:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) if upsampler_scale == 0: - print('>> Real-ESRGAN: Invalid scaling option. Image not upscaled.') + print(">> Real-ESRGAN: Invalid scaling option. 
Image not upscaled.") return image if seed is not None: print( - f'>> Real-ESRGAN Upscaling seed:{seed}, scale:{upsampler_scale}x, tile:{self.bg_tile_size}, denoise:{denoise_str}' + f">> Real-ESRGAN Upscaling seed:{seed}, scale:{upsampler_scale}x, tile:{self.bg_tile_size}, denoise:{denoise_str}" ) # ESRGAN outputs images with partial transparency if given RGBA images; convert to RGB image = image.convert("RGB") # REALSRGAN expects a BGR np array; make array and flip channels - bgr_image_array = np.array(image, dtype=np.uint8)[...,::-1] + bgr_image_array = np.array(image, dtype=np.uint8)[..., ::-1] output, _ = upsampler.enhance( bgr_image_array, outscale=upsampler_scale, - alpha_upsampler='realesrgan', + alpha_upsampler="realesrgan", ) # Flip the channels back to RGB - res = Image.fromarray(output[...,::-1]) + res = Image.fromarray(output[..., ::-1]) if strength < 1.0: # Resize the image to the new image if the sizes have changed diff --git a/invokeai/backend/restoration/vqgan_arch.py b/invokeai/backend/restoration/vqgan_arch.py index e78f90815d..96d5f04eee 100644 --- a/invokeai/backend/restoration/vqgan_arch.py +++ b/invokeai/backend/restoration/vqgan_arch.py @@ -1,23 +1,27 @@ -''' +""" VQGAN code, adapted from the original created by the Unleashing Transformers authors: https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py -''' +""" +import copy + import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import copy from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY + def normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + return torch.nn.GroupNorm( + num_groups=32, num_channels=in_channels, eps=1e-6, affine=True + ) @torch.jit.script def swish(x): - return x*torch.sigmoid(x) + return x * torch.sigmoid(x) # Define VQVAE classes @@ -28,7 +32,9 @@ class VectorQuantizer(nn.Module): self.emb_dim = emb_dim # dimension of embedding self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2 self.embedding = nn.Embedding(self.codebook_size, self.emb_dim) - self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size) + self.embedding.weight.data.uniform_( + -1.0 / self.codebook_size, 1.0 / self.codebook_size + ) def forward(self, z): # reshape z -> (batch, height, width, channel) and flatten @@ -36,23 +42,32 @@ class VectorQuantizer(nn.Module): z_flattened = z.view(-1, self.emb_dim) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z - d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight**2).sum(1) - \ - 2 * torch.matmul(z_flattened, self.embedding.weight.t()) + d = ( + (z_flattened**2).sum(dim=1, keepdim=True) + + (self.embedding.weight**2).sum(1) + - 2 * torch.matmul(z_flattened, self.embedding.weight.t()) + ) mean_distance = torch.mean(d) # find closest encodings # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1) - min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False) + min_encoding_scores, min_encoding_indices = torch.topk( + d, 1, dim=1, largest=False + ) # [0-1], higher score, higher confidence - min_encoding_scores = torch.exp(-min_encoding_scores/10) + min_encoding_scores = torch.exp(-min_encoding_scores / 10) - min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z) + min_encodings = torch.zeros( + min_encoding_indices.shape[0], self.codebook_size + ).to(z) 
min_encodings.scatter_(1, min_encoding_indices, 1) # get quantized latent vectors z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape) # compute loss for embedding - loss = torch.mean((z_q.detach()-z)**2) + self.beta * torch.mean((z_q - z.detach()) ** 2) + loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean( + (z_q - z.detach()) ** 2 + ) # preserve gradients z_q = z + (z_q - z).detach() @@ -62,18 +77,22 @@ class VectorQuantizer(nn.Module): # reshape back to match original input shape z_q = z_q.permute(0, 3, 1, 2).contiguous() - return z_q, loss, { - "perplexity": perplexity, - "min_encodings": min_encodings, - "min_encoding_indices": min_encoding_indices, - "min_encoding_scores": min_encoding_scores, - "mean_distance": mean_distance - } + return ( + z_q, + loss, + { + "perplexity": perplexity, + "min_encodings": min_encodings, + "min_encoding_indices": min_encoding_indices, + "min_encoding_scores": min_encoding_scores, + "mean_distance": mean_distance, + }, + ) def get_codebook_feat(self, indices, shape): # input indices: batch*token_num -> (batch*token_num)*1 # shape: batch, height, width, channel - indices = indices.view(-1,1) + indices = indices.view(-1, 1) min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices) min_encodings.scatter_(1, indices, 1) # get quantized latent vectors @@ -86,14 +105,24 @@ class VectorQuantizer(nn.Module): class GumbelQuantizer(nn.Module): - def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0): + def __init__( + self, + codebook_size, + emb_dim, + num_hiddens, + straight_through=False, + kl_weight=5e-4, + temp_init=1.0, + ): super().__init__() self.codebook_size = codebook_size # number of embeddings self.emb_dim = emb_dim # dimension of embedding self.straight_through = straight_through self.temperature = temp_init self.kl_weight = kl_weight - self.proj = nn.Conv2d(num_hiddens, codebook_size, 1) # projects last encoder layer to quantized logits + self.proj = nn.Conv2d( + num_hiddens, codebook_size, 1 + ) # projects last encoder layer to quantized logits self.embed = nn.Embedding(codebook_size, emb_dim) def forward(self, z): @@ -107,18 +136,21 @@ class GumbelQuantizer(nn.Module): # + kl divergence to the prior loss qy = F.softmax(logits, dim=1) - diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean() + diff = ( + self.kl_weight + * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean() + ) min_encoding_indices = soft_one_hot.argmax(dim=1) - return z_q, diff, { - "min_encoding_indices": min_encoding_indices - } + return z_q, diff, {"min_encoding_indices": min_encoding_indices} class Downsample(nn.Module): def __init__(self, in_channels): super().__init__() - self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=2, padding=0 + ) def forward(self, x): pad = (0, 1, 0, 1) @@ -130,7 +162,9 @@ class Downsample(nn.Module): class Upsample(nn.Module): def __init__(self, in_channels): super().__init__() - self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + self.conv = nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=1, padding=1 + ) def forward(self, x): x = F.interpolate(x, scale_factor=2.0, mode="nearest") @@ -145,11 +179,17 @@ class ResBlock(nn.Module): self.in_channels = in_channels self.out_channels = in_channels if out_channels is 
None else out_channels self.norm1 = normalize(in_channels) - self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.conv1 = nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) self.norm2 = normalize(out_channels) - self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.conv2 = nn.Conv2d( + out_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) if self.in_channels != self.out_channels: - self.conv_out = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.conv_out = nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0 + ) def forward(self, x_in): x = x_in @@ -172,32 +212,16 @@ class AttnBlock(nn.Module): self.norm = normalize(in_channels) self.q = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 + in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) self.k = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 + in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) self.v = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 + in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) self.proj_out = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 + in_channels, in_channels, kernel_size=1, stride=1, padding=0 ) def forward(self, x): @@ -209,26 +233,35 @@ class AttnBlock(nn.Module): # compute attention b, c, h, w = q.shape - q = q.reshape(b, c, h*w) + q = q.reshape(b, c, h * w) q = q.permute(0, 2, 1) - k = k.reshape(b, c, h*w) + k = k.reshape(b, c, h * w) w_ = torch.bmm(q, k) - w_ = w_ * (int(c)**(-0.5)) + w_ = w_ * (int(c) ** (-0.5)) w_ = F.softmax(w_, dim=2) # attend to values - v = v.reshape(b, c, h*w) + v = v.reshape(b, c, h * w) w_ = w_.permute(0, 2, 1) h_ = torch.bmm(v, w_) h_ = h_.reshape(b, c, h, w) h_ = self.proj_out(h_) - return x+h_ + return x + h_ class Encoder(nn.Module): - def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions): + def __init__( + self, + in_channels, + nf, + emb_dim, + ch_mult, + num_res_blocks, + resolution, + attn_resolutions, + ): super().__init__() self.nf = nf self.num_resolutions = len(ch_mult) @@ -237,7 +270,7 @@ class Encoder(nn.Module): self.attn_resolutions = attn_resolutions curr_res = self.resolution - in_ch_mult = (1,)+tuple(ch_mult) + in_ch_mult = (1,) + tuple(ch_mult) blocks = [] # initial convultion @@ -264,7 +297,9 @@ class Encoder(nn.Module): # normalise and convert to latent size blocks.append(normalize(block_in_ch)) - blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1)) + blocks.append( + nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1) + ) self.blocks = nn.ModuleList(blocks) def forward(self, x): @@ -286,11 +321,13 @@ class Generator(nn.Module): self.in_channels = emb_dim self.out_channels = 3 block_in_ch = self.nf * self.ch_mult[-1] - curr_res = self.resolution // 2 ** (self.num_resolutions-1) + curr_res = self.resolution // 2 ** (self.num_resolutions - 1) blocks = [] # initial conv - blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1)) + blocks.append( + nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1) + ) # non-local attention block blocks.append(ResBlock(block_in_ch, block_in_ch)) @@ -312,11 +349,14 @@ class Generator(nn.Module): curr_res = 
curr_res * 2 blocks.append(normalize(block_in_ch)) - blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1)) + blocks.append( + nn.Conv2d( + block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1 + ) + ) self.blocks = nn.ModuleList(blocks) - def forward(self, x): for block in self.blocks: x = block(x) @@ -326,8 +366,21 @@ class Generator(nn.Module): @ARCH_REGISTRY.register() class VQAutoEncoder(nn.Module): - def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=[16], codebook_size=1024, emb_dim=256, - beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None): + def __init__( + self, + img_size, + nf, + ch_mult, + quantizer="nearest", + res_blocks=2, + attn_resolutions=[16], + codebook_size=1024, + emb_dim=256, + beta=0.25, + gumbel_straight_through=False, + gumbel_kl_weight=1e-8, + model_path=None, + ): super().__init__() logger = get_root_logger() self.in_channels = 3 @@ -346,11 +399,13 @@ class VQAutoEncoder(nn.Module): self.ch_mult, self.n_blocks, self.resolution, - self.attn_resolutions + self.attn_resolutions, ) if self.quantizer_type == "nearest": - self.beta = beta #0.25 - self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta) + self.beta = beta # 0.25 + self.quantize = VectorQuantizer( + self.codebook_size, self.embed_dim, self.beta + ) elif self.quantizer_type == "gumbel": self.gumbel_num_hiddens = emb_dim self.straight_through = gumbel_straight_through @@ -360,7 +415,7 @@ class VQAutoEncoder(nn.Module): self.embed_dim, self.gumbel_num_hiddens, self.straight_through, - self.kl_weight + self.kl_weight, ) self.generator = Generator( self.nf, @@ -368,20 +423,23 @@ class VQAutoEncoder(nn.Module): self.ch_mult, self.n_blocks, self.resolution, - self.attn_resolutions + self.attn_resolutions, ) if model_path is not None: - chkpt = torch.load(model_path, map_location='cpu') - if 'params_ema' in chkpt: - self.load_state_dict(torch.load(model_path, map_location='cpu')['params_ema']) - logger.info(f'vqgan is loaded from: {model_path} [params_ema]') - elif 'params' in chkpt: - self.load_state_dict(torch.load(model_path, map_location='cpu')['params']) - logger.info(f'vqgan is loaded from: {model_path} [params]') + chkpt = torch.load(model_path, map_location="cpu") + if "params_ema" in chkpt: + self.load_state_dict( + torch.load(model_path, map_location="cpu")["params_ema"] + ) + logger.info(f"vqgan is loaded from: {model_path} [params_ema]") + elif "params" in chkpt: + self.load_state_dict( + torch.load(model_path, map_location="cpu")["params"] + ) + logger.info(f"vqgan is loaded from: {model_path} [params]") else: - raise ValueError(f'Wrong params!') - + raise ValueError(f"Wrong params!") def forward(self, x): x = self.encoder(x) @@ -390,46 +448,67 @@ class VQAutoEncoder(nn.Module): return x, codebook_loss, quant_stats - # patch based discriminator @ARCH_REGISTRY.register() class VQGANDiscriminator(nn.Module): def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None): super().__init__() - layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)] + layers = [ + nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), + nn.LeakyReLU(0.2, True), + ] ndf_mult = 1 ndf_mult_prev = 1 for n in range(1, n_layers): # gradually increase the number of filters ndf_mult_prev = ndf_mult - ndf_mult = min(2 ** n, 8) + ndf_mult = min(2**n, 8) layers += [ - nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, 
bias=False), + nn.Conv2d( + ndf * ndf_mult_prev, + ndf * ndf_mult, + kernel_size=4, + stride=2, + padding=1, + bias=False, + ), nn.BatchNorm2d(ndf * ndf_mult), - nn.LeakyReLU(0.2, True) + nn.LeakyReLU(0.2, True), ] ndf_mult_prev = ndf_mult - ndf_mult = min(2 ** n_layers, 8) + ndf_mult = min(2**n_layers, 8) layers += [ - nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False), + nn.Conv2d( + ndf * ndf_mult_prev, + ndf * ndf_mult, + kernel_size=4, + stride=1, + padding=1, + bias=False, + ), nn.BatchNorm2d(ndf * ndf_mult), - nn.LeakyReLU(0.2, True) + nn.LeakyReLU(0.2, True), ] layers += [ - nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)] # output 1 channel prediction map + nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1) + ] # output 1 channel prediction map self.main = nn.Sequential(*layers) if model_path is not None: - chkpt = torch.load(model_path, map_location='cpu') - if 'params_d' in chkpt: - self.load_state_dict(torch.load(model_path, map_location='cpu')['params_d']) - elif 'params' in chkpt: - self.load_state_dict(torch.load(model_path, map_location='cpu')['params']) + chkpt = torch.load(model_path, map_location="cpu") + if "params_d" in chkpt: + self.load_state_dict( + torch.load(model_path, map_location="cpu")["params_d"] + ) + elif "params" in chkpt: + self.load_state_dict( + torch.load(model_path, map_location="cpu")["params"] + ) else: - raise ValueError(f'Wrong params!') + raise ValueError(f"Wrong params!") def forward(self, x): return self.main(x) diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py index 13e28ce22d..05886f7b10 100644 --- a/invokeai/backend/stable_diffusion/__init__.py +++ b/invokeai/backend/stable_diffusion/__init__.py @@ -1,16 +1,16 @@ -''' +""" Initialization file for the invokeai.backend.stable_diffusion package -''' +""" +from .concepts_lib import HuggingFaceConceptsLibrary +from .diffusers_pipeline import ( + ConditioningData, + PipelineIntermediateState, + StableDiffusionGeneratorPipeline, +) from .diffusion import InvokeAIDiffuserComponent +from .diffusion.cross_attention_map_saving import AttentionMapSaver from .diffusion.ddim import DDIMSampler from .diffusion.ksampler import KSampler from .diffusion.plms import PLMSSampler -from .diffusion.cross_attention_map_saving import AttentionMapSaver from .diffusion.shared_invokeai_diffusion import PostprocessingSettings from .textual_inversion_manager import TextualInversionManager -from .concepts_lib import HuggingFaceConceptsLibrary -from .diffusers_pipeline import (StableDiffusionGeneratorPipeline, - ConditioningData, - PipelineIntermediateState, - StableDiffusionGeneratorPipeline - ) diff --git a/invokeai/backend/stable_diffusion/attention.py b/invokeai/backend/stable_diffusion/attention.py index 27fc5cf4bd..484b42c0bd 100644 --- a/invokeai/backend/stable_diffusion/attention.py +++ b/invokeai/backend/stable_diffusion/attention.py @@ -1,21 +1,22 @@ -from inspect import isfunction import math +from inspect import isfunction from typing import Callable, Optional import torch import torch.nn.functional as F -from torch import nn, einsum from einops import rearrange, repeat +from torch import einsum, nn from .diffusion import InvokeAICrossAttentionMixin from .diffusionmodules.util import checkpoint + def exists(val): return val is not None def uniq(arr): - return{el: True for el in arr}.keys() + return {el: True for el in arr}.keys() def default(val, d): @@ -47,19 +48,18 @@ class 
GEGLU(nn.Module): class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) + project_in = ( + nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) + if not glu + else GEGLU(dim, inner_dim) + ) self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) + project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) ) def forward(self, x): @@ -76,7 +76,9 @@ def zero_module(module): def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + return torch.nn.GroupNorm( + num_groups=32, num_channels=in_channels, eps=1e-6, affine=True + ) class LinearAttention(nn.Module): @@ -84,17 +86,21 @@ class LinearAttention(nn.Module): super().__init__() self.heads = heads hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False) self.to_out = nn.Conv2d(hidden_dim, dim, 1) def forward(self, x): b, c, h, w = x.shape qkv = self.to_qkv(x) - q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) + q, k, v = rearrange( + qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3 + ) k = k.softmax(dim=-1) - context = torch.einsum('bhdn,bhen->bhde', k, v) - out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + context = torch.einsum("bhdn,bhen->bhde", k, v) + out = torch.einsum("bhde,bhdn->bhen", context, q) + out = rearrange( + out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w + ) return self.to_out(out) @@ -104,26 +110,18 @@ class SpatialSelfAttention(nn.Module): self.in_channels = in_channels self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) def forward(self, x): h_ = x @@ -133,43 +131,45 @@ class SpatialSelfAttention(nn.Module): v = self.v(h_) # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) + b, c, h, w = q.shape + q = rearrange(q, "b c h w -> b (h w) c") + k = rearrange(k, "b c h w -> b c (h w)") + w_ = torch.einsum("bij,bjk->bik", q, k) - w_ = w_ * (int(c)**(-0.5)) + w_ = w_ * (int(c) ** (-0.5)) w_ = torch.nn.functional.softmax(w_, dim=2) # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, 
w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) + v = rearrange(v, "b c h w -> b c (h w)") + w_ = rearrange(w_, "b i j -> b j i") + h_ = torch.einsum("bij,bjk->bik", v, w_) + h_ = rearrange(h_, "b c (h w) -> b c h w", h=h) h_ = self.proj_out(h_) - return x+h_ + return x + h_ + def get_mem_free_total(device): - #only on cuda + # only on cuda if not torch.cuda.is_available(): return None stats = torch.cuda.memory_stats(device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] + mem_active = stats["active_bytes.all.current"] + mem_reserved = stats["reserved_bytes.all.current"] mem_free_cuda, _ = torch.cuda.mem_get_info(device) mem_free_torch = mem_reserved - mem_active mem_free_total = mem_free_cuda + mem_free_torch return mem_free_total + class CrossAttention(nn.Module, InvokeAICrossAttentionMixin): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): super().__init__() InvokeAICrossAttentionMixin.__init__(self) inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) - self.scale = dim_head ** -0.5 + self.scale = dim_head**-0.5 self.heads = heads self.to_q = nn.Linear(query_dim, inner_dim, bias=False) @@ -177,8 +177,7 @@ class CrossAttention(nn.Module, InvokeAICrossAttentionMixin): self.to_v = nn.Linear(context_dim, inner_dim, bias=False) self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) + nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) ) def forward(self, x, context=None, mask=None): @@ -190,7 +189,7 @@ class CrossAttention(nn.Module, InvokeAICrossAttentionMixin): v = self.to_v(context) del context, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = map(lambda t: rearrange(t, "b n (h d) -> (b h) n d", h=h), (q, k, v)) # don't apply scale twice cached_scale = self.scale @@ -198,29 +197,45 @@ class CrossAttention(nn.Module, InvokeAICrossAttentionMixin): r = self.get_invokeai_attention_mem_efficient(q, k, v) self.scale = cached_scale - hidden_states = rearrange(r, '(b h) n d -> b n (h d)', h=h) + hidden_states = rearrange(r, "(b h) n d -> b n (h d)", h=h) return self.to_out(hidden_states) - - class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): + def __init__( + self, + dim, + n_heads, + d_head, + dropout=0.0, + context_dim=None, + gated_ff=True, + checkpoint=True, + ): super().__init__() - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention + self.attn1 = CrossAttention( + query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout + ) # is a self-attention self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none + self.attn2 = CrossAttention( + query_dim=dim, + context_dim=context_dim, + heads=n_heads, + dim_head=d_head, + dropout=dropout, + ) # is self-attn if context is none self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.norm3 = nn.LayerNorm(dim) self.checkpoint = checkpoint def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) + return checkpoint( + self._forward, (x, context), self.parameters(), self.checkpoint + ) def 
_forward(self, x, context=None): - x = x.contiguous() if x.device.type == 'mps' else x + x = x.contiguous() if x.device.type == "mps" else x x += self.attn1(self.norm1(x.clone())) x += self.attn2(self.norm2(x.clone()), context=context) x += self.ff(self.norm3(x.clone())) @@ -235,29 +250,31 @@ class SpatialTransformer(nn.Module): Then apply standard transformer action. Finally, reshape to image """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None): + + def __init__( + self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None + ): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head self.norm = Normalize(in_channels) - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) - for d in range(depth)] + self.proj_in = nn.Conv2d( + in_channels, inner_dim, kernel_size=1, stride=1, padding=0 ) - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim + ) + for d in range(depth) + ] + ) + + self.proj_out = zero_module( + nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) + ) def forward(self, x, context=None): # note: if no context is given, cross-attention defaults to self-attention @@ -265,9 +282,9 @@ class SpatialTransformer(nn.Module): x_in = x x = self.norm(x) x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c') + x = rearrange(x, "b c h w -> b (h w) c") for block in self.transformer_blocks: x = block(x, context=context) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w) x = self.proj_out(x) return x + x_in diff --git a/invokeai/backend/stable_diffusion/autoencoder.py b/invokeai/backend/stable_diffusion/autoencoder.py index ba081bab28..2bc7fa84f6 100644 --- a/invokeai/backend/stable_diffusion/autoencoder.py +++ b/invokeai/backend/stable_diffusion/autoencoder.py @@ -1,16 +1,13 @@ -import torch -import pytorch_lightning as pl -import torch.nn.functional as F from contextlib import contextmanager +import pytorch_lightning as pl +import torch +import torch.nn.functional as F from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer -from .diffusionmodules.model import Encoder, Decoder -from .distributions.distributions import ( - DiagonalGaussianDistribution, -) - from ..util import instantiate_from_config +from .diffusionmodules.model import Decoder, Encoder +from .distributions.distributions import DiagonalGaussianDistribution class VQModel(pl.LightningModule): @@ -22,7 +19,7 @@ class VQModel(pl.LightningModule): embed_dim, ckpt_path=None, ignore_keys=[], - image_key='image', + image_key="image", colorize_nlabels=None, monitor=None, batch_resize_range=None, @@ -46,27 +43,23 @@ class VQModel(pl.LightningModule): remap=remap, sane_index_shape=sane_index_shape, ) - self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d( - embed_dim, ddconfig['z_channels'], 1 - ) + self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) if colorize_nlabels is not None: assert type(colorize_nlabels) == int - 
self.register_buffer( - 'colorize', torch.randn(3, colorize_nlabels, 1, 1) - ) + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor self.batch_resize_range = batch_resize_range if self.batch_resize_range is not None: print( - f'{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.' + f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}." ) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self) - print(f'>> Keeping EMAs of {len(list(self.model_ema.buffers()))}.') + print(f">> Keeping EMAs of {len(list(self.model_ema.buffers()))}.") if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) @@ -79,30 +72,30 @@ class VQModel(pl.LightningModule): self.model_ema.store(self.parameters()) self.model_ema.copy_to(self) if context is not None: - print(f'{context}: Switched to EMA weights') + print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.parameters()) if context is not None: - print(f'{context}: Restored training weights') + print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location='cpu')['state_dict'] + sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) + print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) print( - f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: - print(f'Missing Keys: {missing}') - print(f'Unexpected Keys: {unexpected}') + print(f"Missing Keys: {missing}") + print(f"Unexpected Keys: {unexpected}") def on_train_batch_end(self, *args, **kwargs): if self.use_ema: @@ -140,11 +133,7 @@ class VQModel(pl.LightningModule): x = batch[k] if len(x.shape) == 3: x = x[..., None] - x = ( - x.permute(0, 3, 1, 2) - .to(memory_format=torch.contiguous_format) - .float() - ) + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() if self.batch_resize_range is not None: lower_size = self.batch_resize_range[0] upper_size = self.batch_resize_range[1] @@ -156,7 +145,7 @@ class VQModel(pl.LightningModule): np.arange(lower_size, upper_size + 16, 16) ) if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode='bicubic') + x = F.interpolate(x, size=new_resize, mode="bicubic") x = x.detach() return x @@ -175,7 +164,7 @@ class VQModel(pl.LightningModule): optimizer_idx, self.global_step, last_layer=self.get_last_layer(), - split='train', + split="train", predicted_indices=ind, ) @@ -197,7 +186,7 @@ class VQModel(pl.LightningModule): optimizer_idx, self.global_step, last_layer=self.get_last_layer(), - split='train', + split="train", ) self.log_dict( log_dict_disc, @@ -211,12 +200,10 @@ class VQModel(pl.LightningModule): def validation_step(self, batch, batch_idx): log_dict = self._validation_step(batch, batch_idx) with self.ema_scope(): - log_dict_ema = self._validation_step( - batch, batch_idx, suffix='_ema' - ) + log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") return log_dict - def _validation_step(self, batch, batch_idx, suffix=''): + def _validation_step(self, batch, batch_idx, 
suffix=""): x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) aeloss, log_dict_ae = self.loss( @@ -226,7 +213,7 @@ class VQModel(pl.LightningModule): 0, self.global_step, last_layer=self.get_last_layer(), - split='val' + suffix, + split="val" + suffix, predicted_indices=ind, ) @@ -237,12 +224,12 @@ class VQModel(pl.LightningModule): 1, self.global_step, last_layer=self.get_last_layer(), - split='val' + suffix, + split="val" + suffix, predicted_indices=ind, ) - rec_loss = log_dict_ae[f'val{suffix}/rec_loss'] + rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] self.log( - f'val{suffix}/rec_loss', + f"val{suffix}/rec_loss", rec_loss, prog_bar=True, logger=True, @@ -251,7 +238,7 @@ class VQModel(pl.LightningModule): sync_dist=True, ) self.log( - f'val{suffix}/aeloss', + f"val{suffix}/aeloss", aeloss, prog_bar=True, logger=True, @@ -259,8 +246,8 @@ class VQModel(pl.LightningModule): on_epoch=True, sync_dist=True, ) - if version.parse(pl.__version__) >= version.parse('1.4.0'): - del log_dict_ae[f'val{suffix}/rec_loss'] + if version.parse(pl.__version__) >= version.parse("1.4.0"): + del log_dict_ae[f"val{suffix}/rec_loss"] self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict @@ -268,8 +255,8 @@ class VQModel(pl.LightningModule): def configure_optimizers(self): lr_d = self.learning_rate lr_g = self.lr_g_factor * self.learning_rate - print('lr_d', lr_d) - print('lr_g', lr_g) + print("lr_d", lr_d) + print("lr_g", lr_g) opt_ae = torch.optim.Adam( list(self.encoder.parameters()) + list(self.decoder.parameters()) @@ -286,21 +273,17 @@ class VQModel(pl.LightningModule): if self.scheduler_config is not None: scheduler = instantiate_from_config(self.scheduler_config) - print('Setting up LambdaLR scheduler...') + print("Setting up LambdaLR scheduler...") scheduler = [ { - 'scheduler': LambdaLR( - opt_ae, lr_lambda=scheduler.schedule - ), - 'interval': 'step', - 'frequency': 1, + "scheduler": LambdaLR(opt_ae, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, }, { - 'scheduler': LambdaLR( - opt_disc, lr_lambda=scheduler.schedule - ), - 'interval': 'step', - 'frequency': 1, + "scheduler": LambdaLR(opt_disc, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, }, ] return [opt_ae, opt_disc], scheduler @@ -314,7 +297,7 @@ class VQModel(pl.LightningModule): x = self.get_input(batch, self.image_key) x = x.to(self.device) if only_inputs: - log['inputs'] = x + log["inputs"] = x return log xrec, _ = self(x) if x.shape[1] > 3: @@ -322,22 +305,20 @@ class VQModel(pl.LightningModule): assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) - log['inputs'] = x - log['reconstructions'] = xrec + log["inputs"] = x + log["reconstructions"] = xrec if plot_ema: with self.ema_scope(): xrec_ema, _ = self(x) if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) - log['reconstructions_ema'] = xrec_ema + log["reconstructions_ema"] = xrec_ema return log def to_rgb(self, x): - assert self.image_key == 'segmentation' - if not hasattr(self, 'colorize'): - self.register_buffer( - 'colorize', torch.randn(3, x.shape[1], 1, 1).to(x) - ) + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 return x @@ -372,7 +353,7 @@ class AutoencoderKL(pl.LightningModule): embed_dim, ckpt_path=None, ignore_keys=[], - image_key='image', + 
image_key="image", colorize_nlabels=None, monitor=None, ): @@ -381,34 +362,28 @@ class AutoencoderKL(pl.LightningModule): self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) - assert ddconfig['double_z'] - self.quant_conv = torch.nn.Conv2d( - 2 * ddconfig['z_channels'], 2 * embed_dim, 1 - ) - self.post_quant_conv = torch.nn.Conv2d( - embed_dim, ddconfig['z_channels'], 1 - ) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim if colorize_nlabels is not None: assert type(colorize_nlabels) == int - self.register_buffer( - 'colorize', torch.randn(3, colorize_nlabels, 1, 1) - ) + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location='cpu')['state_dict'] + sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) + print("Deleting key {} from state_dict.".format(k)) del sd[k] self.load_state_dict(sd, strict=False) - print(f'Restored from {path}') + print(f"Restored from {path}") def encode(self, x): h = self.encoder(x) @@ -434,11 +409,7 @@ class AutoencoderKL(pl.LightningModule): x = batch[k] if len(x.shape) == 3: x = x[..., None] - x = ( - x.permute(0, 3, 1, 2) - .to(memory_format=torch.contiguous_format) - .float() - ) + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() return x def training_step(self, batch, batch_idx, optimizer_idx): @@ -454,10 +425,10 @@ class AutoencoderKL(pl.LightningModule): optimizer_idx, self.global_step, last_layer=self.get_last_layer(), - split='train', + split="train", ) self.log( - 'aeloss', + "aeloss", aeloss, prog_bar=True, logger=True, @@ -482,11 +453,11 @@ class AutoencoderKL(pl.LightningModule): optimizer_idx, self.global_step, last_layer=self.get_last_layer(), - split='train', + split="train", ) self.log( - 'discloss', + "discloss", discloss, prog_bar=True, logger=True, @@ -512,7 +483,7 @@ class AutoencoderKL(pl.LightningModule): 0, self.global_step, last_layer=self.get_last_layer(), - split='val', + split="val", ) discloss, log_dict_disc = self.loss( @@ -522,10 +493,10 @@ class AutoencoderKL(pl.LightningModule): 1, self.global_step, last_layer=self.get_last_layer(), - split='val', + split="val", ) - self.log('val/rec_loss', log_dict_ae['val/rec_loss']) + self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict @@ -560,17 +531,15 @@ class AutoencoderKL(pl.LightningModule): assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) - log['samples'] = self.decode(torch.randn_like(posterior.sample())) - log['reconstructions'] = xrec - log['inputs'] = x + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + log["inputs"] = x return log def to_rgb(self, x): - assert self.image_key == 'segmentation' - if not hasattr(self, 'colorize'): - self.register_buffer( - 'colorize', torch.randn(3, x.shape[1], 1, 1).to(x) - ) + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + 
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 return x diff --git a/invokeai/backend/stable_diffusion/concepts_lib.py b/invokeai/backend/stable_diffusion/concepts_lib.py index 63d0a660a0..50ff6c16c9 100644 --- a/invokeai/backend/stable_diffusion/concepts_lib.py +++ b/invokeai/backend/stable_diffusion/concepts_lib.py @@ -8,32 +8,50 @@ import os import re import traceback from typing import Callable -from urllib import request, error as ul_error -from huggingface_hub import HfFolder, hf_hub_url, ModelSearchArguments, ModelFilter, HfApi +from urllib import error as ul_error +from urllib import request + +from huggingface_hub import ( + HfApi, + HfFolder, + ModelFilter, + ModelSearchArguments, + hf_hub_url, +) + from invokeai.backend.globals import Globals + class HuggingFaceConceptsLibrary(object): def __init__(self, root=None): - ''' + """ Initialize the Concepts object. May optionally pass a root directory. - ''' + """ self.root = root or Globals.root self.hf_api = HfApi() self.local_concepts = dict() self.concept_list = None self.concepts_loaded = dict() - self.triggers = dict() # concept name to trigger phrase - self.concept_names = dict() # trigger phrase to concept name - self.match_trigger = re.compile('(<[\w\- >]+>)') # trigger is slightly less restrictive than HF concept name - self.match_concept = re.compile('<([\w\-]+)>') # HF concept name can only contain A-Za-z0-9_- + self.triggers = dict() # concept name to trigger phrase + self.concept_names = dict() # trigger phrase to concept name + self.match_trigger = re.compile( + "(<[\w\- >]+>)" + ) # trigger is slightly less restrictive than HF concept name + self.match_concept = re.compile( + "<([\w\-]+)>" + ) # HF concept name can only contain A-Za-z0-9_- - def list_concepts(self)->list: - ''' + def list_concepts(self) -> list: + """ Return a list of all the concepts by name, without the 'sd-concepts-library' part. Also adds local concepts in invokeai/embeddings folder. - ''' - local_concepts_now = self.get_local_concepts(os.path.join(self.root, 'embeddings')) - local_concepts_to_add = set(local_concepts_now).difference(set(self.local_concepts)) + """ + local_concepts_now = self.get_local_concepts( + os.path.join(self.root, "embeddings") + ) + local_concepts_to_add = set(local_concepts_now).difference( + set(self.local_concepts) + ) self.local_concepts.update(local_concepts_now) if self.concept_list is not None: @@ -43,83 +61,96 @@ class HuggingFaceConceptsLibrary(object): return self.concept_list else: try: - models = self.hf_api.list_models(filter=ModelFilter(model_name='sd-concepts-library/')) - self.concept_list = [a.id.split('/')[1] for a in models] + models = self.hf_api.list_models( + filter=ModelFilter(model_name="sd-concepts-library/") + ) + self.concept_list = [a.id.split("/")[1] for a in models] # when init, add all in dir. when not init, add only concepts added between init and now self.concept_list.extend(list(local_concepts_to_add)) except Exception as e: - print(f' ** WARNING: Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}.') - print(' ** You may load .bin and .pt file(s) manually using the --embedding_directory argument.') + print( + f" ** WARNING: Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}." + ) + print( + " ** You may load .bin and .pt file(s) manually using the --embedding_directory argument." 
+ ) return self.concept_list - def get_concept_model_path(self, concept_name:str)->str: - ''' + def get_concept_model_path(self, concept_name: str) -> str: + """ Returns the path to the 'learned_embeds.bin' file in the named concept. Returns None if invalid or cannot be downloaded. - ''' + """ if not concept_name in self.list_concepts(): - print(f'This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept.') + print( + f"This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept." + ) return None - return self.get_concept_file(concept_name.lower(),'learned_embeds.bin') + return self.get_concept_file(concept_name.lower(), "learned_embeds.bin") - def concept_to_trigger(self, concept_name:str)->str: - ''' + def concept_to_trigger(self, concept_name: str) -> str: + """ Given a concept name returns its trigger by looking in the "token_identifier.txt" file. - ''' + """ if concept_name in self.triggers: return self.triggers[concept_name] elif self.concept_is_local(concept_name): - trigger = f'<{concept_name}>' + trigger = f"<{concept_name}>" self.triggers[concept_name] = trigger self.concept_names[trigger] = concept_name return trigger - file = self.get_concept_file(concept_name, 'token_identifier.txt', local_only=True) + file = self.get_concept_file( + concept_name, "token_identifier.txt", local_only=True + ) if not file: return None - with open(file,'r') as f: + with open(file, "r") as f: trigger = f.readline() trigger = trigger.strip() self.triggers[concept_name] = trigger self.concept_names[trigger] = concept_name return trigger - def trigger_to_concept(self, trigger:str)->str: - ''' + def trigger_to_concept(self, trigger: str) -> str: + """ Given a trigger phrase, maps it to the concept library name. Only works if concept_to_trigger() has previously been called on this library. There needs to be a persistent database for this. - ''' - concept = self.concept_names.get(trigger,None) - return f'<{concept}>' if concept else f'{trigger}' + """ + concept = self.concept_names.get(trigger, None) + return f"<{concept}>" if concept else f"{trigger}" - def replace_triggers_with_concepts(self, prompt:str)->str: - ''' + def replace_triggers_with_concepts(self, prompt: str) -> str: + """ Given a prompt string that contains <trigger> tags, replace these tags with the concept name. The reason for this is so that the concept names get stored in the prompt metadata. There is no controlling of colliding triggers in the SD library, so it is better to store the concept name (unique) than the concept trigger (not necessarily unique!) - ''' + """ if not prompt: return prompt triggers = self.match_trigger.findall(prompt) if not triggers: return prompt - def do_replace(match)->str: - return self.trigger_to_concept(match.group(1)) or f'<{match.group(1)}>' + def do_replace(match) -> str: + return self.trigger_to_concept(match.group(1)) or f"<{match.group(1)}>" + return self.match_trigger.sub(do_replace, prompt) - def replace_concepts_with_triggers(self, - prompt:str, - load_concepts_callback: Callable[[list], any], - excluded_tokens:list[str])->str: - ''' + def replace_concepts_with_triggers( + self, + prompt: str, + load_concepts_callback: Callable[[list], any], + excluded_tokens: list[str], + ) -> str: + """ Given a prompt string that contains `<concept_name>` tags, replace these tags with the appropriate trigger.
@@ -128,20 +159,30 @@ class HuggingFaceConceptsLibrary(object): `excluded_tokens` are any tokens that should not be replaced, typically because they are trigger tokens from a locally-loaded embedding. - ''' + """ concepts = self.match_concept.findall(prompt) if not concepts: return prompt load_concepts_callback(concepts) - def do_replace(match)->str: - if excluded_tokens and f'<{match.group(1)}>' in excluded_tokens: - return f'<{match.group(1)}>' - return self.concept_to_trigger(match.group(1)) or f'<{match.group(1)}>' + def do_replace(match) -> str: + if excluded_tokens and f"<{match.group(1)}>" in excluded_tokens: + return f"<{match.group(1)}>" + return self.concept_to_trigger(match.group(1)) or f"<{match.group(1)}>" + return self.match_concept.sub(do_replace, prompt) - def get_concept_file(self, concept_name:str, file_name:str='learned_embeds.bin' , local_only:bool=False)->str: - if not (self.concept_is_downloaded(concept_name) or self.concept_is_local(concept_name) or local_only): + def get_concept_file( + self, + concept_name: str, + file_name: str = "learned_embeds.bin", + local_only: bool = False, + ) -> str: + if not ( + self.concept_is_downloaded(concept_name) + or self.concept_is_local(concept_name) + or local_only + ): self.download_concept(concept_name) # get local path in invokeai/embeddings if local concept @@ -153,19 +194,19 @@ class HuggingFaceConceptsLibrary(object): path = os.path.join(concept_path, file_name) return path if os.path.exists(path) else None - def concept_is_local(self, concept_name)->bool: + def concept_is_local(self, concept_name) -> bool: return concept_name in self.local_concepts - def concept_is_downloaded(self, concept_name)->bool: + def concept_is_downloaded(self, concept_name) -> bool: concept_directory = self._concept_path(concept_name) return os.path.exists(concept_directory) - def download_concept(self,concept_name)->bool: + def download_concept(self, concept_name) -> bool: repo_id = self._concept_id(concept_name) dest = self._concept_path(concept_name) access_token = HfFolder.get_token() - header = [("Authorization", f'Bearer {access_token}')] if access_token else [] + header = [("Authorization", f"Bearer {access_token}")] if access_token else [] opener = request.build_opener() opener.addheaders = header request.install_opener(opener) @@ -174,45 +215,59 @@ class HuggingFaceConceptsLibrary(object): succeeded = True bytes = 0 + def tally_download_size(chunk, size, total): nonlocal bytes - if chunk==0: + if chunk == 0: bytes += total - print(f'>> Downloading {repo_id}...',end='') + print(f">> Downloading {repo_id}...", end="") try: - for file in ('README.md','learned_embeds.bin','token_identifier.txt','type_of_concept.txt'): + for file in ( + "README.md", + "learned_embeds.bin", + "token_identifier.txt", + "type_of_concept.txt", + ): url = hf_hub_url(repo_id, file) - request.urlretrieve(url, os.path.join(dest,file),reporthook=tally_download_size) + request.urlretrieve( + url, os.path.join(dest, file), reporthook=tally_download_size + ) except ul_error.HTTPError as e: - if e.code==404: - print(f'This concept is not known to the Hugging Face library. Generation will continue without the concept.') + if e.code == 404: + print( + f"This concept is not known to the Hugging Face library. Generation will continue without the concept." + ) else: - print(f'Failed to download {concept_name}/{file} ({str(e)}. Generation will continue without the concept.)') + print( + f"Failed to download {concept_name}/{file} ({str(e)}. 
Generation will continue without the concept.)" + ) os.rmdir(dest) return False except ul_error.URLError as e: - print(f'ERROR: {str(e)}. This may reflect a network issue. Generation will continue without the concept.') + print( + f"ERROR: {str(e)}. This may reflect a network issue. Generation will continue without the concept." + ) os.rmdir(dest) return False - print('...{:.2f}Kb'.format(bytes/1024)) + print("...{:.2f}Kb".format(bytes / 1024)) return succeeded - def _concept_id(self, concept_name:str)->str: - return f'sd-concepts-library/{concept_name}' + def _concept_id(self, concept_name: str) -> str: + return f"sd-concepts-library/{concept_name}" - def _concept_path(self, concept_name:str)->str: - return os.path.join(self.root,'models','sd-concepts-library',concept_name) + def _concept_path(self, concept_name: str) -> str: + return os.path.join(self.root, "models", "sd-concepts-library", concept_name) - def _concept_local_path(self, concept_name:str)->str: + def _concept_local_path(self, concept_name: str) -> str: filename = self.local_concepts[concept_name] - return os.path.join(self.root,'embeddings',filename) + return os.path.join(self.root, "embeddings", filename) - def get_local_concepts(self, loc_dir:str): + def get_local_concepts(self, loc_dir: str): locs_dic = dict() if os.path.isdir(loc_dir): for file in os.listdir(loc_dir): f = os.path.splitext(file) - if f[1] == '.bin' or f[1] == '.pt': + if f[1] == ".bin" or f[1] == ".pt": locs_dic[f[0]] = file return locs_dic diff --git a/invokeai/backend/stable_diffusion/data/base.py b/invokeai/backend/stable_diffusion/data/base.py index de9493fc1e..1b6a138bf7 100644 --- a/invokeai/backend/stable_diffusion/data/base.py +++ b/invokeai/backend/stable_diffusion/data/base.py @@ -1,10 +1,6 @@ from abc import abstractmethod -from torch.utils.data import ( - Dataset, - ConcatDataset, - ChainDataset, - IterableDataset, -) + +from torch.utils.data import ChainDataset, ConcatDataset, Dataset, IterableDataset class Txt2ImgIterableBaseDataset(IterableDataset): @@ -19,9 +15,7 @@ class Txt2ImgIterableBaseDataset(IterableDataset): self.sample_ids = valid_ids self.size = size - print( - f'{self.__class__.__name__} dataset contains {self.__len__()} examples.' 
- ) + print(f"{self.__class__.__name__} dataset contains {self.__len__()} examples.") def __len__(self): return self.num_records diff --git a/invokeai/backend/stable_diffusion/data/imagenet.py b/invokeai/backend/stable_diffusion/data/imagenet.py index d155f6d6ae..84bad27590 100644 --- a/invokeai/backend/stable_diffusion/data/imagenet.py +++ b/invokeai/backend/stable_diffusion/data/imagenet.py @@ -1,31 +1,32 @@ -import os, yaml, pickle, shutil, tarfile, glob -import cv2 -import albumentations -import PIL -import numpy as np -import torchvision.transforms.functional as TF -from omegaconf import OmegaConf +import glob +import os +import pickle +import shutil +import tarfile from functools import partial -from PIL import Image -from tqdm import tqdm -from torch.utils.data import Dataset, Subset +import albumentations +import cv2 +import numpy as np +import PIL import taming.data.utils as tdu +import torchvision.transforms.functional as TF +import yaml +from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light +from omegaconf import OmegaConf +from PIL import Image from taming.data.imagenet import ( - str_to_indices, - give_synsets_from_indices, + ImagePaths, download, + give_synsets_from_indices, retrieve, + str_to_indices, ) -from taming.data.imagenet import ImagePaths - -from ldm.modules.image_degradation import ( - degradation_fn_bsr, - degradation_fn_bsr_light, -) +from torch.utils.data import Dataset, Subset +from tqdm import tqdm -def synset2idx(path_to_yaml='data/index_synset.yaml'): +def synset2idx(path_to_yaml="data/index_synset.yaml"): with open(path_to_yaml) as f: di2s = yaml.load(f) return dict((v, k) for k, v in di2s.items()) @@ -36,9 +37,7 @@ class ImageNetBase(Dataset): self.config = config or OmegaConf.create() if not type(self.config) == dict: self.config = OmegaConf.to_container(self.config) - self.keep_orig_class_label = self.config.get( - 'keep_orig_class_label', False - ) + self.keep_orig_class_label = self.config.get("keep_orig_class_label", False) self.process_images = True # if False we skip loading & processing images and self.data contains filepaths self._prepare() self._prepare_synset_to_human() @@ -58,21 +57,19 @@ class ImageNetBase(Dataset): def _filter_relpaths(self, relpaths): ignore = set( [ - 'n06596364_9591.JPEG', + "n06596364_9591.JPEG", ] ) - relpaths = [ - rpath for rpath in relpaths if not rpath.split('/')[-1] in ignore - ] - if 'sub_indices' in self.config: - indices = str_to_indices(self.config['sub_indices']) + relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore] + if "sub_indices" in self.config: + indices = str_to_indices(self.config["sub_indices"]) synsets = give_synsets_from_indices( indices, path_to_yaml=self.idx2syn ) # returns a list of strings self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) files = [] for rpath in relpaths: - syn = rpath.split('/')[0] + syn = rpath.split("/")[0] if syn in synsets: files.append(rpath) return files @@ -81,8 +78,8 @@ class ImageNetBase(Dataset): def _prepare_synset_to_human(self): SIZE = 2655750 - URL = 'https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1' - self.human_dict = os.path.join(self.root, 'synset_human.txt') + URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" + self.human_dict = os.path.join(self.root, "synset_human.txt") if ( not os.path.exists(self.human_dict) or not os.path.getsize(self.human_dict) == SIZE @@ -90,64 +87,62 @@ class ImageNetBase(Dataset): download(URL, self.human_dict) def 
_prepare_idx_to_synset(self): - URL = 'https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1' - self.idx2syn = os.path.join(self.root, 'index_synset.yaml') + URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" + self.idx2syn = os.path.join(self.root, "index_synset.yaml") if not os.path.exists(self.idx2syn): download(URL, self.idx2syn) def _prepare_human_to_integer_label(self): - URL = 'https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1' + URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" self.human2integer = os.path.join( - self.root, 'imagenet1000_clsidx_to_labels.txt' + self.root, "imagenet1000_clsidx_to_labels.txt" ) if not os.path.exists(self.human2integer): download(URL, self.human2integer) - with open(self.human2integer, 'r') as f: + with open(self.human2integer, "r") as f: lines = f.read().splitlines() assert len(lines) == 1000 self.human2integer_dict = dict() for line in lines: - value, key = line.split(':') + value, key = line.split(":") self.human2integer_dict[key] = int(value) def _load(self): - with open(self.txt_filelist, 'r') as f: + with open(self.txt_filelist, "r") as f: self.relpaths = f.read().splitlines() l1 = len(self.relpaths) self.relpaths = self._filter_relpaths(self.relpaths) print( - 'Removed {} files from filelist during filtering.'.format( + "Removed {} files from filelist during filtering.".format( l1 - len(self.relpaths) ) ) - self.synsets = [p.split('/')[0] for p in self.relpaths] + self.synsets = [p.split("/")[0] for p in self.relpaths] self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] unique_synsets = np.unique(self.synsets) - class_dict = dict( - (synset, i) for i, synset in enumerate(unique_synsets) - ) + class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) if not self.keep_orig_class_label: self.class_labels = [class_dict[s] for s in self.synsets] else: self.class_labels = [self.synset2idx[s] for s in self.synsets] - with open(self.human_dict, 'r') as f: + with open(self.human_dict, "r") as f: human_dict = f.read().splitlines() human_dict = dict(line.split(maxsplit=1) for line in human_dict) self.human_labels = [human_dict[s] for s in self.synsets] labels = { - 'relpath': np.array(self.relpaths), - 'synsets': np.array(self.synsets), - 'class_label': np.array(self.class_labels), - 'human_label': np.array(self.human_labels), + "relpath": np.array(self.relpaths), + "synsets": np.array(self.synsets), + "class_label": np.array(self.class_labels), + "human_label": np.array(self.human_labels), } if self.process_images: - self.size = retrieve(self.config, 'size', default=256) + self.size = retrieve(self.config, "size", default=256) self.data = ImagePaths( self.abspaths, labels=labels, @@ -159,11 +154,11 @@ class ImageNetBase(Dataset): class ImageNetTrain(ImageNetBase): - NAME = 'ILSVRC2012_train' - URL = 'http://www.image-net.org/challenges/LSVRC/2012/' - AT_HASH = 'a306397ccf9c2ead27155983c254227c0fd938e2' + NAME = "ILSVRC2012_train" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" FILES = [ - 'ILSVRC2012_img_train.tar', + "ILSVRC2012_img_train.tar", ] SIZES = [ 147897477120, @@ -178,20 +173,18 @@ class ImageNetTrain(ImageNetBase): if self.data_root: self.root = os.path.join(self.data_root, self.NAME) else: - cachedir = os.environ.get( - 'XDG_CACHE_HOME', os.path.expanduser('~/.cache') - ) - self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME) + cachedir = os.environ.get("XDG_CACHE_HOME", 
os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) - self.datadir = os.path.join(self.root, 'data') - self.txt_filelist = os.path.join(self.root, 'filelist.txt') + self.datadir = os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") self.expected_length = 1281167 self.random_crop = retrieve( - self.config, 'ImageNetTrain/random_crop', default=True + self.config, "ImageNetTrain/random_crop", default=True ) if not tdu.is_prepared(self.root): # prep - print('Preparing dataset {} in {}'.format(self.NAME, self.root)) + print("Preparing dataset {} in {}".format(self.NAME, self.root)) datadir = self.datadir if not os.path.exists(datadir): @@ -205,37 +198,37 @@ class ImageNetTrain(ImageNetBase): atpath = at.get(self.AT_HASH, datastore=self.root) assert atpath == path - print('Extracting {} to {}'.format(path, datadir)) + print("Extracting {} to {}".format(path, datadir)) os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, 'r:') as tar: + with tarfile.open(path, "r:") as tar: tar.extractall(path=datadir) - print('Extracting sub-tars.') - subpaths = sorted(glob.glob(os.path.join(datadir, '*.tar'))) + print("Extracting sub-tars.") + subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) for subpath in tqdm(subpaths): - subdir = subpath[: -len('.tar')] + subdir = subpath[: -len(".tar")] os.makedirs(subdir, exist_ok=True) - with tarfile.open(subpath, 'r:') as tar: + with tarfile.open(subpath, "r:") as tar: tar.extractall(path=subdir) - filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG')) + filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) filelist = [os.path.relpath(p, start=datadir) for p in filelist] filelist = sorted(filelist) - filelist = '\n'.join(filelist) + '\n' - with open(self.txt_filelist, 'w') as f: + filelist = "\n".join(filelist) + "\n" + with open(self.txt_filelist, "w") as f: f.write(filelist) tdu.mark_prepared(self.root) class ImageNetValidation(ImageNetBase): - NAME = 'ILSVRC2012_validation' - URL = 'http://www.image-net.org/challenges/LSVRC/2012/' - AT_HASH = '5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5' - VS_URL = 'https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1' + NAME = "ILSVRC2012_validation" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" + VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" FILES = [ - 'ILSVRC2012_img_val.tar', - 'validation_synset.txt', + "ILSVRC2012_img_val.tar", + "validation_synset.txt", ] SIZES = [ 6744924160, @@ -251,19 +244,17 @@ class ImageNetValidation(ImageNetBase): if self.data_root: self.root = os.path.join(self.data_root, self.NAME) else: - cachedir = os.environ.get( - 'XDG_CACHE_HOME', os.path.expanduser('~/.cache') - ) - self.root = os.path.join(cachedir, 'autoencoders/data', self.NAME) - self.datadir = os.path.join(self.root, 'data') - self.txt_filelist = os.path.join(self.root, 'filelist.txt') + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + self.datadir = os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") self.expected_length = 50000 self.random_crop = retrieve( - self.config, 'ImageNetValidation/random_crop', default=False + self.config, "ImageNetValidation/random_crop", default=False ) if not tdu.is_prepared(self.root): # prep - print('Preparing dataset {} in {}'.format(self.NAME, 
self.root)) + print("Preparing dataset {} in {}".format(self.NAME, self.root)) datadir = self.datadir if not os.path.exists(datadir): @@ -277,9 +268,9 @@ class ImageNetValidation(ImageNetBase): atpath = at.get(self.AT_HASH, datastore=self.root) assert atpath == path - print('Extracting {} to {}'.format(path, datadir)) + print("Extracting {} to {}".format(path, datadir)) os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, 'r:') as tar: + with tarfile.open(path, "r:") as tar: tar.extractall(path=datadir) vspath = os.path.join(self.root, self.FILES[1]) @@ -289,11 +280,11 @@ class ImageNetValidation(ImageNetBase): ): download(self.VS_URL, vspath) - with open(vspath, 'r') as f: + with open(vspath, "r") as f: synset_dict = f.read().splitlines() synset_dict = dict(line.split() for line in synset_dict) - print('Reorganizing into synset folders') + print("Reorganizing into synset folders") synsets = np.unique(list(synset_dict.values())) for s in synsets: os.makedirs(os.path.join(datadir, s), exist_ok=True) @@ -302,11 +293,11 @@ class ImageNetValidation(ImageNetBase): dst = os.path.join(datadir, v) shutil.move(src, dst) - filelist = glob.glob(os.path.join(datadir, '**', '*.JPEG')) + filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) filelist = [os.path.relpath(p, start=datadir) for p in filelist] filelist = sorted(filelist) - filelist = '\n'.join(filelist) + '\n' - with open(self.txt_filelist, 'w') as f: + filelist = "\n".join(filelist) + "\n" + with open(self.txt_filelist, "w") as f: f.write(filelist) tdu.mark_prepared(self.root) @@ -356,32 +347,28 @@ class ImageNetSR(Dataset): False # gets reset later if incase interp_op is from pillow ) - if degradation == 'bsrgan': - self.degradation_process = partial( - degradation_fn_bsr, sf=downscale_f - ) + if degradation == "bsrgan": + self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f) - elif degradation == 'bsrgan_light': - self.degradation_process = partial( - degradation_fn_bsr_light, sf=downscale_f - ) + elif degradation == "bsrgan_light": + self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f) else: interpolation_fn = { - 'cv_nearest': cv2.INTER_NEAREST, - 'cv_bilinear': cv2.INTER_LINEAR, - 'cv_bicubic': cv2.INTER_CUBIC, - 'cv_area': cv2.INTER_AREA, - 'cv_lanczos': cv2.INTER_LANCZOS4, - 'pil_nearest': PIL.Image.NEAREST, - 'pil_bilinear': PIL.Image.BILINEAR, - 'pil_bicubic': PIL.Image.BICUBIC, - 'pil_box': PIL.Image.BOX, - 'pil_hamming': PIL.Image.HAMMING, - 'pil_lanczos': PIL.Image.LANCZOS, + "cv_nearest": cv2.INTER_NEAREST, + "cv_bilinear": cv2.INTER_LINEAR, + "cv_bicubic": cv2.INTER_CUBIC, + "cv_area": cv2.INTER_AREA, + "cv_lanczos": cv2.INTER_LANCZOS4, + "pil_nearest": PIL.Image.NEAREST, + "pil_bilinear": PIL.Image.BILINEAR, + "pil_bicubic": PIL.Image.BICUBIC, + "pil_box": PIL.Image.BOX, + "pil_hamming": PIL.Image.HAMMING, + "pil_lanczos": PIL.Image.LANCZOS, }[degradation] - self.pil_interpolation = degradation.startswith('pil_') + self.pil_interpolation = degradation.startswith("pil_") if self.pil_interpolation: self.degradation_process = partial( @@ -400,10 +387,10 @@ class ImageNetSR(Dataset): def __getitem__(self, i): example = self.base[i] - image = Image.open(example['file_path_']) + image = Image.open(example["file_path_"]) - if not image.mode == 'RGB': - image = image.convert('RGB') + if not image.mode == "RGB": + image = image.convert("RGB") image = np.array(image).astype(np.uint8) @@ -423,8 +410,8 @@ class ImageNetSR(Dataset): height=crop_side_len, width=crop_side_len ) - image 
= self.cropper(image=image)['image'] - image = self.image_rescaler(image=image)['image'] + image = self.cropper(image=image)["image"] + image = self.image_rescaler(image=image)["image"] if self.pil_interpolation: image_pil = PIL.Image.fromarray(image) @@ -432,10 +419,10 @@ class ImageNetSR(Dataset): LR_image = np.array(LR_image).astype(np.uint8) else: - LR_image = self.degradation_process(image=image)['image'] + LR_image = self.degradation_process(image=image)["image"] - example['image'] = (image / 127.5 - 1.0).astype(np.float32) - example['LR_image'] = (LR_image / 127.5 - 1.0).astype(np.float32) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) + example["LR_image"] = (LR_image / 127.5 - 1.0).astype(np.float32) return example @@ -445,7 +432,7 @@ class ImageNetSRTrain(ImageNetSR): super().__init__(**kwargs) def get_base(self): - with open('data/imagenet_train_hr_indices.p', 'rb') as f: + with open("data/imagenet_train_hr_indices.p", "rb") as f: indices = pickle.load(f) dset = ImageNetTrain( process_images=False, @@ -458,7 +445,7 @@ class ImageNetSRValidation(ImageNetSR): super().__init__(**kwargs) def get_base(self): - with open('data/imagenet_val_hr_indices.p', 'rb') as f: + with open("data/imagenet_val_hr_indices.p", "rb") as f: indices = pickle.load(f) dset = ImageNetValidation( process_images=False, diff --git a/invokeai/backend/stable_diffusion/data/lsun.py b/invokeai/backend/stable_diffusion/data/lsun.py index 4a7ecb147e..e9c2543f10 100644 --- a/invokeai/backend/stable_diffusion/data/lsun.py +++ b/invokeai/backend/stable_diffusion/data/lsun.py @@ -1,4 +1,5 @@ import os + import numpy as np import PIL from PIL import Image @@ -12,27 +13,25 @@ class LSUNBase(Dataset): txt_file, data_root, size=None, - interpolation='bicubic', + interpolation="bicubic", flip_p=0.5, ): self.data_paths = txt_file self.data_root = data_root - with open(self.data_paths, 'r') as f: + with open(self.data_paths, "r") as f: self.image_paths = f.read().splitlines() self._length = len(self.image_paths) self.labels = { - 'relative_file_path_': [l for l in self.image_paths], - 'file_path_': [ - os.path.join(self.data_root, l) for l in self.image_paths - ], + "relative_file_path_": [l for l in self.image_paths], + "file_path_": [os.path.join(self.data_root, l) for l in self.image_paths], } self.size = size self.interpolation = { - 'linear': PIL.Image.LINEAR, - 'bilinear': PIL.Image.BILINEAR, - 'bicubic': PIL.Image.BICUBIC, - 'lanczos': PIL.Image.LANCZOS, + "linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, }[interpolation] self.flip = transforms.RandomHorizontalFlip(p=flip_p) @@ -41,14 +40,17 @@ class LSUNBase(Dataset): def __getitem__(self, i): example = dict((k, self.labels[k][i]) for k in self.labels) - image = Image.open(example['file_path_']) - if not image.mode == 'RGB': - image = image.convert('RGB') + image = Image.open(example["file_path_"]) + if not image.mode == "RGB": + image = image.convert("RGB") # default to score-sde preprocessing img = np.array(image).astype(np.uint8) crop = min(img.shape[0], img.shape[1]) - h, w, = ( + ( + h, + w, + ) = ( img.shape[0], img.shape[1], ) @@ -59,68 +61,64 @@ class LSUNBase(Dataset): image = Image.fromarray(img) if self.size is not None: - image = image.resize( - (self.size, self.size), resample=self.interpolation - ) + image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip(image) image = np.array(image).astype(np.uint8) - example['image'] = (image 
/ 127.5 - 1.0).astype(np.float32) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) return example class LSUNChurchesTrain(LSUNBase): def __init__(self, **kwargs): super().__init__( - txt_file='data/lsun/church_outdoor_train.txt', - data_root='data/lsun/churches', - **kwargs + txt_file="data/lsun/church_outdoor_train.txt", + data_root="data/lsun/churches", + **kwargs, ) class LSUNChurchesValidation(LSUNBase): def __init__(self, flip_p=0.0, **kwargs): super().__init__( - txt_file='data/lsun/church_outdoor_val.txt', - data_root='data/lsun/churches', + txt_file="data/lsun/church_outdoor_val.txt", + data_root="data/lsun/churches", flip_p=flip_p, - **kwargs + **kwargs, ) class LSUNBedroomsTrain(LSUNBase): def __init__(self, **kwargs): super().__init__( - txt_file='data/lsun/bedrooms_train.txt', - data_root='data/lsun/bedrooms', - **kwargs + txt_file="data/lsun/bedrooms_train.txt", + data_root="data/lsun/bedrooms", + **kwargs, ) class LSUNBedroomsValidation(LSUNBase): def __init__(self, flip_p=0.0, **kwargs): super().__init__( - txt_file='data/lsun/bedrooms_val.txt', - data_root='data/lsun/bedrooms', + txt_file="data/lsun/bedrooms_val.txt", + data_root="data/lsun/bedrooms", flip_p=flip_p, - **kwargs + **kwargs, ) class LSUNCatsTrain(LSUNBase): def __init__(self, **kwargs): super().__init__( - txt_file='data/lsun/cat_train.txt', - data_root='data/lsun/cats', - **kwargs + txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs ) class LSUNCatsValidation(LSUNBase): def __init__(self, flip_p=0.0, **kwargs): super().__init__( - txt_file='data/lsun/cat_val.txt', - data_root='data/lsun/cats', + txt_file="data/lsun/cat_val.txt", + data_root="data/lsun/cats", flip_p=flip_p, - **kwargs + **kwargs, ) diff --git a/invokeai/backend/stable_diffusion/data/personalized.py b/invokeai/backend/stable_diffusion/data/personalized.py index 8d9573fbc6..fc8297a68a 100644 --- a/invokeai/backend/stable_diffusion/data/personalized.py +++ b/invokeai/backend/stable_diffusion/data/personalized.py @@ -1,99 +1,99 @@ import os +import random + import numpy as np import PIL from PIL import Image from torch.utils.data import Dataset from torchvision import transforms -import random - imagenet_templates_smallest = [ - 'a photo of a {}', + "a photo of a {}", ] imagenet_templates_small = [ - 'a photo of a {}', - 'a rendering of a {}', - 'a cropped photo of the {}', - 'the photo of a {}', - 'a photo of a clean {}', - 'a photo of a dirty {}', - 'a dark photo of the {}', - 'a photo of my {}', - 'a photo of the cool {}', - 'a close-up photo of a {}', - 'a bright photo of the {}', - 'a cropped photo of a {}', - 'a photo of the {}', - 'a good photo of the {}', - 'a photo of one {}', - 'a close-up photo of the {}', - 'a rendition of the {}', - 'a photo of the clean {}', - 'a rendition of a {}', - 'a photo of a nice {}', - 'a good photo of a {}', - 'a photo of the nice {}', - 'a photo of the small {}', - 'a photo of the weird {}', - 'a photo of the large {}', - 'a photo of a cool {}', - 'a photo of a small {}', + "a photo of a {}", + "a rendering of a {}", + "a cropped photo of the {}", + "the photo of a {}", + "a photo of a clean {}", + "a photo of a dirty {}", + "a dark photo of the {}", + "a photo of my {}", + "a photo of the cool {}", + "a close-up photo of a {}", + "a bright photo of the {}", + "a cropped photo of a {}", + "a photo of the {}", + "a good photo of the {}", + "a photo of one {}", + "a close-up photo of the {}", + "a rendition of the {}", + "a photo of the clean {}", + "a rendition of a {}", + 
"a photo of a nice {}", + "a good photo of a {}", + "a photo of the nice {}", + "a photo of the small {}", + "a photo of the weird {}", + "a photo of the large {}", + "a photo of a cool {}", + "a photo of a small {}", ] imagenet_dual_templates_small = [ - 'a photo of a {} with {}', - 'a rendering of a {} with {}', - 'a cropped photo of the {} with {}', - 'the photo of a {} with {}', - 'a photo of a clean {} with {}', - 'a photo of a dirty {} with {}', - 'a dark photo of the {} with {}', - 'a photo of my {} with {}', - 'a photo of the cool {} with {}', - 'a close-up photo of a {} with {}', - 'a bright photo of the {} with {}', - 'a cropped photo of a {} with {}', - 'a photo of the {} with {}', - 'a good photo of the {} with {}', - 'a photo of one {} with {}', - 'a close-up photo of the {} with {}', - 'a rendition of the {} with {}', - 'a photo of the clean {} with {}', - 'a rendition of a {} with {}', - 'a photo of a nice {} with {}', - 'a good photo of a {} with {}', - 'a photo of the nice {} with {}', - 'a photo of the small {} with {}', - 'a photo of the weird {} with {}', - 'a photo of the large {} with {}', - 'a photo of a cool {} with {}', - 'a photo of a small {} with {}', + "a photo of a {} with {}", + "a rendering of a {} with {}", + "a cropped photo of the {} with {}", + "the photo of a {} with {}", + "a photo of a clean {} with {}", + "a photo of a dirty {} with {}", + "a dark photo of the {} with {}", + "a photo of my {} with {}", + "a photo of the cool {} with {}", + "a close-up photo of a {} with {}", + "a bright photo of the {} with {}", + "a cropped photo of a {} with {}", + "a photo of the {} with {}", + "a good photo of the {} with {}", + "a photo of one {} with {}", + "a close-up photo of the {} with {}", + "a rendition of the {} with {}", + "a photo of the clean {} with {}", + "a rendition of a {} with {}", + "a photo of a nice {} with {}", + "a good photo of a {} with {}", + "a photo of the nice {} with {}", + "a photo of the small {} with {}", + "a photo of the weird {} with {}", + "a photo of the large {} with {}", + "a photo of a cool {} with {}", + "a photo of a small {} with {}", ] per_img_token_list = [ - 'א', - 'ב', - 'ג', - 'ד', - 'ה', - 'ו', - 'ז', - 'ח', - 'ט', - 'י', - 'כ', - 'ל', - 'מ', - 'נ', - 'ס', - 'ע', - 'פ', - 'צ', - 'ק', - 'ר', - 'ש', - 'ת', + "א", + "ב", + "ג", + "ד", + "ה", + "ו", + "ז", + "ח", + "ט", + "י", + "כ", + "ל", + "מ", + "נ", + "ס", + "ע", + "פ", + "צ", + "ק", + "ר", + "ש", + "ת", ] @@ -103,21 +103,21 @@ class PersonalizedBase(Dataset): data_root, size=None, repeats=100, - interpolation='bicubic', + interpolation="bicubic", flip_p=0.5, - set='train', - placeholder_token='*', + set="train", + placeholder_token="*", per_image_tokens=False, center_crop=False, mixing_prob=0.25, coarse_class_text=None, ): - self.data_root = data_root self.image_paths = [ os.path.join(self.data_root, file_path) - for file_path in os.listdir(self.data_root) if file_path != ".DS_Store" + for file_path in os.listdir(self.data_root) + if file_path != ".DS_Store" ] # self._length = len(self.image_paths) @@ -137,15 +137,15 @@ class PersonalizedBase(Dataset): per_img_token_list ), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'." 
- if set == 'train': + if set == "train": self._length = self.num_images * repeats self.size = size self.interpolation = { - 'linear': PIL.Image.LINEAR, - 'bilinear': PIL.Image.BILINEAR, - 'bicubic': PIL.Image.BICUBIC, - 'lanczos': PIL.Image.LANCZOS, + "linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, }[interpolation] self.flip = transforms.RandomHorizontalFlip(p=flip_p) @@ -156,32 +156,31 @@ class PersonalizedBase(Dataset): example = {} image = Image.open(self.image_paths[i % self.num_images]) - if not image.mode == 'RGB': - image = image.convert('RGB') + if not image.mode == "RGB": + image = image.convert("RGB") placeholder_string = self.placeholder_token if self.coarse_class_text: - placeholder_string = ( - f'{self.coarse_class_text} {placeholder_string}' - ) + placeholder_string = f"{self.coarse_class_text} {placeholder_string}" if self.per_image_tokens and np.random.uniform() < self.mixing_prob: text = random.choice(imagenet_dual_templates_small).format( placeholder_string, per_img_token_list[i % self.num_images] ) else: - text = random.choice(imagenet_templates_small).format( - placeholder_string - ) + text = random.choice(imagenet_templates_small).format(placeholder_string) - example['caption'] = text + example["caption"] = text # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) - h, w, = ( + ( + h, + w, + ) = ( img.shape[0], img.shape[1], ) @@ -192,11 +191,9 @@ class PersonalizedBase(Dataset): image = Image.fromarray(img) if self.size is not None: - image = image.resize( - (self.size, self.size), resample=self.interpolation - ) + image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip(image) image = np.array(image).astype(np.uint8) - example['image'] = (image / 127.5 - 1.0).astype(np.float32) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) return example diff --git a/invokeai/backend/stable_diffusion/data/personalized_style.py b/invokeai/backend/stable_diffusion/data/personalized_style.py index 118d5be991..246c25e930 100644 --- a/invokeai/backend/stable_diffusion/data/personalized_style.py +++ b/invokeai/backend/stable_diffusion/data/personalized_style.py @@ -1,77 +1,77 @@ import os +import random + import numpy as np import PIL from PIL import Image from torch.utils.data import Dataset from torchvision import transforms -import random - imagenet_templates_small = [ - 'a painting in the style of {}', - 'a rendering in the style of {}', - 'a cropped painting in the style of {}', - 'the painting in the style of {}', - 'a clean painting in the style of {}', - 'a dirty painting in the style of {}', - 'a dark painting in the style of {}', - 'a picture in the style of {}', - 'a cool painting in the style of {}', - 'a close-up painting in the style of {}', - 'a bright painting in the style of {}', - 'a cropped painting in the style of {}', - 'a good painting in the style of {}', - 'a close-up painting in the style of {}', - 'a rendition in the style of {}', - 'a nice painting in the style of {}', - 'a small painting in the style of {}', - 'a weird painting in the style of {}', - 'a large painting in the style of {}', + "a painting in the style of {}", + "a rendering in the style of {}", + "a cropped painting in the style of {}", + "the painting in the style of {}", + "a clean painting in the style of {}", + "a dirty painting in the style of {}", + "a dark painting in the style of 
{}", + "a picture in the style of {}", + "a cool painting in the style of {}", + "a close-up painting in the style of {}", + "a bright painting in the style of {}", + "a cropped painting in the style of {}", + "a good painting in the style of {}", + "a close-up painting in the style of {}", + "a rendition in the style of {}", + "a nice painting in the style of {}", + "a small painting in the style of {}", + "a weird painting in the style of {}", + "a large painting in the style of {}", ] imagenet_dual_templates_small = [ - 'a painting in the style of {} with {}', - 'a rendering in the style of {} with {}', - 'a cropped painting in the style of {} with {}', - 'the painting in the style of {} with {}', - 'a clean painting in the style of {} with {}', - 'a dirty painting in the style of {} with {}', - 'a dark painting in the style of {} with {}', - 'a cool painting in the style of {} with {}', - 'a close-up painting in the style of {} with {}', - 'a bright painting in the style of {} with {}', - 'a cropped painting in the style of {} with {}', - 'a good painting in the style of {} with {}', - 'a painting of one {} in the style of {}', - 'a nice painting in the style of {} with {}', - 'a small painting in the style of {} with {}', - 'a weird painting in the style of {} with {}', - 'a large painting in the style of {} with {}', + "a painting in the style of {} with {}", + "a rendering in the style of {} with {}", + "a cropped painting in the style of {} with {}", + "the painting in the style of {} with {}", + "a clean painting in the style of {} with {}", + "a dirty painting in the style of {} with {}", + "a dark painting in the style of {} with {}", + "a cool painting in the style of {} with {}", + "a close-up painting in the style of {} with {}", + "a bright painting in the style of {} with {}", + "a cropped painting in the style of {} with {}", + "a good painting in the style of {} with {}", + "a painting of one {} in the style of {}", + "a nice painting in the style of {} with {}", + "a small painting in the style of {} with {}", + "a weird painting in the style of {} with {}", + "a large painting in the style of {} with {}", ] per_img_token_list = [ - 'א', - 'ב', - 'ג', - 'ד', - 'ה', - 'ו', - 'ז', - 'ח', - 'ט', - 'י', - 'כ', - 'ל', - 'מ', - 'נ', - 'ס', - 'ע', - 'פ', - 'צ', - 'ק', - 'ר', - 'ש', - 'ת', + "א", + "ב", + "ג", + "ד", + "ה", + "ו", + "ז", + "ח", + "ט", + "י", + "כ", + "ל", + "מ", + "נ", + "ס", + "ע", + "פ", + "צ", + "ק", + "ר", + "ש", + "ת", ] @@ -81,19 +81,19 @@ class PersonalizedBase(Dataset): data_root, size=None, repeats=100, - interpolation='bicubic', + interpolation="bicubic", flip_p=0.5, - set='train', - placeholder_token='*', + set="train", + placeholder_token="*", per_image_tokens=False, center_crop=False, ): - self.data_root = data_root self.image_paths = [ os.path.join(self.data_root, file_path) - for file_path in os.listdir(self.data_root) if file_path != ".DS_Store" + for file_path in os.listdir(self.data_root) + if file_path != ".DS_Store" ] # self._length = len(self.image_paths) @@ -110,15 +110,15 @@ class PersonalizedBase(Dataset): per_img_token_list ), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'." 
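For orientation, a condensed sketch of what one __getitem__ call in these personalization datasets produces: a caption built from a template plus the placeholder token, and an image cropped, resized, and scaled to [-1, 1]. The template subset, token, image values, and the square center crop are illustrative assumptions, not copied verbatim from the patch.

import random
import numpy as np
from PIL import Image

templates = ["a painting in the style of {}", "a rendering in the style of {}"]  # subset of the list above
placeholder_token = "*"
caption = random.choice(templates).format(placeholder_token)  # e.g. "a painting in the style of *"

img = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)  # stand-in for a training image
crop = min(img.shape[0], img.shape[1])
h, w = img.shape[0], img.shape[1]
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]  # assumed square center crop
image = Image.fromarray(img).resize((512, 512), resample=Image.BICUBIC)
example = {
    "caption": caption,
    "image": (np.array(image).astype(np.uint8) / 127.5 - 1.0).astype(np.float32),  # range [-1, 1]
}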
- if set == 'train': + if set == "train": self._length = self.num_images * repeats self.size = size self.interpolation = { - 'linear': PIL.Image.LINEAR, - 'bilinear': PIL.Image.BILINEAR, - 'bicubic': PIL.Image.BICUBIC, - 'lanczos': PIL.Image.LANCZOS, + "linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, }[interpolation] self.flip = transforms.RandomHorizontalFlip(p=flip_p) @@ -129,8 +129,8 @@ class PersonalizedBase(Dataset): example = {} image = Image.open(self.image_paths[i % self.num_images]) - if not image.mode == 'RGB': - image = image.convert('RGB') + if not image.mode == "RGB": + image = image.convert("RGB") if self.per_image_tokens and np.random.uniform() < 0.25: text = random.choice(imagenet_dual_templates_small).format( @@ -141,14 +141,17 @@ class PersonalizedBase(Dataset): self.placeholder_token ) - example['caption'] = text + example["caption"] = text # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) - h, w, = ( + ( + h, + w, + ) = ( img.shape[0], img.shape[1], ) @@ -159,11 +162,9 @@ class PersonalizedBase(Dataset): image = Image.fromarray(img) if self.size is not None: - image = image.resize( - (self.size, self.size), resample=self.interpolation - ) + image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip(image) image = np.array(image).astype(np.uint8) - example['image'] = (image / 127.5 - 1.0).astype(np.float32) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) return example diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index c4ac77aaf6..f1622f86be 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -2,22 +2,28 @@ from __future__ import annotations import dataclasses import inspect -import psutil import secrets from collections.abc import Sequence from dataclasses import dataclass, field -from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any +from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union -import PIL.Image import einops +import PIL.Image import psutil import torch import torchvision.transforms as T +from compel import EmbeddingsProvider from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import ( + StableDiffusionPipeline, +) +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import ( + StableDiffusionImg2ImgPipeline, +) +from diffusers.pipelines.stable_diffusion.safety_checker import ( + StableDiffusionSafetyChecker, +) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput from diffusers.utils.import_utils import is_xformers_available @@ -27,11 +33,16 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from typing_extensions import ParamSpec from 
invokeai.backend.globals import Globals -from .diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, AttentionMapSaver + +from ..util import CPU_DEVICE, normalize_device +from .diffusion import ( + AttentionMapSaver, + InvokeAIDiffuserComponent, + PostprocessingSettings, +) +from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup from .textual_inversion_manager import TextualInversionManager -from .offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup -from ..util import normalize_device, CPU_DEVICE -from compel import EmbeddingsProvider + @dataclass class PipelineIntermediateState: @@ -49,7 +60,7 @@ _default_personalization_config_params = dict( initializer_wods=["sculpture"], per_image_tokens=False, num_vectors_per_token=1, - progressive_words=False + progressive_words=False, ) @@ -62,29 +73,34 @@ class AddsMaskLatents: This class assumes the same mask and base image should apply to all items in the batch. """ + forward: Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor] mask: torch.Tensor initial_image_latents: torch.Tensor - def __call__(self, latents: torch.Tensor, t: torch.Tensor, text_embeddings: torch.Tensor) -> torch.Tensor: + def __call__( + self, latents: torch.Tensor, t: torch.Tensor, text_embeddings: torch.Tensor + ) -> torch.Tensor: model_input = self.add_mask_channels(latents) return self.forward(model_input, t, text_embeddings) def add_mask_channels(self, latents): batch_size = latents.size(0) # duplicate mask and latents for each batch - mask = einops.repeat(self.mask, 'b c h w -> (repeat b) c h w', repeat=batch_size) - image_latents = einops.repeat(self.initial_image_latents, 'b c h w -> (repeat b) c h w', repeat=batch_size) + mask = einops.repeat( + self.mask, "b c h w -> (repeat b) c h w", repeat=batch_size + ) + image_latents = einops.repeat( + self.initial_image_latents, "b c h w -> (repeat b) c h w", repeat=batch_size + ) # add mask and image as additional channels - model_input, _ = einops.pack([latents, mask, image_latents], 'b * h w') + model_input, _ = einops.pack([latents, mask, image_latents], "b * h w") return model_input def are_like_tensors(a: torch.Tensor, b: object) -> bool: - return ( - isinstance(b, torch.Tensor) - and (a.size() == b.size()) - ) + return isinstance(b, torch.Tensor) and (a.size() == b.size()) + @dataclass class AddsMaskGuidance: @@ -94,7 +110,9 @@ class AddsMaskGuidance: noise: torch.Tensor _debug: Optional[Callable] = None - def __call__(self, step_output: BaseOutput | SchedulerOutput, t: torch.Tensor, conditioning) -> BaseOutput: + def __call__( + self, step_output: BaseOutput | SchedulerOutput, t: torch.Tensor, conditioning + ) -> BaseOutput: output_class = step_output.__class__ # We'll create a new one with masked data. # The problem with taking SchedulerOutput instead of the model output is that we're less certain what's in it. @@ -104,30 +122,41 @@ class AddsMaskGuidance: prev_sample = step_output[0] # Mask anything that has the same shape as prev_sample, return others as-is. 
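To make the channel bookkeeping in AddsMaskLatents above concrete, here is a small self-contained sketch (shapes chosen arbitrarily) of how 4 latent channels, 1 mask channel, and 4 init-image latent channels are packed into the 9-channel input an inpainting UNet expects:

import einops
import torch

latents = torch.randn(2, 4, 64, 64)        # noisy latents for a batch of 2
mask = torch.rand(1, 1, 64, 64)            # single mask, repeated across the batch
image_latents = torch.randn(1, 4, 64, 64)  # encoded init image, repeated likewise

mask = einops.repeat(mask, "b c h w -> (repeat b) c h w", repeat=latents.size(0))
image_latents = einops.repeat(image_latents, "b c h w -> (repeat b) c h w", repeat=latents.size(0))
model_input, _ = einops.pack([latents, mask, image_latents], "b * h w")
assert model_input.shape == (2, 9, 64, 64)  # 4 + 1 + 4 channels, matching is_inpainting_model()'s conv_in check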
return output_class( - {k: (self.apply_mask(v, self._t_for_field(k, t)) - if are_like_tensors(prev_sample, v) else v) - for k, v in step_output.items()} + { + k: ( + self.apply_mask(v, self._t_for_field(k, t)) + if are_like_tensors(prev_sample, v) + else v + ) + for k, v in step_output.items() + } ) - def _t_for_field(self, field_name:str, t): + def _t_for_field(self, field_name: str, t): if field_name == "pred_original_sample": return torch.zeros_like(t, dtype=t.dtype) # it represents t=0 return t def apply_mask(self, latents: torch.Tensor, t) -> torch.Tensor: batch_size = latents.size(0) - mask = einops.repeat(self.mask, 'b c h w -> (repeat b) c h w', repeat=batch_size) + mask = einops.repeat( + self.mask, "b c h w -> (repeat b) c h w", repeat=batch_size + ) if t.dim() == 0: # some schedulers expect t to be one-dimensional. # TODO: file diffusers bug about inconsistency? - t = einops.repeat(t, '-> batch', batch=batch_size) + t = einops.repeat(t, "-> batch", batch=batch_size) # Noise shouldn't be re-randomized between steps here. The multistep schedulers # get very confused about what is happening from step to step when we do that. mask_latents = self.scheduler.add_noise(self.mask_latents, self.noise, t) # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already? # mask_latents = self.scheduler.scale_model_input(mask_latents, t) - mask_latents = einops.repeat(mask_latents, 'b c h w -> (repeat b) c h w', repeat=batch_size) - masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) + mask_latents = einops.repeat( + mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size + ) + masked_input = torch.lerp( + mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype) + ) if self._debug: self._debug(masked_input, f"t={t} lerped") return masked_input @@ -137,7 +166,9 @@ def trim_to_multiple_of(*args, multiple_of=8): return tuple((x - x % multiple_of) for x in args) -def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool=True, multiple_of=8) -> torch.FloatTensor: +def image_resized_to_grid_as_tensor( + image: PIL.Image.Image, normalize: bool = True, multiple_of=8 +) -> torch.FloatTensor: """ :param image: input image @@ -145,10 +176,12 @@ def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool=True :param multiple_of: resize the input so both dimensions are a multiple of this """ w, h = trim_to_multiple_of(*image.size) - transformation = T.Compose([ - T.Resize((h, w), T.InterpolationMode.LANCZOS), - T.ToTensor(), - ]) + transformation = T.Compose( + [ + T.Resize((h, w), T.InterpolationMode.LANCZOS), + T.ToTensor(), + ] + ) tensor = transformation(image) if normalize: tensor = tensor * 2.0 - 1.0 @@ -158,9 +191,11 @@ def image_resized_to_grid_as_tensor(image: PIL.Image.Image, normalize: bool=True def is_inpainting_model(unet: UNet2DConditionModel): return unet.conv_in.in_channels == 9 -CallbackType = TypeVar('CallbackType') -ReturnType = TypeVar('ReturnType') -ParamType = ParamSpec('ParamType') + +CallbackType = TypeVar("CallbackType") +ReturnType = TypeVar("ReturnType") +ParamType = ParamSpec("ParamType") + @dataclass(frozen=True) class GeneratorToCallbackinator(Generic[ParamType, ReturnType, CallbackType]): @@ -169,9 +204,12 @@ class GeneratorToCallbackinator(Generic[ParamType, ReturnType, CallbackType]): generator_method: Callable[ParamType, ReturnType] callback_arg_type: Type[CallbackType] - def __call__(self, *args: ParamType.args, - 
callback:Callable[[CallbackType], Any]=None, - **kwargs: ParamType.kwargs) -> ReturnType: + def __call__( + self, + *args: ParamType.args, + callback: Callable[[CallbackType], Any] = None, + **kwargs: ParamType.kwargs, + ) -> ReturnType: result = None for result in self.generator_method(*args, **kwargs): if callback is not None and isinstance(result, self.callback_arg_type): @@ -216,6 +254,7 @@ class ConditioningData: scheduler_args[name] = value return dataclasses.replace(self, scheduler_args=scheduler_args) + @dataclass class InvokeAIStableDiffusionPipelineOutput(StableDiffusionPipelineOutput): r""" @@ -273,10 +312,18 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): safety_checker: Optional[StableDiffusionSafetyChecker], feature_extractor: Optional[CLIPFeatureExtractor], requires_safety_checker: bool = False, - precision: str = 'float32', + precision: str = "float32", ): - super().__init__(vae, text_encoder, tokenizer, unet, scheduler, - safety_checker, feature_extractor, requires_safety_checker) + super().__init__( + vae, + text_encoder, + tokenizer, + unet, + scheduler, + safety_checker, + feature_extractor, + requires_safety_checker, + ) self.register_modules( vae=vae, @@ -287,27 +334,34 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): safety_checker=safety_checker, feature_extractor=feature_extractor, ) - self.invokeai_diffuser = InvokeAIDiffuserComponent(self.unet, self._unet_forward, is_running_diffusers=True) - use_full_precision = (precision == 'float32' or precision == 'autocast') - self.textual_inversion_manager = TextualInversionManager(tokenizer=self.tokenizer, - text_encoder=self.text_encoder, - full_precision=use_full_precision) + self.invokeai_diffuser = InvokeAIDiffuserComponent( + self.unet, self._unet_forward, is_running_diffusers=True + ) + use_full_precision = precision == "float32" or precision == "autocast" + self.textual_inversion_manager = TextualInversionManager( + tokenizer=self.tokenizer, + text_encoder=self.text_encoder, + full_precision=use_full_precision, + ) # InvokeAI's interface for text embeddings and whatnot self.embeddings_provider = EmbeddingsProvider( tokenizer=self.tokenizer, text_encoder=self.text_encoder, - textual_inversion_manager=self.textual_inversion_manager + textual_inversion_manager=self.textual_inversion_manager, ) self._model_group = FullyLoadedModelGroup(self.unet.device) self._model_group.install(*self._submodels) - def _adjust_memory_efficient_attention(self, latents: torch.Tensor): """ if xformers is available, use it, otherwise use sliced attention. """ - if torch.cuda.is_available() and is_xformers_available() and not Globals.disable_xformers: + if ( + torch.cuda.is_available() + and is_xformers_available() + and not Globals.disable_xformers + ): self.enable_xformers_memory_efficient_attention() else: if torch.backends.mps.is_available(): @@ -316,25 +370,32 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # fix is in https://github.com/kulinseth/pytorch/pull/222 but no idea when it will get merged to pytorch mainline. 
pass else: - if self.device.type == 'cpu' or self.device.type == 'mps': + if self.device.type == "cpu" or self.device.type == "mps": mem_free = psutil.virtual_memory().free - elif self.device.type == 'cuda': + elif self.device.type == "cuda": mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.device)) else: raise ValueError(f"unrecognized device {self.device}") # input tensor of [1, 4, h/8, w/8] # output tensor of [16, (h/8 * w/8), (h/8 * w/8)] - bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4 - max_size_required_for_baddbmm = \ - 16 * \ - latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \ - bytes_per_element_needed_for_baddbmm_duplication - if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code - self.enable_attention_slicing(slice_size='max') + bytes_per_element_needed_for_baddbmm_duplication = ( + latents.element_size() + 4 + ) + max_size_required_for_baddbmm = ( + 16 + * latents.size(dim=2) + * latents.size(dim=3) + * latents.size(dim=2) + * latents.size(dim=3) + * bytes_per_element_needed_for_baddbmm_duplication + ) + if max_size_required_for_baddbmm > ( + mem_free * 3.0 / 4.0 + ): # 3.3 / 4.0 is from old Invoke code + self.enable_attention_slicing(slice_size="max") else: self.disable_attention_slicing() - def enable_offload_submodels(self, device: torch.device): """ Offload each submodel when it's not in use. @@ -396,12 +457,16 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): values = [getattr(self, name) for name in module_names.keys()] return [m for m in values if isinstance(m, torch.nn.Module)] - def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int, - conditioning_data: ConditioningData, - *, - noise: torch.Tensor, - callback: Callable[[PipelineIntermediateState], None]=None, - run_id=None) -> InvokeAIStableDiffusionPipelineOutput: + def image_from_embeddings( + self, + latents: torch.Tensor, + num_inference_steps: int, + conditioning_data: ConditioningData, + *, + noise: torch.Tensor, + callback: Callable[[PipelineIntermediateState], None] = None, + run_id=None, + ) -> InvokeAIStableDiffusionPipelineOutput: r""" Function invoked when calling the pipeline for generation. 
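For a sense of scale in the attention-slicing heuristic in _adjust_memory_efficient_attention above, a quick back-of-the-envelope calculation for a 512x512 image (64x64 latent) with fp16 latents; the concrete sizes here are illustrative:

element_size = 2                              # fp16 latents; fp32 would be 4
bytes_per_element = element_size + 4          # the duplication factor used above
h = w = 512 // 8                              # latent spatial size
max_size_required_for_baddbmm = 16 * h * w * h * w * bytes_per_element
print(max_size_required_for_baddbmm / 2**30)  # 1.5 GiB
# slicing kicks in when this exceeds 3/4 of free memory, i.e. when less than ~2 GiB is free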
@@ -415,71 +480,104 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): :param run_id: """ result_latents, result_attention_map_saver = self.latents_from_embeddings( - latents, num_inference_steps, + latents, + num_inference_steps, conditioning_data, noise=noise, run_id=run_id, - callback=callback) + callback=callback, + ) # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 torch.cuda.empty_cache() with torch.inference_mode(): image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_map_saver) + output = InvokeAIStableDiffusionPipelineOutput( + images=image, + nsfw_content_detected=[], + attention_map_saver=result_attention_map_saver, + ) return self.check_for_safety(output, dtype=conditioning_data.dtype) - def latents_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int, - conditioning_data: ConditioningData, - *, - noise: torch.Tensor, - timesteps=None, - additional_guidance: List[Callable] = None, run_id=None, - callback: Callable[[PipelineIntermediateState], None] = None - ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: + def latents_from_embeddings( + self, + latents: torch.Tensor, + num_inference_steps: int, + conditioning_data: ConditioningData, + *, + noise: torch.Tensor, + timesteps=None, + additional_guidance: List[Callable] = None, + run_id=None, + callback: Callable[[PipelineIntermediateState], None] = None, + ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: if timesteps is None: - self.scheduler.set_timesteps(num_inference_steps, device=self._model_group.device_for(self.unet)) + self.scheduler.set_timesteps( + num_inference_steps, device=self._model_group.device_for(self.unet) + ) timesteps = self.scheduler.timesteps - infer_latents_from_embeddings = GeneratorToCallbackinator(self.generate_latents_from_embeddings, PipelineIntermediateState) + infer_latents_from_embeddings = GeneratorToCallbackinator( + self.generate_latents_from_embeddings, PipelineIntermediateState + ) result: PipelineIntermediateState = infer_latents_from_embeddings( - latents, timesteps, conditioning_data, + latents, + timesteps, + conditioning_data, noise=noise, additional_guidance=additional_guidance, run_id=run_id, - callback=callback) + callback=callback, + ) return result.latents, result.attention_map_saver - def generate_latents_from_embeddings(self, latents: torch.Tensor, timesteps, - conditioning_data: ConditioningData, - *, - noise: torch.Tensor, - run_id: str = None, - additional_guidance: List[Callable] = None): + def generate_latents_from_embeddings( + self, + latents: torch.Tensor, + timesteps, + conditioning_data: ConditioningData, + *, + noise: torch.Tensor, + run_id: str = None, + additional_guidance: List[Callable] = None, + ): self._adjust_memory_efficient_attention(latents) if run_id is None: run_id = secrets.token_urlsafe(self.ID_LENGTH) if additional_guidance is None: additional_guidance = [] extra_conditioning_info = conditioning_data.extra - with self.invokeai_diffuser.custom_attention_context(extra_conditioning_info=extra_conditioning_info, - step_count=len(self.scheduler.timesteps) - ): - - yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps, - latents=latents) + with self.invokeai_diffuser.custom_attention_context( + extra_conditioning_info=extra_conditioning_info, + step_count=len(self.scheduler.timesteps), + ): + yield PipelineIntermediateState( + 
run_id=run_id, + step=-1, + timestep=self.scheduler.num_train_timesteps, + latents=latents, + ) batch_size = latents.shape[0] - batched_t = torch.full((batch_size,), timesteps[0], - dtype=timesteps.dtype, device=self._model_group.device_for(self.unet)) + batched_t = torch.full( + (batch_size,), + timesteps[0], + dtype=timesteps.dtype, + device=self._model_group.device_for(self.unet), + ) latents = self.scheduler.add_noise(latents, noise, batched_t) attention_map_saver: Optional[AttentionMapSaver] = None for i, t in enumerate(self.progress_bar(timesteps)): batched_t.fill_(t) - step_output = self.step(batched_t, latents, conditioning_data, - step_index=i, - total_step_count=len(timesteps), - additional_guidance=additional_guidance) + step_output = self.step( + batched_t, + latents, + conditioning_data, + step_index=i, + total_step_count=len(timesteps), + additional_guidance=additional_guidance, + ) latents = step_output.prev_sample latents = self.invokeai_diffuser.do_latent_postprocessing( @@ -487,28 +585,39 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): latents=latents, sigma=batched_t, step_index=i, - total_step_count=len(timesteps) + total_step_count=len(timesteps), ) - predicted_original = getattr(step_output, 'pred_original_sample', None) + predicted_original = getattr(step_output, "pred_original_sample", None) # TODO resuscitate attention map saving - #if i == len(timesteps)-1 and extra_conditioning_info is not None: + # if i == len(timesteps)-1 and extra_conditioning_info is not None: # eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1 # attention_map_token_ids = range(1, eos_token_index) # attention_map_saver = AttentionMapSaver(token_ids=attention_map_token_ids, latents_shape=latents.shape[-2:]) # self.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) - yield PipelineIntermediateState(run_id=run_id, step=i, timestep=int(t), latents=latents, - predicted_original=predicted_original, attention_map_saver=attention_map_saver) + yield PipelineIntermediateState( + run_id=run_id, + step=i, + timestep=int(t), + latents=latents, + predicted_original=predicted_original, + attention_map_saver=attention_map_saver, + ) return latents, attention_map_saver @torch.inference_mode() - def step(self, t: torch.Tensor, latents: torch.Tensor, - conditioning_data: ConditioningData, - step_index:int, total_step_count:int, - additional_guidance: List[Callable] = None): + def step( + self, + t: torch.Tensor, + latents: torch.Tensor, + conditioning_data: ConditioningData, + step_index: int, + total_step_count: int, + additional_guidance: List[Callable] = None, + ): # invokeai_diffuser has batched timesteps, but diffusers schedulers expect a single value timestep = t[0] @@ -521,16 +630,19 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # predict the noise residual noise_pred = self.invokeai_diffuser.do_diffusion_step( - latent_model_input, t, - conditioning_data.unconditioned_embeddings, conditioning_data.text_embeddings, + latent_model_input, + t, + conditioning_data.unconditioned_embeddings, + conditioning_data.text_embeddings, conditioning_data.guidance_scale, step_index=step_index, total_step_count=total_step_count, ) # compute the previous noisy sample x_t -> x_t-1 - step_output = self.scheduler.step(noise_pred, timestep, latents, - **conditioning_data.scheduler_args) + step_output = self.scheduler.step( + noise_pred, timestep, latents, **conditioning_data.scheduler_args + ) # TODO: this additional_guidance extension point 
feels redundant with InvokeAIDiffusionComponent. # But the way things are now, scheduler runs _after_ that, so there was @@ -540,7 +652,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): return step_output - def _unet_forward(self, latents, t, text_embeddings, cross_attention_kwargs: Optional[dict[str,Any]] = None): + def _unet_forward( + self, + latents, + t, + text_embeddings, + cross_attention_kwargs: Optional[dict[str, Any]] = None, + ): """predict the noise residual""" if is_inpainting_model(self.unet) and latents.size(1) == 4: # Pad out normal non-inpainting inputs for an inpainting model. @@ -549,67 +667,100 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # use of AddsMaskLatents. latents = AddsMaskLatents( self._unet_forward, - mask=torch.ones_like(latents[:1, :1], device=latents.device, dtype=latents.dtype), - initial_image_latents=torch.zeros_like(latents[:1], device=latents.device, dtype=latents.dtype) + mask=torch.ones_like( + latents[:1, :1], device=latents.device, dtype=latents.dtype + ), + initial_image_latents=torch.zeros_like( + latents[:1], device=latents.device, dtype=latents.dtype + ), ).add_mask_channels(latents) # First three args should be positional, not keywords, so torch hooks can see them. - return self.unet(latents, t, text_embeddings, - cross_attention_kwargs=cross_attention_kwargs).sample + return self.unet( + latents, t, text_embeddings, cross_attention_kwargs=cross_attention_kwargs + ).sample - def img2img_from_embeddings(self, - init_image: Union[torch.FloatTensor, PIL.Image.Image], - strength: float, - num_inference_steps: int, - conditioning_data: ConditioningData, - *, callback: Callable[[PipelineIntermediateState], None] = None, - run_id=None, - noise_func=None - ) -> InvokeAIStableDiffusionPipelineOutput: + def img2img_from_embeddings( + self, + init_image: Union[torch.FloatTensor, PIL.Image.Image], + strength: float, + num_inference_steps: int, + conditioning_data: ConditioningData, + *, + callback: Callable[[PipelineIntermediateState], None] = None, + run_id=None, + noise_func=None, + ) -> InvokeAIStableDiffusionPipelineOutput: if isinstance(init_image, PIL.Image.Image): - init_image = image_resized_to_grid_as_tensor(init_image.convert('RGB')) + init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB")) if init_image.dim() == 3: - init_image = einops.rearrange(init_image, 'c h w -> 1 c h w') + init_image = einops.rearrange(init_image, "c h w -> 1 c h w") # 6. 
Prepare latent variables initial_latents = self.non_noised_latents_from_image( - init_image, device=self._model_group.device_for(self.unet), - dtype=self.unet.dtype) + init_image, + device=self._model_group.device_for(self.unet), + dtype=self.unet.dtype, + ) noise = noise_func(initial_latents) - return self.img2img_from_latents_and_embeddings(initial_latents, num_inference_steps, - conditioning_data, - strength, - noise, run_id, callback) + return self.img2img_from_latents_and_embeddings( + initial_latents, + num_inference_steps, + conditioning_data, + strength, + noise, + run_id, + callback, + ) - def img2img_from_latents_and_embeddings(self, initial_latents, num_inference_steps, - conditioning_data: ConditioningData, - strength, - noise: torch.Tensor, run_id=None, callback=None - ) -> InvokeAIStableDiffusionPipelineOutput: - timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, - device=self._model_group.device_for(self.unet)) + def img2img_from_latents_and_embeddings( + self, + initial_latents, + num_inference_steps, + conditioning_data: ConditioningData, + strength, + noise: torch.Tensor, + run_id=None, + callback=None, + ) -> InvokeAIStableDiffusionPipelineOutput: + timesteps, _ = self.get_img2img_timesteps( + num_inference_steps, + strength, + device=self._model_group.device_for(self.unet), + ) result_latents, result_attention_maps = self.latents_from_embeddings( - initial_latents, num_inference_steps, conditioning_data, + initial_latents, + num_inference_steps, + conditioning_data, timesteps=timesteps, noise=noise, run_id=run_id, - callback=callback) + callback=callback, + ) # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 torch.cuda.empty_cache() with torch.inference_mode(): image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps) + output = InvokeAIStableDiffusionPipelineOutput( + images=image, + nsfw_content_detected=[], + attention_map_saver=result_attention_maps, + ) return self.check_for_safety(output, dtype=conditioning_data.dtype) - def get_img2img_timesteps(self, num_inference_steps: int, strength: float, device) -> (torch.Tensor, int): + def get_img2img_timesteps( + self, num_inference_steps: int, strength: float, device + ) -> (torch.Tensor, int): img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components) assert img2img_pipeline.scheduler is self.scheduler img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps, adjusted_steps = img2img_pipeline.get_timesteps(num_inference_steps, strength, device=device) + timesteps, adjusted_steps = img2img_pipeline.get_timesteps( + num_inference_steps, strength, device=device + ) # Workaround for low strength resulting in zero timesteps. 
# TODO: submit upstream fix for zero-step img2img if timesteps.numel() == 0: @@ -618,21 +769,22 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): return timesteps, adjusted_steps def inpaint_from_embeddings( - self, - init_image: torch.FloatTensor, - mask: torch.FloatTensor, - strength: float, - num_inference_steps: int, - conditioning_data: ConditioningData, - *, callback: Callable[[PipelineIntermediateState], None] = None, - run_id=None, - noise_func=None, - ) -> InvokeAIStableDiffusionPipelineOutput: + self, + init_image: torch.FloatTensor, + mask: torch.FloatTensor, + strength: float, + num_inference_steps: int, + conditioning_data: ConditioningData, + *, + callback: Callable[[PipelineIntermediateState], None] = None, + run_id=None, + noise_func=None, + ) -> InvokeAIStableDiffusionPipelineOutput: device = self._model_group.device_for(self.unet) latents_dtype = self.unet.dtype if isinstance(init_image, PIL.Image.Image): - init_image = image_resized_to_grid_as_tensor(init_image.convert('RGB')) + init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB")) init_image = init_image.to(device=device, dtype=latents_dtype) mask = mask.to(device=device, dtype=latents_dtype) @@ -640,18 +792,23 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): if init_image.dim() == 3: init_image = init_image.unsqueeze(0) - timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, device=device) + timesteps, _ = self.get_img2img_timesteps( + num_inference_steps, strength, device=device + ) # 6. Prepare latent variables # can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents # because we have our own noise function - init_image_latents = self.non_noised_latents_from_image(init_image, device=device, dtype=latents_dtype) + init_image_latents = self.non_noised_latents_from_image( + init_image, device=device, dtype=latents_dtype + ) noise = noise_func(init_image_latents) if mask.dim() == 3: mask = mask.unsqueeze(0) - latent_mask = tv_resize(mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR) \ - .to(device=device, dtype=latents_dtype) + latent_mask = tv_resize( + mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR + ).to(device=device, dtype=latents_dtype) guidance: List[Callable] = [] @@ -659,20 +816,30 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint # (that's why there's a mask!) but it seems to really want that blanked out. masked_init_image = init_image * torch.where(mask < 0.5, 1, 0) - masked_latents = self.non_noised_latents_from_image(masked_init_image, device=device, dtype=latents_dtype) + masked_latents = self.non_noised_latents_from_image( + masked_init_image, device=device, dtype=latents_dtype + ) # TODO: we should probably pass this in so we don't have to try/finally around setting it. 
- self.invokeai_diffuser.model_forward_callback = \ - AddsMaskLatents(self._unet_forward, latent_mask, masked_latents) + self.invokeai_diffuser.model_forward_callback = AddsMaskLatents( + self._unet_forward, latent_mask, masked_latents + ) else: - guidance.append(AddsMaskGuidance(latent_mask, init_image_latents, self.scheduler, noise)) + guidance.append( + AddsMaskGuidance(latent_mask, init_image_latents, self.scheduler, noise) + ) try: result_latents, result_attention_maps = self.latents_from_embeddings( - init_image_latents, num_inference_steps, - conditioning_data, noise=noise, timesteps=timesteps, + init_image_latents, + num_inference_steps, + conditioning_data, + noise=noise, + timesteps=timesteps, additional_guidance=guidance, - run_id=run_id, callback=callback) + run_id=run_id, + callback=callback, + ) finally: self.invokeai_diffuser.model_forward_callback = self._unet_forward @@ -681,13 +848,17 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): with torch.inference_mode(): image = self.decode_latents(result_latents) - output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps) + output = InvokeAIStableDiffusionPipelineOutput( + images=image, + nsfw_content_detected=[], + attention_map_saver=result_attention_maps, + ) return self.check_for_safety(output, dtype=conditioning_data.dtype) def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype): init_image = init_image.to(device=device, dtype=dtype) with torch.inference_mode(): - if device.type == 'mps': + if device.type == "mps": # workaround for torch MPS bug that has been fixed in https://github.com/kulinseth/pytorch/pull/222 # TODO remove this workaround once kulinseth#222 is merged to pytorch mainline self.vae.to(CPU_DEVICE) @@ -695,8 +866,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): else: self._model_group.load(self.vae) init_latent_dist = self.vae.encode(init_image).latent_dist - init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible! - if device.type == 'mps': + init_latents = init_latent_dist.sample().to( + dtype=dtype + ) # FIXME: uses torch.randn. make reproducible! + if device.type == "mps": self.vae.to(device) init_latents = init_latents.to(device) @@ -705,14 +878,18 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): def check_for_safety(self, output, dtype): with torch.inference_mode(): - screened_images, has_nsfw_concept = self.run_safety_checker(output.images, dtype=dtype) + screened_images, has_nsfw_concept = self.run_safety_checker( + output.images, dtype=dtype + ) screened_attention_map_saver = None if has_nsfw_concept is None or not has_nsfw_concept: screened_attention_map_saver = output.attention_map_saver - return InvokeAIStableDiffusionPipelineOutput(screened_images, - has_nsfw_concept, - # block the attention maps if NSFW content is detected - attention_map_saver=screened_attention_map_saver) + return InvokeAIStableDiffusionPipelineOutput( + screened_images, + has_nsfw_concept, + # block the attention maps if NSFW content is detected + attention_map_saver=screened_attention_map_saver, + ) def run_safety_checker(self, image, device=None, dtype=None): # overriding to use the model group for device info instead of requiring the caller to know. 
@@ -721,7 +898,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): return super().run_safety_checker(image, device, dtype) @torch.inference_mode() - def get_learned_conditioning(self, c: List[List[str]], *, return_tokens=True, fragment_weights=None): + def get_learned_conditioning( + self, c: List[List[str]], *, return_tokens=True, fragment_weights=None + ): """ Compatibility function for invokeai.models.diffusion.ddpm.LatentDiffusion. """ @@ -729,7 +908,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): text_batch=c, fragment_weights_batch=fragment_weights, should_return_tokens=return_tokens, - device=self._model_group.device_for(self.unet)) + device=self._model_group.device_for(self.unet), + ) @property def cond_stage_model(self): @@ -758,6 +938,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): def debug_latents(self, latents, msg): with torch.inference_mode(): from ldm.util import debug_image + decoded = self.numpy_to_pil(self.decode_latents(latents)) for i, img in enumerate(decoded): - debug_image(img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True) + debug_image( + img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True + ) diff --git a/invokeai/backend/stable_diffusion/diffusion/__init__.py b/invokeai/backend/stable_diffusion/diffusion/__init__.py index 569c22c429..6dd2817f29 100644 --- a/invokeai/backend/stable_diffusion/diffusion/__init__.py +++ b/invokeai/backend/stable_diffusion/diffusion/__init__.py @@ -1,6 +1,6 @@ -''' +""" Initialization file for invokeai.models.diffusion -''' -from .shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings +""" from .cross_attention_control import InvokeAICrossAttentionMixin from .cross_attention_map_saving import AttentionMapSaver +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings diff --git a/invokeai/backend/stable_diffusion/diffusion/__init__.py~ b/invokeai/backend/stable_diffusion/diffusion/__init__.py~ deleted file mode 100644 index d7706c27eb..0000000000 --- a/invokeai/backend/stable_diffusion/diffusion/__init__.py~ +++ /dev/null @@ -1,4 +0,0 @@ -''' -Initialization file for invokeai.models.diffusion -''' -from shared_invokeai_diffusion import InvokeAIDiffuserComponent diff --git a/invokeai/backend/stable_diffusion/diffusion/classifier.py b/invokeai/backend/stable_diffusion/diffusion/classifier.py index be0d8c1919..89aba16ee9 100644 --- a/invokeai/backend/stable_diffusion/diffusion/classifier.py +++ b/invokeai/backend/stable_diffusion/diffusion/classifier.py @@ -1,22 +1,19 @@ import os -import torch +from copy import deepcopy +from glob import glob + import pytorch_lightning as pl +import torch +from einops import rearrange +from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel +from ldm.util import default, instantiate_from_config, ismap, log_txt_as_img +from natsort import natsorted from omegaconf import OmegaConf from torch.nn import functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR -from copy import deepcopy -from einops import rearrange -from glob import glob -from natsort import natsorted -from ldm.modules.diffusionmodules.openaimodel import ( - EncoderUNetModel, - UNetModel, -) -from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config - -__models__ = {'class_label': EncoderUNetModel, 'segmentation': UNetModel} +__models__ = {"class_label": EncoderUNetModel, "segmentation": UNetModel} def 
disabled_train(self, mode=True): @@ -31,13 +28,13 @@ class NoisyLatentImageClassifier(pl.LightningModule): diffusion_path, num_classes, ckpt_path=None, - pool='attention', + pool="attention", label_key=None, diffusion_ckpt_path=None, scheduler_config=None, weight_decay=1.0e-2, log_steps=10, - monitor='val/loss', + monitor="val/loss", *args, **kwargs, ): @@ -45,30 +42,26 @@ class NoisyLatentImageClassifier(pl.LightningModule): self.num_classes = num_classes # get latest config of diffusion model diffusion_config = natsorted( - glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')) + glob(os.path.join(diffusion_path, "configs", "*-project.yaml")) )[-1] self.diffusion_config = OmegaConf.load(diffusion_config).model self.diffusion_config.params.ckpt_path = diffusion_ckpt_path self.load_diffusion() self.monitor = monitor - self.numd = ( - self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 - ) - self.log_time_interval = ( - self.diffusion_model.num_timesteps // log_steps - ) + self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 + self.log_time_interval = self.diffusion_model.num_timesteps // log_steps self.log_steps = log_steps self.label_key = ( label_key - if not hasattr(self.diffusion_model, 'cond_stage_key') + if not hasattr(self.diffusion_model, "cond_stage_key") else self.diffusion_model.cond_stage_key ) assert ( self.label_key is not None - ), 'label_key neither in diffusion model nor in model.params' + ), "label_key neither in diffusion model nor in model.params" if self.label_key not in __models__: raise NotImplementedError() @@ -80,14 +73,14 @@ class NoisyLatentImageClassifier(pl.LightningModule): self.weight_decay = weight_decay def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location='cpu') - if 'state_dict' in list(sd.keys()): - sd = sd['state_dict'] + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) + print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = ( self.load_state_dict(sd, strict=False) @@ -95,12 +88,12 @@ class NoisyLatentImageClassifier(pl.LightningModule): else self.model.load_state_dict(sd, strict=False) ) print( - f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: - print(f'Missing Keys: {missing}') + print(f"Missing Keys: {missing}") if len(unexpected) > 0: - print(f'Unexpected Keys: {unexpected}') + print(f"Unexpected Keys: {unexpected}") def load_diffusion(self): model = instantiate_from_config(self.diffusion_config) @@ -110,24 +103,22 @@ class NoisyLatentImageClassifier(pl.LightningModule): param.requires_grad = False def load_classifier(self, ckpt_path, pool): - model_config = deepcopy( - self.diffusion_config.params.unet_config.params - ) + model_config = deepcopy(self.diffusion_config.params.unet_config.params) model_config.in_channels = ( self.diffusion_config.params.unet_config.params.out_channels ) model_config.out_channels = self.num_classes - if self.label_key == 'class_label': + if self.label_key == "class_label": model_config.pool = pool self.model = __models__[self.label_key](**model_config) if ckpt_path is not None: print( - 
'#####################################################################' + "#####################################################################" ) print(f'load from ckpt "{ckpt_path}"') print( - '#####################################################################' + "#####################################################################" ) self.init_from_ckpt(ckpt_path) @@ -137,9 +128,7 @@ class NoisyLatentImageClassifier(pl.LightningModule): continuous_sqrt_alpha_cumprod = None if self.diffusion_model.use_continuous_noise: continuous_sqrt_alpha_cumprod = ( - self.diffusion_model.sample_continuous_noise_level( - x.shape[0], t + 1 - ) + self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) ) # todo: make sure t+1 is correct here @@ -158,7 +147,7 @@ class NoisyLatentImageClassifier(pl.LightningModule): x = batch[k] if len(x.shape) == 3: x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') + x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x @@ -166,45 +155,41 @@ class NoisyLatentImageClassifier(pl.LightningModule): def get_conditioning(self, batch, k=None): if k is None: k = self.label_key - assert k is not None, 'Needs to provide label key' + assert k is not None, "Needs to provide label key" targets = batch[k].to(self.device) - if self.label_key == 'segmentation': - targets = rearrange(targets, 'b h w c -> b c h w') + if self.label_key == "segmentation": + targets = rearrange(targets, "b h w c -> b c h w") for down in range(self.numd): h, w = targets.shape[-2:] - targets = F.interpolate( - targets, size=(h // 2, w // 2), mode='nearest' - ) + targets = F.interpolate(targets, size=(h // 2, w // 2), mode="nearest") # targets = rearrange(targets,'b c h w -> b h w c') return targets - def compute_top_k(self, logits, labels, k, reduction='mean'): + def compute_top_k(self, logits, labels, k, reduction="mean"): _, top_ks = torch.topk(logits, k, dim=1) - if reduction == 'mean': - return ( - (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() - ) - elif reduction == 'none': + if reduction == "mean": + return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() + elif reduction == "none": return (top_ks == labels[:, None]).float().sum(dim=-1) def on_train_epoch_start(self): # save some memory - self.diffusion_model.model.to('cpu') + self.diffusion_model.model.to("cpu") @torch.no_grad() def write_logs(self, loss, logits, targets): - log_prefix = 'train' if self.training else 'val' + log_prefix = "train" if self.training else "val" log = {} - log[f'{log_prefix}/loss'] = loss.mean() - log[f'{log_prefix}/acc@1'] = self.compute_top_k( - logits, targets, k=1, reduction='mean' + log[f"{log_prefix}/loss"] = loss.mean() + log[f"{log_prefix}/acc@1"] = self.compute_top_k( + logits, targets, k=1, reduction="mean" ) - log[f'{log_prefix}/acc@5'] = self.compute_top_k( - logits, targets, k=5, reduction='mean' + log[f"{log_prefix}/acc@5"] = self.compute_top_k( + logits, targets, k=5, reduction="mean" ) self.log_dict( @@ -214,19 +199,17 @@ class NoisyLatentImageClassifier(pl.LightningModule): on_step=self.training, on_epoch=True, ) + self.log("loss", log[f"{log_prefix}/loss"], prog_bar=True, logger=False) self.log( - 'loss', log[f'{log_prefix}/loss'], prog_bar=True, logger=False - ) - self.log( - 'global_step', + "global_step", self.global_step, logger=False, on_epoch=False, prog_bar=True, ) - lr = self.optimizers().param_groups[0]['lr'] + lr = self.optimizers().param_groups[0]["lr"] self.log( - 'lr_abs', + "lr_abs", lr, 
on_step=True, logger=True, @@ -249,13 +232,11 @@ class NoisyLatentImageClassifier(pl.LightningModule): device=self.device, ).long() else: - t = torch.full( - size=(x.shape[0],), fill_value=t, device=self.device - ).long() + t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() x_noisy = self.get_x_noisy(x, t) logits = self(x_noisy, t) - loss = F.cross_entropy(logits, targets, reduction='none') + loss = F.cross_entropy(logits, targets, reduction="none") self.write_logs(loss.detach(), logits.detach(), targets.detach()) @@ -268,7 +249,7 @@ class NoisyLatentImageClassifier(pl.LightningModule): def reset_noise_accs(self): self.noisy_acc = { - t: {'acc@1': [], 'acc@5': []} + t: {"acc@1": [], "acc@5": []} for t in range( 0, self.diffusion_model.num_timesteps, @@ -285,11 +266,11 @@ class NoisyLatentImageClassifier(pl.LightningModule): for t in self.noisy_acc: _, logits, _, targets = self.shared_step(batch, t) - self.noisy_acc[t]['acc@1'].append( - self.compute_top_k(logits, targets, k=1, reduction='mean') + self.noisy_acc[t]["acc@1"].append( + self.compute_top_k(logits, targets, k=1, reduction="mean") ) - self.noisy_acc[t]['acc@5'].append( - self.compute_top_k(logits, targets, k=5, reduction='mean') + self.noisy_acc[t]["acc@5"].append( + self.compute_top_k(logits, targets, k=5, reduction="mean") ) return loss @@ -304,14 +285,12 @@ class NoisyLatentImageClassifier(pl.LightningModule): if self.use_scheduler: scheduler = instantiate_from_config(self.scheduler_config) - print('Setting up LambdaLR scheduler...') + print("Setting up LambdaLR scheduler...") scheduler = [ { - 'scheduler': LambdaLR( - optimizer, lr_lambda=scheduler.schedule - ), - 'interval': 'step', - 'frequency': 1, + "scheduler": LambdaLR(optimizer, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, } ] return [optimizer], scheduler @@ -322,32 +301,28 @@ class NoisyLatentImageClassifier(pl.LightningModule): def log_images(self, batch, N=8, *args, **kwargs): log = dict() x = self.get_input(batch, self.diffusion_model.first_stage_key) - log['inputs'] = x + log["inputs"] = x y = self.get_conditioning(batch) - if self.label_key == 'class_label': - y = log_txt_as_img((x.shape[2], x.shape[3]), batch['human_label']) - log['labels'] = y + if self.label_key == "class_label": + y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log["labels"] = y if ismap(y): - log['labels'] = self.diffusion_model.to_rgb(y) + log["labels"] = self.diffusion_model.to_rgb(y) for step in range(self.log_steps): current_time = step * self.log_time_interval _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) - log[f'inputs@t{current_time}'] = x_noisy + log[f"inputs@t{current_time}"] = x_noisy - pred = F.one_hot( - logits.argmax(dim=1), num_classes=self.num_classes - ) - pred = rearrange(pred, 'b h w c -> b c h w') + pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) + pred = rearrange(pred, "b h w c -> b c h w") - log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb( - pred - ) + log[f"pred@t{current_time}"] = self.diffusion_model.to_rgb(pred) for key in log: log[key] = log[key][:N] diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py index 3373bf0e61..f933a11a6f 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py @@ -1,20 +1,19 @@ - # adapted from bloc97's 
CrossAttentionControl colab # https://github.com/bloc97/CrossAttentionControl import enum import math -from typing import Optional, Callable +from typing import Callable, Optional +import diffusers import psutil import torch -import diffusers +from compel.cross_attention_control import Arguments +from diffusers.models.cross_attention import AttnProcessor +from diffusers.models.unet_2d_condition import UNet2DConditionModel from torch import nn -from compel.cross_attention_control import Arguments -from diffusers.models.unet_2d_condition import UNet2DConditionModel -from diffusers.models.cross_attention import AttnProcessor from ...util import torch_dtype @@ -24,13 +23,12 @@ class CrossAttentionType(enum.Enum): class Context: - cross_attention_mask: Optional[torch.Tensor] cross_attention_index_map: Optional[torch.Tensor] class Action(enum.Enum): NONE = 0 - SAVE = 1, + SAVE = (1,) APPLY = 2 def __init__(self, arguments: Arguments, step_count: int): @@ -53,11 +51,13 @@ class Context: self.clear_requests(cleanup=True) def register_cross_attention_modules(self, model): - for name,module in get_cross_attention_modules(model, CrossAttentionType.SELF): + for name, module in get_cross_attention_modules(model, CrossAttentionType.SELF): if name in self.self_cross_attention_module_identifiers: assert False, f"name {name} cannot appear more than once" self.self_cross_attention_module_identifiers.append(name) - for name,module in get_cross_attention_modules(model, CrossAttentionType.TOKENS): + for name, module in get_cross_attention_modules( + model, CrossAttentionType.TOKENS + ): if name in self.tokens_cross_attention_module_identifiers: assert False, f"name {name} cannot appear more than once" self.tokens_cross_attention_module_identifiers.append(name) @@ -68,7 +68,9 @@ class Context: else: self.tokens_cross_attention_action = Context.Action.SAVE - def request_apply_saved_attention_maps(self, cross_attention_type: CrossAttentionType): + def request_apply_saved_attention_maps( + self, cross_attention_type: CrossAttentionType + ): if cross_attention_type == CrossAttentionType.SELF: self.self_cross_attention_action = Context.Action.APPLY else: @@ -91,8 +93,9 @@ class Context: return self.tokens_cross_attention_action == Context.Action.APPLY return False - def get_active_cross_attention_control_types_for_step(self, percent_through:float=None)\ - -> list[CrossAttentionType]: + def get_active_cross_attention_control_types_for_step( + self, percent_through: float = None + ) -> list[CrossAttentionType]: """ Should cross-attention control be applied on the given step? :param percent_through: How far through the step sequence are we (0.0=pure noise, 1.0=completely denoised image). Expected range 0.0..<1.0. 
@@ -103,50 +106,73 @@ class Context: opts = self.arguments.edit_options to_control = [] - if opts['s_start'] <= percent_through < opts['s_end']: + if opts["s_start"] <= percent_through < opts["s_end"]: to_control.append(CrossAttentionType.SELF) - if opts['t_start'] <= percent_through < opts['t_end']: + if opts["t_start"] <= percent_through < opts["t_end"]: to_control.append(CrossAttentionType.TOKENS) return to_control - def save_slice(self, identifier: str, slice: torch.Tensor, dim: Optional[int], offset: int, - slice_size: Optional[int]): + def save_slice( + self, + identifier: str, + slice: torch.Tensor, + dim: Optional[int], + offset: int, + slice_size: Optional[int], + ): if identifier not in self.saved_cross_attention_maps: self.saved_cross_attention_maps[identifier] = { - 'dim': dim, - 'slice_size': slice_size, - 'slices': {offset or 0: slice} + "dim": dim, + "slice_size": slice_size, + "slices": {offset or 0: slice}, } else: - self.saved_cross_attention_maps[identifier]['slices'][offset or 0] = slice + self.saved_cross_attention_maps[identifier]["slices"][offset or 0] = slice - def get_slice(self, identifier: str, requested_dim: Optional[int], requested_offset: int, slice_size: int): + def get_slice( + self, + identifier: str, + requested_dim: Optional[int], + requested_offset: int, + slice_size: int, + ): saved_attention_dict = self.saved_cross_attention_maps[identifier] if requested_dim is None: - if saved_attention_dict['dim'] is not None: - raise RuntimeError(f"dim mismatch: expected dim=None, have {saved_attention_dict['dim']}") - return saved_attention_dict['slices'][0] - - if saved_attention_dict['dim'] == requested_dim: - if slice_size != saved_attention_dict['slice_size']: + if saved_attention_dict["dim"] is not None: raise RuntimeError( - f"slice_size mismatch: expected slice_size={slice_size}, have {saved_attention_dict['slice_size']}") - return saved_attention_dict['slices'][requested_offset] + f"dim mismatch: expected dim=None, have {saved_attention_dict['dim']}" + ) + return saved_attention_dict["slices"][0] - if saved_attention_dict['dim'] is None: - whole_saved_attention = saved_attention_dict['slices'][0] + if saved_attention_dict["dim"] == requested_dim: + if slice_size != saved_attention_dict["slice_size"]: + raise RuntimeError( + f"slice_size mismatch: expected slice_size={slice_size}, have {saved_attention_dict['slice_size']}" + ) + return saved_attention_dict["slices"][requested_offset] + + if saved_attention_dict["dim"] is None: + whole_saved_attention = saved_attention_dict["slices"][0] if requested_dim == 0: - return whole_saved_attention[requested_offset:requested_offset + slice_size] + return whole_saved_attention[ + requested_offset : requested_offset + slice_size + ] elif requested_dim == 1: - return whole_saved_attention[:, requested_offset:requested_offset + slice_size] + return whole_saved_attention[ + :, requested_offset : requested_offset + slice_size + ] - raise RuntimeError(f"Cannot convert dim {saved_attention_dict['dim']} to requested dim {requested_dim}") + raise RuntimeError( + f"Cannot convert dim {saved_attention_dict['dim']} to requested dim {requested_dim}" + ) - def get_slicing_strategy(self, identifier: str) -> tuple[Optional[int], Optional[int]]: + def get_slicing_strategy( + self, identifier: str + ) -> tuple[Optional[int], Optional[int]]: saved_attention = self.saved_cross_attention_maps.get(identifier, None) if saved_attention is None: return None, None - return saved_attention['dim'], saved_attention['slice_size'] + return 
saved_attention["dim"], saved_attention["slice_size"] def clear_requests(self, cleanup=True): self.tokens_cross_attention_action = Context.Action.NONE @@ -156,9 +182,8 @@ class Context: def offload_saved_attention_slices_to_cpu(self): for key, map_dict in self.saved_cross_attention_maps.items(): - for offset, slice in map_dict['slices'].items(): - map_dict[offset] = slice.to('cpu') - + for offset, slice in map_dict["slices"].items(): + map_dict[offset] = slice.to("cpu") class InvokeAICrossAttentionMixin: @@ -167,14 +192,20 @@ class InvokeAICrossAttentionMixin: through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling and dymamic slicing strategy selection. """ + def __init__(self): self.mem_total_gb = psutil.virtual_memory().total // (1 << 30) self.attention_slice_wrangler = None self.slicing_strategy_getter = None self.attention_slice_calculated_callback = None - def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor]]): - ''' + def set_attention_slice_wrangler( + self, + wrangler: Optional[ + Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor] + ], + ): + """ Set custom attention calculator to be called when attention is calculated :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size), which returns either the suggested_attention_slice or an adjusted equivalent. @@ -185,20 +216,30 @@ class InvokeAICrossAttentionMixin: Pass None to use the default attention calculation. :return: - ''' + """ self.attention_slice_wrangler = wrangler - def set_slicing_strategy_getter(self, getter: Optional[Callable[[nn.Module], tuple[int,int]]]): + def set_slicing_strategy_getter( + self, getter: Optional[Callable[[nn.Module], tuple[int, int]]] + ): self.slicing_strategy_getter = getter - def set_attention_slice_calculated_callback(self, callback: Optional[Callable[[torch.Tensor], None]]): + def set_attention_slice_calculated_callback( + self, callback: Optional[Callable[[torch.Tensor], None]] + ): self.attention_slice_calculated_callback = callback def einsum_lowest_level(self, query, key, value, dim, offset, slice_size): # calculate attention scores - #attention_scores = torch.einsum('b i d, b j d -> b i j', q, k) + # attention_scores = torch.einsum('b i d, b j d -> b i j', q, k) attention_scores = torch.baddbmm( - torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device), + torch.empty( + query.shape[0], + query.shape[1], + key.shape[1], + dtype=query.dtype, + device=query.device, + ), query, key.transpose(-1, -2), beta=0, @@ -206,35 +247,49 @@ class InvokeAICrossAttentionMixin: ) # calculate attention slice by taking the best scores for each latent pixel - default_attention_slice = attention_scores.softmax(dim=-1, dtype=attention_scores.dtype) + default_attention_slice = attention_scores.softmax( + dim=-1, dtype=attention_scores.dtype + ) attention_slice_wrangler = self.attention_slice_wrangler if attention_slice_wrangler is not None: - attention_slice = attention_slice_wrangler(self, default_attention_slice, dim, offset, slice_size) + attention_slice = attention_slice_wrangler( + self, default_attention_slice, dim, offset, slice_size + ) else: attention_slice = default_attention_slice if self.attention_slice_calculated_callback is not None: - self.attention_slice_calculated_callback(attention_slice, dim, offset, slice_size) + self.attention_slice_calculated_callback( + attention_slice, dim, 
offset, slice_size + ) hidden_states = torch.bmm(attention_slice, value) return hidden_states def einsum_op_slice_dim0(self, q, k, v, slice_size): - r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + r = torch.zeros( + q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype + ) for i in range(0, q.shape[0], slice_size): end = i + slice_size - r[i:end] = self.einsum_lowest_level(q[i:end], k[i:end], v[i:end], dim=0, offset=i, slice_size=slice_size) + r[i:end] = self.einsum_lowest_level( + q[i:end], k[i:end], v[i:end], dim=0, offset=i, slice_size=slice_size + ) return r def einsum_op_slice_dim1(self, q, k, v, slice_size): - r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + r = torch.zeros( + q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype + ) for i in range(0, q.shape[1], slice_size): end = i + slice_size - r[:, i:end] = self.einsum_lowest_level(q[:, i:end], k, v, dim=1, offset=i, slice_size=slice_size) + r[:, i:end] = self.einsum_lowest_level( + q[:, i:end], k, v, dim=1, offset=i, slice_size=slice_size + ) return r def einsum_op_mps_v1(self, q, k, v): - if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096 + if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096 return self.einsum_lowest_level(q, k, v, None, None, None) else: slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) @@ -272,13 +327,12 @@ class InvokeAICrossAttentionMixin: # Divide factor of safety as there's copying and fragmentation return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20)) - def get_invokeai_attention_mem_efficient(self, q, k, v): - if q.device.type == 'cuda': - #print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device)) + if q.device.type == "cuda": + # print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device)) return self.einsum_op_cuda(q, k, v) - if q.device.type == 'mps' or q.device.type == 'cpu': + if q.device.type == "mps" or q.device.type == "cpu": if self.mem_total_gb >= 32: return self.einsum_op_mps_v1(q, k, v) return self.einsum_op_mps_v2(q, k, v) @@ -288,8 +342,11 @@ class InvokeAICrossAttentionMixin: return self.einsum_op_tensor_mem(q, k, v, 32) - -def restore_default_cross_attention(model, is_running_diffusers: bool, restore_attention_processor: Optional[AttnProcessor]=None): +def restore_default_cross_attention( + model, + is_running_diffusers: bool, + restore_attention_processor: Optional[AttnProcessor] = None, +): if is_running_diffusers: unet = model unet.set_attn_processor(restore_attention_processor or CrossAttnProcessor()) @@ -297,7 +354,7 @@ def restore_default_cross_attention(model, is_running_diffusers: bool, restore_a remove_attention_function(model) -def override_cross_attention(model, context: Context, is_running_diffusers = False): +def override_cross_attention(model, context: Context, is_running_diffusers=False): """ Inject attention parameters and functions into the passed in model to enable cross attention editing. 
@@ -316,7 +373,7 @@ def override_cross_attention(model, context: Context, is_running_diffusers = Fal indices = torch.arange(max_length, dtype=torch.long) for name, a0, a1, b0, b1 in context.arguments.edit_opcodes: if b0 < max_length: - if name == "equal":# or (name == "replace" and a1 - a0 == b1 - b0): + if name == "equal": # or (name == "replace" and a1 - a0 == b1 - b0): # these tokens have not been edited indices[b0:b1] = indices_target[a0:a1] mask[b0:b1] = 1 @@ -332,7 +389,14 @@ def override_cross_attention(model, context: Context, is_running_diffusers = Fal else: # try to re-use an existing slice size default_slice_size = 4 - slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size) + slice_size = next( + ( + p.slice_size + for p in old_attn_processors.values() + if type(p) is SlicedAttnProcessor + ), + default_slice_size, + ) unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size)) return old_attn_processors else: @@ -341,65 +405,96 @@ def override_cross_attention(model, context: Context, is_running_diffusers = Fal return None +def get_cross_attention_modules( + model, which: CrossAttentionType +) -> list[tuple[str, InvokeAICrossAttentionMixin]]: + from ldm.modules.attention import CrossAttention # avoid circular import - -def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]: - from ldm.modules.attention import CrossAttention # avoid circular import - cross_attention_class: type = InvokeAIDiffusersCrossAttention if isinstance(model,UNet2DConditionModel) else CrossAttention + cross_attention_class: type = ( + InvokeAIDiffusersCrossAttention + if isinstance(model, UNet2DConditionModel) + else CrossAttention + ) which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2" - attention_module_tuples = [(name,module) for name, module in model.named_modules() if - isinstance(module, cross_attention_class) and which_attn in name] + attention_module_tuples = [ + (name, module) + for name, module in model.named_modules() + if isinstance(module, cross_attention_class) and which_attn in name + ] cross_attention_modules_in_model_count = len(attention_module_tuples) expected_count = 16 if cross_attention_modules_in_model_count != expected_count: # non-fatal error but .swap() won't work. - print(f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model " + - f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed " + - f"or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, " + - f"and/or update the {expected_count} above to an appropriate number, and/or find and inform someone who knows " + - f"what it means. This error is non-fatal, but it is likely that .swap() and attention map display will not " + - f"work properly until it is fixed.") + print( + f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model " + + f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed " + + f"or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, " + + f"and/or update the {expected_count} above to an appropriate number, and/or find and inform someone who knows " + + f"what it means. 
This error is non-fatal, but it is likely that .swap() and attention map display will not " + + f"work properly until it is fixed." + ) return attention_module_tuples def inject_attention_function(unet, context: Context): # ORIGINAL SOURCE CODE: https://github.com/huggingface/diffusers/blob/91ddd2a25b848df0fa1262d4f1cd98c7ccb87750/src/diffusers/models/attention.py#L276 - def attention_slice_wrangler(module, suggested_attention_slice:torch.Tensor, dim, offset, slice_size): - - #memory_usage = suggested_attention_slice.element_size() * suggested_attention_slice.nelement() + def attention_slice_wrangler( + module, suggested_attention_slice: torch.Tensor, dim, offset, slice_size + ): + # memory_usage = suggested_attention_slice.element_size() * suggested_attention_slice.nelement() attention_slice = suggested_attention_slice if context.get_should_save_maps(module.identifier): - #print(module.identifier, "saving suggested_attention_slice of shape", + # print(module.identifier, "saving suggested_attention_slice of shape", # suggested_attention_slice.shape, "dim", dim, "offset", offset) - slice_to_save = attention_slice.to('cpu') if dim is not None else attention_slice - context.save_slice(module.identifier, slice_to_save, dim=dim, offset=offset, slice_size=slice_size) + slice_to_save = ( + attention_slice.to("cpu") if dim is not None else attention_slice + ) + context.save_slice( + module.identifier, + slice_to_save, + dim=dim, + offset=offset, + slice_size=slice_size, + ) elif context.get_should_apply_saved_maps(module.identifier): - #print(module.identifier, "applying saved attention slice for dim", dim, "offset", offset) - saved_attention_slice = context.get_slice(module.identifier, dim, offset, slice_size) + # print(module.identifier, "applying saved attention slice for dim", dim, "offset", offset) + saved_attention_slice = context.get_slice( + module.identifier, dim, offset, slice_size + ) # slice may have been offloaded to CPU - saved_attention_slice = saved_attention_slice.to(suggested_attention_slice.device) + saved_attention_slice = saved_attention_slice.to( + suggested_attention_slice.device + ) if context.is_tokens_cross_attention(module.identifier): index_map = context.cross_attention_index_map - remapped_saved_attention_slice = torch.index_select(saved_attention_slice, -1, index_map) + remapped_saved_attention_slice = torch.index_select( + saved_attention_slice, -1, index_map + ) this_attention_slice = suggested_attention_slice - mask = context.cross_attention_mask.to(torch_dtype(suggested_attention_slice.device)) + mask = context.cross_attention_mask.to( + torch_dtype(suggested_attention_slice.device) + ) saved_mask = mask this_mask = 1 - mask - attention_slice = remapped_saved_attention_slice * saved_mask + \ - this_attention_slice * this_mask + attention_slice = ( + remapped_saved_attention_slice * saved_mask + + this_attention_slice * this_mask + ) else: # just use everything attention_slice = saved_attention_slice return attention_slice - cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF) + cross_attention_modules = get_cross_attention_modules( + unet, CrossAttentionType.TOKENS + ) + get_cross_attention_modules(unet, CrossAttentionType.SELF) for identifier, module in cross_attention_modules: module.identifier = identifier try: @@ -408,56 +503,61 @@ def inject_attention_function(unet, context: Context): lambda module: context.get_slicing_strategy(identifier) ) except AttributeError as 
e: - if is_attribute_error_about(e, 'set_attention_slice_wrangler'): - print(f"TODO: implement set_attention_slice_wrangler for {type(module)}") # TODO + if is_attribute_error_about(e, "set_attention_slice_wrangler"): + print( + f"TODO: implement set_attention_slice_wrangler for {type(module)}" + ) # TODO else: raise def remove_attention_function(unet): - cross_attention_modules = get_cross_attention_modules(unet, CrossAttentionType.TOKENS) + get_cross_attention_modules(unet, CrossAttentionType.SELF) + cross_attention_modules = get_cross_attention_modules( + unet, CrossAttentionType.TOKENS + ) + get_cross_attention_modules(unet, CrossAttentionType.SELF) for identifier, module in cross_attention_modules: try: # clear wrangler callback module.set_attention_slice_wrangler(None) module.set_slicing_strategy_getter(None) except AttributeError as e: - if is_attribute_error_about(e, 'set_attention_slice_wrangler'): - print(f"TODO: implement set_attention_slice_wrangler for {type(module)}") + if is_attribute_error_about(e, "set_attention_slice_wrangler"): + print( + f"TODO: implement set_attention_slice_wrangler for {type(module)}" + ) else: raise def is_attribute_error_about(error: AttributeError, attribute: str): - if hasattr(error, 'name'): # Python 3.10 + if hasattr(error, "name"): # Python 3.10 return error.name == attribute else: # Python 3.9 return attribute in str(error) - def get_mem_free_total(device): - #only on cuda + # only on cuda if not torch.cuda.is_available(): return None stats = torch.cuda.memory_stats(device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] + mem_active = stats["active_bytes.all.current"] + mem_reserved = stats["reserved_bytes.all.current"] mem_free_cuda, _ = torch.cuda.mem_get_info(device) mem_free_torch = mem_reserved - mem_active mem_free_total = mem_free_cuda + mem_free_torch return mem_free_total - -class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin): - +class InvokeAIDiffusersCrossAttention( + diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin +): def __init__(self, **kwargs): super().__init__(**kwargs) InvokeAICrossAttentionMixin.__init__(self) def _attention(self, query, key, value, attention_mask=None): - #default_result = super()._attention(query, key, value) + # default_result = super()._attention(query, key, value) if attention_mask is not None: print(f"{type(self).__name__} ignoring passed-in attention_mask") attention_result = self.get_invokeai_attention_mem_efficient(query, key, value) @@ -466,9 +566,6 @@ class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, return hidden_states - - - ## 🧨diffusers implementation follows @@ -501,25 +598,30 @@ class CrossAttnProcessor: return hidden_states """ -from dataclasses import field, dataclass +from dataclasses import dataclass, field import torch - -from diffusers.models.cross_attention import CrossAttention, CrossAttnProcessor, SlicedAttnProcessor +from diffusers.models.cross_attention import ( + CrossAttention, + CrossAttnProcessor, + SlicedAttnProcessor, +) @dataclass class SwapCrossAttnContext: modified_text_embeddings: torch.Tensor - index_map: torch.Tensor # maps from original prompt token indices to the equivalent tokens in the modified prompt - mask: torch.Tensor # in the target space of the index_map + index_map: torch.Tensor # maps from original prompt token indices to the equivalent tokens in the modified prompt + mask: torch.Tensor # in 
the target space of the index_map cross_attention_types_to_do: list[CrossAttentionType] = field(default_factory=list) - def __int__(self, - cac_types_to_do: [CrossAttentionType], - modified_text_embeddings: torch.Tensor, - index_map: torch.Tensor, - mask: torch.Tensor): + def __int__( + self, + cac_types_to_do: [CrossAttentionType], + modified_text_embeddings: torch.Tensor, + index_map: torch.Tensor, + mask: torch.Tensor, + ): self.cross_attention_types_to_do = cac_types_to_do self.modified_text_embeddings = modified_text_embeddings self.index_map = index_map @@ -529,9 +631,9 @@ class SwapCrossAttnContext: return attn_type in self.cross_attention_types_to_do @classmethod - def make_mask_and_index_map(cls, edit_opcodes: list[tuple[str, int, int, int, int]], max_length: int) \ - -> tuple[torch.Tensor, torch.Tensor]: - + def make_mask_and_index_map( + cls, edit_opcodes: list[tuple[str, int, int, int, int]], max_length: int + ) -> tuple[torch.Tensor, torch.Tensor]: # mask=1 means use original prompt attention, mask=0 means use modified prompt attention mask = torch.zeros(max_length) indices_target = torch.arange(max_length, dtype=torch.long) @@ -547,28 +649,42 @@ class SwapCrossAttnContext: class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor): - # TODO: dynamically pick slice size based on memory conditions - def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None, - # kwargs - swap_cross_attn_context: SwapCrossAttnContext=None): - - attention_type = CrossAttentionType.SELF if encoder_hidden_states is None else CrossAttentionType.TOKENS + def __call__( + self, + attn: CrossAttention, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + # kwargs + swap_cross_attn_context: SwapCrossAttnContext = None, + ): + attention_type = ( + CrossAttentionType.SELF + if encoder_hidden_states is None + else CrossAttentionType.TOKENS + ) # if cross-attention control is not in play, just call through to the base implementation. 
- if attention_type is CrossAttentionType.SELF or \ - swap_cross_attn_context is None or \ - not swap_cross_attn_context.wants_cross_attention_control(attention_type): - #print(f"SwapCrossAttnContext for {attention_type} not active - passing request to superclass") - return super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask) - #else: + if ( + attention_type is CrossAttentionType.SELF + or swap_cross_attn_context is None + or not swap_cross_attn_context.wants_cross_attention_control(attention_type) + ): + # print(f"SwapCrossAttnContext for {attention_type} not active - passing request to superclass") + return super().__call__( + attn, hidden_states, encoder_hidden_states, attention_mask + ) + # else: # print(f"SwapCrossAttnContext for {attention_type} active") batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask( - attention_mask=attention_mask, target_length=sequence_length, - batch_size=batch_size) + attention_mask=attention_mask, + target_length=sequence_length, + batch_size=batch_size, + ) query = attn.to_q(hidden_states) dim = query.shape[-1] @@ -589,41 +705,51 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor): # compute slices and prepare output tensor batch_size_attention = query.shape[0] hidden_states = torch.zeros( - (batch_size_attention, sequence_length, dim // attn.heads), device=query.device, dtype=query.dtype + (batch_size_attention, sequence_length, dim // attn.heads), + device=query.device, + dtype=query.dtype, ) # do slices - for i in range(max(1,hidden_states.shape[0] // self.slice_size)): + for i in range(max(1, hidden_states.shape[0] // self.slice_size)): start_idx = i * self.slice_size end_idx = (i + 1) * self.slice_size query_slice = query[start_idx:end_idx] original_key_slice = original_text_key[start_idx:end_idx] modified_key_slice = modified_text_key[start_idx:end_idx] - attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None + attn_mask_slice = ( + attention_mask[start_idx:end_idx] + if attention_mask is not None + else None + ) - original_attn_slice = attn.get_attention_scores(query_slice, original_key_slice, attn_mask_slice) - modified_attn_slice = attn.get_attention_scores(query_slice, modified_key_slice, attn_mask_slice) + original_attn_slice = attn.get_attention_scores( + query_slice, original_key_slice, attn_mask_slice + ) + modified_attn_slice = attn.get_attention_scores( + query_slice, modified_key_slice, attn_mask_slice + ) # because the prompt modifications may result in token sequences shifted forwards or backwards, # the original attention probabilities must be remapped to account for token index changes in the # modified prompt - remapped_original_attn_slice = torch.index_select(original_attn_slice, -1, - swap_cross_attn_context.index_map) + remapped_original_attn_slice = torch.index_select( + original_attn_slice, -1, swap_cross_attn_context.index_map + ) # only some tokens taken from the original attention probabilities. this is controlled by the mask. 
mask = swap_cross_attn_context.mask inverse_mask = 1 - mask - attn_slice = \ - remapped_original_attn_slice * mask + \ - modified_attn_slice * inverse_mask + attn_slice = ( + remapped_original_attn_slice * mask + modified_attn_slice * inverse_mask + ) del remapped_original_attn_slice, modified_attn_slice attn_slice = torch.bmm(attn_slice, modified_value[start_idx:end_idx]) hidden_states[start_idx:end_idx] = attn_slice - # done hidden_states = attn.batch_to_head_dim(hidden_states) @@ -636,7 +762,7 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor): class SwapCrossAttnProcessor(SlicedSwapCrossAttnProcesser): - def __init__(self): - super(SwapCrossAttnProcessor, self).__init__(slice_size=int(1e9)) # massive slice size = don't slice - + super(SwapCrossAttnProcessor, self).__init__( + slice_size=int(1e9) + ) # massive slice size = don't slice diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py index eede431d33..c489c2f0a9 100644 --- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py +++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_map_saving.py @@ -2,17 +2,17 @@ import math import PIL import torch -from torchvision.transforms.functional import resize as tv_resize, InterpolationMode +from torchvision.transforms.functional import InterpolationMode +from torchvision.transforms.functional import resize as tv_resize -from .cross_attention_control import get_cross_attention_modules, CrossAttentionType +from .cross_attention_control import CrossAttentionType, get_cross_attention_modules -class AttentionMapSaver(): - +class AttentionMapSaver: def __init__(self, token_ids: range, latents_shape: torch.Size): self.token_ids = token_ids self.latents_shape = latents_shape - #self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]]) + # self.collated_maps = #torch.zeros([len(token_ids), latents_shape[0], latents_shape[1]]) self.collated_maps = {} def clear_maps(self): @@ -25,7 +25,7 @@ class AttentionMapSaver(): :param key: Storage key. If a map already exists for this key it will be summed with the incoming data. In this case the maps sizes (H and W) should match. 
:return: None """ - key_and_size = f'{key}_{maps.shape[1]}' + key_and_size = f"{key}_{maps.shape[1]}" # extract desired tokens maps = maps[:, :, self.token_ids] @@ -35,12 +35,12 @@ class AttentionMapSaver(): # store if key_and_size not in self.collated_maps: - self.collated_maps[key_and_size] = torch.zeros_like(maps, device='cpu') + self.collated_maps[key_and_size] = torch.zeros_like(maps, device="cpu") self.collated_maps[key_and_size] += maps.cpu() def write_maps_to_disk(self, path: str): pil_image = self.get_stacked_maps_image() - pil_image.save(path, 'PNG') + pil_image.save(path, "PNG") def get_stacked_maps_image(self) -> PIL.Image: """ @@ -57,39 +57,50 @@ class AttentionMapSaver(): merged = None for key, maps in self.collated_maps.items(): - # maps has shape [(H*W), N] for N tokens # but we want [N, H, W] - this_scale_factor = math.sqrt(maps.shape[0] / (latents_width * latents_height)) + this_scale_factor = math.sqrt( + maps.shape[0] / (latents_width * latents_height) + ) this_maps_height = int(float(latents_height) * this_scale_factor) this_maps_width = int(float(latents_width) * this_scale_factor) # and we need to do some dimension juggling - maps = torch.reshape(torch.swapdims(maps, 0, 1), [num_tokens, this_maps_height, this_maps_width]) + maps = torch.reshape( + torch.swapdims(maps, 0, 1), + [num_tokens, this_maps_height, this_maps_width], + ) # scale to output size if necessary if this_scale_factor != 1: - maps = tv_resize(maps, [latents_height, latents_width], InterpolationMode.BICUBIC) + maps = tv_resize( + maps, [latents_height, latents_width], InterpolationMode.BICUBIC + ) # normalize maps_min = torch.min(maps) maps_range = torch.max(maps) - maps_min - #print(f"map {key} size {[this_maps_width, this_maps_height]} range {[maps_min, maps_min + maps_range]}") + # print(f"map {key} size {[this_maps_width, this_maps_height]} range {[maps_min, maps_min + maps_range]}") maps_normalized = (maps - maps_min) / maps_range # expand to (-0.1, 1.1) and clamp maps_normalized_expanded = maps_normalized * 1.1 - 0.05 - maps_normalized_expanded_clamped = torch.clamp(maps_normalized_expanded, 0, 1) + maps_normalized_expanded_clamped = torch.clamp( + maps_normalized_expanded, 0, 1 + ) # merge together, producing a vertical stack - maps_stacked = torch.reshape(maps_normalized_expanded_clamped, [num_tokens * latents_height, latents_width]) + maps_stacked = torch.reshape( + maps_normalized_expanded_clamped, + [num_tokens * latents_height, latents_width], + ) if merged is None: merged = maps_stacked else: # screen blend - merged = 1 - (1 - maps_stacked)*(1 - merged) + merged = 1 - (1 - maps_stacked) * (1 - merged) if merged is None: return None - merged_bytes = merged.mul(0xff).byte() - return PIL.Image.fromarray(merged_bytes.numpy(), mode='L') + merged_bytes = merged.mul(0xFF).byte() + return PIL.Image.fromarray(merged_bytes.numpy(), mode="L") diff --git a/invokeai/backend/stable_diffusion/diffusion/ddim.py b/invokeai/backend/stable_diffusion/diffusion/ddim.py index 41d6249fc5..87f6f2166b 100644 --- a/invokeai/backend/stable_diffusion/diffusion/ddim.py +++ b/invokeai/backend/stable_diffusion/diffusion/ddim.py @@ -1,77 +1,82 @@ """SAMPLING ONLY.""" import torch -from .shared_invokeai_diffusion import InvokeAIDiffuserComponent + +from ..diffusionmodules.util import noise_like from .sampler import Sampler -from ..diffusionmodules.util import noise_like +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent + class DDIMSampler(Sampler): - def __init__(self, model, schedule='linear', 
device=None, **kwargs): - super().__init__(model,schedule,model.num_timesteps,device) + def __init__(self, model, schedule="linear", device=None, **kwargs): + super().__init__(model, schedule, model.num_timesteps, device) - self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model, - model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond)) + self.invokeai_diffuser = InvokeAIDiffuserComponent( + self.model, + model_forward_callback=lambda x, sigma, cond: self.model.apply_model( + x, sigma, cond + ), + ) def prepare_to_sample(self, t_enc, **kwargs): super().prepare_to_sample(t_enc, **kwargs) - extra_conditioning_info = kwargs.get('extra_conditioning_info', None) - all_timesteps_count = kwargs.get('all_timesteps_count', t_enc) + extra_conditioning_info = kwargs.get("extra_conditioning_info", None) + all_timesteps_count = kwargs.get("all_timesteps_count", t_enc) - if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: - self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count) + if ( + extra_conditioning_info is not None + and extra_conditioning_info.wants_cross_attention_control + ): + self.invokeai_diffuser.override_cross_attention( + extra_conditioning_info, step_count=all_timesteps_count + ) else: self.invokeai_diffuser.restore_default_cross_attention() - # This is the central routine @torch.no_grad() def p_sample( - self, - x, - c, - t, - index, - repeat_noise=False, - use_original_steps=False, - quantize_denoised=False, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - step_count:int=1000, # total number of steps - **kwargs, + self, + x, + c, + t, + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + step_count: int = 1000, # total number of steps + **kwargs, ): b, *_, device = *x.shape, x.device - if ( - unconditional_conditioning is None - or unconditional_guidance_scale == 1.0 - ): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.0: # damian0815 would like to know when/if this code path is used e_t = self.model.apply_model(x, t, c) else: # step_index counts in the opposite direction to index - step_index = step_count-(index+1) + step_index = step_count - (index + 1) e_t = self.invokeai_diffuser.do_diffusion_step( - x, t, - unconditional_conditioning, c, + x, + t, + unconditional_conditioning, + c, unconditional_guidance_scale, - step_index=step_index + step_index=step_index, ) if score_corrector is not None: - assert self.model.parameterization == 'eps' + assert self.model.parameterization == "eps" e_t = score_corrector.modify_score( self.model, e_t, x, t, c, **corrector_kwargs ) - alphas = ( - self.model.alphas_cumprod - if use_original_steps - else self.ddim_alphas - ) + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = ( self.model.alphas_cumprod_prev if use_original_steps @@ -101,11 +106,8 @@ class DDIMSampler(Sampler): pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) # direction pointing to x_t dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t - noise = ( - sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - ) + noise = sigma_t * noise_like(x.shape, 
device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0, None - diff --git a/invokeai/backend/stable_diffusion/diffusion/ddpm.py b/invokeai/backend/stable_diffusion/diffusion/ddpm.py index 36251a0940..6741498303 100644 --- a/invokeai/backend/stable_diffusion/diffusion/ddpm.py +++ b/invokeai/backend/stable_diffusion/diffusion/ddpm.py @@ -6,55 +6,43 @@ https://github.com/CompVis/taming-transformers -- merci """ -import torch - -import torch.nn as nn import os -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat +import urllib from contextlib import contextmanager from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only -from omegaconf import ListConfig -import urllib -from ..textual_inversion_manager import TextualInversionManager +import numpy as np +import pytorch_lightning as pl +import torch +import torch.nn as nn +from einops import rearrange, repeat +from omegaconf import ListConfig +from pytorch_lightning.utilities.distributed import rank_zero_only +from torch.optim.lr_scheduler import LambdaLR +from torchvision.utils import make_grid +from tqdm import tqdm + from ...util.util import ( - log_txt_as_img, - exists, - default, - ismap, - isimage, - mean_flat, count_params, + default, + exists, instantiate_from_config, + isimage, + ismap, + log_txt_as_img, + mean_flat, ) +from ..autoencoder import AutoencoderKL, IdentityFirstStage, VQModelInterface +from ..diffusionmodules.util import extract_into_tensor, make_beta_schedule, noise_like +from ..distributions.distributions import DiagonalGaussianDistribution, normal_kl from ..ema import LitEma -from ..distributions.distributions import ( - normal_kl, - DiagonalGaussianDistribution, -) -from ..autoencoder import ( - VQModelInterface, - IdentityFirstStage, - AutoencoderKL, -) -from ..diffusionmodules.util import ( - make_beta_schedule, - extract_into_tensor, - noise_like, -) +from ..textual_inversion_manager import TextualInversionManager from .ddim import DDIMSampler - __conditioning_keys__ = { - 'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y', + "concat": "c_concat", + "crossattn": "c_crossattn", + "adm": "y", } @@ -74,14 +62,14 @@ class DDPM(pl.LightningModule): self, unet_config, timesteps=1000, - beta_schedule='linear', - loss_type='l2', + beta_schedule="linear", + loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, - monitor='val/loss', + monitor="val/loss", use_ema=True, - first_stage_key='image', + first_stage_key="image", image_size=256, channels=3, log_every_t=100, @@ -95,7 +83,7 @@ class DDPM(pl.LightningModule): v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, - parameterization='eps', # all assuming fixed variance schedules + parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, @@ -103,12 +91,12 @@ class DDPM(pl.LightningModule): ): super().__init__() assert parameterization in [ - 'eps', - 'x0', + "eps", + "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( - f' | {self.__class__.__name__}: Running in {self.parameterization}-prediction mode' + f" | 
{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised @@ -122,7 +110,7 @@ class DDPM(pl.LightningModule): self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) - print(f' | Keeping EMAs of {len(list(self.model_ema.buffers()))}.') + print(f" | Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: @@ -152,16 +140,14 @@ class DDPM(pl.LightningModule): self.loss_type = loss_type self.learn_logvar = learn_logvar - self.logvar = torch.full( - fill_value=logvar_init, size=(self.num_timesteps,) - ) + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule( self, given_betas=None, - beta_schedule='linear', + beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, @@ -187,34 +173,30 @@ class DDPM(pl.LightningModule): self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps - ), 'alphas have to be defined for each timestep' + ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer( - 'alphas_cumprod_prev', to_torch(alphas_cumprod_prev) - ) + self.register_buffer("betas", to_torch(betas)) + self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) + self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( - 'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)) - ) - self.register_buffer( - 'sqrt_one_minus_alphas_cumprod', + "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)), ) self.register_buffer( - 'log_one_minus_alphas_cumprod', + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)), ) self.register_buffer( - 'sqrt_recip_alphas_cumprod', + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)), ) self.register_buffer( - 'sqrt_recipm1_alphas_cumprod', + "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)), ) @@ -223,47 +205,41 @@ class DDPM(pl.LightningModule): 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer( - 'posterior_variance', to_torch(posterior_variance) - ) + self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( - 'posterior_log_variance_clipped', + "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( - 'posterior_mean_coef1', - to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod) - ), + "posterior_mean_coef1", + to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( - 'posterior_mean_coef2', + "posterior_mean_coef2", to_torch( - (1.0 - alphas_cumprod_prev) - * np.sqrt(alphas) - / (1.0 - alphas_cumprod) + (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) - if self.parameterization == 'eps': + if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) - elif self.parameterization == 'x0': + elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: - raise NotImplementedError('mu not supported') + raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager @@ -272,24 +248,24 @@ class DDPM(pl.LightningModule): self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: - print(f'{context}: Switched to EMA weights') + print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: - print(f'{context}: Restored training weights') + print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location='cpu') - if 'state_dict' in list(sd.keys()): - sd = sd['state_dict'] + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): - print('Deleting key {} from state_dict.'.format(k)) + print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = ( self.load_state_dict(sd, strict=False) @@ -297,12 +273,12 @@ class DDPM(pl.LightningModule): else self.model.load_state_dict(sd, strict=False) ) print( - f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: - print(f'Missing Keys: {missing}') + print(f"Missing Keys: {missing}") if len(unexpected) > 0: - print(f'Unexpected Keys: {unexpected}') + print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ @@ -311,13 +287,8 @@ class DDPM(pl.LightningModule): :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" - mean = ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) - * x_start - ) - variance = extract_into_tensor( - 1.0 - self.alphas_cumprod, t, x_start.shape - ) + mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) @@ -325,24 +296,17 @@ class DDPM(pl.LightningModule): def predict_start_from_noise(self, x_t, t, noise): return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) - * x_t - - extract_into_tensor( - self.sqrt_recipm1_alphas_cumprod, t, x_t.shape - ) + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) - * x_start - + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) - * x_t - ) - posterior_variance = extract_into_tensor( - self.posterior_variance, t, x_t.shape + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) @@ -354,9 +318,9 @@ class DDPM(pl.LightningModule): def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) - if self.parameterization == 'eps': + if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == 'x0': + elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) @@ -376,13 +340,8 @@ class DDPM(pl.LightningModule): ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape( - b, *((1,) * (len(x.shape) - 1)) - ) - return ( - model_mean - + nonzero_mask * (0.5 * model_log_variance).exp() * noise - ) + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): @@ -392,7 +351,7 @@ class DDPM(pl.LightningModule): intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), - desc='Sampling t', + desc="Sampling t", total=self.num_timesteps, dynamic_ncols=True, ): @@ -419,26 +378,21 @@ class DDPM(pl.LightningModule): def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) - * x_start - + extract_into_tensor( - self.sqrt_one_minus_alphas_cumprod, t, x_start.shape - ) + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': + if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() - elif self.loss_type == 'l2': + elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: - loss = torch.nn.functional.mse_loss( - target, pred, reduction='none' - ) + loss = 
torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") @@ -450,28 +404,28 @@ class DDPM(pl.LightningModule): model_out = self.model(x_noisy, t) loss_dict = {} - if self.parameterization == 'eps': + if self.parameterization == "eps": target = noise - elif self.parameterization == 'x0': + elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( - f'Paramterization {self.parameterization} not yet supported' + f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - log_prefix = 'train' if self.training else 'val' + log_prefix = "train" if self.training else "val" - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb - loss_dict.update({f'{log_prefix}/loss': loss}) + loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict @@ -487,7 +441,7 @@ class DDPM(pl.LightningModule): x = batch[k] if len(x.shape) == 3: x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') + x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x @@ -504,7 +458,7 @@ class DDPM(pl.LightningModule): ) self.log( - 'global_step', + "global_step", self.global_step, prog_bar=True, logger=True, @@ -513,9 +467,9 @@ class DDPM(pl.LightningModule): ) if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] + lr = self.optimizers().param_groups[0]["lr"] self.log( - 'lr_abs', + "lr_abs", lr, prog_bar=True, logger=True, @@ -530,9 +484,7 @@ class DDPM(pl.LightningModule): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = { - key + '_ema': loss_dict_ema[key] for key in loss_dict_ema - } + loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, @@ -554,21 +506,19 @@ class DDPM(pl.LightningModule): def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = rearrange(samples, "n b c h w -> b n c h w") + denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() - def log_images( - self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs - ): + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] - log['inputs'] = x + log["inputs"] = x # get diffusion row diffusion_row = list() @@ -576,23 +526,23 @@ class DDPM(pl.LightningModule): for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) 
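# --- editor's annotation, not part of the patch: a rough sketch of what the
# q_sample() call above computes. It evaluates the closed-form forward-diffusion
# marginal defined earlier in this class (see q_sample/register_schedule),
# approximately x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so each entry appended to diffusion_row below is simply the input x noised to
# timestep t, used only for the logged visualization grid. ---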
diffusion_row.append(x_noisy) - log['diffusion_row'] = self._get_rows_from_list(diffusion_row) + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row - with self.ema_scope('Plotting'): + with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) - log['samples'] = samples - log['denoise_row'] = self._get_rows_from_list(denoise_row) + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: @@ -619,7 +569,7 @@ class LatentDiffusion(DDPM): cond_stage_config, personalization_config, num_timesteps_cond=None, - cond_stage_key='image', + cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, @@ -629,32 +579,29 @@ class LatentDiffusion(DDPM): *args, **kwargs, ): - self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] + assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': + conditioning_key = "concat" if concat_mode else "crossattn" + if cond_stage_config == "__is_unconditional__": conditioning_key = None - ckpt_path = kwargs.pop('ckpt_path', None) - ignore_keys = kwargs.pop('ignore_keys', []) + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: - self.num_downs = ( - len(first_stage_config.params.ddconfig.ch_mult) - 1 - ) + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) @@ -680,12 +627,14 @@ class LatentDiffusion(DDPM): personalization_config, self.cond_stage_model ) self.textual_inversion_manager = TextualInversionManager( - tokenizer = self.cond_stage_model.tokenizer, - text_encoder = self.cond_stage_model.transformer, - full_precision = True + tokenizer=self.cond_stage_model.tokenizer, + text_encoder=self.cond_stage_model.transformer, + full_precision=True, ) # this circular component dependency is gross and bad, needs to be rethought - self.cond_stage_model.set_textual_inversion_manager(self.textual_inversion_manager) + self.cond_stage_model.set_textual_inversion_manager( + self.textual_inversion_manager + ) self.emb_ckpt_counter = 0 @@ -721,22 +670,22 @@ class LatentDiffusion(DDPM): ): assert ( self.scale_factor == 1.0 - ), 'rather not use custom rescaling and std-rescaling simultaneously' + ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings - print('### USING STD-RESCALING ###') + print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor - 
self.register_buffer('scale_factor', 1.0 / z.flatten().std()) - print(f'setting self.scale_factor to {self.scale_factor}') - print('### USING STD-RESCALING ###') + self.register_buffer("scale_factor", 1.0 / z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, - beta_schedule='linear', + beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, @@ -764,13 +713,11 @@ class LatentDiffusion(DDPM): def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: - if config == '__is_first_stage__': - print('Using first stage also as cond stage.') + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model - elif config == '__is_unconditional__': - print( - f'Training {self.__class__.__name__} as an unconditional model.' - ) + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: @@ -780,8 +727,8 @@ class LatentDiffusion(DDPM): for param in self.cond_stage_model.parameters(): param.requires_grad = False else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' + assert config != "__is_first_stage__" + assert config != "__is_unconditional__" try: model = instantiate_from_config(config) except urllib.error.URLError: @@ -794,14 +741,14 @@ class LatentDiffusion(DDPM): model = instantiate_from_config(config, embedder=embedder) if config.params.get( - 'embedding_manager_ckpt', None - ): # do not load if missing OR empty string + "embedding_manager_ckpt", None + ): # do not load if missing OR empty string model.load(config.params.embedding_manager_ckpt) return model def _get_denoise_row_from_list( - self, samples, desc='', force_no_decoder_quantization=False + self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): @@ -813,8 +760,8 @@ class LatentDiffusion(DDPM): ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") + denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @@ -831,11 +778,11 @@ class LatentDiffusion(DDPM): def get_learned_conditioning(self, c, **kwargs): if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable( + if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode( - c, embedding_manager=self.embedding_manager,**kwargs + c, embedding_manager=self.embedding_manager, **kwargs ) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() @@ -873,19 +820,17 @@ class LatentDiffusion(DDPM): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, - self.split_input_params['clip_min_weight'], - self.split_input_params['clip_max_weight'], - ) - weighting = ( - weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - if 
self.split_input_params['tie_braker']: + if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, - self.split_input_params['clip_min_tie_weight'], - self.split_input_params['clip_max_tie_weight'], + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) @@ -916,12 +861,8 @@ class LatentDiffusion(DDPM): weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) - normalization = fold(weighting).view( - 1, 1, h, w - ) # normalizes the overlap - weighting = weighting.view( - (1, 1, kernel_size[0], kernel_size[1], Ly * Lx) - ) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( @@ -1003,9 +944,9 @@ class LatentDiffusion(DDPM): if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox']: + if cond_key in ["caption", "coordinates_bbox"]: xc = batch[cond_key] - elif cond_key == 'class_label': + elif cond_key == "class_label": xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) @@ -1025,14 +966,14 @@ class LatentDiffusion(DDPM): if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + c = {ckey: c, "pos_x": pos_x, "pos_y": pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} + c = {"pos_x": pos_x, "pos_y": pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) @@ -1042,32 +983,28 @@ class LatentDiffusion(DDPM): return out @torch.no_grad() - def decode_first_stage( - self, z, predict_cids=False, force_not_quantize=False - ): + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry( - z, shape=None - ) - z = rearrange(z, 'b h w c -> b c h w').contiguous() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z - if hasattr(self, 'split_input_params'): - if self.split_input_params['patch_distributed_vq']: - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) - uf = self.split_input_params['vqf'] + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) + uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) - print('reducing Kernel') + print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) - print('reducing stride') + print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf @@ -1084,26 +1021,20 @@ class LatentDiffusion(DDPM): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], - force_not_quantize=predict_cids - or force_not_quantize, + force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: - output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] - o = torch.stack( - output_list, axis=-1 - ) # # (bn, nc, ks[0], ks[1], L) + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) @@ -1132,26 +1063,24 @@ class LatentDiffusion(DDPM): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry( - z, shape=None - ) - z = rearrange(z, 'b h w c -> b c h w').contiguous() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z - if hasattr(self, 'split_input_params'): - if self.split_input_params['patch_distributed_vq']: - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) - uf = self.split_input_params['vqf'] + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) - print('reducing Kernel') + print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) - print('reducing stride') + print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf @@ -1168,26 +1097,20 @@ class LatentDiffusion(DDPM): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], - force_not_quantize=predict_cids - or force_not_quantize, + force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: - output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] - o = torch.stack( - output_list, axis=-1 - ) # # (bn, nc, ks[0], ks[1], L) + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) @@ -1211,20 +1134,20 @@ class LatentDiffusion(DDPM): @torch.no_grad() def encode_first_stage(self, x): - if hasattr(self, 'split_input_params'): - if self.split_input_params['patch_distributed_vq']: - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) - df = self.split_input_params['vqf'] - self.split_input_params['original_image_size'] = x.shape[-2:] + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + df = self.split_input_params["vqf"] + self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) - print('reducing Kernel') + print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) - print('reducing stride') + print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df @@ -1244,9 +1167,7 @@ class LatentDiffusion(DDPM): o = o * weighting # Reverse reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization @@ -1272,15 +1193,11 @@ class LatentDiffusion(DDPM): c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) - c = self.q_sample( - x_start=c, t=tc, noise=torch.randn_like(c.float()) - ) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) - def _rescale_annotations( - self, bboxes, crop_coordinates - ): # TODO: move to dataset + def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) @@ -1291,7 +1208,6 @@ class LatentDiffusion(DDPM): return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): - if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass @@ -1299,19 +1215,15 @@ class LatentDiffusion(DDPM): if not isinstance(cond, list): cond = [cond] key = ( - 'c_concat' - if self.model.conditioning_key == 'concat' - else 'c_crossattn' + "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} - if hasattr(self, 'split_input_params'): - assert ( - len(cond) == 1 - ) # todo can only deal with one conditioning atm + if hasattr(self, "split_input_params"): + assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids - ks = self.split_input_params['ks'] # eg. (128, 128) - stride = self.split_input_params['stride'] # eg. (64, 64) + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] @@ -1327,15 +1239,12 @@ class LatentDiffusion(DDPM): z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( - self.cond_stage_key - in ['image', 'LR_image', 'segmentation', 'bbox_img'] + self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value - assert ( - len(c) == 1 - ) # todo extend to list with more than one elem + assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) @@ -1343,20 +1252,16 @@ class LatentDiffusion(DDPM): (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) - cond_list = [ - {c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1]) - ] + cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - elif self.cond_stage_key == 'coordinates_bbox': + elif self.cond_stage_key == "coordinates_bbox": assert ( - 'original_image_size' in self.split_input_params - ), 'BoudingBoxRescaling is missing original_image_size' + "original_image_size" in self.split_input_params + ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params[ - 'original_image_size' - ] + full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 @@ -1392,35 +1297,30 @@ class LatentDiffusion(DDPM): # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ - torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[ - None - ].to(self.device) + torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( + self.device + ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning - assert isinstance( - cond, dict - ), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + assert isinstance(cond, dict), "cond must be dict to be fed into model" + cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack( - [ - torch.cat([cut_cond, p], dim=1) - for p in patch_limits_tknzd - ] + [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange( - adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1] + adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) print(adapted_cond.shape) - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ @@ -1429,8 +1329,7 @@ class LatentDiffusion(DDPM): # apply model by loop over crops output_list = [ - self.model(z_list[i], t, **cond_list[i]) - for i in range(z.shape[-1]) + self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple @@ -1439,9 +1338,7 @@ class 
LatentDiffusion(DDPM): o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape - o = o.view( - (o.shape[0], -1, o.shape[-1]) - ) # (bn, nc * ks[0] * ks[1], L) + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization @@ -1455,8 +1352,7 @@ class LatentDiffusion(DDPM): def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) - * x_t + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) @@ -1469,9 +1365,7 @@ class LatentDiffusion(DDPM): :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] - t = torch.tensor( - [self.num_timesteps - 1] * batch_size, device=x_start.device - ) + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 @@ -1484,46 +1378,42 @@ class LatentDiffusion(DDPM): model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} - prefix = 'train' if self.training else 'val' + prefix = "train" if self.training else "val" - if self.parameterization == 'x0': + if self.parameterization == "x0": target = x_start - elif self.parameterization == 'eps': + elif self.parameterization == "eps": target = noise else: raise NotImplementedError() - loss_simple = self.get_loss(model_output, target, mean=False).mean( - [1, 2, 3] - ) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t.item()].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) + loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) + loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() - loss_vlb = self.get_loss(model_output, target, mean=False).mean( - dim=(1, 2, 3) - ) + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb - loss_dict.update({f'{prefix}/loss': loss}) + loss_dict.update({f"{prefix}/loss": loss}) if self.embedding_reg_weight > 0: loss_embedding_reg = ( self.embedding_manager.embedding_to_coarse_loss().mean() ) - loss_dict.update({f'{prefix}/loss_emb_reg': loss_embedding_reg}) + loss_dict.update({f"{prefix}/loss_emb_reg": loss_embedding_reg}) loss += self.embedding_reg_weight * loss_embedding_reg - loss_dict.update({f'{prefix}/loss': loss}) + loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict @@ -1540,12 +1430,10 @@ class LatentDiffusion(DDPM): corrector_kwargs=None, ): t_in = t - model_out = self.apply_model( - x, t_in, c, return_ids=return_codebook_ids - ) + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: - assert self.parameterization == 'eps' + assert self.parameterization == 
"eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) @@ -1553,9 +1441,9 @@ class LatentDiffusion(DDPM): if return_codebook_ids: model_out, logits = model_out - if self.parameterization == 'eps': + if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == 'x0': + elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() @@ -1563,9 +1451,7 @@ class LatentDiffusion(DDPM): if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize( - x_recon - ) + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) ( model_mean, posterior_variance, @@ -1617,7 +1503,7 @@ class LatentDiffusion(DDPM): corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: - raise DeprecationWarning('Support dropped.') + raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs @@ -1628,9 +1514,7 @@ class LatentDiffusion(DDPM): if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape( - b, *((1,) * (len(x.shape) - 1)) - ) + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( @@ -1638,15 +1522,11 @@ class LatentDiffusion(DDPM): ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( - model_mean - + nonzero_mask * (0.5 * model_log_variance).exp() * noise, + model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: - return ( - model_mean - + nonzero_mask * (0.5 * model_log_variance).exp() * noise - ) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( @@ -1701,7 +1581,7 @@ class LatentDiffusion(DDPM): iterator = ( tqdm( reversed(range(0, timesteps)), - desc='Progressive Generation', + desc="Progressive Generation", total=timesteps, ) if verbose @@ -1713,11 +1593,9 @@ class LatentDiffusion(DDPM): for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' + assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample( - x_start=cond, t=tc, noise=torch.randn_like(cond) - ) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, @@ -1761,7 +1639,6 @@ class LatentDiffusion(DDPM): start_T=None, log_every_t=None, ): - if not log_every_t: log_every_t = self.log_every_t device = self.betas.device @@ -1780,7 +1657,7 @@ class LatentDiffusion(DDPM): iterator = ( tqdm( reversed(range(0, timesteps)), - desc='Sampling t', + desc="Sampling t", total=timesteps, ) if verbose @@ -1789,18 +1666,14 @@ class LatentDiffusion(DDPM): if mask is not None: assert x0 is not None - assert ( - x0.shape[2:3] == mask.shape[2:3] - ) # spatial size has to match + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' + assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample( - x_start=cond, t=tc, noise=torch.randn_like(cond) 
- ) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, @@ -1874,7 +1747,6 @@ class LatentDiffusion(DDPM): @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): - if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) @@ -1927,7 +1799,6 @@ class LatentDiffusion(DDPM): plot_diffusion_rows=False, **kwargs, ): - use_ddim = ddim_steps is not None log = dict() @@ -1941,24 +1812,22 @@ class LatentDiffusion(DDPM): ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) - log['inputs'] = x - log['reconstruction'] = xrec + log["inputs"] = x + log["reconstruction"] = xrec if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, 'decode'): + if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) - log['conditioning'] = xc - elif self.cond_stage_key in ['caption']: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['caption']) - log['conditioning'] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img( - (x.shape[2], x.shape[3]), batch['human_label'] - ) - log['conditioning'] = xc + log["conditioning"] = xc + elif self.cond_stage_key in ["caption"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) + log["conditioning"] = xc + elif self.cond_stage_key == "class_label": + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log["conditioning"] = xc elif isimage(xc): - log['conditioning'] = xc + log["conditioning"] = xc if ismap(xc): - log['original_conditioning'] = self.to_rgb(xc) + log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row @@ -1966,27 +1835,21 @@ class LatentDiffusion(DDPM): z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) - diffusion_row = torch.stack( - diffusion_row - ) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange( - diffusion_grid, 'b n c h w -> (b n) c h w' - ) - diffusion_grid = make_grid( - diffusion_grid, nrow=diffusion_row.shape[0] - ) - log['diffusion_row'] = diffusion_grid + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") + diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid if sample: # get denoise row - with self.ema_scope('Plotting'): + with self.ema_scope("Plotting"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, @@ -1996,12 +1859,12 @@ class LatentDiffusion(DDPM): ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) - log['samples'] = x_samples + log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log['denoise_row'] = denoise_grid + log["denoise_row"] = denoise_grid - uc = self.get_learned_conditioning(len(c) * ['']) + uc = self.get_learned_conditioning(len(c) * [""]) sample_scaled, _ = self.sample_log( cond=c, 
batch_size=N, @@ -2011,7 +1874,7 @@ class LatentDiffusion(DDPM): unconditional_guidance_scale=5.0, unconditional_conditioning=uc, ) - log['samples_scaled'] = self.decode_first_stage(sample_scaled) + log["samples_scaled"] = self.decode_first_stage(sample_scaled) if ( quantize_denoised @@ -2019,7 +1882,7 @@ class LatentDiffusion(DDPM): and not isinstance(self.first_stage_model, IdentityFirstStage) ): # also display when quantizing x0 while sampling - with self.ema_scope('Plotting Quantized Denoised'): + with self.ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, @@ -2031,7 +1894,7 @@ class LatentDiffusion(DDPM): # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) - log['samples_x0_quantized'] = x_samples + log["samples_x0_quantized"] = x_samples if inpaint: # make a simple center square @@ -2040,8 +1903,7 @@ class LatentDiffusion(DDPM): # zeros will be filled in mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0 mask = mask[:, None, ...] - with self.ema_scope('Plotting Inpaint'): - + with self.ema_scope("Plotting Inpaint"): samples, _ = self.sample_log( cond=c, batch_size=N, @@ -2052,11 +1914,11 @@ class LatentDiffusion(DDPM): mask=mask, ) x_samples = self.decode_first_stage(samples.to(self.device)) - log['samples_inpainting'] = x_samples - log['mask'] = mask + log["samples_inpainting"] = x_samples + log["mask"] = mask # outpaint - with self.ema_scope('Plotting Outpaint'): + with self.ema_scope("Plotting Outpaint"): samples, _ = self.sample_log( cond=c, batch_size=N, @@ -2067,19 +1929,19 @@ class LatentDiffusion(DDPM): mask=mask, ) x_samples = self.decode_first_stage(samples.to(self.device)) - log['samples_outpainting'] = x_samples + log["samples_outpainting"] = x_samples if plot_progressive_rows: - with self.ema_scope('Plotting Progressives'): + with self.ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising( c, shape=(self.channels, self.image_size, self.image_size), batch_size=N, ) prog_row = self._get_denoise_row_from_list( - progressives, desc='Progressive Generation' + progressives, desc="Progressive Generation" ) - log['progressive_row'] = prog_row + log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: @@ -2097,24 +1959,22 @@ class LatentDiffusion(DDPM): else: params = list(self.model.parameters()) if self.cond_stage_trainable: - print( - f'{self.__class__.__name__}: Also optimizing conditioner params!' 
- ) + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") params = params + list(self.cond_stage_model.parameters()) if self.learn_logvar: - print('Diffusion model optimizing logvar') + print("Diffusion model optimizing logvar") params.append(self.logvar) opt = torch.optim.AdamW(params, lr=lr) if self.use_scheduler: - assert 'target' in self.scheduler_config + assert "target" in self.scheduler_config scheduler = instantiate_from_config(self.scheduler_config) - print('Setting up LambdaLR scheduler...') + print("Setting up LambdaLR scheduler...") scheduler = [ { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1, + "scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule), + "interval": "step", + "frequency": 1, } ] return [opt], scheduler @@ -2123,7 +1983,7 @@ class LatentDiffusion(DDPM): @torch.no_grad() def to_rgb(self, x): x = x.float() - if not hasattr(self, 'colorize'): + if not hasattr(self, "colorize"): self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) x = nn.functional.conv2d(x, weight=self.colorize) x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0 @@ -2135,16 +1995,14 @@ class LatentDiffusion(DDPM): if os.path.isdir(self.trainer.checkpoint_callback.dirpath): self.embedding_manager.save( - os.path.join( - self.trainer.checkpoint_callback.dirpath, 'embeddings.pt' - ) + os.path.join(self.trainer.checkpoint_callback.dirpath, "embeddings.pt") ) if (self.global_step - self.emb_ckpt_counter) > 500: self.embedding_manager.save( os.path.join( self.trainer.checkpoint_callback.dirpath, - f'embeddings_gs-{self.global_step}.pt', + f"embeddings_gs-{self.global_step}.pt", ) ) @@ -2158,26 +2016,26 @@ class DiffusionWrapper(pl.LightningModule): self.conditioning_key = conditioning_key assert self.conditioning_key in [ None, - 'concat', - 'crossattn', - 'hybrid', - 'adm', + "concat", + "crossattn", + "hybrid", + "adm", ] def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): if self.conditioning_key is None: out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': + elif self.conditioning_key == "concat": xc = torch.cat([x] + c_concat, dim=1) out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': + elif self.conditioning_key == "crossattn": cc = torch.cat(c_crossattn, 1) out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': + elif self.conditioning_key == "hybrid": cc = torch.cat(c_crossattn, 1) xc = torch.cat([x] + c_concat, dim=1) out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'adm': + elif self.conditioning_key == "adm": cc = c_crossattn[0] out = self.diffusion_model(x, t, y=cc) else: @@ -2190,31 +2048,28 @@ class Layout2ImgDiffusion(LatentDiffusion): # TODO: move all layout-specific hacks to this class def __init__(self, cond_stage_key, *args, **kwargs): assert ( - cond_stage_key == 'coordinates_bbox' + cond_stage_key == "coordinates_bbox" ), 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) def log_images(self, batch, N=8, *args, **kwargs): logs = super().log_images(batch=batch, N=N, *args, **kwargs) - key = 'train' if self.training else 'validation' + key = "train" if self.training else "validation" dset = self.trainer.datamodule.datasets[key] mapper = dset.conditional_builders[self.cond_stage_key] bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label( - dset.get_category_id(catno) - ) + map_fn = lambda 
catno: dset.get_textual_label(dset.get_category_id(catno)) for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot( - tknzd_bbox.detach().cpu(), map_fn, (256, 256) - ) + bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) bbox_imgs.append(bboximg) cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img + logs["bbox_image"] = cond_img return logs + class LatentInpaintDiffusion(LatentDiffusion): def __init__( self, @@ -2229,7 +2084,6 @@ class LatentInpaintDiffusion(LatentDiffusion): assert self.masked_image_key in concat_keys self.concat_keys = concat_keys - @torch.no_grad() def get_input( self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False diff --git a/invokeai/backend/stable_diffusion/diffusion/ksampler.py b/invokeai/backend/stable_diffusion/diffusion/ksampler.py index f98ca8de21..eddcc11ea8 100644 --- a/invokeai/backend/stable_diffusion/diffusion/ksampler.py +++ b/invokeai/backend/stable_diffusion/diffusion/ksampler.py @@ -8,12 +8,12 @@ from .cross_attention_map_saving import AttentionMapSaver from .sampler import Sampler from .shared_invokeai_diffusion import InvokeAIDiffuserComponent - # at this threshold, the scheduler will stop using the Karras # noise schedule and start using the model's schedule STEP_THRESHOLD = 30 -def cfg_apply_threshold(result, threshold = 0.0, scale = 0.7): + +def cfg_apply_threshold(result, threshold=0.0, scale=0.7): if threshold <= 0.0: return result maxval = 0.0 + torch.max(result).cpu().numpy() @@ -21,35 +21,43 @@ def cfg_apply_threshold(result, threshold = 0.0, scale = 0.7): if maxval < threshold and minval > -threshold: return result if maxval > threshold: - maxval = min(max(1, scale*maxval), threshold) + maxval = min(max(1, scale * maxval), threshold) if minval < -threshold: - minval = max(min(-1, scale*minval), -threshold) + minval = max(min(-1, scale * minval), -threshold) return torch.clamp(result, min=minval, max=maxval) class CFGDenoiser(nn.Module): - def __init__(self, model, threshold = 0, warmup = 0): + def __init__(self, model, threshold=0, warmup=0): super().__init__() self.inner_model = model self.threshold = threshold self.warmup_max = warmup self.warmup = max(warmup / 10, 1) - self.invokeai_diffuser = InvokeAIDiffuserComponent(model, - model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond)) - + self.invokeai_diffuser = InvokeAIDiffuserComponent( + model, + model_forward_callback=lambda x, sigma, cond: self.inner_model( + x, sigma, cond=cond + ), + ) def prepare_to_sample(self, t_enc, **kwargs): + extra_conditioning_info = kwargs.get("extra_conditioning_info", None) - extra_conditioning_info = kwargs.get('extra_conditioning_info', None) - - if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: - self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = t_enc) + if ( + extra_conditioning_info is not None + and extra_conditioning_info.wants_cross_attention_control + ): + self.invokeai_diffuser.override_cross_attention( + extra_conditioning_info, step_count=t_enc + ) else: self.invokeai_diffuser.restore_default_cross_attention() - def forward(self, x, sigma, uncond, cond, cond_scale): - next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale) + next_x = self.invokeai_diffuser.do_diffusion_step( + x, sigma, uncond, cond, cond_scale + ) if self.warmup < self.warmup_max: thresh = max(1, 1 + (self.threshold - 1) * (self.warmup / self.warmup_max)) 
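As an aside, the warmup logic in CFGDenoiser.forward() above ramps the clamping threshold from 1.0 up to the configured value over the first warmup steps before the result is passed to cfg_apply_threshold(). The ramp, reduced to a standalone sketch with hypothetical names:

def warmup_threshold(threshold: float, warmup: float, warmup_max: float) -> float:
    # while still warming up, interpolate linearly from 1.0 toward the configured
    # threshold; once warmup is complete, use the configured threshold unchanged
    if warmup < warmup_max:
        return max(1.0, 1.0 + (threshold - 1.0) * (warmup / warmup_max))
    return threshold

For example, warmup_threshold(3.0, 5, 10) returns 2.0 halfway through the warmup.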
self.warmup += 1 @@ -59,8 +67,9 @@ class CFGDenoiser(nn.Module): thresh = self.threshold return cfg_apply_threshold(next_x, thresh) + class KSampler(Sampler): - def __init__(self, model, schedule='lms', device=None, **kwargs): + def __init__(self, model, schedule="lms", device=None, **kwargs): denoiser = K.external.CompVisDenoiser(model) super().__init__( denoiser, @@ -68,45 +77,49 @@ class KSampler(Sampler): steps=model.num_timesteps, ) self.sigmas = None - self.ds = None - self.s_in = None - self.karras_max = kwargs.get('karras_max',STEP_THRESHOLD) + self.ds = None + self.s_in = None + self.karras_max = kwargs.get("karras_max", STEP_THRESHOLD) if self.karras_max is None: self.karras_max = STEP_THRESHOLD def make_schedule( - self, - ddim_num_steps, - ddim_discretize='uniform', - ddim_eta=0.0, - verbose=False, + self, + ddim_num_steps, + ddim_discretize="uniform", + ddim_eta=0.0, + verbose=False, ): outer_model = self.model - self.model = outer_model.inner_model + self.model = outer_model.inner_model super().make_schedule( ddim_num_steps, - ddim_discretize='uniform', + ddim_discretize="uniform", ddim_eta=0.0, verbose=False, ) - self.model = outer_model + self.model = outer_model self.ddim_num_steps = ddim_num_steps # we don't need both of these sigmas, but storing them here to make # comparison easier later on - self.model_sigmas = self.model.get_sigmas(ddim_num_steps) + self.model_sigmas = self.model.get_sigmas(ddim_num_steps) self.karras_sigmas = K.sampling.get_sigmas_karras( n=ddim_num_steps, sigma_min=self.model.sigmas[0].item(), sigma_max=self.model.sigmas[-1].item(), - rho=7., + rho=7.0, device=self.device, ) if ddim_num_steps >= self.karras_max: - print(f'>> Ksampler using model noise schedule (steps >= {self.karras_max})') + print( + f">> Ksampler using model noise schedule (steps >= {self.karras_max})" + ) self.sigmas = self.model_sigmas else: - print(f'>> Ksampler using karras noise schedule (steps < {self.karras_max})') + print( + f">> Ksampler using karras noise schedule (steps < {self.karras_max})" + ) self.sigmas = self.karras_sigmas # ALERT: We are completely overriding the sample() method in the base class, which @@ -116,31 +129,31 @@ class KSampler(Sampler): @torch.no_grad() def decode( - self, - z_enc, - cond, - t_enc, - img_callback=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - use_original_steps=False, - init_latent = None, - mask = None, - **kwargs + self, + z_enc, + cond, + t_enc, + img_callback=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + use_original_steps=False, + init_latent=None, + mask=None, + **kwargs, ): - samples,_ = self.sample( - batch_size = 1, - S = t_enc, - x_T = z_enc, - shape = z_enc.shape[1:], - conditioning = cond, + samples, _ = self.sample( + batch_size=1, + S=t_enc, + x_T=z_enc, + shape=z_enc.shape[1:], + conditioning=cond, unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning = unconditional_conditioning, - img_callback = img_callback, - x0 = init_latent, - mask = mask, - **kwargs - ) + unconditional_conditioning=unconditional_conditioning, + img_callback=img_callback, + x0=init_latent, + mask=mask, + **kwargs, + ) return samples # this is a no-op, provided here for compatibility with ddim and plms samplers @@ -174,26 +187,26 @@ class KSampler(Sampler): log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - extra_conditioning_info: InvokeAIDiffuserComponent.ExtraConditioningInfo=None, - threshold = 0, - perlin = 0, + 
extra_conditioning_info: InvokeAIDiffuserComponent.ExtraConditioningInfo = None, + threshold=0, + perlin=0, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs, ): def route_callback(k_callback_values): if img_callback is not None: - img_callback(k_callback_values['x'],k_callback_values['i']) + img_callback(k_callback_values["x"], k_callback_values["i"]) # if make_schedule() hasn't been called, we do it now if self.sigmas is None: self.make_schedule( ddim_num_steps=S, - ddim_eta = eta, - verbose = False, + ddim_eta=eta, + verbose=False, ) # sigmas are set up in make_schedule - we take the last steps items - sigmas = self.sigmas[-S-1:] + sigmas = self.sigmas[-S - 1 :] # x_T is variation noise. When an init image is provided (in x0) we need to add # more randomness to the starting image. @@ -205,27 +218,40 @@ class KSampler(Sampler): else: x = torch.randn([batch_size, *shape], device=self.device) * sigmas[0] - model_wrap_cfg = CFGDenoiser(self.model, threshold=threshold, warmup=max(0.8*S,S-10)) - model_wrap_cfg.prepare_to_sample(S, extra_conditioning_info=extra_conditioning_info) + model_wrap_cfg = CFGDenoiser( + self.model, threshold=threshold, warmup=max(0.8 * S, S - 10) + ) + model_wrap_cfg.prepare_to_sample( + S, extra_conditioning_info=extra_conditioning_info + ) # setup attention maps saving. checks for None are because there are multiple code paths to get here. attention_map_saver = None if attention_maps_callback is not None and extra_conditioning_info is not None: eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1 attention_map_token_ids = range(1, eos_token_index) - attention_map_saver = AttentionMapSaver(token_ids = attention_map_token_ids, latents_shape=x.shape[-2:]) - model_wrap_cfg.invokeai_diffuser.setup_attention_map_saving(attention_map_saver) + attention_map_saver = AttentionMapSaver( + token_ids=attention_map_token_ids, latents_shape=x.shape[-2:] + ) + model_wrap_cfg.invokeai_diffuser.setup_attention_map_saving( + attention_map_saver + ) extra_args = { - 'cond': conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': unconditional_guidance_scale, + "cond": conditioning, + "uncond": unconditional_conditioning, + "cond_scale": unconditional_guidance_scale, } - print(f'>> Sampling with k_{self.schedule} starting at step {len(self.sigmas)-S-1} of {len(self.sigmas)-1} ({S} new sampling steps)') + print( + f">> Sampling with k_{self.schedule} starting at step {len(self.sigmas)-S-1} of {len(self.sigmas)-1} ({S} new sampling steps)" + ) sampling_result = ( - K.sampling.__dict__[f'sample_{self.schedule}']( - model_wrap_cfg, x, sigmas, extra_args=extra_args, - callback=route_callback + K.sampling.__dict__[f"sample_{self.schedule}"]( + model_wrap_cfg, + x, + sigmas, + extra_args=extra_args, + callback=route_callback, ), None, ) @@ -237,25 +263,25 @@ class KSampler(Sampler): # a workaround is found. 
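The call into K.sampling.__dict__[f"sample_{self.schedule}"] above hands model_wrap_cfg and the chosen sigma schedule to one of k-diffusion's samplers. For orientation, the simplest such sampler walks the schedule roughly like this Euler-style sketch (a simplification, not the library's code; denoiser stands in for the CFGDenoiser wrapper):

import torch

def sample_euler_sketch(denoiser, x, sigmas, extra_args, callback=None):
    # x starts out as noise scaled by sigmas[0]; each iteration denoises one step
    # down the schedule, the same shape of loop that route_callback() observes
    s_in = x.new_ones([x.shape[0]])
    for i in range(len(sigmas) - 1):
        denoised = denoiser(x, sigmas[i] * s_in, **extra_args)
        d = (x - denoised) / sigmas[i]             # local slope estimate
        x = x + d * (sigmas[i + 1] - sigmas[i])    # Euler step to the next sigma
        if callback is not None:
            callback({"x": x, "i": i})
    return x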
@torch.no_grad() def p_sample( - self, - img, - cond, - ts, - index, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - extra_conditioning_info=None, - **kwargs, + self, + img, + cond, + ts, + index, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + extra_conditioning_info=None, + **kwargs, ): if self.model_wrap is None: self.model_wrap = CFGDenoiser(self.model) extra_args = { - 'cond': cond, - 'uncond': unconditional_conditioning, - 'cond_scale': unconditional_guidance_scale, + "cond": cond, + "uncond": unconditional_conditioning, + "cond_scale": unconditional_guidance_scale, } if self.s_in is None: - self.s_in = img.new_ones([img.shape[0]]) + self.s_in = img.new_ones([img.shape[0]]) if self.ds is None: self.ds = [] @@ -270,14 +296,16 @@ class KSampler(Sampler): # so the actual formula for indexing into sigmas: # sigma_index = (steps-index) s_index = t_enc - index - 1 - self.model_wrap.prepare_to_sample(s_index, extra_conditioning_info=extra_conditioning_info) - img = K.sampling.__dict__[f'_{self.schedule}']( + self.model_wrap.prepare_to_sample( + s_index, extra_conditioning_info=extra_conditioning_info + ) + img = K.sampling.__dict__[f"_{self.schedule}"]( self.model_wrap, img, self.sigmas, s_index, - s_in = self.s_in, - ds = self.ds, + s_in=self.s_in, + ds=self.ds, extra_args=extra_args, ) @@ -287,26 +315,25 @@ class KSampler(Sampler): # we should not be multiplying by self.sigmas[0] if we # are at an intermediate step in img2img. See similar in # sample() which does work. - def get_initial_image(self,x_T,shape,steps): - print(f'WARNING: ksampler.get_initial_image(): get_initial_image needs testing') - x = (torch.randn(shape, device=self.device) * self.sigmas[0]) + def get_initial_image(self, x_T, shape, steps): + print(f"WARNING: ksampler.get_initial_image(): get_initial_image needs testing") + x = torch.randn(shape, device=self.device) * self.sigmas[0] if x_T is not None: return x_T + x else: return x - def prepare_to_sample(self,t_enc,**kwargs): - self.t_enc = t_enc + def prepare_to_sample(self, t_enc, **kwargs): + self.t_enc = t_enc self.model_wrap = None - self.ds = None - self.s_in = None + self.ds = None + self.s_in = None - def q_sample(self,x0,ts): - ''' + def q_sample(self, x0, ts): + """ Overrides parent method to return the q_sample of the inner model. 
- ''' - return self.model.inner_model.q_sample(x0,ts) + """ + return self.model.inner_model.q_sample(x0, ts) - def conditioning_key(self)->str: + def conditioning_key(self) -> str: return self.model.inner_model.model.conditioning_key - diff --git a/invokeai/backend/stable_diffusion/diffusion/plms.py b/invokeai/backend/stable_diffusion/diffusion/plms.py index f4aa9bb12d..df37afcc24 100644 --- a/invokeai/backend/stable_diffusion/diffusion/plms.py +++ b/invokeai/backend/stable_diffusion/diffusion/plms.py @@ -1,52 +1,58 @@ """SAMPLING ONLY.""" -import torch -import numpy as np -from tqdm import tqdm from functools import partial + +import numpy as np +import torch +from tqdm import tqdm + from ...util import choose_torch_device -from .shared_invokeai_diffusion import InvokeAIDiffuserComponent -from .sampler import Sampler from ..diffusionmodules.util import noise_like +from .sampler import Sampler +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent class PLMSSampler(Sampler): - def __init__(self, model, schedule='linear', device=None, **kwargs): - super().__init__(model,schedule,model.num_timesteps, device) + def __init__(self, model, schedule="linear", device=None, **kwargs): + super().__init__(model, schedule, model.num_timesteps, device) def prepare_to_sample(self, t_enc, **kwargs): super().prepare_to_sample(t_enc, **kwargs) - extra_conditioning_info = kwargs.get('extra_conditioning_info', None) - all_timesteps_count = kwargs.get('all_timesteps_count', t_enc) + extra_conditioning_info = kwargs.get("extra_conditioning_info", None) + all_timesteps_count = kwargs.get("all_timesteps_count", t_enc) - if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control: - self.invokeai_diffuser.override_cross_attention(extra_conditioning_info, step_count = all_timesteps_count) + if ( + extra_conditioning_info is not None + and extra_conditioning_info.wants_cross_attention_control + ): + self.invokeai_diffuser.override_cross_attention( + extra_conditioning_info, step_count=all_timesteps_count + ) else: self.invokeai_diffuser.restore_default_cross_attention() - # this is the essential routine @torch.no_grad() def p_sample( - self, - x, # image, called 'img' elsewhere - c, # conditioning, called 'cond' elsewhere - t, # timesteps, called 'ts' elsewhere - index, - repeat_noise=False, - use_original_steps=False, - quantize_denoised=False, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - old_eps=[], - t_next=None, - step_count:int=1000, # total number of steps - **kwargs, + self, + x, # image, called 'img' elsewhere + c, # conditioning, called 'cond' elsewhere + t, # timesteps, called 'ts' elsewhere + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + old_eps=[], + t_next=None, + step_count: int = 1000, # total number of steps + **kwargs, ): b, *_, device = *x.shape, x.device @@ -59,24 +65,24 @@ class PLMSSampler(Sampler): e_t = self.model.apply_model(x, t, c) else: # step_index counts in the opposite direction to index - step_index = step_count-(index+1) - e_t = self.invokeai_diffuser.do_diffusion_step(x, t, - unconditional_conditioning, c, - unconditional_guidance_scale, - step_index=step_index) + step_index = step_count - (index + 1) + e_t = 
self.invokeai_diffuser.do_diffusion_step( + x, + t, + unconditional_conditioning, + c, + unconditional_guidance_scale, + step_index=step_index, + ) if score_corrector is not None: - assert self.model.parameterization == 'eps' + assert self.model.parameterization == "eps" e_t = score_corrector.modify_score( self.model, e_t, x, t, c, **corrector_kwargs ) return e_t - alphas = ( - self.model.alphas_cumprod - if use_original_steps - else self.ddim_alphas - ) + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = ( self.model.alphas_cumprod_prev if use_original_steps @@ -96,9 +102,7 @@ class PLMSSampler(Sampler): def get_x_prev_and_pred_x0(e_t, index): # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full( - (b, 1, 1, 1), alphas_prev[index], device=device - ) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full( (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device @@ -110,11 +114,7 @@ class PLMSSampler(Sampler): pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) # direction pointing to x_t dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t - noise = ( - sigma_t - * noise_like(x.shape, device, repeat_noise) - * temperature - ) + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise @@ -135,10 +135,7 @@ class PLMSSampler(Sampler): elif len(old_eps) >= 3: # 4nd order Pseudo Linear Multistep (Adams-Bashforth) e_t_prime = ( - 55 * e_t - - 59 * old_eps[-1] - + 37 * old_eps[-2] - - 9 * old_eps[-3] + 55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3] ) / 24 x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) diff --git a/invokeai/backend/stable_diffusion/diffusion/sampler.py b/invokeai/backend/stable_diffusion/diffusion/sampler.py index 5f9ee2da29..beb74eaefb 100644 --- a/invokeai/backend/stable_diffusion/diffusion/sampler.py +++ b/invokeai/backend/stable_diffusion/diffusion/sampler.py @@ -1,31 +1,37 @@ -''' +""" invokeai.models.diffusion.sampler Base class for invokeai.models.diffusion.ddim, invokeai.models.diffusion.ksampler, etc -''' -import torch -import numpy as np -from tqdm import tqdm +""" from functools import partial -from ...util import choose_torch_device -from .shared_invokeai_diffusion import InvokeAIDiffuserComponent +import numpy as np +import torch +from tqdm import tqdm + +from ...util import choose_torch_device from ..diffusionmodules.util import ( + extract_into_tensor, make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, - extract_into_tensor, ) +from .shared_invokeai_diffusion import InvokeAIDiffuserComponent + class Sampler(object): - def __init__(self, model, schedule='linear', steps=None, device=None, **kwargs): + def __init__(self, model, schedule="linear", steps=None, device=None, **kwargs): self.model = model self.ddim_timesteps = None self.ddpm_num_timesteps = steps self.schedule = schedule - self.device = device or choose_torch_device() - self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model, - model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond)) + self.device = device or choose_torch_device() + self.invokeai_diffuser = InvokeAIDiffuserComponent( + self.model, + 
model_forward_callback=lambda x, sigma, cond: self.model.apply_model( + x, sigma, cond + ), + ) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: @@ -36,11 +42,11 @@ class Sampler(object): # This method was copied over from ddim.py and probably does stuff that is # ddim-specific. Disentangle at some point. def make_schedule( - self, - ddim_num_steps, - ddim_discretize='uniform', - ddim_eta=0.0, - verbose=False, + self, + ddim_num_steps, + ddim_discretize="uniform", + ddim_eta=0.0, + verbose=False, ): self.total_steps = ddim_num_steps self.ddim_timesteps = make_ddim_timesteps( @@ -52,38 +58,33 @@ class Sampler(object): alphas_cumprod = self.model.alphas_cumprod assert ( alphas_cumprod.shape[0] == self.ddpm_num_timesteps - ), 'alphas have to be defined for each timestep' - to_torch = ( - lambda x: x.clone() - .detach() - .to(torch.float32) - .to(self.model.device) - ) + ), "alphas have to be defined for each timestep" + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer("betas", to_torch(self.model.betas)) + self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer( - 'alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev) + "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev) ) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer( - 'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())) + "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu())) ) self.register_buffer( - 'sqrt_one_minus_alphas_cumprod', + "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())), ) self.register_buffer( - 'log_one_minus_alphas_cumprod', + "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu())), ) self.register_buffer( - 'sqrt_recip_alphas_cumprod', + "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())), ) self.register_buffer( - 'sqrt_recipm1_alphas_cumprod', + "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)), ) @@ -98,19 +99,17 @@ class Sampler(object): eta=ddim_eta, verbose=verbose, ) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer( - 'ddim_sqrt_one_minus_alphas', np.sqrt(1.0 - ddim_alphas) - ) + self.register_buffer("ddim_sigmas", ddim_sigmas) + self.register_buffer("ddim_alphas", ddim_alphas) + self.register_buffer("ddim_alphas_prev", ddim_alphas_prev) + self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (1 - self.alphas_cumprod / self.alphas_cumprod_prev) ) self.register_buffer( - 'ddim_sigmas_for_original_num_steps', + "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps, ) @@ -129,20 +128,19 @@ class Sampler(object): noise = torch.randn_like(x0) return ( extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 - + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) - * noise + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise ) @torch.no_grad() def sample( self, - S, # S is steps + S, # S is steps batch_size, shape, conditioning=None, callback=None, normals_sequence=None, - 
img_callback=None, # TODO: this is very confusing because it is called "step_callback" elsewhere. Change. + img_callback=None, # TODO: this is very confusing because it is called "step_callback" elsewhere. Change. quantize_x0=False, eta=0.0, mask=None, @@ -159,7 +157,6 @@ class Sampler(object): # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs, ): - if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] @@ -167,17 +164,21 @@ class Sampler(object): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + print( + f"Warning: Got {cbs} conditionings but batch-size is {batch_size}" + ) else: if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + print( + f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}" + ) # check to see if make_schedule() has run, and if not, run it if self.ddim_timesteps is None: self.make_schedule( ddim_num_steps=S, - ddim_eta = eta, - verbose = False, + ddim_eta=eta, + verbose=False, ) ts = self.get_timesteps(S) @@ -204,32 +205,32 @@ class Sampler(object): unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, steps=S, - **kwargs + **kwargs, ) return samples, intermediates @torch.no_grad() def do_sampling( - self, - cond, - shape, - timesteps=None, - x_T=None, - ddim_use_original_steps=False, - callback=None, - quantize_denoised=False, - mask=None, - x0=None, - img_callback=None, - log_every_t=100, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - steps=None, - **kwargs + self, + cond, + shape, + timesteps=None, + x_T=None, + ddim_use_original_steps=False, + callback=None, + quantize_denoised=False, + mask=None, + x0=None, + img_callback=None, + log_every_t=100, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + steps=None, + **kwargs, ): b = shape[0] time_range = ( @@ -238,29 +239,24 @@ class Sampler(object): else np.flip(timesteps) ) - total_steps=steps + total_steps = steps iterator = tqdm( time_range, - desc=f'{self.__class__.__name__}', + desc=f"{self.__class__.__name__}", total=total_steps, dynamic_ncols=True, ) old_eps = [] - self.prepare_to_sample(t_enc=total_steps,all_timesteps_count=steps,**kwargs) - img = self.get_initial_image(x_T,shape,total_steps) + self.prepare_to_sample(t_enc=total_steps, all_timesteps_count=steps, **kwargs) + img = self.get_initial_image(x_T, shape, total_steps) # probably don't need this at all - intermediates = {'x_inter': [img], 'pred_x0': [img]} + intermediates = {"x_inter": [img], "pred_x0": [img]} for i, step in enumerate(iterator): index = total_steps - i - 1 - ts = torch.full( - (b,), - step, - device=self.device, - dtype=torch.long - ) + ts = torch.full((b,), step, device=self.device, dtype=torch.long) ts_next = torch.full( (b,), time_range[min(i + 1, len(time_range) - 1)], @@ -290,7 +286,7 @@ class Sampler(object): unconditional_conditioning=unconditional_conditioning, old_eps=old_eps, t_next=ts_next, - step_count=steps + step_count=steps, ) img, pred_x0, e_t = outs @@ -300,11 +296,11 @@ class Sampler(object): if callback: callback(i) if img_callback: - 
img_callback(img,i) + img_callback(img, i) if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) + intermediates["x_inter"].append(img) + intermediates["pred_x0"].append(pred_x0) return img, intermediates @@ -312,18 +308,18 @@ class Sampler(object): # The variable names are changed in order to be confusing. @torch.no_grad() def decode( - self, - x_latent, - cond, - t_start, - img_callback=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - use_original_steps=False, - init_latent = None, - mask = None, - all_timesteps_count = None, - **kwargs + self, + x_latent, + cond, + t_start, + img_callback=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + use_original_steps=False, + init_latent=None, + mask=None, + all_timesteps_count=None, + **kwargs, ): timesteps = ( np.arange(self.ddpm_num_timesteps) @@ -334,12 +330,16 @@ class Sampler(object): time_range = np.flip(timesteps) total_steps = timesteps.shape[0] - print(f'>> Running {self.__class__.__name__} sampling starting at step {self.total_steps - t_start} of {self.total_steps} ({total_steps} new sampling steps)') + print( + f">> Running {self.__class__.__name__} sampling starting at step {self.total_steps - t_start} of {self.total_steps} ({total_steps} new sampling steps)" + ) - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - x0 = init_latent - self.prepare_to_sample(t_enc=total_steps, all_timesteps_count=all_timesteps_count, **kwargs) + iterator = tqdm(time_range, desc="Decoding image", total=total_steps) + x_dec = x_latent + x0 = init_latent + self.prepare_to_sample( + t_enc=total_steps, all_timesteps_count=all_timesteps_count, **kwargs + ) for i, step in enumerate(iterator): index = total_steps - i - 1 @@ -370,81 +370,85 @@ class Sampler(object): use_original_steps=use_original_steps, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, - t_next = ts_next, - step_count=len(self.ddim_timesteps) + t_next=ts_next, + step_count=len(self.ddim_timesteps), ) x_dec, pred_x0, e_t = outs if img_callback: - img_callback(x_dec,i) + img_callback(x_dec, i) return x_dec - def get_initial_image(self,x_T,shape,timesteps=None): + def get_initial_image(self, x_T, shape, timesteps=None): if x_T is None: return torch.randn(shape, device=self.device) else: return x_T def p_sample( - self, - img, - cond, - ts, - index, - repeat_noise=False, - use_original_steps=False, - quantize_denoised=False, - temperature=1.0, - noise_dropout=0.0, - score_corrector=None, - corrector_kwargs=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - old_eps=None, - t_next=None, - steps=None, + self, + img, + cond, + ts, + index, + repeat_noise=False, + use_original_steps=False, + quantize_denoised=False, + temperature=1.0, + noise_dropout=0.0, + score_corrector=None, + corrector_kwargs=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + old_eps=None, + t_next=None, + steps=None, ): - raise NotImplementedError("p_sample() must be implemented in a descendent class") + raise NotImplementedError( + "p_sample() must be implemented in a descendent class" + ) - def prepare_to_sample(self,t_enc,**kwargs): - ''' + def prepare_to_sample(self, t_enc, **kwargs): + """ Hook that will be called right before the very first invocation of p_sample() to allow subclass to do additional initialization. 
t_enc corresponds to the actual number of steps that will be run, and may be less than total steps if img2img is active. - ''' + """ pass - def get_timesteps(self,ddim_steps): - ''' + def get_timesteps(self, ddim_steps): + """ The ddim and plms samplers work on timesteps. This method is called after ddim_timesteps are created in make_schedule(), and selects the portion of timesteps that will be used for sampling, depending on the t_enc in img2img. - ''' + """ return self.ddim_timesteps[:ddim_steps] - def q_sample(self,x0,ts): - ''' + def q_sample(self, x0, ts): + """ Returns self.model.q_sample(x0,ts). Is overridden in the k* samplers to return self.model.inner_model.q_sample(x0,ts) - ''' - return self.model.q_sample(x0,ts) + """ + return self.model.q_sample(x0, ts) - def conditioning_key(self)->str: + def conditioning_key(self) -> str: return self.model.model.conditioning_key - def uses_inpainting_model(self)->bool: - return self.conditioning_key() in ('hybrid','concat') + def uses_inpainting_model(self) -> bool: + return self.conditioning_key() in ("hybrid", "concat") - def adjust_settings(self,**kwargs): - ''' + def adjust_settings(self, **kwargs): + """ This is a catch-all method for adjusting any instance variables after the sampler is instantiated. No type-checking performed here, so use with care! - ''' + """ for k in kwargs.keys(): try: - setattr(self,k,kwargs[k]) + setattr(self, k, kwargs[k]) except AttributeError: - print(f'** Warning: attempt to set unknown attribute {k} in sampler of type {type(self)}') + print( + f"** Warning: attempt to set unknown attribute {k} in sampler of type {type(self)}" + ) diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index b02a381d70..7e3ab455b9 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -1,7 +1,7 @@ from contextlib import contextmanager from dataclasses import dataclass from math import ceil -from typing import Callable, Optional, Union, Any, Dict +from typing import Any, Callable, Dict, Optional, Union import numpy as np import torch @@ -9,17 +9,28 @@ from diffusers.models.cross_attention import AttnProcessor from typing_extensions import TypeAlias from invokeai.backend.globals import Globals -from .cross_attention_control import Arguments, \ - restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \ - CrossAttentionType, SwapCrossAttnContext + +from .cross_attention_control import ( + Arguments, + Context, + CrossAttentionType, + SwapCrossAttnContext, + get_cross_attention_modules, + override_cross_attention, + restore_default_cross_attention, +) from .cross_attention_map_saving import AttentionMapSaver ModelForwardCallback: TypeAlias = Union[ # x, t, conditioning, Optional[cross-attention kwargs] - Callable[[torch.Tensor, torch.Tensor, torch.Tensor, Optional[dict[str, Any]]], torch.Tensor], - Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor] + Callable[ + [torch.Tensor, torch.Tensor, torch.Tensor, Optional[dict[str, Any]]], + torch.Tensor, + ], + Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor], ] + @dataclass(frozen=True) class PostprocessingSettings: threshold: float @@ -29,20 +40,20 @@ class PostprocessingSettings: class InvokeAIDiffuserComponent: - ''' + """ The aim of this component is to provide a single place for code that can 
be applied identically to all InvokeAI diffusion procedures. At the moment it includes the following features: * Cross attention control ("prompt2prompt") * Hybrid conditioning (used for inpainting) - ''' + """ + debug_thresholding = False sequential_guidance = False @dataclass class ExtraConditioningInfo: - tokens_count_including_eos_bos: int cross_attention_control_args: Optional[Arguments] = None @@ -50,10 +61,12 @@ class InvokeAIDiffuserComponent: def wants_cross_attention_control(self): return self.cross_attention_control_args is not None - - def __init__(self, model, model_forward_callback: ModelForwardCallback, - is_running_diffusers: bool=False, - ): + def __init__( + self, + model, + model_forward_callback: ModelForwardCallback, + is_running_diffusers: bool = False, + ): """ :param model: the unet model to pass through to cross attention control :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning) @@ -66,23 +79,29 @@ class InvokeAIDiffuserComponent: self.sequential_guidance = Globals.sequential_guidance @contextmanager - def custom_attention_context(self, - extra_conditioning_info: Optional[ExtraConditioningInfo], - step_count: int): - do_swap = extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control + def custom_attention_context( + self, extra_conditioning_info: Optional[ExtraConditioningInfo], step_count: int + ): + do_swap = ( + extra_conditioning_info is not None + and extra_conditioning_info.wants_cross_attention_control + ) old_attn_processor = None if do_swap: - old_attn_processor = self.override_cross_attention(extra_conditioning_info, - step_count=step_count) + old_attn_processor = self.override_cross_attention( + extra_conditioning_info, step_count=step_count + ) try: yield None finally: if old_attn_processor is not None: self.restore_default_cross_attention(old_attn_processor) # TODO resuscitate attention map saving - #self.remove_attention_map_saving() + # self.remove_attention_map_saving() - def override_cross_attention(self, conditioning: ExtraConditioningInfo, step_count: int) -> Dict[str, AttnProcessor]: + def override_cross_attention( + self, conditioning: ExtraConditioningInfo, step_count: int + ) -> Dict[str, AttnProcessor]: """ setup cross attention .swap control. for diffusers this replaces the attention processor, so the previous attention processor is returned so that the caller can restore it later. 
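Taken together, custom_attention_context() and do_diffusion_step() are meant to bracket a sampling loop. A hypothetical caller (unet, latents, timesteps, and the embedding tensors are illustrative placeholders, not names from this patch) might look like:

diffuser = InvokeAIDiffuserComponent(
    unet,  # the model whose cross-attention layers are being controlled
    model_forward_callback=lambda x, t, c: unet(x, t, c),
    is_running_diffusers=True,
)
steps = len(timesteps)
with diffuser.custom_attention_context(extra_conditioning_info, step_count=steps):
    for i, t in enumerate(timesteps):
        latents = diffuser.do_diffusion_step(
            latents,
            t,
            uncond_embeddings,
            cond_embeddings,
            unconditional_guidance_scale=7.5,
            step_index=i,
            total_step_count=steps,
        )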
@@ -90,18 +109,24 @@ class InvokeAIDiffuserComponent: self.conditioning = conditioning self.cross_attention_control_context = Context( arguments=self.conditioning.cross_attention_control_args, - step_count=step_count + step_count=step_count, + ) + return override_cross_attention( + self.model, + self.cross_attention_control_context, + is_running_diffusers=self.is_running_diffusers, ) - return override_cross_attention(self.model, - self.cross_attention_control_context, - is_running_diffusers=self.is_running_diffusers) - def restore_default_cross_attention(self, restore_attention_processor: Optional['AttnProcessor']=None): + def restore_default_cross_attention( + self, restore_attention_processor: Optional["AttnProcessor"] = None + ): self.conditioning = None self.cross_attention_control_context = None - restore_default_cross_attention(self.model, - is_running_diffusers=self.is_running_diffusers, - restore_attention_processor=restore_attention_processor) + restore_default_cross_attention( + self.model, + is_running_diffusers=self.is_running_diffusers, + restore_attention_processor=restore_attention_processor, + ) def setup_attention_map_saving(self, saver: AttentionMapSaver): def callback(slice, dim, offset, slice_size, key): @@ -110,26 +135,40 @@ class InvokeAIDiffuserComponent: return saver.add_attention_maps(slice, key) - tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS) + tokens_cross_attention_modules = get_cross_attention_modules( + self.model, CrossAttentionType.TOKENS + ) for identifier, module in tokens_cross_attention_modules: - key = ('down' if identifier.startswith('down') else - 'up' if identifier.startswith('up') else - 'mid') + key = ( + "down" + if identifier.startswith("down") + else "up" + if identifier.startswith("up") + else "mid" + ) module.set_attention_slice_calculated_callback( - lambda slice, dim, offset, slice_size, key=key: callback(slice, dim, offset, slice_size, key)) + lambda slice, dim, offset, slice_size, key=key: callback( + slice, dim, offset, slice_size, key + ) + ) def remove_attention_map_saving(self): - tokens_cross_attention_modules = get_cross_attention_modules(self.model, CrossAttentionType.TOKENS) + tokens_cross_attention_modules = get_cross_attention_modules( + self.model, CrossAttentionType.TOKENS + ) for _, module in tokens_cross_attention_modules: module.set_attention_slice_calculated_callback(None) - def do_diffusion_step(self, x: torch.Tensor, sigma: torch.Tensor, - unconditioning: Union[torch.Tensor,dict], - conditioning: Union[torch.Tensor,dict], - unconditional_guidance_scale: float, - step_index: Optional[int]=None, - total_step_count: Optional[int]=None, - ): + def do_diffusion_step( + self, + x: torch.Tensor, + sigma: torch.Tensor, + unconditioning: Union[torch.Tensor, dict], + conditioning: Union[torch.Tensor, dict], + unconditional_guidance_scale: float, + step_index: Optional[int] = None, + total_step_count: Optional[int] = None, + ): """ :param x: current latents :param sigma: aka t, passed to the internal model to control how much denoising will occur @@ -140,33 +179,55 @@ class InvokeAIDiffuserComponent: :return: the new latents after applying the model to x using unscaled unconditioning and CFG-scaled conditioning. 
""" - cross_attention_control_types_to_do = [] context: Context = self.cross_attention_control_context if self.cross_attention_control_context is not None: - percent_through = self.calculate_percent_through(sigma, step_index, total_step_count) - cross_attention_control_types_to_do = context.get_active_cross_attention_control_types_for_step(percent_through) + percent_through = self.calculate_percent_through( + sigma, step_index, total_step_count + ) + cross_attention_control_types_to_do = ( + context.get_active_cross_attention_control_types_for_step( + percent_through + ) + ) - wants_cross_attention_control = (len(cross_attention_control_types_to_do) > 0) + wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0 wants_hybrid_conditioning = isinstance(conditioning, dict) if wants_hybrid_conditioning: - unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning(x, sigma, unconditioning, - conditioning) + unconditioned_next_x, conditioned_next_x = self._apply_hybrid_conditioning( + x, sigma, unconditioning, conditioning + ) elif wants_cross_attention_control: - unconditioned_next_x, conditioned_next_x = self._apply_cross_attention_controlled_conditioning(x, sigma, - unconditioning, - conditioning, - cross_attention_control_types_to_do) + ( + unconditioned_next_x, + conditioned_next_x, + ) = self._apply_cross_attention_controlled_conditioning( + x, + sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do, + ) elif self.sequential_guidance: - unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning_sequentially( - x, sigma, unconditioning, conditioning) + ( + unconditioned_next_x, + conditioned_next_x, + ) = self._apply_standard_conditioning_sequentially( + x, sigma, unconditioning, conditioning + ) else: - unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning( - x, sigma, unconditioning, conditioning) + ( + unconditioned_next_x, + conditioned_next_x, + ) = self._apply_standard_conditioning( + x, sigma, unconditioning, conditioning + ) - combined_next_x = self._combine(unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale) + combined_next_x = self._combine( + unconditioned_next_x, conditioned_next_x, unconditional_guidance_scale + ) return combined_next_x @@ -176,24 +237,33 @@ class InvokeAIDiffuserComponent: latents: torch.Tensor, sigma, step_index, - total_step_count + total_step_count, ) -> torch.Tensor: if postprocessing_settings is not None: - percent_through = self.calculate_percent_through(sigma, step_index, total_step_count) - latents = self.apply_threshold(postprocessing_settings, latents, percent_through) - latents = self.apply_symmetry(postprocessing_settings, latents, percent_through) + percent_through = self.calculate_percent_through( + sigma, step_index, total_step_count + ) + latents = self.apply_threshold( + postprocessing_settings, latents, percent_through + ) + latents = self.apply_symmetry( + postprocessing_settings, latents, percent_through + ) return latents def calculate_percent_through(self, sigma, step_index, total_step_count): if step_index is not None and total_step_count is not None: # 🧨diffusers codepath - percent_through = step_index / total_step_count # will never reach 1.0 - this is deliberate + percent_through = ( + step_index / total_step_count + ) # will never reach 1.0 - this is deliberate else: # legacy compvis codepath # TODO remove when compvis codepath support is dropped if step_index is None and sigma is None: raise ValueError( - 
f"Either step_index or sigma is required when doing cross attention control, but both are None.") + f"Either step_index or sigma is required when doing cross attention control, but both are None." + ) percent_through = self.estimate_percent_through(step_index, sigma) return percent_through @@ -204,24 +274,30 @@ class InvokeAIDiffuserComponent: x_twice = torch.cat([x] * 2) sigma_twice = torch.cat([sigma] * 2) both_conditionings = torch.cat([unconditioning, conditioning]) - both_results = self.model_forward_callback(x_twice, sigma_twice, both_conditionings) + both_results = self.model_forward_callback( + x_twice, sigma_twice, both_conditionings + ) unconditioned_next_x, conditioned_next_x = both_results.chunk(2) - if conditioned_next_x.device.type == 'mps': + if conditioned_next_x.device.type == "mps": # prevent a result filled with zeros. seems to be a torch bug. conditioned_next_x = conditioned_next_x.clone() return unconditioned_next_x, conditioned_next_x - - def _apply_standard_conditioning_sequentially(self, x: torch.Tensor, sigma, unconditioning: torch.Tensor, conditioning: torch.Tensor): + def _apply_standard_conditioning_sequentially( + self, + x: torch.Tensor, + sigma, + unconditioning: torch.Tensor, + conditioning: torch.Tensor, + ): # low-memory sequential path unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning) conditioned_next_x = self.model_forward_callback(x, sigma, conditioning) - if conditioned_next_x.device.type == 'mps': + if conditioned_next_x.device.type == "mps": # prevent a result filled with zeros. seems to be a torch bug. conditioned_next_x = conditioned_next_x.clone() return unconditioned_next_x, conditioned_next_x - def _apply_hybrid_conditioning(self, x, sigma, unconditioning, conditioning): assert isinstance(conditioning, dict) assert isinstance(unconditioning, dict) @@ -236,48 +312,80 @@ class InvokeAIDiffuserComponent: ] else: both_conditionings[k] = torch.cat([unconditioning[k], conditioning[k]]) - unconditioned_next_x, conditioned_next_x = self.model_forward_callback(x_twice, sigma_twice, both_conditionings).chunk(2) + unconditioned_next_x, conditioned_next_x = self.model_forward_callback( + x_twice, sigma_twice, both_conditionings + ).chunk(2) return unconditioned_next_x, conditioned_next_x - - def _apply_cross_attention_controlled_conditioning(self, - x: torch.Tensor, - sigma, - unconditioning, - conditioning, - cross_attention_control_types_to_do): + def _apply_cross_attention_controlled_conditioning( + self, + x: torch.Tensor, + sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do, + ): if self.is_running_diffusers: - return self._apply_cross_attention_controlled_conditioning__diffusers(x, sigma, unconditioning, - conditioning, - cross_attention_control_types_to_do) + return self._apply_cross_attention_controlled_conditioning__diffusers( + x, + sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do, + ) else: - return self._apply_cross_attention_controlled_conditioning__compvis(x, sigma, unconditioning, conditioning, - cross_attention_control_types_to_do) + return self._apply_cross_attention_controlled_conditioning__compvis( + x, + sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do, + ) - def _apply_cross_attention_controlled_conditioning__diffusers(self, - x: torch.Tensor, - sigma, - unconditioning, - conditioning, - cross_attention_control_types_to_do): + def _apply_cross_attention_controlled_conditioning__diffusers( + self, + x: torch.Tensor, + sigma, 
+ unconditioning, + conditioning, + cross_attention_control_types_to_do, + ): context: Context = self.cross_attention_control_context - cross_attn_processor_context = SwapCrossAttnContext(modified_text_embeddings=context.arguments.edited_conditioning, - index_map=context.cross_attention_index_map, - mask=context.cross_attention_mask, - cross_attention_types_to_do=[]) + cross_attn_processor_context = SwapCrossAttnContext( + modified_text_embeddings=context.arguments.edited_conditioning, + index_map=context.cross_attention_index_map, + mask=context.cross_attention_mask, + cross_attention_types_to_do=[], + ) # no cross attention for unconditioning (negative prompt) - unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning, - {"swap_cross_attn_context": cross_attn_processor_context}) + unconditioned_next_x = self.model_forward_callback( + x, + sigma, + unconditioning, + {"swap_cross_attn_context": cross_attn_processor_context}, + ) # do requested cross attention types for conditioning (positive prompt) - cross_attn_processor_context.cross_attention_types_to_do = cross_attention_control_types_to_do - conditioned_next_x = self.model_forward_callback(x, sigma, conditioning, - {"swap_cross_attn_context": cross_attn_processor_context}) + cross_attn_processor_context.cross_attention_types_to_do = ( + cross_attention_control_types_to_do + ) + conditioned_next_x = self.model_forward_callback( + x, + sigma, + conditioning, + {"swap_cross_attn_context": cross_attn_processor_context}, + ) return unconditioned_next_x, conditioned_next_x - - def _apply_cross_attention_controlled_conditioning__compvis(self, x:torch.Tensor, sigma, unconditioning, conditioning, cross_attention_control_types_to_do): + def _apply_cross_attention_controlled_conditioning__compvis( + self, + x: torch.Tensor, + sigma, + unconditioning, + conditioning, + cross_attention_control_types_to_do, + ): # print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do) # slower non-batched path (20% slower on mac MPS) # We are only interested in using attention maps for conditioned_next_x, but batching them with generation of @@ -287,24 +395,28 @@ class InvokeAIDiffuserComponent: # representing batched uncond + cond, but then when it comes to applying the saved attention, the # wrangler gets an attention tensor which only has shape[0]=8, representing just self.edited_conditionings.) # todo: give CrossAttentionControl's `wrangler` function more info so it can work with a batched call as well. 
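An aside on the step above: whichever conditioning path is selected, do_diffusion_step() finishes by blending the unconditioned and conditioned predictions with self._combine(), which lies outside this hunk. The conventional classifier-free guidance blend such a helper is presumed to compute (an assumption, shown here only for context) is:

import torch

def combine_cfg(
    unconditioned_next_x: torch.Tensor,
    conditioned_next_x: torch.Tensor,
    guidance_scale: float,
) -> torch.Tensor:
    # classifier-free guidance: start from the unconditioned prediction and move
    # guidance_scale times along the direction suggested by the conditioned one
    delta = conditioned_next_x - unconditioned_next_x
    return unconditioned_next_x + guidance_scale * delta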
- context:Context = self.cross_attention_control_context + context: Context = self.cross_attention_control_context try: unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning) # process x using the original prompt, saving the attention maps - #print("saving attention maps for", cross_attention_control_types_to_do) + # print("saving attention maps for", cross_attention_control_types_to_do) for ca_type in cross_attention_control_types_to_do: context.request_save_attention_maps(ca_type) _ = self.model_forward_callback(x, sigma, conditioning) context.clear_requests(cleanup=False) # process x again, using the saved attention maps to control where self.edited_conditioning will be applied - #print("applying saved attention maps for", cross_attention_control_types_to_do) + # print("applying saved attention maps for", cross_attention_control_types_to_do) for ca_type in cross_attention_control_types_to_do: context.request_apply_saved_attention_maps(ca_type) - edited_conditioning = self.conditioning.cross_attention_control_args.edited_conditioning - conditioned_next_x = self.model_forward_callback(x, sigma, edited_conditioning) + edited_conditioning = ( + self.conditioning.cross_attention_control_args.edited_conditioning + ) + conditioned_next_x = self.model_forward_callback( + x, sigma, edited_conditioning + ) context.clear_requests(cleanup=True) except: @@ -323,17 +435,21 @@ class InvokeAIDiffuserComponent: self, postprocessing_settings: PostprocessingSettings, latents: torch.Tensor, - percent_through: float + percent_through: float, ) -> torch.Tensor: - - if postprocessing_settings.threshold is None or postprocessing_settings.threshold == 0.0: + if ( + postprocessing_settings.threshold is None + or postprocessing_settings.threshold == 0.0 + ): return latents threshold = postprocessing_settings.threshold warmup = postprocessing_settings.warmup if percent_through < warmup: - current_threshold = threshold + threshold * 5 * (1 - (percent_through / warmup)) + current_threshold = threshold + threshold * 5 * ( + 1 - (percent_through / warmup) + ) else: current_threshold = threshold @@ -347,10 +463,14 @@ class InvokeAIDiffuserComponent: if self.debug_thresholding: std, mean = [i.item() for i in torch.std_mean(latents)] - outside = torch.count_nonzero((latents < -current_threshold) | (latents > current_threshold)) - print(f"\nThreshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})\n" - f" | min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}\n" - f" | {outside / latents.numel() * 100:.2f}% values outside threshold") + outside = torch.count_nonzero( + (latents < -current_threshold) | (latents > current_threshold) + ) + print( + f"\nThreshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})\n" + f" | min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}\n" + f" | {outside / latents.numel() * 100:.2f}% values outside threshold" + ) if maxval < current_threshold and minval > -current_threshold: return latents @@ -363,17 +483,23 @@ class InvokeAIDiffuserComponent: latents = torch.clone(latents) maxval = np.clip(maxval * scale, 1, current_threshold) num_altered += torch.count_nonzero(latents > maxval) - latents[latents > maxval] = torch.rand_like(latents[latents > maxval]) * maxval + latents[latents > maxval] = ( + torch.rand_like(latents[latents > maxval]) * maxval + ) if minval < -current_threshold: latents = torch.clone(latents) minval = np.clip(minval * scale, -current_threshold, -1) num_altered += 
torch.count_nonzero(latents < minval) - latents[latents < minval] = torch.rand_like(latents[latents < minval]) * minval + latents[latents < minval] = ( + torch.rand_like(latents[latents < minval]) * minval + ) if self.debug_thresholding: - print(f" | min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})\n" - f" | {num_altered / latents.numel() * 100:.2f}% values altered") + print( + f" | min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})\n" + f" | {num_altered / latents.numel() * 100:.2f}% values altered" + ) return latents @@ -381,9 +507,8 @@ class InvokeAIDiffuserComponent: self, postprocessing_settings: PostprocessingSettings, latents: torch.Tensor, - percent_through: float + percent_through: float, ) -> torch.Tensor: - # Reset our last percent through if this is our first step. if percent_through == 0.0: self.last_percent_through = 0.0 @@ -393,36 +518,52 @@ class InvokeAIDiffuserComponent: # Check for out of bounds h_symmetry_time_pct = postprocessing_settings.h_symmetry_time_pct - if (h_symmetry_time_pct is not None and (h_symmetry_time_pct <= 0.0 or h_symmetry_time_pct > 1.0)): + if h_symmetry_time_pct is not None and ( + h_symmetry_time_pct <= 0.0 or h_symmetry_time_pct > 1.0 + ): h_symmetry_time_pct = None v_symmetry_time_pct = postprocessing_settings.v_symmetry_time_pct - if (v_symmetry_time_pct is not None and (v_symmetry_time_pct <= 0.0 or v_symmetry_time_pct > 1.0)): + if v_symmetry_time_pct is not None and ( + v_symmetry_time_pct <= 0.0 or v_symmetry_time_pct > 1.0 + ): v_symmetry_time_pct = None dev = latents.device.type - latents.to(device='cpu') + latents.to(device="cpu") if ( - h_symmetry_time_pct != None and - self.last_percent_through < h_symmetry_time_pct and - percent_through >= h_symmetry_time_pct + h_symmetry_time_pct != None + and self.last_percent_through < h_symmetry_time_pct + and percent_through >= h_symmetry_time_pct ): # Horizontal symmetry occurs on the 3rd dimension of the latent width = latents.shape[3] x_flipped = torch.flip(latents, dims=[3]) - latents = torch.cat([latents[:, :, :, 0:int(width/2)], x_flipped[:, :, :, int(width/2):int(width)]], dim=3) + latents = torch.cat( + [ + latents[:, :, :, 0 : int(width / 2)], + x_flipped[:, :, :, int(width / 2) : int(width)], + ], + dim=3, + ) if ( - v_symmetry_time_pct != None and - self.last_percent_through < v_symmetry_time_pct and - percent_through >= v_symmetry_time_pct + v_symmetry_time_pct != None + and self.last_percent_through < v_symmetry_time_pct + and percent_through >= v_symmetry_time_pct ): # Vertical symmetry occurs on the 2nd dimension of the latent height = latents.shape[2] y_flipped = torch.flip(latents, dims=[2]) - latents = torch.cat([latents[:, :, 0:int(height / 2)], y_flipped[:, :, int(height / 2):int(height)]], dim=2) + latents = torch.cat( + [ + latents[:, :, 0 : int(height / 2)], + y_flipped[:, :, int(height / 2) : int(height)], + ], + dim=2, + ) self.last_percent_through = percent_through return latents.to(device=dev) @@ -430,7 +571,9 @@ class InvokeAIDiffuserComponent: def estimate_percent_through(self, step_index, sigma): if step_index is not None and self.cross_attention_control_context is not None: # percent_through will never reach 1.0 (but this is intended) - return float(step_index) / float(self.cross_attention_control_context.step_count) + return float(step_index) / float( + self.cross_attention_control_context.step_count + ) # find the best possible index of the current sigma in the sigma sequence smaller_sigmas = torch.nonzero(self.model.sigmas <= sigma) 
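apply_threshold() and apply_symmetry() above both key off percent_through, so a caller only needs a PostprocessingSettings instance plus the current step fraction. A hypothetical usage sketch (the settings values and the diffuser instance are illustrative, not taken from this patch):

import torch

settings = PostprocessingSettings(
    threshold=0.0,            # 0.0 disables latent clamping in apply_threshold()
    warmup=0.2,
    h_symmetry_time_pct=0.3,  # mirror left/right once 30% of the steps have run
    v_symmetry_time_pct=None, # leave the vertical axis untouched
)
latents = torch.randn(1, 4, 64, 64)
total_step_count = 30
for step_index in range(total_step_count):
    percent_through = step_index / total_step_count
    latents = diffuser.apply_threshold(settings, latents, percent_through)
    latents = diffuser.apply_symmetry(settings, latents, percent_through)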
sigma_index = smaller_sigmas[-1].item() if smaller_sigmas.shape[0] > 0 else 0 @@ -439,33 +582,38 @@ class InvokeAIDiffuserComponent: return 1.0 - float(sigma_index + 1) / float(self.model.sigmas.shape[0]) # print('estimated percent_through', percent_through, 'from sigma', sigma.item()) - # todo: make this work @classmethod - def apply_conjunction(cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale): + def apply_conjunction( + cls, x, t, forward_func, uc, c_or_weighted_c_list, global_guidance_scale + ): x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) # aka sigmas + t_in = torch.cat([t] * 2) # aka sigmas deltas = None uncond_latents = None - weighted_cond_list = c_or_weighted_c_list if type(c_or_weighted_c_list) is list else [(c_or_weighted_c_list, 1)] + weighted_cond_list = ( + c_or_weighted_c_list + if type(c_or_weighted_c_list) is list + else [(c_or_weighted_c_list, 1)] + ) # below is fugly omg num_actual_conditionings = len(c_or_weighted_c_list) - conditionings = [uc] + [c for c,weight in weighted_cond_list] - weights = [1] + [weight for c,weight in weighted_cond_list] - chunk_count = ceil(len(conditionings)/2) + conditionings = [uc] + [c for c, weight in weighted_cond_list] + weights = [1] + [weight for c, weight in weighted_cond_list] + chunk_count = ceil(len(conditionings) / 2) deltas = None for chunk_index in range(chunk_count): - offset = chunk_index*2 - chunk_size = min(2, len(conditionings)-offset) + offset = chunk_index * 2 + chunk_size = min(2, len(conditionings) - offset) if chunk_size == 1: c_in = conditionings[offset] latents_a = forward_func(x_in[:-1], t_in[:-1], c_in) latents_b = None else: - c_in = torch.cat(conditionings[offset:offset+2]) + c_in = torch.cat(conditionings[offset : offset + 2]) latents_a, latents_b = forward_func(x_in, t_in, c_in).chunk(2) # first chunk is guaranteed to be 2 entries: uncond_latents + first conditioining @@ -478,11 +626,15 @@ class InvokeAIDiffuserComponent: deltas = torch.cat((deltas, latents_b - uncond_latents)) # merge the weighted deltas together into a single merged delta - per_delta_weights = torch.tensor(weights[1:], dtype=deltas.dtype, device=deltas.device) + per_delta_weights = torch.tensor( + weights[1:], dtype=deltas.dtype, device=deltas.device + ) normalize = False if normalize: per_delta_weights /= torch.sum(per_delta_weights) - reshaped_weights = per_delta_weights.reshape(per_delta_weights.shape + (1, 1, 1)) + reshaped_weights = per_delta_weights.reshape( + per_delta_weights.shape + (1, 1, 1) + ) deltas_merged = torch.sum(deltas * reshaped_weights, dim=0, keepdim=True) # old_return_value = super().forward(x, sigma, uncond, cond, cond_scale) diff --git a/invokeai/backend/stable_diffusion/diffusionmodules/model.py b/invokeai/backend/stable_diffusion/diffusionmodules/model.py index 94b0dfc4c7..62cb45d508 100644 --- a/invokeai/backend/stable_diffusion/diffusionmodules/model.py +++ b/invokeai/backend/stable_diffusion/diffusionmodules/model.py @@ -1,16 +1,17 @@ # pytorch_diffusion + derived encoder decoder import gc import math + +import numpy as np +import psutil import torch import torch.nn as nn -from torch.nn.functional import silu -import numpy as np from einops import rearrange +from torch.nn.functional import silu from ...util import instantiate_from_config from ..attention import LinearAttention -import psutil def get_timestep_embedding(timesteps, embedding_dim): """ @@ -29,12 +30,14 @@ def get_timestep_embedding(timesteps, embedding_dim): emb = timesteps.float()[:, None] * emb[None, :] emb = 
torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) return emb def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + return torch.nn.GroupNorm( + num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True + ) class Upsample(nn.Module): @@ -42,22 +45,25 @@ class Upsample(nn.Module): super().__init__() self.with_conv = with_conv if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=1, padding=1 + ) def forward(self, x): - cpu_m1_cond = True if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available() and \ - x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3] % 2**27 == 0 else False + cpu_m1_cond = ( + True + if hasattr(torch.backends, "mps") + and torch.backends.mps.is_available() + and x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3] % 2**27 == 0 + else False + ) if cpu_m1_cond: - x = x.to('cpu') # send to cpu + x = x.to("cpu") # send to cpu x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") if self.with_conv: x = self.conv(x) if cpu_m1_cond: - x = x.to('mps') # return to mps + x = x.to("mps") # return to mps return x @@ -67,15 +73,13 @@ class Downsample(nn.Module): self.with_conv = with_conv if self.with_conv: # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=2, padding=0 + ) def forward(self, x): if self.with_conv: - pad = (0,1,0,1) + pad = (0, 1, 0, 1) x = torch.nn.functional.pad(x, pad, mode="constant", value=0) x = self.conv(x) else: @@ -84,8 +88,15 @@ class Downsample(nn.Module): class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): + def __init__( + self, + *, + in_channels, + out_channels=None, + conv_shortcut=False, + dropout, + temb_channels=512, + ): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels @@ -93,50 +104,41 @@ class ResnetBlock(nn.Module): self.use_conv_shortcut = conv_shortcut self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) + self.conv1 = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) + self.temb_proj = torch.nn.Linear(temb_channels, out_channels) self.norm2 = Normalize(out_channels) self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) + self.conv2 = torch.nn.Conv2d( + out_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) if self.in_channels != self.out_channels: if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) + self.conv_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - 
stride=1, - padding=0) + self.nin_shortcut = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0 + ) def forward(self, x, temb): - if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): x_size = x.size() if (x_size[0] * x_size[1] * x_size[2] * x_size[3]) % 2**29 == 0: - self.to('cpu') - x = x.to('cpu') + self.to("cpu") + x = x.to("cpu") else: - self.to('mps') - x = x.to('mps') + self.to("mps") + x = x.to("mps") h = self.norm1(x) h = silu(h) h = self.conv1(h) if temb is not None: - h = h + self.temb_proj(silu(temb))[:,:,None,None] + h = h + self.temb_proj(silu(temb))[:, :, None, None] h = self.norm2(h) h = silu(h) @@ -151,8 +153,10 @@ class ResnetBlock(nn.Module): return x + h + class LinAttnBlock(LinearAttention): """to match AttnBlock usage""" + def __init__(self, in_channels): super().__init__(dim=in_channels, heads=1, dim_head=in_channels) @@ -163,27 +167,18 @@ class AttnBlock(nn.Module): self.in_channels = in_channels self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - + self.q = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=1, stride=1, padding=0 + ) def forward(self, x): h_ = x @@ -195,21 +190,21 @@ class AttnBlock(nn.Module): # compute attention b, c, h, w = q1.shape - q2 = q1.reshape(b, c, h*w) + q2 = q1.reshape(b, c, h * w) del q1 - q = q2.permute(0, 2, 1) # b,hw,c + q = q2.permute(0, 2, 1) # b,hw,c del q2 - k = k1.reshape(b, c, h*w) # b,c,hw + k = k1.reshape(b, c, h * w) # b,c,hw del k1 h_ = torch.zeros_like(k, device=q.device) - if q.device.type == 'cuda': + if q.device.type == "cuda": stats = torch.cuda.memory_stats(q.device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] + mem_active = stats["active_bytes.all.current"] + mem_reserved = stats["reserved_bytes.all.current"] mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) mem_free_torch = mem_reserved - mem_active mem_free_total = mem_free_cuda + mem_free_torch @@ -219,31 +214,37 @@ class AttnBlock(nn.Module): steps = 1 if mem_required > mem_free_total: - steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) + steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) - slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + slice_size = ( + q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + ) else: if psutil.virtual_memory().available / (1024**3) < 12: slice_size = 1 else: - slice_size = min(q.shape[1], math.floor(2**30 / (q.shape[0] * q.shape[1]))) + slice_size = min( + q.shape[1], math.floor(2**30 / (q.shape[0] * q.shape[1])) + ) for i in range(0, q.shape[1], slice_size): end = i + slice_size - w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w2 = w1 * (int(c)**(-0.5)) + w1 
= torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w2 = w1 * (int(c) ** (-0.5)) del w1 w3 = torch.nn.functional.softmax(w2, dim=2) del w2 # attend to values - v1 = v.reshape(b, c, h*w) - w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) + v1 = v.reshape(b, c, h * w) + w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) del w3 - h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_[:, :, i:end] = torch.bmm( + v1, w4 + ) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] del v1, w4 h2 = h_.reshape(b, c, h, w) @@ -258,7 +259,7 @@ class AttnBlock(nn.Module): def make_attn(in_channels, attn_type="vanilla"): - assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' + assert attn_type in ["vanilla", "linear", "none"], f"attn_type {attn_type} unknown" print(f" | Making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": return AttnBlock(in_channels) @@ -269,13 +270,27 @@ def make_attn(in_channels, attn_type="vanilla"): class Model(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + use_timestep=True, + use_linear_attn=False, + attn_type="vanilla", + ): super().__init__() - if use_linear_attn: attn_type = "linear" + if use_linear_attn: + attn_type = "linear" self.ch = ch - self.temb_ch = self.ch*4 + self.temb_ch = self.ch * 4 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution @@ -285,70 +300,80 @@ class Model(nn.Module): if self.use_timestep: # timestep embedding self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) + self.temb.dense = nn.ModuleList( + [ + torch.nn.Linear(self.ch, self.temb_ch), + torch.nn.Linear(self.temb_ch, self.temb_ch), + ] + ) # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) + in_ch_mult = (1,) + tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn - if i_level != self.num_resolutions-1: + if i_level != self.num_resolutions - 1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, 
- out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): + block_out = ch * ch_mult[i_level] + skip_in = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) + skip_in = ch * in_ch_mult[i_level] + block.append( + ResnetBlock( + in_channels=block_in + skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) @@ -358,18 +383,16 @@ class Model(nn.Module): if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order + self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) def forward(self, x, t=None, context=None): - #assert x.shape[2] == x.shape[3] == self.resolution + # assert x.shape[2] == x.shape[3] == self.resolution if context is not None: # assume aligned context, cat along channel axis x = torch.cat((x, context), dim=1) @@ -391,7 +414,7 @@ class Model(nn.Module): if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) - if i_level != self.num_resolutions-1: + if i_level != self.num_resolutions - 1: hs.append(self.down[i_level].downsample(hs[-1])) # middle @@ -402,9 +425,10 @@ class Model(nn.Module): # upsampling for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): + for i_block in range(self.num_res_blocks + 1): h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) + torch.cat([h, hs.pop()], dim=1), temb + ) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: @@ -421,12 +445,27 @@ class Model(nn.Module): class Encoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", - **ignore_kwargs): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + z_channels, + double_z=True, + use_linear_attn=False, + attn_type="vanilla", + **ignore_kwargs, + ): super().__init__() - if use_linear_attn: attn_type = "linear" + if use_linear_attn: + attn_type = "linear" self.ch 
= ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) @@ -435,56 +474,64 @@ class Encoder(nn.Module): self.in_channels = in_channels # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) + self.conv_in = torch.nn.Conv2d( + in_channels, self.ch, kernel_size=3, stride=1, padding=1 + ) curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) + in_ch_mult = (1,) + tuple(ch_mult) self.in_ch_mult = in_ch_mult self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] + block_in = ch * in_ch_mult[i_level] + block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn - if i_level != self.num_resolutions-1: + if i_level != self.num_resolutions - 1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) + self.conv_out = torch.nn.Conv2d( + block_in, + 2 * z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1, + ) def forward(self, x): # timestep embedding @@ -498,7 +545,7 @@ class Encoder(nn.Module): if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) - if i_level != self.num_resolutions-1: + if i_level != self.num_resolutions - 1: hs.append(self.down[i_level].downsample(hs[-1])) # middle @@ -515,12 +562,28 @@ class Encoder(nn.Module): class Decoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", **ignorekwargs): + def __init__( + self, + *, + ch, + out_ch, + ch_mult=(1, 2, 4, 8), + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + in_channels, + resolution, + z_channels, + give_pre_end=False, + tanh_out=False, + use_linear_attn=False, + attn_type="vanilla", + **ignorekwargs, + ): super().__init__() - if use_linear_attn: attn_type = "linear" + if use_linear_attn: + attn_type = "linear" self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) @@ -531,43 +594,52 @@ class Decoder(nn.Module): self.tanh_out = tanh_out # compute in_ch_mult, 
block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print(" | Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) + in_ch_mult = (1,) + tuple(ch_mult) + block_in = ch * ch_mult[self.num_resolutions - 1] + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.z_shape = (1, z_channels, curr_res, curr_res) + print( + " | Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape) + ) + ) # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) + self.conv_in = torch.nn.Conv2d( + z_channels, block_in, kernel_size=3, stride=1, padding=1 + ) # middle self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) + self.mid.block_1 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) + self.mid.block_2 = ResnetBlock( + in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout, + ) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) @@ -577,18 +649,16 @@ class Decoder(nn.Module): if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order + self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) + self.conv_out = torch.nn.Conv2d( + block_in, out_ch, kernel_size=3, stride=1, padding=1 + ) def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] + # assert z.shape[1:] == self.z_shape[1:] self.last_z_shape = z.shape # timestep embedding @@ -604,12 +674,12 @@ class Decoder(nn.Module): # prepare for up sampling gc.collect() - if h.device.type == 'cuda': + if h.device.type == "cuda": torch.cuda.empty_cache() # upsampling for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): + for i_block in range(self.num_res_blocks + 1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) @@ -631,29 +701,40 @@ class Decoder(nn.Module): class SimpleDecoder(nn.Module): def __init__(self, in_channels, out_channels, *args, **kwargs): super().__init__() - self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock(in_channels=in_channels, - out_channels=2 * in_channels, 
- temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - nn.Conv2d(2*in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True)]) + self.model = nn.ModuleList( + [ + nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock( + in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, + dropout=0.0, + ), + ResnetBlock( + in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, + dropout=0.0, + ), + ResnetBlock( + in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, + dropout=0.0, + ), + nn.Conv2d(2 * in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True), + ] + ) # end self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) + self.conv_out = torch.nn.Conv2d( + in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) def forward(self, x): for i, layer in enumerate(self.model): - if i in [1,2,3]: + if i in [1, 2, 3]: x = layer(x, None) else: x = layer(x) @@ -665,8 +746,16 @@ class SimpleDecoder(nn.Module): class UpsampleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, - ch_mult=(2,2), dropout=0.0): + def __init__( + self, + in_channels, + out_channels, + ch, + num_res_blocks, + resolution, + ch_mult=(2, 2), + dropout=0.0, + ): super().__init__() # upsampling self.temb_ch = 0 @@ -680,10 +769,14 @@ class UpsampleDecoder(nn.Module): res_block = [] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): - res_block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) + res_block.append( + ResnetBlock( + in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout, + ) + ) block_in = block_out self.res_blocks.append(nn.ModuleList(res_block)) if i_level != self.num_resolutions - 1: @@ -692,11 +785,9 @@ class UpsampleDecoder(nn.Module): # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - padding=1) + self.conv_out = torch.nn.Conv2d( + block_in, out_channels, kernel_size=3, stride=1, padding=1 + ) def forward(self, x): # upsampling @@ -717,31 +808,50 @@ class LatentRescaler(nn.Module): super().__init__() # residual block, interpolate, residual block self.factor = factor - self.conv_in = nn.Conv2d(in_channels, - mid_channels, - kernel_size=3, - stride=1, - padding=1) - self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) + self.conv_in = nn.Conv2d( + in_channels, mid_channels, kernel_size=3, stride=1, padding=1 + ) + self.res_block1 = nn.ModuleList( + [ + ResnetBlock( + in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0, + ) + for _ in range(depth) + ] + ) self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) + self.res_block2 = nn.ModuleList( + [ + ResnetBlock( + in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0, + ) + for _ in range(depth) + ] + ) - 
self.conv_out = nn.Conv2d(mid_channels, - out_channels, - kernel_size=1, - ) + self.conv_out = nn.Conv2d( + mid_channels, + out_channels, + kernel_size=1, + ) def forward(self, x): x = self.conv_in(x) for block in self.res_block1: x = block(x, None) - x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) + x = torch.nn.functional.interpolate( + x, + size=( + int(round(x.shape[2] * self.factor)), + int(round(x.shape[3] * self.factor)), + ), + ) x = self.attn(x) for block in self.res_block2: x = block(x, None) @@ -750,17 +860,42 @@ class LatentRescaler(nn.Module): class MergedRescaleEncoder(nn.Module): - def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, - ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): + def __init__( + self, + in_channels, + ch, + resolution, + out_ch, + num_res_blocks, + attn_resolutions, + dropout=0.0, + resamp_with_conv=True, + ch_mult=(1, 2, 4, 8), + rescale_factor=1.0, + rescale_module_depth=1, + ): super().__init__() intermediate_chn = ch * ch_mult[-1] - self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, - z_channels=intermediate_chn, double_z=False, resolution=resolution, - attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, - out_ch=None) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, - mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) + self.encoder = Encoder( + in_channels=in_channels, + num_res_blocks=num_res_blocks, + ch=ch, + ch_mult=ch_mult, + z_channels=intermediate_chn, + double_z=False, + resolution=resolution, + attn_resolutions=attn_resolutions, + dropout=dropout, + resamp_with_conv=resamp_with_conv, + out_ch=None, + ) + self.rescaler = LatentRescaler( + factor=rescale_factor, + in_channels=intermediate_chn, + mid_channels=intermediate_chn, + out_channels=out_ch, + depth=rescale_module_depth, + ) def forward(self, x): x = self.encoder(x) @@ -769,15 +904,41 @@ class MergedRescaleEncoder(nn.Module): class MergedRescaleDecoder(nn.Module): - def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), - dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): + def __init__( + self, + z_channels, + out_ch, + resolution, + num_res_blocks, + attn_resolutions, + ch, + ch_mult=(1, 2, 4, 8), + dropout=0.0, + resamp_with_conv=True, + rescale_factor=1.0, + rescale_module_depth=1, + ): super().__init__() - tmp_chn = z_channels*ch_mult[-1] - self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, - resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, - ch_mult=ch_mult, resolution=resolution, ch=ch) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, - out_channels=tmp_chn, depth=rescale_module_depth) + tmp_chn = z_channels * ch_mult[-1] + self.decoder = Decoder( + out_ch=out_ch, + z_channels=tmp_chn, + attn_resolutions=attn_resolutions, + dropout=dropout, + resamp_with_conv=resamp_with_conv, + in_channels=None, + num_res_blocks=num_res_blocks, + ch_mult=ch_mult, + resolution=resolution, + ch=ch, + ) + self.rescaler = LatentRescaler( + factor=rescale_factor, + in_channels=z_channels, + mid_channels=tmp_chn, + out_channels=tmp_chn, + 
depth=rescale_module_depth, + ) def forward(self, x): x = self.rescaler(x) @@ -789,14 +950,27 @@ class Upsampler(nn.Module): def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): super().__init__() assert out_size >= in_size - num_blocks = int(np.log2(out_size//in_size))+1 - factor_up = 1.+ (out_size % in_size) - print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") - self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, - out_channels=in_channels) - self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, - attn_resolutions=[], in_channels=None, ch=in_channels, - ch_mult=[ch_mult for _ in range(num_blocks)]) + num_blocks = int(np.log2(out_size // in_size)) + 1 + factor_up = 1.0 + (out_size % in_size) + print( + f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}" + ) + self.rescaler = LatentRescaler( + factor=factor_up, + in_channels=in_channels, + mid_channels=2 * in_channels, + out_channels=in_channels, + ) + self.decoder = Decoder( + out_ch=out_channels, + resolution=out_size, + z_channels=in_channels, + num_res_blocks=2, + attn_resolutions=[], + in_channels=None, + ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)], + ) def forward(self, x): x = self.rescaler(x) @@ -810,37 +984,47 @@ class Resize(nn.Module): self.with_conv = learned self.mode = mode if self.with_conv: - print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") + print( + f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode" + ) raise NotImplementedError() assert in_channels is not None # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=4, - stride=2, - padding=1) + self.conv = torch.nn.Conv2d( + in_channels, in_channels, kernel_size=4, stride=2, padding=1 + ) def forward(self, x, scale_factor=1.0): - if scale_factor==1.0: + if scale_factor == 1.0: return x else: - x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor) + x = torch.nn.functional.interpolate( + x, mode=self.mode, align_corners=False, scale_factor=scale_factor + ) return x -class FirstStagePostProcessor(nn.Module): - def __init__(self, ch_mult:list, in_channels, - pretrained_model:nn.Module=None, - reshape=False, - n_channels=None, - dropout=0., - pretrained_config=None): +class FirstStagePostProcessor(nn.Module): + def __init__( + self, + ch_mult: list, + in_channels, + pretrained_model: nn.Module = None, + reshape=False, + n_channels=None, + dropout=0.0, + pretrained_config=None, + ): super().__init__() if pretrained_config is None: - assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + assert ( + pretrained_model is not None + ), 'Either "pretrained_model" or "pretrained_config" must not be None' self.pretrained_model = pretrained_model else: - assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + assert ( + pretrained_config is not None + ), 'Either "pretrained_model" or "pretrained_config" must not be None' self.instantiate_pretrained(pretrained_config) self.do_reshape = reshape @@ -848,22 +1032,26 @@ class FirstStagePostProcessor(nn.Module): if n_channels is None: n_channels = 
self.pretrained_model.encoder.ch - self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) - self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, - stride=1,padding=1) + self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2) + self.proj = nn.Conv2d( + in_channels, n_channels, kernel_size=3, stride=1, padding=1 + ) blocks = [] downs = [] ch_in = n_channels for m in ch_mult: - blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) + blocks.append( + ResnetBlock( + in_channels=ch_in, out_channels=m * n_channels, dropout=dropout + ) + ) ch_in = m * n_channels downs.append(Downsample(ch_in, with_conv=False)) self.model = nn.ModuleList(blocks) self.downsampler = nn.ModuleList(downs) - def instantiate_pretrained(self, config): model = instantiate_from_config(config) self.pretrained_model = model.eval() @@ -871,24 +1059,23 @@ class FirstStagePostProcessor(nn.Module): for param in self.pretrained_model.parameters(): param.requires_grad = False - @torch.no_grad() - def encode_with_pretrained(self,x): + def encode_with_pretrained(self, x): c = self.pretrained_model.encode(x) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() - return c + return c - def forward(self,x): + def forward(self, x): z_fs = self.encode_with_pretrained(x) z = self.proj_norm(z_fs) z = self.proj(z) z = silu(z) - for submodel, downmodel in zip(self.model,self.downsampler): - z = submodel(z,temb=None) + for submodel, downmodel in zip(self.model, self.downsampler): + z = submodel(z, temb=None) z = downmodel(z) if self.do_reshape: - z = rearrange(z,'b c h w -> b (h w) c') + z = rearrange(z, "b c h w -> b (h w) c") return z diff --git a/invokeai/backend/stable_diffusion/diffusionmodules/openaimodel.py b/invokeai/backend/stable_diffusion/diffusionmodules/openaimodel.py index d6baa76a1c..867a1a30ca 100644 --- a/invokeai/backend/stable_diffusion/diffusionmodules/openaimodel.py +++ b/invokeai/backend/stable_diffusion/diffusionmodules/openaimodel.py @@ -1,23 +1,22 @@ +import math from abc import abstractmethod from functools import partial -import math from typing import Iterable import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F - +from ldm.modules.attention import SpatialTransformer from ldm.modules.diffusionmodules.util import ( + avg_pool_nd, checkpoint, conv_nd, linear, - avg_pool_nd, - zero_module, normalization, timestep_embedding, + zero_module, ) -from ldm.modules.attention import SpatialTransformer # dummy replace @@ -100,9 +99,7 @@ class Upsample(nn.Module): upsampling occurs in the inner-two dimensions. """ - def __init__( - self, channels, use_conv, dims=2, out_channels=None, padding=1 - ): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels @@ -117,10 +114,10 @@ class Upsample(nn.Module): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( - x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode='nearest' + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: - x = F.interpolate(x, scale_factor=2, mode='nearest') + x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x @@ -151,9 +148,7 @@ class Downsample(nn.Module): downsampling occurs in the inner-two dimensions. 
""" - def __init__( - self, channels, use_conv, dims=2, out_channels=None, padding=1 - ): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels @@ -237,9 +232,7 @@ class ResBlock(TimestepBlock): nn.SiLU(), linear( emb_channels, - 2 * self.out_channels - if use_scale_shift_norm - else self.out_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, ), ) self.out_layers = nn.Sequential( @@ -247,9 +240,7 @@ class ResBlock(TimestepBlock): nn.SiLU(), nn.Dropout(p=dropout), zero_module( - conv_nd( - dims, self.out_channels, self.out_channels, 3, padding=1 - ) + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) ), ) @@ -260,9 +251,7 @@ class ResBlock(TimestepBlock): dims, channels, self.out_channels, 3, padding=1 ) else: - self.skip_connection = conv_nd( - dims, channels, self.out_channels, 1 - ) + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) def forward(self, x, emb): """ @@ -320,7 +309,7 @@ class AttentionBlock(nn.Module): else: assert ( channels % num_head_channels == 0 - ), f'q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}' + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" self.num_heads = channels // num_head_channels self.use_checkpoint = use_checkpoint self.norm = normalization(channels) @@ -337,7 +326,7 @@ class AttentionBlock(nn.Module): def forward(self, x): return checkpoint( self._forward, (x,), self.parameters(), True - ) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + ) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! # return pt_checkpoint(self._forward, x) # pytorch def _forward(self, x): @@ -387,15 +376,13 @@ class QKVAttentionLegacy(nn.Module): bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split( - ch, dim=1 - ) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( - 'bct,bcs->bts', q * scale, k * scale + "bct,bcs->bts", q * scale, k * scale ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum('bts,bcs->bct', weight, v) + a = th.einsum("bts,bcs->bct", weight, v) return a.reshape(bs, -1, length) @staticmethod @@ -424,14 +411,12 @@ class QKVAttention(nn.Module): q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( - 'bct,bcs->bts', + "bct,bcs->bts", (q * scale).view(bs * self.n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length), ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum( - 'bts,bcs->bct', weight, v.reshape(bs * self.n_heads, ch, length) - ) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod @@ -500,12 +485,12 @@ class UNetModel(nn.Module): if use_spatial_transformer: assert ( context_dim is not None - ), 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..." if context_dim is not None: assert ( use_spatial_transformer - ), 'Fool!! 
You forgot to use the spatial transformer for your cross-attention conditioning...' + ), "Fool!! You forgot to use the spatial transformer for your cross-attention conditioning..." from omegaconf.listconfig import ListConfig if type(context_dim) == ListConfig: @@ -517,12 +502,12 @@ class UNetModel(nn.Module): if num_heads == -1: assert ( num_head_channels != -1 - ), 'Either num_heads or num_head_channels has to be set' + ), "Either num_heads or num_head_channels has to be set" if num_head_channels == -1: assert ( num_heads != -1 - ), 'Either num_heads or num_head_channels has to be set' + ), "Either num_heads or num_head_channels has to be set" self.image_size = image_size self.in_channels = in_channels @@ -641,11 +626,7 @@ class UNetModel(nn.Module): dim_head = num_head_channels if legacy: # num_heads = 1 - dim_head = ( - ch // num_heads - if use_spatial_transformer - else num_head_channels - ) + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, @@ -741,9 +722,7 @@ class UNetModel(nn.Module): up=True, ) if resblock_updown - else Upsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) @@ -752,9 +731,7 @@ class UNetModel(nn.Module): self.out = nn.Sequential( normalization(ch), nn.SiLU(), - zero_module( - conv_nd(dims, model_channels, out_channels, 3, padding=1) - ), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( @@ -790,11 +767,9 @@ class UNetModel(nn.Module): """ assert (y is not None) == ( self.num_classes is not None - ), 'must specify y if and only if the model is class-conditional' + ), "must specify y if and only if the model is class-conditional" hs = [] - t_emb = timestep_embedding( - timesteps, self.model_channels, repeat_only=False - ) + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) emb = self.time_embed(t_emb) if self.num_classes is not None: @@ -842,7 +817,7 @@ class EncoderUNetModel(nn.Module): use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, - pool='adaptive', + pool="adaptive", *args, **kwargs, ): @@ -962,7 +937,7 @@ class EncoderUNetModel(nn.Module): ) self._feature_size += ch self.pool = pool - if pool == 'adaptive': + if pool == "adaptive": self.out = nn.Sequential( normalization(ch), nn.SiLU(), @@ -970,7 +945,7 @@ class EncoderUNetModel(nn.Module): zero_module(conv_nd(dims, ch, out_channels, 1)), nn.Flatten(), ) - elif pool == 'attention': + elif pool == "attention": assert num_head_channels != -1 self.out = nn.Sequential( normalization(ch), @@ -979,13 +954,13 @@ class EncoderUNetModel(nn.Module): (image_size // ds), ch, num_head_channels, out_channels ), ) - elif pool == 'spatial': + elif pool == "spatial": self.out = nn.Sequential( nn.Linear(self._feature_size, 2048), nn.ReLU(), nn.Linear(2048, self.out_channels), ) - elif pool == 'spatial_v2': + elif pool == "spatial_v2": self.out = nn.Sequential( nn.Linear(self._feature_size, 2048), normalization(2048), @@ -993,7 +968,7 @@ class EncoderUNetModel(nn.Module): nn.Linear(2048, self.out_channels), ) else: - raise NotImplementedError(f'Unexpected {pool} pooling') + raise NotImplementedError(f"Unexpected {pool} pooling") def convert_to_fp16(self): """ @@ -1016,18 +991,16 @@ class EncoderUNetModel(nn.Module): :param timesteps: a 1-D batch 
of timesteps. :return: an [N x K] Tensor of outputs. """ - emb = self.time_embed( - timestep_embedding(timesteps, self.model_channels) - ) + emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) results = [] h = x.type(self.dtype) for module in self.input_blocks: h = module(h, emb) - if self.pool.startswith('spatial'): + if self.pool.startswith("spatial"): results.append(h.type(x.dtype).mean(dim=(2, 3))) h = self.middle_block(h, emb) - if self.pool.startswith('spatial'): + if self.pool.startswith("spatial"): results.append(h.type(x.dtype).mean(dim=(2, 3))) h = th.cat(results, axis=-1) return self.out(h) diff --git a/invokeai/backend/stable_diffusion/diffusionmodules/util.py b/invokeai/backend/stable_diffusion/diffusionmodules/util.py index a23f1abba4..b71b0f06f9 100644 --- a/invokeai/backend/stable_diffusion/diffusionmodules/util.py +++ b/invokeai/backend/stable_diffusion/diffusionmodules/util.py @@ -8,11 +8,12 @@ # thanks! -import os import math +import os + +import numpy as np import torch import torch.nn as nn -import numpy as np from einops import repeat from ...util.util import instantiate_from_config @@ -21,7 +22,7 @@ from ...util.util import instantiate_from_config def make_beta_schedule( schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3 ): - if schedule == 'linear': + if schedule == "linear": betas = ( torch.linspace( linear_start**0.5, @@ -32,10 +33,9 @@ def make_beta_schedule( ** 2 ) - elif schedule == 'cosine': + elif schedule == "cosine": timesteps = ( - torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep - + cosine_s + torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s ) alphas = timesteps / (1 + cosine_s) * np.pi / 2 alphas = torch.cos(alphas).pow(2) @@ -43,15 +43,13 @@ def make_beta_schedule( betas = 1 - alphas[1:] / alphas[:-1] betas = np.clip(betas, a_min=0, a_max=0.999) - elif schedule == 'sqrt_linear': + elif schedule == "sqrt_linear": betas = torch.linspace( linear_start, linear_end, n_timestep, dtype=torch.float64 ) - elif schedule == 'sqrt': + elif schedule == "sqrt": betas = ( - torch.linspace( - linear_start, linear_end, n_timestep, dtype=torch.float64 - ) + torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 ) else: @@ -62,19 +60,14 @@ def make_beta_schedule( def make_ddim_timesteps( ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True ): - if ddim_discr_method == 'uniform': + if ddim_discr_method == "uniform": c = num_ddpm_timesteps // num_ddim_timesteps if c < 1: - c = 1 + c = 1 ddim_timesteps = (np.arange(0, num_ddim_timesteps) * c).astype(int) - elif ddim_discr_method == 'quad': + elif ddim_discr_method == "quad": ddim_timesteps = ( - ( - np.linspace( - 0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps - ) - ) - ** 2 + (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2 ).astype(int) else: raise NotImplementedError( @@ -87,18 +80,14 @@ def make_ddim_timesteps( # steps_out = ddim_timesteps if verbose: - print(f'Selected timesteps for ddim sampler: {steps_out}') + print(f"Selected timesteps for ddim sampler: {steps_out}") return steps_out -def make_ddim_sampling_parameters( - alphacums, ddim_timesteps, eta, verbose=True -): +def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): # select alphas for computing the variance schedule alphas = alphacums[ddim_timesteps] - alphas_prev = np.asarray( - [alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist() - ) + alphas_prev = 
np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) # according the the formula provided in https://arxiv.org/abs/2010.02502 sigmas = eta * np.sqrt( @@ -106,11 +95,11 @@ def make_ddim_sampling_parameters( ) if verbose: print( - f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}' + f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}" ) print( - f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}' + f"For the chosen value of eta, which is {eta}, " + f"this results in the following sigma_t schedule for ddim sampler {sigmas}" ) return sigmas, alphas, alphas_prev @@ -150,9 +139,7 @@ def checkpoint(func, inputs, params, flag): explicitly take as arguments. :param flag: if False, disable gradient checkpointing. """ - if ( - False - ): # disabled checkpointing to allow requires_grad = False for main model + if False: # disabled checkpointing to allow requires_grad = False for main model args = tuple(inputs) + tuple(params) return CheckpointFunction.apply(func, len(inputs), *args) else: @@ -172,9 +159,7 @@ class CheckpointFunction(torch.autograd.Function): @staticmethod def backward(ctx, *output_grads): - ctx.input_tensors = [ - x.detach().requires_grad_(True) for x in ctx.input_tensors - ] + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] with torch.enable_grad(): # Fixes a bug where the first op in run_function modifies the # Tensor storage in place, which is not allowed for detach()'d @@ -216,7 +201,7 @@ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): [embedding, torch.zeros_like(embedding[:, :1])], dim=-1 ) else: - embedding = repeat(timesteps, 'b -> b d', d=dim) + embedding = repeat(timesteps, "b -> b d", d=dim) return embedding @@ -269,7 +254,7 @@ def conv_nd(dims, *args, **kwargs): return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) - raise ValueError(f'unsupported dimensions: {dims}') + raise ValueError(f"unsupported dimensions: {dims}") def linear(*args, **kwargs): @@ -289,21 +274,19 @@ def avg_pool_nd(dims, *args, **kwargs): return nn.AvgPool2d(*args, **kwargs) elif dims == 3: return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f'unsupported dimensions: {dims}') + raise ValueError(f"unsupported dimensions: {dims}") class HybridConditioner(nn.Module): def __init__(self, c_concat_config, c_crossattn_config): super().__init__() self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config( - c_crossattn_config - ) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) def forward(self, c_concat, c_crossattn): c_concat = self.concat_conditioner(c_concat) c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + return {"c_concat": [c_concat], "c_crossattn": [c_crossattn]} def noise_like(shape, device, repeat=False): diff --git a/invokeai/backend/stable_diffusion/distributions/distributions.py b/invokeai/backend/stable_diffusion/distributions/distributions.py index 67ed535791..016be35523 100644 --- a/invokeai/backend/stable_diffusion/distributions/distributions.py +++ b/invokeai/backend/stable_diffusion/distributions/distributions.py @@ -1,5 +1,5 @@ -import torch import numpy as np +import torch class AbstractDistribution: @@ -64,9 +64,7 @@ class DiagonalGaussianDistribution(object): return torch.Tensor([0.0]) logtwopi = 
np.log(2.0 * np.pi) return 0.5 * torch.sum( - logtwopi - + self.logvar - + torch.pow(sample - self.mean, 2) / self.var, + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims, ) @@ -86,7 +84,7 @@ def normal_kl(mean1, logvar1, mean2, logvar2): if isinstance(obj, torch.Tensor): tensor = obj break - assert tensor is not None, 'at least one argument must be a Tensor' + assert tensor is not None, "at least one argument must be a Tensor" # Force variances to be Tensors. Broadcasting helps convert scalars to # Tensors, but it does not work for torch.exp(). diff --git a/invokeai/backend/stable_diffusion/ema.py b/invokeai/backend/stable_diffusion/ema.py index 2ceec5f0e7..880ca3d205 100644 --- a/invokeai/backend/stable_diffusion/ema.py +++ b/invokeai/backend/stable_diffusion/ema.py @@ -6,12 +6,12 @@ class LitEma(nn.Module): def __init__(self, model, decay=0.9999, use_num_upates=True): super().__init__() if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') + raise ValueError("Decay must be between 0 and 1") self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer("decay", torch.tensor(decay, dtype=torch.float32)) self.register_buffer( - 'num_updates', + "num_updates", torch.tensor(0, dtype=torch.int) if use_num_upates else torch.tensor(-1, dtype=torch.int), @@ -20,7 +20,7 @@ class LitEma(nn.Module): for name, p in model.named_parameters(): if p.requires_grad: # remove as '.'-character is not allowed in buffers - s_name = name.replace('.', '') + s_name = name.replace(".", "") self.m_name2s_name.update({name: s_name}) self.register_buffer(s_name, p.clone().detach().data) @@ -31,9 +31,7 @@ class LitEma(nn.Module): if self.num_updates >= 0: self.num_updates += 1 - decay = min( - self.decay, (1 + self.num_updates) / (10 + self.num_updates) - ) + decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) one_minus_decay = 1.0 - decay @@ -44,9 +42,7 @@ class LitEma(nn.Module): for key in m_param: if m_param[key].requires_grad: sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as( - m_param[key] - ) + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) shadow_params[sname].sub_( one_minus_decay * (shadow_params[sname] - m_param[key]) ) @@ -58,9 +54,7 @@ class LitEma(nn.Module): shadow_params = dict(self.named_buffers()) for key in m_param: if m_param[key].requires_grad: - m_param[key].data.copy_( - shadow_params[self.m_name2s_name[key]].data - ) + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) else: assert not key in self.m_name2s_name diff --git a/invokeai/backend/stable_diffusion/encoders/modules.py b/invokeai/backend/stable_diffusion/encoders/modules.py index 3c20b8d119..54afd12bc9 100644 --- a/invokeai/backend/stable_diffusion/encoders/modules.py +++ b/invokeai/backend/stable_diffusion/encoders/modules.py @@ -7,14 +7,14 @@ import kornia import torch import torch.nn as nn from einops import repeat -from transformers import CLIPTokenizer, CLIPTextModel +from transformers import CLIPTextModel, CLIPTokenizer -from ldm.invoke.devices import choose_torch_device -from invokeai.backend.globals import global_cache_dir -from ldm.modules.x_transformer import ( +from ...util import choose_torch_device +from ..globals import global_cache_dir +from ..x_transformer import ( # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? 
--> test Encoder, TransformerWrapper, -) # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test +) def _expand_mask(mask, dtype, tgt_len=None): @@ -24,9 +24,7 @@ def _expand_mask(mask, dtype, tgt_len=None): bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len - expanded_mask = ( - mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - ) + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask @@ -54,7 +52,7 @@ class AbstractEncoder(nn.Module): class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class'): + def __init__(self, embed_dim, n_classes=1000, key="class"): super().__init__() self.key = key self.embedding = nn.Embedding(n_classes, embed_dim) @@ -99,20 +97,14 @@ class TransformerEmbedder(AbstractEncoder): class BERTTokenizer(AbstractEncoder): """Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)""" - def __init__( - self, device=choose_torch_device(), vq_interface=True, max_length=77 - ): + def __init__(self, device=choose_torch_device(), vq_interface=True, max_length=77): super().__init__() - from transformers import ( - BertTokenizerFast, - ) + from transformers import BertTokenizerFast - cache = global_cache_dir('hub') + cache = global_cache_dir("hub") try: self.tokenizer = BertTokenizerFast.from_pretrained( - 'bert-base-uncased', - cache_dir=cache, - local_files_only=True + "bert-base-uncased", cache_dir=cache, local_files_only=True ) except OSError: raise SystemExit( @@ -129,10 +121,10 @@ class BERTTokenizer(AbstractEncoder): max_length=self.max_length, return_length=True, return_overflowing_tokens=False, - padding='max_length', - return_tensors='pt', + padding="max_length", + return_tensors="pt", ) - tokens = batch_encoding['input_ids'].to(self.device) + tokens = batch_encoding["input_ids"].to(self.device) return tokens @torch.no_grad() @@ -150,21 +142,19 @@ class BERTEmbedder(AbstractEncoder): """Uses the BERT tokenizr model and add some transformer encoder layers""" def __init__( - self, - n_embed, - n_layer, - vocab_size=30522, - max_seq_len=77, - device=choose_torch_device(), - use_tokenizer=True, - embedding_dropout=0.0, + self, + n_embed, + n_layer, + vocab_size=30522, + max_seq_len=77, + device=choose_torch_device(), + use_tokenizer=True, + embedding_dropout=0.0, ): super().__init__() self.use_tknz_fn = use_tokenizer if self.use_tknz_fn: - self.tknz_fn = BERTTokenizer( - vq_interface=False, max_length=max_seq_len - ) + self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) self.device = device self.transformer = TransformerWrapper( num_tokens=vocab_size, @@ -192,7 +182,7 @@ class SpatialRescaler(nn.Module): def __init__( self, n_stages=1, - method='bilinear', + method="bilinear", multiplier=0.5, in_channels=3, out_channels=None, @@ -202,25 +192,21 @@ class SpatialRescaler(nn.Module): self.n_stages = n_stages assert self.n_stages >= 0 assert method in [ - 'nearest', - 'linear', - 'bilinear', - 'trilinear', - 'bicubic', - 'area', + "nearest", + "linear", + "bilinear", + "trilinear", + "bicubic", + "area", ] self.multiplier = multiplier - self.interpolator = partial( - torch.nn.functional.interpolate, mode=method - ) + self.interpolator = partial(torch.nn.functional.interpolate, mode=method) self.remap_output = out_channels is not None if self.remap_output: print( - f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.' 
- ) - self.channel_mapper = nn.Conv2d( - in_channels, out_channels, 1, bias=bias + f"Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing." ) + self.channel_mapper = nn.Conv2d(in_channels, out_channels, 1, bias=bias) def forward(self, x): for stage in range(self.n_stages): @@ -236,27 +222,24 @@ class SpatialRescaler(nn.Module): class FrozenCLIPEmbedder(AbstractEncoder): """Uses the CLIP transformer encoder for text (from Hugging Face)""" + tokenizer: CLIPTokenizer transformer: CLIPTextModel def __init__( self, - version:str='openai/clip-vit-large-patch14', - max_length:int=77, - tokenizer:Optional[CLIPTokenizer]=None, - transformer:Optional[CLIPTextModel]=None, + version: str = "openai/clip-vit-large-patch14", + max_length: int = 77, + tokenizer: Optional[CLIPTokenizer] = None, + transformer: Optional[CLIPTextModel] = None, ): super().__init__() - cache = global_cache_dir('hub') + cache = global_cache_dir("hub") self.tokenizer = tokenizer or CLIPTokenizer.from_pretrained( - version, - cache_dir=cache, - local_files_only=True + version, cache_dir=cache, local_files_only=True ) self.transformer = transformer or CLIPTextModel.from_pretrained( - version, - cache_dir=cache, - local_files_only=True + version, cache_dir=cache, local_files_only=True ) self.max_length = max_length self.freeze() @@ -268,7 +251,6 @@ class FrozenCLIPEmbedder(AbstractEncoder): inputs_embeds=None, embedding_manager=None, ) -> torch.Tensor: - seq_length = ( input_ids.shape[-1] if input_ids is not None @@ -289,8 +271,8 @@ class FrozenCLIPEmbedder(AbstractEncoder): return embeddings - self.transformer.text_model.embeddings.forward = ( - embedding_forward.__get__(self.transformer.text_model.embeddings) + self.transformer.text_model.embeddings.forward = embedding_forward.__get__( + self.transformer.text_model.embeddings ) def encoder_forward( @@ -313,9 +295,7 @@ class FrozenCLIPEmbedder(AbstractEncoder): else self.config.output_hidden_states ) return_dict = ( - return_dict - if return_dict is not None - else self.config.use_return_dict + return_dict if return_dict is not None else self.config.use_return_dict ) encoder_states = () if output_hidden_states else None @@ -368,13 +348,11 @@ class FrozenCLIPEmbedder(AbstractEncoder): else self.config.output_hidden_states ) return_dict = ( - return_dict - if return_dict is not None - else self.config.use_return_dict + return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is None: - raise ValueError('You have to specify either input_ids') + raise ValueError("You have to specify either input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) @@ -395,9 +373,7 @@ class FrozenCLIPEmbedder(AbstractEncoder): # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _expand_mask( - attention_mask, hidden_states.dtype - ) + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) last_hidden_state = self.encoder( inputs_embeds=hidden_states, @@ -436,9 +412,7 @@ class FrozenCLIPEmbedder(AbstractEncoder): embedding_manager=embedding_manager, ) - self.transformer.forward = transformer_forward.__get__( - self.transformer - ) + self.transformer.forward = transformer_forward.__get__(self.transformer) def freeze(self): self.transformer = self.transformer.eval() @@ -452,10 +426,10 @@ class FrozenCLIPEmbedder(AbstractEncoder): max_length=self.max_length, return_length=True, return_overflowing_tokens=False, - 
padding='max_length', - return_tensors='pt', + padding="max_length", + return_tensors="pt", ) - tokens = batch_encoding['input_ids'].to(self.device) + tokens = batch_encoding["input_ids"].to(self.device) z = self.transformer(input_ids=tokens, **kwargs) return z @@ -471,25 +445,25 @@ class FrozenCLIPEmbedder(AbstractEncoder): def device(self, device): self.transformer.to(device=device) -class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): +class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): fragment_weights_key = "fragment_weights" return_tokens_key = "return_tokens" - def set_textual_inversion_manager(self, manager): #TextualInversionManager): + def set_textual_inversion_manager(self, manager): # TextualInversionManager): # TODO all of the weighting and expanding stuff needs be moved out of this class self.textual_inversion_manager = manager def forward(self, text: list, **kwargs): # TODO all of the weighting and expanding stuff needs be moved out of this class - ''' + """ :param text: A batch of prompt strings, or, a batch of lists of fragments of prompt strings to which different weights shall be applied. :param kwargs: If the keyword arg "fragment_weights" is passed, it shall contain a batch of lists of weights for the prompt fragments. In this case text must contain batches of lists of prompt fragments. :return: A tensor of shape (B, 77, 768) containing weighted embeddings - ''' + """ if self.fragment_weights_key not in kwargs: # fallback to base class implementation return super().forward(text, **kwargs) @@ -507,7 +481,6 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): batch_z = None batch_tokens = None for fragments, weights in zip(text, fragment_weights): - # First, weight tokens in individual fragments by scaling the feature vectors as requested (effectively # applying a multiplier to the CFG scale on a per-token basis). # For tokens weighted<1, intuitively we want SD to become not merely *less* interested in the concept @@ -520,7 +493,9 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): # handle weights >=1 tokens, per_token_weights = self.get_tokens_and_weights(fragments, weights) - base_embedding = self.build_weighted_embedding_tensor(tokens, per_token_weights, **kwargs) + base_embedding = self.build_weighted_embedding_tensor( + tokens, per_token_weights, **kwargs + ) # this is our starting point embeddings = base_embedding.unsqueeze(0) @@ -536,12 +511,18 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): # such that the resulting lerped embedding is exactly half-way between "mountain man" and "mountain". 
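
The loop that follows implements the down-weighting described in the comments above. As a minimal standalone sketch of just that arithmetic (function and variable names here are illustrative, not part of the patch): a fragment with weight w < 1 is blended toward the embedding built without it, using tan((1 - w) * pi / 2) as the lerp weight and a normalized weighted sum.

import math
import torch

def blend_with_weights(embeddings: torch.Tensor, weights: list[float]) -> torch.Tensor:
    # embeddings: (N, 77, 768) stack of candidate embeddings, weights: one float per entry
    w = torch.tensor(weights, dtype=embeddings.dtype, device=embeddings.device)
    w = w / w.sum()                                   # normalize so magnitudes stay in range
    return (embeddings * w.reshape(-1, 1, 1)).sum(dim=0)

base = torch.randn(1, 77, 768)                        # stand-in for the full-prompt embedding
without_fragment = torch.randn(1, 77, 768)            # stand-in for the embedding with one fragment removed
fragment_weight = 0.5                                 # requested weight for that fragment
lerp_w = math.tan((1.0 - max(1e-9, fragment_weight)) * math.pi / 2)  # 0 at w=1, grows without bound as w -> 0
blended = blend_with_weights(torch.cat([base, without_fragment]), [1.0, lerp_w])
# blended has shape (77, 768); at fragment_weight=0.5 both inputs contribute equally
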
for index, fragment_weight in enumerate(weights): if fragment_weight < 1: - fragments_without_this = fragments[:index] + fragments[index+1:] - weights_without_this = weights[:index] + weights[index+1:] - tokens, per_token_weights = self.get_tokens_and_weights(fragments_without_this, weights_without_this) - embedding_without_this = self.build_weighted_embedding_tensor(tokens, per_token_weights, **kwargs) + fragments_without_this = fragments[:index] + fragments[index + 1 :] + weights_without_this = weights[:index] + weights[index + 1 :] + tokens, per_token_weights = self.get_tokens_and_weights( + fragments_without_this, weights_without_this + ) + embedding_without_this = self.build_weighted_embedding_tensor( + tokens, per_token_weights, **kwargs + ) - embeddings = torch.cat((embeddings, embedding_without_this.unsqueeze(0)), dim=1) + embeddings = torch.cat( + (embeddings, embedding_without_this.unsqueeze(0)), dim=1 + ) # weight of the embedding *without* this fragment gets *stronger* as its weight approaches 0 # if fragment_weight = 0, basically we want embedding_without_this to completely overwhelm base_embedding # therefore: @@ -554,29 +535,43 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): # inf at PI/2 # -> tan((1-weight)*PI/2) should give us ideal lerp weights epsilon = 1e-9 - fragment_weight = max(epsilon, fragment_weight) # inf is bad - embedding_lerp_weight = math.tan((1.0 - fragment_weight) * math.pi / 2) + fragment_weight = max(epsilon, fragment_weight) # inf is bad + embedding_lerp_weight = math.tan( + (1.0 - fragment_weight) * math.pi / 2 + ) # todo handle negative weight? per_embedding_weights.append(embedding_lerp_weight) - lerped_embeddings = self.apply_embedding_weights(embeddings, per_embedding_weights, normalize=True).squeeze(0) + lerped_embeddings = self.apply_embedding_weights( + embeddings, per_embedding_weights, normalize=True + ).squeeze(0) - #print(f"assembled tokens for '{fragments}' into tensor of shape {lerped_embeddings.shape}") + # print(f"assembled tokens for '{fragments}' into tensor of shape {lerped_embeddings.shape}") # append to batch - batch_z = lerped_embeddings.unsqueeze(0) if batch_z is None else torch.cat([batch_z, lerped_embeddings.unsqueeze(0)], dim=1) - batch_tokens = tokens.unsqueeze(0) if batch_tokens is None else torch.cat([batch_tokens, tokens.unsqueeze(0)], dim=1) + batch_z = ( + lerped_embeddings.unsqueeze(0) + if batch_z is None + else torch.cat([batch_z, lerped_embeddings.unsqueeze(0)], dim=1) + ) + batch_tokens = ( + tokens.unsqueeze(0) + if batch_tokens is None + else torch.cat([batch_tokens, tokens.unsqueeze(0)], dim=1) + ) # should have shape (B, 77, 768) - #print(f"assembled all tokens into tensor of shape {batch_z.shape}") + # print(f"assembled all tokens into tensor of shape {batch_z.shape}") if should_return_tokens: return batch_z, batch_tokens else: return batch_z - def get_token_ids(self, fragments: list[str], include_start_and_end_markers: bool = True) -> list[list[int]]: + def get_token_ids( + self, fragments: list[str], include_start_and_end_markers: bool = True + ) -> list[list[int]]: """ Convert a list of strings like `["a cat", "sitting", "on a mat"]` into a list of lists of token ids like `[[bos, 0, 1, eos], [bos, 2, eos], [bos, 3, 0, 4, eos]]`. 
bos/eos markers are skipped if @@ -594,58 +589,81 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): truncation=True, max_length=self.max_length, return_overflowing_tokens=False, - padding='do_not_pad', + padding="do_not_pad", return_tensors=None, # just give me lists of ints - )['input_ids'] + )["input_ids"] result = [] for token_ids in token_ids_list: # trim eos/bos token_ids = token_ids[1:-1] # pad for textual inversions with vector length >1 - token_ids = self.textual_inversion_manager.expand_textual_inversion_token_ids_if_necessary(token_ids) + token_ids = self.textual_inversion_manager.expand_textual_inversion_token_ids_if_necessary( + token_ids + ) # restrict length to max_length-2 (leaving room for bos/eos) - token_ids = token_ids[0:self.max_length - 2] + token_ids = token_ids[0 : self.max_length - 2] # add back eos/bos if requested if include_start_and_end_markers: - token_ids = [self.tokenizer.bos_token_id] + token_ids + [self.tokenizer.eos_token_id] + token_ids = ( + [self.tokenizer.bos_token_id] + + token_ids + + [self.tokenizer.eos_token_id] + ) result.append(token_ids) return result - @classmethod - def apply_embedding_weights(self, embeddings: torch.Tensor, per_embedding_weights: list[float], normalize:bool) -> torch.Tensor: - per_embedding_weights = torch.tensor(per_embedding_weights, dtype=embeddings.dtype, device=embeddings.device) + def apply_embedding_weights( + self, + embeddings: torch.Tensor, + per_embedding_weights: list[float], + normalize: bool, + ) -> torch.Tensor: + per_embedding_weights = torch.tensor( + per_embedding_weights, dtype=embeddings.dtype, device=embeddings.device + ) if normalize: - per_embedding_weights = per_embedding_weights / torch.sum(per_embedding_weights) - reshaped_weights = per_embedding_weights.reshape(per_embedding_weights.shape + (1, 1,)) - #reshaped_weights = per_embedding_weights.reshape(per_embedding_weights.shape + (1,1,)).expand(embeddings.shape) + per_embedding_weights = per_embedding_weights / torch.sum( + per_embedding_weights + ) + reshaped_weights = per_embedding_weights.reshape( + per_embedding_weights.shape + + ( + 1, + 1, + ) + ) + # reshaped_weights = per_embedding_weights.reshape(per_embedding_weights.shape + (1,1,)).expand(embeddings.shape) return torch.sum(embeddings * reshaped_weights, dim=1) # lerped embeddings has shape (77, 768) - - def get_tokens_and_weights(self, fragments: list[str], weights: list[float]) -> (torch.Tensor, torch.Tensor): - ''' + def get_tokens_and_weights( + self, fragments: list[str], weights: list[float] + ) -> (torch.Tensor, torch.Tensor): + """ :param fragments: :param weights: Per-fragment weights (CFG scaling). No need for these to be normalized. They will not be normalized here and that's fine. 
:return: - ''' + """ # empty is meaningful if len(fragments) == 0 and len(weights) == 0: - fragments = [''] + fragments = [""] weights = [1] - per_fragment_token_ids = self.get_token_ids(fragments, include_start_and_end_markers=False) + per_fragment_token_ids = self.get_token_ids( + fragments, include_start_and_end_markers=False + ) all_token_ids = [] per_token_weights = [] - #print("all fragments:", fragments, weights) + # print("all fragments:", fragments, weights) for index, fragment in enumerate(per_fragment_token_ids): weight = float(weights[index]) - #print("processing fragment", fragment, weight) + # print("processing fragment", fragment, weight) this_fragment_token_ids = per_fragment_token_ids[index] - #print("fragment", fragment, "processed to", this_fragment_token_ids) + # print("fragment", fragment, "processed to", this_fragment_token_ids) # append all_token_ids += this_fragment_token_ids # fill out weights tensor with one float per token @@ -654,60 +672,85 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): # leave room for bos/eos max_token_count_without_bos_eos_markers = self.max_length - 2 if len(all_token_ids) > max_token_count_without_bos_eos_markers: - excess_token_count = len(all_token_ids) - max_token_count_without_bos_eos_markers + excess_token_count = ( + len(all_token_ids) - max_token_count_without_bos_eos_markers + ) # TODO build nice description string of how the truncation was applied # this should be done by calling self.tokenizer.convert_ids_to_tokens() then passing the result to # self.tokenizer.convert_tokens_to_string() for the token_ids on each side of the truncation limit. - print(f">> Prompt is {excess_token_count} token(s) too long and has been truncated") + print( + f">> Prompt is {excess_token_count} token(s) too long and has been truncated" + ) all_token_ids = all_token_ids[0:max_token_count_without_bos_eos_markers] - per_token_weights = per_token_weights[0:max_token_count_without_bos_eos_markers] + per_token_weights = per_token_weights[ + 0:max_token_count_without_bos_eos_markers + ] # pad out to a 77-entry array: [bos_token, , eos_token, pad_token…] # (77 = self.max_length) - all_token_ids = [self.tokenizer.bos_token_id] + all_token_ids + [self.tokenizer.eos_token_id] + all_token_ids = ( + [self.tokenizer.bos_token_id] + + all_token_ids + + [self.tokenizer.eos_token_id] + ) per_token_weights = [1.0] + per_token_weights + [1.0] pad_length = self.max_length - len(all_token_ids) all_token_ids += [self.tokenizer.pad_token_id] * pad_length per_token_weights += [1.0] * pad_length - all_token_ids_tensor = torch.tensor(all_token_ids, dtype=torch.long).to(self.device) - per_token_weights_tensor = torch.tensor(per_token_weights, dtype=torch.float32).to(self.device) - #print(f"assembled all_token_ids_tensor with shape {all_token_ids_tensor.shape}") + all_token_ids_tensor = torch.tensor(all_token_ids, dtype=torch.long).to( + self.device + ) + per_token_weights_tensor = torch.tensor( + per_token_weights, dtype=torch.float32 + ).to(self.device) + # print(f"assembled all_token_ids_tensor with shape {all_token_ids_tensor.shape}") return all_token_ids_tensor, per_token_weights_tensor - def build_weighted_embedding_tensor(self, token_ids: torch.Tensor, per_token_weights: torch.Tensor, weight_delta_from_empty=True, **kwargs) -> torch.Tensor: - ''' + def build_weighted_embedding_tensor( + self, + token_ids: torch.Tensor, + per_token_weights: torch.Tensor, + weight_delta_from_empty=True, + **kwargs, + ) -> torch.Tensor: + """ Build a tensor representing the 
passed-in tokens, each of which has a weight. :param token_ids: A tensor of shape (77) containing token ids (integers) :param per_token_weights: A tensor of shape (77) containing weights (floats) :param method: Whether to multiply the whole feature vector for each token or just its distance from an "empty" feature vector :param kwargs: passed on to self.transformer() :return: A tensor of shape (1, 77, 768) representing the requested weighted embeddings. - ''' - #print(f"building weighted embedding tensor for {tokens} with weights {per_token_weights}") + """ + # print(f"building weighted embedding tensor for {tokens} with weights {per_token_weights}") if token_ids.shape != torch.Size([self.max_length]): - raise ValueError(f"token_ids has shape {token_ids.shape} - expected [{self.max_length}]") + raise ValueError( + f"token_ids has shape {token_ids.shape} - expected [{self.max_length}]" + ) z = self.transformer(input_ids=token_ids.unsqueeze(0), **kwargs) - batch_weights_expanded = per_token_weights.reshape(per_token_weights.shape + (1,)).expand(z.shape) + batch_weights_expanded = per_token_weights.reshape( + per_token_weights.shape + (1,) + ).expand(z.shape) if weight_delta_from_empty: - empty_tokens = self.tokenizer([''] * z.shape[0], - truncation=True, - max_length=self.max_length, - padding='max_length', - return_tensors='pt' - )['input_ids'].to(self.device) + empty_tokens = self.tokenizer( + [""] * z.shape[0], + truncation=True, + max_length=self.max_length, + padding="max_length", + return_tensors="pt", + )["input_ids"].to(self.device) empty_z = self.transformer(input_ids=empty_tokens, **kwargs) z_delta_from_empty = z - empty_z weighted_z = empty_z + (z_delta_from_empty * batch_weights_expanded) - #weighted_z_delta_from_empty = (weighted_z-empty_z) - #print("weighted z has delta from empty with sum", weighted_z_delta_from_empty.sum().item(), "mean", weighted_z_delta_from_empty.mean().item() ) + # weighted_z_delta_from_empty = (weighted_z-empty_z) + # print("weighted z has delta from empty with sum", weighted_z_delta_from_empty.sum().item(), "mean", weighted_z_delta_from_empty.mean().item() ) - #print("using empty-delta method, first 5 rows:") - #print(weighted_z[:5]) + # print("using empty-delta method, first 5 rows:") + # print(weighted_z[:5]) return weighted_z @@ -716,7 +759,7 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder): z *= batch_weights_expanded after_weighting_mean = z.mean() # correct the mean. 
not sure if this is right but it's what the automatic1111 fork of SD does - mean_correction_factor = original_mean/after_weighting_mean + mean_correction_factor = original_mean / after_weighting_mean z *= mean_correction_factor return z @@ -728,7 +771,7 @@ class FrozenCLIPTextEmbedder(nn.Module): def __init__( self, - version='ViT-L/14', + version="ViT-L/14", device=choose_torch_device(), max_length=77, n_repeat=1, @@ -757,7 +800,7 @@ class FrozenCLIPTextEmbedder(nn.Module): z = self(text) if z.ndim == 2: z = z[:, None, :] - z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) + z = repeat(z, "b 1 d -> b k d", k=self.n_repeat) return z @@ -779,12 +822,12 @@ class FrozenClipImageEmbedder(nn.Module): self.antialias = antialias self.register_buffer( - 'mean', + "mean", torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False, ) self.register_buffer( - 'std', + "std", torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False, ) @@ -794,7 +837,7 @@ class FrozenClipImageEmbedder(nn.Module): x = kornia.geometry.resize( x, (224, 224), - interpolation='bicubic', + interpolation="bicubic", align_corners=True, antialias=self.antialias, ) @@ -808,8 +851,8 @@ class FrozenClipImageEmbedder(nn.Module): return self.model.encode_image(self.preprocess(x)) -if __name__ == '__main__': - from ldm.util import count_params +if __name__ == "__main__": + from ...util.util import count_params model = FrozenCLIPEmbedder() count_params(model, verbose=True) diff --git a/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py index b51217bd48..1760206073 100644 --- a/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py +++ b/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py @@ -10,20 +10,19 @@ # -------------------------------------------- """ -import numpy as np -import cv2 -import torch - -from functools import partial import random -from scipy import ndimage +from functools import partial + +import albumentations +import cv2 +import ldm.modules.image_degradation.utils_image as util +import numpy as np import scipy import scipy.stats as ss +import torch +from scipy import ndimage from scipy.interpolate import interp2d from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util def modcrop_np(img, sf): @@ -54,9 +53,7 @@ def analytic_kernel(k): # Loop over the small kernel to fill the big one for r in range(k_size): for c in range(k_size): - big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += ( - k[r, c] * k - ) + big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += k[r, c] * k # Crop the edges of the big kernel to ignore very small values and increase run time of SR crop = k_size // 2 cropped_big_k = big_k[crop:-crop, crop:-crop] @@ -77,9 +74,7 @@ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): """ v = np.dot( - np.array( - [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]] - ), + np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1.0, 0.0]), ) V = np.array([[v[0], v[1]], [v[1], -v[0]]]) @@ -139,13 +134,11 @@ def blur(x, k): """ n, c = x.shape[:2] p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode="replicate") k = k.repeat(1, c, 1, 1) k = k.view(-1, 1, k.shape[2], k.shape[3]) x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d( - 
x, k, bias=None, stride=1, padding=0, groups=n * c - ) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) x = x.view(n, c, x.shape[2], x.shape[3]) return x @@ -172,16 +165,12 @@ def gen_kernel( # Set COV matrix using Lambdas and Theta LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array( - [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]] - ) + Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) SIGMA = Q @ LAMBDA @ Q.T INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * ( - scale_factor - 1 - ) # - 0.5 * (scale_factor - k_size % 2) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) MU = MU[None, None, :, None] # Create meshgrid for Gaussian @@ -206,9 +195,7 @@ def fspecial_gaussian(hsize, sigma): hsize = [hsize, hsize] siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] std = sigma - [x, y] = np.meshgrid( - np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1) - ) + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) arg = -(x * x + y * y) / (2 * std * std) h = np.exp(arg) h[h < scipy.finfo(float).eps * h.max()] = 0 @@ -232,9 +219,9 @@ def fspecial(filter_type, *args, **kwargs): python code from: https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py """ - if filter_type == 'gaussian': + if filter_type == "gaussian": return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': + if filter_type == "laplacian": return fspecial_laplacian(*args, **kwargs) @@ -275,7 +262,7 @@ def srmd_degradation(x, k, sf=3): } """ x = ndimage.filters.convolve( - x, np.expand_dims(k, axis=2), mode='wrap' + x, np.expand_dims(k, axis=2), mode="wrap" ) # 'nearest' | 'mirror' x = bicubic_degradation(x, sf=sf) return x @@ -299,7 +286,7 @@ def dpsr_degradation(x, k, sf=3): } """ x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap") return x @@ -312,7 +299,7 @@ def classical_degradation(x, k, sf=3): Return: downsampled LR image """ - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap") # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) st = 0 return x[st::sf, st::sf, ...] 
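
As a rough standalone illustration of the "blur then subsample" degradation performed by srmd_degradation and classical_degradation above (assuming a float32 HWC image in [0, 1] and a normalized 2-D kernel; this uses scipy.ndimage.convolve and is not part of the patch):

import numpy as np
from scipy import ndimage

def classical_degrade(img: np.ndarray, kernel: np.ndarray, sf: int = 4) -> np.ndarray:
    # blur with the kernel (broadcast over channels), then keep every sf-th pixel
    blurred = ndimage.convolve(img, np.expand_dims(kernel, axis=2), mode="wrap")
    return blurred[::sf, ::sf, ...]                    # nearest-neighbour downsampling by stride

img = np.random.rand(128, 128, 3).astype(np.float32)
k = np.ones((5, 5), dtype=np.float32) / 25.0           # box-blur stand-in for fspecial("gaussian", ...)
lr = classical_degrade(img, k, sf=4)                    # -> shape (32, 32, 3)
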
@@ -336,7 +323,7 @@ def add_sharpening(img, weight=0.5, radius=50, threshold=10): blur = cv2.GaussianBlur(img, (radius, radius), 0) residual = img - blur mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') + mask = mask.astype("float32") soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) K = img + weight * residual @@ -357,12 +344,8 @@ def add_blur(img, sf=4): l2=l2, ) else: - k = fspecial( - 'gaussian', 2 * random.randint(2, 11) + 3, wd * random.random() - ) - img = ndimage.filters.convolve( - img, np.expand_dims(k, axis=2), mode='mirror' - ) + k = fspecial("gaussian", 2 * random.randint(2, 11) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode="mirror") return img @@ -430,9 +413,9 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25): img = np.clip(img, 0.0, 1.0) rnum = random.random() if rnum > 0.6: - img += img * np.random.normal( - 0, noise_level / 255.0, img.shape - ).astype(np.float32) + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype( + np.float32 + ) elif rnum < 0.4: img += img * np.random.normal( 0, noise_level / 255.0, (*img.shape[:2], 1) @@ -458,8 +441,7 @@ def add_Poisson_noise(img): img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.0 noise_gray = ( - np.random.poisson(img_gray * vals).astype(np.float32) / vals - - img_gray + np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray ) img += noise_gray[:, :, np.newaxis] img = np.clip(img, 0.0, 1.0) @@ -470,7 +452,7 @@ def add_JPEG_noise(img): quality_factor = random.randint(30, 95) img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) result, encimg = cv2.imencode( - '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor] + ".jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor] ) img = cv2.imdecode(encimg, 1) img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) @@ -513,7 +495,7 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') + raise ValueError(f"img size ({h1}X{w1}) is too small!") hq = img.copy() @@ -538,7 +520,6 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): ) for i in shuffle_order: - if i == 0: img = add_blur(img, sf=sf) @@ -556,13 +537,11 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): interpolation=random.choice([1, 2, 3]), ) else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) - k_shifted = ( - k_shifted / k_shifted.sum() - ) # blur with shifted kernel + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel img = ndimage.filters.convolve( - img, np.expand_dims(k_shifted, axis=2), mode='mirror' + img, np.expand_dims(k_shifted, axis=2), mode="mirror" ) img = img[0::sf, 0::sf, ...] 
# nearest downsampling img = np.clip(img, 0.0, 1.0) @@ -644,7 +623,6 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None): ) for i in shuffle_order: - if i == 0: image = add_blur(image, sf=sf) @@ -665,13 +643,11 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None): interpolation=random.choice([1, 2, 3]), ) else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) - k_shifted = ( - k_shifted / k_shifted.sum() - ) # blur with shifted kernel + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel image = ndimage.filters.convolve( - image, np.expand_dims(k_shifted, axis=2), mode='mirror' + image, np.expand_dims(k_shifted, axis=2), mode="mirror" ) image = image[0::sf, 0::sf, ...] # nearest downsampling image = np.clip(image, 0.0, 1.0) @@ -703,7 +679,7 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None): # add final JPEG compression noise image = add_JPEG_noise(image) image = util.single2uint(image) - example = {'image': image} + example = {"image": image} return example @@ -735,7 +711,7 @@ def degradation_bsrgan_plus( h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') + raise ValueError(f"img size ({h1}X{w1}) is too small!") if use_sharp: img = add_sharpening(img) @@ -746,12 +722,8 @@ def degradation_bsrgan_plus( else: shuffle_order = list(range(13)) # local shuffle for noise, JPEG is always the last one - shuffle_order[2:6] = random.sample( - shuffle_order[2:6], len(range(2, 6)) - ) - shuffle_order[9:13] = random.sample( - shuffle_order[9:13], len(range(9, 13)) - ) + shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) + shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 @@ -791,7 +763,7 @@ def degradation_bsrgan_plus( with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) else: - print('check the shuffle!') + print("check the shuffle!") # resize to desired size img = cv2.resize( @@ -809,15 +781,15 @@ def degradation_bsrgan_plus( return img, hq -if __name__ == '__main__': - print('hey') - img = util.imread_uint('utils/test.png', 3) +if __name__ == "__main__": + print("hey") + img = util.imread_uint("utils/test.png", 3) print(img) img = util.uint2single(img) print(img) img = img[:448, :448] h = img.shape[0] // 4 - print('resizing to', h) + print("resizing to", h) sf = 4 deg_fn = partial(degradation_bsrgan_variant, sf=sf) for i in range(20): @@ -826,9 +798,9 @@ if __name__ == '__main__': print(img_lq) img_lq_bicubic = albumentations.SmallestMaxSize( max_size=h, interpolation=cv2.INTER_CUBIC - )(image=img)['image'] + )(image=img)["image"] print(img_lq.shape) - print('bicubic', img_lq_bicubic.shape) + print("bicubic", img_lq_bicubic.shape) print(img_hq.shape) lq_nearest = cv2.resize( util.single2uint(img_lq), @@ -843,4 +815,4 @@ if __name__ == '__main__': img_concat = np.concatenate( [lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1 ) - util.imsave(img_concat, str(i) + '.png') + util.imsave(img_concat, str(i) + ".png") diff --git a/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py index 3500ef7316..1e8eee82b5 100644 --- a/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py +++ b/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py @@ 
-1,18 +1,17 @@ # -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial import random -from scipy import ndimage +from functools import partial + +import albumentations +import cv2 +import ldm.modules.image_degradation.utils_image as util +import numpy as np import scipy import scipy.stats as ss +import torch +from scipy import ndimage from scipy.interpolate import interp2d from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util """ # -------------------------------------------- @@ -54,9 +53,7 @@ def analytic_kernel(k): # Loop over the small kernel to fill the big one for r in range(k_size): for c in range(k_size): - big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += ( - k[r, c] * k - ) + big_k[2 * r : 2 * r + k_size, 2 * c : 2 * c + k_size] += k[r, c] * k # Crop the edges of the big kernel to ignore very small values and increase run time of SR crop = k_size // 2 cropped_big_k = big_k[crop:-crop, crop:-crop] @@ -77,9 +74,7 @@ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): """ v = np.dot( - np.array( - [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]] - ), + np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1.0, 0.0]), ) V = np.array([[v[0], v[1]], [v[1], -v[0]]]) @@ -139,13 +134,11 @@ def blur(x, k): """ n, c = x.shape[:2] p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode="replicate") k = k.repeat(1, c, 1, 1) k = k.view(-1, 1, k.shape[2], k.shape[3]) x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d( - x, k, bias=None, stride=1, padding=0, groups=n * c - ) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) x = x.view(n, c, x.shape[2], x.shape[3]) return x @@ -172,16 +165,12 @@ def gen_kernel( # Set COV matrix using Lambdas and Theta LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array( - [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]] - ) + Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) SIGMA = Q @ LAMBDA @ Q.T INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * ( - scale_factor - 1 - ) # - 0.5 * (scale_factor - k_size % 2) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) MU = MU[None, None, :, None] # Create meshgrid for Gaussian @@ -206,9 +195,7 @@ def fspecial_gaussian(hsize, sigma): hsize = [hsize, hsize] siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] std = sigma - [x, y] = np.meshgrid( - np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1) - ) + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) arg = -(x * x + y * y) / (2 * std * std) h = np.exp(arg) h[h < scipy.finfo(float).eps * h.max()] = 0 @@ -232,9 +219,9 @@ def fspecial(filter_type, *args, **kwargs): python code from: https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py """ - if filter_type == 'gaussian': + if filter_type == "gaussian": return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': + if filter_type == "laplacian": return fspecial_laplacian(*args, **kwargs) @@ -275,7 +262,7 @@ def srmd_degradation(x, k, sf=3): } """ x = 
ndimage.filters.convolve( - x, np.expand_dims(k, axis=2), mode='wrap' + x, np.expand_dims(k, axis=2), mode="wrap" ) # 'nearest' | 'mirror' x = bicubic_degradation(x, sf=sf) return x @@ -299,7 +286,7 @@ def dpsr_degradation(x, k, sf=3): } """ x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap") return x @@ -312,7 +299,7 @@ def classical_degradation(x, k, sf=3): Return: downsampled LR image """ - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode="wrap") # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) st = 0 return x[st::sf, st::sf, ...] @@ -336,7 +323,7 @@ def add_sharpening(img, weight=0.5, radius=50, threshold=10): blur = cv2.GaussianBlur(img, (radius, radius), 0) residual = img - blur mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') + mask = mask.astype("float32") soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) K = img + weight * residual @@ -361,12 +348,8 @@ def add_blur(img, sf=4): l2=l2, ) else: - k = fspecial( - 'gaussian', random.randint(2, 4) + 3, wd * random.random() - ) - img = ndimage.filters.convolve( - img, np.expand_dims(k, axis=2), mode='mirror' - ) + k = fspecial("gaussian", random.randint(2, 4) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode="mirror") return img @@ -434,9 +417,9 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25): img = np.clip(img, 0.0, 1.0) rnum = random.random() if rnum > 0.6: - img += img * np.random.normal( - 0, noise_level / 255.0, img.shape - ).astype(np.float32) + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype( + np.float32 + ) elif rnum < 0.4: img += img * np.random.normal( 0, noise_level / 255.0, (*img.shape[:2], 1) @@ -462,8 +445,7 @@ def add_Poisson_noise(img): img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.0 noise_gray = ( - np.random.poisson(img_gray * vals).astype(np.float32) / vals - - img_gray + np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray ) img += noise_gray[:, :, np.newaxis] img = np.clip(img, 0.0, 1.0) @@ -474,7 +456,7 @@ def add_JPEG_noise(img): quality_factor = random.randint(80, 95) img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) result, encimg = cv2.imencode( - '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor] + ".jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor] ) img = cv2.imdecode(encimg, 1) img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) @@ -517,7 +499,7 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') + raise ValueError(f"img size ({h1}X{w1}) is too small!") hq = img.copy() @@ -542,7 +524,6 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): ) for i in shuffle_order: - if i == 0: img = add_blur(img, sf=sf) @@ -560,13 +541,11 @@ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): interpolation=random.choice([1, 2, 3]), ) else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) - k_shifted = ( - k_shifted / k_shifted.sum() - ) # blur with shifted kernel + 
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel img = ndimage.filters.convolve( - img, np.expand_dims(k_shifted, axis=2), mode='mirror' + img, np.expand_dims(k_shifted, axis=2), mode="mirror" ) img = img[0::sf, 0::sf, ...] # nearest downsampling img = np.clip(img, 0.0, 1.0) @@ -648,7 +627,6 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None): ) for i in shuffle_order: - if i == 0: image = add_blur(image, sf=sf) @@ -672,13 +650,11 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None): interpolation=random.choice([1, 2, 3]), ) else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k = fspecial("gaussian", 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) - k_shifted = ( - k_shifted / k_shifted.sum() - ) # blur with shifted kernel + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel image = ndimage.filters.convolve( - image, np.expand_dims(k_shifted, axis=2), mode='mirror' + image, np.expand_dims(k_shifted, axis=2), mode="mirror" ) image = image[0::sf, 0::sf, ...] # nearest downsampling @@ -711,29 +687,29 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None): # add final JPEG compression noise image = add_JPEG_noise(image) image = util.single2uint(image) - example = {'image': image} + example = {"image": image} return example -if __name__ == '__main__': - print('hey') - img = util.imread_uint('utils/test.png', 3) +if __name__ == "__main__": + print("hey") + img = util.imread_uint("utils/test.png", 3) img = img[:448, :448] h = img.shape[0] // 4 - print('resizing to', h) + print("resizing to", h) sf = 4 deg_fn = partial(degradation_bsrgan_variant, sf=sf) for i in range(20): print(i) img_hq = img - img_lq = deg_fn(img)['image'] + img_lq = deg_fn(img)["image"] img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) print(img_lq) img_lq_bicubic = albumentations.SmallestMaxSize( max_size=h, interpolation=cv2.INTER_CUBIC - )(image=img_hq)['image'] + )(image=img_hq)["image"] print(img_lq.shape) - print('bicubic', img_lq_bicubic.shape) + print("bicubic", img_lq_bicubic.shape) print(img_hq.shape) lq_nearest = cv2.resize( util.single2uint(img_lq), @@ -748,4 +724,4 @@ if __name__ == '__main__': img_concat = np.concatenate( [lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1 ) - util.imsave(img_concat, str(i) + '.png') + util.imsave(img_concat, str(i) + ".png") diff --git a/invokeai/backend/stable_diffusion/image_degradation/utils_image.py b/invokeai/backend/stable_diffusion/image_degradation/utils_image.py index 59236683b5..08505edde0 100644 --- a/invokeai/backend/stable_diffusion/image_degradation/utils_image.py +++ b/invokeai/backend/stable_diffusion/image_degradation/utils_image.py @@ -1,16 +1,17 @@ -import os import math +import os import random +from datetime import datetime + +import cv2 import numpy as np import torch -import cv2 from torchvision.utils import make_grid -from datetime import datetime # import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py -os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' +os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" """ @@ -25,17 +26,17 @@ os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' IMG_EXTENSIONS = [ - '.jpg', - '.JPG', - '.jpeg', - '.JPEG', - '.png', - '.PNG', - '.ppm', - '.PPM', - '.bmp', - '.BMP', - '.tif', + ".jpg", + ".JPG", + ".jpeg", + ".JPEG", + ".png", + ".PNG", + ".ppm", + ".PPM", + ".bmp", + ".BMP", + ".tif", ] @@ -44,12 +45,12 @@ def is_image_file(filename): def get_timestamp(): - return 
datetime.now().strftime('%y%m%d-%H%M%S') + return datetime.now().strftime("%y%m%d-%H%M%S") def imshow(x, title=None, cbar=False, figsize=None): plt.figure(figsize=figsize) - plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray') + plt.imshow(np.squeeze(x), interpolation="nearest", cmap="gray") if title: plt.title(title) if cbar: @@ -57,9 +58,9 @@ def imshow(x, title=None, cbar=False, figsize=None): plt.show() -def surf(Z, cmap='rainbow', figsize=None): +def surf(Z, cmap="rainbow", figsize=None): plt.figure(figsize=figsize) - ax3 = plt.axes(projection='3d') + ax3 = plt.axes(projection="3d") w, h = Z.shape[:2] xx = np.arange(0, w, 1) @@ -85,14 +86,14 @@ def get_image_paths(dataroot): def _get_paths_from_images(path): - assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) + assert os.path.isdir(path), "{:s} is not a valid directory".format(path) images = [] for dirpath, _, fnames in sorted(os.walk(path)): for fname in sorted(fnames): if is_image_file(fname): img_path = os.path.join(dirpath, fname) images.append(img_path) - assert images, '{:s} has no valid image file'.format(path) + assert images, "{:s} has no valid image file".format(path) return images @@ -133,7 +134,7 @@ def imssave(imgs, img_path): img = img[:, :, [2, 1, 0]] new_path = os.path.join( os.path.dirname(img_path), - img_name + str('_s{:04d}'.format(i)) + '.png', + img_name + str("_s{:04d}".format(i)) + ".png", ) cv2.imwrite(new_path, img) @@ -162,9 +163,7 @@ def split_imageset( # img_name, ext = os.path.splitext(os.path.basename(img_path)) img = imread_uint(img_path, n_channels=n_channels) patches = patches_from_image(img, p_size, p_overlap, p_max) - imssave( - patches, os.path.join(taget_dataroot, os.path.basename(img_path)) - ) + imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path))) # if original_dataroot == taget_dataroot: # del img_path @@ -191,8 +190,8 @@ def mkdirs(paths): def mkdir_and_rename(path): if os.path.exists(path): - new_name = path + '_archived_' + get_timestamp() - print('Path already exists. Rename it to [{:s}]'.format(new_name)) + new_name = path + "_archived_" + get_timestamp() + print("Path already exists. 
Rename it to [{:s}]".format(new_name)) os.replace(path, new_name) os.makedirs(path) @@ -273,22 +272,18 @@ def read_img(path): def uint2single(img): - return np.float32(img / 255.0) def single2uint(img): - return np.uint8((img.clip(0, 1) * 255.0).round()) def uint162single(img): - return np.float32(img / 65535.0) def single2uint16(img): - return np.uint16((img.clip(0, 1) * 65535.0).round()) @@ -315,10 +310,7 @@ def uint2tensor3(img): if img.ndim == 2: img = np.expand_dims(img, axis=2) return ( - torch.from_numpy(np.ascontiguousarray(img)) - .permute(2, 0, 1) - .float() - .div(255.0) + torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.0) ) @@ -379,18 +371,11 @@ def single2tensor5(img): def single32tensor5(img): - return ( - torch.from_numpy(np.ascontiguousarray(img)) - .float() - .unsqueeze(0) - .unsqueeze(0) - ) + return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0) def single42tensor4(img): - return ( - torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float() - ) + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float() # from skimage.io import imread, imsave @@ -403,15 +388,11 @@ def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): tensor = ( tensor.squeeze().float().cpu().clamp_(*min_max) ) # squeeze first, then clamp - tensor = (tensor - min_max[0]) / ( - min_max[1] - min_max[0] - ) # to range [0,1] + tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] n_dim = tensor.dim() if n_dim == 4: n_img = len(tensor) - img_np = make_grid( - tensor, nrow=int(math.sqrt(n_img)), normalize=False - ).numpy() + img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 3: img_np = tensor.numpy() @@ -420,7 +401,7 @@ def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): img_np = tensor.numpy() else: raise TypeError( - 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format( + "Only support 4D, 3D and 2D tensor. 
But received with dimension: {:d}".format( n_dim ) ) @@ -564,7 +545,7 @@ def modcrop(img_in, scale): H_r, W_r = H % scale, W % scale img = img[: H - H_r, : W - W_r, :] else: - raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim)) + raise ValueError("Wrong img ndim: [{:d}].".format(img.ndim)) return img @@ -675,13 +656,13 @@ def bgr2ycbcr(img, only_y=True): def channel_convert(in_c, tar_type, img_list): # conversion among BGR, gray and y - if in_c == 3 and tar_type == 'gray': # BGR to gray + if in_c == 3 and tar_type == "gray": # BGR to gray gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list] return [np.expand_dims(img, axis=2) for img in gray_list] - elif in_c == 3 and tar_type == 'y': # BGR to y + elif in_c == 3 and tar_type == "y": # BGR to y y_list = [bgr2ycbcr(img, only_y=True) for img in img_list] return [np.expand_dims(img, axis=2) for img in y_list] - elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR + elif in_c == 1 and tar_type == "RGB": # gray/y to BGR return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list] else: return img_list @@ -702,7 +683,7 @@ def calculate_psnr(img1, img2, border=0): # img1 = img1.squeeze() # img2 = img2.squeeze() if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') + raise ValueError("Input images must have the same dimensions.") h, w = img1.shape[:2] img1 = img1[border : h - border, border : w - border] img2 = img2[border : h - border, border : w - border] @@ -711,7 +692,7 @@ def calculate_psnr(img1, img2, border=0): img2 = img2.astype(np.float64) mse = np.mean((img1 - img2) ** 2) if mse == 0: - return float('inf') + return float("inf") return 20 * math.log10(255.0 / math.sqrt(mse)) @@ -726,7 +707,7 @@ def calculate_ssim(img1, img2, border=0): # img1 = img1.squeeze() # img2 = img2.squeeze() if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') + raise ValueError("Input images must have the same dimensions.") h, w = img1.shape[:2] img1 = img1[border : h - border, border : w - border] img2 = img2[border : h - border, border : w - border] @@ -742,7 +723,7 @@ def calculate_ssim(img1, img2, border=0): elif img1.shape[2] == 1: return ssim(np.squeeze(img1), np.squeeze(img2)) else: - raise ValueError('Wrong input image dimensions.') + raise ValueError("Wrong input image dimensions.") def ssim(img1, img2): @@ -861,7 +842,7 @@ def imresize(img, scale, antialiasing=True): math.ceil(in_W * scale), ) kernel_width = 4 - kernel = 'cubic' + kernel = "cubic" # Return the desired dimension order for performing the resize. 
The # strategy is to perform the resize first along the dimension with the @@ -896,9 +877,7 @@ def imresize(img, scale, antialiasing=True): idx = int(indices_H[i][0]) for j in range(out_C): out_1[j, i, :] = ( - img_aug[j, idx : idx + kernel_width, :] - .transpose(0, 1) - .mv(weights_H[i]) + img_aug[j, idx : idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) ) # process W dimension @@ -921,9 +900,7 @@ def imresize(img, scale, antialiasing=True): for i in range(out_W): idx = int(indices_W[i][0]) for j in range(out_C): - out_2[j, :, i] = out_1_aug[j, :, idx : idx + kernel_width].mv( - weights_W[i] - ) + out_2[j, :, i] = out_1_aug[j, :, idx : idx + kernel_width].mv(weights_W[i]) if need_squeeze: out_2.squeeze_() return out_2 @@ -948,7 +925,7 @@ def imresize_np(img, scale, antialiasing=True): math.ceil(in_W * scale), ) kernel_width = 4 - kernel = 'cubic' + kernel = "cubic" # Return the desired dimension order for performing the resize. The # strategy is to perform the resize first along the dimension with the @@ -983,9 +960,7 @@ def imresize_np(img, scale, antialiasing=True): idx = int(indices_H[i][0]) for j in range(out_C): out_1[i, :, j] = ( - img_aug[idx : idx + kernel_width, :, j] - .transpose(0, 1) - .mv(weights_H[i]) + img_aug[idx : idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) ) # process W dimension @@ -1008,17 +983,15 @@ def imresize_np(img, scale, antialiasing=True): for i in range(out_W): idx = int(indices_W[i][0]) for j in range(out_C): - out_2[:, i, j] = out_1_aug[:, idx : idx + kernel_width, j].mv( - weights_W[i] - ) + out_2[:, i, j] = out_1_aug[:, idx : idx + kernel_width, j].mv(weights_W[i]) if need_squeeze: out_2.squeeze_() return out_2.numpy() -if __name__ == '__main__': - print('---') +if __name__ == "__main__": + print("---") # img = imread_uint('test.bmp', 3) # img = uint2single(img) # img_bicubic = imresize_np(img, 1/4) diff --git a/invokeai/backend/stable_diffusion/losses/contperceptual.py b/invokeai/backend/stable_diffusion/losses/contperceptual.py index 7fa4124346..1e3e6a00c4 100644 --- a/invokeai/backend/stable_diffusion/losses/contperceptual.py +++ b/invokeai/backend/stable_diffusion/losses/contperceptual.py @@ -1,6 +1,5 @@ import torch import torch.nn as nn - from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
@@ -18,11 +17,10 @@ class LPIPSWithDiscriminator(nn.Module): perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_loss='hinge', + disc_loss="hinge", ): - super().__init__() - assert disc_loss in ['hinge', 'vanilla'] + assert disc_loss in ["hinge", "vanilla"] self.kl_weight = kl_weight self.pixel_weight = pixelloss_weight self.perceptual_loss = LPIPS().eval() @@ -36,21 +34,15 @@ class LPIPSWithDiscriminator(nn.Module): use_actnorm=use_actnorm, ).apply(weights_init) self.discriminator_iter_start = disc_start - self.disc_loss = ( - hinge_d_loss if disc_loss == 'hinge' else vanilla_d_loss - ) + self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss self.disc_factor = disc_factor self.discriminator_weight = disc_weight self.disc_conditional = disc_conditional def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): if last_layer is not None: - nll_grads = torch.autograd.grad( - nll_loss, last_layer, retain_graph=True - )[0] - g_grads = torch.autograd.grad( - g_loss, last_layer, retain_graph=True - )[0] + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] else: nll_grads = torch.autograd.grad( nll_loss, self.last_layer[0], retain_graph=True @@ -73,12 +65,10 @@ class LPIPSWithDiscriminator(nn.Module): global_step, last_layer=None, cond=None, - split='train', + split="train", weights=None, ): - rec_loss = torch.abs( - inputs.contiguous() - reconstructions.contiguous() - ) + rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) if self.perceptual_weight > 0: p_loss = self.perceptual_loss( inputs.contiguous(), reconstructions.contiguous() @@ -89,9 +79,7 @@ class LPIPSWithDiscriminator(nn.Module): weighted_nll_loss = nll_loss if weights is not None: weighted_nll_loss = weights * nll_loss - weighted_nll_loss = ( - torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] - ) + weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] kl_loss = posteriors.kl() kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] @@ -132,14 +120,14 @@ class LPIPSWithDiscriminator(nn.Module): ) log = { - '{}/total_loss'.format(split): loss.clone().detach().mean(), - '{}/logvar'.format(split): self.logvar.detach(), - '{}/kl_loss'.format(split): kl_loss.detach().mean(), - '{}/nll_loss'.format(split): nll_loss.detach().mean(), - '{}/rec_loss'.format(split): rec_loss.detach().mean(), - '{}/d_weight'.format(split): d_weight.detach(), - '{}/disc_factor'.format(split): torch.tensor(disc_factor), - '{}/g_loss'.format(split): g_loss.detach().mean(), + "{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/logvar".format(split): self.logvar.detach(), + "{}/kl_loss".format(split): kl_loss.detach().mean(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), } return loss, log @@ -147,17 +135,13 @@ class LPIPSWithDiscriminator(nn.Module): # second pass for discriminator update if cond is None: logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator( - reconstructions.contiguous().detach() - ) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) else: logits_real = self.discriminator( 
torch.cat((inputs.contiguous().detach(), cond), dim=1) ) logits_fake = self.discriminator( - torch.cat( - (reconstructions.contiguous().detach(), cond), dim=1 - ) + torch.cat((reconstructions.contiguous().detach(), cond), dim=1) ) disc_factor = adopt_weight( @@ -168,8 +152,8 @@ class LPIPSWithDiscriminator(nn.Module): d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) log = { - '{}/disc_loss'.format(split): d_loss.clone().detach().mean(), - '{}/logits_real'.format(split): logits_real.detach().mean(), - '{}/logits_fake'.format(split): logits_fake.detach().mean(), + "{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean(), } return d_loss, log diff --git a/invokeai/backend/stable_diffusion/losses/vqperceptual.py b/invokeai/backend/stable_diffusion/losses/vqperceptual.py index 2f94bf5281..50413d37b8 100644 --- a/invokeai/backend/stable_diffusion/losses/vqperceptual.py +++ b/invokeai/backend/stable_diffusion/losses/vqperceptual.py @@ -1,14 +1,10 @@ import torch -from torch import nn import torch.nn.functional as F from einops import repeat - -from taming.modules.discriminator.model import ( - NLayerDiscriminator, - weights_init, -) +from taming.modules.discriminator.model import NLayerDiscriminator, weights_init from taming.modules.losses.lpips import LPIPS from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss +from torch import nn def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights): @@ -30,9 +26,7 @@ def adopt_weight(weight, global_step, threshold=0, value=0.0): def measure_perplexity(predicted_indices, n_embed): # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py # eval cluster perplexity. 
when perplexity == num_embeddings then all clusters are used exactly equally - encodings = ( - F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) - ) + encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) avg_probs = encodings.mean(0) perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() cluster_use = torch.sum(avg_probs > 0) @@ -61,27 +55,25 @@ class VQLPIPSWithDiscriminator(nn.Module): use_actnorm=False, disc_conditional=False, disc_ndf=64, - disc_loss='hinge', + disc_loss="hinge", n_classes=None, - perceptual_loss='lpips', - pixel_loss='l1', + perceptual_loss="lpips", + pixel_loss="l1", ): super().__init__() - assert disc_loss in ['hinge', 'vanilla'] - assert perceptual_loss in ['lpips', 'clips', 'dists'] - assert pixel_loss in ['l1', 'l2'] + assert disc_loss in ["hinge", "vanilla"] + assert perceptual_loss in ["lpips", "clips", "dists"] + assert pixel_loss in ["l1", "l2"] self.codebook_weight = codebook_weight self.pixel_weight = pixelloss_weight - if perceptual_loss == 'lpips': - print(f'{self.__class__.__name__}: Running with LPIPS.') + if perceptual_loss == "lpips": + print(f"{self.__class__.__name__}: Running with LPIPS.") self.perceptual_loss = LPIPS().eval() else: - raise ValueError( - f'Unknown perceptual loss: >> {perceptual_loss} <<' - ) + raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<") self.perceptual_weight = perceptual_weight - if pixel_loss == 'l1': + if pixel_loss == "l1": self.pixel_loss = l1 else: self.pixel_loss = l2 @@ -93,13 +85,13 @@ class VQLPIPSWithDiscriminator(nn.Module): ndf=disc_ndf, ).apply(weights_init) self.discriminator_iter_start = disc_start - if disc_loss == 'hinge': + if disc_loss == "hinge": self.disc_loss = hinge_d_loss - elif disc_loss == 'vanilla': + elif disc_loss == "vanilla": self.disc_loss = vanilla_d_loss else: raise ValueError(f"Unknown GAN loss '{disc_loss}'.") - print(f'VQLPIPSWithDiscriminator running with {disc_loss} loss.') + print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") self.disc_factor = disc_factor self.discriminator_weight = disc_weight self.disc_conditional = disc_conditional @@ -107,12 +99,8 @@ class VQLPIPSWithDiscriminator(nn.Module): def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): if last_layer is not None: - nll_grads = torch.autograd.grad( - nll_loss, last_layer, retain_graph=True - )[0] - g_grads = torch.autograd.grad( - g_loss, last_layer, retain_graph=True - )[0] + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] else: nll_grads = torch.autograd.grad( nll_loss, self.last_layer[0], retain_graph=True @@ -135,15 +123,13 @@ class VQLPIPSWithDiscriminator(nn.Module): global_step, last_layer=None, cond=None, - split='train', + split="train", predicted_indices=None, ): if not exists(codebook_loss): codebook_loss = torch.tensor([0.0]).to(inputs.device) # rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - rec_loss = self.pixel_loss( - inputs.contiguous(), reconstructions.contiguous() - ) + rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous()) if self.perceptual_weight > 0: p_loss = self.perceptual_loss( inputs.contiguous(), reconstructions.contiguous() @@ -189,14 +175,14 @@ class VQLPIPSWithDiscriminator(nn.Module): ) log = { - '{}/total_loss'.format(split): loss.clone().detach().mean(), - '{}/quant_loss'.format(split): codebook_loss.detach().mean(), - 
'{}/nll_loss'.format(split): nll_loss.detach().mean(), - '{}/rec_loss'.format(split): rec_loss.detach().mean(), - '{}/p_loss'.format(split): p_loss.detach().mean(), - '{}/d_weight'.format(split): d_weight.detach(), - '{}/disc_factor'.format(split): torch.tensor(disc_factor), - '{}/g_loss'.format(split): g_loss.detach().mean(), + "{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/quant_loss".format(split): codebook_loss.detach().mean(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/p_loss".format(split): p_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), } if predicted_indices is not None: assert self.n_classes is not None @@ -204,25 +190,21 @@ class VQLPIPSWithDiscriminator(nn.Module): perplexity, cluster_usage = measure_perplexity( predicted_indices, self.n_classes ) - log[f'{split}/perplexity'] = perplexity - log[f'{split}/cluster_usage'] = cluster_usage + log[f"{split}/perplexity"] = perplexity + log[f"{split}/cluster_usage"] = cluster_usage return loss, log if optimizer_idx == 1: # second pass for discriminator update if cond is None: logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator( - reconstructions.contiguous().detach() - ) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) else: logits_real = self.discriminator( torch.cat((inputs.contiguous().detach(), cond), dim=1) ) logits_fake = self.discriminator( - torch.cat( - (reconstructions.contiguous().detach(), cond), dim=1 - ) + torch.cat((reconstructions.contiguous().detach(), cond), dim=1) ) disc_factor = adopt_weight( @@ -233,8 +215,8 @@ class VQLPIPSWithDiscriminator(nn.Module): d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) log = { - '{}/disc_loss'.format(split): d_loss.clone().detach().mean(), - '{}/logits_real'.format(split): logits_real.detach().mean(), - '{}/logits_fake'.format(split): logits_fake.detach().mean(), + "{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean(), } return d_loss, log diff --git a/invokeai/backend/stable_diffusion/offloading.py b/invokeai/backend/stable_diffusion/offloading.py index e049f5fe09..871f994732 100644 --- a/invokeai/backend/stable_diffusion/offloading.py +++ b/invokeai/backend/stable_diffusion/offloading.py @@ -12,6 +12,7 @@ from torch.utils.hooks import RemovableHandle OFFLOAD_DEVICE = torch.device("cpu") + class _NoModel: """Symbol that indicates no model is loaded. @@ -28,6 +29,7 @@ class _NoModel: def __repr__(self): return "" + NO_MODEL = _NoModel() @@ -93,8 +95,10 @@ class ModelGroup(metaclass=ABCMeta): pass def __repr__(self) -> str: - return f"<{self.__class__.__name__} object at {id(self):x}: " \ - f"device={self.execution_device} >" + return ( + f"<{self.__class__.__name__} object at {id(self):x}: " + f"device={self.execution_device} >" + ) class LazilyLoadedModelGroup(ModelGroup): @@ -138,8 +142,11 @@ class LazilyLoadedModelGroup(ModelGroup): def _pre_hook(self, module: torch.nn.Module, forward_input): self.load(module) if len(forward_input) == 0: - warnings.warn(f"Hook for {module.__class__.__name__} got no input. 
" - f"Inputs must be positional, not keywords.", stacklevel=3) + warnings.warn( + f"Hook for {module.__class__.__name__} got no input. " + f"Inputs must be positional, not keywords.", + stacklevel=3, + ) return send_to_device(forward_input, self.execution_device) def load(self, module): @@ -154,7 +161,9 @@ class LazilyLoadedModelGroup(ModelGroup): self.clear_current_model() def _load(self, module: torch.nn.Module) -> torch.nn.Module: - assert self.is_empty(), f"A model is already loaded: {self._current_model_ref()}" + assert ( + self.is_empty() + ), f"A model is already loaded: {self._current_model_ref()}" module = module.to(self.execution_device) self.set_current_model(module) return module @@ -183,8 +192,12 @@ class LazilyLoadedModelGroup(ModelGroup): def device_for(self, model): if model not in self: - raise KeyError(f"This does not manage this model {type(model).__name__}", model) - return self.execution_device # this implementation only dispatches to one device + raise KeyError( + f"This does not manage this model {type(model).__name__}", model + ) + return ( + self.execution_device + ) # this implementation only dispatches to one device def ready(self): pass # always ready to load on-demand @@ -193,8 +206,10 @@ class LazilyLoadedModelGroup(ModelGroup): return model in self._hooks def __repr__(self) -> str: - return f"<{self.__class__.__name__} object at {id(self):x}: " \ - f"current_model={type(self._current_model_ref()).__name__} >" + return ( + f"<{self.__class__.__name__} object at {id(self):x}: " + f"current_model={type(self._current_model_ref()).__name__} >" + ) class FullyLoadedModelGroup(ModelGroup): @@ -203,6 +218,7 @@ class FullyLoadedModelGroup(ModelGroup): :py:meth:`.ready` loads _all_ the models to the execution device at once. """ + _models: weakref.WeakSet def __init__(self, execution_device: torch.device): @@ -240,8 +256,12 @@ class FullyLoadedModelGroup(ModelGroup): def device_for(self, model): if model not in self: - raise KeyError("This does not manage this model f{type(model).__name__}", model) - return self.execution_device # this implementation only dispatches to one device + raise KeyError( + "This does not manage this model f{type(model).__name__}", model + ) + return ( + self.execution_device + ) # this implementation only dispatches to one device def __contains__(self, model): return model in self._models diff --git a/invokeai/backend/stable_diffusion/textual_inversion_manager.py b/invokeai/backend/stable_diffusion/textual_inversion_manager.py index e45ea7362b..2b043afab7 100644 --- a/invokeai/backend/stable_diffusion/textual_inversion_manager.py +++ b/invokeai/backend/stable_diffusion/textual_inversion_manager.py @@ -5,10 +5,10 @@ from pathlib import Path from typing import Optional, Union import torch +from compel.embeddings_provider import BaseTextualInversionManager from picklescan.scanner import scan_file_path from transformers import CLIPTextModel, CLIPTokenizer -from compel.embeddings_provider import BaseTextualInversionManager from .concepts_lib import HuggingFaceConceptsLibrary diff --git a/invokeai/backend/stable_diffusion/x_transformer.py b/invokeai/backend/stable_diffusion/x_transformer.py index d6c4cc6881..b541d77ee2 100644 --- a/invokeai/backend/stable_diffusion/x_transformer.py +++ b/invokeai/backend/stable_diffusion/x_transformer.py @@ -1,23 +1,20 @@ """shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" -import torch -from torch import nn, einsum -import torch.nn.functional as F +from collections import 
namedtuple from functools import partial from inspect import isfunction -from collections import namedtuple -from einops import rearrange, repeat, reduce + +import torch +import torch.nn.functional as F +from einops import rearrange, reduce, repeat +from torch import einsum, nn # constants DEFAULT_DIM_HEAD = 64 -Intermediates = namedtuple( - 'Intermediates', ['pre_softmax_attn', 'post_softmax_attn'] -) +Intermediates = namedtuple("Intermediates", ["pre_softmax_attn", "post_softmax_attn"]) -LayerIntermediates = namedtuple( - 'Intermediates', ['hiddens', 'attn_intermediates'] -) +LayerIntermediates = namedtuple("Intermediates", ["hiddens", "attn_intermediates"]) class AbsolutePositionalEmbedding(nn.Module): @@ -38,16 +35,14 @@ class FixedPositionalEmbedding(nn.Module): def __init__(self, dim): super().__init__() inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) + self.register_buffer("inv_freq", inv_freq) def forward(self, x, seq_dim=1, offset=0): t = ( - torch.arange(x.shape[seq_dim], device=x.device).type_as( - self.inv_freq - ) + torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset ) - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) + sinusoid_inp = torch.einsum("i , j -> i j", t, self.inv_freq) emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) return emb[None, :, :] @@ -187,8 +182,8 @@ class GRUGating(nn.Module): def forward(self, x, residual): gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d'), + rearrange(x, "b n d -> (b n) d"), + rearrange(residual, "b n d -> (b n) d"), ) return gated_output.reshape_as(x) @@ -245,7 +240,7 @@ class Attention(nn.Module): super().__init__() if use_entmax15: raise NotImplementedError( - 'Check out entmax activation instead of softmax activation!' + "Check out entmax activation instead of softmax activation!" 
) self.scale = dim_head**-0.5 self.heads = heads @@ -323,37 +318,31 @@ class Attention(nn.Module): k = self.to_k(k_input) v = self.to_v(v_input) - q, k, v = map( - lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v) - ) + q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v)) input_mask = None if any(map(exists, (mask, context_mask))): - q_mask = default( - mask, lambda: torch.ones((b, n), device=device).bool() - ) + q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) k_mask = q_mask if not exists(context) else context_mask k_mask = default( k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool(), ) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') + q_mask = rearrange(q_mask, "b i -> b () i ()") + k_mask = rearrange(k_mask, "b j -> b () () j") input_mask = q_mask * k_mask if self.num_mem_kv > 0: mem_k, mem_v = map( - lambda t: repeat(t, 'h n d -> b h n d', b=b), + lambda t: repeat(t, "h n d -> b h n d", b=b), (self.mem_k, self.mem_v), ) k = torch.cat((mem_k, k), dim=-2) v = torch.cat((mem_v, v), dim=-2) if exists(input_mask): - input_mask = F.pad( - input_mask, (self.num_mem_kv, 0), value=True - ) + input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale + dots = einsum("b h i d, b h j d -> b h i j", q, k) * self.scale mask_value = max_neg_value(dots) if exists(prev_attn): @@ -363,7 +352,7 @@ class Attention(nn.Module): if talking_heads: dots = einsum( - 'b h i j, h k -> b k i j', dots, self.pre_softmax_proj + "b h i j, h k -> b k i j", dots, self.pre_softmax_proj ).contiguous() if exists(rel_pos): @@ -376,9 +365,7 @@ class Attention(nn.Module): if self.causal: i, j = dots.shape[-2:] r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange( - r, 'j -> () () () j' - ) + mask = rearrange(r, "i -> () () i ()") < rearrange(r, "j -> () () () j") mask = F.pad(mask, (j - i, 0), value=False) dots.masked_fill_(mask, mask_value) del mask @@ -397,11 +384,11 @@ class Attention(nn.Module): if talking_heads: attn = einsum( - 'b h i j, h k -> b k i j', attn, self.post_softmax_proj + "b h i j, h k -> b k i j", attn, self.post_softmax_proj ).contiguous() - out = einsum('b h i j, b h j d -> b h i d', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') + out = einsum("b h i j, b h j d -> b h i d", attn, v) + out = rearrange(out, "b h n d -> b n (h d)") intermediates = Intermediates( pre_softmax_attn=pre_softmax_attn, @@ -437,10 +424,10 @@ class AttentionLayers(nn.Module): **kwargs, ): super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) + ff_kwargs, kwargs = groupby_prefix_and_trim("ff_", kwargs) + attn_kwargs, _ = groupby_prefix_and_trim("attn_", kwargs) - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) + dim_head = attn_kwargs.get("dim_head", DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth @@ -454,7 +441,7 @@ class AttentionLayers(nn.Module): assert ( rel_pos_num_buckets <= rel_pos_max_distance - ), 'number of relative position buckets must be less than the relative position max distance' + ), "number of relative position buckets must be less than the relative position max distance" self.rel_pos = None self.pre_norm = pre_norm @@ -470,21 +457,21 @@ class AttentionLayers(nn.Module): branch_fn = Rezero if use_rezero else None if cross_attend and not only_cross: - default_block = ('a', 'c', 
'f') + default_block = ("a", "c", "f") elif cross_attend and only_cross: - default_block = ('c', 'f') + default_block = ("c", "f") else: - default_block = ('a', 'f') + default_block = ("a", "f") if macaron: - default_block = ('f',) + default_block + default_block = ("f",) + default_block if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) + assert 1 < par_ratio <= par_depth, "par ratio out of range" + default_block = tuple(filter(not_equals("f"), default_block)) par_attn = par_depth // par_ratio depth_cut = ( par_depth * 2 // 3 @@ -492,39 +479,35 @@ class AttentionLayers(nn.Module): par_width = (depth_cut + depth_cut // par_attn) // par_attn assert ( len(default_block) <= par_width - ), 'default block is too large for par_ratio' - par_block = default_block + ('f',) * ( - par_width - len(default_block) - ) + ), "default block is too large for par_ratio" + par_block = default_block + ("f",) * (par_width - len(default_block)) par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - len(par_head)) + layer_types = par_head + ("f",) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert ( sandwich_coef > 0 and sandwich_coef <= depth - ), 'sandwich coefficient should be less than the depth' + ), "sandwich coefficient should be less than the depth" layer_types = ( - ('a',) * sandwich_coef + ("a",) * sandwich_coef + default_block * (depth - sandwich_coef) - + ('f',) * sandwich_coef + + ("f",) * sandwich_coef ) else: layer_types = default_block * depth self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) + self.num_attn_layers = len(list(filter(equals("a"), layer_types))) for layer_type in self.layer_types: - if layer_type == 'a': - layer = Attention( - dim, heads=heads, causal=causal, **attn_kwargs - ) - elif layer_type == 'c': + if layer_type == "a": + layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) + elif layer_type == "c": layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': + elif layer_type == "f": layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: - raise Exception(f'invalid layer type {layer_type}') + raise Exception(f"invalid layer type {layer_type}") if isinstance(layer, Attention) and exists(branch_fn): layer = branch_fn(layer) @@ -558,7 +541,7 @@ class AttentionLayers(nn.Module): ): is_last = ind == (len(self.layers) - 1) - if layer_type == 'a': + if layer_type == "a": hiddens.append(x) layer_mem = mems.pop(0) @@ -567,7 +550,7 @@ class AttentionLayers(nn.Module): if self.pre_norm: x = norm(x) - if layer_type == 'a': + if layer_type == "a": out, inter = block( x, mask=mask, @@ -576,7 +559,7 @@ class AttentionLayers(nn.Module): prev_attn=prev_attn, mem=layer_mem, ) - elif layer_type == 'c': + elif layer_type == "c": out, inter = block( x, context=context, @@ -584,17 +567,17 @@ class AttentionLayers(nn.Module): context_mask=context_mask, prev_attn=prev_cross_attn, ) - elif layer_type == 'f': + elif layer_type == "f": out = block(x) x = residual_fn(out, residual) - if layer_type in ('a', 'c'): + if layer_type in ("a", "c"): intermediates.append(inter) - if layer_type == 'a' and self.residual_attn: + if layer_type == "a" and self.residual_attn: prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: + elif 
layer_type == "c" and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if not self.pre_norm and not is_last: @@ -612,7 +595,7 @@ class AttentionLayers(nn.Module): class Encoder(AttentionLayers): def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' + assert "causal" not in kwargs, "cannot set causality on encoder" super().__init__(causal=False, **kwargs) @@ -633,7 +616,7 @@ class TransformerWrapper(nn.Module): super().__init__() assert isinstance( attn_layers, AttentionLayers - ), 'attention layers must be one of Encoder or Decoder' + ), "attention layers must be one of Encoder or Decoder" dim = attn_layers.dim emb_dim = default(emb_dim, dim) @@ -650,9 +633,7 @@ class TransformerWrapper(nn.Module): ) self.emb_dropout = nn.Dropout(emb_dropout) - self.project_emb = ( - nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - ) + self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.norm = nn.LayerNorm(dim) @@ -668,12 +649,10 @@ class TransformerWrapper(nn.Module): num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter( - torch.randn(num_memory_tokens, dim) - ) + self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) # let funnel encoder know number of memory tokens, if specified - if hasattr(attn_layers, 'num_memory_tokens'): + if hasattr(attn_layers, "num_memory_tokens"): attn_layers.num_memory_tokens = num_memory_tokens def init_(self): @@ -705,7 +684,7 @@ class TransformerWrapper(nn.Module): x = self.project_emb(x) if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) + mem = repeat(self.memory_tokens, "n d -> b n d", b=b) x = torch.cat((mem, x), dim=1) # auto-handle masking after appending memory tokens @@ -734,9 +713,7 @@ class TransformerWrapper(nn.Module): else hiddens ) new_mems = list( - map( - lambda t: t[..., -self.max_mem_len :, :].detach(), new_mems - ) + map(lambda t: t[..., -self.max_mem_len :, :].detach(), new_mems) ) return out, new_mems diff --git a/invokeai/backend/training/__init.py__ b/invokeai/backend/training/__init__.py similarity index 94% rename from invokeai/backend/training/__init.py__ rename to invokeai/backend/training/__init__.py index 16bde19390..a85842dc72 100644 --- a/invokeai/backend/training/__init.py__ +++ b/invokeai/backend/training/__init__.py @@ -1,4 +1,4 @@ -''' +""" Initialization file for invokeai.backend.training -''' +""" from .textual_inversion_training import do_textual_inversion_training, parse_args diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py index 4e9bd36eef..cd45fa894b 100644 --- a/invokeai/backend/training/textual_inversion_training.py +++ b/invokeai/backend/training/textual_inversion_training.py @@ -47,8 +47,8 @@ from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer # invokeai stuff -from ldm.invoke.args import ArgFormatter, PagingArgumentParser -from invokeai.backend.globals import Globals, global_cache_dir +from ..args import ArgFormatter, PagingArgumentParser +from ..globals import Globals, global_cache_dir if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { @@ -441,7 +441,10 @@ class TextualInversionDataset(Dataset): self.image_paths = [ self.data_root / file_path for file_path in self.data_root.iterdir() - if 
file_path.is_file() and file_path.name.endswith(('.png','.PNG','.jpg','.JPG','.jpeg','.JPEG','.gif','.GIF')) + if file_path.is_file() + and file_path.name.endswith( + (".png", ".PNG", ".jpg", ".JPG", ".jpeg", ".JPEG", ".gif", ".GIF") + ) ] self.num_images = len(self.image_paths) @@ -490,7 +493,10 @@ class TextualInversionDataset(Dataset): if self.center_crop: crop = min(img.shape[0], img.shape[1]) - h, w, = ( + ( + h, + w, + ) = ( img.shape[0], img.shape[1], ) diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py index 434e03c273..ca42f86fd6 100644 --- a/invokeai/backend/util/__init__.py +++ b/invokeai/backend/util/__init__.py @@ -1,18 +1,19 @@ -''' +""" Initialization file for invokeai.backend.util -''' -from .devices import (choose_torch_device, - choose_precision, - normalize_device, - torch_dtype, - CPU_DEVICE, - CUDA_DEVICE, - MPS_DEVICE, - ) -from .util import (ask_user, - download_with_resume, - instantiate_from_config, - url_attachment_name, - ) +""" +from .devices import ( + CPU_DEVICE, + CUDA_DEVICE, + MPS_DEVICE, + choose_precision, + choose_torch_device, + normalize_device, + torch_dtype, +) from .log import write_log - +from .util import ( + ask_user, + download_with_resume, + instantiate_from_config, + url_attachment_name, +) diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py index 167847eae0..c70a43ff09 100644 --- a/invokeai/backend/util/devices.py +++ b/invokeai/backend/util/devices.py @@ -11,46 +11,51 @@ CPU_DEVICE = torch.device("cpu") CUDA_DEVICE = torch.device("cuda") MPS_DEVICE = torch.device("mps") + def choose_torch_device() -> torch.device: - '''Convenience routine for guessing which GPU device to run model on''' + """Convenience routine for guessing which GPU device to run model on""" if Globals.always_use_cpu: return CPU_DEVICE if torch.cuda.is_available(): - return torch.device('cuda') - if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): - return torch.device('mps') + return torch.device("cuda") + if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): + return torch.device("mps") return CPU_DEVICE + def choose_precision(device: torch.device) -> str: - '''Returns an appropriate precision for the given torch device''' - if device.type == 'cuda': + """Returns an appropriate precision for the given torch device""" + if device.type == "cuda": device_name = torch.cuda.get_device_name(device) - if not ('GeForce GTX 1660' in device_name or 'GeForce GTX 1650' in device_name): - return 'float16' - return 'float32' + if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name): + return "float16" + return "float32" + def torch_dtype(device: torch.device) -> torch.dtype: if Globals.full_precision: return torch.float32 - if choose_precision(device) == 'float16': + if choose_precision(device) == "float16": return torch.float16 else: return torch.float32 + def choose_autocast(precision): - '''Returns an autocast context or nullcontext for the given precision string''' + """Returns an autocast context or nullcontext for the given precision string""" # float16 currently requires autocast to avoid errors like: # 'expected scalar type Half but found Float' - if precision == 'autocast' or precision == 'float16': + if precision == "autocast" or precision == "float16": return autocast return nullcontext + def normalize_device(device: str | torch.device) -> torch.device: """Ensure device has a device index defined, if appropriate.""" device = torch.device(device) if 
device.index is None: # cuda might be the only torch backend that currently uses the device index? # I don't see anything like `current_device` for cpu or mps. - if device.type == 'cuda': + if device.type == "cuda": device = torch.device(device.type, torch.cuda.current_device()) return device diff --git a/invokeai/backend/util/log.py b/invokeai/backend/util/log.py index 8aebe62671..4643f61a6b 100644 --- a/invokeai/backend/util/log.py +++ b/invokeai/backend/util/log.py @@ -25,14 +25,15 @@ def write_log_message(results, output_cntr): if len(results) == 0: return output_cntr log_lines = [f"{path}: {prompt}\n" for path, prompt in results] - if len(log_lines)>1: + if len(log_lines) > 1: subcntr = 1 for l in log_lines: - print(f"[{output_cntr}.{subcntr}] {l}", end="") - subcntr += 1 + print(f"[{output_cntr}.{subcntr}] {l}", end="") + subcntr += 1 else: - print(f"[{output_cntr}] {log_lines[0]}", end="") - return output_cntr+1 + print(f"[{output_cntr}] {log_lines[0]}", end="") + return output_cntr + 1 + def write_log_files(results, log_path, file_types): for file_type in file_types: diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py index 2bd24d3ef0..3fab6e18b0 100644 --- a/invokeai/backend/util/util.py +++ b/invokeai/backend/util/util.py @@ -17,6 +17,7 @@ from tqdm import tqdm from .devices import torch_dtype + def log_txt_as_img(wh, xc, size=10): # wh a tuple of (width, height) # xc a list of captions to plot @@ -281,14 +282,14 @@ def ask_user(question: str, answers: list): # ------------------------------------- def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path: - ''' + """ Download a model file. :param url: https, http or ftp URL :param dest: A Path object. If path exists and is a directory, then we try to derive the filename from the URL's Content-Disposition header and copy the URL contents into dest/filename :param access_token: Access token to access this resource - ''' + """ header = {"Authorization": f"Bearer {access_token}"} if access_token else {} open_mode = "wb" exist_size = 0 @@ -298,7 +299,9 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path if dest.is_dir(): try: - file_name = re.search('filename="(.+)"', resp.headers.get("Content-Disposition")).group(1) + file_name = re.search( + 'filename="(.+)"', resp.headers.get("Content-Disposition") + ).group(1) except: file_name = os.path.basename(url) dest = dest / file_name @@ -309,16 +312,14 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path exist_size = dest.stat().st_size header["Range"] = f"bytes={exist_size}-" open_mode = "ab" - resp = requests.get(url, headers=header, stream=True) # new request with range + resp = requests.get(url, headers=header, stream=True) # new request with range if exist_size > content_length: - print('* corrupt existing file found. re-downloading') + print("* corrupt existing file found. re-downloading") os.remove(dest) exist_size = 0 - if ( - resp.status_code == 416 or exist_size == content_length - ): + if resp.status_code == 416 or exist_size == content_length: print(f"* {dest}: complete file found. 
Skipping.") return dest elif resp.status_code == 206 or exist_size > 0: @@ -334,12 +335,12 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path return None with open(dest, open_mode) as file, tqdm( - desc=str(dest), - initial=exist_size, - total=content_length, - unit="iB", - unit_scale=True, - unit_divisor=1000, + desc=str(dest), + initial=exist_size, + total=content_length, + unit="iB", + unit_scale=True, + unit_divisor=1000, ) as bar: for data in resp.iter_content(chunk_size=1024): size = file.write(data) diff --git a/invokeai/backend/web/__init__.py b/invokeai/backend/web/__init__.py index ef771f61be..c57600f72b 100644 --- a/invokeai/backend/web/__init__.py +++ b/invokeai/backend/web/__init__.py @@ -1,4 +1,4 @@ -''' +""" Initialization file for the web backend. -''' +""" from .invoke_ai_web_server import InvokeAIWebServer diff --git a/invokeai/backend/web/invoke_ai_web_server.py b/invokeai/backend/web/invoke_ai_web_server.py index c05de2f831..80c9618098 100644 --- a/invokeai/backend/web/invoke_ai_web_server.py +++ b/invokeai/backend/web/invoke_ai_web_server.py @@ -12,32 +12,29 @@ from threading import Event from uuid import uuid4 import eventlet -import invokeai.frontend.web.dist as frontend +from compel.prompt_parser import Blend +from flask import Flask, make_response, redirect, request, send_from_directory +from flask_socketio import SocketIO from PIL import Image from PIL.Image import Image as ImageType -from compel.prompt_parser import Blend -from flask import Flask, redirect, send_from_directory, request, make_response -from flask_socketio import SocketIO from werkzeug.utils import secure_filename -from .modules.get_canvas_generation_mode import ( - get_canvas_generation_mode, -) -from .modules.parameters import parameters_to_command -from ..prompting import (get_tokens_for_prompt_object, - get_prompt_structure, - get_tokenizer - ) -from ..image_util import PngWriter, retrieve_metadata -from ..generator import infill_methods -from ..stable_diffusion import PipelineIntermediateState +import invokeai.frontend.web.dist as frontend from .. 
import Generate -from ..args import Args, APP_ID, APP_VERSION, calculate_init_img_hash -from ..globals import ( Globals, global_converted_ckpts_dir, - global_models_dir - ) +from ..args import APP_ID, APP_VERSION, Args, calculate_init_img_hash +from ..generator import infill_methods +from ..globals import Globals, global_converted_ckpts_dir, global_models_dir +from ..image_util import PngWriter, retrieve_metadata from ..model_management import merge_diffusion_models +from ..prompting import ( + get_prompt_structure, + get_tokenizer, + get_tokens_for_prompt_object, +) +from ..stable_diffusion import PipelineIntermediateState +from .modules.get_canvas_generation_mode import get_canvas_generation_mode +from .modules.parameters import parameters_to_command # Loading Arguments opt = Args() @@ -197,8 +194,7 @@ class InvokeAIWebServer: (width, height) = pil_image.size thumbnail_path = save_thumbnail( - pil_image, os.path.basename( - file_path), self.thumbnail_image_path + pil_image, os.path.basename(file_path), self.thumbnail_image_path ) response = { @@ -228,7 +224,7 @@ class InvokeAIWebServer: server="flask_socketio", width=1600, height=1000, - port=self.port + port=self.port, ).run() except KeyboardInterrupt: import sys @@ -269,16 +265,14 @@ class InvokeAIWebServer: # location for "finished" images self.result_path = args.outdir # temporary path for intermediates - self.intermediate_path = os.path.join( - self.result_path, "intermediates/") + self.intermediate_path = os.path.join(self.result_path, "intermediates/") # path for user-uploaded init images and masks self.init_image_path = os.path.join(self.result_path, "init-images/") self.mask_image_path = os.path.join(self.result_path, "mask-images/") # path for temp images e.g. gallery generations which are not committed self.temp_image_path = os.path.join(self.result_path, "temp-images/") # path for thumbnail images - self.thumbnail_image_path = os.path.join( - self.result_path, "thumbnails/") + self.thumbnail_image_path = os.path.join(self.result_path, "thumbnails/") # txt log self.log_path = os.path.join(self.result_path, "invoke_log.txt") # make all output paths @@ -303,21 +297,22 @@ class InvokeAIWebServer: config["infill_methods"] = infill_methods() socketio.emit("systemConfig", config) - @socketio.on('searchForModels') + @socketio.on("searchForModels") def handle_search_models(search_folder: str): try: if not search_folder: socketio.emit( "foundModels", - {'search_folder': None, 'found_models': None}, + {"search_folder": None, "found_models": None}, ) else: - search_folder, found_models = self.generate.model_manager.search_models( - search_folder) + ( + search_folder, + found_models, + ) = self.generate.model_manager.search_models(search_folder) socketio.emit( "foundModels", - {'search_folder': search_folder, - 'found_models': found_models}, + {"search_folder": search_folder, "found_models": found_models}, ) except Exception as e: self.handle_exceptions(e) @@ -326,11 +321,11 @@ class InvokeAIWebServer: @socketio.on("addNewModel") def handle_add_model(new_model_config: dict): try: - model_name = new_model_config['name'] - del new_model_config['name'] + model_name = new_model_config["name"] + del new_model_config["name"] model_attributes = new_model_config - if len(model_attributes['vae']) == 0: - del model_attributes['vae'] + if len(model_attributes["vae"]) == 0: + del model_attributes["vae"] update = False current_model_list = self.generate.model_manager.list_models() if model_name in current_model_list: @@ -339,14 +334,20 @@ class 
InvokeAIWebServer: print(f">> Adding New Model: {model_name}") self.generate.model_manager.add_model( - model_name=model_name, model_attributes=model_attributes, clobber=True) + model_name=model_name, + model_attributes=model_attributes, + clobber=True, + ) self.generate.model_manager.commit(opt.conf) new_model_list = self.generate.model_manager.list_models() socketio.emit( "newModelAdded", - {"new_model_name": model_name, - "model_list": new_model_list, 'update': update}, + { + "new_model_name": model_name, + "model_list": new_model_list, + "update": update, + }, ) print(f">> New Model Added: {model_name}") except Exception as e: @@ -361,8 +362,10 @@ class InvokeAIWebServer: updated_model_list = self.generate.model_manager.list_models() socketio.emit( "modelDeleted", - {"deleted_model_name": model_name, - "model_list": updated_model_list}, + { + "deleted_model_name": model_name, + "model_list": updated_model_list, + }, ) print(f">> Model Deleted: {model_name}") except Exception as e: @@ -387,41 +390,48 @@ class InvokeAIWebServer: except Exception as e: self.handle_exceptions(e) - @socketio.on('convertToDiffusers') + @socketio.on("convertToDiffusers") def convert_to_diffusers(model_to_convert: dict): try: - if (model_info := self.generate.model_manager.model_info(model_name=model_to_convert['model_name'])): - if 'weights' in model_info: - ckpt_path = Path(model_info['weights']) - original_config_file = Path(model_info['config']) - model_name = model_to_convert['model_name'] - model_description = model_info['description'] + if model_info := self.generate.model_manager.model_info( + model_name=model_to_convert["model_name"] + ): + if "weights" in model_info: + ckpt_path = Path(model_info["weights"]) + original_config_file = Path(model_info["config"]) + model_name = model_to_convert["model_name"] + model_description = model_info["description"] else: self.socketio.emit( - "error", {"message": "Model is not a valid checkpoint file"}) + "error", {"message": "Model is not a valid checkpoint file"} + ) else: self.socketio.emit( - "error", {"message": "Could not retrieve model info."}) + "error", {"message": "Could not retrieve model info."} + ) if not ckpt_path.is_absolute(): ckpt_path = Path(Globals.root, ckpt_path) if original_config_file and not original_config_file.is_absolute(): - original_config_file = Path( - Globals.root, original_config_file) + original_config_file = Path(Globals.root, original_config_file) diffusers_path = Path( - ckpt_path.parent.absolute(), - f'{model_name}_diffusers' + ckpt_path.parent.absolute(), f"{model_name}_diffusers" ) - if model_to_convert['save_location'] == 'root': + if model_to_convert["save_location"] == "root": diffusers_path = Path( - global_converted_ckpts_dir(), f'{model_name}_diffusers') + global_converted_ckpts_dir(), f"{model_name}_diffusers" + ) - if model_to_convert['save_location'] == 'custom' and model_to_convert['custom_location'] is not None: + if ( + model_to_convert["save_location"] == "custom" + and model_to_convert["custom_location"] is not None + ): diffusers_path = Path( - model_to_convert['custom_location'], f'{model_name}_diffusers') + model_to_convert["custom_location"], f"{model_name}_diffusers" + ) if diffusers_path.exists(): shutil.rmtree(diffusers_path) @@ -439,54 +449,67 @@ class InvokeAIWebServer: new_model_list = self.generate.model_manager.list_models() socketio.emit( "modelConverted", - {"new_model_name": model_name, - "model_list": new_model_list, 'update': True}, + { + "new_model_name": model_name, + "model_list": 
new_model_list, + "update": True, + }, ) print(f">> Model Converted: {model_name}") except Exception as e: self.handle_exceptions(e) - @socketio.on('mergeDiffusersModels') + @socketio.on("mergeDiffusersModels") def merge_diffusers_models(model_merge_info: dict): try: - models_to_merge = model_merge_info['models_to_merge'] + models_to_merge = model_merge_info["models_to_merge"] model_ids_or_paths = [ - self.generate.model_manager.model_name_or_path(x) for x in models_to_merge] + self.generate.model_manager.model_name_or_path(x) + for x in models_to_merge + ] merged_pipe = merge_diffusion_models( - model_ids_or_paths, model_merge_info['alpha'], model_merge_info['interp'], model_merge_info['force']) + model_ids_or_paths, + model_merge_info["alpha"], + model_merge_info["interp"], + model_merge_info["force"], + ) - dump_path = global_models_dir() / 'merged_models' - if model_merge_info['model_merge_save_path'] is not None: - dump_path = Path(model_merge_info['model_merge_save_path']) + dump_path = global_models_dir() / "merged_models" + if model_merge_info["model_merge_save_path"] is not None: + dump_path = Path(model_merge_info["model_merge_save_path"]) os.makedirs(dump_path, exist_ok=True) - dump_path = dump_path / model_merge_info['merged_model_name'] + dump_path = dump_path / model_merge_info["merged_model_name"] merged_pipe.save_pretrained(dump_path, safe_serialization=1) merged_model_config = dict( - model_name=model_merge_info['merged_model_name'], + model_name=model_merge_info["merged_model_name"], description=f'Merge of models {", ".join(models_to_merge)}', - commit_to_conf=opt.conf + commit_to_conf=opt.conf, ) - if vae := self.generate.model_manager.config[models_to_merge[0]].get("vae", None): - print( - f">> Using configured VAE assigned to {models_to_merge[0]}") + if vae := self.generate.model_manager.config[models_to_merge[0]].get( + "vae", None + ): + print(f">> Using configured VAE assigned to {models_to_merge[0]}") merged_model_config.update(vae=vae) self.generate.model_manager.import_diffuser_model( - dump_path, **merged_model_config) + dump_path, **merged_model_config + ) new_model_list = self.generate.model_manager.list_models() socketio.emit( "modelsMerged", - {"merged_models": models_to_merge, - "merged_model_name": model_merge_info['merged_model_name'], - "model_list": new_model_list, 'update': True}, + { + "merged_models": models_to_merge, + "merged_model_name": model_merge_info["merged_model_name"], + "model_list": new_model_list, + "update": True, + }, ) print(f">> Models Merged: {models_to_merge}") - print( - f">> New Model Added: {model_merge_info['merged_model_name']}") + print(f">> New Model Added: {model_merge_info['merged_model_name']}") except Exception as e: self.handle_exceptions(e) @@ -504,7 +527,8 @@ class InvokeAIWebServer: os.remove(thumbnail_path) except Exception as e: socketio.emit( - "error", {"message": f"Unable to delete {f}: {str(e)}"}) + "error", {"message": f"Unable to delete {f}: {str(e)}"} + ) pass socketio.emit("tempFolderEmptied") @@ -515,8 +539,7 @@ class InvokeAIWebServer: def save_temp_image_to_gallery(url): try: image_path = self.get_image_path_from_url(url) - new_path = os.path.join( - self.result_path, os.path.basename(image_path)) + new_path = os.path.join(self.result_path, os.path.basename(image_path)) shutil.copy2(image_path, new_path) if os.path.splitext(new_path)[1] == ".png": @@ -529,8 +552,7 @@ class InvokeAIWebServer: (width, height) = pil_image.size thumbnail_path = save_thumbnail( - pil_image, os.path.basename( - new_path), 
self.thumbnail_image_path + pil_image, os.path.basename(new_path), self.thumbnail_image_path ) image_array = [ @@ -589,8 +611,7 @@ class InvokeAIWebServer: (width, height) = pil_image.size thumbnail_path = save_thumbnail( - pil_image, os.path.basename( - path), self.thumbnail_image_path + pil_image, os.path.basename(path), self.thumbnail_image_path ) image_array.append( @@ -609,7 +630,8 @@ class InvokeAIWebServer: ) except Exception as e: socketio.emit( - "error", {"message": f"Unable to load {path}: {str(e)}"}) + "error", {"message": f"Unable to load {path}: {str(e)}"} + ) pass socketio.emit( @@ -659,8 +681,7 @@ class InvokeAIWebServer: (width, height) = pil_image.size thumbnail_path = save_thumbnail( - pil_image, os.path.basename( - path), self.thumbnail_image_path + pil_image, os.path.basename(path), self.thumbnail_image_path ) image_array.append( @@ -680,7 +701,8 @@ class InvokeAIWebServer: except Exception as e: print(f">> Unable to load {path}") socketio.emit( - "error", {"message": f"Unable to load {path}: {str(e)}"}) + "error", {"message": f"Unable to load {path}: {str(e)}"} + ) pass socketio.emit( @@ -714,10 +736,9 @@ class InvokeAIWebServer: printable_parameters["init_mask"][:64] + "..." ) - print( - f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n') - print(f'>> ESRGAN Parameters: {esrgan_parameters}') - print(f'>> Facetool Parameters: {facetool_parameters}') + print(f"\n>> Image Generation Parameters:\n\n{printable_parameters}\n") + print(f">> ESRGAN Parameters: {esrgan_parameters}") + print(f">> Facetool Parameters: {facetool_parameters}") self.generate_images( generation_parameters, @@ -754,11 +775,9 @@ class InvokeAIWebServer: if postprocessing_parameters["type"] == "esrgan": progress.set_current_status("common.statusUpscalingESRGAN") elif postprocessing_parameters["type"] == "gfpgan": - progress.set_current_status( - "common.statusRestoringFacesGFPGAN") + progress.set_current_status("common.statusRestoringFacesGFPGAN") elif postprocessing_parameters["type"] == "codeformer": - progress.set_current_status( - "common.statusRestoringFacesCodeFormer") + progress.set_current_status("common.statusRestoringFacesCodeFormer") socketio.emit("progressUpdate", progress.to_formatted_dict()) eventlet.sleep(0) @@ -923,8 +942,7 @@ class InvokeAIWebServer: init_img_url = generation_parameters["init_img"] - original_bounding_box = generation_parameters["bounding_box"].copy( - ) + original_bounding_box = generation_parameters["bounding_box"].copy() initial_image = dataURL_to_image( generation_parameters["init_img"] @@ -1001,8 +1019,9 @@ class InvokeAIWebServer: elif generation_parameters["generation_mode"] == "img2img": init_img_url = generation_parameters["init_img"] init_img_path = self.get_image_path_from_url(init_img_url) - generation_parameters["init_img"] = Image.open( - init_img_path).convert('RGB') + generation_parameters["init_img"] = Image.open(init_img_path).convert( + "RGB" + ) def image_progress(sample, step): if self.canceled.is_set(): @@ -1062,8 +1081,7 @@ class InvokeAIWebServer: ) if generation_parameters["progress_latents"]: - image = self.generate.sample_to_lowres_estimated_image( - sample) + image = self.generate.sample_to_lowres_estimated_image(sample) (width, height) = image.size width *= 8 height *= 8 @@ -1082,8 +1100,7 @@ class InvokeAIWebServer: }, ) - self.socketio.emit( - "progressUpdate", progress.to_formatted_dict()) + self.socketio.emit("progressUpdate", progress.to_formatted_dict()) eventlet.sleep(0) def image_done(image, seed, first_seed, 
attention_maps_image=None): @@ -1110,8 +1127,7 @@ class InvokeAIWebServer: progress.set_current_status("common.statusGenerationComplete") - self.socketio.emit( - "progressUpdate", progress.to_formatted_dict()) + self.socketio.emit("progressUpdate", progress.to_formatted_dict()) eventlet.sleep(0) all_parameters = generation_parameters @@ -1122,8 +1138,7 @@ class InvokeAIWebServer: and all_parameters["variation_amount"] > 0 ): first_seed = first_seed or seed - this_variation = [ - [seed, all_parameters["variation_amount"]]] + this_variation = [[seed, all_parameters["variation_amount"]]] all_parameters["with_variations"] = ( prior_variations + this_variation ) @@ -1139,14 +1154,13 @@ class InvokeAIWebServer: if esrgan_parameters: progress.set_current_status("common.statusUpscaling") progress.set_current_status_has_steps(False) - self.socketio.emit( - "progressUpdate", progress.to_formatted_dict()) + self.socketio.emit("progressUpdate", progress.to_formatted_dict()) eventlet.sleep(0) image = self.esrgan.process( image=image, upsampler_scale=esrgan_parameters["level"], - denoise_str=esrgan_parameters['denoise_str'], + denoise_str=esrgan_parameters["denoise_str"], strength=esrgan_parameters["strength"], seed=seed, ) @@ -1154,7 +1168,7 @@ class InvokeAIWebServer: postprocessing = True all_parameters["upscale"] = [ esrgan_parameters["level"], - esrgan_parameters['denoise_str'], + esrgan_parameters["denoise_str"], esrgan_parameters["strength"], ] @@ -1163,15 +1177,14 @@ class InvokeAIWebServer: if facetool_parameters: if facetool_parameters["type"] == "gfpgan": - progress.set_current_status( - "common.statusRestoringFacesGFPGAN") + progress.set_current_status("common.statusRestoringFacesGFPGAN") elif facetool_parameters["type"] == "codeformer": progress.set_current_status( - "common.statusRestoringFacesCodeFormer") + "common.statusRestoringFacesCodeFormer" + ) progress.set_current_status_has_steps(False) - self.socketio.emit( - "progressUpdate", progress.to_formatted_dict()) + self.socketio.emit("progressUpdate", progress.to_formatted_dict()) eventlet.sleep(0) if facetool_parameters["type"] == "gfpgan": @@ -1201,8 +1214,7 @@ class InvokeAIWebServer: all_parameters["facetool_type"] = facetool_parameters["type"] progress.set_current_status("common.statusSavingImage") - self.socketio.emit( - "progressUpdate", progress.to_formatted_dict()) + self.socketio.emit("progressUpdate", progress.to_formatted_dict()) eventlet.sleep(0) # restore the stashed URLS and discard the paths, we are about to send the result to client @@ -1219,8 +1231,7 @@ class InvokeAIWebServer: if generation_parameters["generation_mode"] == "unifiedCanvas": all_parameters["bounding_box"] = original_bounding_box - metadata = self.parameters_to_generated_image_metadata( - all_parameters) + metadata = self.parameters_to_generated_image_metadata(all_parameters) command = parameters_to_command(all_parameters) @@ -1250,22 +1261,27 @@ class InvokeAIWebServer: if progress.total_iterations > progress.current_iteration: progress.set_current_step(1) - progress.set_current_status( - "common.statusIterationComplete") + progress.set_current_status("common.statusIterationComplete") progress.set_current_status_has_steps(False) else: progress.mark_complete() - self.socketio.emit( - "progressUpdate", progress.to_formatted_dict()) + self.socketio.emit("progressUpdate", progress.to_formatted_dict()) eventlet.sleep(0) - parsed_prompt, _ = get_prompt_structure( - generation_parameters["prompt"]) - tokens = None if type(parsed_prompt) is Blend else \ - 
get_tokens_for_prompt_object(get_tokenizer(self.generate.model), parsed_prompt) - attention_maps_image_base64_url = None if attention_maps_image is None \ + parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"]) + tokens = ( + None + if type(parsed_prompt) is Blend + else get_tokens_for_prompt_object( + get_tokenizer(self.generate.model), parsed_prompt + ) + ) + attention_maps_image_base64_url = ( + None + if attention_maps_image is None else image_to_dataURL(attention_maps_image) + ) self.socketio.emit( "generationResult", @@ -1297,7 +1313,7 @@ class InvokeAIWebServer: self.generate.prompt2image( **generation_parameters, step_callback=diffusers_step_callback_adapter, - image_callback=image_done + image_callback=image_done, ) except KeyboardInterrupt: @@ -1420,8 +1436,7 @@ class InvokeAIWebServer: self, parameters, original_image_path ): try: - current_metadata = retrieve_metadata( - original_image_path)["sd-metadata"] + current_metadata = retrieve_metadata(original_image_path)["sd-metadata"] postprocessing_metadata = {} """ @@ -1461,8 +1476,7 @@ class InvokeAIWebServer: postprocessing_metadata ) else: - current_metadata["image"]["postprocessing"] = [ - postprocessing_metadata] + current_metadata["image"]["postprocessing"] = [postprocessing_metadata] return current_metadata @@ -1558,8 +1572,7 @@ class InvokeAIWebServer: ) elif "thumbnails" in url: return os.path.abspath( - os.path.join(self.thumbnail_image_path, - os.path.basename(url)) + os.path.join(self.thumbnail_image_path, os.path.basename(url)) ) else: return os.path.abspath( @@ -1605,7 +1618,7 @@ class InvokeAIWebServer: except Exception as e: self.handle_exceptions(e) - def handle_exceptions(self, exception, emit_key: str = 'error'): + def handle_exceptions(self, exception, emit_key: str = "error"): self.socketio.emit(emit_key, {"message": (str(exception))}) print("\n") traceback.print_exc() @@ -1719,7 +1732,7 @@ def dataURL_to_image(dataURL: str) -> ImageType: return image -def image_to_dataURL(image: ImageType, image_format:str="PNG") -> str: +def image_to_dataURL(image: ImageType, image_format: str = "PNG") -> str: """ Converts an image into a base64 image dataURL. 
""" diff --git a/invokeai/backend/web/modules/create_cmd_parser.py b/invokeai/backend/web/modules/create_cmd_parser.py index 919caeda91..856522989b 100644 --- a/invokeai/backend/web/modules/create_cmd_parser.py +++ b/invokeai/backend/web/modules/create_cmd_parser.py @@ -1,6 +1,7 @@ import argparse import os -from ldm.invoke.args import PRECISION_CHOICES + +from ...args import PRECISION_CHOICES def create_cmd_parser(): @@ -46,10 +47,10 @@ def create_cmd_parser(): default="auto", ) parser.add_argument( - '--free_gpu_mem', - dest='free_gpu_mem', - action='store_true', - help='Force free gpu memory before final decoding', + "--free_gpu_mem", + dest="free_gpu_mem", + action="store_true", + help="Force free gpu memory before final decoding", ) return parser diff --git a/invokeai/backend/web/modules/get_canvas_generation_mode.py b/invokeai/backend/web/modules/get_canvas_generation_mode.py index 602cef09d5..55955cc33c 100644 --- a/invokeai/backend/web/modules/get_canvas_generation_mode.py +++ b/invokeai/backend/web/modules/get_canvas_generation_mode.py @@ -1,6 +1,8 @@ +from typing import Literal, Union + from PIL import Image, ImageChops from PIL.Image import Image as ImageType -from typing import Union, Literal + # https://stackoverflow.com/questions/43864101/python-pil-check-if-image-is-transparent def check_for_any_transparency(img: Union[ImageType, str]) -> bool: @@ -85,9 +87,7 @@ def main(): print( "IMAGE WITH TRANSPARENCY, NO MASK, expect outpainting, got ", - get_canvas_generation_mode( - init_img_partial_transparency, init_mask_no_mask - ), + get_canvas_generation_mode(init_img_partial_transparency, init_mask_no_mask), ) print( @@ -102,9 +102,7 @@ def main(): print( "IMAGE WITH TRANSPARENCY, WITH MASK, expect outpainting, got ", - get_canvas_generation_mode( - init_img_partial_transparency, init_mask_has_mask - ), + get_canvas_generation_mode(init_img_partial_transparency, init_mask_has_mask), ) print( diff --git a/invokeai/backend/web/modules/parameters.py b/invokeai/backend/web/modules/parameters.py index a650fa37f9..3c9c530dd2 100644 --- a/invokeai/backend/web/modules/parameters.py +++ b/invokeai/backend/web/modules/parameters.py @@ -1,6 +1,7 @@ -from .parse_seed_weights import parse_seed_weights import argparse +from .parse_seed_weights import parse_seed_weights + SAMPLER_CHOICES = [ "ddim", "k_dpm_2_a", diff --git a/invokeai/configs/stable-diffusion/v1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-finetune.yaml index 96e7dd338a..8bbdb52159 100644 --- a/invokeai/configs/stable-diffusion/v1-finetune.yaml +++ b/invokeai/configs/stable-diffusion/v1-finetune.yaml @@ -1,6 +1,6 @@ model: base_learning_rate: 5.0e-03 - target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion + target: invokeai.backend.stable_diffusion.diffusion.ddpm.LatentDiffusion params: linear_start: 0.00085 linear_end: 0.0120 @@ -19,7 +19,7 @@ model: embedding_reg_weight: 0.0 personalization_config: - target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.stable_diffusion.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ["sculpture"] @@ -28,7 +28,7 @@ model: progressive_words: False unet_config: - target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -45,7 +45,7 @@ model: legacy: False first_stage_config: - target: invokeai.models.autoencoder.AutoencoderKL + target: 
invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss @@ -68,7 +68,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: invokeai.backend.stable_diffusion.encoders.modules.FrozenCLIPEmbedder data: target: main.DataModuleFromConfig @@ -77,14 +77,14 @@ data: num_workers: 2 wrap: false train: - target: invokeai.backend.ldm.data.personalized.PersonalizedBase + target: invokeai.backend.stable_diffusion.data.personalized.PersonalizedBase params: size: 512 set: train per_image_tokens: false repeats: 100 validation: - target: invokeai.backend.ldm.data.personalized.PersonalizedBase + target: invokeai.backend.stable_diffusion.data.personalized.PersonalizedBase params: size: 512 set: val diff --git a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml index 7433390ce3..3442971a5b 100644 --- a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml +++ b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml @@ -19,7 +19,7 @@ model: embedding_reg_weight: 0.0 personalization_config: - target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.stable_diffusion.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ["painting"] @@ -27,7 +27,7 @@ model: num_vectors_per_token: 1 unet_config: - target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -44,7 +44,7 @@ model: legacy: False first_stage_config: - target: invokeai.models.autoencoder.AutoencoderKL + target: invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss @@ -67,7 +67,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: invokeai.backend.stable_diffusion.encoders.modules.FrozenCLIPEmbedder data: target: main.DataModuleFromConfig @@ -76,14 +76,14 @@ data: num_workers: 16 wrap: false train: - target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase + target: invokeai.backend.stable_diffusion.data.personalized_style.PersonalizedBase params: size: 512 set: train per_image_tokens: false repeats: 100 validation: - target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase + target: invokeai.backend.stable_diffusion.data.personalized_style.PersonalizedBase params: size: 512 set: val diff --git a/invokeai/configs/stable-diffusion/v1-inference.yaml b/invokeai/configs/stable-diffusion/v1-inference.yaml index 911c756003..7bcfe28f53 100644 --- a/invokeai/configs/stable-diffusion/v1-inference.yaml +++ b/invokeai/configs/stable-diffusion/v1-inference.yaml @@ -18,7 +18,7 @@ model: use_ema: False scheduler_config: # 10000 warmup steps - target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler + target: invokeai.backend.stable_diffusion.lr_scheduler.LambdaLinearScheduler params: warm_up_steps: [ 10000 ] cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases @@ -27,7 +27,7 @@ model: f_min: [ 1. 
] personalization_config: - target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.stable_diffusion.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ['sculpture'] @@ -36,7 +36,7 @@ model: progressive_words: False unet_config: - target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -53,7 +53,7 @@ model: legacy: False first_stage_config: - target: invokeai.models.autoencoder.AutoencoderKL + target: invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss @@ -76,4 +76,4 @@ model: target: torch.nn.Identity cond_stage_config: - target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder + target: invokeai.backend.stable_diffusion.encoders.modules.WeightedFrozenCLIPEmbedder diff --git a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml index 76ef8d2750..f6433cf97d 100644 --- a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml +++ b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml @@ -18,7 +18,7 @@ model: finetune_keys: null scheduler_config: # 10000 warmup steps - target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler + target: invokeai.backend.stable_diffusion.lr_scheduler.LambdaLinearScheduler params: warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases @@ -27,7 +27,7 @@ model: f_min: [ 1. ] personalization_config: - target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.stable_diffusion.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ['sculpture'] @@ -36,7 +36,7 @@ model: progressive_words: False unet_config: - target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 9 # 4 data + 4 downscaled image + 1 mask @@ -53,7 +53,7 @@ model: legacy: False first_stage_config: - target: invokeai.models.autoencoder.AutoencoderKL + target: invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss @@ -76,4 +76,4 @@ model: target: torch.nn.Identity cond_stage_config: - target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder + target: invokeai.backend.stable_diffusion.encoders.modules.WeightedFrozenCLIPEmbedder diff --git a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml index d9533d9b4b..10255a9b70 100644 --- a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml +++ b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml @@ -19,7 +19,7 @@ model: embedding_reg_weight: 0.0 personalization_config: - target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager + target: invokeai.backend.stable_diffusion.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] initializer_words: ['sculpture'] @@ -28,7 +28,7 @@ model: progressive_words: False unet_config: - target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel + target: 
invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel params: image_size: 32 # unused in_channels: 4 @@ -45,7 +45,7 @@ model: legacy: False first_stage_config: - target: invokeai.models.autoencoder.AutoencoderKL + target: invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss @@ -68,7 +68,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: invokeai.backend.stable_diffusion.encoders.modules.FrozenCLIPEmbedder data: target: main.DataModuleFromConfig @@ -77,14 +77,14 @@ data: num_workers: 2 wrap: false train: - target: invokeai.backend.ldm.data.personalized.PersonalizedBase + target: invokeai.backend.stable_diffusion.data.personalized.PersonalizedBase params: size: 512 set: train per_image_tokens: false repeats: 100 validation: - target: invokeai.backend.ldm.data.personalized.PersonalizedBase + target: invokeai.backend.stable_diffusion.data.personalized.PersonalizedBase params: size: 512 set: val diff --git a/invokeai/configs/stable-diffusion/v2-inference-v.yaml b/invokeai/configs/stable-diffusion/v2-inference-v.yaml index 2a426483eb..47ecbe0334 100644 --- a/invokeai/configs/stable-diffusion/v2-inference-v.yaml +++ b/invokeai/configs/stable-diffusion/v2-inference-v.yaml @@ -19,7 +19,7 @@ model: use_ema: False # we set this to false because this is an inference only config unet_config: - target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel + target: invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel params: use_checkpoint: True use_fp16: True @@ -38,7 +38,7 @@ model: legacy: False first_stage_config: - target: invokeai.models.autoencoder.AutoencoderKL + target: invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL params: embed_dim: 4 monitor: val/rec_loss @@ -62,7 +62,7 @@ model: target: torch.nn.Identity cond_stage_config: - target: invokeai.backend.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + target: invokeai.backend.stable_diffusion.encoders.modules.FrozenOpenCLIPEmbedder params: freeze: True layer: "penultimate" diff --git a/invokeai/frontend/CLI/CLI.py b/invokeai/frontend/CLI/CLI.py index 0e74e4bff5..0a46c36ad0 100644 --- a/invokeai/frontend/CLI/CLI.py +++ b/invokeai/frontend/CLI/CLI.py @@ -8,7 +8,6 @@ from pathlib import Path from typing import Union import click - from compel import PromptParser if sys.platform == "darwin": @@ -18,22 +17,23 @@ import pyparsing # type: ignore import invokeai.version -from ...backend import Generate -from ...backend.args import (Args, - dream_cmd_from_png, - metadata_dumps, - metadata_from_png) -from ...backend.stable_diffusion import PipelineIntermediateState -from ...backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata -from ...backend import ModelManager +from ...backend import Generate, ModelManager +from ...backend.args import Args, dream_cmd_from_png, metadata_dumps, metadata_from_png from ...backend.globals import Globals -from ...backend.util import write_log +from ...backend.image_util import ( + PngWriter, + make_grid, + retrieve_metadata, + write_metadata, +) +from ...backend.stable_diffusion import PipelineIntermediateState +from ...backend.util import url_attachment_name, write_log from .readline import Completer, get_completer -from ...backend.util import url_attachment_name # global used in multiple functions (fix) infile = None + def main(): """Initialize command-line parsers and the diffusion 
model""" global infile @@ -494,7 +494,7 @@ def main_loop(gen, opt): def do_command(command: str, gen, opt: Args, completer) -> tuple: global infile operation = "generate" # default operation, alternative is 'postprocess' - command = command.replace('\\','/') # windows + command = command.replace("\\", "/") # windows if command.startswith( "!dream" @@ -537,10 +537,10 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple: import_model(path[1], gen, opt, completer) completer.add_history(command) except KeyboardInterrupt: - print('\n') + print("\n") operation = None - elif command.startswith(("!convert","!optimize")): + elif command.startswith(("!convert", "!optimize")): path = shlex.split(command) if len(path) < 2: print("** please provide the path to a .ckpt or .safetensors model") @@ -549,9 +549,9 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple: convert_model(path[1], gen, opt, completer) completer.add_history(command) except KeyboardInterrupt: - print('\n') + print("\n") operation = None - + elif command.startswith("!edit"): path = shlex.split(command) if len(path) < 2: @@ -639,12 +639,12 @@ def import_model(model_path: str, gen, opt, completer, convert=False): ): pass else: - if model_path.startswith(('http:','https:')): + if model_path.startswith(("http:", "https:")): try: default_name = url_attachment_name(model_path) default_name = Path(default_name).stem except Exception as e: - print(f'** URL: {str(e)}') + print(f"** URL: {str(e)}") model_name, model_desc = _get_model_name_and_desc( gen.model_manager, completer, @@ -672,6 +672,7 @@ def import_model(model_path: str, gen, opt, completer, convert=False): completer.update_models(gen.model_manager.list_models()) print(f">> {imported_name} successfully installed") + def _verify_load(model_name: str, gen) -> bool: print(">> Verifying that new model loads...") current_model = gen.model_name @@ -704,6 +705,7 @@ def _get_model_name_and_desc( ) return model_name, model_description + def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer): model_name_or_path = model_name_or_path.replace("\\", "/") # windows manager = gen.model_manager @@ -722,7 +724,9 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer): else: print(f"** {model_name_or_path} is not a legacy .ckpt weights file") return - if vae_repo := ldm.invoke.model_manager.VAE_TO_REPO_ID.get(Path(vae).stem): + if vae_repo := invokeai.backend.model_management.model_manager.VAE_TO_REPO_ID.get( + Path(vae).stem + ): vae_repo = dict(repo_id=vae_repo) else: vae_repo = None @@ -742,7 +746,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer): except KeyboardInterrupt: return - manager.commit(opt.conf) + manager.commit(opt.conf) if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False): ckpt_path.unlink(missing_ok=True) print(f"{ckpt_path} deleted") @@ -1106,7 +1110,7 @@ def make_step_callback(gen, opt, prefix): if step % opt.save_intermediates == 0 or step == opt.steps - 1: filename = os.path.join(destination, f"{step:04}.png") image = gen.sample_to_lowres_estimated_image(latents) - image = image.resize((image.size[0]*8,image.size[1]*8)) + image = image.resize((image.size[0] * 8, image.size[1] * 8)) image.save(filename, "PNG") return callback @@ -1190,8 +1194,8 @@ def report_model_error(opt: Namespace, e: Exception): ) else: if not click.confirm( - 'Do you want to run invokeai-configure script to select and/or reinstall models?', - default=False + "Do you want to 
run invokeai-configure script to select and/or reinstall models?", + default=False, ): return @@ -1209,9 +1213,9 @@ def report_model_error(opt: Namespace, e: Exception): for arg in yes_to_all.split(): sys.argv.append(arg) - from ldm.invoke.config import invokeai_configure + from ..install import invokeai_configure - invokeai_configure.main() + invokeai_configure() print("** InvokeAI will now restart") sys.argv = previous_args main() # would rather do a os.exec(), but doesn't exist? @@ -1232,6 +1236,6 @@ def check_internet() -> bool: except: return False -if __name__ == '__main__': + +if __name__ == "__main__": main() - diff --git a/invokeai/frontend/CLI/__init__.py b/invokeai/frontend/CLI/__init__.py index 4db5e778ff..7e48534cb9 100644 --- a/invokeai/frontend/CLI/__init__.py +++ b/invokeai/frontend/CLI/__init__.py @@ -1,4 +1,4 @@ -''' +""" Initialization file for invokeai.frontend.CLI -''' +""" from .CLI import main as invokeai_command_line_interface diff --git a/invokeai/frontend/CLI/readline.py b/invokeai/frontend/CLI/readline.py index 21e064662b..5a877ae810 100644 --- a/invokeai/frontend/CLI/readline.py +++ b/invokeai/frontend/CLI/readline.py @@ -4,13 +4,14 @@ You may import the global singleton `completer` to get access to the completer object itself. This is useful when you want to autocomplete seeds: - from ldm.invoke.readline import completer + from invokeai.frontend.CLI.readline import completer completer.add_seed(18247566) completer.add_seed(9281839) """ +import atexit import os import re -import atexit + from ...backend.args import Args from ...backend.globals import Globals from ...backend.stable_diffusion import HuggingFaceConceptsLibrary @@ -18,92 +19,128 @@ from ...backend.stable_diffusion import HuggingFaceConceptsLibrary # ---------------readline utilities--------------------- try: import readline + readline_available = True -except (ImportError,ModuleNotFoundError) as e: - print(f'** An error occurred when loading the readline module: {str(e)}') +except (ImportError, ModuleNotFoundError) as e: + print(f"** An error occurred when loading the readline module: {str(e)}") readline_available = False -IMG_EXTENSIONS = ('.png','.jpg','.jpeg','.PNG','.JPG','.JPEG','.gif','.GIF') -WEIGHT_EXTENSIONS = ('.ckpt','.vae','.safetensors') -TEXT_EXTENSIONS = ('.txt','.TXT') -CONFIG_EXTENSIONS = ('.yaml','.yml') +IMG_EXTENSIONS = (".png", ".jpg", ".jpeg", ".PNG", ".JPG", ".JPEG", ".gif", ".GIF") +WEIGHT_EXTENSIONS = (".ckpt", ".vae", ".safetensors") +TEXT_EXTENSIONS = (".txt", ".TXT") +CONFIG_EXTENSIONS = (".yaml", ".yml") COMMANDS = ( - '--steps','-s', - '--seed','-S', - '--iterations','-n', - '--width','-W','--height','-H', - '--cfg_scale','-C', - '--threshold', - '--perlin', - '--grid','-g', - '--individual','-i', - '--save_intermediates', - '--init_img','-I', - '--init_mask','-M', - '--init_color', - '--strength','-f', - '--variants','-v', - '--outdir','-o', - '--sampler','-A','-m', - '--embedding_path', - '--device', - '--grid','-g', - '--facetool','-ft', - '--facetool_strength','-G', - '--codeformer_fidelity','-cf', - '--upscale','-U', - '-save_orig','--save_original', - '--log_tokenization','-t', - '--hires_fix', - '--inpaint_replace','-r', - '--png_compression','-z', - '--text_mask','-tm', - '--h_symmetry_time_pct', - '--v_symmetry_time_pct', - '!fix','!fetch','!replay','!history','!search','!clear', - '!models','!switch','!import_model','!optimize_model','!convert_model','!edit_model','!del_model', - '!mask','!triggers', - ) -MODEL_COMMANDS = ( - '!switch', - '!edit_model', - 
'!del_model', - ) -CKPT_MODEL_COMMANDS = ( - '!optimize_model', + "--steps", + "-s", + "--seed", + "-S", + "--iterations", + "-n", + "--width", + "-W", + "--height", + "-H", + "--cfg_scale", + "-C", + "--threshold", + "--perlin", + "--grid", + "-g", + "--individual", + "-i", + "--save_intermediates", + "--init_img", + "-I", + "--init_mask", + "-M", + "--init_color", + "--strength", + "-f", + "--variants", + "-v", + "--outdir", + "-o", + "--sampler", + "-A", + "-m", + "--embedding_path", + "--device", + "--grid", + "-g", + "--facetool", + "-ft", + "--facetool_strength", + "-G", + "--codeformer_fidelity", + "-cf", + "--upscale", + "-U", + "-save_orig", + "--save_original", + "--log_tokenization", + "-t", + "--hires_fix", + "--inpaint_replace", + "-r", + "--png_compression", + "-z", + "--text_mask", + "-tm", + "--h_symmetry_time_pct", + "--v_symmetry_time_pct", + "!fix", + "!fetch", + "!replay", + "!history", + "!search", + "!clear", + "!models", + "!switch", + "!import_model", + "!optimize_model", + "!convert_model", + "!edit_model", + "!del_model", + "!mask", + "!triggers", ) +MODEL_COMMANDS = ( + "!switch", + "!edit_model", + "!del_model", +) +CKPT_MODEL_COMMANDS = ("!optimize_model",) WEIGHT_COMMANDS = ( - '!import_model', - '!convert_model', - ) -IMG_PATH_COMMANDS = ( - '--outdir[=\s]', - ) -TEXT_PATH_COMMANDS=( - '!replay', - ) -IMG_FILE_COMMANDS=( - '!fix', - '!fetch', - '!mask', - '--init_img[=\s]','-I', - '--init_mask[=\s]','-M', - '--init_color[=\s]', - '--embedding_path[=\s]', - ) + "!import_model", + "!convert_model", +) +IMG_PATH_COMMANDS = ("--outdir[=\s]",) +TEXT_PATH_COMMANDS = ("!replay",) +IMG_FILE_COMMANDS = ( + "!fix", + "!fetch", + "!mask", + "--init_img[=\s]", + "-I", + "--init_mask[=\s]", + "-M", + "--init_color[=\s]", + "--embedding_path[=\s]", +) + +path_regexp = "(" + "|".join(IMG_PATH_COMMANDS + IMG_FILE_COMMANDS) + ")\s*\S*$" +weight_regexp = "(" + "|".join(WEIGHT_COMMANDS) + ")\s*\S*$" +text_regexp = "(" + "|".join(TEXT_PATH_COMMANDS) + ")\s*\S*$" -path_regexp = '(' + '|'.join(IMG_PATH_COMMANDS+IMG_FILE_COMMANDS) + ')\s*\S*$' -weight_regexp = '(' + '|'.join(WEIGHT_COMMANDS) + ')\s*\S*$' -text_regexp = '(' + '|'.join(TEXT_PATH_COMMANDS) + ')\s*\S*$' class Completer(object): def __init__(self, options, models={}): - self.options = sorted(options) - self.models = models - self.seeds = set() - self.matches = list() + self.options = sorted(options) + self.models = models + self.seeds = set() + self.matches = list() self.default_dir = None - self.linebuffer = None + self.linebuffer = None self.auto_history_active = True self.extensions = None self.concepts = None @@ -111,40 +148,41 @@ class Completer(object): return def complete(self, text, state): - ''' + """ Completes invoke command line. BUG: it doesn't correctly complete files that have spaces in the name. 
- ''' + """ buffer = readline.get_line_buffer() if state == 0: - # extensions defined, so go directly into path completion mode if self.extensions is not None: self.matches = self._path_completions(text, state, self.extensions) # looking for an image file - elif re.search(path_regexp,buffer): - do_shortcut = re.search('^'+'|'.join(IMG_FILE_COMMANDS),buffer) - self.matches = self._path_completions(text, state, IMG_EXTENSIONS,shortcut_ok=do_shortcut) + elif re.search(path_regexp, buffer): + do_shortcut = re.search("^" + "|".join(IMG_FILE_COMMANDS), buffer) + self.matches = self._path_completions( + text, state, IMG_EXTENSIONS, shortcut_ok=do_shortcut + ) # looking for a seed - elif re.search('(-S\s*|--seed[=\s])\d*$',buffer): - self.matches= self._seed_completions(text,state) + elif re.search("(-S\s*|--seed[=\s])\d*$", buffer): + self.matches = self._seed_completions(text, state) # looking for an embedding concept - elif re.search('<[\w-]*$',buffer): - self.matches= self._concept_completions(text,state) + elif re.search("<[\w-]*$", buffer): + self.matches = self._concept_completions(text, state) # looking for a model - elif re.match('^'+'|'.join(MODEL_COMMANDS),buffer): - self.matches= self._model_completions(text, state) + elif re.match("^" + "|".join(MODEL_COMMANDS), buffer): + self.matches = self._model_completions(text, state) # looking for a ckpt model - elif re.match('^'+'|'.join(CKPT_MODEL_COMMANDS),buffer): - self.matches= self._model_completions(text, state, ckpt_only=True) + elif re.match("^" + "|".join(CKPT_MODEL_COMMANDS), buffer): + self.matches = self._model_completions(text, state, ckpt_only=True) - elif re.search(weight_regexp,buffer): + elif re.search(weight_regexp, buffer): self.matches = self._path_completions( text, state, @@ -152,14 +190,12 @@ class Completer(object): default_dir=Globals.root, ) - elif re.search(text_regexp,buffer): + elif re.search(text_regexp, buffer): self.matches = self._path_completions(text, state, TEXT_EXTENSIONS) # This is the first time for this text, so build a match list. elif text: - self.matches = [ - s for s in self.options if s and s.startswith(text) - ] + self.matches = [s for s in self.options if s and s.startswith(text)] else: self.matches = self.options[:] @@ -171,50 +207,50 @@ class Completer(object): response = None return response - def complete_extensions(self, extensions:list): - ''' + def complete_extensions(self, extensions: list): + """ If called with a list of extensions, will force completer to do file path completions. - ''' - self.extensions=extensions + """ + self.extensions = extensions - def add_history(self,line): - ''' + def add_history(self, line): + """ Pass thru to readline - ''' + """ if not self.auto_history_active: readline.add_history(line) def clear_history(self): - ''' + """ Pass clear_history() thru to readline - ''' + """ readline.clear_history() - def search_history(self,match:str): - ''' + def search_history(self, match: str): + """ Like show_history() but only shows items that contain the match string. - ''' + """ self.show_history(match) - def remove_history_item(self,pos): + def remove_history_item(self, pos): readline.remove_history_item(pos) def add_seed(self, seed): - ''' + """ Add a seed to the autocomplete list for display when -S is autocompleted. 
- ''' + """ if seed is not None: self.seeds.add(str(seed)) def set_default_dir(self, path): - self.default_dir=path + self.default_dir = path - def set_options(self,options): + def set_options(self, options): self.options = options - def get_line(self,index): + def get_line(self, index): try: line = self.get_history_item(index) except IndexError: @@ -224,57 +260,58 @@ class Completer(object): def get_current_history_length(self): return readline.get_current_history_length() - def get_history_item(self,index): + def get_history_item(self, index): return readline.get_history_item(index) - def show_history(self,match=None): - ''' + def show_history(self, match=None): + """ Print the session history using the pydoc pager - ''' + """ import pydoc + lines = list() h_len = self.get_current_history_length() if h_len < 1: - print('') + print("") return - for i in range(0,h_len): - line = self.get_history_item(i+1) + for i in range(0, h_len): + line = self.get_history_item(i + 1) if match and match not in line: continue - lines.append(f'[{i+1}] {line}') - pydoc.pager('\n'.join(lines)) + lines.append(f"[{i+1}] {line}") + pydoc.pager("\n".join(lines)) - def set_line(self,line)->None: - ''' + def set_line(self, line) -> None: + """ Set the default string displayed in the next line of input. - ''' + """ self.linebuffer = line readline.redisplay() - def update_models(self,models:dict)->None: - ''' + def update_models(self, models: dict) -> None: + """ update our list of models - ''' + """ self.models = models def _seed_completions(self, text, state): - m = re.search('(-S\s?|--seed[=\s]?)(\d*)',text) + m = re.search("(-S\s?|--seed[=\s]?)(\d*)", text) if m: - switch = m.groups()[0] + switch = m.groups()[0] partial = m.groups()[1] else: - switch = '' + switch = "" partial = text matches = list() for s in self.seeds: if s.startswith(partial): - matches.append(switch+s) + matches.append(switch + s) matches.sort() return matches - def add_embedding_terms(self, terms:list[str]): + def add_embedding_terms(self, terms: list[str]): self.embedding_terms = set(terms) if self.concepts: self.embedding_terms.update(set(self.concepts.list_concepts())) @@ -294,27 +331,27 @@ class Completer(object): matches = list() for concept in self.embedding_terms: if concept.startswith(partial): - matches.append(f'<{concept}>') + matches.append(f"<{concept}>") matches.sort() return matches def _model_completions(self, text, state, ckpt_only=False): - m = re.search('(!switch\s+)(\w*)',text) + m = re.search("(!switch\s+)(\w*)", text) if m: - switch = m.groups()[0] + switch = m.groups()[0] partial = m.groups()[1] else: - switch = '' + switch = "" partial = text matches = list() for s in self.models: - format = self.models[s]['format'] - if format == 'vae': + format = self.models[s]["format"] + if format == "vae": continue - if ckpt_only and format != 'ckpt': + if ckpt_only and format != "ckpt": continue if s.startswith(partial): - matches.append(switch+s) + matches.append(switch + s) matches.sort() return matches @@ -324,14 +361,16 @@ class Completer(object): readline.redisplay() self.linebuffer = None - def _path_completions(self, text, state, extensions, shortcut_ok=True, default_dir:str=''): + def _path_completions( + self, text, state, extensions, shortcut_ok=True, default_dir: str = "" + ): # separate the switch from the partial path - match = re.search('^(-\w|--\w+=?)(.*)',text) + match = re.search("^(-\w|--\w+=?)(.*)", text) if match is None: switch = None partial_path = text else: - switch,partial_path = match.groups() + switch, 
partial_path = match.groups() partial_path = partial_path.lstrip() @@ -340,18 +379,18 @@ class Completer(object): if os.path.isdir(path): dir = path - elif os.path.dirname(path) != '': + elif os.path.dirname(path) != "": dir = os.path.dirname(path) else: - dir = default_dir if os.path.exists(default_dir) else '' - path= os.path.join(dir,path) + dir = default_dir if os.path.exists(default_dir) else "" + path = os.path.join(dir, path) - dir_list = os.listdir(dir or '.') - if shortcut_ok and os.path.exists(self.default_dir) and dir=='': + dir_list = os.listdir(dir or ".") + if shortcut_ok and os.path.exists(self.default_dir) and dir == "": dir_list += os.listdir(self.default_dir) for node in dir_list: - if node.startswith('.') and len(node) > 1: + if node.startswith(".") and len(node) > 1: continue full_path = os.path.join(dir, node) @@ -362,25 +401,26 @@ class Completer(object): continue if switch is None: - match_path = os.path.join(dir,node) - matches.append(match_path+'/' if os.path.isdir(full_path) else match_path) + match_path = os.path.join(dir, node) + matches.append( + match_path + "/" if os.path.isdir(full_path) else match_path + ) elif os.path.isdir(full_path): matches.append( - switch+os.path.join(os.path.dirname(full_path), node) + '/' + switch + os.path.join(os.path.dirname(full_path), node) + "/" ) elif node.endswith(extensions): - matches.append( - switch+os.path.join(os.path.dirname(full_path), node) - ) + matches.append(switch + os.path.join(os.path.dirname(full_path), node)) return matches + class DummyCompleter(Completer): - def __init__(self,options): + def __init__(self, options): super().__init__(options) self.history = list() - def add_history(self,line): + def add_history(self, line): self.history.append(line) def clear_history(self): @@ -389,37 +429,37 @@ class DummyCompleter(Completer): def get_current_history_length(self): return len(self.history) - def get_history_item(self,index): - return self.history[index-1] + def get_history_item(self, index): + return self.history[index - 1] - def remove_history_item(self,index): - return self.history.pop(index-1) + def remove_history_item(self, index): + return self.history.pop(index - 1) - def set_line(self,line): - print(f'# {line}') + def set_line(self, line): + print(f"# {line}") -def generic_completer(commands:list)->Completer: + +def generic_completer(commands: list) -> Completer: if readline_available: - completer = Completer(commands,[]) + completer = Completer(commands, []) readline.set_completer(completer.complete) readline.set_pre_input_hook(completer._pre_input_hook) - readline.set_completer_delims(' ') - readline.parse_and_bind('tab: complete') - readline.parse_and_bind('set print-completions-horizontally off') - readline.parse_and_bind('set page-completions on') - readline.parse_and_bind('set skip-completed-text on') - readline.parse_and_bind('set show-all-if-ambiguous on') + readline.set_completer_delims(" ") + readline.parse_and_bind("tab: complete") + readline.parse_and_bind("set print-completions-horizontally off") + readline.parse_and_bind("set page-completions on") + readline.parse_and_bind("set skip-completed-text on") + readline.parse_and_bind("set show-all-if-ambiguous on") else: completer = DummyCompleter(commands) return completer -def get_completer(opt:Args, models=[])->Completer: - if readline_available: - completer = Completer(COMMANDS,models) - readline.set_completer( - completer.complete - ) +def get_completer(opt: Args, models=[]) -> Completer: + if readline_available: + completer = 
Completer(COMMANDS, models) + + readline.set_completer(completer.complete) # pyreadline3 does not have a set_auto_history() method try: readline.set_auto_history(False) @@ -427,27 +467,29 @@ def get_completer(opt:Args, models=[])->Completer: except: completer.auto_history_active = True readline.set_pre_input_hook(completer._pre_input_hook) - readline.set_completer_delims(' ') - readline.parse_and_bind('tab: complete') - readline.parse_and_bind('set print-completions-horizontally off') - readline.parse_and_bind('set page-completions on') - readline.parse_and_bind('set skip-completed-text on') - readline.parse_and_bind('set show-all-if-ambiguous on') + readline.set_completer_delims(" ") + readline.parse_and_bind("tab: complete") + readline.parse_and_bind("set print-completions-horizontally off") + readline.parse_and_bind("set page-completions on") + readline.parse_and_bind("set skip-completed-text on") + readline.parse_and_bind("set show-all-if-ambiguous on") outdir = os.path.expanduser(opt.outdir) if os.path.isabs(outdir): - histfile = os.path.join(outdir,'.invoke_history') + histfile = os.path.join(outdir, ".invoke_history") else: - histfile = os.path.join(Globals.root, outdir, '.invoke_history') + histfile = os.path.join(Globals.root, outdir, ".invoke_history") try: readline.read_history_file(histfile) readline.set_history_length(1000) except FileNotFoundError: pass - except OSError: # file likely corrupted - newname = f'{histfile}.old' - print(f'## Your history file {histfile} couldn\'t be loaded and may be corrupted. Renaming it to {newname}') - os.replace(histfile,newname) + except OSError: # file likely corrupted + newname = f"{histfile}.old" + print( + f"## Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}" + ) + os.replace(histfile, newname) atexit.register(readline.write_history_file, histfile) else: diff --git a/invokeai/frontend/config/__init__.py b/invokeai/frontend/install/__init__.py similarity index 96% rename from invokeai/frontend/config/__init__.py rename to invokeai/frontend/install/__init__.py index 0d1d2aec75..fb8cdff1b3 100644 --- a/invokeai/frontend/config/__init__.py +++ b/invokeai/frontend/install/__init__.py @@ -1,7 +1,6 @@ -''' +""" Initialization file for invokeai.frontend.config -''' -from .model_install import main as invokeai_model_install +""" from .invokeai_configure import main as invokeai_configure from .invokeai_update import main as invokeai_update - +from .model_install import main as invokeai_model_install diff --git a/invokeai/frontend/config/invokeai_configure.py b/invokeai/frontend/install/invokeai_configure.py similarity index 93% rename from invokeai/frontend/config/invokeai_configure.py rename to invokeai/frontend/install/invokeai_configure.py index 748d7bb8ca..0df5fdb16f 100644 --- a/invokeai/frontend/config/invokeai_configure.py +++ b/invokeai/frontend/install/invokeai_configure.py @@ -1,4 +1,4 @@ -''' +""" Wrapper for invokeai.backend.configure.invokeai_configure -''' +""" from ...backend.config.invokeai_configure import main diff --git a/invokeai/frontend/config/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py similarity index 56% rename from invokeai/frontend/config/invokeai_update.py rename to invokeai/frontend/install/invokeai_update.py index d3a532c627..040067cff9 100644 --- a/invokeai/frontend/config/invokeai_update.py +++ b/invokeai/frontend/install/invokeai_update.py @@ -1,9 +1,10 @@ -''' +""" Minimalist updater script. 
Prompts user for the tag or branch to update to and runs pip install . -''' +""" import os import platform + import requests from rich import box, print from rich.console import Console, Group, group @@ -15,8 +16,8 @@ from rich.text import Text from invokeai.version import __version__ -INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive" -INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases" +INVOKE_AI_SRC = "https://github.com/invoke-ai/InvokeAI/archive" +INVOKE_AI_REL = "https://api.github.com/repos/invoke-ai/InvokeAI/releases" OS = platform.uname().system ARCH = platform.uname().machine @@ -27,21 +28,22 @@ if OS == "Windows": else: console = Console(style=Style(color="grey74", bgcolor="grey19")) -def get_versions()->dict: + +def get_versions() -> dict: return requests.get(url=INVOKE_AI_REL).json() + def welcome(versions: dict): - @group() def text(): - yield f'InvokeAI Version: [bold yellow]{__version__}' - yield '' - yield 'This script will update InvokeAI to the latest release, or to a development version of your choice.' - yield '' - yield '[bold yellow]Options:' - yield f'''[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic]) + yield f"InvokeAI Version: [bold yellow]{__version__}" + yield "" + yield "This script will update InvokeAI to the latest release, or to a development version of your choice." + yield "" + yield "[bold yellow]Options:" + yield f"""[1] Update to the latest official release ([italic]{versions[0]['tag_name']}[/italic]) [2] Update to the bleeding-edge development version ([italic]main[/italic]) -[3] Manually enter the tag or branch name you wish to update''' +[3] Manually enter the tag or branch name you wish to update""" console.rule() print( @@ -57,32 +59,33 @@ def welcome(versions: dict): ) console.line() + def main(): versions = get_versions() welcome(versions) tag = None - choice = Prompt.ask('Choice:',choices=['1','2','3'],default='1') - - if choice=='1': - tag = versions[0]['tag_name'] - elif choice=='2': - tag = 'main' - elif choice=='3': - tag = Prompt.ask('Enter an InvokeAI tag or branch name') + choice = Prompt.ask("Choice:", choices=["1", "2", "3"], default="1") - print(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]') - cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517' - print('') - print('') - if os.system(cmd)==0: - print(f':heavy_check_mark: Upgrade successful') + if choice == "1": + tag = versions[0]["tag_name"] + elif choice == "2": + tag = "main" + elif choice == "3": + tag = Prompt.ask("Enter an InvokeAI tag or branch name") + + print(f":crossed_fingers: Upgrading to [yellow]{tag}[/yellow]") + cmd = f"pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517" + print("") + print("") + if os.system(cmd) == 0: + print(f":heavy_check_mark: Upgrade successful") else: - print(f':exclamation: [bold red]Upgrade failed[/red bold]') - + print(f":exclamation: [bold red]Upgrade failed[/red bold]") + + if __name__ == "__main__": try: main() except KeyboardInterrupt: pass - diff --git a/invokeai/frontend/config/model_install.py b/invokeai/frontend/install/model_install.py similarity index 93% rename from invokeai/frontend/config/model_install.py rename to invokeai/frontend/install/model_install.py index f64a656211..e7b10c34e1 100644 --- a/invokeai/frontend/config/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -14,34 +14,42 @@ import os import sys from argparse import Namespace from pathlib import Path +from shutil import get_terminal_size from typing import List 
import npyscreen import torch from npyscreen import widget from omegaconf import OmegaConf -from shutil import get_terminal_size -from ...backend.util import choose_precision, choose_torch_device from invokeai.backend.globals import Globals, global_config_dir -from ...backend.config.model_install_backend import (Dataset_path, default_config_file, - default_dataset, get_root, - install_requested_models, - recommended_datasets, - ) -from .widgets import (MultiSelectColumns, TextBox, - OffsetButtonPress, CenteredTitleText, - set_min_terminal_size, - ) + +from ...backend.config.model_install_backend import ( + Dataset_path, + default_config_file, + default_dataset, + get_root, + install_requested_models, + recommended_datasets, +) +from ...backend.util import choose_precision, choose_torch_device +from .widgets import ( + CenteredTitleText, + MultiSelectColumns, + OffsetButtonPress, + TextBox, + set_min_terminal_size, +) # minimum size for the UI MIN_COLS = 120 MIN_LINES = 45 + class addModelsForm(npyscreen.FormMultiPage): # for responsive resizing - disabled - #FIX_MINIMUM_SIZE_WHEN_CREATED = False - + # FIX_MINIMUM_SIZE_WHEN_CREATED = False + def __init__(self, parentApp, name, multipage=False, *args, **keywords): self.multipage = multipage self.initial_models = OmegaConf.load(Dataset_path) @@ -71,13 +79,13 @@ class addModelsForm(npyscreen.FormMultiPage): npyscreen.FixedText, value="Use ctrl-N and ctrl-P to move to the ext and

revious fields,", editable=False, - color='CAUTION', + color="CAUTION", ) self.add_widget_intelligent( npyscreen.FixedText, value="Use cursor arrows to make a selection, and space to toggle checkboxes.", editable=False, - color='CAUTION' + color="CAUTION", ) self.nextrely += 1 if len(self.installed_models) > 0: @@ -147,30 +155,26 @@ class addModelsForm(npyscreen.FormMultiPage): ) self.add_widget_intelligent( CenteredTitleText, - name='== IMPORT LOCAL AND REMOTE MODELS ==', + name="== IMPORT LOCAL AND REMOTE MODELS ==", editable=False, color="CONTROL", ) self.nextrely -= 1 for line in [ - "In the box below, enter URLs, file paths, or HuggingFace repository IDs.", - "Separate model names by lines or whitespace (Use shift-control-V to paste):", + "In the box below, enter URLs, file paths, or HuggingFace repository IDs.", + "Separate model names by lines or whitespace (Use shift-control-V to paste):", ]: self.add_widget_intelligent( CenteredTitleText, name=line, editable=False, labelColor="CONTROL", - relx = 4, + relx=4, ) self.nextrely -= 1 self.import_model_paths = self.add_widget_intelligent( - TextBox, - max_height=7, - scroll_exit=True, - editable=True, - relx=4 + TextBox, max_height=7, scroll_exit=True, editable=True, relx=4 ) self.nextrely += 1 self.show_directory_fields = self.add_widget_intelligent( @@ -245,7 +249,7 @@ class addModelsForm(npyscreen.FormMultiPage): def resize(self): super().resize() - if hasattr(self,'models_selected'): + if hasattr(self, "models_selected"): self.models_selected.values = self._get_starter_model_labels() def _clear_scan_directory(self): @@ -325,10 +329,11 @@ class addModelsForm(npyscreen.FormMultiPage): selections = self.parentApp.user_selections # starter models to install/remove - if hasattr(self,'models_selected'): + if hasattr(self, "models_selected"): starter_models = dict( map( - lambda x: (self.starter_model_list[x], True), self.models_selected.value + lambda x: (self.starter_model_list[x], True), + self.models_selected.value, ) ) else: @@ -376,6 +381,7 @@ class AddModelApplication(npyscreen.NPSAppManaged): "MAIN", addModelsForm, name="Install Stable Diffusion Models" ) + # -------------------------------------------------------- def process_and_execute(opt: Namespace, selections: Namespace): models_to_remove = [ @@ -477,9 +483,9 @@ def main(): print( ">> Your InvokeAI root directory is not set up. Calling invokeai-configure." ) - import ldm.invoke.config.invokeai_configure + from invokeai.frontend.install import invokeai_configure - ldm.invoke.config.invokeai_configure.main() + invokeai_configure() sys.exit(0) try: @@ -499,6 +505,7 @@ def main(): "** Insufficient horizontal space for the interface. Please make your window wider and try again." 
) + # ------------------------------------- if __name__ == "__main__": main() diff --git a/invokeai/frontend/config/widgets.py b/invokeai/frontend/install/widgets.py similarity index 61% rename from invokeai/frontend/config/widgets.py rename to invokeai/frontend/install/widgets.py index 97832e7bb4..6c57b7cbd2 100644 --- a/invokeai/frontend/config/widgets.py +++ b/invokeai/frontend/install/widgets.py @@ -1,35 +1,39 @@ -''' +""" Widget class definitions used by model_select.py, merge_diffusers.py and textual_inversion.py -''' -import math -import platform -import npyscreen -import os -import sys +""" import curses +import math +import os +import platform import struct - +import sys from shutil import get_terminal_size +import npyscreen + + # ------------------------------------- def set_terminal_size(columns: int, lines: int): OS = platform.uname().system - if OS=="Windows": - os.system(f'mode con: cols={columns} lines={lines}') - elif OS in ['Darwin', 'Linux']: - import termios + if OS == "Windows": + os.system(f"mode con: cols={columns} lines={lines}") + elif OS in ["Darwin", "Linux"]: import fcntl + import termios + winsize = struct.pack("HHHH", lines, columns, 0, 0) fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize) sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=lines, cols=columns)) sys.stdout.flush() + def set_min_terminal_size(min_cols: int, min_lines: int): # make sure there's enough room for the ui term_cols, term_lines = get_terminal_size() - cols = max(term_cols, min_cols) + cols = max(term_cols, min_cols) lines = max(term_lines, min_lines) - set_terminal_size(cols,lines) + set_terminal_size(cols, lines) + class IntSlider(npyscreen.Slider): def translate_value(self): @@ -38,18 +42,20 @@ class IntSlider(npyscreen.Slider): stri = stri.rjust(l) return stri + # ------------------------------------- class CenteredTitleText(npyscreen.TitleText): - def __init__(self,*args,**keywords): - super().__init__(*args,**keywords) + def __init__(self, *args, **keywords): + super().__init__(*args, **keywords) self.resize() - + def resize(self): super().resize() maxy, maxx = self.parent.curses_pad.getmaxyx() label = self.name self.relx = (maxx - len(label)) // 2 - + + # ------------------------------------- class CenteredButtonPress(npyscreen.ButtonPress): def resize(self): @@ -57,21 +63,24 @@ class CenteredButtonPress(npyscreen.ButtonPress): maxy, maxx = self.parent.curses_pad.getmaxyx() label = self.name self.relx = (maxx - len(label)) // 2 - + + # ------------------------------------- class OffsetButtonPress(npyscreen.ButtonPress): - def __init__(self, screen, offset=0, *args, **keywords): + def __init__(self, screen, offset=0, *args, **keywords): super().__init__(screen, *args, **keywords) self.offset = offset - + def resize(self): maxy, maxx = self.parent.curses_pad.getmaxyx() width = len(self.name) self.relx = self.offset + (maxx - width) // 2 + class IntTitleSlider(npyscreen.TitleText): _entry_type = IntSlider + class FloatSlider(npyscreen.Slider): # this is supposed to adjust display precision, but doesn't def translate_value(self): @@ -80,85 +89,114 @@ class FloatSlider(npyscreen.Slider): stri = stri.rjust(l) return stri + class FloatTitleSlider(npyscreen.TitleText): _entry_type = FloatSlider + class MultiSelectColumns(npyscreen.MultiSelect): - def __init__(self, screen, columns: int=1, values: list=[], **keywords): + def __init__(self, screen, columns: int = 1, values: list = [], **keywords): self.columns = columns self.value_cnt = len(values) self.rows = 
math.ceil(self.value_cnt / self.columns) - super().__init__(screen,values=values, **keywords) + super().__init__(screen, values=values, **keywords) def make_contained_widgets(self): self._my_widgets = [] column_width = self.width // self.columns for h in range(self.value_cnt): self._my_widgets.append( - self._contained_widgets(self.parent, - rely=self.rely + (h % self.rows) * self._contained_widget_height, - relx=self.relx + (h // self.rows) * column_width, - max_width=column_width, - max_height=self.__class__._contained_widget_height, - ) + self._contained_widgets( + self.parent, + rely=self.rely + (h % self.rows) * self._contained_widget_height, + relx=self.relx + (h // self.rows) * column_width, + max_width=column_width, + max_height=self.__class__._contained_widget_height, + ) ) def set_up_handlers(self): super().set_up_handlers() - self.handlers.update({ - curses.KEY_UP: self.h_cursor_line_left, - curses.KEY_DOWN: self.h_cursor_line_right, - } - ) + self.handlers.update( + { + curses.KEY_UP: self.h_cursor_line_left, + curses.KEY_DOWN: self.h_cursor_line_right, + } + ) + def h_cursor_line_down(self, ch): self.cursor_line += self.rows if self.cursor_line >= len(self.values): - if self.scroll_exit: - self.cursor_line = len(self.values)-self.rows + if self.scroll_exit: + self.cursor_line = len(self.values) - self.rows self.h_exit_down(ch) return True - else: + else: self.cursor_line -= self.rows return True def h_cursor_line_up(self, ch): self.cursor_line -= self.rows - if self.cursor_line < 0: + if self.cursor_line < 0: if self.scroll_exit: self.cursor_line = 0 self.h_exit_up(ch) - else: + else: self.cursor_line = 0 - def h_cursor_line_left(self,ch): + def h_cursor_line_left(self, ch): super().h_cursor_line_up(ch) - - def h_cursor_line_right(self,ch): + + def h_cursor_line_right(self, ch): super().h_cursor_line_down(ch) + class TextBox(npyscreen.MultiLineEdit): def update(self, clear=True): - if clear: self.clear() + if clear: + self.clear() HEIGHT = self.height - WIDTH = self.width + WIDTH = self.width # draw box. 
self.parent.curses_pad.hline(self.rely, self.relx, curses.ACS_HLINE, WIDTH) - self.parent.curses_pad.hline(self.rely + HEIGHT, self.relx, curses.ACS_HLINE, WIDTH) - self.parent.curses_pad.vline(self.rely, self.relx, curses.ACS_VLINE, self.height) - self.parent.curses_pad.vline(self.rely, self.relx+WIDTH, curses.ACS_VLINE, HEIGHT) - + self.parent.curses_pad.hline( + self.rely + HEIGHT, self.relx, curses.ACS_HLINE, WIDTH + ) + self.parent.curses_pad.vline( + self.rely, self.relx, curses.ACS_VLINE, self.height + ) + self.parent.curses_pad.vline( + self.rely, self.relx + WIDTH, curses.ACS_VLINE, HEIGHT + ) + # draw corners - self.parent.curses_pad.addch(self.rely, self.relx, curses.ACS_ULCORNER, ) - self.parent.curses_pad.addch(self.rely, self.relx+WIDTH, curses.ACS_URCORNER, ) - self.parent.curses_pad.addch(self.rely+HEIGHT, self.relx, curses.ACS_LLCORNER, ) - self.parent.curses_pad.addch(self.rely+HEIGHT, self.relx+WIDTH, curses.ACS_LRCORNER, ) - + self.parent.curses_pad.addch( + self.rely, + self.relx, + curses.ACS_ULCORNER, + ) + self.parent.curses_pad.addch( + self.rely, + self.relx + WIDTH, + curses.ACS_URCORNER, + ) + self.parent.curses_pad.addch( + self.rely + HEIGHT, + self.relx, + curses.ACS_LLCORNER, + ) + self.parent.curses_pad.addch( + self.rely + HEIGHT, + self.relx + WIDTH, + curses.ACS_LRCORNER, + ) + # fool our superclass into thinking drawing area is smaller - this is really hacky but it seems to work - (relx,rely,height,width) = (self.relx, self.rely, self.height, self.width) + (relx, rely, height, width) = (self.relx, self.rely, self.height, self.width) self.relx += 1 self.rely += 1 self.height -= 1 self.width -= 1 super().update(clear=False) - (self.relx,self.rely,self.height,self.width) = (relx, rely, height, width) + (self.relx, self.rely, self.height, self.width) = (relx, rely, height, width) diff --git a/invokeai/frontend/merge/__init__.py b/invokeai/frontend/merge/__init__.py index 8e46a0621b..3a2e4474a5 100644 --- a/invokeai/frontend/merge/__init__.py +++ b/invokeai/frontend/merge/__init__.py @@ -1,4 +1,4 @@ -''' +""" Initialization file for invokeai.frontend.merge -''' +""" from .merge_diffusers import main as invokeai_merge_diffusers diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py index 82765af96d..18cdf4ff6a 100644 --- a/invokeai/frontend/merge/merge_diffusers.py +++ b/invokeai/frontend/merge/merge_diffusers.py @@ -1,5 +1,5 @@ """ -ldm.invoke.merge_diffusers exports a single function call merge_diffusion_models() +invokeai.frontend.merge exports a single function call merge_diffusion_models() used to merge 2-3 models together and create a new InvokeAI-registered diffusion model. 
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team @@ -20,13 +20,19 @@ from diffusers import logging as dlogging from npyscreen import widget from omegaconf import OmegaConf -from ...frontend.config.widgets import FloatTitleSlider -from ...backend.globals import (Globals, global_cache_dir, global_config_file, - global_models_dir, global_set_root) +from ...backend.globals import ( + Globals, + global_cache_dir, + global_config_file, + global_models_dir, + global_set_root, +) from ...backend.model_management import ModelManager +from ...frontend.install.widgets import FloatTitleSlider DEST_MERGED_MODEL_DIR = "merged_models" + def merge_diffusion_models( model_ids_or_paths: List[Union[str, Path]], alpha: float = 0.5, @@ -310,8 +316,8 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): self.merged_model_name.value = merged_model_name if selected_model3 > 0: - self.merge_method.values = ['add_difference ( A+(B-C) )'] - self.merged_model_name.value += f"+{models[selected_model3 -1]}" # In model3 there is one more element in the list (None). So we have to subtract one. + self.merge_method.values = ["add_difference ( A+(B-C) )"] + self.merged_model_name.value += f"+{models[selected_model3 -1]}" # In model3 there is one more element in the list (None). So we have to subtract one. else: self.merge_method.values = self.interpolations self.merge_method.value = 0 @@ -336,9 +342,9 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): ] if self.model3.value[0] > 0: models.append(model_names[self.model3.value[0] - 1]) - interp='add_difference' + interp = "add_difference" else: - interp=self.interpolations[self.merge_method.value[0]] + interp = self.interpolations[self.merge_method.value[0]] args = dict( models=models, @@ -453,7 +459,9 @@ def main(): "** You need to have at least two diffusers models defined in models.yaml in order to merge" ) else: - print("** Not enough room for the user interface. Try making this window larger.") + print( + "** Not enough room for the user interface. Try making this window larger." + ) sys.exit(-1) except Exception: print(">> An error occurred:") diff --git a/invokeai/frontend/training/__init__.py b/invokeai/frontend/training/__init__.py index 1aeece6b5f..db5d69fc7a 100644 --- a/invokeai/frontend/training/__init__.py +++ b/invokeai/frontend/training/__init__.py @@ -1,5 +1,4 @@ -''' +""" Initialization file for invokeai.frontend.training -''' +""" from .textual_inversion import main as invokeai_textual_inversion - diff --git a/invokeai/frontend/training/textual_inversion.py b/invokeai/frontend/training/textual_inversion.py index 5cd5d71909..e97284da3d 100755 --- a/invokeai/frontend/training/textual_inversion.py +++ b/invokeai/frontend/training/textual_inversion.py @@ -21,10 +21,8 @@ from npyscreen import widget from omegaconf import OmegaConf from invokeai.backend.globals import Globals, global_set_root -from ...backend.training import ( - do_textual_inversion_training, - parse_args, -) + +from ...backend.training import do_textual_inversion_training, parse_args TRAINING_DATA = "text-inversion-training-data" TRAINING_DIR = "text-inversion-output" @@ -448,9 +446,9 @@ def main(): print( "** You need to have at least one diffusers models defined in models.yaml in order to train" ) - elif str(e).startswith('addwstr'): + elif str(e).startswith("addwstr"): print( - '** Not enough window space for the interface. Please make your window larger and try again.' + "** Not enough window space for the interface. Please make your window larger and try again." 
) else: print(f"** An error has occurred: {str(e)}") diff --git a/invokeai/version/__init__.py b/invokeai/version/__init__.py index 805834bb9a..215477dce8 100644 --- a/invokeai/version/__init__.py +++ b/invokeai/version/__init__.py @@ -1,20 +1,26 @@ -''' +""" initialization file for invokeai -''' +""" import invokeai + from .invokeai_version import __version__ -__app_id__= 'invoke-ai/InvokeAI' -__app_name__= 'InvokeAI' + +__app_id__ = "invoke-ai/InvokeAI" +__app_name__ = "InvokeAI" # copy these attributes into the invokeai namespace -setattr(invokeai,'__version__',__version__) -setattr(invokeai,'__app_id__',__app_id__) -setattr(invokeai,'__app_name__',__app_name__) +setattr(invokeai, "__version__", __version__) +setattr(invokeai, "__app_id__", __app_id__) +setattr(invokeai, "__app_name__", __app_name__) + def _ignore_xformers_triton_message_on_windows(): import logging + logging.getLogger("xformers").addFilter( - lambda record: 'A matching Triton is not available' not in record.getMessage()) + lambda record: "A matching Triton is not available" not in record.getMessage() + ) + # In order to be effective, this needs to happen before anything could possibly import xformers. _ignore_xformers_triton_message_on_windows() diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py index 041471f37e..711c95b0cf 100644 --- a/invokeai/version/invokeai_version.py +++ b/invokeai/version/invokeai_version.py @@ -1 +1 @@ -__version__='3.0.0+a0' +__version__ = "3.0.0+a0" diff --git a/pyproject.toml b/pyproject.toml index 45fe8ef327..ed7c6563dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -105,15 +105,15 @@ dependencies = [ # legacy entrypoints; provided for backwards compatibility "invoke.py" = "invokeai.frontend.CLI:command_line_interface" -"configure_invokeai.py" = "invokeai.frontend.config:invokeai_configure" -"textual_inversion.py" = "invokeai.frontend.training:textual_inversion" +"configure_invokeai.py" = "invokeai.frontend.install:invokeai_configure" +"textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion" # modern entrypoints "invokeai" = "invokeai.frontend.CLI:invokeai_command_line_interface" -"invokeai-configure" = "invokeai.frontend.config:invokeai_configure" +"invokeai-configure" = "invokeai.frontend.install:invokeai_configure" "invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers" "invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion" -"invokeai-model-install" = "invokeai.frontend.config:invokeai_model_install" +"invokeai-model-install" = "invokeai.frontend.install:invokeai_model_install" "invokeai-update" = "invokeai.frontend.config:invokeai_update" [project.urls] @@ -144,3 +144,7 @@ version = { attr = "invokeai.version.__version__" } [tool.pytest.ini_options] addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch" + +[flake8] +max-line-length = 120 + From 44400d2a66abb2cf7996b6309d21f9725333efeb Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 01:07:31 -0500 Subject: [PATCH 13/19] fix incorrect import of merge code --- invokeai/backend/model_management/__init__.py | 2 ++ invokeai/frontend/merge/__init__.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py index 39411a853a..cf7527740b 100644 --- a/invokeai/backend/model_management/__init__.py +++ b/invokeai/backend/model_management/__init__.py @@ 
-6,3 +6,5 @@ from .convert_ckpt_to_diffusers import ( load_pipeline_from_original_stable_diffusion_ckpt, ) from .model_manager import ModelManager +from invokeai.frontend.merge import merge_diffusion_models + diff --git a/invokeai/frontend/merge/__init__.py b/invokeai/frontend/merge/__init__.py index 3a2e4474a5..fb892fd7db 100644 --- a/invokeai/frontend/merge/__init__.py +++ b/invokeai/frontend/merge/__init__.py @@ -1,4 +1,4 @@ """ Initialization file for invokeai.frontend.merge """ -from .merge_diffusers import main as invokeai_merge_diffusers +from .merge_diffusers import main as invokeai_merge_diffusers, merge_diffusion_models From d606abc544f660eef2e5b3f08c8055e52a96cce5 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 01:09:56 -0500 Subject: [PATCH 14/19] fix weblint call --- .github/workflows/lint-frontend.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/lint-frontend.yml b/.github/workflows/lint-frontend.yml index de8c1abf77..68fcc9eaef 100644 --- a/.github/workflows/lint-frontend.yml +++ b/.github/workflows/lint-frontend.yml @@ -3,14 +3,14 @@ name: Lint frontend on: pull_request: paths: - - 'invokeai/frontend/**' + - 'invokeai/frontend/web/**' push: paths: - - 'invokeai/frontend/**' + - 'invokeai/frontend/web/**' defaults: run: - working-directory: invokeai/frontend + working-directory: invokeai/frontend/web jobs: lint-frontend: From 955900507f63e66b558cfc97dfa946592e22eea6 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 01:34:38 -0500 Subject: [PATCH 15/19] fix issue with invokeai.version --- installer/lib/installer.py | 6 +++--- invokeai/frontend/CLI/CLI.py | 2 +- invokeai/frontend/web/package.json | 2 +- invokeai/version/__init__.py | 8 -------- 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/installer/lib/installer.py b/installer/lib/installer.py index beafa63286..8ab512eee8 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -291,7 +291,7 @@ class InvokeAiInstance: src = Path(__file__).parents[1].expanduser().resolve() # if the above directory contains one of these files, we'll do a source install next(src.glob("pyproject.toml")) - next(src.glob("ldm")) + next(src.glob("invokeai")) except StopIteration: print("Unable to find a wheel or perform a source install. Giving up.") @@ -342,14 +342,14 @@ class InvokeAiInstance: introduction() - from ldm.invoke.config import invokeai_configure + from invokeai.frontend.install import invokeai_configure # NOTE: currently the config script does its own arg parsing! this means the command-line switches # from the installer will also automatically propagate down to the config script. # this may change in the future with config refactoring! 
succeeded = False try: - invokeai_configure.main() + invokeai_configure() succeeded = True except requests.exceptions.ConnectionError as e: print(f'\nA network error was encountered during configuration and download: {str(e)}') diff --git a/invokeai/frontend/CLI/CLI.py b/invokeai/frontend/CLI/CLI.py index 0a46c36ad0..1c2fbb121f 100644 --- a/invokeai/frontend/CLI/CLI.py +++ b/invokeai/frontend/CLI/CLI.py @@ -15,7 +15,7 @@ if sys.platform == "darwin": import pyparsing # type: ignore -import invokeai.version +import invokeai.version as invokeai from ...backend import Generate, ModelManager from ...backend.args import Args, dream_cmd_from_png, metadata_dumps, metadata_from_png diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index c6bf0c6d45..c907a83cd9 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -3,7 +3,7 @@ "private": true, "version": "0.0.1", "scripts": { - "prepare": "cd ../../ && husky install invokeai/frontend/.husky", + "prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky", "dev": "vite dev", "build": "tsc && vite build", "preview": "vite preview", diff --git a/invokeai/version/__init__.py b/invokeai/version/__init__.py index 215477dce8..01ef84ea4d 100644 --- a/invokeai/version/__init__.py +++ b/invokeai/version/__init__.py @@ -1,19 +1,11 @@ """ initialization file for invokeai """ -import invokeai - from .invokeai_version import __version__ __app_id__ = "invoke-ai/InvokeAI" __app_name__ = "InvokeAI" -# copy these attributes into the invokeai namespace -setattr(invokeai, "__version__", __version__) -setattr(invokeai, "__app_id__", __app_id__) -setattr(invokeai, "__app_name__", __app_name__) - - def _ignore_xformers_triton_message_on_windows(): import logging From c0aff396d20ac507a4483d5e0760b6642f9a4998 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 01:44:55 -0500 Subject: [PATCH 16/19] fix ldm->invokeai pathnames in workflows --- .github/workflows/build-container.yml | 2 +- .github/workflows/pypi-release.yml | 2 +- .github/workflows/test-invoke-pip-skip.yml | 2 +- .github/workflows/test-invoke-pip.yml | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index efff1d2f8a..8444c76a61 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -9,7 +9,7 @@ on: - 'dev/docker/*' paths: - 'pyproject.toml' - - 'ldm/**' + - 'invokeai/**' - 'invokeai/backend/**' - 'invokeai/configs/**' - 'invokeai/frontend/dist/**' diff --git a/.github/workflows/pypi-release.yml b/.github/workflows/pypi-release.yml index 91d6e6b8e7..261d7d06a1 100644 --- a/.github/workflows/pypi-release.yml +++ b/.github/workflows/pypi-release.yml @@ -3,7 +3,7 @@ name: PyPI Release on: push: paths: - - 'ldm/invoke/_version.py' + - 'invokeai/version/invokeai_version.py' workflow_dispatch: jobs: diff --git a/.github/workflows/test-invoke-pip-skip.yml b/.github/workflows/test-invoke-pip-skip.yml index 6cce8c58da..b4b179db06 100644 --- a/.github/workflows/test-invoke-pip-skip.yml +++ b/.github/workflows/test-invoke-pip-skip.yml @@ -3,7 +3,7 @@ on: pull_request: paths-ignore: - 'pyproject.toml' - - 'ldm/**' + - 'invokeai/**' - 'invokeai/backend/**' - 'invokeai/configs/**' - 'invokeai/frontend/dist/**' diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 3ad3004240..9573700f5a 100644 --- a/.github/workflows/test-invoke-pip.yml +++ 
b/.github/workflows/test-invoke-pip.yml @@ -5,14 +5,14 @@ on: - 'main' paths: - 'pyproject.toml' - - 'ldm/**' + - 'invokeai/**' - 'invokeai/backend/**' - 'invokeai/configs/**' - 'invokeai/frontend/dist/**' pull_request: paths: - 'pyproject.toml' - - 'ldm/**' + - 'invokeai/**' - 'invokeai/backend/**' - 'invokeai/configs/**' - 'invokeai/frontend/dist/**' @@ -112,7 +112,7 @@ jobs: - name: set INVOKEAI_OUTDIR run: > python -c - "import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')" + "import os;from invokeai.backend.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')" >> ${{ matrix.github-env }} - name: run invokeai-configure From 3f53f1186dadbbefc05ae5c0b8f4c59a19b016ad Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 01:54:48 -0500 Subject: [PATCH 17/19] move diagnostic message to stderr; was confusing CI --- invokeai/backend/config/invokeai_configure.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/config/invokeai_configure.py b/invokeai/backend/config/invokeai_configure.py index 6f0a218dc1..4efc2166dd 100755 --- a/invokeai/backend/config/invokeai_configure.py +++ b/invokeai/backend/config/invokeai_configure.py @@ -6,13 +6,14 @@ # # Coauthor: Kevin Turner http://github.com/keturn # -print("Loading Python libraries...\n") +import sys +print("Loading Python libraries...\n",file=sys.stderr) + import argparse import io import os import re import shutil -import sys import traceback import warnings from argparse import Namespace From 70203e6e5ae9bab92b3fadede494b3c3ae216e18 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 14:36:43 -0500 Subject: [PATCH 18/19] CODEOWNERS coarse draft --- .github/CODEOWNERS | 59 +++++++++++++++++----------------------------- invokeai/README | 14 ++++++++--- 2 files changed, 32 insertions(+), 41 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9363e22853..3abca42b1a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,51 +1,34 @@ # continuous integration -/.github/workflows/ @mauwii @lstein @blessedcoolant +/.github/workflows/ @mauwii @lstein # documentation -/docs/ @lstein @mauwii @tildebyte @blessedcoolant -mkdocs.yml @lstein @mauwii @blessedcoolant +/docs/ @lstein @mauwii @tildebyte +/mkdocs.yml @lstein @mauwii + +# nodes +/invokeai/app/ @Kyle0654 @blessedcoolant # installation and configuration -/pyproject.toml @mauwii @lstein @ebr @blessedcoolant -/docker/ @mauwii @lstein @blessedcoolant -/scripts/ @ebr @lstein @blessedcoolant -/installer/ @ebr @lstein @tildebyte @blessedcoolant -ldm/invoke/config @lstein @ebr @blessedcoolant -invokeai/assets @lstein @ebr @blessedcoolant -invokeai/configs @lstein @ebr @blessedcoolant -/ldm/invoke/_version.py @lstein @blessedcoolant +/pyproject.toml @mauwii @lstein @blessedcoolant +/docker/ @mauwii @lstein +/scripts/ @ebr @lstein +/installer/ @lstein @ebr +/invokeai/assets @lstein @ebr +/invokeai/configs @lstein +/invokeai/version @lstein @blessedcoolant # web ui /invokeai/frontend @blessedcoolant @psychedelicious @lstein /invokeai/backend @blessedcoolant @psychedelicious @lstein -# generation and model management -/ldm/*.py @lstein @blessedcoolant -/ldm/generate.py @lstein @keturn @blessedcoolant -/ldm/invoke/args.py @lstein @blessedcoolant -/ldm/invoke/ckpt* @lstein @blessedcoolant -/ldm/invoke/ckpt_generator @lstein @blessedcoolant -/ldm/invoke/CLI.py @lstein @blessedcoolant 
-/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant -/ldm/invoke/generator @keturn @damian0815 @blessedcoolant -/ldm/invoke/globals.py @lstein @blessedcoolant -/ldm/invoke/merge_diffusers.py @lstein @blessedcoolant -/ldm/invoke/model_manager.py @lstein @blessedcoolant -/ldm/invoke/txt2mask.py @lstein @blessedcoolant -/ldm/invoke/patchmatch.py @Kyle0654 @blessedcoolant @lstein -/ldm/invoke/restoration @lstein @blessedcoolant +# generation, model management, postprocessing +/invokeai/backend @keturn @damian0815 @lstein @blessedcoolant @jpphoto -# attention, textual inversion, model configuration -/ldm/models @damian0815 @keturn @lstein @blessedcoolant -/ldm/modules @damian0815 @keturn @lstein @blessedcoolant +# front ends +/invokeai/frontend/CLI @lstein +/invokeai/frontend/install @lstein @ebr @mauwii +/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername +/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername +/invokeai/frontend/web @psychedelicious @blessedcoolant -# Nodes -apps/ @Kyle0654 @lstein @blessedcoolant - -# legacy REST API -# is CapableWeb still engaged? -/ldm/invoke/pngwriter.py @CapableWeb @lstein @blessedcoolant -/ldm/invoke/server_legacy.py @CapableWeb @lstein @blessedcoolant -/scripts/legacy_api.py @CapableWeb @lstein @blessedcoolant -/tests/legacy_tests.sh @CapableWeb @lstein @blessedcoolant diff --git a/invokeai/README b/invokeai/README index 1f1b039ddf..ecf145c85f 100644 --- a/invokeai/README +++ b/invokeai/README @@ -1,3 +1,11 @@ -After version 2.3 is released, the ldm/invoke modules will be migrated to this location -so that we have a proper invokeai distribution. Currently it is only being used for -data files. +Organization of the source tree: + +app -- Home of nodes invocations and services +assets -- Images and other data files used by InvokeAI +backend -- Non-user facing libraries, including the rendering + core. 
+configs -- Configuration files used at install and run times +frontend -- User-facing scripts, including the CLI and the WebUI +version -- Current InvokeAI version string, stored + in version/invokeai_version.py + \ No newline at end of file From 6477e31c1e990fe49328ba118b41e29c4d7cfe32 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 3 Mar 2023 14:59:17 -0500 Subject: [PATCH 19/19] revert and disable auto-formatting of invocations --- invokeai/app/invocations/generate.py | 49 ++++++------------------- invokeai/app/invocations/image.py | 5 ++- invokeai/app/invocations/prompt.py | 3 +- invokeai/app/invocations/reconstruct.py | 11 +++--- invokeai/app/invocations/upscale.py | 3 +- invokeai/app/services/item_storage.py | 4 +- 6 files changed, 25 insertions(+), 50 deletions(-) diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py index 83ad09a3f7..15c5f17438 100644 --- a/invokeai/app/invocations/generate.py +++ b/invokeai/app/invocations/generate.py @@ -26,45 +26,18 @@ class TextToImageInvocation(BaseInvocation): # Inputs # TODO: consider making prompt optional to enable providing prompt through a link + # fmt: off prompt: Optional[str] = Field(description="The prompt to generate an image from") - seed: int = Field( - default=-1, - ge=-1, - le=np.iinfo(np.uint32).max, - description="The seed to use (-1 for a random seed)", - ) - steps: int = Field( - default=10, gt=0, description="The number of steps to use to generate the image" - ) - width: int = Field( - default=512, - multiple_of=64, - gt=0, - description="The width of the resulting image", - ) - height: int = Field( - default=512, - multiple_of=64, - gt=0, - description="The height of the resulting image", - ) - cfg_scale: float = Field( - default=7.5, - gt=0, - description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", - ) - sampler_name: SAMPLER_NAME_VALUES = Field( - default="k_lms", description="The sampler to use" - ) - seamless: bool = Field( - default=False, - description="Whether or not to generate an image that can tile without seams", - ) - model: str = Field(default="", description="The model to use (currently ignored)") - progress_images: bool = Field( - default=False, - description="Whether or not to produce progress images during generation", - ) + seed: int = Field(default=-1,ge=-1, le=np.iinfo(np.uint32).max, description="The seed to use (-1 for a random seed)", ) + steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image") + width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting image", ) + height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting image", ) + cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", ) + sampler_name: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The sampler to use" ) + seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", ) + model: str = Field(default="", description="The model to use (currently ignored)") + progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", ) + # fmt: on # TODO: pass this an emitter method or something? or a session for dispatching? 
def dispatch_progress( diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index e0a302c24c..29e1c9e576 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -260,14 +260,15 @@ class LerpInvocation(BaseInvocation): class InverseLerpInvocation(BaseInvocation): """Inverse linear interpolation of all pixels of an image""" - + #fmt: off type: Literal["ilerp"] = "ilerp" # Inputs image: ImageField = Field(default=None, description="The image to lerp") min: int = Field(default=0, ge=0, le=255, description="The minimum input value") max: int = Field(default=255, ge=0, le=255, description="The maximum input value") - + #fmt: on + def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get( self.image.image_type, self.image.image_name diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py index 2c8a1c4989..3544f30859 100644 --- a/invokeai/app/invocations/prompt.py +++ b/invokeai/app/invocations/prompt.py @@ -7,7 +7,8 @@ from .baseinvocation import BaseInvocationOutput class PromptOutput(BaseInvocationOutput): """Base class for invocations that output a prompt""" - + #fmt: off type: Literal["prompt"] = "prompt" prompt: str = Field(default=None, description="The output prompt") + #fmt: on diff --git a/invokeai/app/invocations/reconstruct.py b/invokeai/app/invocations/reconstruct.py index 71a15c57e9..a90c33605e 100644 --- a/invokeai/app/invocations/reconstruct.py +++ b/invokeai/app/invocations/reconstruct.py @@ -11,15 +11,14 @@ from .image import ImageField, ImageOutput class RestoreFaceInvocation(BaseInvocation): """Restores faces in an image.""" - - type: Literal["restore_face"] = "restore_face" + #fmt: off + type: Literal["restore_face"] = "restore_face" # Inputs image: Union[ImageField, None] = Field(description="The input image") - strength: float = Field( - default=0.75, gt=0, le=1, description="The strength of the restoration" - ) - + strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the restoration" ) + #fmt: on + def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get( self.image.image_type, self.image.image_name diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index d1d8e4e2d4..dcc39fc9ad 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -13,13 +13,14 @@ from .image import ImageField, ImageOutput class UpscaleInvocation(BaseInvocation): """Upscales an image.""" - + #fmt: off type: Literal["upscale"] = "upscale" # Inputs image: Union[ImageField, None] = Field(description="The input image", default=None) strength: float = Field(default=0.75, gt=0, le=1, description="The strength") level: Literal[2, 4] = Field(default=2, description="The upscale level") + #fmt: on def invoke(self, context: InvocationContext) -> ImageOutput: image = context.services.images.get( diff --git a/invokeai/app/services/item_storage.py b/invokeai/app/services/item_storage.py index 83044e5a52..394f67797d 100644 --- a/invokeai/app/services/item_storage.py +++ b/invokeai/app/services/item_storage.py @@ -9,13 +9,13 @@ T = TypeVar("T", bound=BaseModel) class PaginatedResults(GenericModel, Generic[T]): """Paginated results""" - + #fmt: off items: list[T] = Field(description="Items") page: int = Field(description="Current Page") pages: int = Field(description="Total number of pages") per_page: int = Field(description="Number of items per page") 
total: int = Field(description="Total number of items in result") - + #fmt: on class ItemStorageABC(ABC, Generic[T]): _on_changed_callbacks: list[Callable[[T], None]]
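A note on the `# fmt: off` / `# fmt: on` markers introduced throughout PATCH 19: they tell the auto-formatter (presumably black; the commit message does not name the tool) to leave the enclosed region untouched, which is what lets the invocation classes keep their compact one-line-per-field pydantic declarations instead of the multi-line expansion being reverted here. Below is a minimal, self-contained sketch of the same pattern; the class and field names are invented for the example and are not taken from the patch.

from typing import Literal, Optional

from pydantic import BaseModel, Field


class ExampleInvocation(BaseModel):
    """Illustrative invocation-style model with compact field declarations."""

    # fmt: off
    type: Literal["example"] = "example"
    prompt: Optional[str] = Field(default=None, description="The prompt to use")
    steps: int  = Field(default=10,  gt=0, description="Number of steps")
    width: int  = Field(default=512, multiple_of=64, gt=0, description="Image width")
    height: int = Field(default=512, multiple_of=64, gt=0, description="Image height")
    # fmt: on

    # Everything between the two markers is skipped by the formatter, so the
    # aligned, one-line-per-field layout above survives future formatting runs.

Keeping each Field on a single line makes an invocation's input schema readable at a glance, which appears to be the motivation for reverting the earlier auto-formatting rather than accepting the expanded layout.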