From f79bebed8698a77b795511a438db7d9d8dbf0950 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Sat, 5 Nov 2022 09:49:13 -0700
Subject: [PATCH] =?UTF-8?q?lint(ldm.invoke.generator):=20=F0=9F=9A=AE=20re?=
 =?UTF-8?q?move=20unused=20imports?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ldm/invoke/generator/base.py        | 24 ++++++++++++++-----------
 ldm/invoke/generator/embiggen.py    | 14 ++++++++------
 ldm/invoke/generator/img2img.py     | 10 +++++-----
 ldm/invoke/generator/inpaint.py     | 34 +++++++++++++++-----------------
 ldm/invoke/generator/omnibus.py     | 16 +++++++---------
 ldm/invoke/generator/txt2img.py     |  3 +--
 ldm/invoke/generator/txt2img2img.py | 13 +++++++------
 7 files changed, 58 insertions(+), 56 deletions(-)

diff --git a/ldm/invoke/generator/base.py b/ldm/invoke/generator/base.py
index ba3172e9dc..1e30353066 100644
--- a/ldm/invoke/generator/base.py
+++ b/ldm/invoke/generator/base.py
@@ -2,17 +2,19 @@
 Base class for ldm.invoke.generator.* including img2img, txt2img, and inpaint
 '''
 
-import torch
-import numpy as np
-import random
 import os
 import os.path as osp
+import random
 import traceback
-from tqdm import tqdm, trange
+
+import numpy as np
+import torch
 from PIL import Image, ImageFilter, ImageChops
 import cv2 as cv
-from einops import rearrange, repeat
+from einops import rearrange
 from pytorch_lightning import seed_everything
+from tqdm import trange
+
 from ldm.invoke.devices import choose_autocast
 from ldm.util import rand_perlin_2d
 
@@ -103,7 +105,7 @@ class Generator():
                 seed = self.new_seed()
 
         return results
-    
+
     def sample_to_image(self,samples)->Image.Image:
         """
         Given samples returned from a sampler, converts
@@ -166,12 +168,12 @@ class Generator():
 
             blurred_init_mask = pil_init_mask
 
         multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1])
-        
+
         # Paste original on color-corrected generation (using blurred mask)
         matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
         return matched_result
-    
+
     def sample_to_lowres_estimated_image(self,samples):
         # origingally adapted from code by @erucipe and @keturn here:
@@ -219,11 +221,11 @@ class Generator():
         (txt2img) or from the latent image (img2img, inpaint)
         """
         raise NotImplementedError("get_noise() must be implemented in a descendent class")
-    
+
     def get_perlin_noise(self,width,height):
         fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
         return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
-    
+
     def new_seed(self):
         self.seed = random.randrange(0, np.iinfo(np.uint32).max)
         return self.seed
@@ -325,4 +327,4 @@ class Generator():
             os.makedirs(dirname, exist_ok=True)
         image.save(filepath,'PNG')
 
-    
+
diff --git a/ldm/invoke/generator/embiggen.py b/ldm/invoke/generator/embiggen.py
index dc6af35a6c..0b9fda7ac2 100644
--- a/ldm/invoke/generator/embiggen.py
+++ b/ldm/invoke/generator/embiggen.py
@@ -3,14 +3,16 @@
 ldm.invoke.generator.embiggen descends from ldm.invoke.generator
 and generates with ldm.invoke.generator.img2img
 '''
+import numpy as np
 import torch
-import numpy as np
+from PIL import Image
 from tqdm import trange
-from PIL import Image
-from ldm.invoke.generator.base import Generator
-from ldm.invoke.generator.img2img import Img2Img
+
 from ldm.invoke.devices import choose_autocast
-from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.invoke.generator.base import Generator
+from ldm.invoke.generator.img2img import Img2Img
+from ldm.models.diffusion.ddim import DDIMSampler
+
 
 class Embiggen(Generator):
     def __init__(self, model, precision):
@@ -493,7 +495,7 @@ class Embiggen(Generator):
                     # Layer tile onto final image
                     outputsuperimage.alpha_composite(intileimage, (left, top))
                 else:
-                    print(f'Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.')
+                    print('Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.')
 
         # after internal loops and patching up return Embiggen image
         return outputsuperimage
diff --git a/ldm/invoke/generator/img2img.py b/ldm/invoke/generator/img2img.py
index 1981b4eacb..edcc855a29 100644
--- a/ldm/invoke/generator/img2img.py
+++ b/ldm/invoke/generator/img2img.py
@@ -2,15 +2,15 @@
 ldm.invoke.generator.img2img descends from ldm.invoke.generator
 '''
 
-import torch
-import numpy as np
 import PIL
-from torch import Tensor
+import numpy as np
+import torch
 from PIL import Image
+from torch import Tensor
+
 from ldm.invoke.devices import choose_autocast
 from ldm.invoke.generator.base import Generator
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
+
 
 class Img2Img(Generator):
     def __init__(self, model, precision):
diff --git a/ldm/invoke/generator/inpaint.py b/ldm/invoke/generator/inpaint.py
index 7798ed00c5..02bac7c999 100644
--- a/ldm/invoke/generator/inpaint.py
+++ b/ldm/invoke/generator/inpaint.py
@@ -3,21 +3,21 @@
 ldm.invoke.generator.inpaint descends from ldm.invoke.generator
 '''
 
 import math
-import torch
-import torchvision.transforms as T
-import numpy as np
-import cv2 as cv
+
 import PIL
+import cv2 as cv
+import numpy as np
+import torch
 from PIL import Image, ImageFilter, ImageOps, ImageChops
-from skimage.exposure.histogram_matching import match_histograms
-from einops import rearrange, repeat
-from ldm.invoke.devices import choose_autocast
-from ldm.invoke.generator.img2img import Img2Img
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.ksampler import KSampler
+from einops import repeat
+
+from ldm.invoke.devices import choose_autocast
 from ldm.invoke.generator.base import downsampling
-from ldm.util import debug_image
+from ldm.invoke.generator.img2img import Img2Img
 from ldm.invoke.globals import Globals
+from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.models.diffusion.ksampler import KSampler
+from ldm.util import debug_image
 
 infill_methods: list[str] = list()
@@ -59,7 +59,7 @@ class Inpaint(Img2Img):
             writeable=False
         )
 
-    def infill_patchmatch(self, im: Image.Image) -> Image: 
+    def infill_patchmatch(self, im: Image.Image) -> Image:
         if im.mode != 'RGBA':
             return im
 
@@ -128,7 +128,7 @@ class Inpaint(Img2Img):
         # Combine
         npmask = npgradient + npedge
 
-        # Expand 
+        # Expand
         npmask = cv.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2))
 
         new_mask = Image.fromarray(npmask)
@@ -221,7 +221,7 @@ class Inpaint(Img2Img):
             init_filled = init_filled.resize((inpaint_width, inpaint_height))
 
         debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging)
-        
+
         # Create init tensor
         init_image = self._image_to_tensor(init_filled.convert('RGB'))
 
@@ -251,10 +251,10 @@ class Inpaint(Img2Img):
         # klms samplers not supported yet, so ignore previous sampler
         if isinstance(sampler,KSampler):
             print(
-                f">> Using recommended DDIM sampler for inpainting."
+ ">> Using recommended DDIM sampler for inpainting." ) sampler = DDIMSampler(self.model, device=self.model.device) - + sampler.make_schedule( ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False ) @@ -353,7 +353,7 @@ class Inpaint(Img2Img): if self.pil_image is None or self.pil_mask is None: return gen_result - + corrected_result = super().repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging) diff --git a/ldm/invoke/generator/omnibus.py b/ldm/invoke/generator/omnibus.py index 277e897021..3a17062d01 100644 --- a/ldm/invoke/generator/omnibus.py +++ b/ldm/invoke/generator/omnibus.py @@ -1,14 +1,14 @@ """omnibus module to be used with the runwayml 9-channel custom inpainting model""" import torch -import numpy as np +from PIL import Image, ImageOps from einops import repeat -from PIL import Image, ImageOps, ImageChops + from ldm.invoke.devices import choose_autocast -from ldm.invoke.generator.base import downsampling from ldm.invoke.generator.img2img import Img2Img from ldm.invoke.generator.txt2img import Txt2Img + class Omnibus(Img2Img,Txt2Img): def __init__(self, model, precision): super().__init__(model, precision) @@ -58,11 +58,9 @@ class Omnibus(Img2Img,Txt2Img): self.mask_blur_radius = mask_blur_radius - t_enc = steps - if init_image is not None and mask_image is not None: # inpainting masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero - + elif init_image is not None: # img2img scope = choose_autocast(self.precision) @@ -99,7 +97,7 @@ class Omnibus(Img2Img,Txt2Img): device=model.device, num_samples=num_samples, ) - + c = model.cond_stage_model.encode(batch["txt"]) c_cat = list() for ck in model.concat_keys: @@ -164,10 +162,10 @@ class Omnibus(Img2Img,Txt2Img): def sample_to_image(self, samples)->Image.Image: gen_result = super().sample_to_image(samples).convert('RGB') - + if self.pil_image is None or self.pil_mask is None: return gen_result corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius) - + return corrected_result diff --git a/ldm/invoke/generator/txt2img.py b/ldm/invoke/generator/txt2img.py index ba49d2ef55..a04207259b 100644 --- a/ldm/invoke/generator/txt2img.py +++ b/ldm/invoke/generator/txt2img.py @@ -3,9 +3,8 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator ''' import torch -import numpy as np + from ldm.invoke.generator.base import Generator -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent class Txt2Img(Generator): diff --git a/ldm/invoke/generator/txt2img2img.py b/ldm/invoke/generator/txt2img2img.py index 759ba2dba4..3da42ebb8a 100644 --- a/ldm/invoke/generator/txt2img2img.py +++ b/ldm/invoke/generator/txt2img2img.py @@ -2,15 +2,16 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator ''' -import torch -import numpy as np import math -from ldm.invoke.generator.base import Generator -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.invoke.generator.omnibus import Omnibus -from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent + +import torch from PIL import Image +from ldm.invoke.generator.base import Generator +from ldm.invoke.generator.omnibus import Omnibus +from ldm.models.diffusion.ddim import DDIMSampler + + class Txt2Img2Img(Generator): def __init__(self, model, precision): 
         super().__init__(model, precision)
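
Note: besides deleting unused imports, the patch reorders the survivors into standard library, third-party, and local ldm.* groups, which is why some lines move without being removed; that grouping matches what tools such as isort produce. The commit itself was presumably generated with a linter or IDE inspection. As a rough, hypothetical sketch of how unused module-level imports can be detected mechanically with nothing but the standard library's ast module, consider the following (the file name find_unused_imports.py is invented for the example; a real checker such as pyflakes also handles names used only in strings, __all__, or annotations, which this sketch misses):

#!/usr/bin/env python3
# find_unused_imports.py -- hypothetical illustration, not part of this commit.
# Flags module-level import bindings that never appear as a Name node
# elsewhere in the file.
import ast
import sys


def unused_imports(source: str) -> list[str]:
    tree = ast.parse(source)
    bound: dict[str, int] = {}  # name bound by an import -> line number

    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                # "import os.path as osp" binds "osp"; plain "import os.path" binds "os"
                bound[alias.asname or alias.name.split('.')[0]] = node.lineno
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                if alias.name != '*':  # "from x import *" binds no checkable name
                    bound[alias.asname or alias.name] = node.lineno

    # Attribute access such as np.uint32 still yields a Name node for "np",
    # so collecting every Name covers the usual usage patterns.
    used = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}
    return [f'{name} (line {lineno})'
            for name, lineno in sorted(bound.items(), key=lambda kv: kv[1])
            if name not in used]


if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path) as stream:
            for hit in unused_imports(stream.read()):
                print(f'{path}: unused import {hit}')

Invoked as, say, "python find_unused_imports.py ldm/invoke/generator/*.py", it prints one line per suspect binding, which corresponds roughly to the deletions in this patch.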