lint(ldm.invoke.generator): 🚮 remove unused imports

Kevin Turner 2022-11-05 09:49:13 -07:00
parent 0f4d71ed63
commit f79bebed86
7 changed files with 58 additions and 56 deletions
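
The diff below is mechanical: each file loses the imports nothing references, and the survivors are regrouped and alphabetized. The commit message doesn't record which tool found the dead imports, so the following is only a minimal sketch of the check itself, using nothing but the standard library's ast module (the unused_imports helper and its report format are illustrative, not anything from this repository):

    import ast
    import sys

    def unused_imports(path: str) -> list[str]:
        """List names a module imports but never references."""
        tree = ast.parse(open(path, encoding='utf-8').read(), filename=path)
        imported: dict[str, int] = {}
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    # 'import os.path as osp' binds 'osp'; bare 'import os.path' binds 'os'
                    imported[alias.asname or alias.name.split('.')[0]] = node.lineno
            elif isinstance(node, ast.ImportFrom):
                for alias in node.names:
                    imported[alias.asname or alias.name] = node.lineno
        # Over-approximate "used" with every Name and Attribute in the tree,
        # so the check errs toward keeping an import rather than flagging it.
        used = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
        used |= {n.attr for n in ast.walk(tree) if isinstance(n, ast.Attribute)}
        return [f'{path}:{lineno}: {name} imported but unused'
                for name, lineno in sorted(imported.items(), key=lambda kv: kv[1])
                if name not in used]

    if __name__ == '__main__':
        for path in sys.argv[1:]:
            print('\n'.join(unused_imports(path)))

In practice a linter such as pyflakes (flake8 reports the same finding as F401, "imported but unused") locates the dead imports and a sorter such as isort regroups the rest; together they would likely reproduce most of this diff.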

ldm/invoke/generator/base.py

@@ -2,17 +2,19 @@
 Base class for ldm.invoke.generator.*
 including img2img, txt2img, and inpaint
 '''
-import torch
-import numpy as np
-import random
 import os
 import os.path as osp
+import random
 import traceback
-from tqdm import tqdm, trange
+import numpy as np
+import torch
 from PIL import Image, ImageFilter, ImageChops
-import cv2 as cv
-from einops import rearrange, repeat
+from einops import rearrange
 from pytorch_lightning import seed_everything
+from tqdm import trange
 from ldm.invoke.devices import choose_autocast
 from ldm.util import rand_perlin_2d
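
Taken together, the surviving imports now sit in the conventional three groups (standard library, third-party, first-party) that import sorters such as isort emit; the commit doesn't name a tool, so the group labels below are added for illustration, but the lines themselves are exactly the new import block of base.py:

    # standard library
    import os
    import os.path as osp
    import random
    import traceback

    # third-party
    import numpy as np
    import torch
    from PIL import Image, ImageFilter, ImageChops
    from einops import rearrange
    from pytorch_lightning import seed_everything
    from tqdm import trange

    # first-party (this repository)
    from ldm.invoke.devices import choose_autocast
    from ldm.util import rand_perlin_2d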
@@ -103,7 +105,7 @@ class Generator():
                     seed = self.new_seed()
         return results
     def sample_to_image(self,samples)->Image.Image:
         """
         Given samples returned from a sampler, converts
@ -166,12 +168,12 @@ class Generator():
blurred_init_mask = pil_init_mask
multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1])
# Paste original on color-corrected generation (using blurred mask)
matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
return matched_result
def sample_to_lowres_estimated_image(self,samples):
# origingally adapted from code by @erucipe and @keturn here:
@@ -219,11 +221,11 @@ class Generator():
         (txt2img) or from the latent image (img2img, inpaint)
         """
         raise NotImplementedError("get_noise() must be implemented in a descendent class")
     def get_perlin_noise(self,width,height):
         fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
         return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
     def new_seed(self):
         self.seed = random.randrange(0, np.iinfo(np.uint32).max)
         return self.seed
@@ -325,4 +327,4 @@ class Generator():
         os.makedirs(dirname, exist_ok=True)
         image.save(filepath,'PNG')

ldm/invoke/generator/embiggen.py

@@ -3,14 +3,16 @@ ldm.invoke.generator.embiggen descends from ldm.invoke.generator
 and generates with ldm.invoke.generator.img2img
 '''
+import numpy as np
 import torch
-import numpy as np
+from PIL import Image
 from tqdm import trange
-from PIL import Image
-from ldm.invoke.generator.base import Generator
-from ldm.invoke.generator.img2img import Img2Img
 from ldm.invoke.devices import choose_autocast
-from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.invoke.generator.base import Generator
+from ldm.invoke.generator.img2img import Img2Img
+from ldm.models.diffusion.ddim import DDIMSampler
 class Embiggen(Generator):
     def __init__(self, model, precision):
@@ -493,7 +495,7 @@ class Embiggen(Generator):
                     # Layer tile onto final image
                     outputsuperimage.alpha_composite(intileimage, (left, top))
             else:
-                print(f'Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.')
+                print('Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.')
         # after internal loops and patching up return Embiggen image
         return outputsuperimage
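
The print fix above (and the matching one in inpaint.py below) drops an f prefix from a string that contains no {…} placeholders; an f-string without placeholders does nothing extra, and pyflakes-based linters flag the pattern as F541. A two-line illustration with a made-up variable:

    tiles_missing = 4
    print('Error: Embiggen tiles missing')          # no interpolation: plain string suffices
    print(f'Error: {tiles_missing} tiles missing')  # interpolation: the f prefix is needed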

ldm/invoke/generator/img2img.py

@@ -2,15 +2,15 @@
 ldm.invoke.generator.img2img descends from ldm.invoke.generator
 '''
-import torch
-import numpy as np
 import PIL
-from torch import Tensor
+import numpy as np
+import torch
 from PIL import Image
+from torch import Tensor
 from ldm.invoke.devices import choose_autocast
 from ldm.invoke.generator.base import Generator
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 class Img2Img(Generator):
     def __init__(self, model, precision):

ldm/invoke/generator/inpaint.py

@@ -3,21 +3,21 @@ ldm.invoke.generator.inpaint descends from ldm.invoke.generator
 '''
 import math
-import torch
-import torchvision.transforms as T
-import numpy as np
-import cv2 as cv
 import PIL
+import cv2 as cv
+import numpy as np
+import torch
 from PIL import Image, ImageFilter, ImageOps, ImageChops
-from skimage.exposure.histogram_matching import match_histograms
-from einops import rearrange, repeat
-from ldm.invoke.devices import choose_autocast
-from ldm.invoke.generator.img2img import Img2Img
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.ksampler import KSampler
+from einops import repeat
+from ldm.invoke.devices import choose_autocast
 from ldm.invoke.generator.base import downsampling
-from ldm.util import debug_image
+from ldm.invoke.generator.img2img import Img2Img
 from ldm.invoke.globals import Globals
+from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.models.diffusion.ksampler import KSampler
+from ldm.util import debug_image
 infill_methods: list[str] = list()
@@ -59,7 +59,7 @@ class Inpaint(Img2Img):
             writeable=False
         )
-    def infill_patchmatch(self, im: Image.Image) -> Image:
+    def infill_patchmatch(self, im: Image.Image) -> Image:
         if im.mode != 'RGBA':
             return im
@@ -128,7 +128,7 @@ class Inpaint(Img2Img):
         # Combine
         npmask = npgradient + npedge
-        # Expand
+        # Expand
         npmask = cv.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2))
         new_mask = Image.fromarray(npmask)
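
On the cv.dilate call above: each iteration with a 3×3 kernel grows the white (nonzero) region of the mask by one pixel on every side, so edge_size / 2 iterations widen the edge mask by roughly edge_size pixels overall. A small self-contained check of that growth rate (the array size is arbitrary):

    import cv2 as cv
    import numpy as np

    mask = np.zeros((9, 9), np.uint8)
    mask[4, 4] = 255                                  # a single white pixel
    grown = cv.dilate(mask, np.ones((3, 3), np.uint8), iterations=2)
    print(np.count_nonzero(grown))                    # 25: a 5x5 block, 2 px of growth per side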
@@ -221,7 +221,7 @@ class Inpaint(Img2Img):
             init_filled = init_filled.resize((inpaint_width, inpaint_height))
             debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging)
         # Create init tensor
         init_image = self._image_to_tensor(init_filled.convert('RGB'))
@@ -251,10 +251,10 @@ class Inpaint(Img2Img):
         # klms samplers not supported yet, so ignore previous sampler
         if isinstance(sampler,KSampler):
             print(
-                f">> Using recommended DDIM sampler for inpainting."
+                ">> Using recommended DDIM sampler for inpainting."
             )
             sampler = DDIMSampler(self.model, device=self.model.device)
         sampler.make_schedule(
             ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
         )
@@ -353,7 +353,7 @@ class Inpaint(Img2Img):
         if self.pil_image is None or self.pil_mask is None:
             return gen_result
         corrected_result = super().repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
         debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging)

ldm/invoke/generator/omnibus.py

@@ -1,14 +1,14 @@
 """omnibus module to be used with the runwayml 9-channel custom inpainting model"""
 import torch
 import numpy as np
-from PIL import Image, ImageOps
 from einops import repeat
+from PIL import Image, ImageOps, ImageChops
 from ldm.invoke.devices import choose_autocast
 from ldm.invoke.generator.base import downsampling
 from ldm.invoke.generator.img2img import Img2Img
 from ldm.invoke.generator.txt2img import Txt2Img
 class Omnibus(Img2Img,Txt2Img):
     def __init__(self, model, precision):
         super().__init__(model, precision)
@@ -58,11 +58,9 @@ class Omnibus(Img2Img,Txt2Img):
         self.mask_blur_radius = mask_blur_radius
         t_enc = steps
         if init_image is not None and mask_image is not None: # inpainting
             masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero
         elif init_image is not None: # img2img
             scope = choose_autocast(self.precision)
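
The masked_image line in this hunk builds the conditioning input for the runwayml 9-channel inpainting model: with mask values of 1 marking regions to regenerate, multiplying by (1 - mask) zeroes exactly those regions and passes visible pixels through unchanged. A minimal torch sketch of the same arithmetic, with made-up tensor shapes:

    import torch

    init_image = torch.rand(1, 3, 64, 64) * 2 - 1          # image scaled to [-1, 1]
    mask_image = (torch.rand(1, 1, 64, 64) > 0.5).float()  # 1 = hole to inpaint, 0 = keep

    # Visible pixels survive; masked regions become zero.
    masked_image = init_image * (1 - mask_image)
    assert torch.all(masked_image[mask_image.expand_as(init_image) == 1] == 0)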
@@ -99,7 +97,7 @@ class Omnibus(Img2Img,Txt2Img):
                 device=model.device,
                 num_samples=num_samples,
             )
             c = model.cond_stage_model.encode(batch["txt"])
             c_cat = list()
             for ck in model.concat_keys:
@@ -164,10 +162,10 @@ class Omnibus(Img2Img,Txt2Img):
     def sample_to_image(self, samples)->Image.Image:
         gen_result = super().sample_to_image(samples).convert('RGB')
         if self.pil_image is None or self.pil_mask is None:
             return gen_result
         corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
         return corrected_result
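
The subtle line here is super(Img2Img, self): Omnibus is declared as class Omnibus(Img2Img, Txt2Img), giving the MRO Omnibus → Img2Img → Txt2Img → Generator, and naming Img2Img as the first argument starts attribute lookup just past Img2Img, deliberately skipping any override it defines on the way to the base implementation. A toy model of that lookup rule:

    class Base:
        def who(self) -> str:
            return 'Base'

    class Middle(Base):
        def who(self) -> str:
            return 'Middle'

    class Leaf(Middle):
        def who(self) -> str:
            # super() would resolve to Middle; super(Middle, self) starts
            # the MRO walk just past Middle and reaches Base directly.
            return super(Middle, self).who()

    print(Leaf().who())   # prints 'Base'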

ldm/invoke/generator/txt2img.py

@@ -3,9 +3,8 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
 '''
 import torch
 import numpy as np
 from ldm.invoke.generator.base import Generator
 from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 class Txt2Img(Generator):

ldm/invoke/generator/txt2img2img.py

@@ -2,15 +2,16 @@
 ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
 '''
-import torch
-import numpy as np
 import math
-from ldm.invoke.generator.base import Generator
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.invoke.generator.omnibus import Omnibus
-from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
+import torch
+from PIL import Image
+from ldm.invoke.generator.base import Generator
+from ldm.invoke.generator.omnibus import Omnibus
+from ldm.models.diffusion.ddim import DDIMSampler
 class Txt2Img2Img(Generator):
     def __init__(self, model, precision):
         super().__init__(model, precision)