mirror of https://github.com/invoke-ai/InvokeAI
lint(ldm.invoke.generator): 🚮 remove unused imports
parent 0f4d71ed63 · commit f79bebed86
ldm/invoke/generator/base.py

@@ -2,17 +2,19 @@
 Base class for ldm.invoke.generator.*
 including img2img, txt2img, and inpaint
 '''
-import torch
-import numpy as np
-import random
 import os
 import os.path as osp
+import random
 import traceback
-from tqdm import tqdm, trange
+
+import numpy as np
+import torch
 from PIL import Image, ImageFilter, ImageChops
 import cv2 as cv
-from einops import rearrange, repeat
+from einops import rearrange
 from pytorch_lightning import seed_everything
+from tqdm import trange
+
 
 from ldm.invoke.devices import choose_autocast
 from ldm.util import rand_perlin_2d
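Note on the hunk above (and the import hunks that follow): the reordering is stdlib first, then third-party, then ldm.* first-party, each group alphabetized, which is standard isort layout, while unused names such as tqdm's `tqdm` and einops' `repeat` are dropped. The commit does not say which tools were run; assuming stock pyflakes and isort, the same class of findings can be reproduced locally with:

    python -m pyflakes ldm/invoke/generator/   # reports unused imports and placeholder-free f-strings
    python -m isort ldm/invoke/generator/      # regroups imports into the layout seen in these hunks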
@@ -103,7 +105,7 @@ class Generator():
                 seed = self.new_seed()
 
         return results
 
     def sample_to_image(self,samples)->Image.Image:
         """
         Given samples returned from a sampler, converts
@@ -166,12 +168,12 @@ class Generator():
             blurred_init_mask = pil_init_mask
 
         multiplied_blurred_init_mask = ImageChops.multiply(blurred_init_mask, self.pil_image.split()[-1])
 
         # Paste original on color-corrected generation (using blurred mask)
         matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
         return matched_result
 
 
 
     def sample_to_lowres_estimated_image(self,samples):
         # origingally adapted from code by @erucipe and @keturn here:
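The unchanged `multiplied_blurred_init_mask` line above is the heart of the paste-back step in `repaste_and_color_correct`. A minimal, self-contained sketch of that compositing (sizes and colors are invented for illustration):

    from PIL import Image, ImageChops

    pil_image = Image.new('RGBA', (64, 64), (255, 0, 0, 128))  # original, with alpha
    blurred_init_mask = Image.new('L', (64, 64), 200)          # stand-in blurred mask
    init_image = Image.new('RGB', (64, 64), 'blue')
    matched_result = Image.new('RGB', (64, 64), 'green')

    # Multiplying the blurred mask by the original's alpha band confines the
    # paste-back to pixels that are both inside the mask and opaque in the source.
    multiplied = ImageChops.multiply(blurred_init_mask, pil_image.split()[-1])
    matched_result.paste(init_image, (0, 0), mask=multiplied)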
@@ -219,11 +221,11 @@ class Generator():
         (txt2img) or from the latent image (img2img, inpaint)
         """
         raise NotImplementedError("get_noise() must be implemented in a descendent class")
 
     def get_perlin_noise(self,width,height):
         fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
         return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
 
     def new_seed(self):
         self.seed = random.randrange(0, np.iinfo(np.uint32).max)
         return self.seed
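`get_perlin_noise` above bounces each noise plane through `fixdevice` (CPU whenever the model lives on Apple's MPS backend), apparently to sidestep an MPS kernel problem, before stacking everything and returning it on the model device. A runnable sketch of just that device dance, with `torch.randn` standing in for InvokeAI's own `rand_perlin_2d` helper:

    import torch

    def noise_stack(channels, height, width, device):
        # Bounce each plane through CPU on MPS, mirroring the workaround above.
        fixdevice = torch.device('cpu') if device.type == 'mps' else device
        planes = [torch.randn(height, width, device=device).to(fixdevice)
                  for _ in range(channels)]
        return torch.stack(planes, dim=0).to(device)

    noise = noise_stack(4, 64, 64, torch.device('cpu'))
    assert noise.shape == (4, 64, 64)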
@@ -325,4 +327,4 @@ class Generator():
         os.makedirs(dirname, exist_ok=True)
         image.save(filepath,'PNG')
 
 
ldm/invoke/generator/embiggen.py

@@ -3,14 +3,16 @@ ldm.invoke.generator.embiggen descends from ldm.invoke.generator
 and generates with ldm.invoke.generator.img2img
 '''
 
+import numpy as np
 import torch
-import numpy as np
+from PIL import Image
 from tqdm import trange
-from PIL import Image
-from ldm.invoke.generator.base import Generator
-from ldm.invoke.generator.img2img import Img2Img
+
 from ldm.invoke.devices import choose_autocast
+from ldm.invoke.generator.base import Generator
+from ldm.invoke.generator.img2img import Img2Img
 from ldm.models.diffusion.ddim import DDIMSampler
 
 
 class Embiggen(Generator):
     def __init__(self, model, precision):
@@ -493,7 +495,7 @@ class Embiggen(Generator):
                     # Layer tile onto final image
                     outputsuperimage.alpha_composite(intileimage, (left, top))
                 else:
-                    print(f'Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.')
+                    print('Error: could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation.')
 
         # after internal loops and patching up return Embiggen image
         return outputsuperimage
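This is one of the two non-import fixes in the commit: an f-string with no placeholders does nothing a plain literal doesn't, and pyflakes flags it ("f-string is missing placeholders", code F541 under flake8):

    print(f'no placeholders here')  # flagged: the f-prefix is inert
    print('no placeholders here')   # equivalent, lint-clean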
ldm/invoke/generator/img2img.py

@@ -2,15 +2,15 @@
 ldm.invoke.generator.img2img descends from ldm.invoke.generator
 '''
 
-import torch
-import numpy as np
 import PIL
-from torch import Tensor
+import numpy as np
+import torch
 from PIL import Image
+from torch import Tensor
 
 from ldm.invoke.devices import choose_autocast
 from ldm.invoke.generator.base import Generator
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 
+
 class Img2Img(Generator):
     def __init__(self, model, precision):
ldm/invoke/generator/inpaint.py

@@ -3,21 +3,21 @@ ldm.invoke.generator.inpaint descends from ldm.invoke.generator
 '''
 
 import math
-import torch
-import torchvision.transforms as T
-import numpy as np
-import cv2 as cv
+
 import PIL
+import cv2 as cv
+import numpy as np
+import torch
 from PIL import Image, ImageFilter, ImageOps, ImageChops
-from skimage.exposure.histogram_matching import match_histograms
-from einops import rearrange, repeat
+from einops import repeat
+
 from ldm.invoke.devices import choose_autocast
-from ldm.invoke.generator.img2img import Img2Img
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.ksampler import KSampler
 from ldm.invoke.generator.base import downsampling
-from ldm.util import debug_image
+from ldm.invoke.generator.img2img import Img2Img
 from ldm.invoke.globals import Globals
+from ldm.models.diffusion.ddim import DDIMSampler
+from ldm.models.diffusion.ksampler import KSampler
+from ldm.util import debug_image
 
 infill_methods: list[str] = list()
 
@@ -59,7 +59,7 @@ class Inpaint(Img2Img):
             writeable=False
         )
 
     def infill_patchmatch(self, im: Image.Image) -> Image:
         if im.mode != 'RGBA':
             return im
 
@@ -128,7 +128,7 @@ class Inpaint(Img2Img):
         # Combine
         npmask = npgradient + npedge
 
         # Expand
         npmask = cv.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2))
 
         new_mask = Image.fromarray(npmask)
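For context on the unchanged `cv.dilate` line: each iteration with a 3x3 kernel grows the white mask region by roughly one pixel in every direction, so `int(edge_size / 2)` iterations widen the seam mask by about `edge_size / 2` pixels per side. A toy demonstration:

    import numpy as np
    import cv2 as cv

    edge_size = 8
    npmask = np.zeros((32, 32), np.uint8)
    npmask[12:20, 12:20] = 255                  # small white square
    grown = cv.dilate(npmask, np.ones((3, 3), np.uint8),
                      iterations=int(edge_size / 2))
    assert grown.sum() > npmask.sum()           # the mask has expanded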
@@ -221,7 +221,7 @@ class Inpaint(Img2Img):
             init_filled = init_filled.resize((inpaint_width, inpaint_height))
 
         debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging)
 
         # Create init tensor
         init_image = self._image_to_tensor(init_filled.convert('RGB'))
 
@@ -251,10 +251,10 @@ class Inpaint(Img2Img):
         # klms samplers not supported yet, so ignore previous sampler
         if isinstance(sampler,KSampler):
             print(
-                f">> Using recommended DDIM sampler for inpainting."
+                ">> Using recommended DDIM sampler for inpainting."
             )
             sampler = DDIMSampler(self.model, device=self.model.device)
 
         sampler.make_schedule(
             ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
         )
@@ -353,7 +353,7 @@ class Inpaint(Img2Img):
 
         if self.pil_image is None or self.pil_mask is None:
             return gen_result
 
         corrected_result = super().repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
         debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging)
 
ldm/invoke/generator/omnibus.py

@@ -1,14 +1,14 @@
 """omnibus module to be used with the runwayml 9-channel custom inpainting model"""
 
 import torch
-import numpy as np
+from PIL import Image, ImageOps
 from einops import repeat
-from PIL import Image, ImageOps, ImageChops
+
 from ldm.invoke.devices import choose_autocast
-from ldm.invoke.generator.base import downsampling
 from ldm.invoke.generator.img2img import Img2Img
 from ldm.invoke.generator.txt2img import Txt2Img
 
 
 class Omnibus(Img2Img,Txt2Img):
     def __init__(self, model, precision):
         super().__init__(model, precision)
@@ -58,11 +58,9 @@ class Omnibus(Img2Img,Txt2Img):
 
         self.mask_blur_radius = mask_blur_radius
 
-        t_enc = steps
-
         if init_image is not None and mask_image is not None: # inpainting
             masked_image = init_image * (1 - mask_image) # masked image is the image masked by mask - masked regions zero
 
         elif init_image is not None: # img2img
             scope = choose_autocast(self.precision)
 
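On the masking convention in the inpainting branch above: `mask_image` is 1 where content should be regenerated, so `init_image * (1 - mask_image)` zeroes exactly the region to inpaint, and that product is presumably what feeds the 9-channel model's conditioning. A toy tensor illustration:

    import torch

    init_image = torch.ones(1, 3, 4, 4)    # stand-in for a normalized image batch
    mask_image = torch.zeros(1, 1, 4, 4)
    mask_image[..., 1:3, 1:3] = 1          # mark a 2x2 patch for inpainting
    masked_image = init_image * (1 - mask_image)
    assert masked_image[..., 1:3, 1:3].abs().sum() == 0   # masked region zeroed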
@@ -99,7 +97,7 @@ class Omnibus(Img2Img,Txt2Img):
                 device=model.device,
                 num_samples=num_samples,
             )
 
             c = model.cond_stage_model.encode(batch["txt"])
             c_cat = list()
             for ck in model.concat_keys:
@@ -164,10 +162,10 @@ class Omnibus(Img2Img,Txt2Img):
 
     def sample_to_image(self, samples)->Image.Image:
         gen_result = super().sample_to_image(samples).convert('RGB')
 
         if self.pil_image is None or self.pil_mask is None:
             return gen_result
 
         corrected_result = super(Img2Img, self).repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
 
         return corrected_result
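Note the unchanged two-argument `super(Img2Img, self)` call above: with `Omnibus(Img2Img, Txt2Img)`, it starts the method lookup *after* `Img2Img` in the MRO, so `repaste_and_color_correct` resolves further up the chain (ultimately to the `Generator` base) even if `Img2Img` were to override it. A self-contained toy showing the mechanism (class names here are illustrative, not InvokeAI's):

    class Base:
        def repaste(self):
            return 'Base.repaste'

    class A(Base):
        def repaste(self):
            return 'A.repaste'

    class B(Base):
        pass

    class Omni(A, B):
        def call_up(self):
            # super(A, self) searches the MRO *after* A: Omni -> A -> B -> Base
            return super(A, self).repaste()

    assert Omni().call_up() == 'Base.repaste'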
ldm/invoke/generator/txt2img.py

@@ -3,9 +3,8 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
 '''
 
 import torch
-import numpy as np
+
 from ldm.invoke.generator.base import Generator
-from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 
 
 class Txt2Img(Generator):
ldm/invoke/generator/txt2img2img.py

@@ -2,15 +2,16 @@
 ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
 '''
 
-import torch
-import numpy as np
 import math
-from ldm.invoke.generator.base import Generator
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.invoke.generator.omnibus import Omnibus
-from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
+
+import torch
 from PIL import Image
+
+from ldm.invoke.generator.base import Generator
+from ldm.invoke.generator.omnibus import Omnibus
+from ldm.models.diffusion.ddim import DDIMSampler
 
 
 class Txt2Img2Img(Generator):
     def __init__(self, model, precision):
         super().__init__(model, precision)