parent 3fc5cb09f8
commit 66c920fc19
@@ -10,8 +10,6 @@ from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.invoke.generator.omnibus import Omnibus
 from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from PIL import Image
-from ldm.invoke.devices import choose_autocast
-from ldm.invoke.image_util import InitImageResizer
 
 class Txt2Img2Img(Generator):
     def __init__(self, model, precision):
@@ -46,13 +44,16 @@ class Txt2Img2Img(Generator):
             ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
         )
 
+        #x = self.get_noise(init_width, init_height)
+        x = x_T
+
         if self.free_gpu_mem and self.model.model.device != self.model.device:
             self.model.model.to(self.model.device)
 
         samples, _ = sampler.sample(
             batch_size = 1,
             S = steps,
-            x_T = x_T,
+            x_T = x,
             conditioning = c,
             shape = shape,
             verbose = False,
@@ -68,21 +69,11 @@ class Txt2Img2Img(Generator):
         )
 
         # resizing
-        image = self.sample_to_image(samples)
-        image = InitImageResizer(image).resize(width, height)
-
-        image = np.array(image).astype(np.float32) / 255.0
-        image = image[None].transpose(0, 3, 1, 2)
-        image = torch.from_numpy(image)
-        image = 2.0 * image - 1.0
-        image = image.to(self.model.device)
-
-        scope = choose_autocast(self.precision)
-        with scope(self.model.device.type):
-            samples = self.model.get_first_stage_encoding(
-                self.model.encode_first_stage(image)
-            ) # move back to latent space
-
+        samples = torch.nn.functional.interpolate(
+            samples,
+            size=(height // self.downsampling_factor, width // self.downsampling_factor),
+            mode="bilinear"
+        )
 
         t_enc = int(strength * steps)
         ddim_sampler = DDIMSampler(self.model, device=self.model.device)
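
The change drops the decode-resize-re-encode round trip (sample_to_image, InitImageResizer, then encode_first_stage back to latents) and instead resizes the first-pass result directly in latent space; it also reuses the caller-supplied noise x_T instead of drawing fresh noise via get_noise. A minimal standalone sketch of the latent-space resize, assuming a Stable Diffusion style latent of shape [batch, 4, height/8, width/8]; the variable names and the factor of 8 (standing in for self.downsampling_factor) are illustrative, not InvokeAI's API:

import torch
import torch.nn.functional as F

downsampling_factor = 8  # assumption: the VAE downsamples by 8x

# e.g. a 512x512 first-pass result lives in a 64x64 latent
samples = torch.randn(1, 4, 512 // downsampling_factor, 512 // downsampling_factor)

# target a 768x768 output, i.e. a 96x96 latent
height, width = 768, 768
samples = F.interpolate(
    samples,
    size=(height // downsampling_factor, width // downsampling_factor),
    mode="bilinear",
)
print(samples.shape)  # torch.Size([1, 4, 96, 96])

Interpolating the latent avoids a VAE decode/encode pass entirely, which is why the choose_autocast and InitImageResizer imports could be removed; the upscaled latent then feeds the img2img refinement pass that begins at t_enc.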