optionally scale initial image to fit box defined by width x height

* This functionality is triggered by the --fit option in the CLI (default false), and by the "fit" checkbox in the WebGUI (default true).
* In addition, this commit contains a number of whitespace changes to make the code more readable, as well as an attempt to unify the visual appearance of info and warning messages.

parent 4b560b50c2
commit 28fe84177e
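For illustration, a hypothetical interactive session exercising the new option (the prompt and file name are invented; -I and --fit are the switches this commit reads and writes):

    dream> "a sunny meadow" -I ./photo.png --fit

With --fit, the init image is rescaled to fit inside the requested width x height box before img2img runs; without it, the image is only snapped down to dimensions that are multiples of 64.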
@@ -8,11 +8,10 @@ class InitImageResizer():

     def resize(self,width=None,height=None) -> Image:
         """
-        Return a copy of the image resized to width x height.
-        The aspect ratio is maintained, with any excess space
-        filled using black borders (i.e. letterboxed). If
-        neither width nor height are provided, then returns
-        a copy of the original image. If one or the other is
+        Return a copy of the image resized to fit within
+        a box width x height. The aspect ratio is
+        maintained. If neither width nor height are provided,
+        then returns a copy of the original image. If one or the other is
         provided, then the other will be calculated from the
         aspect ratio.
@@ -21,38 +20,34 @@ class InitImageResizer():
         """
         im = self.image

-        if not(width or height):
-            return im.copy()
-
-        ar = im.width/im.height
+        ar = im.width/float(im.height)

         # Infer missing values from aspect ratio
-        if not height: # height missing
+        if not(width or height): # both missing
+            width  = im.width
+            height = im.height
+        elif not height: # height missing
             height = int(width/ar)
-        if not width: # width missing
+        elif not width: # width missing
             width = int(height*ar)

         # rw and rh are the resizing width and height for the image
         # they maintain the aspect ratio, but may not completely fill up
         # the requested destination size
-        (rw,rh) = (width,int(width/ar)) if im.width>=im.height else (int(height*ar),width)
+        (rw,rh) = (width,int(width/ar)) if im.width>=im.height else (int(height*ar),height)

         #round everything to multiples of 64
         width,height,rw,rh = map(
             lambda x: x-x%64, (width,height,rw,rh)
         )

-        # resize the original image so that it fits inside the dest
+        # no resize necessary, but return a copy
+        if im.width == width and im.height == height:
+            return im.copy()
+
+        # otherwise resize the original image so that it fits inside the bounding box
         resized_image = self.image.resize((rw,rh),resample=Image.Resampling.LANCZOS)
-
-        # create new destination image of specified dimensions
-        # and paste the resized image into it centered appropriately
-        new_image = Image.new('RGB',(width,height))
-        new_image.paste(resized_image,((width-rw)//2,(height-rh)//2))
-
-        print(f'>> Resized image size to {width}x{height}')
-
-        return new_image
+        return resized_image

 def make_grid(image_list, rows=None, cols=None):
     image_cnt = len(image_list)
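A minimal standalone sketch (not part of the commit) of the multiple-of-64 truncation that `lambda x: x-x%64` performs above:

    # Every dimension snaps down to the nearest multiple of 64.
    def round_down_to_64(x: int) -> int:
        return x - x % 64

    assert round_down_to_64(513) == 512   # just over a boundary snaps back
    assert round_down_to_64(512) == 512   # exact multiples are unchanged
    assert round_down_to_64(100) == 64    # anything below 128 becomes 64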
@@ -61,6 +61,8 @@ class PromptFormatter:
         switches.append(f'-A{opt.sampler_name or t2i.sampler_name}')
         if opt.init_img:
             switches.append(f'-I{opt.init_img}')
+        if opt.fit:
+            switches.append(f'--fit')
         if opt.strength and opt.init_img is not None:
             switches.append(f'-f{opt.strength or t2i.strength}')
         if opt.gfpgan_strength:
@@ -70,6 +70,7 @@ class DreamServer(BaseHTTPRequestHandler):
         steps = int(post_data['steps'])
         width = int(post_data['width'])
         height = int(post_data['height'])
+        fit = 'fit' in post_data
         cfgscale = float(post_data['cfgscale'])
         sampler_name = post_data['sampler']
         gfpgan_strength = float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0
@@ -80,7 +81,7 @@ class DreamServer(BaseHTTPRequestHandler):
         seed = self.model.seed if int(post_data['seed']) == -1 else int(post_data['seed'])

         self.canceled.clear()
-        print(f"Request to generate with prompt: {prompt}")
+        print(f">> Request to generate with prompt: {prompt}")
         # In order to handle upscaled images, the PngWriter needs to maintain state
         # across images generated by each call to prompt2img(), so we define it in
         # the outer scope of image_done()
@@ -177,10 +178,13 @@ class DreamServer(BaseHTTPRequestHandler):
                 init_img = "./img2img-tmp.png",
                 strength = strength,
                 iterations = iterations,
                 cfg_scale = cfgscale,
                 seed = seed,
                 steps = steps,
                 sampler_name = sampler_name,
+                width = width,
+                height = height,
+                fit = fit,
                 gfpgan_strength=gfpgan_strength,
                 upscale = upscale,
                 step_callback=image_progress,
@@ -192,8 +196,6 @@ class DreamServer(BaseHTTPRequestHandler):
             print(f"Canceled.")
             return

-        print(f"Prompt generated!")
-

 class ThreadingDreamServer(ThreadingHTTPServer):
     def __init__(self, server_address):
@@ -14,7 +14,7 @@ model_path = os.path.join(opt.gfpgan_dir, opt.gfpgan_model_path)
 gfpgan_model_exists = os.path.isfile(model_path)

 def _run_gfpgan(image, strength, prompt, seed, upsampler_scale=4):
-    print(f'\n* GFPGAN - Restoring Faces: {prompt} : seed:{seed}')
+    print(f'>> GFPGAN - Restoring Faces: {prompt} : seed:{seed}')
     gfpgan = None
     with warnings.catch_warnings():
         warnings.filterwarnings('ignore', category=DeprecationWarning)
@@ -41,12 +41,12 @@ def _run_gfpgan(image, strength, prompt, seed, upsampler_scale=4):
         except Exception:
             import traceback

-            print('Error loading GFPGAN:', file=sys.stderr)
+            print('>> Error loading GFPGAN:', file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)

     if gfpgan is None:
         print(
-            f'GFPGAN not initialized, it must be loaded via the --gfpgan argument'
+            f'>> GFPGAN not initialized, it must be loaded via the --gfpgan argument'
         )
         return image
@@ -129,7 +129,7 @@ def _load_gfpgan_bg_upsampler(bg_upsampler, upsampler_scale, bg_tile=400):

 def real_esrgan_upscale(image, strength, upsampler_scale, prompt, seed):
     print(
-        f'\n* Real-ESRGAN Upscaling: {prompt} : seed:{seed} : scale:{upsampler_scale}x'
+        f'>> Real-ESRGAN Upscaling: {prompt} : seed:{seed} : scale:{upsampler_scale}x'
     )

     with warnings.catch_warnings():
@@ -143,7 +143,7 @@ def real_esrgan_upscale(image, strength, upsampler_scale, prompt, seed):
     except Exception:
         import traceback

-        print('Error loading Real-ESRGAN:', file=sys.stderr)
+        print('>> Error loading Real-ESRGAN:', file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)

     output, img_mode = upsampler.enhance(
ldm/simplet2i.py (173 changed lines)
@@ -194,29 +194,29 @@ class T2I:
         return self.prompt2png(prompt, outdir, **kwargs)

     def prompt2image(
             self,
             # these are common
             prompt,
-            iterations=None,
-            steps=None,
-            seed=None,
-            cfg_scale=None,
-            ddim_eta=None,
-            skip_normalize=False,
-            image_callback=None,
-            step_callback=None,
-            width=None,
-            height=None,
+            iterations = None,
+            steps = None,
+            seed = None,
+            cfg_scale = None,
+            ddim_eta = None,
+            skip_normalize = False,
+            image_callback = None,
+            step_callback = None,
+            width = None,
+            height = None,
             # these are specific to img2img
-            init_img=None,
-            strength=None,
-            gfpgan_strength=0,
-            save_original=False,
-            upscale=None,
-            variants=None,
-            sampler_name=None,
-            log_tokenization=False,
+            init_img = None,
+            fit = False,
+            strength = None,
+            gfpgan_strength = 0,
+            save_original = False,
+            upscale = None,
+            sampler_name = None,
+            log_tokenization = False,
             **args,
     ):  # eat up additional cruft
         """
         ldm.prompt2image() is the common entry point for txt2img() and img2img()
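A hypothetical caller sketch of the new signature (the prompt and file name are invented; parameter names come from the signature above, and a configured T2I instance `t2i` is assumed):

    results = t2i.prompt2image(
        prompt   = 'a sunny meadow',   # invented example prompt
        init_img = './photo.png',      # invented example path
        width    = 512,
        height   = 512,
        fit      = True,               # new: scale init image to fit inside 512x512
        strength = 0.75,
    )
    # each result pairs a generated image with the seed that produced it
    for image, seed in results:
        image.save(f'meadow-{seed}.png')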
@@ -232,7 +232,6 @@ class T2I:
            strength // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
            gfpgan_strength // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely
            ddim_eta // image randomness (eta=0.0 means the same seed always produces the same image)
-           variants // if >0, the 1st generated image will be passed back to img2img to generate the requested number of variants
            step_callback // a function or method that will be called each step
            image_callback // a function or method that will be called each time an image is generated
@@ -251,14 +250,14 @@ class T2I:
         to create the requested output directory, select a unique informative name for each image, and
         write the prompt into the PNG metadata.
         """
         steps = steps or self.steps
         seed = seed or self.seed
         width = width or self.width
         height = height or self.height
         cfg_scale = cfg_scale or self.cfg_scale
         ddim_eta = ddim_eta or self.ddim_eta
         iterations = iterations or self.iterations
         strength = strength or self.strength
         self.log_tokenization = log_tokenization

         model = (
@@ -269,9 +268,7 @@ class T2I:
             0.0 <= strength <= 1.0
         ), 'can only work with strength in [0.0, 1.0]'

-        if not(width == self.width and height == self.height):
-            width, height, _ = self._resolution_check(width, height, log=True)
-
+        width, height, _ = self._resolution_check(width, height, log=True)
         scope = autocast if self.precision == 'autocast' else nullcontext

         if sampler_name and (sampler_name != self.sampler_name):
@@ -295,6 +292,7 @@ class T2I:
                     init_img=init_img,
                     width=width,
                     height=height,
+                    fit=fit,
                     strength=strength,
                     callback=step_callback,
                 )
@@ -312,7 +310,7 @@ class T2I:
                 )

             with scope(self.device.type), self.model.ema_scope():
-                for n in trange(iterations, desc='Generating'):
+                for n in trange(iterations, desc='>> Generating'):
                     seed_everything(seed)
                     image = next(images_iterator)
                     results.append([image, seed])
@@ -365,12 +363,12 @@ class T2I:
             print('Are you sure your system has an adequate NVIDIA GPU?')

         toc = time.time()
-        print('Usage stats:')
+        print('>> Usage stats:')
         print(
-            f' {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
+            f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
         )
         print(
-            f' Max VRAM used for this generation:',
+            f'>> Max VRAM used for this generation:',
             '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
         )
@@ -379,7 +377,7 @@ class T2I:
                 self.session_peakmem, torch.cuda.max_memory_allocated()
             )
             print(
-                f' Max VRAM used since script start: ',
+                f'>> Max VRAM used since script start: ',
                 '%4.2fG' % (self.session_peakmem / 1e9),
             )
         return results
@@ -425,18 +423,19 @@ class T2I:

     @torch.no_grad()
     def _img2img(
         self,
         prompt,
         precision_scope,
         steps,
         cfg_scale,
         ddim_eta,
         skip_normalize,
         init_img,
         width,
         height,
+        fit,
         strength,
         callback,  # Currently not implemented for img2img
     ):
         """
         An infinite iterator of images from the prompt and the initial image
@@ -445,13 +444,13 @@ class T2I:
         # PLMS sampler not supported yet, so ignore previous sampler
         if self.sampler_name != 'ddim':
             print(
-                f"sampler '{self.sampler_name}' is not yet supported. Using DDIM sampler"
+                f">> sampler '{self.sampler_name}' is not yet supported. Using DDIM sampler"
             )
             sampler = DDIMSampler(self.model, device=self.device)
         else:
             sampler = self.sampler

-        init_image = self._load_img(init_img, width, height).to(self.device)
+        init_image = self._load_img(init_img, width, height,fit).to(self.device)
         with precision_scope(self.device.type):
             init_latent = self.model.get_first_stage_encoding(
                 self.model.encode_first_stage(init_image)
@@ -581,7 +580,7 @@ class T2I:
             print(msg)

     def _load_model_from_config(self, config, ckpt):
-        print(f'Loading model from {ckpt}')
+        print(f'>> Loading model from {ckpt}')
         pl_sd = torch.load(ckpt, map_location='cpu')
         # if "global_step" in pl_sd:
         #     print(f"Global Step: {pl_sd['global_step']}")
@@ -596,41 +595,63 @@ class T2I:
             )
         else:
             print(
-                'Using half precision math. Call with --full_precision to use more accurate but VRAM-intensive full precision.'
+                '>> Using half precision math. Call with --full_precision to use more accurate but VRAM-intensive full precision.'
             )
             model.half()
         return model

-    def _load_img(self, path, width, height):
-        print(f'image path = {path}, cwd = {os.getcwd()}')
+    def _load_img(self, path, width, height, fit=False):
         with Image.open(path) as img:
             image = img.convert('RGB')
             print(
-                f'loaded input image of size {image.width}x{image.height} from {path}')
+                f'>> loaded input image of size {image.width}x{image.height} from {path}'
+            )

-        from ldm.dream.image_util import InitImageResizer
-        if width == self.width and height == self.height:
-            new_image_width, new_image_height, resize_needed = self._resolution_check(
-                image.width, image.height)
+        # The logic here is:
+        # 1. If "fit" is true, then the image will be fit into the bounding box defined
+        # by width and height. It will do this in a way that preserves the init image's
+        # aspect ratio while preventing letterboxing. This means that if there is
+        # leftover horizontal space after rescaling the image to fit in the bounding box,
+        # the generated image's width will be reduced to the rescaled init image's width.
+        # Similarly for the vertical space.
+        # 2. Otherwise, if "fit" is false, then the image will be scaled, preserving its
+        # aspect ratio, to the nearest multiple of 64. Large images may generate an
+        # unexpected OOM error.
+        if fit:
+            image = self._fit_image(image,(width,height))
         else:
-            if height == self.height:
-                new_image_width, new_image_height, resize_needed = self._resolution_check(
-                    width, image.height)
-            if width == self.width:
-                new_image_width, new_image_height, resize_needed = self._resolution_check(
-                    image.width, height)
-            else:
-                image = InitImageResizer(image).resize(width, height)
-                resize_needed=False
-        if resize_needed:
-            image = InitImageResizer(image).resize(
-                new_image_width, new_image_height)
+            image = self._squeeze_image(image)

         image = np.array(image).astype(np.float32) / 255.0
         image = image[None].transpose(0, 3, 1, 2)
         image = torch.from_numpy(image)
         return 2.0 * image - 1.0

+    def _squeeze_image(self,image):
+        x,y,resize_needed = self._resolution_check(image.width,image.height)
+        if resize_needed:
+            return InitImageResizer(image).resize(x,y)
+        return image
+
+    def _fit_image(self,image,max_dimensions):
+        w,h = max_dimensions
+        print(
+            f'>> image will be resized to fit inside a box {w}x{h} in size.'
+        )
+        if image.width > image.height:
+            h = None   # by setting h to none, we tell InitImageResizer to fit into the width and calculate height
+        elif image.height > image.width:
+            w = None   # ditto for w
+        else:
+            pass
+        image = InitImageResizer(image).resize(w,h)   # note that InitImageResizer does the multiple of 64 truncation internally
+        print(
+            f'>> after adjusting image dimensions to be multiples of 64, init image is {image.width}x{image.height}'
+        )
+        return image
+
+# TO DO: Move this and related weighted subprompt code into its own module.
 def _split_weighted_subprompts(text, skip_normalize=False):
     """
     grabs all text up to the first occurrence of ':'
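To make the comment block above concrete, a worked sketch (not part of the commit; the 1000x800 image is invented) of the dimensions the fit path produces for a 512x512 bounding box:

    # A landscape image keeps the box width; height is derived from the
    # aspect ratio and then truncated to a multiple of 64, mirroring what
    # _fit_image plus InitImageResizer compute above.
    def fit_dimensions(im_w, im_h, box_w, box_h):
        ar = im_w / float(im_h)
        if im_w > im_h:                   # the h=None case above
            w, h = box_w, int(box_w / ar)
        elif im_h > im_w:                 # the w=None case above
            w, h = int(box_h * ar), box_h
        else:
            w, h = box_w, box_h
        return w - w % 64, h - h % 64     # multiple-of-64 truncation

    print(fit_dimensions(1000, 800, 512, 512))   # -> (512, 384)
    # The leftover vertical space is dropped rather than letterboxed.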
@@ -698,6 +719,6 @@ class T2I:
                 f'>> Provided width and height must be multiples of 64. Auto-resizing to {w}x{h}'
             )
             height = h
             width = w
             resize_needed = True
         return width, height, resize_needed
@@ -88,7 +88,7 @@ def main():
     tic = time.time()
     t2i.load_model()
     print(
-        f'model loaded in', '%4.2fs' % (time.time() - tic)
+        f'>> model loaded in', '%4.2fs' % (time.time() - tic)
     )

     if not infile:
@@ -483,6 +483,13 @@ def create_cmd_parser():
         type=str,
         help='Path to input image for img2img mode (supersedes width and height)',
     )
+    parser.add_argument(
+        '-T',
+        '-fit',
+        '--fit',
+        action='store_true',
+        help='If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)',
+    )
     parser.add_argument(
         '-f',
         '--strength',
@@ -8,13 +8,15 @@
   margin-top: 20vh;
   margin-left: auto;
   margin-right: auto;
-  max-width: 800px;
-
+  max-width: 1024px;
   text-align: center;
 }
 fieldset {
   border: none;
 }
+div {
+  padding: 10px 10px 10px 10px;
+}
 #fieldset-search {
   display: flex;
 }
@@ -78,3 +80,18 @@ label {
   cursor: pointer;
   color: red;
 }
+#txt2img {
+  background-color: #DCDCDC;
+}
+#img2img {
+  background-color: #F5F5F5;
+}
+#gfpgan {
+  background-color: #DCDCDC;
+}
+#progress-section {
+  background-color: #F5F5F5;
+}
+#about {
+  background-color: #DCDCDC;
+}
@@ -14,78 +14,84 @@
     <h2 id="header">Stable Diffusion Dream Server</h2>

     <form id="generate-form" method="post" action="#">
+      <div id="txt2img">
         <fieldset id="fieldset-search">
             <input type="text" id="prompt" name="prompt">
             <input type="submit" id="submit" value="Generate">
         </fieldset>
         <fieldset id="fieldset-config">
             <label for="iterations">Images to generate:</label>
             <input value="1" type="number" id="iterations" name="iterations" size="4">
             <label for="steps">Steps:</label>
             <input value="50" type="number" id="steps" name="steps">
             <label for="cfgscale">Cfg Scale:</label>
             <input value="7.5" type="number" id="cfgscale" name="cfgscale" step="any">
             <label for="sampler">Sampler:</label>
             <select id="sampler" name="sampler" value="k_lms">
                 <option value="ddim">DDIM</option>
                 <option value="plms">PLMS</option>
                 <option value="k_lms" selected>KLMS</option>
                 <option value="k_dpm_2">KDPM_2</option>
                 <option value="k_dpm_2_a">KDPM_2A</option>
                 <option value="k_euler">KEULER</option>
                 <option value="k_euler_a">KEULER_A</option>
                 <option value="k_heun">KHEUN</option>
             </select>
             <br>
             <label title="Set to multiple of 64" for="width">Width:</label>
             <select id="width" name="width" value="512">
                 <option value="64">64</option> <option value="128">128</option>
                 <option value="192">192</option> <option value="256">256</option>
                 <option value="320">320</option> <option value="384">384</option>
                 <option value="448">448</option> <option value="512" selected>512</option>
                 <option value="576">576</option> <option value="640">640</option>
                 <option value="704">704</option> <option value="768">768</option>
                 <option value="832">832</option> <option value="896">896</option>
                 <option value="960">960</option> <option value="1024">1024</option>
             </select>
             <label title="Set to multiple of 64" for="height">Height:</label>
             <select id="height" name="height" value="512">
                 <option value="64">64</option> <option value="128">128</option>
                 <option value="192">192</option> <option value="256">256</option>
                 <option value="320">320</option> <option value="384">384</option>
                 <option value="448">448</option> <option value="512" selected>512</option>
                 <option value="576">576</option> <option value="640">640</option>
                 <option value="704">704</option> <option value="768">768</option>
                 <option value="832">832</option> <option value="896">896</option>
                 <option value="960">960</option> <option value="1024">1024</option>
             </select>
             <label title="Set to -1 for random seed" for="seed">Seed:</label>
             <input value="-1" type="number" id="seed" name="seed">
             <button type="button" id="reset-seed">↺</button>
-            <br>
-            <label for="strength">Img2Img Strength:</label>
-            <input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
-            <label title="Upload an image to use img2img" for="initimg">Init:</label>
-            <input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
-            <button type="button" id="reset-all">Reset to Defaults</button>
-            <br>
-            <label for="progress_images">Display in-progress images (slows down generation):</label>
-            <input type="checkbox" name="progress_images" id="progress_images">
-            <div id="gfpgan">
+            <input type="checkbox" name="progress_images" id="progress_images">
+            <label for="progress_images">Display in-progress images (slows down generation):</label>
+            <button type="button" id="reset-all">Reset to Defaults</button>
+      </div>
+      <div id="img2img">
+            <label title="Upload an image to use img2img" for="initimg">Initial image:</label>
+            <input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
+            <br>
+            <label for="strength">Img2Img Strength:</label>
+            <input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
+            <input type="checkbox" id="fit" name="fit" checked>
+            <label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height:</label>
+      </div>
+      <div id="gfpgan">
             <label title="Strength of the gfpgan (face fixing) algorithm." for="gfpgan_strength">GFPGAN Strength (0 to disable):</label>
             <input value="0.8" min="0" max="1" type="number" id="gfpgan_strength" name="gfpgan_strength" step="0.05">
             <label title="Upscaling to perform using ESRGAN." for="upscale_level">Upscaling Level</label>
             <select id="upscale_level" name="upscale_level" value="">
                 <option value="" selected>None</option>
                 <option value="2">2x</option>
                 <option value="4">4x</option>
             </select>
             <label title="Strength of the esrgan (upscaling) algorithm." for="upscale_strength">Upscale Strength:</label>
             <input value="0.75" min="0" max="1" type="number" id="upscale_strength" name="upscale_strength" step="0.05">
-            </div>
+      </div>
         </fieldset>
     </form>
     <div id="about">For news and support for this web service, visit our <a href="http://github.com/lstein/stable-diffusion">GitHub site</a></div>
+    <br>
     <div id="progress-section">
         <progress id="progress-bar" value="0" max="1"></progress>
         <span id="cancel-button" title="Cancel">✖</span>
Loading…
Reference in New Issue
Block a user