add web interface for seamless option
commit a01b7bdc40
@@ -138,6 +138,13 @@ You may also pass a -v<count> option to generate count variants on the original
 passing the first generated image back into img2img the requested number of times. It generates interesting
 variants.
 
+## Seamless Tiling
+
+The seamless tiling mode causes generated images to tile seamlessly with themselves. To use it, add the --seamless option when starting the script, which will cause all generated images to tile, or add it to an individual dream> prompt as shown here:
+
+```
+dream> "pond garden with lotus by claude monet" --seamless -s100 -n4
+```
 
 ## GFPGAN and Real-ESRGAN Support
 
 The script also provides the ability to do face restoration and
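The same option is exposed programmatically by the changes below: T2I gains a seamless constructor argument and prompt2image accepts a per-call override. A minimal, hypothetical sketch of that API (the import path, prompt, and settings are illustrative, and it assumes the repository's default model configuration is available):

```
from ldm.simplet2i import T2I

# Tile every image produced by this instance (mirrors launching the script with --seamless).
t2i = T2I(seamless=True)
t2i.load_model()

# ...or leave the instance default alone and request tiling for a single call instead.
results = t2i.prompt2image(
    'pond garden with lotus by claude monet',
    steps      = 100,
    iterations = 4,
    seamless   = True,
)
```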
@@ -59,6 +59,8 @@ class PromptFormatter:
         switches.append(f'-H{opt.height or t2i.height}')
         switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}')
         switches.append(f'-A{opt.sampler_name or t2i.sampler_name}')
+        if opt.seamless or t2i.seamless:
+            switches.append(f'--seamless')
         if opt.init_img:
             switches.append(f'-I{opt.init_img}')
         if opt.fit:
@@ -71,6 +71,7 @@ class DreamServer(BaseHTTPRequestHandler):
         width = int(post_data['width'])
         height = int(post_data['height'])
         fit = 'fit' in post_data
+        seamless = 'seamless' in post_data
         cfgscale = float(post_data['cfgscale'])
         sampler_name = post_data['sampler']
         gfpgan_strength = float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0
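On the web side the new field behaves like the existing fit checkbox: an unchecked HTML checkbox is simply omitted from the submitted form, so the handler only needs to test for the key's presence ('seamless' in post_data). A hypothetical client sketch of such a request; the URL, port, form encoding, and the requests dependency are assumptions, and only fields visible in the surrounding handler code are shown, so a real request would carry whatever else the web form submits:

```
import requests

form = {
    'prompt': 'pond garden with lotus by claude monet',
    'steps': '100',
    'width': '512',
    'height': '512',
    'cfgscale': '7.5',
    'sampler': 'k_lms',
    'gfpgan_strength': '0',
    'seamless': 'on',   # presence of the key is what matters, not its value
}
# Assumed default address of the dream web server.
requests.post('http://localhost:9090/', data=form)
```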
@@ -164,6 +165,7 @@ class DreamServer(BaseHTTPRequestHandler):
                 gfpgan_strength = gfpgan_strength,
                 upscale = upscale,
                 sampler_name = sampler_name,
+                seamless = seamless,
                 step_callback=image_progress,
                 image_callback=image_done)
             else:
@@ -185,6 +187,7 @@ class DreamServer(BaseHTTPRequestHandler):
                 width = width,
                 height = height,
                 fit = fit,
+                seamless = seamless,
                 gfpgan_strength=gfpgan_strength,
                 upscale = upscale,
                 step_callback=image_progress,
@@ -14,6 +14,7 @@ from PIL import Image
 from tqdm import tqdm, trange
 from itertools import islice
 from einops import rearrange, repeat
+from torch import nn
 from torchvision.utils import make_grid
 from pytorch_lightning import seed_everything
 from torch import autocast
@@ -109,6 +110,7 @@ class T2I:
             downsampling_factor
             precision
             strength
+            seamless
             embedding_path
 
     The vast majority of these arguments default to reasonable values.
@@ -132,6 +134,7 @@ class T2I:
             precision='autocast',
             full_precision=False,
             strength=0.75, # default in scripts/img2img.py
+            seamless=False,
             embedding_path=None,
             device_type = 'cuda',
             # just to keep track of this parameter when regenerating prompt
@@ -153,6 +156,7 @@ class T2I:
         self.precision = precision
         self.full_precision = True if choose_torch_device() == 'mps' else full_precision
         self.strength = strength
+        self.seamless = seamless
         self.embedding_path = embedding_path
         self.device_type = device_type
         self.model = None # empty for now
@@ -217,6 +221,7 @@ class T2I:
             step_callback = None,
             width = None,
             height = None,
+            seamless = False,
             # these are specific to img2img
             init_img = None,
             fit = False,
@@ -240,6 +245,7 @@ class T2I:
            width // width of image, in multiples of 64 (512)
            height // height of image, in multiples of 64 (512)
            cfg_scale // how strongly the prompt influences the image (7.5) (must be >1)
+           seamless // whether the generated image should tile
            init_img // path to an initial image - its dimensions override width and height
            strength // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
            gfpgan_strength // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely
@@ -268,6 +274,7 @@ class T2I:
         steps = steps or self.steps
         width = width or self.width
         height = height or self.height
+        seamless = seamless or self.seamless
         cfg_scale = cfg_scale or self.cfg_scale
         ddim_eta = ddim_eta or self.ddim_eta
         iterations = iterations or self.iterations
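Note the fallback pattern shared by all of these lines: a truthy per-call argument wins, otherwise the instance default applies. A side effect of the or-chain (true of this idiom generally, not specific to this change) is that an explicit per-call False cannot switch tiling off for an instance constructed with seamless=True, as this standalone snippet illustrates:

```
instance_default = True     # e.g. T2I(seamless=True)
per_call         = False    # e.g. prompt2image(..., seamless=False)
effective        = per_call or instance_default
print(effective)            # True: the falsy override is discarded
```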
@@ -278,6 +285,10 @@ class T2I:
         model = (
             self.load_model()
         ) # will instantiate the model or return it from cache
+        for m in model.modules():
+            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
+                m.padding_mode = 'circular' if seamless else m._orig_padding_mode
+
         assert cfg_scale > 1.0, 'CFG_Scale (-C) must be >1.0'
         assert (
             0.0 <= strength <= 1.0
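This hunk is the heart of the feature: every Conv2d and ConvTranspose2d in the model is switched to circular padding, so pixels at one border are computed as if the opposite border were adjacent, which is what makes the output wrap. The companion change in load_model (next hunk) records the original padding mode so the setting can be flipped back per prompt. A self-contained sketch of the same toggle applied to a plain convolution, outside the diffusion model:

```
import torch
from torch import nn

def set_seamless(module, seamless):
    # Same loop as in prompt2image: flip padding on every convolution layer.
    for m in module.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            m.padding_mode = 'circular' if seamless else m._orig_padding_mode

conv = nn.Conv2d(3, 3, kernel_size=3, padding=1)
conv._orig_padding_mode = conv.padding_mode   # what load_model stores ('zeros' by default)

set_seamless(conv, True)
out = conv(torch.randn(1, 3, 8, 8))
print(conv.padding_mode, out.shape)           # circular torch.Size([1, 3, 8, 8])
```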
@@ -603,6 +614,10 @@ class T2I:
 
         self._set_sampler()
 
+        for m in self.model.modules():
+            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
+                m._orig_padding_mode = m.padding_mode
+
         return self.model
 
     # returns a tensor filled with random numbers from a normal distribution
@@ -62,6 +62,7 @@ def main():
         grid = opt.grid,
         # this is solely for recreating the prompt
         latent_diffusion_weights=opt.laion400m,
+        seamless=opt.seamless,
         embedding_path=opt.embedding_path,
         device_type=opt.device
     )
@@ -87,6 +88,9 @@ def main():
         print(f'{e}. Aborting.')
         sys.exit(-1)
 
+    if opt.seamless:
+        print(">> changed to seamless tiling mode")
+
     # preload the model
     tic = time.time()
     t2i.load_model()
@@ -418,6 +422,11 @@ def create_argv_parser():
         default='outputs/img-samples',
         help='Directory to save generated images and a log of prompts and seeds. Default: outputs/img-samples',
     )
+    parser.add_argument(
+        '--seamless',
+        action='store_true',
+        help='Change the model to seamless tiling (circular) mode',
+    )
     parser.add_argument(
         '--embedding_path',
         type=str,
@@ -540,6 +549,11 @@ def create_cmd_parser():
         default=None,
         help='Directory to save generated images and a log of prompts and seeds',
     )
+    parser.add_argument(
+        '--seamless',
+        action='store_true',
+        help='Change the model to seamless tiling (circular) mode',
+    )
     parser.add_argument(
         '-i',
         '--individual',
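The same store_true flag is registered on both the startup parser and the interactive dream> parser, so tiling can be enabled for a whole session or requested for an individual prompt. A standalone sketch of how that flag parses (the parser here is built for illustration only):

```
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--seamless',
    action='store_true',
    help='Change the model to seamless tiling (circular) mode',
)

print(parser.parse_args(['--seamless']).seamless)   # True
print(parser.parse_args([]).seamless)               # False (store_true defaults to False)
```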
@@ -37,6 +37,8 @@
             <option value="k_euler_a">KEULER_A</option>
             <option value="k_heun">KHEUN</option>
           </select>
+          <input type="checkbox" name="seamless" id="seamless">
+          <label for="seamless">Seamless circular tiling</label>
           <br>
           <label title="Set to multiple of 64" for="width">Width:</label>
           <select id="width" name="width" value="512">
@@ -64,7 +66,7 @@
           <input value="-1" type="number" id="seed" name="seed">
           <button type="button" id="reset-seed">↺</button>
           <input type="checkbox" name="progress_images" id="progress_images">
-          <label for="progress_images">Display in-progress images (slows down generation):</label>
+          <label for="progress_images">Display in-progress images (slower)</label>
           <button type="button" id="reset-all">Reset to Defaults</button>
         </div>
         <div id="img2img">
@@ -74,7 +76,7 @@
           <label for="strength">Img2Img Strength:</label>
           <input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
           <input type="checkbox" id="fit" name="fit" checked>
-          <label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height:</label>
+          <label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height</label>
         </div>
         <div id="gfpgan">
           <label title="Strength of the gfpgan (face fixing) algorithm." for="gfpgan_strength">GPFGAN Strength (0 to disable):</label>