Merge branch 'main' into feat/xformers-startup-message

Kevin Turner authored on 2023-01-31 18:48:09 -08:00; committed by GitHub
11 changed files with 183 additions and 144 deletions

View File

@@ -21,6 +21,38 @@ import ldm.invoke
# global used in multiple functions (fix)
infile = None
def report_model_error(opt:Namespace, e:Exception):
print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
if "--yes" not in os.environ.get('INVOKE_MODEL_RECONFIGURE', '').split():
response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
if response.startswith(('n','N')):
return
print('configure_invokeai is launching....\n')
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed
root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
config = ["--config", opt.conf] if opt.conf is not None else []
if os.getenv('INVOKE_MODEL_RECONFIGURE'):
yes_to_all = os.environ['INVOKE_MODEL_RECONFIGURE'].split()
else:
yes_to_all = None
previous_args = sys.argv
sys.argv = [ 'configure_invokeai' ]
sys.argv.extend(root_dir)
sys.argv.extend(config)
if yes_to_all is not None:
    sys.argv.extend(yes_to_all)
import ldm.invoke.configure_invokeai as configure_invokeai
configure_invokeai.main()
print('** InvokeAI will now restart')
sys.argv = previous_args
sys.exit(main()) # would rather re-exec the process (e.g. os.execv), but restarting main() in-process works
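For context, the non-interactive path in report_model_error() is driven entirely by the INVOKE_MODEL_RECONFIGURE environment variable: its tokens are forwarded to configure_invokeai, and '--yes' is the only token the prompt-skipping check looks for. A minimal sketch of how a caller might opt into it (whether configure_invokeai itself honors '--yes' is an assumption here):

import os

# hypothetical: set before launching invoke.py so a model error reconfigures without prompting
os.environ['INVOKE_MODEL_RECONFIGURE'] = '--yes'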
def main():
"""Initialize command-line parsers and the diffusion model"""
global infile
@@ -50,10 +82,11 @@ def main():
if not args.conf:
if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
print('** This script will now exit.')
sys.exit(-1)
report_model_error(opt, e)
# print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
# print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
# print('** This script will now exit.')
# sys.exit(-1)
print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -574,7 +607,7 @@ def import_model(model_path:str, gen, opt, completer):
if model_path.startswith(('http:','https:','ftp:')):
model_name = import_ckpt_model(model_path, gen, opt, completer)
elif os.path.exists(model_path) and model_path.endswith(('.ckpt','.safetensors')) and os.path.isfile(model_path):
model_name = import_ckpt_model(model_path, gen, opt, completer)
elif re.match('^[\w.+-]+/[\w.+-]+$',model_path):
model_name = import_diffuser_model(model_path, gen, opt, completer)
elif os.path.isdir(model_path):
@@ -743,7 +776,7 @@ def del_config(model_name:str, gen, opt, completer):
if input(f'Remove {model_name} from the list of models known to InvokeAI? [y] ').strip().startswith(('n','N')):
return
delete_completely = input('Completely remove the model file or directory from disk? [n] ').startswith(('y','Y'))
gen.model_manager.del_model(model_name,delete_files=delete_completely)
gen.model_manager.commit(opt.conf)
@@ -786,8 +819,8 @@ def _get_model_name(existing_names,completer,default_name:str='')->str:
model_name = input(f'Short name for this model [{default_name}]: ').strip()
if len(model_name)==0:
model_name = default_name
if not re.match('^[\w._+-]+$',model_name):
print('** model name must contain only words, digits and the characters "._+-" **')
if not re.match('^[\w._+:/-]+$',model_name):
print('** model name must contain only words, digits and the characters "._+:/-" **')
elif model_name != default_name and model_name in existing_names:
print(f'** the name {model_name} is already in use. Pick another.')
else:
@@ -1097,34 +1130,6 @@ def write_commands(opt, file_path:str, outfilepath:str):
f.write('\n'.join(commands))
print(f'>> File {outfilepath} with commands created')
def report_model_error(opt:Namespace, e:Exception):
print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
if response.startswith(('n','N')):
return
print('configure_invokeai is launching....\n')
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed
root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
config = ["--config", opt.conf] if opt.conf is not None else []
yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
previous_args = sys.argv
sys.argv = [ 'configure_invokeai' ]
sys.argv.extend(root_dir)
sys.argv.extend(config)
if yes_to_all is not None:
sys.argv.append(yes_to_all)
import ldm.invoke.configure_invokeai as configure_invokeai
configure_invokeai.main()
print('** InvokeAI will now restart')
sys.argv = previous_args
main() # would rather do a os.exec(), but doesn't exist?
sys.exit(0)
def check_internet()->bool:
'''
Return true if the internet is reachable.

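The body of check_internet() is truncated by the diff above. A minimal sketch of such a reachability probe, assuming an HTTP request with a short timeout against huggingface.co (the host, the timeout, and the use of urllib are assumptions, not necessarily what the file actually does):

import urllib.request

def check_internet() -> bool:
    '''Return true if the internet is reachable.'''
    try:
        urllib.request.urlopen('http://huggingface.co', timeout=1)
        return True
    except Exception:
        return False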
View File

@@ -544,6 +544,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
init_image = image_resized_to_grid_as_tensor(init_image.convert('RGB'))
init_image = init_image.to(device=device, dtype=latents_dtype)
mask = mask.to(device=device, dtype=latents_dtype)
if init_image.dim() == 3:
init_image = init_image.unsqueeze(0)
@@ -562,17 +563,22 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
if mask.dim() == 3:
mask = mask.unsqueeze(0)
mask = tv_resize(mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR) \
latent_mask = tv_resize(mask, init_image_latents.shape[-2:], T.InterpolationMode.BILINEAR) \
.to(device=device, dtype=latents_dtype)
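The statement above downsamples the image-space mask to the latent grid so it lines up with init_image_latents. A small illustration, assuming the usual 8x VAE downsampling factor (the 512x512 size is arbitrary):

import torch
import torchvision.transforms as T
from torchvision.transforms.functional import resize as tv_resize

mask = torch.zeros(1, 1, 512, 512)                                     # image-resolution mask
latent_mask = tv_resize(mask, [64, 64], T.InterpolationMode.BILINEAR)  # latent-resolution mask
print(latent_mask.shape)  # torch.Size([1, 1, 64, 64])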
guidance: List[Callable] = []
if is_inpainting_model(self.unet):
# You'd think the inpainting model wouldn't be paying attention to the area it is going to repaint
# (that's why there's a mask!) but it seems to really want that blanked out.
masked_init_image = init_image * torch.where(mask < 0.5, 1, 0)
masked_latents = self.non_noised_latents_from_image(masked_init_image, device=device, dtype=latents_dtype)
# TODO: we should probably pass this in so we don't have to try/finally around setting it.
self.invokeai_diffuser.model_forward_callback = \
AddsMaskLatents(self._unet_forward, mask, init_image_latents)
AddsMaskLatents(self._unet_forward, latent_mask, masked_latents)
else:
guidance.append(AddsMaskGuidance(mask, init_image_latents, self.scheduler, noise))
guidance.append(AddsMaskGuidance(latent_mask, init_image_latents, self.scheduler, noise))
try:
result_latents, result_attention_maps = self.latents_from_embeddings(
@@ -591,11 +597,20 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps)
return self.check_for_safety(output, dtype=conditioning_data.dtype)
def non_noised_latents_from_image(self, init_image, *, device, dtype):
def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype):
init_image = init_image.to(device=device, dtype=dtype)
with torch.inference_mode():
if device.type == 'mps':
# workaround for torch MPS bug that has been fixed in https://github.com/kulinseth/pytorch/pull/222
# TODO remove this workaround once kulinseth#222 is merged to pytorch mainline
self.vae.to('cpu')
init_image = init_image.to('cpu')
init_latent_dist = self.vae.encode(init_image).latent_dist
init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible!
if device.type == 'mps':
self.vae.to(device)
init_latents = init_latents.to(device)
init_latents = 0.18215 * init_latents
return init_latents
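The MPS branch above is an instance of a general run-on-CPU-then-move-back pattern. A standalone sketch of that pattern (the helper name is illustrative and not part of the pipeline API; scaling by 0.18215 and torch.inference_mode() are omitted for brevity):

import torch

def encode_on_cpu_if_mps(vae, image: torch.Tensor, device: torch.device) -> torch.Tensor:
    # run the VAE encode on CPU when the target device is MPS, then move the result back
    run_device = torch.device('cpu') if device.type == 'mps' else device
    vae.to(run_device)
    latents = vae.encode(image.to(run_device)).latent_dist.sample()
    if run_device.type != device.type:
        vae.to(device)            # restore the module to its original device
        latents = latents.to(device)
    return latents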

View File

@@ -19,10 +19,12 @@ from ldm.util import debug_image
def infill_methods()->list[str]:
methods = list()
methods = [
"tile",
"solid",
]
if PatchMatch.patchmatch_available():
methods.append('patchmatch')
methods.append('tile')
methods.insert(0, 'patchmatch')
return methods
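For reference, the ordering this version produces (derived directly from the code above):

# patchmatch available:      infill_methods() == ['patchmatch', 'tile', 'solid']
# patchmatch not available:  infill_methods() == ['tile', 'solid']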
class Inpaint(Img2Img):
@@ -182,6 +184,7 @@ class Inpaint(Img2Img):
infill_method = None,
inpaint_width=None,
inpaint_height=None,
inpaint_fill: tuple[int, int, int, int] = (0x7F, 0x7F, 0x7F, 0xFF),
attention_maps_callback=None,
**kwargs):
"""
@@ -202,12 +205,17 @@ class Inpaint(Img2Img):
# Do infill
if infill_method == 'patchmatch' and PatchMatch.patchmatch_available():
init_filled = self.infill_patchmatch(self.pil_image.copy())
else: # if infill_method == 'tile': # Only two methods right now, so always use 'tile' if not patchmatch
elif infill_method == 'tile':
init_filled = self.tile_fill_missing(
self.pil_image.copy(),
seed = self.seed,
tile_size = tile_size
)
elif infill_method == 'solid':
solid_bg = PIL.Image.new("RGBA", init_image.size, inpaint_fill)
init_filled = PIL.Image.alpha_composite(solid_bg, init_image)
else:
raise ValueError(f"Unsupported infill method: {infill_method}", infill_method)
init_filled.paste(init_image, (0,0), init_image.split()[-1])
# Resize if requested for inpainting
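A standalone sketch of the 'solid' branch added above, combined with the final paste: transparent regions of an RGBA init image are filled with flat grey, then the original pixels are re-applied through the image's own alpha channel (the input filename is hypothetical):

from PIL import Image

init_image = Image.open('photo_with_alpha.png').convert('RGBA')   # hypothetical input
solid_bg = Image.new('RGBA', init_image.size, (0x7F, 0x7F, 0x7F, 0xFF))
init_filled = Image.alpha_composite(solid_bg, init_image)
init_filled.paste(init_image, (0, 0), init_image.split()[-1])      # keep opaque source pixels unchanged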

View File

@@ -3,10 +3,10 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
'''
import math
from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
from typing import Callable, Optional
import torch
from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
from ldm.invoke.generator.base import Generator
from ldm.invoke.generator.diffusers_pipeline import trim_to_multiple_of, StableDiffusionGeneratorPipeline, \
@@ -128,18 +128,13 @@ class Txt2Img2Img(Generator):
scaled_width = width
scaled_height = height
device = self.model.device
channels = self.latent_channels
if channels == 9:
channels = 4 # we don't really want noise for all the mask channels
shape = (1, channels,
scaled_height // self.downsampling_factor, scaled_width // self.downsampling_factor)
if self.use_mps_noise or device.type == 'mps':
return torch.randn([1,
self.latent_channels,
scaled_height // self.downsampling_factor,
scaled_width // self.downsampling_factor],
dtype=self.torch_dtype(),
device='cpu').to(device)
return torch.randn(shape, dtype=self.torch_dtype(), device='cpu').to(device)
else:
return torch.randn([1,
self.latent_channels,
scaled_height // self.downsampling_factor,
scaled_width // self.downsampling_factor],
dtype=self.torch_dtype(),
device=device)
return torch.randn(shape, dtype=self.torch_dtype(), device=device)
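Both branches above now share a single shape tuple; the MPS branch still samples on the CPU generator and only then moves the tensor over, presumably so seeded noise stays reproducible across devices. A minimal illustration of that pattern (the shape and seed are arbitrary):

import torch

torch.manual_seed(42)
shape = (1, 4, 64, 64)
noise = torch.randn(shape, dtype=torch.float32, device='cpu')  # deterministic for a given seed
noise = noise.to('mps' if torch.backends.mps.is_available() else 'cpu')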

View File

@@ -125,7 +125,7 @@ class ModelManager(object):
Set the default model. The change will not take
effect until you call model_manager.commit()
'''
assert model_name in self.models,f"unknown model '{model_name}'"
assert model_name in self.model_names(), f"unknown model '{model_name}'"
config = self.config
for model in config: