Mirror of https://github.com/invoke-ai/InvokeAI
Synced 2024-08-30 20:32:17 +00:00

Merge branch 'main' into lstein/xformers-instructions
This commit is contained in commit 279ffcfe15.
@@ -52,7 +52,7 @@ version of InvokeAI with the option to upgrade to experimental versions later.
   find python, then open the Python installer again and choose
   "Modify" existing installation.

-- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
+- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170

 === "Mac users"
@@ -45,6 +45,7 @@ def main():
     Globals.try_patchmatch = args.patchmatch
     Globals.always_use_cpu = args.always_use_cpu
     Globals.internet_available = args.internet_available and check_internet()
+    Globals.disable_xformers = not args.xformers
     print(f'>> Internet connectivity is {Globals.internet_available}')

     if not args.conf:
@@ -124,7 +125,7 @@ def main():
     # preload the model
     try:
         gen.load_model()
-    except KeyError as e:
+    except KeyError:
         pass
     except Exception as e:
         report_model_error(opt, e)
@@ -731,11 +732,6 @@ def del_config(model_name:str, gen, opt, completer):
     completer.update_models(gen.model_manager.list_models())

 def edit_model(model_name:str, gen, opt, completer):
     current_model = gen.model_name
-    # if model_name == current_model:
-    #    print("** Can't edit the active model. !switch to another model first. **")
-    #    return
-
     manager = gen.model_manager
     if not (info := manager.model_info(model_name)):
         print(f'** Unknown model {model_name}')
@@ -887,7 +883,7 @@ def prepare_image_metadata(
     try:
         filename = opt.fnformat.format(**wildcards)
     except KeyError as e:
-        print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use \'{{prefix}}.{{seed}}.png\' instead')
+        print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
         filename = f'{prefix}.{seed}.png'
     except IndexError:
         print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
@@ -482,6 +482,12 @@ class Args(object):
             action='store_true',
             help='Force free gpu memory before final decoding',
         )
+        model_group.add_argument(
+            '--xformers',
+            action=argparse.BooleanOptionalAction,
+            default=True,
+            help='Enable/disable xformers support (default enabled if installed)',
+        )
         model_group.add_argument(
             "--always_use_cpu",
             dest="always_use_cpu",
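Note: the new flag leans on argparse.BooleanOptionalAction (standard library, Python 3.9+), which registers a paired --no-xformers switch automatically; the CLI hunk earlier then inverts the parsed value into Globals.disable_xformers. A minimal standalone sketch of the behavior, outside InvokeAI:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--xformers',
    action=argparse.BooleanOptionalAction,  # also generates --no-xformers
    default=True,
)

print(parser.parse_args([]).xformers)                  # True (the default)
print(parser.parse_args(['--no-xformers']).xformers)   # False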
@@ -39,6 +39,7 @@ from diffusers.utils.outputs import BaseOutput
 from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

+from ldm.invoke.globals import Globals
 from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, ThresholdSettings
 from ldm.modules.textual_inversion_manager import TextualInversionManager
@@ -306,7 +307,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             textual_inversion_manager=self.textual_inversion_manager
         )

-        if is_xformers_available():
+        if is_xformers_available() and not Globals.disable_xformers:
             self.enable_xformers_memory_efficient_attention()

     def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
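Note: the guard above combines a capability check with a user preference. A hedged sketch of the same pattern against a stock diffusers pipeline (the model id and the DISABLE_XFORMERS name are illustrative stand-ins, not code from this commit):

from diffusers import StableDiffusionPipeline
from diffusers.utils import is_xformers_available

DISABLE_XFORMERS = False  # stand-in for Globals.disable_xformers

pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
if is_xformers_available() and not DISABLE_XFORMERS:
    # enable memory-efficient attention only when installed AND not opted out
    pipe.enable_xformers_memory_efficient_attention()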
@@ -3,6 +3,7 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
 '''

 import math
+from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
 from typing import Callable, Optional

 import torch
@@ -66,6 +67,8 @@ class Txt2Img2Img(Generator):

         second_pass_noise = self.get_noise_like(resized_latents)

+        verbosity = get_verbosity()
+        set_verbosity_error()
         pipeline_output = pipeline.img2img_from_latents_and_embeddings(
             resized_latents,
             num_inference_steps=steps,
@@ -73,6 +76,7 @@ class Txt2Img2Img(Generator):
             strength=strength,
             noise=second_pass_noise,
             callback=step_callback)
+        set_verbosity(verbosity)

         return pipeline.numpy_to_pil(pipeline_output.images)[0]
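Note: the two hunks above implement a save/suppress/restore cycle around a noisy pipeline call. The same idea, wrapped in a reusable context manager (a sketch built from the diffusers helpers the commit imports; not code from the commit):

from contextlib import contextmanager
from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error

@contextmanager
def quiet_diffusers():
    saved = get_verbosity()   # remember the caller's logging level
    set_verbosity_error()     # silence warnings during the wrapped call
    try:
        yield
    finally:
        set_verbosity(saved)  # restore even if the call raises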
@@ -43,6 +43,9 @@ Globals.always_use_cpu = False
 # The CLI will test connectivity at startup time.
 Globals.internet_available = True

+# Whether to disable xformers
+Globals.disable_xformers = False
+
 # whether we are forcing full precision
 Globals.full_precision = False
@@ -62,11 +65,21 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path:
     '''
     Returns Path to the model cache directory. If a subdirectory
     is provided, it will be appended to the end of the path, allowing
-    for huggingface-style conventions:
+    for huggingface-style conventions:
         global_cache_dir('diffusers')
         global_cache_dir('transformers')
     '''
-    if (home := os.environ.get('HF_HOME')):
+    home: str = os.getenv('HF_HOME')
+
+    if home is None:
+        home = os.getenv('XDG_CACHE_HOME')
+
+        if home is not None:
+            # Set `home` to $XDG_CACHE_HOME/huggingface, which is the default location mentioned in HuggingFace Hub Client Library.
+            # See: https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome
+            home += os.sep + 'huggingface'
+
+    if home is not None:
         return Path(home,subdir)
     else:
         return Path(Globals.root,'models',subdir)
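Note: the rewritten global_cache_dir resolves the cache location in the order the HuggingFace Hub client documents: $HF_HOME first, then $XDG_CACHE_HOME/huggingface, then the application's own models directory. A condensed standalone sketch (fallback_root is a hypothetical stand-in for Globals.root):

import os
from pathlib import Path

def resolve_hf_cache(subdir: str = '', fallback_root: str = '.') -> Path:
    home = os.getenv('HF_HOME')
    if home is None:
        xdg = os.getenv('XDG_CACHE_HOME')
        if xdg is not None:
            home = os.path.join(xdg, 'huggingface')  # documented XDG default
    if home is not None:
        return Path(home, subdir)
    return Path(fallback_root, 'models', subdir)     # app-local fallback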
@@ -25,6 +25,7 @@ import torch
 import safetensors
 import transformers
 from diffusers import AutoencoderKL, logging as dlogging
+from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
@@ -166,7 +167,7 @@ class ModelManager(object):
             # don't include VAEs in listing (legacy style)
             if 'config' in stanza and '/VAE/' in stanza['config']:
                 continue
-
+
             models[name] = dict()
             format = stanza.get('format','ckpt') # Determine Format
@@ -183,7 +184,7 @@ class ModelManager(object):
                 format = format,
                 status = status,
             )
-
+
             # Checkpoint Config Parse
             if format == 'ckpt':
                 models[name].update(
@@ -193,7 +194,7 @@ class ModelManager(object):
                     width = str(stanza.get('width', 512)),
                     height = str(stanza.get('height', 512)),
                 )
-
+
             # Diffusers Config Parse
             if (vae := stanza.get('vae',None)):
                 if isinstance(vae,DictConfig):
@@ -202,14 +203,14 @@ class ModelManager(object):
                     path = str(vae.get('path',None)),
                     subfolder = str(vae.get('subfolder',None))
                 )
-
+
             if format == 'diffusers':
                 models[name].update(
                     vae = vae,
                     repo_id = str(stanza.get('repo_id', None)),
                     path = str(stanza.get('path',None)),
                 )
-
+
         return models

     def print_models(self) -> None:
@@ -257,7 +258,7 @@ class ModelManager(object):
         assert (clobber or model_name not in omega), f'attempt to overwrite existing model definition "{model_name}"'

         omega[model_name] = model_attributes
-
+
         if 'weights' in omega[model_name]:
             omega[model_name]['weights'].replace('\\','/')
@@ -554,12 +555,12 @@ class ModelManager(object):
        '''
        Attempts to install the indicated ckpt file and returns True if successful.

-       "weights" can be either a path-like object corresponding to a local .ckpt file
+       "weights" can be either a path-like object corresponding to a local .ckpt file
        or a http/https URL pointing to a remote model.

        "config" is the model config file to use with this ckpt file. It defaults to
        v1-inference.yaml. If a URL is provided, the config will be downloaded.
-
+
        You can optionally provide a model name and/or description. If not provided,
        then these will be derived from the weight file name. If you provide a commit_to_conf
        path to the configuration file, then the new entry will be committed to the
@@ -572,7 +573,7 @@ class ModelManager(object):
             return False
         if config_path is None or not config_path.exists():
             return False
-
+
         model_name = model_name or Path(weights).stem
         model_description = model_description or f'imported stable diffusion weights file {model_name}'
         new_config = dict(
@@ -587,7 +588,7 @@ class ModelManager(object):
         if commit_to_conf:
             self.commit(commit_to_conf)
         return True
-
+
     def autoconvert_weights(
         self,
         conf_path:Path,
@@ -660,7 +661,7 @@ class ModelManager(object):
         except Exception as e:
             print(f'** Conversion failed: {str(e)}')
             traceback.print_exc()
-
+
         print('done.')
         return new_config
@@ -756,9 +757,13 @@ class ModelManager(object):
         print('** Legacy version <= 2.2.5 model directory layout detected. Reorganizing.')
         print('** This is a quick one-time operation.')
         from shutil import move, rmtree

         # transformer files get moved into the hub directory
-        hub = models_dir / 'hub'
+        if cls._is_huggingface_hub_directory_present():
+            hub = global_cache_dir('hub')
+        else:
+            hub = models_dir / 'hub'
+
         os.makedirs(hub, exist_ok=True)
         for model in legacy_locations:
             source = models_dir / model
@@ -771,7 +776,11 @@ class ModelManager(object):
                 move(source, dest)

         # anything else gets moved into the diffusers directory
-        diffusers = models_dir / 'diffusers'
+        if cls._is_huggingface_hub_directory_present():
+            diffusers = global_cache_dir('diffusers')
+        else:
+            diffusers = models_dir / 'diffusers'
+
         os.makedirs(diffusers, exist_ok=True)
         for root, dirs, _ in os.walk(models_dir, topdown=False):
             for dir in dirs:
@@ -819,11 +828,11 @@ class ModelManager(object):
         return model

         # diffusers really really doesn't like us moving a float16 model onto CPU
-        import logging
-        logging.getLogger('diffusers.pipeline_utils').setLevel(logging.CRITICAL)
+        verbosity = get_verbosity()
+        set_verbosity_error()
         model.cond_stage_model.device = 'cpu'
         model.to('cpu')
-        logging.getLogger('pipeline_utils').setLevel(logging.INFO)
+        set_verbosity(verbosity)

         for submodel in ('first_stage_model','cond_stage_model','model'):
             try:
@@ -962,3 +971,7 @@ class ModelManager(object):
             print(f'** Could not load VAE {name_or_path}: {str(deferred_error)}')

         return vae
+
+    @staticmethod
+    def _is_huggingface_hub_directory_present() -> bool:
+        return os.getenv('HF_HOME') is not None or os.getenv('XDG_CACHE_HOME') is not None
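Note: the new _is_huggingface_hub_directory_present() predicate is what drives the two reorganization hunks above: models move into the shared HuggingFace cache when the user has one configured, and stay inside the app tree otherwise. A hypothetical condensation of that choice (choose_model_dir is not a function in the commit):

import os
from pathlib import Path
from ldm.invoke.globals import global_cache_dir  # patched earlier in this commit

def choose_model_dir(models_dir: Path, kind: str) -> Path:
    # mirrors ModelManager._is_huggingface_hub_directory_present()
    hub_present = os.getenv('HF_HOME') is not None or os.getenv('XDG_CACHE_HOME') is not None
    dest = global_cache_dir(kind) if hub_present else models_dir / kind
    os.makedirs(dest, exist_ok=True)
    return dest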
@@ -291,7 +291,7 @@ for more information.

 Visit https://huggingface.co/settings/tokens to generate a token. (Sign up for an account if needed).

-Paste the token below using Ctrl-V on macOS/Linux, or Ctrl-Shift-V or right-click on Windows.
+Paste the token below using Ctrl-V on macOS/Linux, or Ctrl-Shift-V or right-click on Windows.
 Alternatively press 'Enter' to skip this step and continue.
 You may re-run the configuration script again in the future if you do not wish to set the token right now.
 ''')
@@ -676,7 +678,8 @@ def download_weights(opt:dict) -> Union[str, None]:
         return

     access_token = authenticate()
-    HfFolder.save_token(access_token)
+    if access_token is not None:
+        HfFolder.save_token(access_token)

     print('\n** DOWNLOADING WEIGHTS **')
     successfully_downloaded = download_weight_datasets(models, access_token, precision=precision)
@@ -115,6 +115,14 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             value=self.precisions.index(saved_args.get('mixed_precision','fp16')),
             max_height=4,
         )
+        self.num_train_epochs = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Number of training epochs:',
+            out_of=1000,
+            step=50,
+            lowest=1,
+            value=saved_args.get('num_train_epochs',100)
+        )
         self.max_train_steps = self.add_widget_intelligent(
             npyscreen.TitleSlider,
             name='Max Training Steps:',
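Note: num_train_epochs is an npyscreen TitleSlider like its neighbors; out_of/step/lowest set the slider range and value seeds it from the saved arguments. A self-contained sketch of the same widget outside the InvokeAI form (requires a curses-capable terminal; the form and app names are illustrative):

import npyscreen

class TrainingForm(npyscreen.Form):
    def create(self):
        self.num_train_epochs = self.add(
            npyscreen.TitleSlider,
            name='Number of training epochs:',
            out_of=1000,  # slider maximum
            step=50,      # increment per keypress
            lowest=1,     # slider minimum
            value=100,    # initial position
        )

class App(npyscreen.NPSAppManaged):
    def onStart(self):
        self.addForm('MAIN', TrainingForm, name='Textual Inversion')

if __name__ == '__main__':
    App().run()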
@@ -131,6 +139,22 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             lowest=1,
             value=saved_args.get('train_batch_size',8),
         )
+        self.gradient_accumulation_steps = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):',
+            out_of=10,
+            step=1,
+            lowest=1,
+            value=saved_args.get('gradient_accumulation_steps',4)
+        )
+        self.lr_warmup_steps = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Warmup Steps:',
+            out_of=100,
+            step=1,
+            lowest=0,
+            value=saved_args.get('lr_warmup_steps',0),
+        )
         self.learning_rate = self.add_widget_intelligent(
             npyscreen.TitleText,
             name="Learning Rate:",
@@ -154,22 +178,6 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             scroll_exit = True,
             value=self.lr_schedulers.index(saved_args.get('lr_scheduler','constant')),
         )
-        self.gradient_accumulation_steps = self.add_widget_intelligent(
-            npyscreen.TitleSlider,
-            name='Gradient Accumulation Steps:',
-            out_of=10,
-            step=1,
-            lowest=1,
-            value=saved_args.get('gradient_accumulation_steps',4)
-        )
-        self.lr_warmup_steps = self.add_widget_intelligent(
-            npyscreen.TitleSlider,
-            name='Warmup Steps:',
-            out_of=100,
-            step=1,
-            lowest=0,
-            value=saved_args.get('lr_warmup_steps',0),
-        )

     def initializer_changed(self):
         placeholder = self.placeholder_token.value
@@ -236,7 +244,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):

         # all the integers
         for attr in ('train_batch_size','gradient_accumulation_steps',
-                     'max_train_steps','lr_warmup_steps'):
+                     'num_train_epochs','max_train_steps','lr_warmup_steps'):
             args[attr] = int(getattr(self,attr).value)

         # the floats (just one)
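Note: the tuple above doubles as the schema for harvesting widget values, so wiring up the new num_train_epochs widget only required extending it. A standalone sketch of the pattern with stub widgets (FormStub and its values are illustrative, not part of the commit):

from types import SimpleNamespace

class FormStub:
    '''Stand-in for the npyscreen form; each widget exposes .value.'''
    def __init__(self):
        for attr, v in (('train_batch_size', 8.0), ('gradient_accumulation_steps', 4.0),
                        ('num_train_epochs', 100.0), ('max_train_steps', 3000.0),
                        ('lr_warmup_steps', 0.0)):
            setattr(self, attr, SimpleNamespace(value=v))

    def marshall_arguments(self) -> dict:
        args = {}
        for attr in ('train_batch_size', 'gradient_accumulation_steps',
                     'num_train_epochs', 'max_train_steps', 'lr_warmup_steps'):
            args[attr] = int(getattr(self, attr).value)  # sliders report floats
        return args

print(FormStub().marshall_arguments())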
@@ -324,6 +332,7 @@ if __name__ == '__main__':
     save_args(args)

     try:
+        print(f'DEBUG: args = {args}')
         do_textual_inversion_training(**args)
         copy_to_embeddings_folder(args)
     except Exception as e: