From 241313c4a651dac5662fff01ba65e2473ca631cb Mon Sep 17 00:00:00 2001
From: gogurtenjoyer <36354352+gogurtenjoyer@users.noreply.github.com>
Date: Thu, 12 Jan 2023 14:09:35 -0800
Subject: [PATCH 1/8] Update automated install doc - link to MS C libs

Updated the link for the MS Visual C++ libraries. MS may have moved the
files, but the new link leads directly to the file downloads.
---
 docs/installation/010_INSTALL_AUTOMATED.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/installation/010_INSTALL_AUTOMATED.md b/docs/installation/010_INSTALL_AUTOMATED.md
index 0bafc5c861..8007c59b6d 100644
--- a/docs/installation/010_INSTALL_AUTOMATED.md
+++ b/docs/installation/010_INSTALL_AUTOMATED.md
@@ -52,7 +52,7 @@ version of InvokeAI with the option to upgrade to experimental versions later.
       find python, then open the Python installer again and choose
       "Modify" existing installation.
 
-    - Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
+    - Installation requires an up-to-date version of the Microsoft Visual C++ libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
 
 === "Mac users"
 

From 2282e681f79a4c3550de04dda93e67c23ed1613a Mon Sep 17 00:00:00 2001
From: Daya Adianto
Date: Wed, 18 Jan 2023 19:02:16 +0700
Subject: [PATCH 2/8] =?UTF-8?q?Store=20&=20load=20=F0=9F=A4=97=20models=20?=
 =?UTF-8?q?at=20XDG=5FCACHE=5FHOME=20if=20HF=5FHOME=20is=20not=20set?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit allows InvokeAI to store & load 🤗 models at a location set by
the `XDG_CACHE_HOME` environment variable if `HF_HOME` is not set.

Reference: https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome
---
 ldm/invoke/globals.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/ldm/invoke/globals.py b/ldm/invoke/globals.py
index 897bf5e204..137171aa33 100644
--- a/ldm/invoke/globals.py
+++ b/ldm/invoke/globals.py
@@ -62,11 +62,21 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path:
     '''
     Returns Path to the model cache directory. If a subdirectory
     is provided, it will be appended to the end of the path, allowing
-    for huggingface-style conventions: 
+    for huggingface-style conventions:
        global_cache_dir('diffusers')
        global_cache_dir('transformers')
    '''
-    if (home := os.environ.get('HF_HOME')):
+    home = os.getenv('HF_HOME')
+
+    if home is None:
+        home = os.getenv('XDG_CACHE_HOME')
+
+        if home is not None:
+            # Set `home` to $XDG_CACHE_HOME/huggingface, which is the default location mentioned in the HuggingFace Hub Client Library.
+ # See: https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome + home += os.sep + 'huggingface' + + if home is not None: return Path(home,subdir) else: return Path(Globals.root,'models',subdir) From aa4e8d8cf3e5930380442311203e96995e7f8471 Mon Sep 17 00:00:00 2001 From: Daya Adianto Date: Wed, 18 Jan 2023 21:02:31 +0700 Subject: [PATCH 3/8] =?UTF-8?q?Migrate=20legacy=20models=20(pre-2.3.0)=20t?= =?UTF-8?q?o=20=F0=9F=A4=97=20cache=20directory=20if=20exists?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ldm/invoke/model_manager.py | 40 ++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 6dce95b415..399d9bc934 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -166,7 +166,7 @@ class ModelManager(object): # don't include VAEs in listing (legacy style) if 'config' in stanza and '/VAE/' in stanza['config']: continue - + models[name] = dict() format = stanza.get('format','ckpt') # Determine Format @@ -183,7 +183,7 @@ class ModelManager(object): format = format, status = status, ) - + # Checkpoint Config Parse if format == 'ckpt': models[name].update( @@ -193,7 +193,7 @@ class ModelManager(object): width = str(stanza.get('width', 512)), height = str(stanza.get('height', 512)), ) - + # Diffusers Config Parse if (vae := stanza.get('vae',None)): if isinstance(vae,DictConfig): @@ -202,14 +202,14 @@ class ModelManager(object): path = str(vae.get('path',None)), subfolder = str(vae.get('subfolder',None)) ) - + if format == 'diffusers': models[name].update( vae = vae, repo_id = str(stanza.get('repo_id', None)), path = str(stanza.get('path',None)), ) - + return models def print_models(self) -> None: @@ -257,7 +257,7 @@ class ModelManager(object): assert (clobber or model_name not in omega), f'attempt to overwrite existing model definition "{model_name}"' omega[model_name] = model_attributes - + if 'weights' in omega[model_name]: omega[model_name]['weights'].replace('\\','/') @@ -554,12 +554,12 @@ class ModelManager(object): ''' Attempts to install the indicated ckpt file and returns True if successful. - "weights" can be either a path-like object corresponding to a local .ckpt file + "weights" can be either a path-like object corresponding to a local .ckpt file or a http/https URL pointing to a remote model. "config" is the model config file to use with this ckpt file. It defaults to v1-inference.yaml. If a URL is provided, the config will be downloaded. - + You can optionally provide a model name and/or description. If not provided, then these will be derived from the weight file name. 
If you provide a commit_to_conf path to the configuration file, then the new entry will be committed to the @@ -572,7 +572,7 @@ class ModelManager(object): return False if config_path is None or not config_path.exists(): return False - + model_name = model_name or Path(weights).stem model_description = model_description or f'imported stable diffusion weights file {model_name}' new_config = dict( @@ -587,7 +587,7 @@ class ModelManager(object): if commit_to_conf: self.commit(commit_to_conf) return True - + def autoconvert_weights( self, conf_path:Path, @@ -660,7 +660,7 @@ class ModelManager(object): except Exception as e: print(f'** Conversion failed: {str(e)}') traceback.print_exc() - + print('done.') return new_config @@ -756,9 +756,13 @@ class ModelManager(object): print('** Legacy version <= 2.2.5 model directory layout detected. Reorganizing.') print('** This is a quick one-time operation.') from shutil import move, rmtree - + # transformer files get moved into the hub directory - hub = models_dir / 'hub' + if cls._is_huggingface_hub_directory_present(): + hub = global_cache_dir() / 'hub' + else: + hub = models_dir / 'hub' + os.makedirs(hub, exist_ok=True) for model in legacy_locations: source = models_dir / model @@ -771,7 +775,11 @@ class ModelManager(object): move(source, dest) # anything else gets moved into the diffusers directory - diffusers = models_dir / 'diffusers' + if cls._is_huggingface_hub_directory_present(): + diffusers = global_cache_dir() / 'diffusers' + else: + diffusers = models_dir / 'diffusers' + os.makedirs(diffusers, exist_ok=True) for root, dirs, _ in os.walk(models_dir, topdown=False): for dir in dirs: @@ -962,3 +970,7 @@ class ModelManager(object): print(f'** Could not load VAE {name_or_path}: {str(deferred_error)}') return vae + + @staticmethod + def _is_huggingface_hub_directory_present() -> bool: + return os.getenv('HF_HOME') is not None or os.getenv('XDG_CACHE_HOME') is not None From f3e952ecf0dcf26d7b2401b8fb20882952f33d8d Mon Sep 17 00:00:00 2001 From: Daya Adianto Date: Wed, 18 Jan 2023 21:06:01 +0700 Subject: [PATCH 4/8] Use global_cache_dir calls properly --- ldm/invoke/model_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 399d9bc934..0193baefba 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -759,7 +759,7 @@ class ModelManager(object): # transformer files get moved into the hub directory if cls._is_huggingface_hub_directory_present(): - hub = global_cache_dir() / 'hub' + hub = global_cache_dir('hub') else: hub = models_dir / 'hub' @@ -776,7 +776,7 @@ class ModelManager(object): # anything else gets moved into the diffusers directory if cls._is_huggingface_hub_directory_present(): - diffusers = global_cache_dir() / 'diffusers' + diffusers = global_cache_dir('diffusers') else: diffusers = models_dir / 'diffusers' From 171f4aa71bd170caf810b76bbf30cd310a02b32b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 19 Jan 2023 16:16:35 -0500 Subject: [PATCH 5/8] [feat] Provide option to disable xformers from command line Starting `invoke.py` with --no-xformers will disable memory-efficient-attention support if xformers is installed. --xformers will enable support, but this is already the default. 
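
For reviewers: `argparse.BooleanOptionalAction` (Python 3.9+) is what makes
the paired flags work; declaring --xformers with that action automatically
generates the matching --no-xformers flag. A minimal standalone sketch of the
behavior (illustration only, not part of this diff):

    import argparse

    parser = argparse.ArgumentParser()
    # BooleanOptionalAction registers both --xformers and --no-xformers.
    parser.add_argument('--xformers',
                        action=argparse.BooleanOptionalAction,
                        default=True)

    assert parser.parse_args([]).xformers is True              # default on
    assert parser.parse_args(['--no-xformers']).xformers is False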
---
 ldm/invoke/CLI.py                          | 10 +++-------
 ldm/invoke/args.py                         |  6 ++++++
 ldm/invoke/generator/diffusers_pipeline.py |  3 ++-
 ldm/invoke/globals.py                      |  3 +++
 4 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py
index 6fb0efeb8d..ef6389c7cc 100644
--- a/ldm/invoke/CLI.py
+++ b/ldm/invoke/CLI.py
@@ -45,6 +45,7 @@ def main():
     Globals.try_patchmatch = args.patchmatch
     Globals.always_use_cpu = args.always_use_cpu
     Globals.internet_available = args.internet_available and check_internet()
+    Globals.disable_xformers = not args.xformers
     print(f'>> Internet connectivity is {Globals.internet_available}')
 
     if not args.conf:
@@ -124,7 +125,7 @@ def main():
     # preload the model
     try:
         gen.load_model()
-    except KeyError as e:
+    except KeyError:
         pass
     except Exception as e:
         report_model_error(opt, e)
@@ -731,11 +732,6 @@ def del_config(model_name:str, gen, opt, completer):
     completer.update_models(gen.model_manager.list_models())
 
 def edit_model(model_name:str, gen, opt, completer):
-    current_model = gen.model_name
-#    if model_name == current_model:
-#        print("** Can't edit the active model. !switch to another model first. **")
-#        return
-
     manager = gen.model_manager
     if not (info := manager.model_info(model_name)):
         print(f'** Unknown model {model_name}')
@@ -887,7 +883,7 @@ def prepare_image_metadata(
     try:
         filename = opt.fnformat.format(**wildcards)
     except KeyError as e:
-        print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use \'{{prefix}}.{{seed}}.png\' instead')
+        print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png instead')
         filename = f'{prefix}.{seed}.png'
     except IndexError:
         print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py
index 400d1f720d..c918e4fba7 100644
--- a/ldm/invoke/args.py
+++ b/ldm/invoke/args.py
@@ -482,6 +482,12 @@ class Args(object):
             action='store_true',
             help='Force free gpu memory before final decoding',
         )
+        model_group.add_argument(
+            '--xformers',
+            action=argparse.BooleanOptionalAction,
+            default=True,
+            help='Enable/disable xformers support (default enabled if installed)',
+        )
         model_group.add_argument(
             "--always_use_cpu",
             dest="always_use_cpu",
diff --git a/ldm/invoke/generator/diffusers_pipeline.py b/ldm/invoke/generator/diffusers_pipeline.py
index 5e62abf9df..54e9d555af 100644
--- a/ldm/invoke/generator/diffusers_pipeline.py
+++ b/ldm/invoke/generator/diffusers_pipeline.py
@@ -39,6 +39,7 @@ from diffusers.utils.outputs import BaseOutput
 from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 
+from ldm.invoke.globals import Globals
 from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, ThresholdSettings
 from ldm.modules.textual_inversion_manager import TextualInversionManager
 
@@ -306,7 +307,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                 textual_inversion_manager=self.textual_inversion_manager
             )
 
-        if is_xformers_available():
+        if is_xformers_available() and not Globals.disable_xformers:
             self.enable_xformers_memory_efficient_attention()
 
     def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
diff --git a/ldm/invoke/globals.py b/ldm/invoke/globals.py
index 137171aa33..5bd5597b78 100644
--- a/ldm/invoke/globals.py
+++ b/ldm/invoke/globals.py
@@ -43,6 +43,9 @@ Globals.always_use_cpu = False
 # The CLI will test connectivity at startup time.
 Globals.internet_available = True
 
+# Whether to disable xformers
+Globals.disable_xformers = False
+
 # whether we are forcing full precision
 Globals.full_precision = False
 

From 895505976ea3a98c6c9d8c753cc41737cdf0a5ed Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 19 Jan 2023 16:49:40 -0500
Subject: [PATCH 6/8] [bugfix] suppress extraneous warning messages generated
 by diffusers

This commit suppresses a few irrelevant warning messages that the diffusers
module produces:

1. The warning that turning off the NSFW detector makes you an irresponsible
   person.
2. Warnings about running fp16 models stored on the CPU (we are not running
   them on the CPU, just caching them in CPU RAM).
---
 ldm/invoke/generator/txt2img2img.py | 4 ++++
 ldm/invoke/model_manager.py         | 7 ++++---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/ldm/invoke/generator/txt2img2img.py b/ldm/invoke/generator/txt2img2img.py
index 1dba0cfafb..47692a6bbb 100644
--- a/ldm/invoke/generator/txt2img2img.py
+++ b/ldm/invoke/generator/txt2img2img.py
@@ -3,6 +3,7 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
 '''
 
 import math
+from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
 from typing import Callable, Optional
 
 import torch
@@ -66,6 +67,8 @@ class Txt2Img2Img(Generator):
 
         second_pass_noise = self.get_noise_like(resized_latents)
 
+        verbosity = get_verbosity()
+        set_verbosity_error()
         pipeline_output = pipeline.img2img_from_latents_and_embeddings(
             resized_latents,
             num_inference_steps=steps,
@@ -73,6 +76,7 @@ class Txt2Img2Img(Generator):
             strength=strength,
             noise=second_pass_noise,
             callback=step_callback)
+        set_verbosity(verbosity)
 
         return pipeline.numpy_to_pil(pipeline_output.images)[0]
 
diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py
index 0193baefba..a5f9a47d41 100644
--- a/ldm/invoke/model_manager.py
+++ b/ldm/invoke/model_manager.py
@@ -25,6 +25,7 @@ import torch
 import safetensors
 import transformers
 from diffusers import AutoencoderKL, logging as dlogging
+from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
@@ -827,11 +828,11 @@ class ModelManager(object):
             return model
 
         # diffusers really really doesn't like us moving a float16 model onto CPU
-        import logging
-        logging.getLogger('diffusers.pipeline_utils').setLevel(logging.CRITICAL)
+        verbosity = get_verbosity()
+        set_verbosity_error()
         model.cond_stage_model.device = 'cpu'
         model.to('cpu')
-        logging.getLogger('pipeline_utils').setLevel(logging.INFO)
+        set_verbosity(verbosity)
 
         for submodel in ('first_stage_model','cond_stage_model','model'):
             try:

From 9b1843307b3fea55572572280770884acf97fd26 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 19 Jan 2023 18:43:12 -0500
Subject: [PATCH 7/8] [enhancement] Reorganize form for textual inversion
 training

- Add num_train_epochs
- Reorganize widgets so all sliders that control # of steps are together
---
 scripts/textual_inversion_fe.py | 42 +++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 17 deletions(-)

diff --git a/scripts/textual_inversion_fe.py b/scripts/textual_inversion_fe.py
index 941afcf613..82446e98a7 100755
--- a/scripts/textual_inversion_fe.py
+++ b/scripts/textual_inversion_fe.py
@@ -115,6 +115,14 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             value=self.precisions.index(saved_args.get('mixed_precision','fp16')),
             max_height=4,
         )
+        self.num_train_epochs = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Number of training epochs:',
+            out_of=1000,
+            step=50,
+            lowest=1,
+            value=saved_args.get('num_train_epochs',100)
+        )
         self.max_train_steps = self.add_widget_intelligent(
             npyscreen.TitleSlider,
             name='Max Training Steps:',
@@ -131,6 +139,22 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             lowest=1,
             value=saved_args.get('train_batch_size',8),
         )
+        self.gradient_accumulation_steps = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):',
+            out_of=10,
+            step=1,
+            lowest=1,
+            value=saved_args.get('gradient_accumulation_steps',4)
+        )
+        self.lr_warmup_steps = self.add_widget_intelligent(
+            npyscreen.TitleSlider,
+            name='Warmup Steps:',
+            out_of=100,
+            step=1,
+            lowest=0,
+            value=saved_args.get('lr_warmup_steps',0),
+        )
         self.learning_rate = self.add_widget_intelligent(
             npyscreen.TitleText,
             name="Learning Rate:",
@@ -154,22 +178,6 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             scroll_exit = True,
             value=self.lr_schedulers.index(saved_args.get('lr_scheduler','constant')),
         )
-        self.gradient_accumulation_steps = self.add_widget_intelligent(
-            npyscreen.TitleSlider,
-            name='Gradient Accumulation Steps:',
-            out_of=10,
-            step=1,
-            lowest=1,
-            value=saved_args.get('gradient_accumulation_steps',4)
-        )
-        self.lr_warmup_steps = self.add_widget_intelligent(
-            npyscreen.TitleSlider,
-            name='Warmup Steps:',
-            out_of=100,
-            step=1,
-            lowest=0,
-            value=saved_args.get('lr_warmup_steps',0),
-        )
 
     def initializer_changed(self):
         placeholder = self.placeholder_token.value
@@ -236,7 +244,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
 
         # all the integers
         for attr in ('train_batch_size','gradient_accumulation_steps',
-                     'max_train_steps','lr_warmup_steps'):
+                     'num_train_epochs','max_train_steps','lr_warmup_steps'):
             args[attr] = int(getattr(self,attr).value)
 
         # the floats (just one)

From a8bb1a110991cace8f8f7bda35994d569c591b19 Mon Sep 17 00:00:00 2001
From: Nicholas Koh
Date: Thu, 19 Jan 2023 14:41:41 +0800
Subject: [PATCH 8/8] Save HF token only if it is present

---
 scripts/configure_invokeai.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/scripts/configure_invokeai.py b/scripts/configure_invokeai.py
index 9d17a73317..fec1cc6135 100755
--- a/scripts/configure_invokeai.py
+++ b/scripts/configure_invokeai.py
@@ -291,7 +291,7 @@ for more information.
 
 Visit https://huggingface.co/settings/tokens to generate a token. (Sign up for an account if needed).
 
-Paste the token below using Ctrl-V on macOS/Linux, or Ctrl-Shift-V or right-click on Windows. 
+Paste the token below using Ctrl-V on macOS/Linux, or Ctrl-Shift-V or right-click on Windows.
 Alternatively press 'Enter' to skip this step and continue.
 You may re-run the configuration script again in the future if you do not wish to set the token right now.
 ''')
@@ -676,7 +676,8 @@ def download_weights(opt:dict) -> Union[str, None]:
         return
 
     access_token = authenticate()
-    HfFolder.save_token(access_token)
+    if access_token is not None:
+        HfFolder.save_token(access_token)
 
     print('\n** DOWNLOADING WEIGHTS **')
     successfully_downloaded = download_weight_datasets(models, access_token, precision=precision)
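
For reference, the cache-directory resolution that patches 2 and 4 introduce
reduces to the following lookup order. This is a standalone sketch of the
behavior (the `resolve_cache_dir` helper is hypothetical, written here only
to summarize `global_cache_dir` and the migration logic above):

    import os
    from pathlib import Path

    def resolve_cache_dir(root: str, subdir: str = '') -> Path:
        # 1. HF_HOME takes precedence when set.
        home = os.getenv('HF_HOME')
        # 2. Otherwise fall back to $XDG_CACHE_HOME/huggingface, the
        #    default used by the HuggingFace Hub client library.
        if home is None and (xdg := os.getenv('XDG_CACHE_HOME')) is not None:
            home = os.path.join(xdg, 'huggingface')
        # 3. Otherwise use <root>/models, InvokeAI's own layout.
        if home is None:
            return Path(root, 'models', subdir)
        return Path(home, subdir)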