From 023db8ac4146cbd511cd589897a41fee41382236 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 9 Mar 2023 22:35:43 -0500 Subject: [PATCH 1/2] use diffusers 0.14 cache layout This PR ports the `main` PR #2871 to the v2.3 branch. This adjusts the global diffusers model cache to work with the 0.14 diffusers layout of placing models in HF_HOME/hub rather than HF_HOME/diffusers. --- ldm/invoke/CLI.py | 1 - ldm/invoke/config/invokeai_configure.py | 2 +- ldm/invoke/config/model_install_backend.py | 1 - ldm/invoke/globals.py | 15 ++--- ldm/invoke/merge_diffusers.py | 4 +- ldm/invoke/model_manager.py | 59 +++++++++---------- .../training/textual_inversion_training.py | 2 +- pyproject.toml | 2 +- 8 files changed, 40 insertions(+), 46 deletions(-) diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index cd10516fdd..53457d2da3 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -960,7 +960,6 @@ def prepare_image_metadata( wildcards["seed"] = seed wildcards["model_id"] = model_id try: - print(f'DEBUG: fnformat={opt.fnformat}') filename = opt.fnformat.format(**wildcards) except KeyError as e: print( diff --git a/ldm/invoke/config/invokeai_configure.py b/ldm/invoke/config/invokeai_configure.py index bdcecc55b0..3f11b058d2 100755 --- a/ldm/invoke/config/invokeai_configure.py +++ b/ldm/invoke/config/invokeai_configure.py @@ -290,7 +290,7 @@ def download_vaes(): # first the diffusers version repo_id = "stabilityai/sd-vae-ft-mse" args = dict( - cache_dir=global_cache_dir("diffusers"), + cache_dir=global_cache_dir("hub"), ) if not AutoencoderKL.from_pretrained(repo_id, **args): raise Exception(f"download of {repo_id} failed") diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py index beab4f9b51..91bead0948 100644 --- a/ldm/invoke/config/model_install_backend.py +++ b/ldm/invoke/config/model_install_backend.py @@ -262,7 +262,6 @@ def _download_diffusion_weights( path = download_from_hf( model_class, repo_id, - 
cache_subdir="diffusers", safety_checker=None, **extra_args, ) diff --git a/ldm/invoke/globals.py b/ldm/invoke/globals.py index e47b5c059e..0774507b86 100644 --- a/ldm/invoke/globals.py +++ b/ldm/invoke/globals.py @@ -88,16 +88,13 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path: ''' Returns Path to the model cache directory. If a subdirectory is provided, it will be appended to the end of the path, allowing - for huggingface-style conventions: - global_cache_dir('diffusers') + for Hugging Face-style conventions. Currently, Hugging Face has + moved all models into the "hub" subfolder, so for any pretrained + HF model, use: global_cache_dir('hub') - Current HuggingFace documentation (mid-Jan 2023) indicates that - transformers models will be cached into a "transformers" subdirectory, - but in practice they seem to go into "hub". But if needed: - global_cache_dir('transformers') - One other caveat is that HuggingFace is moving some diffusers models - into the "hub" subdirectory as well, so this will need to be revisited - from time to time. + + The legacy location for transformers used to be global_cache_dir('transformers') + and global_cache_dir('diffusers') for diffusers. 
''' home: str = os.getenv('HF_HOME') diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py index 3cb3613ee3..5c4e7da499 100644 --- a/ldm/invoke/merge_diffusers.py +++ b/ldm/invoke/merge_diffusers.py @@ -437,10 +437,10 @@ def main(): args = _parse_args() global_set_root(args.root_dir) - cache_dir = str(global_cache_dir("diffusers")) + cache_dir = str(global_cache_dir("hub")) os.environ[ "HF_HOME" - ] = cache_dir # because not clear the merge pipeline is honoring cache_dir + ] = str(global_cache_dir()) # because not clear the merge pipeline is honoring cache_dir args.cache_dir = cache_dir try: diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index a2d721d2d4..c1a4575338 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -507,7 +507,7 @@ class ModelManager(object): if vae := self._load_vae(mconfig["vae"]): pipeline_args.update(vae=vae) if not isinstance(name_or_path, Path): - pipeline_args.update(cache_dir=global_cache_dir("diffusers")) + pipeline_args.update(cache_dir=global_cache_dir("hub")) if using_fp16: pipeline_args.update(torch_dtype=torch.float16) fp_args_list = [{"revision": "fp16"}, {}] @@ -1093,9 +1093,12 @@ class ModelManager(object): to the 2.3.0 "diffusers" version. This should be a one-time operation, called at script startup time. 
""" - # Three transformer models to check: bert, clip and safety checker + # Three transformer models to check: bert, clip and safety checker, and + # the diffusers as well + models_dir = Path(Globals.root, "models") legacy_locations = [ Path( + models_dir, "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker" ), Path("bert-base-uncased/models--bert-base-uncased"), @@ -1103,17 +1106,26 @@ class ModelManager(object): "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14" ), ] - models_dir = Path(Globals.root, "models") + legacy_locations.extend(list(global_cache_dir("diffusers").glob('*'))) legacy_layout = False for model in legacy_locations: - legacy_layout = legacy_layout or Path(models_dir, model).exists() + legacy_layout = legacy_layout or model.exists() if not legacy_layout: return print( - "** Legacy version <= 2.2.5 model directory layout detected. Reorganizing." + """ +>> ALERT: +>> The location of your previously-installed diffusers models needs to move from +>> invokeai/models/diffusers to invokeai/models/hub due to a change introduced by +>> diffusers version 0.14. InvokeAI will now move all models from the "diffusers" directory +>> into "hub" and then remove the diffusers directory. This is a quick, safe, one-time +>> operation. However if you have customized either of these directories and need to +>> make adjustments, please press ctrl-C now to abort and relaunch InvokeAI when you are ready. 
+>> Otherwise press <enter> to continue.""" ) print("** This is a quick one-time operation.") + input("continue> ") # transformer files get moved into the hub directory if cls._is_huggingface_hub_directory_present(): @@ -1125,33 +1137,20 @@ class ModelManager(object): for model in legacy_locations: source = models_dir / model dest = hub / model.stem + if dest.exists() and not source.exists(): + continue print(f"** {source} => {dest}") if source.exists(): - if dest.exists(): - rmtree(source) + if dest.is_symlink(): + print(f"** Found symlink at {dest.name}. Not migrating.") + elif dest.exists(): + if source.is_dir(): + rmtree(source) + else: + source.unlink() else: move(source, dest) - - # anything else gets moved into the diffusers directory - if cls._is_huggingface_hub_directory_present(): - diffusers = global_cache_dir("diffusers") - else: - diffusers = models_dir / "diffusers" - - os.makedirs(diffusers, exist_ok=True) - for root, dirs, _ in os.walk(models_dir, topdown=False): - for dir in dirs: - full_path = Path(root, dir) - if full_path.is_relative_to(hub) or full_path.is_relative_to(diffusers): - continue - if Path(dir).match("models--*--*"): - dest = diffusers / dir - print(f"** {full_path} => {dest}") - if dest.exists(): - rmtree(full_path) - else: - move(full_path, dest) - + # now clean up by removing any empty directories empty = [ root @@ -1249,7 +1248,7 @@ class ModelManager(object): path = name_or_path else: owner, repo = name_or_path.split("/") - path = Path(global_cache_dir("diffusers") / f"models--{owner}--{repo}") + path = Path(global_cache_dir("hub") / f"models--{owner}--{repo}") if not path.exists(): return None hashpath = path / "checksum.sha256" @@ -1310,7 +1309,7 @@ class ModelManager(object): using_fp16 = self.precision == "float16" vae_args.update( - cache_dir=global_cache_dir("hub"), + local_files_only=not Globals.internet_available, ) diff --git a/ldm/invoke/training/textual_inversion_training.py 
b/ldm/invoke/training/textual_inversion_training.py index 58c67b2ca8..7794712bc1 100644 --- a/ldm/invoke/training/textual_inversion_training.py +++ b/ldm/invoke/training/textual_inversion_training.py @@ -634,7 +634,7 @@ def do_textual_inversion_training( assert ( pretrained_model_name_or_path ), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}" - pipeline_args = dict(cache_dir=global_cache_dir("diffusers")) + pipeline_args = dict(cache_dir=global_cache_dir("hub")) # Load tokenizer if tokenizer_name: diff --git a/pyproject.toml b/pyproject.toml index 27de4b8626..28eab88423 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ dependencies = [ "clip_anytorch", "compel==0.1.7", "datasets", - "diffusers[torch]~=0.13", + "diffusers[torch]~=0.14", "dnspython==2.2.1", "einops", "eventlet", From acf955fc7bae509b2f5865c9abef122e7a741004 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 10 Mar 2023 06:58:46 -0500 Subject: [PATCH 2/2] upgrade transformers, accelerate, safetensors --- ldm/invoke/_version.py | 2 +- pyproject.toml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ldm/invoke/_version.py b/ldm/invoke/_version.py index d2a097c4ed..3c783d4986 100644 --- a/ldm/invoke/_version.py +++ b/ldm/invoke/_version.py @@ -1 +1 @@ -__version__='2.3.1.post2' +__version__='2.3.2' diff --git a/pyproject.toml b/pyproject.toml index 28eab88423..52adf4c957 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ classifiers = [ "Topic :: Scientific/Engineering :: Image Processing", ] dependencies = [ - "accelerate", + "accelerate~=0.16", "albumentations", "click", "clip_anytorch", @@ -63,7 +63,7 @@ dependencies = [ "pytorch-lightning==1.7.7", "realesrgan", "requests==2.28.2", - "safetensors", + "safetensors~=0.3.0", "scikit-image>=0.19", "send2trash", "streamlit", @@ -73,7 +73,7 @@ dependencies = [ "torch>=1.13.1", "torchmetrics", "torchvision>=0.14.1", - "transformers~=4.25", + "transformers~=4.26", 
"windows-curses; sys_platform=='win32'", ] description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"