From 8a31e5c5e3dd7be361fdd984170fd1bac801712a Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 17 Jan 2023 00:18:09 -0500
Subject: [PATCH 1/7] allow safetensors models to be imported

---
 ldm/invoke/CLI.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py
index cfe9a64ed5..83b5281847 100644
--- a/ldm/invoke/CLI.py
+++ b/ldm/invoke/CLI.py
@@ -572,7 +572,7 @@ def import_model(model_path:str, gen, opt, completer):
 
     if model_path.startswith(('http:','https:','ftp:')):
         model_name = import_ckpt_model(model_path, gen, opt, completer)
-    elif os.path.exists(model_path) and model_path.endswith('.ckpt') and os.path.isfile(model_path):
+    elif os.path.exists(model_path) and model_path.endswith(('.ckpt','.safetensors')) and os.path.isfile(model_path):
         model_name = import_ckpt_model(model_path, gen, opt, completer)
     elif re.match('^[\w.+-]+/[\w.+-]+$',model_path):
         model_name = import_diffuser_model(model_path, gen, opt, completer)
@@ -628,9 +628,9 @@ def import_ckpt_model(path_or_url:str, gen, opt, completer)->str:
         model_description=default_description
     )
     config_file = None
-
+    default = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
     completer.complete_extensions(('.yaml','.yml'))
-    completer.set_line('configs/stable-diffusion/v1-inference.yaml')
+    completer.set_line(str(default))
     done = False
     while not done:
         config_file = input('Configuration file for this model: ').strip()
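A note on the suffix test in PATCH 1: Python's str.endswith() accepts a tuple
of suffixes and returns True if the string ends with any member, so a single
call covers both legacy formats. A minimal sketch of the idea; the model path
below is a hypothetical example, not a file from the repository:

    # str.endswith() accepts a tuple: True if any suffix matches.
    model_path = '/models/my-model.safetensors'   # hypothetical path

    if model_path.endswith(('.ckpt', '.safetensors')):
        print('import as a legacy checkpoint')     # both suffixes take this branch
    else:
        print('treat as a diffusers repo id or other source')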
From fc2098834d51f673eabdc58e311cff4e6e6bfe3e Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 17 Jan 2023 08:11:19 -0500
Subject: [PATCH 2/7] support direct loading of .safetensors models

- Small fix to allow ckpt files with the .safetensors suffix to be
  directly loaded, rather than undergo a conversion step first.
---
 ldm/invoke/model_manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py
index 880d75476f..eeb2208c91 100644
--- a/ldm/invoke/model_manager.py
+++ b/ldm/invoke/model_manager.py
@@ -142,7 +142,7 @@ class ModelManager(object):
         Return true if this is a legacy (.ckpt) model
         '''
         info = self.model_info(model_name)
-        if 'weights' in info and info['weights'].endswith('.ckpt'):
+        if 'weights' in info and info['weights'].endswith(('.ckpt','.safetensors')):
            return True
        return False
 

From 0b5c0c374e235b160238d36c6ba30d150df81d1a Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 17 Jan 2023 22:51:57 -0500
Subject: [PATCH 3/7] load safetensors vaes

---
 ldm/invoke/model_manager.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py
index eeb2208c91..9b0d21546a 100644
--- a/ldm/invoke/model_manager.py
+++ b/ldm/invoke/model_manager.py
@@ -359,7 +359,9 @@ class ModelManager(object):
             vae = os.path.normpath(os.path.join(Globals.root,vae))
             if os.path.exists(vae):
                 print(f' | Loading VAE weights from: {vae}')
-                vae_ckpt = torch.load(vae, map_location="cpu")
+                vae_ckpt = safetensors.torch.load_file(vae) \
+                    if vae.endswith('.safetensors') \
+                    else torch.load(vae, map_location="cpu")
                 vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
                 model.first_stage_model.load_state_dict(vae_dict, strict=False)
             else:

From 1c62ae461e410c9c933571351cbae2e719cf3c68 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Wed, 18 Jan 2023 12:15:57 -0500
Subject: [PATCH 4/7] fix vae safetensor loading

---
 ldm/invoke/model_manager.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py
index 8aeeda650a..671e00aacd 100644
--- a/ldm/invoke/model_manager.py
+++ b/ldm/invoke/model_manager.py
@@ -359,10 +359,14 @@ class ModelManager(object):
             vae = os.path.normpath(os.path.join(Globals.root,vae))
             if os.path.exists(vae):
                 print(f' | Loading VAE weights from: {vae}')
-                vae_ckpt = safetensors.torch.load_file(vae) \
-                    if vae.endswith('.safetensors') \
-                    else torch.load(vae, map_location="cpu")
-                vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
+                vae_ckpt = None
+                vae_dict = None
+                if vae.endswith('.safetensors'):
+                    vae_ckpt = safetensors.torch.load_file(vae)
+                    vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"}
+                else:
+                    vae_ckpt = torch.load(vae, map_location="cpu")
+                    vae_dict = {k: v for k, v in vae_ckpt['state_dict'].items() if k[0:4] != "loss"}
                 model.first_stage_model.load_state_dict(vae_dict, strict=False)
             else:
                 print(f' | VAE file {vae} not found. Skipping.')
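The crux of PATCHES 3 and 4 is the shape of what each loader returns:
safetensors.torch.load_file() yields the flat tensor dictionary itself, while
torch.load() on a legacy .ckpt yields a wrapper dict whose tensors sit under
the 'state_dict' key. That is why PATCH 3's one-liner broke on safetensors
files and PATCH 4 splits the two cases. A minimal sketch of the distinction,
assuming a hypothetical vae_path; only the indexing differs:

    import torch
    import safetensors.torch

    vae_path = 'models/my-vae.safetensors'   # hypothetical path

    if vae_path.endswith('.safetensors'):
        # load_file() returns a flat {name: tensor} dict
        state_dict = safetensors.torch.load_file(vae_path)
    else:
        # a legacy checkpoint wraps its tensors under 'state_dict'
        state_dict = torch.load(vae_path, map_location='cpu')['state_dict']

    # in either case, drop the loss-related keys before loading the VAE
    vae_dict = {k: v for k, v in state_dict.items() if not k.startswith('loss')}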
From 3c3d893b9d17b4360acbab3ac1f179847065b4d7 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 19 Jan 2023 15:43:52 -0500
Subject: [PATCH 5/7] improve status reporting when loading local and remote
 embeddings

- During trigger token processing, emit better status messages indicating
  which triggers were found.
- Suppress the "is not known to HuggingFace library" message when the token
  is in fact a local embed.
---
 ldm/generate.py                          |  6 +++++-
 ldm/invoke/concepts_lib.py               | 10 +++++++++-
 ldm/modules/textual_inversion_manager.py |  6 +++++-
 3 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/ldm/generate.py b/ldm/generate.py
index 63eaf79b50..ad40b5f564 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -445,7 +445,11 @@ class Generate:
         self._set_sampler()
 
         # apply the concepts library to the prompt
-        prompt = self.huggingface_concepts_library.replace_concepts_with_triggers(prompt, lambda concepts: self.load_huggingface_concepts(concepts))
+        prompt = self.huggingface_concepts_library.replace_concepts_with_triggers(
+            prompt,
+            lambda concepts: self.load_huggingface_concepts(concepts),
+            self.model.textual_inversion_manager.get_all_trigger_strings()
+        )
 
         # bit of a hack to change the cached sampler's karras threshold to
         # whatever the user asked for
diff --git a/ldm/invoke/concepts_lib.py b/ldm/invoke/concepts_lib.py
index 246dea362a..0d4f2e296d 100644
--- a/ldm/invoke/concepts_lib.py
+++ b/ldm/invoke/concepts_lib.py
@@ -115,13 +115,19 @@ class HuggingFaceConceptsLibrary(object):
             return self.trigger_to_concept(match.group(1)) or f'<{match.group(1)}>'
         return self.match_trigger.sub(do_replace, prompt)
 
-    def replace_concepts_with_triggers(self, prompt:str, load_concepts_callback: Callable[[list], any])->str:
+    def replace_concepts_with_triggers(self,
+                                       prompt:str,
+                                       load_concepts_callback: Callable[[list], any],
+                                       excluded_tokens:list[str])->str:
         '''
         Given a prompt string that contains `<concept_name>` tags, replace these
         tags with the appropriate trigger.
 
         If any `<concept_name>` tags are found, `load_concepts_callback()` is called with a list
         of `concepts_name` strings.
+
+        `excluded_tokens` are any tokens that should not be replaced, typically because they
+        are trigger tokens from a locally-loaded embedding.
         '''
         concepts = self.match_concept.findall(prompt)
         if not concepts:
@@ -129,6 +135,8 @@
             return prompt
         load_concepts_callback(concepts)
 
         def do_replace(match)->str:
+            if excluded_tokens and f'<{match.group(1)}>' in excluded_tokens:
+                return f'<{match.group(1)}>'
             return self.concept_to_trigger(match.group(1)) or f'<{match.group(1)}>'
         return self.match_concept.sub(do_replace, prompt)
diff --git a/ldm/modules/textual_inversion_manager.py b/ldm/modules/textual_inversion_manager.py
index f7ced79a52..dcb77d6098 100644
--- a/ldm/modules/textual_inversion_manager.py
+++ b/ldm/modules/textual_inversion_manager.py
@@ -38,11 +38,15 @@ class TextualInversionManager():
             if concept_name in self.hf_concepts_library.concepts_loaded:
                 continue
             trigger = self.hf_concepts_library.concept_to_trigger(concept_name)
-            if self.has_textual_inversion_for_trigger_string(trigger):
+            if self.has_textual_inversion_for_trigger_string(trigger) \
+               or self.has_textual_inversion_for_trigger_string(concept_name) \
+               or self.has_textual_inversion_for_trigger_string(f'<{concept_name}>'): # in case a token with literal angle brackets encountered
+                print(f'>> Loaded local embedding for trigger {concept_name}')
                 continue
             bin_file = self.hf_concepts_library.get_concept_model_path(concept_name)
             if not bin_file:
                 continue
+            print(f'>> Loaded remote embedding for trigger {concept_name}')
             self.load_textual_inversion(bin_file)
             self.hf_concepts_library.concepts_loaded[concept_name]=True
 
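To make PATCH 5's exclusion behavior concrete: a concept tag whose
angle-bracketed form is already a known local trigger is left untouched, so
the local embedding wins over a HuggingFace concept of the same name. A
simplified, self-contained sketch of the substitution; the regex and the
concept-to-trigger table below are stand-ins for the library's own state:

    import re

    # stand-ins for HuggingFaceConceptsLibrary state
    match_concept = re.compile(r'<([\w\-\.]+)>')           # assumed tag pattern
    concept_to_trigger = {'midjourney-style': '<midj>'}    # hypothetical mapping
    excluded_tokens = ['<my-local-embed>']                 # trigger of a local embedding

    def do_replace(match):
        tag = f'<{match.group(1)}>'
        if excluded_tokens and tag in excluded_tokens:
            return tag                                     # local embed: leave the token alone
        return concept_to_trigger.get(match.group(1)) or tag

    prompt = 'portrait <midjourney-style> <my-local-embed>'
    print(match_concept.sub(do_replace, prompt))
    # -> portrait <midj> <my-local-embed>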
From 775e1a21c7a7095f6abb35e8858a34cb9786a2fe Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 19 Jan 2023 15:46:58 -0500
Subject: [PATCH 6/7] improve embed trigger token not found error

- Now indicates that the trigger is *neither* a HuggingFace concept nor the
  trigger of a locally loaded embed.
---
 ldm/invoke/concepts_lib.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/invoke/concepts_lib.py b/ldm/invoke/concepts_lib.py
index 0d4f2e296d..c774f29674 100644
--- a/ldm/invoke/concepts_lib.py
+++ b/ldm/invoke/concepts_lib.py
@@ -59,7 +59,7 @@ class HuggingFaceConceptsLibrary(object):
         be downloaded.
         '''
         if not concept_name in self.list_concepts():
-            print(f'This concept is not known to the Hugging Face library. Generation will continue without the concept.')
+            print(f'This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept.')
             return None
         return self.get_concept_file(concept_name.lower(),'learned_embeds.bin')
 

From a1c0818a081bab8aace84c0ee7ecbcf27bf2ea46 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Fri, 20 Jan 2023 17:13:32 -0500
Subject: [PATCH 7/7] ignore .DS_Store files when scanning Mac embeddings

---
 ldm/modules/textual_inversion_manager.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/ldm/modules/textual_inversion_manager.py b/ldm/modules/textual_inversion_manager.py
index dcb77d6098..cf28cf8c7a 100644
--- a/ldm/modules/textual_inversion_manager.py
+++ b/ldm/modules/textual_inversion_manager.py
@@ -54,6 +54,8 @@ class TextualInversionManager():
         return [ti.trigger_string for ti in self.textual_inversions]
 
     def load_textual_inversion(self, ckpt_path, defer_injecting_tokens: bool=False):
+        if str(ckpt_path).endswith('.DS_Store'):
+            return
         try:
             scan_result = scan_file_path(ckpt_path)
             if scan_result.infected_files == 1:
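A closing note on PATCH 7's guard: macOS's Finder drops a .DS_Store metadata
file into any folder it has browsed, so code that walks an embeddings
directory has to tolerate it. The early return above handles one path at a
time; when scanning a whole directory, the same intent can be expressed as a
filter. A minimal sketch, with a hypothetical directory name:

    from pathlib import Path

    embeddings_dir = Path('embeddings')   # hypothetical embeddings folder
    candidates = [
        p for p in embeddings_dir.iterdir()
        if p.is_file() and p.name != '.DS_Store'   # skip Finder metadata
    ]
    for path in candidates:
        print(f'would scan and load: {path}')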