Merge branch 'v2.3' into enhance/simple-param-scanner-script

Jonathan 2023-03-07 11:45:45 -06:00 committed by GitHub
commit d669e69755
4 changed files with 42 additions and 25 deletions

.github/CODEOWNERS

@@ -2,50 +2,60 @@
 /.github/workflows/ @mauwii @lstein @blessedcoolant
 
 # documentation
-/docs/ @lstein @mauwii @tildebyte @blessedcoolant
-mkdocs.yml @lstein @mauwii @blessedcoolant
+/docs/ @lstein @mauwii @blessedcoolant
+mkdocs.yml @mauwii @lstein
 
 # installation and configuration
-/pyproject.toml @mauwii @lstein @ebr @blessedcoolant
-/docker/ @mauwii @lstein @blessedcoolant
+/pyproject.toml @mauwii @lstein @ebr
+/docker/ @mauwii
 /scripts/ @ebr @lstein @blessedcoolant
-/installer/ @ebr @lstein @tildebyte @blessedcoolant
-ldm/invoke/config @lstein @ebr @blessedcoolant
-invokeai/assets @lstein @ebr @blessedcoolant
+/installer/ @ebr @lstein
+ldm/invoke/config @lstein @ebr
+invokeai/assets @lstein @blessedcoolant
 invokeai/configs @lstein @ebr @blessedcoolant
 /ldm/invoke/_version.py @lstein @blessedcoolant
 
 # web ui
-/invokeai/frontend @blessedcoolant @psychedelicious @lstein
-/invokeai/backend @blessedcoolant @psychedelicious @lstein
+/invokeai/frontend @blessedcoolant @psychedelicious
+/invokeai/backend @blessedcoolant @psychedelicious
 
 # generation and model management
 /ldm/*.py @lstein @blessedcoolant
-/ldm/generate.py @lstein @keturn @blessedcoolant
+/ldm/generate.py @lstein @keturn
 /ldm/invoke/args.py @lstein @blessedcoolant
 /ldm/invoke/ckpt* @lstein @blessedcoolant
 /ldm/invoke/ckpt_generator @lstein @blessedcoolant
 /ldm/invoke/CLI.py @lstein @blessedcoolant
 /ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
-/ldm/invoke/generator @keturn @damian0815 @blessedcoolant
+/ldm/invoke/generator @keturn @damian0815
 /ldm/invoke/globals.py @lstein @blessedcoolant
 /ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
 /ldm/invoke/model_manager.py @lstein @blessedcoolant
 /ldm/invoke/txt2mask.py @lstein @blessedcoolant
-/ldm/invoke/patchmatch.py @Kyle0654 @blessedcoolant @lstein
+/ldm/invoke/patchmatch.py @Kyle0654 @lstein
 /ldm/invoke/restoration @lstein @blessedcoolant
 
 # attention, textual inversion, model configuration
-/ldm/models @damian0815 @keturn @lstein @blessedcoolant
-/ldm/modules @damian0815 @keturn @lstein @blessedcoolant
+/ldm/models @damian0815 @keturn @blessedcoolant
+/ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
+/ldm/modules/attention.py @damian0815 @keturn
+/ldm/modules/diffusionmodules @damian0815 @keturn
+/ldm/modules/distributions @damian0815 @keturn
+/ldm/modules/ema.py @damian0815 @keturn
+/ldm/modules/embedding_manager.py @lstein
+/ldm/modules/encoders @damian0815 @keturn
+/ldm/modules/image_degradation @damian0815 @keturn
+/ldm/modules/losses @damian0815 @keturn
+/ldm/modules/x_transformer.py @damian0815 @keturn
 
 # Nodes
-apps/ @Kyle0654 @lstein @blessedcoolant
+apps/ @Kyle0654 @jpphoto
 
 # legacy REST API
-# is CapableWeb still engaged?
-/ldm/invoke/pngwriter.py @CapableWeb @lstein @blessedcoolant
-/ldm/invoke/server_legacy.py @CapableWeb @lstein @blessedcoolant
-/scripts/legacy_api.py @CapableWeb @lstein @blessedcoolant
-/tests/legacy_tests.sh @CapableWeb @lstein @blessedcoolant
+# these are dead code
+#/ldm/invoke/pngwriter.py @CapableWeb
+#/ldm/invoke/server_legacy.py @CapableWeb
+#/scripts/legacy_api.py @CapableWeb
+#/tests/legacy_tests.sh @CapableWeb
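Note on the CODEOWNERS restructuring above: when patterns overlap, GitHub gives precedence to the last matching line in the file, which is what makes replacing the single broad /ldm/modules rule with per-file rules safe. A minimal illustration of that precedence rule (example paths and owners only, not entries from this repo):

/ldm/modules            @damian0815 @keturn
/ldm/modules/ema.py     @lstein
# a change to ema.py requests review only from @lstein: the last matching line wins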

ldm/invoke/CLI.py

@@ -747,7 +747,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
         return
     manager.commit(opt.conf)
-    if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
+    if ckpt_path and click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
         ckpt_path.unlink(missing_ok=True)
         print(f"{ckpt_path} deleted")
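Worth noting for reviewers: ckpt_path can evidently be unset on some paths through convert_model (presumably when the source model needs no .ckpt on disk), so the added `ckpt_path and` guard avoids both a confusing "delete None?" prompt and an AttributeError from calling .unlink() on None. A minimal standalone sketch of the guarded behavior (the helper name is hypothetical, not InvokeAI code):

import click
from pathlib import Path
from typing import Optional

def maybe_delete_original(ckpt_path: Optional[Path]) -> None:
    # `ckpt_path and ...` short-circuits: with None we never prompt
    # and never reach None.unlink()
    if ckpt_path and click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
        ckpt_path.unlink(missing_ok=True)  # tolerate an already-removed file
        print(f"{ckpt_path} deleted")

maybe_delete_original(None)                # no prompt, no error
maybe_delete_original(Path("model.ckpt"))  # prompts before deleting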

ldm/invoke/conditioning.py

@@ -9,7 +9,7 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an
 import re
 from typing import Union, Optional, Any
-from transformers import CLIPTokenizer, CLIPTextModel
+from transformers import CLIPTokenizer
 from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser

@@ -52,6 +52,8 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
         textual_inversion_manager=model.textual_inversion_manager,
         dtype_for_device_getter=torch_dtype)
 
+    # get rid of any newline characters
+    prompt_string = prompt_string.replace("\n", " ")
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
     positive_prompt: FlattenedPrompt|Blend

@@ -113,7 +115,7 @@ def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, trun
     return tokens
 
 
-def split_prompt_to_positive_and_negative(prompt_string_uncleaned):
+def split_prompt_to_positive_and_negative(prompt_string_uncleaned: str):
     unconditioned_words = ''
     unconditional_regex = r'\[(.*?)\]'
     unconditionals = re.findall(unconditional_regex, prompt_string_uncleaned)
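One concrete effect of the newline cleanup added above: `.` in the negative-prompt regex does not match "\n", so a bracketed negative section wrapped across lines would otherwise be dropped silently. A quick standalone check (stdlib only, not InvokeAI code):

import re

unconditional_regex = r'\[(.*?)\]'  # same pattern as split_prompt_to_positive_and_negative

raw = "a castle at dusk [blurry,\nlow quality]"
print(re.findall(unconditional_regex, raw))                     # [] -- negative section lost
print(re.findall(unconditional_regex, raw.replace("\n", " ")))  # ['blurry, low quality']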

ldm/invoke/model_manager.py

@@ -781,6 +781,7 @@ class ModelManager(object):
         """
         model_path: Path = None
         thing = path_url_or_repo  # to save typing
+        is_temporary = False
 
         print(f">> Probing {thing} for import")

@@ -789,7 +790,7 @@ class ModelManager(object):
             model_path = self._resolve_path(
                 thing, "models/ldm/stable-diffusion-v1"
             )  # _resolve_path does a download if needed
+            is_temporary = True
 
         elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
             if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
                 print(

@@ -896,6 +897,10 @@ class ModelManager(object):
                 original_config_file=model_config_file,
                 commit_to_conf=commit_to_conf,
             )
+            # in the event that this file was downloaded automatically prior to conversion
+            # we do not keep the original .ckpt/.safetensors around
+            if is_temporary:
+                model_path.unlink(missing_ok=True)
         else:
             model_name = self.import_ckpt_model(
                 model_path,
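The is_temporary flag added in this file encodes a simple ownership rule: delete a checkpoint after conversion only when the manager downloaded it itself, never when the user supplied the file. A condensed sketch of that pattern (the function and its callable parameters are hypothetical, not the real ModelManager API):

from pathlib import Path
from typing import Callable

def import_and_convert(thing: str,
                       download: Callable[[str], Path],
                       convert: Callable[[Path], None]) -> None:
    is_temporary = False
    if thing.startswith(("http:", "https:")):
        model_path = download(thing)  # fetched on the user's behalf...
        is_temporary = True           # ...so cleanup is our responsibility
    else:
        model_path = Path(thing)      # user-owned file: never delete it
    convert(model_path)
    if is_temporary:
        model_path.unlink(missing_ok=True)  # drop the now-redundant download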