Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

Compare commits: 7 commits, lstein/bug ... invokeai-b

Commits (SHA1): dbd2161601, 1f83ac2eae, f7bb68d01c, 8cddf9c5b3, 9b546ccf06, 73dbf73a95, 18a1f3893f

.github/CODEOWNERS (vendored): 34 lines changed
@@ -1,13 +1,13 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant
+/.github/workflows/ @mauwii @lstein @blessedcoolant
 
 # documentation
-/docs/ @lstein @blessedcoolant
-mkdocs.yml @lstein @ebr
+/docs/ @lstein @mauwii @blessedcoolant
+mkdocs.yml @mauwii @lstein
 
 # installation and configuration
-/pyproject.toml @lstein @ebr
-/docker/ @lstein
+/pyproject.toml @mauwii @lstein @ebr
+/docker/ @mauwii
 /scripts/ @ebr @lstein @blessedcoolant
 /installer/ @ebr @lstein
 ldm/invoke/config @lstein @ebr
@@ -21,13 +21,13 @@ invokeai/configs @lstein @ebr @blessedcoolant
 
 # generation and model management
 /ldm/*.py @lstein @blessedcoolant
-/ldm/generate.py @lstein @gregghelt2
+/ldm/generate.py @lstein @keturn
 /ldm/invoke/args.py @lstein @blessedcoolant
 /ldm/invoke/ckpt* @lstein @blessedcoolant
 /ldm/invoke/ckpt_generator @lstein @blessedcoolant
 /ldm/invoke/CLI.py @lstein @blessedcoolant
-/ldm/invoke/config @lstein @ebr @blessedcoolant
-/ldm/invoke/generator @gregghelt2 @damian0815
+/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
+/ldm/invoke/generator @keturn @damian0815
 /ldm/invoke/globals.py @lstein @blessedcoolant
 /ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
 /ldm/invoke/model_manager.py @lstein @blessedcoolant
@@ -36,17 +36,17 @@ invokeai/configs @lstein @ebr @blessedcoolant
 /ldm/invoke/restoration @lstein @blessedcoolant
 
 # attention, textual inversion, model configuration
-/ldm/models @damian0815 @gregghelt2 @blessedcoolant
+/ldm/models @damian0815 @keturn @blessedcoolant
 /ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
-/ldm/modules/attention.py @damian0815 @gregghelt2
-/ldm/modules/diffusionmodules @damian0815 @gregghelt2
-/ldm/modules/distributions @damian0815 @gregghelt2
-/ldm/modules/ema.py @damian0815 @gregghelt2
+/ldm/modules/attention.py @damian0815 @keturn
+/ldm/modules/diffusionmodules @damian0815 @keturn
+/ldm/modules/distributions @damian0815 @keturn
+/ldm/modules/ema.py @damian0815 @keturn
 /ldm/modules/embedding_manager.py @lstein
-/ldm/modules/encoders @damian0815 @gregghelt2
-/ldm/modules/image_degradation @damian0815 @gregghelt2
-/ldm/modules/losses @damian0815 @gregghelt2
-/ldm/modules/x_transformer.py @damian0815 @gregghelt2
+/ldm/modules/encoders @damian0815 @keturn
+/ldm/modules/image_degradation @damian0815 @keturn
+/ldm/modules/losses @damian0815 @keturn
+/ldm/modules/x_transformer.py @damian0815 @keturn
 
 # Nodes
 apps/ @Kyle0654 @jpphoto
@@ -132,13 +132,12 @@ class Installer:

         # Prefer to copy python executables
         # so that updates to system python don't break InvokeAI
-        if not venv_dir.exists():
-            try:
-                venv.create(venv_dir, with_pip=True)
-            # If installing over an existing environment previously created with symlinks,
-            # the executables will fail to copy. Keep symlinks in that case
-            except shutil.SameFileError:
-                venv.create(venv_dir, with_pip=True, symlinks=True)
+        try:
+            venv.create(venv_dir, with_pip=True)
+        # If installing over an existing environment previously created with symlinks,
+        # the executables will fail to copy. Keep symlinks in that case
+        except shutil.SameFileError:
+            venv.create(venv_dir, with_pip=True, symlinks=True)

         # upgrade pip in Python 3.9 environments
         if int(platform.python_version_tuple()[1]) == 9:
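A note on the hunk above: `venv.create(..., with_pip=True)` copies the interpreter executables by default, and copying onto an environment that was originally built with symlinks raises `shutil.SameFileError`, so the fallback re-creates the venv with `symlinks=True`. A minimal standalone sketch of the same pattern (the venv location is a hypothetical stand-in, not InvokeAI's actual path):

    import shutil
    import venv
    from pathlib import Path

    venv_dir = Path.home() / "invokeai" / ".venv"  # hypothetical location

    try:
        # copy the python executables so that updates to the system
        # python do not break the environment
        venv.create(venv_dir, with_pip=True)
    except shutil.SameFileError:
        # installing over an environment originally created with symlinks:
        # the executables fail to copy, so keep symlinks in that case
        venv.create(venv_dir, with_pip=True, symlinks=True)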
@@ -30,6 +30,7 @@ from ldm.invoke.conditioning import (
     get_tokens_for_prompt_object,
     get_prompt_structure,
     split_weighted_subprompts,
+    get_tokenizer,
 )
 from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
 from ldm.invoke.generator.inpaint import infill_methods
@@ -1313,7 +1314,7 @@ class InvokeAIWebServer:
                 None
                 if type(parsed_prompt) is Blend
                 else get_tokens_for_prompt_object(
-                    self.generate.model.tokenizer, parsed_prompt
+                    get_tokenizer(self.generate.model), parsed_prompt
                 )
             )
             attention_maps_image_base64_url = (
@@ -4,6 +4,7 @@ import shlex
 import sys
 import traceback
 from argparse import Namespace
+from packaging import version
 from pathlib import Path
 from typing import Union
 
@@ -16,6 +17,8 @@ if sys.platform == "darwin":
 
 import pyparsing  # type: ignore
 
+print(f'DEBUG: [1] All system modules imported', file=sys.stderr)
+
 import ldm.invoke
 
 from ..generate import Generate
@@ -25,17 +28,26 @@ from .generator.diffusers_pipeline import PipelineIntermediateState
 from .globals import Globals, global_config_dir
 from .image_util import make_grid
 from .log import write_log
 from .model_manager import ModelManager
 from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
 from ..util import url_attachment_name
 
+print(f'DEBUG: [2] All invokeai modules imported', file=sys.stderr)
+
 # global used in multiple functions (fix)
 infile = None
 
 def main():
     """Initialize command-line parsers and the diffusion model"""
     global infile
 
+    print('DEBUG: [3] Entered main()', file=sys.stderr)
+    print('DEBUG: INVOKEAI ENVIRONMENT:')
+    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+    print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+
     opt = Args()
     args = opt.parse_args()
     if not args:
@@ -64,6 +76,13 @@ def main():
     Globals.sequential_guidance = args.sequential_guidance
     Globals.ckpt_convert = True  # always true as of 2.3.4 for LoRA support
 
+    print(f'DEBUG: [4] Globals initialized', file=sys.stderr)
+
+    # run any post-install patches needed
+    run_patches()
+
+    print(f'DEBUG: [5] Patches run', file=sys.stderr)
+
     print(f">> Internet connectivity is {Globals.internet_available}")
 
     if not args.conf:
@@ -79,8 +98,9 @@ def main():
     # loading here to avoid long delays on startup
     # these two lines prevent a horrible warning message from appearing
     # when the frozen CLIP tokenizer is imported
+    print(f'DEBUG: [6] Importing torch modules', file=sys.stderr)
     import transformers  # type: ignore
 
     from ldm.generate import Generate
 
     transformers.logging.set_verbosity_error()
@@ -88,6 +108,7 @@ def main():
 
     diffusers.logging.set_verbosity_error()
 
+    print(f'DEBUG: [7] loading restoration models', file=sys.stderr)
     # Loading Face Restoration and ESRGAN Modules
     gfpgan, codeformer, esrgan = load_face_restoration(opt)
 
@@ -108,6 +129,10 @@ def main():
     if opt.lora_path:
         Globals.lora_models_dir = opt.lora_path
 
+    # migrate legacy models
+    print(f'DEBUG: [8] migrating models', file=sys.stderr)
+    ModelManager.migrate_models()
+
     # load the infile as a list of lines
     if opt.infile:
         try:
@@ -123,6 +148,7 @@ def main():
 
     model = opt.model or retrieve_last_used_model()
 
+    print(f'DEBUG: [9] Creating generate object', file=sys.stderr)
     # creating a Generate object:
     try:
         gen = Generate(
@@ -149,6 +175,7 @@ def main():
         print(">> changed to seamless tiling mode")
 
     # preload the model
+    print(f'DEBUG: [10] Loading default model', file=sys.stderr)
     try:
         gen.load_model()
     except KeyError:
@@ -196,6 +223,7 @@ def main():
 # TODO: main_loop() has gotten busy. Needs to be refactored.
 def main_loop(gen, opt, completer):
     """prompt/read/execute loop"""
+    print(f'DEBUG: [11] In main loop', file=sys.stderr)
     global infile
     done = False
     doneAfterInFile = infile is not None
@@ -1291,6 +1319,63 @@ def retrieve_last_used_model()->str:
     with open(model_file_path,'r') as f:
         return f.readline()
 
+# This routine performs any patch-ups needed after installation
+def run_patches():
+    install_missing_config_files()
+    version_file = Path(Globals.root,'.version')
+    if version_file.exists():
+        with open(version_file,'r') as f:
+            root_version = version.parse(f.readline() or 'v2.3.2')
+    else:
+        root_version = version.parse('v2.3.2')
+    app_version = version.parse(ldm.invoke.__version__)
+    if root_version < app_version:
+        try:
+            do_version_update(root_version, ldm.invoke.__version__)
+            with open(version_file,'w') as f:
+                f.write(ldm.invoke.__version__)
+        except:
+            print("** Update failed. Will try again on next launch")
+
+def install_missing_config_files():
+    """
+    install ckpt configuration files that may have been added to the
+    distro after original root directory configuration
+    """
+    pass
+    # import invokeai.configs as conf
+    # from shutil import copyfile
+
+    # root_configs = Path(global_config_dir(), 'stable-diffusion')
+    # repo_configs = Path(conf.__path__[0], 'stable-diffusion')
+    # for src in repo_configs.iterdir():
+    #     dest = root_configs / src.name
+    #     if not dest.exists():
+    #         copyfile(src,dest)
+
+def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
+    """
+    Make any updates to the launcher .sh and .bat scripts that may be needed
+    from release to release. This is not an elegant solution. Instead, the
+    launcher should be moved into the source tree and installed using pip.
+    """
+    if root_version < version.Version('v2.3.4'):
+        dest = Path(Globals.root,'loras')
+        dest.mkdir(exist_ok=True)
+    if root_version < version.Version('v2.3.3'):
+        if sys.platform == "linux":
+            print('>> Downloading new version of launcher script and its config file')
+            from ldm.util import download_with_progress_bar
+            url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
+
+            dest = Path(Globals.root,'invoke.sh.in')
+            assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
+            dest.replace(Path(Globals.root,'invoke.sh'))
+            os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
+
+            dest = Path(Globals.root,'dialogrc')
+            assert download_with_progress_bar(url_base+'dialogrc',dest)
+            dest.replace(Path(Globals.root,'.dialogrc'))
+
 if __name__ == '__main__':
     main()
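The `run_patches()` function added above gates one-time migrations on a `.version` stamp file compared against the package version with `packaging.version`. A reduced sketch of the idiom, independent of InvokeAI's globals (`ROOT`, `APP_VERSION`, and the patch body are placeholder stand-ins):

    from pathlib import Path
    from packaging import version

    ROOT = Path.home() / "invokeai"   # stand-in for Globals.root
    APP_VERSION = "2.3.4"             # stand-in for ldm.invoke.__version__

    def apply_patches():
        # example patch: 2.3.4 adds a loras directory
        (ROOT / "loras").mkdir(parents=True, exist_ok=True)

    def run_patches_once():
        version_file = ROOT / ".version"
        if version_file.exists():
            # default to an old version when the stamp file is empty
            stamped = version.parse(version_file.read_text().strip() or "v2.3.2")
        else:
            stamped = version.parse("v2.3.2")
        if stamped < version.parse(APP_VERSION):
            apply_patches()
            # stamp only after the patches succeeded, so a failed run retries
            version_file.write_text(APP_VERSION)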
@@ -1 +1 @@
-__version__='2.3.4.post1'
+__version__='2.3.4'
@@ -15,10 +15,19 @@ from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
     Conjunction
 from .devices import torch_dtype
 from .generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
 
+def get_tokenizer(model) -> CLIPTokenizer:
+    # TODO remove legacy ckpt fallback handling
+    return (getattr(model, 'tokenizer', None)  # diffusers
+            or model.cond_stage_model.tokenizer)  # ldm
+
+def get_text_encoder(model) -> Any:
+    # TODO remove legacy ckpt fallback handling
+    return (getattr(model, 'text_encoder', None)  # diffusers
+            or UnsqueezingLDMTransformer(model.cond_stage_model.transformer))  # ldm
+
 class UnsqueezingLDMTransformer:
     def __init__(self, ldm_transformer):
         self.ldm_transformer = ldm_transformer
@@ -32,15 +41,15 @@ class UnsqueezingLDMTransformer:
         return insufficiently_unsqueezed_tensor.unsqueeze(0)
 
 
-def get_uc_and_c_and_ec(prompt_string,
-                        model: StableDiffusionGeneratorPipeline,
-                        log_tokens=False, skip_normalize_legacy_blend=False):
+def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
     # lazy-load any deferred textual inversions.
     # this might take a couple of seconds the first time a textual inversion is used.
     model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(prompt_string)
 
-    compel = Compel(tokenizer=model.tokenizer,
-                    text_encoder=model.text_encoder,
+    tokenizer = get_tokenizer(model)
+    text_encoder = get_text_encoder(model)
+    compel = Compel(tokenizer=tokenizer,
+                    text_encoder=text_encoder,
                     textual_inversion_manager=model.textual_inversion_manager,
                     dtype_for_device_getter=torch_dtype)
 
@@ -69,20 +78,14 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
 
-    tokens_count = get_max_token_count(model.tokenizer, positive_prompt)
     if log_tokens or getattr(Globals, "log_tokenization", False):
-        log_tokenization(positive_prompt, negative_prompt, tokenizer=model.tokenizer)
+        log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer)
 
-    # some LoRA models also mess with the text encoder, so they must be active while compel builds conditioning tensors
-    lora_conditioning_ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
-                                                                           lora_conditions=lora_conditions)
-    with InvokeAIDiffuserComponent.custom_attention_context(model.unet,
-                                                            extra_conditioning_info=lora_conditioning_ec,
-                                                            step_count=-1):
-        c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
-        uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
+    c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
+    uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
+
+    tokens_count = get_max_token_count(tokenizer, positive_prompt)
 
     # now build the "real" ec
     ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
                                                          cross_attention_control_args=options.get(
                                                              'cross_attention_control', None),
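The `get_tokenizer()` / `get_text_encoder()` helpers in the hunk above restore legacy `.ckpt` support by probing for the diffusers attribute first and falling back to the ldm layout; the pattern is just `getattr` with a `None` default plus `or`. A small illustration with dummy stand-in classes (not InvokeAI's real model types):

    class DiffusersModel:
        tokenizer = "clip-tokenizer"          # diffusers pipelines expose .tokenizer

    class LegacyCondStage:
        tokenizer = "clip-tokenizer"

    class LegacyLDMModel:
        cond_stage_model = LegacyCondStage()  # ldm models nest it one level down

    def get_tokenizer(model):
        # prefer the diffusers attribute; fall back to the ldm layout
        return getattr(model, "tokenizer", None) or model.cond_stage_model.tokenizer

    assert get_tokenizer(DiffusersModel()) == "clip-tokenizer"
    assert get_tokenizer(LegacyLDMModel()) == "clip-tokenizer"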
@@ -21,6 +21,7 @@ from urllib import request
 from shutil import get_terminal_size
 
+import npyscreen
 import torch
 import transformers
 from diffusers import AutoencoderKL
 from huggingface_hub import HfFolder
@@ -663,19 +664,8 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
     configs_src = Path(configs.__path__[0])
     configs_dest = Path(root) / "configs"
     if not os.path.samefile(configs_src, configs_dest):
-        shutil.copytree(configs_src,
-                        configs_dest,
-                        dirs_exist_ok=True,
-                        copy_function=shutil.copyfile,
-                        )
-        # Fix up directory permissions so that they are writable
-        # This can happen when running under Nix environment which
-        # makes the runtime directory template immutable.
-        for root,dirs,files in os.walk(os.path.join(root,name)):
-            for d in dirs:
-                Path(root,d).chmod(0o775)
-            for f in files:
-                Path(root,d).chmod(0o644)
+        shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
 
 
 # -------------------------------------
 def run_console_ui(
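Worth noting about the permission-fixing walk removed above: its file loop chmods `Path(root, d)` (the last directory name) rather than `Path(root, f)`, so the file permissions were never actually set. If the Nix-style read-only template problem resurfaces, a corrected sketch of that walk would be:

    import os
    from pathlib import Path

    def make_tree_writable(top: str):
        # directories need the execute bit as well; files just need rw for owner
        for root, dirs, files in os.walk(top):
            for d in dirs:
                Path(root, d).chmod(0o775)
            for f in files:
                Path(root, f).chmod(0o644)  # the removed code mistakenly used d here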
@@ -42,18 +42,8 @@ def invokeai_is_running()->bool:
         except psutil.AccessDenied:
             continue
     return False
 
-def do_post_install():
-    '''
-    Run postinstallation script.
-    '''
-    print("Looking for postinstallation script to run on this version...")
-    try:
-        from ldm.invoke.config.post_install.py import post_install
-        post_install()
-    except:
-        print("Postinstallation script not available for this version of InvokeAI")
-
 
 def welcome(versions: dict):
 
     @group()
@@ -117,7 +107,6 @@ def main():
         print(f':heavy_check_mark: Upgrade successful')
     else:
         print(f':exclamation: [bold red]Upgrade failed[/red bold]')
-    do_post_install()
 
 if __name__ == "__main__":
     try:
@@ -196,6 +196,16 @@ class addModelsForm(npyscreen.FormMultiPage):
             scroll_exit=True,
         )
         self.nextrely += 1
+        self.convert_models = self.add_widget_intelligent(
+            npyscreen.TitleSelectOne,
+            name="== CONVERT IMPORTED MODELS INTO DIFFUSERS==",
+            values=["Keep original format", "Convert to diffusers"],
+            value=0,
+            begin_entry_at=4,
+            max_height=4,
+            hidden=True,  # will appear when imported models box is edited
+            scroll_exit=True,
+        )
         self.cancel = self.add_widget_intelligent(
             npyscreen.ButtonPress,
             name="CANCEL",
@@ -230,6 +240,8 @@ class addModelsForm(npyscreen.FormMultiPage):
             self.show_directory_fields.addVisibleWhenSelected(i)
 
         self.show_directory_fields.when_value_edited = self._clear_scan_directory
+        self.import_model_paths.when_value_edited = self._show_hide_convert
+        self.autoload_directory.when_value_edited = self._show_hide_convert
 
     def resize(self):
         super().resize()
@@ -240,6 +252,13 @@ class addModelsForm(npyscreen.FormMultiPage):
         if not self.show_directory_fields.value:
             self.autoload_directory.value = ""
 
+    def _show_hide_convert(self):
+        model_paths = self.import_model_paths.value or ""
+        autoload_directory = self.autoload_directory.value or ""
+        self.convert_models.hidden = (
+            len(model_paths) == 0 and len(autoload_directory) == 0
+        )
+
     def _get_starter_model_labels(self) -> List[str]:
         window_width, window_height = get_terminal_size()
         label_width = 25
@@ -299,6 +318,7 @@ class addModelsForm(npyscreen.FormMultiPage):
         .scan_directory: Path to a directory of models to scan and import
         .autoscan_on_startup: True if invokeai should scan and import at startup time
         .import_model_paths: list of URLs, repo_ids and file paths to import
+        .convert_to_diffusers: if True, convert legacy checkpoints into diffusers
         """
         # we're using a global here rather than storing the result in the parentapp
         # due to some bug in npyscreen that is causing attributes to be lost
@@ -334,6 +354,7 @@ class addModelsForm(npyscreen.FormMultiPage):
 
         # URLs and the like
         selections.import_model_paths = self.import_model_paths.value.split()
+        selections.convert_to_diffusers = self.convert_models.value[0] == 1
 
 
 class AddModelApplication(npyscreen.NPSAppManaged):
@@ -346,6 +367,7 @@ class AddModelApplication(npyscreen.NPSAppManaged):
             scan_directory=None,
             autoscan_on_startup=None,
             import_model_paths=None,
+            convert_to_diffusers=None,
         )
 
     def onStart(self):
@@ -365,6 +387,7 @@ def process_and_execute(opt: Namespace, selections: Namespace):
     directory_to_scan = selections.scan_directory
     scan_at_startup = selections.autoscan_on_startup
     potential_models_to_install = selections.import_model_paths
+    convert_to_diffusers = selections.convert_to_diffusers
 
     install_requested_models(
         install_initial_models=models_to_install,
@@ -372,6 +395,7 @@ def process_and_execute(opt: Namespace, selections: Namespace):
         scan_directory=Path(directory_to_scan) if directory_to_scan else None,
         external_models=potential_models_to_install,
         scan_at_startup=scan_at_startup,
+        convert_to_diffusers=convert_to_diffusers,
         precision="float32"
         if opt.full_precision
         else choose_precision(torch.device(choose_torch_device())),
@@ -68,6 +68,7 @@ def install_requested_models(
     scan_directory: Path = None,
     external_models: List[str] = None,
     scan_at_startup: bool = False,
+    convert_to_diffusers: bool = False,
     precision: str = "float16",
     purge_deleted: bool = False,
     config_file_path: Path = None,
@@ -110,20 +111,20 @@ def install_requested_models(
     if len(external_models)>0:
         print("== INSTALLING EXTERNAL MODELS ==")
         for path_url_or_repo in external_models:
+            print(f'DEBUG: path_url_or_repo = {path_url_or_repo}')
             try:
                 model_manager.heuristic_import(
                     path_url_or_repo,
                     convert=convert_to_diffusers,
-                    config_file_callback=_pick_configuration_file,
                     commit_to_conf=config_file_path
                 )
             except KeyboardInterrupt:
                 sys.exit(-1)
-            except Exception as e:
-                print(f'An exception has occurred: {str(e)}')
+            except Exception:
+                pass
 
     if scan_at_startup and scan_directory.is_dir():
-        argument = '--autoconvert'
+        argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
         initfile = Path(Globals.root, Globals.initfile)
         replacement = Path(Globals.root, f'{Globals.initfile}.new')
         directory = str(scan_directory).replace('\\','/')
@@ -388,19 +389,7 @@ def update_config_file(successfully_downloaded: dict, config_file: Path):
     if config_file is default_config_file() and not config_file.parent.exists():
         configs_src = Dataset_path.parent
         configs_dest = default_config_file().parent
-        shutil.copytree(configs_src,
-                        configs_dest,
-                        dirs_exist_ok=True,
-                        copy_function=shutil.copyfile,
-                        )
-        # Fix up directory permissions so that they are writable
-        # This can happen when running under Nix environment which
-        # makes the runtime directory template immutable.
-        for root,dirs,files in os.walk(default_config_file().parent):
-            for d in dirs:
-                Path(root,d).chmod(0o775)
-            for f in files:
-                Path(root,d).chmod(0o644)
+        shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
 
     yaml = new_config_file_contents(successfully_downloaded, config_file)
 
@@ -1,168 +0,0 @@
-'''ldm.invoke.config.post_install
-
-This defines a single exportable function, post_install(), which does
-post-installation stuff like migrating models directories, adding new
-config files, etc.
-
-From the command line, its entry point is invokeai-postinstall.
-'''
-
-import os
-import sys
-from packaging import version
-from pathlib import Path
-from shutil import move,rmtree,copyfile
-from typing import Union
-
-import invokeai.configs as conf
-import ldm.invoke
-from ..globals import Globals, global_cache_dir, global_config_dir
-
-def post_install():
-    '''
-    Do version and model updates, etc.
-    Should be called once after every version update.
-    '''
-    _migrate_models()
-    _run_patches()
-
-
-def _migrate_models():
-    """
-    Migrate the ~/invokeai/models directory from the legacy format used through 2.2.5
-    to the 2.3.0 "diffusers" version. This should be a one-time operation, called at
-    script startup time.
-    """
-    # Three transformer models to check: bert, clip and safety checker, and
-    # the diffusers as well
-    models_dir = Path(Globals.root, "models")
-    legacy_locations = [
-        Path(
-            models_dir,
-            "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker",
-        ),
-        Path("bert-base-uncased/models--bert-base-uncased"),
-        Path(
-            "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
-        ),
-    ]
-    legacy_locations.extend(list(global_cache_dir("diffusers").glob("*")))
-    legacy_layout = False
-    for model in legacy_locations:
-        legacy_layout = legacy_layout or model.exists()
-    if not legacy_layout:
-        return
-
-    print(
-        """
->> ALERT:
->> The location of your previously-installed diffusers models needs to move from
->> invokeai/models/diffusers to invokeai/models/hub due to a change introduced by
->> diffusers version 0.14. InvokeAI will now move all models from the "diffusers" directory
->> into "hub" and then remove the diffusers directory. This is a quick, safe, one-time
->> operation. However if you have customized either of these directories and need to
->> make adjustments, please press ctrl-C now to abort and relaunch InvokeAI when you are ready.
->> Otherwise press <enter> to continue."""
-    )
-    print("** This is a quick one-time operation.")
-    input("continue> ")
-
-    # transformer files get moved into the hub directory
-    if _is_huggingface_hub_directory_present():
-        hub = global_cache_dir("hub")
-    else:
-        hub = models_dir / "hub"
-
-    os.makedirs(hub, exist_ok=True)
-    for model in legacy_locations:
-        source = models_dir / model
-        dest = hub / model.stem
-        if dest.exists() and not source.exists():
-            continue
-        print(f"** {source} => {dest}")
-        if source.exists():
-            if dest.is_symlink():
-                print(f"** Found symlink at {dest.name}. Not migrating.")
-            elif dest.exists():
-                if source.is_dir():
-                    rmtree(source)
-                else:
-                    source.unlink()
-            else:
-                move(source, dest)
-
-    # now clean up by removing any empty directories
-    empty = [
-        root
-        for root, dirs, files, in os.walk(models_dir)
-        if not len(dirs) and not len(files)
-    ]
-    for d in empty:
-        os.rmdir(d)
-    print("** Migration is done. Continuing...")
-
-
-def _is_huggingface_hub_directory_present() -> bool:
-    return (
-        os.getenv("HF_HOME") is not None or os.getenv("XDG_CACHE_HOME") is not None
-    )
-
-# This routine performs any patch-ups needed after installation
-def _run_patches():
-    _install_missing_config_files()
-    version_file = Path(Globals.root,'.version')
-    if version_file.exists():
-        with open(version_file,'r') as f:
-            root_version = version.parse(f.readline() or 'v2.3.2')
-    else:
-        root_version = version.parse('v2.3.2')
-    app_version = version.parse(ldm.invoke.__version__)
-    if root_version < app_version:
-        try:
-            _do_version_update(root_version, ldm.invoke.__version__)
-            with open(version_file,'w') as f:
-                f.write(ldm.invoke.__version__)
-        except:
-            print("** Version patching failed. Please try invokeai-postinstall later.")
-
-def _install_missing_config_files():
-    """
-    install ckpt configuration files that may have been added to the
-    distro after original root directory configuration
-    """
-    root_configs = Path(global_config_dir(), 'stable-diffusion')
-    repo_configs = None
-    for f in conf.__path__:
-        if Path(f, 'stable-diffusion', 'v1-inference.yaml').exists():
-            repo_configs = Path(f, 'stable-diffusion')
-            break
-    if not repo_configs:
-        return
-    for src in repo_configs.iterdir():
-        dest = root_configs / src.name
-        if not dest.exists():
-            copyfile(src,dest)
-
-def _do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
-    """
-    Make any updates to the launcher .sh and .bat scripts that may be needed
-    from release to release. This is not an elegant solution. Instead, the
-    launcher should be moved into the source tree and installed using pip.
-    """
-    if root_version < version.Version('v2.3.4'):
-        dest = Path(Globals.root,'loras')
-        dest.mkdir(exist_ok=True)
-    if root_version < version.Version('v2.3.3'):
-        if sys.platform == "linux":
-            print('>> Downloading new version of launcher script and its config file')
-            from ldm.util import download_with_progress_bar
-            url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
-
-            dest = Path(Globals.root,'invoke.sh.in')
-            assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
-            dest.replace(Path(Globals.root,'invoke.sh'))
-            os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
-
-            dest = Path(Globals.root,'dialogrc')
-            assert download_with_progress_bar(url_base+'dialogrc',dest)
-            dest.replace(Path(Globals.root,'.dialogrc'))
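The `_migrate_models()` routine deleted above (re-added as a `ModelManager` classmethod later in this compare) decides where the Hugging Face hub cache lives by checking `HF_HOME` / `XDG_CACHE_HOME`. A sketch of that decision, assuming the conventional Hugging Face cache layout (`$HF_HOME/hub` and `$XDG_CACHE_HOME/huggingface/hub`); InvokeAI's own `global_cache_dir()` may differ in detail:

    import os
    from pathlib import Path

    def hub_cache_dir(models_dir: Path) -> Path:
        # mirror the _is_huggingface_hub_directory_present() check: if the
        # user points Hugging Face at a shared cache, reuse it; otherwise
        # keep the hub cache inside the InvokeAI models directory
        if os.getenv("HF_HOME") is not None:
            return Path(os.environ["HF_HOME"]) / "hub"
        if os.getenv("XDG_CACHE_HOME") is not None:
            return Path(os.environ["XDG_CACHE_HOME"]) / "huggingface" / "hub"
        return models_dir / "hub"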
@@ -32,7 +32,8 @@ def expand_prompts(
     template_file: Path,
     run_invoke: bool = False,
     invoke_model: str = None,
-    invoke_outdir: Path = None,
+    invoke_outdir: str = None,
+    invoke_root: str = None,
     processes_per_gpu: int = 1,
 ):
     """
@@ -61,6 +62,8 @@ def expand_prompts(
         invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
         if invoke_model:
             invokeai_args.extend(("--model", invoke_model))
+        if invoke_root:
+            invokeai_args.extend(("--root", invoke_root))
         if invoke_outdir:
             outdir = os.path.expanduser(invoke_outdir)
             invokeai_args.extend(("--outdir", outdir))
@@ -79,6 +82,11 @@ def expand_prompts(
     )
     import ldm.invoke.CLI
 
+    print(f'DEBUG: BATCH PARENT ENVIRONMENT:')
+    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
+    print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
+
     parent_conn, child_conn = Pipe()
     children = set()
     for i in range(processes_to_launch):
@@ -111,6 +119,13 @@ def expand_prompts(
         for p in children:
             p.terminate()
 
+def _dummy_cli_main():
+    counter = 0
+    while line := sys.stdin.readline():
+        print(f'[{counter}] {os.getpid()} got command {line.rstrip()}\n')
+        counter += 1
+        time.sleep(1)
+
 def _get_fn_format(directory:str, sequence:int)->str:
     """
     Get a filename that doesn't exceed filename length restrictions
@@ -179,9 +194,9 @@ def _run_invoke(
     os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
     sys.argv = args
     sys.stdin = MessageToStdin(conn_in)
-    sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
-    with open(logfile, "w") as stderr, redirect_stderr(stderr):
-        entry_point()
+    # sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
+    # with open(logfile, "w") as stderr, redirect_stderr(stderr):
+    entry_point()
 
 
 def _filter_output(stream: TextIOBase):
@@ -238,6 +253,10 @@ def main():
         default=1,
         help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
     )
+    parser.add_argument(
+        '--root_dir',
+        default=None,
+        help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai' )
     opt = parser.parse_args()
 
     if opt.example:
@@ -261,6 +280,7 @@ def main():
         run_invoke=opt.invoke,
         invoke_model=opt.model,
         invoke_outdir=opt.outdir,
+        invoke_root=opt.root,
         processes_per_gpu=opt.processes_per_gpu,
     )
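For context on the batch runner above: `_run_invoke` pins each child process to one GPU by setting `CUDA_VISIBLE_DEVICES` before any CUDA initialization, and the parent fans prompts out to the children over `multiprocessing` pipes. A toy version of that fan-out, with a placeholder worker body instead of the real CLI entry point (one pipe per child, since sharing one connection end across readers is unsafe):

    import os
    from multiprocessing import Process, Pipe

    def worker(conn, gpu: int):
        # must be set before torch/CUDA initialization in the child
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
        while (prompt := conn.recv()) is not None:   # None is the shutdown signal
            print(f"[gpu {gpu}] pid {os.getpid()} got prompt: {prompt}")

    if __name__ == "__main__":
        prompts = ["a cat", "a dog", "a boat", "a house"]
        pipes, children = [], []
        for gpu in range(2):                         # one worker per (virtual) GPU
            parent_conn, child_conn = Pipe()
            p = Process(target=worker, args=(child_conn, gpu))
            p.start()
            pipes.append(parent_conn)
            children.append(p)
        for i, prompt in enumerate(prompts):         # round-robin the prompts
            pipes[i % len(pipes)].send(prompt)
        for conn in pipes:
            conn.send(None)
        for p in children:
            p.join()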
@@ -467,9 +467,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         if additional_guidance is None:
             additional_guidance = []
         extra_conditioning_info = conditioning_data.extra
-        with InvokeAIDiffuserComponent.custom_attention_context(self.invokeai_diffuser.model,
-                                                                extra_conditioning_info=extra_conditioning_info,
-                                                                step_count=len(self.scheduler.timesteps)
+        with self.invokeai_diffuser.custom_attention_context(extra_conditioning_info=extra_conditioning_info,
+                                                             step_count=len(self.scheduler.timesteps)
         ):
 
             yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps,
@@ -1007,6 +1007,81 @@ class ModelManager(object):
             """
         )
 
+    @classmethod
+    def migrate_models(cls):
+        """
+        Migrate the ~/invokeai/models directory from the legacy format used through 2.2.5
+        to the 2.3.0 "diffusers" version. This should be a one-time operation, called at
+        script startup time.
+        """
+        # Three transformer models to check: bert, clip and safety checker, and
+        # the diffusers as well
+        models_dir = Path(Globals.root, "models")
+        legacy_locations = [
+            Path(
+                models_dir,
+                "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker",
+            ),
+            Path("bert-base-uncased/models--bert-base-uncased"),
+            Path(
+                "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
+            ),
+        ]
+        legacy_locations.extend(list(global_cache_dir("diffusers").glob("*")))
+        legacy_layout = False
+        for model in legacy_locations:
+            legacy_layout = legacy_layout or model.exists()
+        if not legacy_layout:
+            return
+
+        print(
+            """
+>> ALERT:
+>> The location of your previously-installed diffusers models needs to move from
+>> invokeai/models/diffusers to invokeai/models/hub due to a change introduced by
+>> diffusers version 0.14. InvokeAI will now move all models from the "diffusers" directory
+>> into "hub" and then remove the diffusers directory. This is a quick, safe, one-time
+>> operation. However if you have customized either of these directories and need to
+>> make adjustments, please press ctrl-C now to abort and relaunch InvokeAI when you are ready.
+>> Otherwise press <enter> to continue."""
+        )
+        print("** This is a quick one-time operation.")
+        input("continue> ")
+
+        # transformer files get moved into the hub directory
+        if cls._is_huggingface_hub_directory_present():
+            hub = global_cache_dir("hub")
+        else:
+            hub = models_dir / "hub"
+
+        os.makedirs(hub, exist_ok=True)
+        for model in legacy_locations:
+            source = models_dir / model
+            dest = hub / model.stem
+            if dest.exists() and not source.exists():
+                continue
+            print(f"** {source} => {dest}")
+            if source.exists():
+                if dest.is_symlink():
+                    print(f"** Found symlink at {dest.name}. Not migrating.")
+                elif dest.exists():
+                    if source.is_dir():
+                        rmtree(source)
+                    else:
+                        source.unlink()
+                else:
+                    move(source, dest)
+
+        # now clean up by removing any empty directories
+        empty = [
+            root
+            for root, dirs, files, in os.walk(models_dir)
+            if not len(dirs) and not len(files)
+        ]
+        for d in empty:
+            os.rmdir(d)
+        print("** Migration is done. Continuing...")
+
     def _resolve_path(
         self, source: Union[str, Path], dest_directory: str
     ) -> Optional[Path]:
@@ -1231,3 +1306,8 @@ class ModelManager(object):
             return path
         return Path(Globals.root, path).resolve()
 
+    @staticmethod
+    def _is_huggingface_hub_directory_present() -> bool:
+        return (
+            os.getenv("HF_HOME") is not None or os.getenv("XDG_CACHE_HOME") is not None
+        )
@@ -288,7 +288,16 @@ class InvokeAICrossAttentionMixin:
             return self.einsum_op_tensor_mem(q, k, v, 32)
 
 
-def setup_cross_attention_control_attention_processors(unet: UNet2DConditionModel, context: Context):
+def restore_default_cross_attention(model, is_running_diffusers: bool, processors_to_restore: Optional[AttnProcessor]=None):
+    if is_running_diffusers:
+        unet = model
+        unet.set_attn_processor(processors_to_restore or CrossAttnProcessor())
+    else:
+        remove_attention_function(model)
+
+
+def override_cross_attention(model, context: Context, is_running_diffusers = False):
     """
     Inject attention parameters and functions into the passed in model to enable cross attention editing.
 
@@ -314,15 +323,22 @@ def setup_cross_attention_control_attention_processors(unet: UNet2DConditionMode
 
     context.cross_attention_mask = mask.to(device)
     context.cross_attention_index_map = indices.to(device)
-    old_attn_processors = unet.attn_processors
-    if torch.backends.mps.is_available():
-        # see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
-        unet.set_attn_processor(SwapCrossAttnProcessor())
-    else:
-        # try to re-use an existing slice size
-        default_slice_size = 4
-        slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size)
-        unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
+    if is_running_diffusers:
+        unet = model
+        old_attn_processors = unet.attn_processors
+        if torch.backends.mps.is_available():
+            # see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
+            unet.set_attn_processor(SwapCrossAttnProcessor())
+        else:
+            # try to re-use an existing slice size
+            default_slice_size = 4
+            slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size)
+            unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
+    else:
+        context.register_cross_attention_modules(model)
+        inject_attention_function(model, context)
 
 
 def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
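A small idiom worth calling out in the hunk above: `next()` with a generator and a default value is used to re-use the slice size of any already-installed sliced attention processor, falling back to a default otherwise. Illustrated with stand-in classes:

    # re-use an existing slice size if any sliced processor is already
    # installed, otherwise fall back to a default
    class SlicedProc:
        def __init__(self, slice_size):
            self.slice_size = slice_size

    processors = {"down": object(), "mid": SlicedProc(2), "up": object()}
    default_slice_size = 4
    slice_size = next(
        (p.slice_size for p in processors.values() if isinstance(p, SlicedProc)),
        default_slice_size,
    )
    print(slice_size)  # 2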
@@ -12,6 +12,17 @@ class DDIMSampler(Sampler):
         self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model,
                                                            model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond))
 
+    def prepare_to_sample(self, t_enc, **kwargs):
+        super().prepare_to_sample(t_enc, **kwargs)
+
+        extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
+        all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
+
+        if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
+            self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
+        else:
+            self.invokeai_diffuser.restore_default_cross_attention()
+
     # This is the central routine
     @torch.no_grad()
@@ -38,6 +38,15 @@ class CFGDenoiser(nn.Module):
                                                            model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond))
 
 
+    def prepare_to_sample(self, t_enc, **kwargs):
+
+        extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
+
+        if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
+            self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = t_enc)
+        else:
+            self.invokeai_diffuser.restore_default_cross_attention()
+
     def forward(self, x, sigma, uncond, cond, cond_scale):
         next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale)
@@ -14,6 +14,17 @@ class PLMSSampler(Sampler):
     def __init__(self, model, schedule='linear', device=None, **kwargs):
         super().__init__(model,schedule,model.num_timesteps, device)
 
+    def prepare_to_sample(self, t_enc, **kwargs):
+        super().prepare_to_sample(t_enc, **kwargs)
+
+        extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
+        all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
+
+        if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
+            self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
+        else:
+            self.invokeai_diffuser.restore_default_cross_attention()
+
     # this is the essential routine
     @torch.no_grad()
@@ -1,18 +1,18 @@
 from contextlib import contextmanager
 from dataclasses import dataclass
 from math import ceil
-from typing import Callable, Optional, Union, Any
+from typing import Callable, Optional, Union, Any, Dict
 
 import numpy as np
 import torch
 
-from diffusers import UNet2DConditionModel
+from diffusers.models.cross_attention import AttnProcessor
 from typing_extensions import TypeAlias
 
 from ldm.invoke.globals import Globals
 from ldm.models.diffusion.cross_attention_control import (
     Arguments,
-    setup_cross_attention_control_attention_processors,
+    restore_default_cross_attention,
+    override_cross_attention,
     Context,
     get_cross_attention_modules,
     CrossAttentionType,
@@ -84,45 +84,66 @@ class InvokeAIDiffuserComponent:
         self.cross_attention_control_context = None
         self.sequential_guidance = Globals.sequential_guidance
 
-    @classmethod
     @contextmanager
     def custom_attention_context(
-        clss,
-        unet: UNet2DConditionModel,  # note: also may futz with the text encoder depending on requested LoRAs
-        extra_conditioning_info: Optional[ExtraConditioningInfo],
-        step_count: int
+        self, extra_conditioning_info: Optional[ExtraConditioningInfo], step_count: int
     ):
-        old_attn_processors = None
+        old_attn_processor = None
         if extra_conditioning_info and (
             extra_conditioning_info.wants_cross_attention_control
             | extra_conditioning_info.has_lora_conditions
         ):
-            old_attn_processors = unet.attn_processors
-            # Load lora conditions into the model
-            if extra_conditioning_info.has_lora_conditions:
-                for condition in extra_conditioning_info.lora_conditions:
-                    condition()  # target model is stored in condition state for some reason
-            if extra_conditioning_info.wants_cross_attention_control:
-                cross_attention_control_context = Context(
-                    arguments=extra_conditioning_info.cross_attention_control_args,
-                    step_count=step_count,
-                )
-                setup_cross_attention_control_attention_processors(
-                    unet,
-                    cross_attention_control_context,
-                )
+            old_attn_processor = self.override_attention_processors(
+                extra_conditioning_info, step_count=step_count
+            )
 
         try:
             yield None
         finally:
-            if old_attn_processors is not None:
-                unet.set_attn_processor(old_attn_processors)
+            if old_attn_processor is not None:
+                self.restore_default_cross_attention(old_attn_processor)
            if extra_conditioning_info and extra_conditioning_info.has_lora_conditions:
                 for lora_condition in extra_conditioning_info.lora_conditions:
                     lora_condition.unload()
             # TODO resuscitate attention map saving
             # self.remove_attention_map_saving()
 
+    def override_attention_processors(
+        self, conditioning: ExtraConditioningInfo, step_count: int
+    ) -> Dict[str, AttnProcessor]:
+        """
+        setup cross attention .swap control. for diffusers this replaces the attention processor, so
+        the previous attention processor is returned so that the caller can restore it later.
+        """
+        old_attn_processors = self.model.attn_processors
+
+        # Load lora conditions into the model
+        if conditioning.has_lora_conditions:
+            for condition in conditioning.lora_conditions:
+                condition(self.model)
+
+        if conditioning.wants_cross_attention_control:
+            self.cross_attention_control_context = Context(
+                arguments=conditioning.cross_attention_control_args,
+                step_count=step_count,
+            )
+            override_cross_attention(
+                self.model,
+                self.cross_attention_control_context,
+                is_running_diffusers=self.is_running_diffusers,
+            )
+        return old_attn_processors
+
+    def restore_default_cross_attention(
+        self, processors_to_restore: Optional[dict[str, "AttnProcessor"]] = None
+    ):
+        self.cross_attention_control_context = None
+        restore_default_cross_attention(
+            self.model,
+            is_running_diffusers=self.is_running_diffusers,
+            processors_to_restore=processors_to_restore,
+        )
+
     def setup_attention_map_saving(self, saver: AttentionMapSaver):
         def callback(slice, dim, offset, slice_size, key):
             if dim is not None:
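The refactor above turns `custom_attention_context` from a classmethod taking a `unet` into an instance method that delegates to `override_attention_processors()` and restores the saved processors in `finally`. That save/override/restore shape is the standard `@contextmanager` idiom; here is a stripped-down sketch where the `Diffuser` class and its string "processors" are stand-ins, not InvokeAI's real types:

    from contextlib import contextmanager

    class Diffuser:
        def __init__(self):
            self.processors = {"down": "default", "up": "default"}

        def override_attention_processors(self):
            old = dict(self.processors)              # snapshot current processors
            self.processors = {k: "swap" for k in self.processors}
            return old

        def restore_default_cross_attention(self, old):
            self.processors = old

        @contextmanager
        def custom_attention_context(self):
            old = self.override_attention_processors()
            try:
                yield
            finally:
                # restore runs even if generation raises mid-denoise
                self.restore_default_cross_attention(old)

    d = Diffuser()
    with d.custom_attention_context():
        assert d.processors["down"] == "swap"
    assert d.processors["down"] == "default"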
@@ -31,13 +31,18 @@ class LoRALayer:
         self.name = name
         self.scale = alpha / rank if (alpha and rank) else 1.0
 
-    def forward(self, lora, input_h):
+    def forward(self, lora, input_h, output):
         if self.mid is None:
-            weight = self.up(self.down(*input_h))
+            output = (
+                output
+                + self.up(self.down(*input_h)) * lora.multiplier * self.scale
+            )
         else:
-            weight = self.up(self.mid(self.down(*input_h)))
-
-        return weight * lora.multiplier * self.scale
+            output = (
+                output
+                + self.up(self.mid(self.down(*input_h))) * lora.multiplier * self.scale
+            )
+        return output
 
 class LoHALayer:
     lora_name: str
@@ -59,7 +64,7 @@ class LoHALayer:
         self.name = name
         self.scale = alpha / rank if (alpha and rank) else 1.0
 
-    def forward(self, lora, input_h):
+    def forward(self, lora, input_h, output):
 
         if type(self.org_module) == torch.nn.Conv2d:
             op = torch.nn.functional.conv2d
@@ -81,9 +86,9 @@ class LoHALayer:
             rebuild1 = torch.einsum('i j k l, j r, i p -> p r k l', self.t1, self.w1_b, self.w1_a)
             rebuild2 = torch.einsum('i j k l, j r, i p -> p r k l', self.t2, self.w2_b, self.w2_a)
             weight = rebuild1 * rebuild2
 
         bias = self.bias if self.bias is not None else 0
-        return op(
+        return output + op(
             *input_h,
             (weight + bias).view(self.org_module.weight.shape),
             None,
@@ -91,69 +96,6 @@
         ) * lora.multiplier * self.scale
 
 
-class LoKRLayer:
-    lora_name: str
-    name: str
-    scale: float
-
-    w1: Optional[torch.Tensor] = None
-    w1_a: Optional[torch.Tensor] = None
-    w1_b: Optional[torch.Tensor] = None
-    w2: Optional[torch.Tensor] = None
-    w2_a: Optional[torch.Tensor] = None
-    w2_b: Optional[torch.Tensor] = None
-    t2: Optional[torch.Tensor] = None
-    bias: Optional[torch.Tensor] = None
-
-    org_module: torch.nn.Module
-
-    def __init__(self, lora_name: str, name: str, rank=4, alpha=1.0):
-        self.lora_name = lora_name
-        self.name = name
-        self.scale = alpha / rank if (alpha and rank) else 1.0
-
-    def forward(self, lora, input_h):
-
-        if type(self.org_module) == torch.nn.Conv2d:
-            op = torch.nn.functional.conv2d
-            extra_args = dict(
-                stride=self.org_module.stride,
-                padding=self.org_module.padding,
-                dilation=self.org_module.dilation,
-                groups=self.org_module.groups,
-            )
-
-        else:
-            op = torch.nn.functional.linear
-            extra_args = {}
-
-        w1 = self.w1
-        if w1 is None:
-            w1 = self.w1_a @ self.w1_b
-
-        w2 = self.w2
-        if w2 is None:
-            if self.t2 is None:
-                w2 = self.w2_a @ self.w2_b
-            else:
-                w2 = torch.einsum('i j k l, i p, j r -> p r k l', self.t2, self.w2_a, self.w2_b)
-
-        if len(w2.shape) == 4:
-            w1 = w1.unsqueeze(2).unsqueeze(2)
-        w2 = w2.contiguous()
-        weight = torch.kron(w1, w2).reshape(self.org_module.weight.shape)
-
-        bias = self.bias if self.bias is not None else 0
-        return op(
-            *input_h,
-            (weight + bias).view(self.org_module.weight.shape),
-            None,
-            **extra_args
-        ) * lora.multiplier * self.scale
-
-
 class LoRAModuleWrapper:
     unet: UNet2DConditionModel
     text_encoder: CLIPTextModel
@@ -217,7 +159,7 @@ class LoRAModuleWrapper:
                 layer = lora.layers.get(name, None)
                 if layer is None:
                     continue
-                output += layer.forward(lora, input_h)
+                output = layer.forward(lora, input_h, output)
             return output
 
         return lora_forward
@@ -365,36 +307,6 @@ class LoRA:
             else:
                 layer.t2 = None
 
-        # lokr
-        elif "lokr_w1_b" in values or "lokr_w1" in values:
-
-            if "lokr_w1_b" in values:
-                rank = values["lokr_w1_b"].shape[0]
-            elif "lokr_w2_b" in values:
-                rank = values["lokr_w2_b"].shape[0]
-            else:
-                rank = None  # unscaled
-
-            layer = LoKRLayer(self.name, stem, rank, alpha)
-            layer.org_module = wrapped
-            layer.bias = bias
-
-            if "lokr_w1" in values:
-                layer.w1 = values["lokr_w1"].to(device=self.device, dtype=self.dtype)
-            else:
-                layer.w1_a = values["lokr_w1_a"].to(device=self.device, dtype=self.dtype)
-                layer.w1_b = values["lokr_w1_b"].to(device=self.device, dtype=self.dtype)
-
-            if "lokr_w2" in values:
-                layer.w2 = values["lokr_w2"].to(device=self.device, dtype=self.dtype)
-            else:
-                layer.w2_a = values["lokr_w2_a"].to(device=self.device, dtype=self.dtype)
-                layer.w2_b = values["lokr_w2_b"].to(device=self.device, dtype=self.dtype)
-
-            if "lokr_t2" in values:
-                layer.t2 = values["lokr_t2"].to(device=self.device, dtype=self.dtype)
-
-
         else:
             print(
                 f">> Encountered unknown lora layer module in {self.name}: {stem} - {type(wrapped).__name__}"
@@ -427,14 +339,12 @@ class KohyaLoraManager:
         return lora
 
     def apply_lora_model(self, name, mult: float = 1.0):
-        path_file = None
         for suffix in ["ckpt", "safetensors", "pt"]:
-            path_files = [x for x in Path(self.lora_path).glob(f"**/{name}.{suffix}")]
-            if len(path_files):
-                path_file = path_files[0]
+            path_file = Path(self.lora_path, f"{name}.{suffix}")
+            if path_file.exists():
                 print(f" | Loading lora {path_file.name} with weight {mult}")
                 break
-        if not path_file:
+        if not path_file.exists():
             print(f" ** Unable to find lora: {name}")
             return
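The LoRA layer change above threads the running `output` tensor through each layer's `forward` instead of summing return values, but the arithmetic is the same additive low-rank update: `output + up(down(x)) * multiplier * scale` with `scale = alpha / rank`. A self-contained linear-layer version of that update (a sketch, not the project's actual class):

    import torch

    class TinyLoRALinear(torch.nn.Module):
        def __init__(self, features: int, rank: int = 4, alpha: float = 1.0, multiplier: float = 1.0):
            super().__init__()
            self.down = torch.nn.Linear(features, rank, bias=False)  # project to rank
            self.up = torch.nn.Linear(rank, features, bias=False)    # project back
            self.scale = alpha / rank
            self.multiplier = multiplier

        def forward(self, x: torch.Tensor, output: torch.Tensor) -> torch.Tensor:
            # additive low-rank correction on top of the frozen layer's output
            return output + self.up(self.down(x)) * self.multiplier * self.scale

    x = torch.randn(1, 8)
    base_output = torch.randn(1, 8)  # stand-in for the wrapped module's output
    print(TinyLoRALinear(8)(x, base_output).shape)  # torch.Size([1, 8])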
@@ -1,7 +1,5 @@
 import os
 from pathlib import Path
 
-from diffusers import UNet2DConditionModel, StableDiffusionPipeline
-
 from ldm.invoke.globals import global_lora_models_dir
 from .kohya_lora_manager import KohyaLoraManager
 from typing import Optional, Dict
@@ -10,29 +8,20 @@ class LoraCondition:
     name: str
     weight: float
 
-    def __init__(self,
-                 name,
-                 weight: float = 1.0,
-                 unet: UNet2DConditionModel=None,  # for diffusers format LoRAs
-                 kohya_manager: Optional[KohyaLoraManager]=None,  # for KohyaLoraManager-compatible LoRAs
-                 ):
+    def __init__(self, name, weight: float = 1.0, kohya_manager: Optional[KohyaLoraManager]=None):
         self.name = name
         self.weight = weight
         self.kohya_manager = kohya_manager
-        self.unet = unet
 
-    def __call__(self):
+    def __call__(self, model):
         # TODO: make model able to load from huggingface, rather then just local files
         path = Path(global_lora_models_dir(), self.name)
         if path.is_dir():
-            if not self.unet:
-                print(f" ** Unable to load diffusers-format LoRA {self.name}: unet is None")
-                return
-            if self.unet.load_attn_procs:
+            if model.load_attn_procs:
                 file = Path(path, "pytorch_lora_weights.bin")
                 if file.is_file():
                     print(f">> Loading LoRA: {path}")
-                    self.unet.load_attn_procs(path.absolute().as_posix())
+                    model.load_attn_procs(path.absolute().as_posix())
                 else:
                     print(f" ** Unable to find valid LoRA at: {path}")
             else:
@@ -48,16 +37,15 @@ class LoraCondition:
             self.kohya_manager.unload_applied_lora(self.name)
 
 class LoraManager:
-    def __init__(self, pipe: StableDiffusionPipeline):
+    def __init__(self, pipe):
         # Kohya class handles lora not generated through diffusers
         self.kohya = KohyaLoraManager(pipe, global_lora_models_dir())
-        self.unet = pipe.unet
 
     def set_loras_conditions(self, lora_weights: list):
         conditions = []
         if len(lora_weights) > 0:
             for lora in lora_weights:
-                conditions.append(LoraCondition(lora.model, lora.weight, self.unet, self.kohya))
+                conditions.append(LoraCondition(lora.model, lora.weight, self.kohya))
 
         if len(conditions) > 0:
             return conditions
@@ -75,4 +63,4 @@ class LoraManager:
             if suffix in [".ckpt", ".pt", ".safetensors"]:
                 models_found[name]=Path(root,x)
         return models_found
@@ -34,7 +34,7 @@ dependencies = [
     "clip_anytorch",
     "compel~=1.1.0",
     "datasets",
-    "diffusers[torch]==0.14",
+    "diffusers[torch]~=0.14",
    "dnspython==2.2.1",
     "einops",
     "eventlet",
@@ -53,7 +53,7 @@ dependencies = [
     "imageio-ffmpeg",
     "k-diffusion",
     "kornia",
-    "npyscreen~=4.10.5",
+    "npyscreen",
     "numpy<1.24",
     "omegaconf",
     "opencv-python",
@@ -128,7 +128,6 @@ requires-python = ">=3.9, <3.11"
 "invokeai-update" = "ldm.invoke.config.invokeai_update:main"
 "invokeai-batch" = "ldm.invoke.dynamic_prompts:main"
 "invokeai-metadata" = "ldm.invoke.invokeai_metadata:main"
-"invokeai-postinstall" = "ldm.invoke.config.post_install:post_install"
 
 [project.urls]
 "Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
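On the dependency change above: `==0.14` admits exactly version 0.14, while the compatible-release specifier `~=0.14` is equivalent to `>=0.14, ==0.*`, so later 0.x releases also satisfy it. The difference can be checked directly with the `packaging` library:

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    pinned = SpecifierSet("==0.14")   # exactly 0.14 only
    relaxed = SpecifierSet("~=0.14")  # compatible release: >=0.14, ==0.*

    for v in ["0.14", "0.14.1", "0.15", "1.0"]:
        print(v, Version(v) in pinned, Version(v) in relaxed)
    # 0.14    True  True
    # 0.14.1  False True
    # 0.15    False True
    # 1.0     False False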