all files migrated; tweaks needed

Lincoln Stein 2023-03-03 00:02:15 -05:00
parent 3f0b0f3250
commit 6a990565ff
496 changed files with 276 additions and 934 deletions
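The hunks below repeat a small set of module moves. As a reader's summary (inferred from the import rewrites in this diff, not an authoritative list from the commit itself), the recurring old-to-new path mapping in Python form:

# Recurring package moves applied by this commit, as seen in the hunks below.
# Keys are pre-commit module paths; values are their post-commit homes.
MODULE_MOVES = {
    "ldm.generate": "invokeai.backend.generate",
    "ldm.invoke.globals": "invokeai.backend.globals",
    "ldm.invoke.devices": "invokeai.backend.util",            # re-exported via util/__init__.py
    "ldm.invoke.restoration": "invokeai.backend.restoration",
    "ldm.invoke.config.widgets": "invokeai.frontend.config.widgets",
    "ldm.invoke.merge_diffusers": "invokeai.frontend.merge.merge_diffusers",
    "ldm.invoke.training.textual_inversion_training": "invokeai.backend.training",
    "invokeai.frontend.dist": "invokeai.frontend.web.dist",   # bundled web assets
}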

.gitignore (vendored): 2 lines changed
View File

@@ -198,7 +198,7 @@ checkpoints
 .DS_Store
 # Let the frontend manage its own gitignore
-!invokeai/frontend/*
+!invokeai/frontend/web/*
 # Scratch folder
 .scratch/

View File

@@ -3,11 +3,10 @@ import os
 import sys
 import traceback
-from invokeai.backend.models import ModelManager
+from invokeai.backend import ModelManager, Generate
 from ...globals import Globals
-from ....generate import Generate
-import ldm.invoke
+import invokeai.version

 # TODO: most of this code should be split into individual services as the Generate.py code is deprecated
 def get_generate(args, config) -> Generate:
@@ -16,7 +15,7 @@ def get_generate(args, config) -> Generate:
     if not os.path.exists(config_file):
         report_model_error(args, FileNotFoundError(f"The file {config_file} could not be found."))

-    print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
+    print(f'>> {invokeai.version.__app_name__}, version {invokeai.version.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')

     # these two lines prevent a horrible warning message from appearing
@@ -160,7 +159,7 @@ def report_model_error(opt:Namespace, e:Exception):

 # Temporary initializer for Generate until we migrate off of it
 def old_get_generate(args, config) -> Generate:
     # TODO: Remove the need for globals
-    from ldm.invoke.globals import Globals
+    from invokeai.backend.globals import Globals

     # alert - setting globals here
     Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))

View File

@@ -3,8 +3,7 @@ from .invocation_queue import InvocationQueueABC
 from .item_storage import ItemStorageABC
 from .image_storage import ImageStorageBase
 from .events import EventServiceBase
-from ....generate import Generate
+from invokeai.backend import Generate

 class InvocationServices():
     """Services that can be used by invocations"""

View File

@@ -1,8 +1,8 @@
 '''
 Initialization file for invokeai.backend
 '''
-# this is causing circular import issues
-# from .invoke_ai_web_server import InvokeAIWebServer
-from .model_manager import ModelManager
+from .model_management import ModelManager
+from .generate import Generate

View File

@@ -98,8 +98,8 @@ from typing import List
 from invokeai.backend.image_util import retrieve_metadata
 import invokeai.version
-from ldm.invoke.globals import Globals
-from invokeai.backend.prompting import split_weighted_subprompts
+from .globals import Globals
+from .prompting import split_weighted_subprompts

 APP_ID = invokeai.version.__app_id__
 APP_NAME = invokeai.version.__app_name__

View File

@@ -39,14 +39,14 @@ import invokeai.configs as configs
 from ..args import PRECISION_CHOICES, Args
 from ..globals import Globals, global_config_dir, global_config_file, global_cache_dir
-from .model_install import addModelsForm, process_and_execute
+from ...frontend.config.model_install import addModelsForm, process_and_execute
 from .model_install_backend import (
     default_dataset,
     download_from_hf,
     recommended_datasets,
     hf_download_with_resume,
 )
-from .widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size
+from ...frontend.config.widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size

 warnings.filterwarnings("ignore")

View File

@@ -18,9 +18,9 @@ from tqdm import tqdm
 from typing import List

 import invokeai.configs as configs
-from invokeai.backend.stable_diffusion import StableDiffusionGeneratorPipeline
+from ..stable_diffusion import StableDiffusionGeneratorPipeline
 from ..globals import Globals, global_cache_dir, global_config_dir
-from invokeai.backend import ModelManager
+from ..model_management import ModelManager

 warnings.filterwarnings("ignore")

View File

@@ -25,17 +25,18 @@ from omegaconf import OmegaConf
 from PIL import Image, ImageOps
 from pytorch_lightning import logging, seed_everything

-from invokeai.backend import ModelManager
-from invokeai.backend.prompting import get_uc_and_c_and_ec
-from invokeai.backend.stable_diffusion import (DDIMSampler, KSampler, PLMSSampler)
-from invokeai.backend.generator import infill_methods
-from invokeai.backend.stable_diffusion.concepts_lib import HuggingFaceConceptsLibrary
-from invokeai.backend.devices import choose_precision, choose_torch_device
-from invokeai.backend.image_util import InitImageResizer, PngWriter, Txt2Mask
-from ldm.invoke.globals import Globals, global_cache_dir
-from ldm.invoke.args import metadata_from_png
-from ldm.invoke.seamless import configure_model_padding
+from . import ModelManager
+from .prompting import get_uc_and_c_and_ec
+from .stable_diffusion import (DDIMSampler, KSampler, PLMSSampler, HuggingFaceConceptsLibrary)
+from .generator import infill_methods
+from .util import choose_precision, choose_torch_device
+from .image_util import (InitImageResizer,
+                         PngWriter,
+                         Txt2Mask,
+                         configure_model_padding)
+from .globals import Globals, global_cache_dir
+from .args import metadata_from_png

 def fix_func(orig):
     if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():

View File

@@ -23,7 +23,7 @@ from tqdm import trange
 import invokeai.assets.web as web_assets
 from ..stable_diffusion.diffusion.ddpm import DiffusionWrapper
-from ..util import rand_perlin_2d
+from ..util.util import rand_perlin_2d

 downsampling = 8
 CAUTION_IMG = 'caution.png'

View File

@@ -1,5 +1,5 @@
 '''
-ldm.invoke.globals defines a small number of global variables that would
+invokeai.backend.globals defines a small number of global variables that would
 otherwise have to be passed through long and complex call chains.

 It defines a Namespace object named "Globals" that contains

View File

@@ -9,6 +9,7 @@ from .pngwriter import (PngWriter,
                         retrieve_metadata,
                         write_metadata,
                         )
+from .seamless import configure_model_padding

 def debug_image(
     debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False

View File

@@ -4,7 +4,7 @@ wraps the actual patchmatch object. It respects the global
 "try_patchmatch" attribute, so that patchmatch loading can
 be suppressed or deferred
 '''
-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals
 import numpy as np

 class PatchMatch:

View File

@@ -32,7 +32,7 @@ import numpy as np
 from transformers import AutoProcessor, CLIPSegForImageSegmentation
 from PIL import Image, ImageOps
 from torchvision import transforms
-from ldm.invoke.globals import global_cache_dir
+from invokeai.backend.globals import global_cache_dir

 CLIPSEG_MODEL = 'CIDAS/clipseg-rd64-refined'
 CLIPSEG_SIZE = 352

View File

@@ -0,0 +1,8 @@
+'''
+Initialization file for invokeai.backend.model_management
+'''
+from .model_manager import ModelManager
+from .convert_ckpt_to_diffusers import (load_pipeline_from_original_stable_diffusion_ckpt,
+                                        convert_ckpt_to_diffusers)
+from ...frontend.merge.merge_diffusers import (merge_diffusion_models,
+                                               merge_diffusion_models_and_commit)

View File

@@ -21,11 +21,11 @@ import re
 import torch
 import warnings
 from pathlib import Path
-from ldm.invoke.globals import (
+from invokeai.backend.globals import (
     global_cache_dir,
     global_config_dir,
 )
-from invokeai.models import ModelManager, SDLegacyType
+from .model_manager import ModelManager, SDLegacyType
 from safetensors.torch import load_file
 from typing import Union
@@ -56,7 +56,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from diffusers.utils import is_safetensors_available
 from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig
-from invokeai.generator import StableDiffusionGeneratorPipeline
+from ..stable_diffusion import StableDiffusionGeneratorPipeline

 def shave_segments(path, n_shave_prefix_segments=1):
     """
@@ -1014,7 +1014,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     return pipe

-def convert_ckpt_to_diffuser(
+def convert_ckpt_to_diffusers(
     checkpoint_path:Union[str,Path],
     dump_path:Union[str,Path],
     **kwargs,
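Call sites must follow the rename from convert_ckpt_to_diffuser to convert_ckpt_to_diffusers (the model_manager hunk further down does exactly this). A hypothetical invocation, with made-up paths and only the extract_ema keyword that this diff itself shows being passed:

# Hypothetical paths; the positional signature (checkpoint_path, dump_path, **kwargs)
# comes from the hunk above.
from invokeai.backend.model_management import convert_ckpt_to_diffusers

convert_ckpt_to_diffusers(
    "models/ldm/stable-diffusion-v1/my-model.ckpt",
    "models/converted_ckpts/my-model",
    extract_ema=True,
)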

View File

@@ -31,14 +31,13 @@ from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path

-from .devices import CPU_DEVICE
-from ldm.invoke.globals import Globals, global_cache_dir
-from .util import (
+from ..util import CPU_DEVICE
+from invokeai.backend.globals import Globals, global_cache_dir
+from ..util import (
     ask_user,
     download_with_resume,
-    url_attachment_name,
 )
-from .stable_diffusion import StableDiffusionGeneratorPipeline
+from ..stable_diffusion import StableDiffusionGeneratorPipeline

 class SDLegacyType(Enum):
     V1 = 1
@@ -416,6 +415,51 @@ class ModelManager(object):
         return pipeline, width, height, model_hash

+    def _load_ckpt_model(self, model_name, mconfig):
+        config = mconfig.config
+        weights = mconfig.weights
+        vae = mconfig.get("vae")
+        width = mconfig.width
+        height = mconfig.height
+
+        if not os.path.isabs(config):
+            config = os.path.join(Globals.root, config)
+        if not os.path.isabs(weights):
+            weights = os.path.normpath(os.path.join(Globals.root, weights))
+
+        # Convert to diffusers and return a diffusers pipeline
+        print(
+            f">> Converting legacy checkpoint {model_name} into a diffusers model..."
+        )
+
+        from . import load_pipeline_from_original_stable_diffusion_ckpt
+
+        self.offload_model(self.current_model)
+        if vae_config := self._choose_diffusers_vae(model_name):
+            vae = self._load_vae(vae_config)
+        if self._has_cuda():
+            torch.cuda.empty_cache()
+        pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
+            checkpoint_path=weights,
+            original_config_file=config,
+            vae=vae,
+            return_generator_pipeline=True,
+            precision=torch.float16
+            if self.precision == "float16"
+            else torch.float32,
+        )
+        if self.sequential_offload:
+            pipeline.enable_offload_submodels(self.device)
+        else:
+            pipeline.to(self.device)
+
+        return (
+            pipeline,
+            width,
+            height,
+            "NOHASH",
+        )
+
     def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path:
         if isinstance(model_name, DictConfig) or isinstance(model_name, dict):
             mconfig = model_name
@@ -519,66 +563,6 @@ class ModelManager(object):
             self.commit(commit_to_conf)
         return model_name

-    def import_ckpt_model(
-        self,
-        weights: Union[str, Path],
-        config: Union[str, Path] = "configs/stable-diffusion/v1-inference.yaml",
-        vae: Union[str, Path] = None,
-        model_name: str = None,
-        model_description: str = None,
-        commit_to_conf: Path = None,
-    ) -> str:
-        """
-        Attempts to install the indicated ckpt file and returns True if successful.
-
-        "weights" can be either a path-like object corresponding to a local .ckpt file
-        or a http/https URL pointing to a remote model.
-
-        "vae" is a Path or str object pointing to a ckpt or safetensors file to be used
-        as the VAE for this model.
-
-        "config" is the model config file to use with this ckpt file. It defaults to
-        v1-inference.yaml. If a URL is provided, the config will be downloaded.
-
-        You can optionally provide a model name and/or description. If not provided,
-        then these will be derived from the weight file name. If you provide a commit_to_conf
-        path to the configuration file, then the new entry will be committed to the
-        models.yaml file.
-
-        Return value is the name of the imported file, or None if an error occurred.
-        """
-        if str(weights).startswith(("http:", "https:")):
-            model_name = model_name or url_attachment_name(weights)
-
-        weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1")
-        config_path = self._resolve_path(config, "configs/stable-diffusion")
-
-        if weights_path is None or not weights_path.exists():
-            return
-        if config_path is None or not config_path.exists():
-            return
-
-        model_name = (
-            model_name or Path(weights).stem
-        )  # note this gives ugly pathnames if used on a URL without a Content-Disposition header
-        model_description = (
-            model_description or f"Imported stable diffusion weights file {model_name}"
-        )
-        new_config = dict(
-            weights=str(weights_path),
-            config=str(config_path),
-            description=model_description,
-            format="ckpt",
-            width=512,
-            height=512,
-        )
-        if vae:
-            new_config["vae"] = vae
-        self.add_model(model_name, new_config, True)
-        if commit_to_conf:
-            self.commit(commit_to_conf)
-        return model_name
-
     @classmethod
     def probe_model_type(self, checkpoint: dict) -> SDLegacyType:
         """
@@ -746,36 +730,18 @@ class ModelManager(object):
             )
             return

-        if convert:
-            diffuser_path = Path(
-                Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
-            )
-            model_name = self.convert_and_import(
-                model_path,
-                diffusers_path=diffuser_path,
-                vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
-                model_name=model_name,
-                model_description=description,
-                original_config_file=model_config_file,
-                commit_to_conf=commit_to_conf,
-            )
-        else:
-            model_name = self.import_ckpt_model(
-                model_path,
-                config=model_config_file,
-                model_name=model_name,
-                model_description=description,
-                vae=str(
-                    Path(
-                        Globals.root,
-                        "models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt",
-                    )
-                ),
-                commit_to_conf=commit_to_conf,
-            )
-        if commit_to_conf:
-            self.commit(commit_to_conf)
+        diffuser_path = Path(
+            Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
+        )
+        model_name = self.convert_and_import(
+            model_path,
+            diffusers_path=diffuser_path,
+            vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
+            model_name=model_name,
+            model_description=description,
+            original_config_file=model_config_file,
+            commit_to_conf=commit_to_conf,
+        )
         return model_name

     def convert_and_import(
@@ -800,7 +766,7 @@ class ModelManager(object):
         new_config = None

-        from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
+        from . import convert_ckpt_to_diffusers

         if diffusers_path.exists():
             print(
@@ -815,7 +781,7 @@ class ModelManager(object):
         # By passing the specified VAE to the conversion function, the autoencoder
         # will be built into the model rather than tacked on afterward via the config file
         vae_model = self._load_vae(vae) if vae else None
-        convert_ckpt_to_diffuser(
+        convert_ckpt_to_diffusers(
             ckpt_path,
             diffusers_path,
             extract_ema=True,

View File

@@ -13,9 +13,9 @@ from transformers import CLIPTokenizer, CLIPTextModel
 from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
-from ..devices import torch_dtype
+from ..util import torch_dtype
 from ..stable_diffusion import InvokeAIDiffuserComponent
-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals

 def get_tokenizer(model) -> CLIPTokenizer:
     # TODO remove legacy ckpt fallback handling

View File

@@ -23,16 +23,16 @@ class Restoration():
     # Face Restore Models
     def load_gfpgan(self, gfpgan_model_path):
-        from ldm.invoke.restoration.gfpgan import GFPGAN
+        from .gfpgan import GFPGAN
         return GFPGAN(gfpgan_model_path)

     def load_codeformer(self):
-        from ldm.invoke.restoration.codeformer import CodeFormerRestoration
+        from .codeformer import CodeFormerRestoration
         return CodeFormerRestoration()

     # Upscale Models
     def load_esrgan(self, esrgan_bg_tile=400):
-        from ldm.invoke.restoration.realesrgan import ESRGAN
+        from .realesrgan import ESRGAN
         esrgan = ESRGAN(esrgan_bg_tile)
         print('>> ESRGAN Initialized')
         return esrgan;

View File

@@ -3,7 +3,7 @@ import torch
 import numpy as np
 import warnings
 import sys
-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals

 pretrained_model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'

View File

@@ -5,7 +5,7 @@ from torch import nn, Tensor
 import torch.nn.functional as F
 from typing import Optional, List

-from ldm.invoke.restoration.vqgan_arch import *
+from .vqgan_arch import *
 from basicsr.utils import get_root_logger
 from basicsr.utils.registry import ARCH_REGISTRY

@@ -25,7 +25,6 @@ def calc_mean_std(feat, eps=1e-5):
     feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
     return feat_mean, feat_std
-
 def adaptive_instance_normalization(content_feat, style_feat):
     """Adaptive instance normalization.

View File

@@ -3,7 +3,7 @@ import warnings
 import os
 import sys
 import numpy as np
-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals
 from PIL import Image

View File

@@ -3,7 +3,7 @@ import warnings
 import numpy as np
 import os
-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals
 from PIL import Image
 from PIL.Image import Image as ImageType

View File

@@ -10,7 +10,7 @@ import traceback
 from typing import Callable
 from urllib import request, error as ul_error
 from huggingface_hub import HfFolder, hf_hub_url, ModelSearchArguments, ModelFilter, HfApi
-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals

 class HuggingFaceConceptsLibrary(object):
     def __init__(self, root=None):

View File

@@ -26,11 +26,11 @@ from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 from typing_extensions import ParamSpec

-from ldm.invoke.globals import Globals
-from ..stable_diffusion.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, AttentionMapSaver
-from ..stable_diffusion.textual_inversion_manager import TextualInversionManager
-from ..stable_diffusion.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
-from ..devices import normalize_device, CPU_DEVICE
+from invokeai.backend.globals import Globals
+from .diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, AttentionMapSaver
+from .textual_inversion_manager import TextualInversionManager
+from .offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
+from ..util import normalize_device, CPU_DEVICE
 from compel import EmbeddingsProvider

 @dataclass

View File

@@ -15,7 +15,7 @@ from torch import nn
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
 from diffusers.models.cross_attention import AttnProcessor
-from ...devices import torch_dtype
+from ...util import torch_dtype

 class CrossAttentionType(enum.Enum):

View File

@@ -23,7 +23,7 @@ from omegaconf import ListConfig
 import urllib

 from ..textual_inversion_manager import TextualInversionManager
-from ...util import (
+from ...util.util import (
     log_txt_as_img,
     exists,
     default,

View File

@@ -4,7 +4,7 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ...devices import choose_torch_device
+from ...util import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from .sampler import Sampler
 from ..diffusionmodules.util import noise_like

View File

@@ -7,7 +7,7 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ...devices import choose_torch_device
+from ...util import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ..diffusionmodules.util import (

View File

@@ -8,7 +8,7 @@ import torch
 from diffusers.models.cross_attention import AttnProcessor
 from typing_extensions import TypeAlias

-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals

 from .cross_attention_control import Arguments, \
     restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \
     CrossAttentionType, SwapCrossAttnContext

View File

@@ -15,7 +15,7 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat

-from ...util import instantiate_from_config
+from ...util.util import instantiate_from_config

 def make_beta_schedule(

View File

@@ -10,7 +10,7 @@ from einops import repeat
 from transformers import CLIPTokenizer, CLIPTextModel

 from ldm.invoke.devices import choose_torch_device
-from ldm.invoke.globals import global_cache_dir
+from invokeai.backend.globals import global_cache_dir
 from ldm.modules.x_transformer import (
     Encoder,
     TransformerWrapper,

View File

@@ -0,0 +1,4 @@
+'''
+Initialization file for invokeai.backend.training
+'''
+from .textual_inversion_training import do_textual_inversion_training, parse_args

View File

@@ -48,7 +48,7 @@ from transformers import CLIPTextModel, CLIPTokenizer
 # invokeai stuff
 from ldm.invoke.args import ArgFormatter, PagingArgumentParser
-from ldm.invoke.globals import Globals, global_cache_dir
+from invokeai.backend.globals import Globals, global_cache_dir

 if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
     PIL_INTERPOLATION = {

View File

@@ -0,0 +1,18 @@
+'''
+Initialization file for invokeai.backend.util
+'''
+from .devices import (choose_torch_device,
+                      choose_precision,
+                      normalize_device,
+                      torch_dtype,
+                      CPU_DEVICE,
+                      CUDA_DEVICE,
+                      MPS_DEVICE,
+                      )
+from .util import (ask_user,
+                   download_with_resume,
+                   instantiate_from_config,
+                   url_attachment_name,
+                   )
+from .log import write_log
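With this facade in place, callers import device helpers from one location instead of ldm.invoke.devices. A usage sketch (mine, not from the commit; choose_torch_device taking no arguments matches the devices.py hunk below, while torch_dtype accepting a device is an assumption about the re-exported signature):

import torch

from invokeai.backend.util import CPU_DEVICE, choose_torch_device, torch_dtype

device = choose_torch_device()   # cuda, mps, or cpu, depending on the host
dtype = torch_dtype(device)      # assumed: half precision on cuda, float32 elsewhere
noise = torch.randn(1, 4, 64, 64, device=device, dtype=dtype)
print(f"sampling on {device} at {dtype}; CPU fallback is {CPU_DEVICE}")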

View File

@@ -5,9 +5,11 @@ from contextlib import nullcontext
 import torch
 from torch import autocast

-from ldm.invoke.globals import Globals
+from invokeai.backend.globals import Globals

 CPU_DEVICE = torch.device("cpu")
+CUDA_DEVICE = torch.device("cuda")
+MPS_DEVICE = torch.device("mps")

 def choose_torch_device() -> torch.device:
     '''Convenience routine for guessing which GPU device to run model on'''

View File

@@ -0,0 +1,4 @@
+'''
+Initialization file for the web backend.
+'''
+from .invoke_ai_web_server import InvokeAIWebServer

View File

@@ -12,7 +12,7 @@ from threading import Event
 from uuid import uuid4

 import eventlet
-import invokeai.frontend.dist as frontend
+import invokeai.frontend.web.dist as frontend
 from PIL import Image
 from PIL.Image import Image as ImageType
 from compel.prompt_parser import Blend
@@ -20,24 +20,24 @@ from flask import Flask, redirect, send_from_directory, request, make_response
 from flask_socketio import SocketIO
 from werkzeug.utils import secure_filename

-from invokeai.backend.modules.get_canvas_generation_mode import (
+from .modules.get_canvas_generation_mode import (
     get_canvas_generation_mode,
 )
 from .modules.parameters import parameters_to_command
-from .prompting import (get_tokens_for_prompt_object,
+from ..prompting import (get_tokens_for_prompt_object,
                         get_prompt_structure,
                         get_tokenizer
                         )
-from .image_util import PngWriter, retrieve_metadata
-from .generator import infill_methods
-from .stable_diffusion import PipelineIntermediateState
-from ldm.generate import Generate
-from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
-from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir,
+from ..image_util import PngWriter, retrieve_metadata
+from ..generator import infill_methods
+from ..stable_diffusion import PipelineIntermediateState
+from .. import Generate
+from ..args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
+from ..globals import ( Globals, global_converted_ckpts_dir,
                         global_models_dir
                         )
-from ldm.invoke.merge_diffusers import merge_diffusion_models
+from ..model_management import merge_diffusion_models

 # Loading Arguments
 opt = Args()
@@ -236,7 +236,7 @@ class InvokeAIWebServer:
             sys.exit(0)
         else:
             useSSL = args.certfile or args.keyfile
-            print(">> Started Invoke AI Web Server!")
+            print(">> Started Invoke AI Web Server")
             if self.host == "0.0.0.0":
                 print(
                     f"Point your browser at http{'s' if useSSL else ''}://localhost:{self.port} or use the host's DNS name or IP address."

View File

@@ -1,4 +1,4 @@
-from invokeai.backend.modules.parse_seed_weights import parse_seed_weights
+from .parse_seed_weights import parse_seed_weights
 import argparse

 SAMPLER_CHOICES = [

View File

Binary image file (2.7 KiB before and after)

View File

Binary image file (292 KiB before and after)

View File

Binary image file (9.5 KiB before and after)

View File

Binary image file (3.4 KiB before and after)

View File

@@ -18,21 +18,22 @@ import pyparsing  # type: ignore

 import invokeai.version
-from ..generate import Generate
-from .args import (Args, dream_cmd_from_png, metadata_dumps,
-                   metadata_from_png)
-from invokeai.backend.stable_diffusion import PipelineIntermediateState
-from invokeai.backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata
-from invokeai.backend import ModelManager
-from .globals import Globals
-from .log import write_log
+from ...backend import Generate
+from ...backend.args import (Args,
+                             dream_cmd_from_png,
+                             metadata_dumps,
+                             metadata_from_png)
+from ...backend.stable_diffusion import PipelineIntermediateState
+from ...backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata
+from ...backend import ModelManager
+from ...backend.globals import Globals
+from ...backend.util import write_log
 from .readline import Completer, get_completer
-from invokeai.backend.util import url_attachment_name
+from ...backend.util import url_attachment_name

 # global used in multiple functions (fix)
 infile = None

 def main():
     """Initialize command-line parsers and the diffusion model"""
     global infile
@@ -82,8 +83,6 @@ def main():
     # when the frozen CLIP tokenizer is imported
     import transformers  # type: ignore

-    from ldm.generate import Generate
-
     transformers.logging.set_verbosity_error()
     import diffusers
@@ -1021,7 +1020,7 @@ def get_next_command(infile=None, model_name="no model") -> str:  # command string
 def invoke_ai_web_server_loop(gen: Generate, gfpgan, codeformer, esrgan):
     print("\n* --web was specified, starting web server...")
-    from invokeai.backend.invoke_ai_web_server import InvokeAIWebServer
+    from invokeai.backend.web import InvokeAIWebServer

     # Change working directory to the stable-diffusion directory
     os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
@@ -1075,7 +1074,7 @@ def load_face_restoration(opt):
     try:
         gfpgan, codeformer, esrgan = None, None, None
         if opt.restore or opt.esrgan:
-            from ldm.invoke.restoration import Restoration
+            from invokeai.backend.restoration import Restoration
             restoration = Restoration()
             if opt.restore:

View File

@@ -0,0 +1,4 @@
+'''
+Initialization file for invokeai.frontend.CLI
+'''
+from .CLI import main as invokeai_command_line_interface

View File

@@ -11,9 +11,9 @@ seeds:
 import os
 import re
 import atexit

-from ldm.invoke.args import Args
-from ldm.invoke.globals import Globals
-from invokeai.backend.stable_diffusion import HuggingFaceConceptsLibrary
+from ...backend.args import Args
+from ...backend.globals import Globals
+from ...backend.stable_diffusion import HuggingFaceConceptsLibrary

 # ---------------readline utilities---------------------
 try:

View File

@@ -0,0 +1,3 @@
+'''
+Initialization file for invokeai.frontend
+'''

View File

@@ -0,0 +1,7 @@
+'''
+Initialization file for invokeai.frontend.config
+'''
+from .model_install import main as invokeai_model_install
+from .invokeai_configure import main as invokeai_configure
+from .invokeai_update import main as invokeai_update

View File

@@ -0,0 +1,4 @@
+'''
+Wrapper for invokeai.backend.configure.invokeai_configure
+'''
+from ...backend.config.invokeai_configure import main

View File

@@ -13,7 +13,7 @@ from rich.style import Style
 from rich.syntax import Syntax
 from rich.text import Text

-from ldm.invoke import __version__
+from invokeai.version import __version__

 INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
 INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"

View File

@@ -22,13 +22,13 @@ from npyscreen import widget
 from omegaconf import OmegaConf
 from shutil import get_terminal_size

-from invokeai.backend.devices import choose_precision, choose_torch_device
-from ..globals import Globals, global_config_dir
-from .model_install_backend import (Dataset_path, default_config_file,
+from ...backend.util import choose_precision, choose_torch_device
+from invokeai.backend.globals import Globals, global_config_dir
+from ...backend.config.model_install_backend import (Dataset_path, default_config_file,
                                     default_dataset, get_root,
                                     install_requested_models,
                                     recommended_datasets,
                                     )
 from .widgets import (MultiSelectColumns, TextBox,
                       OffsetButtonPress, CenteredTitleText,
                       set_min_terminal_size,

View File

@@ -0,0 +1,4 @@
+'''
+Initialization file for invokeai.frontend.merge
+'''
+from .merge_diffusers import main as invokeai_merge_diffusers

View File

@@ -20,10 +20,10 @@ from diffusers import logging as dlogging
 from npyscreen import widget
 from omegaconf import OmegaConf

-from ldm.invoke.config.widgets import FloatTitleSlider
-from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
+from ...frontend.config.widgets import FloatTitleSlider
+from ...backend.globals import (Globals, global_cache_dir, global_config_file,
                                 global_models_dir, global_set_root)
-from invokeai.backend import ModelManager
+from ...backend.model_management import ModelManager

 DEST_MERGED_MODEL_DIR = "merged_models"
@@ -199,13 +199,13 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         self.add_widget_intelligent(
             npyscreen.FixedText,
             color="CONTROL",
-            value=f"Select two models to merge and optionally a third.",
+            value="Select two models to merge and optionally a third.",
             editable=False,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
             color="CONTROL",
-            value=f"Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
+            value="Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
             editable=False,
         )
         self.add_widget_intelligent(
@@ -453,9 +453,9 @@ def main():
                 "** You need to have at least two diffusers models defined in models.yaml in order to merge"
             )
         else:
-            print(f"** Not enough room for the user interface. Try making this window larger.")
+            print("** Not enough room for the user interface. Try making this window larger.")
         sys.exit(-1)
-    except Exception as e:
+    except Exception:
         print(">> An error occurred:")
         traceback.print_exc()
         sys.exit(-1)

View File

@@ -0,0 +1,5 @@
+'''
+Initialization file for invokeai.frontend.training
+'''
+from .textual_inversion import main as invokeai_textual_inversion

View File

@@ -20,8 +20,8 @@ import npyscreen
 from npyscreen import widget
 from omegaconf import OmegaConf

-from ldm.invoke.globals import Globals, global_set_root
-from ldm.invoke.training.textual_inversion_training import (
+from invokeai.backend.globals import Globals, global_set_root
+from ...backend.training import (
     do_textual_inversion_training,
     parse_args,
 )

Some files were not shown because too many files have changed in this diff.