all files migrated; tweaks needed

Lincoln Stein 2023-03-03 00:02:15 -05:00
parent 3f0b0f3250
commit 6a990565ff
496 changed files with 276 additions and 934 deletions

.gitignore
View File

@@ -198,7 +198,7 @@ checkpoints
.DS_Store
# Let the frontend manage its own gitignore
!invokeai/frontend/*
!invokeai/frontend/web/*
# Scratch folder
.scratch/

View File

@@ -3,11 +3,10 @@ import os
import sys
import traceback
from invokeai.backend.models import ModelManager
from invokeai.backend import ModelManager, Generate
from ...globals import Globals
from ....generate import Generate
import ldm.invoke
import invokeai.version
# TODO: most of this code should be split into individual services as the Generate.py code is deprecated
def get_generate(args, config) -> Generate:
@@ -16,7 +15,7 @@ def get_generate(args, config) -> Generate:
if not os.path.exists(config_file):
report_model_error(args, FileNotFoundError(f"The file {config_file} could not be found."))
print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
print(f'>> {invokeai.version.__app_name__}, version {invokeai.version.__version__}')
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
# these two lines prevent a horrible warning message from appearing
@@ -160,7 +159,7 @@ def report_model_error(opt:Namespace, e:Exception):
# Temporary initializer for Generate until we migrate off of it
def old_get_generate(args, config) -> Generate:
# TODO: Remove the need for globals
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
# alert - setting globals here
Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))

View File

@@ -3,8 +3,7 @@ from .invocation_queue import InvocationQueueABC
from .item_storage import ItemStorageABC
from .image_storage import ImageStorageBase
from .events import EventServiceBase
from ....generate import Generate
from invokeai.backend import Generate
class InvocationServices():
"""Services that can be used by invocations"""

View File

@@ -1,8 +1,8 @@
'''
Initialization file for invokeai.backend
'''
# this is causing circular import issues
# from .invoke_ai_web_server import InvokeAIWebServer
from .model_manager import ModelManager
from .model_management import ModelManager
from .generate import Generate

View File

@@ -98,8 +98,8 @@ from typing import List
from invokeai.backend.image_util import retrieve_metadata
import invokeai.version
from ldm.invoke.globals import Globals
from invokeai.backend.prompting import split_weighted_subprompts
from .globals import Globals
from .prompting import split_weighted_subprompts
APP_ID = invokeai.version.__app_id__
APP_NAME = invokeai.version.__app_name__

View File

@@ -39,14 +39,14 @@ import invokeai.configs as configs
from ..args import PRECISION_CHOICES, Args
from ..globals import Globals, global_config_dir, global_config_file, global_cache_dir
from .model_install import addModelsForm, process_and_execute
from ...frontend.config.model_install import addModelsForm, process_and_execute
from .model_install_backend import (
default_dataset,
download_from_hf,
recommended_datasets,
hf_download_with_resume,
)
from .widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size
from ...frontend.config.widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size
warnings.filterwarnings("ignore")

View File

@@ -18,9 +18,9 @@ from tqdm import tqdm
from typing import List
import invokeai.configs as configs
from invokeai.backend.stable_diffusion import StableDiffusionGeneratorPipeline
from ..stable_diffusion import StableDiffusionGeneratorPipeline
from ..globals import Globals, global_cache_dir, global_config_dir
from invokeai.backend import ModelManager
from ..model_management import ModelManager
warnings.filterwarnings("ignore")

View File

@@ -25,17 +25,18 @@ from omegaconf import OmegaConf
from PIL import Image, ImageOps
from pytorch_lightning import logging, seed_everything
from invokeai.backend import ModelManager
from invokeai.backend.prompting import get_uc_and_c_and_ec
from invokeai.backend.stable_diffusion import (DDIMSampler, KSampler, PLMSSampler)
from invokeai.backend.generator import infill_methods
from invokeai.backend.stable_diffusion.concepts_lib import HuggingFaceConceptsLibrary
from invokeai.backend.devices import choose_precision, choose_torch_device
from invokeai.backend.image_util import InitImageResizer, PngWriter, Txt2Mask
from . import ModelManager
from .prompting import get_uc_and_c_and_ec
from .stable_diffusion import (DDIMSampler, KSampler, PLMSSampler, HuggingFaceConceptsLibrary)
from .generator import infill_methods
from .util import choose_precision, choose_torch_device
from .image_util import (InitImageResizer,
PngWriter,
Txt2Mask,
configure_model_padding)
from ldm.invoke.globals import Globals, global_cache_dir
from ldm.invoke.args import metadata_from_png
from ldm.invoke.seamless import configure_model_padding
from .globals import Globals, global_cache_dir
from .args import metadata_from_png
def fix_func(orig):
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():

View File

@@ -23,7 +23,7 @@ from tqdm import trange
import invokeai.assets.web as web_assets
from ..stable_diffusion.diffusion.ddpm import DiffusionWrapper
from ..util import rand_perlin_2d
from ..util.util import rand_perlin_2d
downsampling = 8
CAUTION_IMG = 'caution.png'

View File

@@ -1,5 +1,5 @@
'''
ldm.invoke.globals defines a small number of global variables that would
invokeai.backend.globals defines a small number of global variables that would
otherwise have to be passed through long and complex call chains.
It defines a Namespace object named "Globals" that contains
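
A minimal usage sketch of the relocated module (the `global_cache_dir` helper appears elsewhere in this diff; its exact signature here is an assumption):

    from invokeai.backend.globals import Globals, global_cache_dir

    Globals.root = "/home/me/invokeai"        # hypothetical runtime root
    cache = global_cache_dir("transformers")  # assumed: a cache path under Globals.root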

View File

@@ -9,6 +9,7 @@ from .pngwriter import (PngWriter,
retrieve_metadata,
write_metadata,
)
from .seamless import configure_model_padding
def debug_image(
debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False

View File

@@ -4,7 +4,7 @@ wraps the actual patchmatch object. It respects the global
"try_patchmatch" attribute, so that patchmatch loading can
be suppressed or deferred
'''
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
import numpy as np
class PatchMatch:
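
A hedged sketch of the suppress/defer behavior the docstring describes; the re-export path and the `patchmatch_available()` helper name are assumptions:

    from invokeai.backend.globals import Globals
    from invokeai.backend.image_util import PatchMatch  # assumed re-export path

    Globals.try_patchmatch = False             # suppress loading the native library
    print(PatchMatch.patchmatch_available())   # assumed helper; should now report False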

View File

@@ -32,7 +32,7 @@ import numpy as np
from transformers import AutoProcessor, CLIPSegForImageSegmentation
from PIL import Image, ImageOps
from torchvision import transforms
from ldm.invoke.globals import global_cache_dir
from invokeai.backend.globals import global_cache_dir
CLIPSEG_MODEL = 'CIDAS/clipseg-rd64-refined'
CLIPSEG_SIZE = 352

View File

@@ -0,0 +1,8 @@
'''
Initialization file for invokeai.backend.model_management
'''
from .model_manager import ModelManager
from .convert_ckpt_to_diffusers import (load_pipeline_from_original_stable_diffusion_ckpt,
convert_ckpt_to_diffusers)
from ...frontend.merge.merge_diffusers import (merge_diffusion_models,
merge_diffusion_models_and_commit)
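
With these re-exports in place, callers elsewhere in this diff import from the package root rather than from the individual modules, e.g.:

    from invokeai.backend.model_management import ModelManager, convert_ckpt_to_diffusers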

View File

@@ -21,11 +21,11 @@ import re
import torch
import warnings
from pathlib import Path
from ldm.invoke.globals import (
from invokeai.backend.globals import (
global_cache_dir,
global_config_dir,
)
from invokeai.models import ModelManager, SDLegacyType
from .model_manager import ModelManager, SDLegacyType
from safetensors.torch import load_file
from typing import Union
@@ -56,7 +56,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import is_safetensors_available
from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig
from invokeai.generator import StableDiffusionGeneratorPipeline
from ..stable_diffusion import StableDiffusionGeneratorPipeline
def shave_segments(path, n_shave_prefix_segments=1):
"""
@@ -1014,7 +1014,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
return pipe
def convert_ckpt_to_diffuser(
def convert_ckpt_to_diffusers(
checkpoint_path:Union[str,Path],
dump_path:Union[str,Path],
**kwargs,

View File

@@ -31,14 +31,13 @@ from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from picklescan.scanner import scan_file_path
from .devices import CPU_DEVICE
from ldm.invoke.globals import Globals, global_cache_dir
from .util import (
from ..util import CPU_DEVICE
from invokeai.backend.globals import Globals, global_cache_dir
from ..util import (
ask_user,
download_with_resume,
url_attachment_name,
)
from .stable_diffusion import StableDiffusionGeneratorPipeline
from ..stable_diffusion import StableDiffusionGeneratorPipeline
class SDLegacyType(Enum):
V1 = 1
@@ -416,6 +415,51 @@ class ModelManager(object):
return pipeline, width, height, model_hash
def _load_ckpt_model(self, model_name, mconfig):
config = mconfig.config
weights = mconfig.weights
vae = mconfig.get("vae")
width = mconfig.width
height = mconfig.height
if not os.path.isabs(config):
config = os.path.join(Globals.root, config)
if not os.path.isabs(weights):
weights = os.path.normpath(os.path.join(Globals.root, weights))
# Convert to diffusers and return a diffusers pipeline
print(
f">> Converting legacy checkpoint {model_name} into a diffusers model..."
)
from . import load_pipeline_from_original_stable_diffusion_ckpt
self.offload_model(self.current_model)
if vae_config := self._choose_diffusers_vae(model_name):
vae = self._load_vae(vae_config)
if self._has_cuda():
torch.cuda.empty_cache()
pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
checkpoint_path=weights,
original_config_file=config,
vae=vae,
return_generator_pipeline=True,
precision=torch.float16
if self.precision == "float16"
else torch.float32,
)
if self.sequential_offload:
pipeline.enable_offload_submodels(self.device)
else:
pipeline.to(self.device)
return (
pipeline,
width,
height,
"NOHASH",
)
def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path:
if isinstance(model_name, DictConfig) or isinstance(model_name, dict):
mconfig = model_name
@@ -519,66 +563,6 @@ class ModelManager(object):
self.commit(commit_to_conf)
return model_name
def import_ckpt_model(
self,
weights: Union[str, Path],
config: Union[str, Path] = "configs/stable-diffusion/v1-inference.yaml",
vae: Union[str, Path] = None,
model_name: str = None,
model_description: str = None,
commit_to_conf: Path = None,
) -> str:
"""
Attempts to install the indicated ckpt file and returns True if successful.
"weights" can be either a path-like object corresponding to a local .ckpt file
or a http/https URL pointing to a remote model.
"vae" is a Path or str object pointing to a ckpt or safetensors file to be used
as the VAE for this model.
"config" is the model config file to use with this ckpt file. It defaults to
v1-inference.yaml. If a URL is provided, the config will be downloaded.
You can optionally provide a model name and/or description. If not provided,
then these will be derived from the weight file name. If you provide a commit_to_conf
path to the configuration file, then the new entry will be committed to the
models.yaml file.
Return value is the name of the imported file, or None if an error occurred.
"""
if str(weights).startswith(("http:", "https:")):
model_name = model_name or url_attachment_name(weights)
weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1")
config_path = self._resolve_path(config, "configs/stable-diffusion")
if weights_path is None or not weights_path.exists():
return
if config_path is None or not config_path.exists():
return
model_name = (
model_name or Path(weights).stem
) # note this gives ugly pathnames if used on a URL without a Content-Disposition header
model_description = (
model_description or f"Imported stable diffusion weights file {model_name}"
)
new_config = dict(
weights=str(weights_path),
config=str(config_path),
description=model_description,
format="ckpt",
width=512,
height=512,
)
if vae:
new_config["vae"] = vae
self.add_model(model_name, new_config, True)
if commit_to_conf:
self.commit(commit_to_conf)
return model_name
@classmethod
def probe_model_type(self, checkpoint: dict) -> SDLegacyType:
"""
@@ -746,36 +730,18 @@ class ModelManager(object):
)
return
if convert:
diffuser_path = Path(
Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
)
model_name = self.convert_and_import(
model_path,
diffusers_path=diffuser_path,
vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
model_name=model_name,
model_description=description,
original_config_file=model_config_file,
commit_to_conf=commit_to_conf,
)
else:
model_name = self.import_ckpt_model(
model_path,
config=model_config_file,
model_name=model_name,
model_description=description,
vae=str(
Path(
Globals.root,
"models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt",
)
),
commit_to_conf=commit_to_conf,
)
if commit_to_conf:
self.commit(commit_to_conf)
diffuser_path = Path(
Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
)
model_name = self.convert_and_import(
model_path,
diffusers_path=diffuser_path,
vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
model_name=model_name,
model_description=description,
original_config_file=model_config_file,
commit_to_conf=commit_to_conf,
)
return model_name
def convert_and_import(
@@ -800,7 +766,7 @@ class ModelManager(object):
new_config = None
from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
from . import convert_ckpt_to_diffusers
if diffusers_path.exists():
print(
@@ -815,7 +781,7 @@ class ModelManager(object):
# By passing the specified VAE to the conversion function, the autoencoder
# will be built into the model rather than tacked on afterward via the config file
vae_model = self._load_vae(vae) if vae else None
convert_ckpt_to_diffuser(
convert_ckpt_to_diffusers(
ckpt_path,
diffusers_path,
extract_ema=True,

View File

@@ -13,9 +13,9 @@ from transformers import CLIPTokenizer, CLIPTextModel
from compel import Compel
from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
from ..devices import torch_dtype
from ..util import torch_dtype
from ..stable_diffusion import InvokeAIDiffuserComponent
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
def get_tokenizer(model) -> CLIPTokenizer:
# TODO remove legacy ckpt fallback handling

View File

@@ -23,16 +23,16 @@ class Restoration():
# Face Restore Models
def load_gfpgan(self, gfpgan_model_path):
from ldm.invoke.restoration.gfpgan import GFPGAN
from .gfpgan import GFPGAN
return GFPGAN(gfpgan_model_path)
def load_codeformer(self):
from ldm.invoke.restoration.codeformer import CodeFormerRestoration
from .codeformer import CodeFormerRestoration
return CodeFormerRestoration()
# Upscale Models
def load_esrgan(self, esrgan_bg_tile=400):
from ldm.invoke.restoration.realesrgan import ESRGAN
from .realesrgan import ESRGAN
esrgan = ESRGAN(esrgan_bg_tile)
print('>> ESRGAN Initialized')
return esrgan
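
A hedged usage sketch of the lazy loaders above (import path per this commit's layout; the GFPGAN weights path is hypothetical):

    from invokeai.backend.restoration import Restoration

    restoration = Restoration()
    gfpgan = restoration.load_gfpgan("models/gfpgan/GFPGANv1.4.pth")  # hypothetical path
    esrgan = restoration.load_esrgan(esrgan_bg_tile=400)              # default tile size shown above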

View File

@@ -3,7 +3,7 @@ import torch
import numpy as np
import warnings
import sys
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
pretrained_model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'

View File

@@ -5,7 +5,7 @@ from torch import nn, Tensor
import torch.nn.functional as F
from typing import Optional, List
from ldm.invoke.restoration.vqgan_arch import *
from .vqgan_arch import *
from basicsr.utils import get_root_logger
from basicsr.utils.registry import ARCH_REGISTRY
@@ -25,7 +25,6 @@ def calc_mean_std(feat, eps=1e-5):
feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
return feat_mean, feat_std
def adaptive_instance_normalization(content_feat, style_feat):
"""Adaptive instance normalization.

View File

@@ -3,7 +3,7 @@ import warnings
import os
import sys
import numpy as np
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
from PIL import Image

View File

@@ -3,7 +3,7 @@ import warnings
import numpy as np
import os
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
from PIL import Image
from PIL.Image import Image as ImageType

View File

@@ -10,7 +10,7 @@ import traceback
from typing import Callable
from urllib import request, error as ul_error
from huggingface_hub import HfFolder, hf_hub_url, ModelSearchArguments, ModelFilter, HfApi
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
class HuggingFaceConceptsLibrary(object):
def __init__(self, root=None):

View File

@@ -26,11 +26,11 @@ from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from typing_extensions import ParamSpec
from ldm.invoke.globals import Globals
from ..stable_diffusion.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, AttentionMapSaver
from ..stable_diffusion.textual_inversion_manager import TextualInversionManager
from ..stable_diffusion.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
from ..devices import normalize_device, CPU_DEVICE
from invokeai.backend.globals import Globals
from .diffusion import InvokeAIDiffuserComponent, PostprocessingSettings, AttentionMapSaver
from .textual_inversion_manager import TextualInversionManager
from .offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
from ..util import normalize_device, CPU_DEVICE
from compel import EmbeddingsProvider
@dataclass

View File

@@ -15,7 +15,7 @@ from torch import nn
from compel.cross_attention_control import Arguments
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.cross_attention import AttnProcessor
from ...devices import torch_dtype
from ...util import torch_dtype
class CrossAttentionType(enum.Enum):

View File

@@ -23,7 +23,7 @@ from omegaconf import ListConfig
import urllib
from ..textual_inversion_manager import TextualInversionManager
from ...util import (
from ...util.util import (
log_txt_as_img,
exists,
default,

View File

@@ -4,7 +4,7 @@ import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from ...devices import choose_torch_device
from ...util import choose_torch_device
from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
from .sampler import Sampler
from ..diffusionmodules.util import noise_like

View File

@@ -7,7 +7,7 @@ import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from ...devices import choose_torch_device
from ...util import choose_torch_device
from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
from ..diffusionmodules.util import (

View File

@@ -8,7 +8,7 @@ import torch
from diffusers.models.cross_attention import AttnProcessor
from typing_extensions import TypeAlias
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
from .cross_attention_control import Arguments, \
restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \
CrossAttentionType, SwapCrossAttnContext

View File

@@ -15,7 +15,7 @@ import torch.nn as nn
import numpy as np
from einops import repeat
from ...util import instantiate_from_config
from ...util.util import instantiate_from_config
def make_beta_schedule(

View File

@@ -10,7 +10,7 @@ from einops import repeat
from transformers import CLIPTokenizer, CLIPTextModel
from ldm.invoke.devices import choose_torch_device
from ldm.invoke.globals import global_cache_dir
from invokeai.backend.globals import global_cache_dir
from ldm.modules.x_transformer import (
Encoder,
TransformerWrapper,

View File

@@ -0,0 +1,4 @@
'''
Initialization file for invokeai.backend.training
'''
from .textual_inversion_training import do_textual_inversion_training, parse_args

View File

@@ -48,7 +48,7 @@ from transformers import CLIPTextModel, CLIPTokenizer
# invokeai stuff
from ldm.invoke.args import ArgFormatter, PagingArgumentParser
from ldm.invoke.globals import Globals, global_cache_dir
from invokeai.backend.globals import Globals, global_cache_dir
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {

View File

@@ -0,0 +1,18 @@
'''
Initialization file for invokeai.backend.util
'''
from .devices import (choose_torch_device,
choose_precision,
normalize_device,
torch_dtype,
CPU_DEVICE,
CUDA_DEVICE,
MPS_DEVICE,
)
from .util import (ask_user,
download_with_resume,
instantiate_from_config,
url_attachment_name,
)
from .log import write_log
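
A quick sketch of the consolidated helper surface this file now exposes (the return values noted are assumptions based on the names):

    from invokeai.backend.util import choose_torch_device, choose_precision, torch_dtype

    device = choose_torch_device()        # cuda, mps, or cpu
    precision = choose_precision(device)  # assumed to return e.g. "float16" on cuda
    dtype = torch_dtype(device)           # the matching torch.dtype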

View File

@@ -5,9 +5,11 @@ from contextlib import nullcontext
import torch
from torch import autocast
from ldm.invoke.globals import Globals
from invokeai.backend.globals import Globals
CPU_DEVICE = torch.device("cpu")
CUDA_DEVICE = torch.device("cuda")
MPS_DEVICE = torch.device("mps")
def choose_torch_device() -> torch.device:
'''Convenience routine for guessing which GPU device to run model on'''

View File

@@ -0,0 +1,4 @@
'''
Initialization file for the web backend.
'''
from .invoke_ai_web_server import InvokeAIWebServer

View File

@@ -12,7 +12,7 @@ from threading import Event
from uuid import uuid4
import eventlet
import invokeai.frontend.dist as frontend
import invokeai.frontend.web.dist as frontend
from PIL import Image
from PIL.Image import Image as ImageType
from compel.prompt_parser import Blend
@@ -20,24 +20,24 @@ from flask import Flask, redirect, send_from_directory, request, make_response
from flask_socketio import SocketIO
from werkzeug.utils import secure_filename
from invokeai.backend.modules.get_canvas_generation_mode import (
from .modules.get_canvas_generation_mode import (
get_canvas_generation_mode,
)
from .modules.parameters import parameters_to_command
from .prompting import (get_tokens_for_prompt_object,
get_prompt_structure,
get_tokenizer
)
from .image_util import PngWriter, retrieve_metadata
from .generator import infill_methods
from .stable_diffusion import PipelineIntermediateState
from ..prompting import (get_tokens_for_prompt_object,
get_prompt_structure,
get_tokenizer
)
from ..image_util import PngWriter, retrieve_metadata
from ..generator import infill_methods
from ..stable_diffusion import PipelineIntermediateState
from ldm.generate import Generate
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir,
global_models_dir
)
from ldm.invoke.merge_diffusers import merge_diffusion_models
from .. import Generate
from ..args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ..globals import ( Globals, global_converted_ckpts_dir,
global_models_dir
)
from ..model_management import merge_diffusion_models
# Loading Arguments
opt = Args()
@@ -236,7 +236,7 @@ class InvokeAIWebServer:
sys.exit(0)
else:
useSSL = args.certfile or args.keyfile
print(">> Started Invoke AI Web Server!")
print(">> Started Invoke AI Web Server")
if self.host == "0.0.0.0":
print(
f"Point your browser at http{'s' if useSSL else ''}://localhost:{self.port} or use the host's DNS name or IP address."

View File

@@ -1,4 +1,4 @@
from invokeai.backend.modules.parse_seed_weights import parse_seed_weights
from .parse_seed_weights import parse_seed_weights
import argparse
SAMPLER_CHOICES = [

View File

Binary image: 2.7 KiB (before and after)

View File

Binary image: 292 KiB (before and after)

View File

Binary image: 9.5 KiB (before and after)

View File

Binary image: 3.4 KiB (before and after)

View File

@@ -18,21 +18,22 @@ import pyparsing # type: ignore
import invokeai.version
from ..generate import Generate
from .args import (Args, dream_cmd_from_png, metadata_dumps,
from ...backend import Generate
from ...backend.args import (Args,
dream_cmd_from_png,
metadata_dumps,
metadata_from_png)
from invokeai.backend.stable_diffusion import PipelineIntermediateState
from invokeai.backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata
from invokeai.backend import ModelManager
from .globals import Globals
from .log import write_log
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.image_util import make_grid, PngWriter, retrieve_metadata, write_metadata
from ...backend import ModelManager
from ...backend.globals import Globals
from ...backend.util import write_log
from .readline import Completer, get_completer
from invokeai.backend.util import url_attachment_name
from ...backend.util import url_attachment_name
# global used in multiple functions (fix)
infile = None
def main():
"""Initialize command-line parsers and the diffusion model"""
global infile
@@ -82,8 +83,6 @@ def main():
# when the frozen CLIP tokenizer is imported
import transformers # type: ignore
from ldm.generate import Generate
transformers.logging.set_verbosity_error()
import diffusers
@@ -1021,7 +1020,7 @@ def get_next_command(infile=None, model_name="no model") -> str: # command string
def invoke_ai_web_server_loop(gen: Generate, gfpgan, codeformer, esrgan):
print("\n* --web was specified, starting web server...")
from invokeai.backend.invoke_ai_web_server import InvokeAIWebServer
from invokeai.backend.web import InvokeAIWebServer
# Change working directory to the stable-diffusion directory
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
@@ -1075,7 +1074,7 @@ def load_face_restoration(opt):
try:
gfpgan, codeformer, esrgan = None, None, None
if opt.restore or opt.esrgan:
from ldm.invoke.restoration import Restoration
from invokeai.backend.restoration import Restoration
restoration = Restoration()
if opt.restore:

View File

@@ -0,0 +1,4 @@
'''
Initialization file for invokeai.frontend.CLI
'''
from .CLI import main as invokeai_command_line_interface

View File

@@ -11,9 +11,9 @@ seeds:
import os
import re
import atexit
from ldm.invoke.args import Args
from ldm.invoke.globals import Globals
from invokeai.backend.stable_diffusion import HuggingFaceConceptsLibrary
from ...backend.args import Args
from ...backend.globals import Globals
from ...backend.stable_diffusion import HuggingFaceConceptsLibrary
# ---------------readline utilities---------------------
try:

View File

@@ -0,0 +1,3 @@
'''
Initialization file for invokeai.frontend
'''

View File

@@ -0,0 +1,7 @@
'''
Initialization file for invokeai.frontend.config
'''
from .model_install import main as invokeai_model_install
from .invokeai_configure import main as invokeai_configure
from .invokeai_update import main as invokeai_update
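
These aliases gather the configure/install entry points under a single import, e.g.:

    from invokeai.frontend.config import invokeai_configure, invokeai_model_install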

View File

@@ -0,0 +1,4 @@
'''
Wrapper for invokeai.backend.configure.invokeai_configure
'''
from ...backend.config.invokeai_configure import main

View File

@@ -13,7 +13,7 @@ from rich.style import Style
from rich.syntax import Syntax
from rich.text import Text
from ldm.invoke import __version__
from invokeai.version import __version__
INVOKE_AI_SRC="https://github.com/invoke-ai/InvokeAI/archive"
INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"

View File

@@ -22,13 +22,13 @@ from npyscreen import widget
from omegaconf import OmegaConf
from shutil import get_terminal_size
from invokeai.backend.devices import choose_precision, choose_torch_device
from ..globals import Globals, global_config_dir
from .model_install_backend import (Dataset_path, default_config_file,
default_dataset, get_root,
install_requested_models,
recommended_datasets,
)
from ...backend.util import choose_precision, choose_torch_device
from invokeai.backend.globals import Globals, global_config_dir
from ...backend.config.model_install_backend import (Dataset_path, default_config_file,
default_dataset, get_root,
install_requested_models,
recommended_datasets,
)
from .widgets import (MultiSelectColumns, TextBox,
OffsetButtonPress, CenteredTitleText,
set_min_terminal_size,

View File

@@ -0,0 +1,4 @@
'''
Initialization file for invokeai.frontend.merge
'''
from .merge_diffusers import main as invokeai_merge_diffusers

View File

@@ -20,10 +20,10 @@ from diffusers import logging as dlogging
from npyscreen import widget
from omegaconf import OmegaConf
from ldm.invoke.config.widgets import FloatTitleSlider
from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
from ...frontend.config.widgets import FloatTitleSlider
from ...backend.globals import (Globals, global_cache_dir, global_config_file,
global_models_dir, global_set_root)
from invokeai.backend import ModelManager
from ...backend.model_management import ModelManager
DEST_MERGED_MODEL_DIR = "merged_models"
@@ -199,13 +199,13 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
self.add_widget_intelligent(
npyscreen.FixedText,
color="CONTROL",
value=f"Select two models to merge and optionally a third.",
value="Select two models to merge and optionally a third.",
editable=False,
)
self.add_widget_intelligent(
npyscreen.FixedText,
color="CONTROL",
value=f"Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
value="Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
editable=False,
)
self.add_widget_intelligent(
@@ -453,9 +453,9 @@ def main():
"** You need to have at least two diffusers models defined in models.yaml in order to merge"
)
else:
print(f"** Not enough room for the user interface. Try making this window larger.")
print("** Not enough room for the user interface. Try making this window larger.")
sys.exit(-1)
except Exception as e:
except Exception:
print(">> An error occurred:")
traceback.print_exc()
sys.exit(-1)

View File

@@ -0,0 +1,5 @@
'''
Initialization file for invokeai.frontend.training
'''
from .textual_inversion import main as invokeai_textual_inversion

View File

@@ -20,8 +20,8 @@ import npyscreen
from npyscreen import widget
from omegaconf import OmegaConf
from ldm.invoke.globals import Globals, global_set_root
from ldm.invoke.training.textual_inversion_training import (
from invokeai.backend.globals import Globals, global_set_root
from ...backend.training import (
do_textual_inversion_training,
parse_args,
)

Some files were not shown because too many files have changed in this diff.