move models and generator into backend

This commit is contained in:
Lincoln Stein 2023-02-28 08:32:11 -05:00
parent 1d77581d96
commit 5b6c61fc75
46 changed files with 25 additions and 24 deletions

View File

@ -1,6 +1,7 @@
'''
Initialization file for invokeai.backend
'''
from .invoke_ai_web_server import InvokeAIWebServer
# this is causing circular import issues
# from .invoke_ai_web_server import InvokeAIWebServer

View File

@ -27,11 +27,11 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from typing_extensions import ParamSpec
from ldm.invoke.globals import Globals
from invokeai.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
from ..models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
from ldm.modules.textual_inversion_manager import TextualInversionManager
from ldm.invoke.devices import normalize_device, CPU_DEVICE
from ldm.invoke.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
from ..models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ..models.diffusion import AttentionMapSaver
from compel import EmbeddingsProvider
@dataclass

View File

@ -7,7 +7,7 @@ from diffusers import logging
from .base import Generator
from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData
from ..models.diffusion.shared_invokeai_diffusion import PostprocessingSettings
from ..models.diffusion import PostprocessingSettings
class Img2Img(Generator):
def __init__(self, model, precision):

View File

@ -27,7 +27,7 @@ from invokeai.backend.modules.parameters import parameters_to_command
from ldm.generate import Generate
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.conditioning import get_tokens_for_prompt_object, get_prompt_structure, get_tokenizer
from ..generator import infill_methods, PipelineIntermediateState
from .generator import infill_methods, PipelineIntermediateState
from ldm.invoke.globals import ( Globals, global_converted_ckpts_dir,
global_models_dir
)

View File

@ -1,5 +1,5 @@
'''
Initialization file for the invokeai.models package
Initialization file for the invokeai.backend.models package
'''
from .model_manager import ModelManager, SDLegacyType
from .diffusion import InvokeAIDiffuserComponent

View File

@ -0,0 +1,6 @@
'''
Initialization file for invokeai.backend.models.diffusion
'''
from .shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
from .cross_attention_control import InvokeAICrossAttentionMixin
from .cross_attention_map_saving import AttentionMapSaver

View File

@ -32,13 +32,13 @@ from omegaconf.dictconfig import DictConfig
from picklescan.scanner import scan_file_path
from ldm.invoke.devices import CPU_DEVICE
from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
from ldm.invoke.globals import Globals, global_cache_dir
from ldm.util import (
ask_user,
download_with_resume,
url_attachment_name,
)
from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
class SDLegacyType(Enum):

View File

@ -1,4 +0,0 @@
'''
Initialization file for invokeai.models.diffusion
'''
from .shared_invokeai_diffusion import InvokeAIDiffuserComponent

View File

@ -27,9 +27,8 @@ from pytorch_lightning import logging, seed_everything
import ldm.invoke.conditioning
from invokeai.models import ModelManager
from invokeai.generator import infill_methods
from invokeai.models import (DDIMSampler, KSampler, PLMSSampler )
from invokeai.backend.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler)
from invokeai.backend.generator import infill_methods
from ldm.invoke.args import metadata_from_png
from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
from ldm.invoke.conditioning import get_uc_and_c_and_ec
@ -898,7 +897,7 @@ class Generate:
return self._load_generator(".inpaint", "Inpaint")
def _load_generator(self, module, class_name):
mn = f"invokeai.generator{module}"
mn = f"invokeai.backend.generator{module}"
cn = class_name
module = importlib.import_module(mn)
constructor = getattr(module, cn)

View File

@ -21,11 +21,11 @@ import ldm.invoke
from ..generate import Generate
from .args import (Args, dream_cmd_from_png, metadata_dumps,
metadata_from_png)
from invokeai.generator import PipelineIntermediateState
from invokeai.backend.generator import PipelineIntermediateState
from .globals import Globals
from .image_util import make_grid
from .log import write_log
from invokeai.models import ModelManager
from invokeai.backend.models import ModelManager
from .pngwriter import PngWriter, retrieve_metadata, write_metadata
from .readline import Completer, get_completer
from ..util import url_attachment_name
@ -1022,7 +1022,7 @@ def get_next_command(infile=None, model_name="no model") -> str: # command stri
def invoke_ai_web_server_loop(gen: Generate, gfpgan, codeformer, esrgan):
print("\n* --web was specified, starting web server...")
from invokeai.backend import InvokeAIWebServer
from invokeai.backend.invoke_ai_web_server import InvokeAIWebServer
# Change working directory to the stable-diffusion directory
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

View File

@ -14,7 +14,7 @@ from transformers import CLIPTokenizer, CLIPTextModel
from compel import Compel
from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
from .devices import torch_dtype
from invokeai.models import InvokeAIDiffuserComponent
from invokeai.backend.models import InvokeAIDiffuserComponent
from ldm.invoke.globals import Globals
def get_tokenizer(model) -> CLIPTokenizer:

View File

@ -18,9 +18,9 @@ from tqdm import tqdm
from typing import List
import invokeai.configs as configs
from invokeai.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
from invokeai.backend.generator import StableDiffusionGeneratorPipeline
from ..globals import Globals, global_cache_dir, global_config_dir
from invokeai.models import ModelManager
from invokeai.backend.models import ModelManager
warnings.filterwarnings("ignore")

View File

@ -23,11 +23,10 @@ from omegaconf import OmegaConf
from ldm.invoke.config.widgets import FloatTitleSlider
from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
global_models_dir, global_set_root)
from invokeai.models import ModelManager
from invokeai.backend.models import ModelManager
DEST_MERGED_MODEL_DIR = "merged_models"
def merge_diffusion_models(
model_ids_or_paths: List[Union[str, Path]],
alpha: float = 0.5,

View File

@ -7,7 +7,7 @@ import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from invokeai.models.diffusion.cross_attention_control import InvokeAICrossAttentionMixin
from invokeai.backend.models.diffusion import InvokeAICrossAttentionMixin
from ldm.modules.diffusionmodules.util import checkpoint
def exists(val):