diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index 7b7e1ff126..994a06f461 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -22,8 +22,8 @@ from pytorch_lightning import seed_everything
 from tqdm import trange
 
 import invokeai.assets.web as web_assets
-from ..models.diffusion.ddpm import DiffusionWrapper
-from ldm.util import rand_perlin_2d
+from ..ldm.models.diffusion.ddpm import DiffusionWrapper
+from ..ldm.util import rand_perlin_2d
 
 downsampling = 8
 CAUTION_IMG = 'caution.png'
diff --git a/invokeai/backend/generator/diffusers_pipeline.py b/invokeai/backend/generator/diffusers_pipeline.py
index db86fbaf11..e9aa9fb86f 100644
--- a/invokeai/backend/generator/diffusers_pipeline.py
+++ b/invokeai/backend/generator/diffusers_pipeline.py
@@ -27,11 +27,11 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 from typing_extensions import ParamSpec
 
 from ldm.invoke.globals import Globals
-from ..models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
-from ldm.modules.textual_inversion_manager import TextualInversionManager
-from ldm.invoke.devices import normalize_device, CPU_DEVICE
-from ldm.invoke.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
-from ..models.diffusion import AttentionMapSaver
+from ..ldm.models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
+from ..ldm.modules.textual_inversion_manager import TextualInversionManager
+from ..ldm.devices import normalize_device, CPU_DEVICE
+from ..ldm.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
+from ..ldm.models.diffusion import AttentionMapSaver
 from compel import EmbeddingsProvider
 
 @dataclass
diff --git a/invokeai/backend/generator/img2img.py b/invokeai/backend/generator/img2img.py
index 8cc2004745..c50120fc98 100644
--- a/invokeai/backend/generator/img2img.py
+++ b/invokeai/backend/generator/img2img.py
@@ -7,7 +7,7 @@ from diffusers import logging
 
 from .base import Generator
 from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData
-from ..models.diffusion import PostprocessingSettings
+from ..ldm.models.diffusion import PostprocessingSettings
 
 class Img2Img(Generator):
     def __init__(self, model, precision):
diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py
index 02f3de4531..ec5e4087ea 100644
--- a/invokeai/backend/generator/inpaint.py
+++ b/invokeai/backend/generator/inpaint.py
@@ -15,7 +15,7 @@ from .diffusers_pipeline import image_resized_to_grid_as_tensor, StableDiffusion
     ConditioningData
 from .img2img import Img2Img
 from ldm.invoke.patchmatch import PatchMatch
-from ldm.util import debug_image
+from ..ldm.util import debug_image
 
 
 def infill_methods()->list[str]:
diff --git a/invokeai/backend/generator/txt2img.py b/invokeai/backend/generator/txt2img.py
index 7802e3a913..6ff4cedd6c 100644
--- a/invokeai/backend/generator/txt2img.py
+++ b/invokeai/backend/generator/txt2img.py
@@ -6,7 +6,7 @@ import torch
 
 from .base import Generator
 from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData
-from ..models import PostprocessingSettings
+from ..ldm.models import PostprocessingSettings
 
 class Txt2Img(Generator):
     def __init__(self, model, precision):
diff --git a/ldm/invoke/concepts_lib.py b/invokeai/backend/ldm/concepts_lib.py
similarity index 100%
rename from ldm/invoke/concepts_lib.py
rename to invokeai/backend/ldm/concepts_lib.py
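Note: the five generator hunks above all repoint invokeai/backend/generator at the relocated invokeai.backend.ldm tree. A quick way to confirm no rename was missed is to import a sample of the moved modules at their new dotted paths. A minimal smoke test, assuming an editable install of this branch (the script name and module list are illustrative, not part of the diff):

    # smoke_test_renames.py -- hypothetical helper, not part of this change.
    # Any rename the diff missed surfaces here as a ModuleNotFoundError.
    import importlib

    MOVED = [
        "invokeai.backend.ldm.concepts_lib",
        "invokeai.backend.ldm.devices",
        "invokeai.backend.ldm.util",
        "invokeai.backend.ldm.models.diffusion.ddpm",
        "invokeai.backend.ldm.modules.textual_inversion_manager",
    ]

    for name in MOVED:
        importlib.import_module(name)
    print(f"{len(MOVED)} relocated modules import cleanly")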
diff --git a/ldm/invoke/conditioning.py b/invokeai/backend/ldm/conditioning.py
similarity index 99%
rename from ldm/invoke/conditioning.py
rename to invokeai/backend/ldm/conditioning.py
index 17231ec4cb..918fa2def1 100644
--- a/ldm/invoke/conditioning.py
+++ b/invokeai/backend/ldm/conditioning.py
@@ -14,7 +14,7 @@ from transformers import CLIPTokenizer, CLIPTextModel
 from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
 from .devices import torch_dtype
-from invokeai.backend.models import InvokeAIDiffuserComponent
+from .models import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
 
 def get_tokenizer(model) -> CLIPTokenizer:
diff --git a/ldm/data/__init__.py b/invokeai/backend/ldm/data/__init__.py
similarity index 100%
rename from ldm/data/__init__.py
rename to invokeai/backend/ldm/data/__init__.py
diff --git a/ldm/data/base.py b/invokeai/backend/ldm/data/base.py
similarity index 100%
rename from ldm/data/base.py
rename to invokeai/backend/ldm/data/base.py
diff --git a/ldm/data/imagenet.py b/invokeai/backend/ldm/data/imagenet.py
similarity index 100%
rename from ldm/data/imagenet.py
rename to invokeai/backend/ldm/data/imagenet.py
diff --git a/ldm/data/lsun.py b/invokeai/backend/ldm/data/lsun.py
similarity index 100%
rename from ldm/data/lsun.py
rename to invokeai/backend/ldm/data/lsun.py
diff --git a/ldm/data/personalized.py b/invokeai/backend/ldm/data/personalized.py
similarity index 100%
rename from ldm/data/personalized.py
rename to invokeai/backend/ldm/data/personalized.py
diff --git a/ldm/data/personalized_style.py b/invokeai/backend/ldm/data/personalized_style.py
similarity index 100%
rename from ldm/data/personalized_style.py
rename to invokeai/backend/ldm/data/personalized_style.py
diff --git a/ldm/invoke/devices.py b/invokeai/backend/ldm/devices.py
similarity index 100%
rename from ldm/invoke/devices.py
rename to invokeai/backend/ldm/devices.py
diff --git a/invokeai/backend/models/__init__.py b/invokeai/backend/ldm/models/__init__.py
similarity index 100%
rename from invokeai/backend/models/__init__.py
rename to invokeai/backend/ldm/models/__init__.py
diff --git a/invokeai/backend/models/__init__.py~ b/invokeai/backend/ldm/models/__init__.py~
similarity index 100%
rename from invokeai/backend/models/__init__.py~
rename to invokeai/backend/ldm/models/__init__.py~
diff --git a/invokeai/backend/models/autoencoder.py b/invokeai/backend/ldm/models/autoencoder.py
similarity index 99%
rename from invokeai/backend/models/autoencoder.py
rename to invokeai/backend/ldm/models/autoencoder.py
index 3db7b6fd73..49c413a5fb 100644
--- a/invokeai/backend/models/autoencoder.py
+++ b/invokeai/backend/ldm/models/autoencoder.py
@@ -5,12 +5,12 @@ from contextlib import contextmanager
 
 from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
 
-from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import (
+from ..modules.diffusionmodules.model import Encoder, Decoder
+from ..modules.distributions.distributions import (
     DiagonalGaussianDistribution,
 )
 
-from ldm.util import instantiate_from_config
+from ..util import instantiate_from_config
 
 
 class VQModel(pl.LightningModule):
diff --git a/invokeai/backend/models/diffusion/__init__.py b/invokeai/backend/ldm/models/diffusion/__init__.py
similarity index 100%
rename from invokeai/backend/models/diffusion/__init__.py
rename to invokeai/backend/ldm/models/diffusion/__init__.py
diff --git a/invokeai/backend/models/diffusion/__init__.py~ b/invokeai/backend/ldm/models/diffusion/__init__.py~
similarity index 100%
rename from invokeai/backend/models/diffusion/__init__.py~
rename to invokeai/backend/ldm/models/diffusion/__init__.py~
diff --git a/invokeai/backend/models/diffusion/classifier.py b/invokeai/backend/ldm/models/diffusion/classifier.py
similarity index 100%
rename from invokeai/backend/models/diffusion/classifier.py
rename to invokeai/backend/ldm/models/diffusion/classifier.py
diff --git a/invokeai/backend/models/diffusion/cross_attention_control.py b/invokeai/backend/ldm/models/diffusion/cross_attention_control.py
similarity index 99%
rename from invokeai/backend/models/diffusion/cross_attention_control.py
rename to invokeai/backend/ldm/models/diffusion/cross_attention_control.py
index a34f22e683..6e91e5c868 100644
--- a/invokeai/backend/models/diffusion/cross_attention_control.py
+++ b/invokeai/backend/ldm/models/diffusion/cross_attention_control.py
@@ -15,7 +15,7 @@ from torch import nn
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
 from diffusers.models.cross_attention import AttnProcessor
-from ldm.invoke.devices import torch_dtype
+from ...devices import torch_dtype
 
 
 class CrossAttentionType(enum.Enum):
diff --git a/invokeai/backend/models/diffusion/cross_attention_map_saving.py b/invokeai/backend/ldm/models/diffusion/cross_attention_map_saving.py
similarity index 100%
rename from invokeai/backend/models/diffusion/cross_attention_map_saving.py
rename to invokeai/backend/ldm/models/diffusion/cross_attention_map_saving.py
diff --git a/invokeai/backend/models/diffusion/ddim.py b/invokeai/backend/ldm/models/diffusion/ddim.py
similarity index 98%
rename from invokeai/backend/models/diffusion/ddim.py
rename to invokeai/backend/ldm/models/diffusion/ddim.py
index f2c6f4c591..f36e970552 100644
--- a/invokeai/backend/models/diffusion/ddim.py
+++ b/invokeai/backend/ldm/models/diffusion/ddim.py
@@ -3,7 +3,7 @@ import torch
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from .sampler import Sampler
-from ldm.modules.diffusionmodules.util import noise_like
+from ...modules.diffusionmodules.util import noise_like
 
 
 class DDIMSampler(Sampler):
     def __init__(self, model, schedule='linear', device=None, **kwargs):
diff --git a/invokeai/backend/models/diffusion/ddpm.py b/invokeai/backend/ldm/models/diffusion/ddpm.py
similarity index 99%
rename from invokeai/backend/models/diffusion/ddpm.py
rename to invokeai/backend/ldm/models/diffusion/ddpm.py
index 1fe059cef4..f9e9485dd3 100644
--- a/invokeai/backend/models/diffusion/ddpm.py
+++ b/invokeai/backend/ldm/models/diffusion/ddpm.py
@@ -22,8 +22,8 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
 from omegaconf import ListConfig
 import urllib
 
-from ldm.modules.textual_inversion_manager import TextualInversionManager
-from ldm.util import (
+from ...modules.textual_inversion_manager import TextualInversionManager
+from ...util import (
     log_txt_as_img,
     exists,
     default,
@@ -33,8 +33,8 @@ from ldm.util import (
     count_params,
     instantiate_from_config,
 )
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import (
+from ...modules.ema import LitEma
+from ...modules.distributions.distributions import (
     normal_kl,
     DiagonalGaussianDistribution,
 )
@@ -43,7 +43,7 @@ from ..autoencoder import (
     IdentityFirstStage,
     AutoencoderKL,
 )
-from ldm.modules.diffusionmodules.util import (
+from ...modules.diffusionmodules.util import (
     make_beta_schedule,
     extract_into_tensor,
     noise_like,
diff --git a/invokeai/backend/models/diffusion/ksampler.py b/invokeai/backend/ldm/models/diffusion/ksampler.py
similarity index 100%
rename from invokeai/backend/models/diffusion/ksampler.py
rename to invokeai/backend/ldm/models/diffusion/ksampler.py
diff --git a/invokeai/backend/models/diffusion/plms.py b/invokeai/backend/ldm/models/diffusion/plms.py
similarity index 98%
rename from invokeai/backend/models/diffusion/plms.py
rename to invokeai/backend/ldm/models/diffusion/plms.py
index 4df703bed5..e0a187e900 100644
--- a/invokeai/backend/models/diffusion/plms.py
+++ b/invokeai/backend/ldm/models/diffusion/plms.py
@@ -4,10 +4,10 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ldm.invoke.devices import choose_torch_device
+from ...devices import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from .sampler import Sampler
-from ldm.modules.diffusionmodules.util import noise_like
+from ...modules.diffusionmodules.util import noise_like
 
 
 class PLMSSampler(Sampler):
diff --git a/invokeai/backend/models/diffusion/sampler.py b/invokeai/backend/ldm/models/diffusion/sampler.py
similarity index 99%
rename from invokeai/backend/models/diffusion/sampler.py
rename to invokeai/backend/ldm/models/diffusion/sampler.py
index 29479ff15f..bd9d8c157b 100644
--- a/invokeai/backend/models/diffusion/sampler.py
+++ b/invokeai/backend/ldm/models/diffusion/sampler.py
@@ -7,10 +7,10 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ldm.invoke.devices import choose_torch_device
+from ...devices import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 
-from ldm.modules.diffusionmodules.util import (
+from ...modules.diffusionmodules.util import (
     make_ddim_sampling_parameters,
     make_ddim_timesteps,
     noise_like,
diff --git a/invokeai/backend/models/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/ldm/models/diffusion/shared_invokeai_diffusion.py
similarity index 100%
rename from invokeai/backend/models/diffusion/shared_invokeai_diffusion.py
rename to invokeai/backend/ldm/models/diffusion/shared_invokeai_diffusion.py
diff --git a/invokeai/backend/models/model_manager.py b/invokeai/backend/ldm/models/model_manager.py
similarity index 99%
rename from invokeai/backend/models/model_manager.py
rename to invokeai/backend/ldm/models/model_manager.py
index e4dc5ffe40..8edf220a4c 100644
--- a/invokeai/backend/models/model_manager.py
+++ b/invokeai/backend/ldm/models/model_manager.py
@@ -31,14 +31,14 @@ from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
 
-from ldm.invoke.devices import CPU_DEVICE
+from ..devices import CPU_DEVICE
 from ldm.invoke.globals import Globals, global_cache_dir
-from ldm.util import (
+from ..util import (
     ask_user,
     download_with_resume,
     url_attachment_name,
 )
-from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
+from ...generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 
 
 class SDLegacyType(Enum):
diff --git a/ldm/modules/__init__.py b/invokeai/backend/ldm/modules/__init__.py
similarity index 100%
rename from ldm/modules/__init__.py
rename to invokeai/backend/ldm/modules/__init__.py
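Note: the sampler and model-manager hunks above replace absolute ldm.* imports with dotted relative ones, and the dot depth has to match each module's new location exactly. importlib.util.resolve_name computes the same mapping the interpreter applies, so it can be used to spot-check a hunk without importing anything. A small illustration (standard library only; package paths taken from the renames above):

    from importlib.util import resolve_name

    # ddpm.py now lives in invokeai.backend.ldm.models.diffusion, so a
    # triple-dot import backs out two package levels before descending:
    assert resolve_name(
        "...modules.diffusionmodules.util",
        package="invokeai.backend.ldm.models.diffusion",
    ) == "invokeai.backend.ldm.modules.diffusionmodules.util"

    # model_manager.py sits one level higher, hence its two-dot imports:
    assert resolve_name(
        "..devices", package="invokeai.backend.ldm.models"
    ) == "invokeai.backend.ldm.devices"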
diff --git a/ldm/modules/attention.py b/invokeai/backend/ldm/modules/attention.py
similarity index 98%
rename from ldm/modules/attention.py
rename to invokeai/backend/ldm/modules/attention.py
index 11b2b45cff..4fb426130f 100644
--- a/ldm/modules/attention.py
+++ b/invokeai/backend/ldm/modules/attention.py
@@ -7,8 +7,8 @@ import torch.nn.functional as F
 from torch import nn, einsum
 from einops import rearrange, repeat
 
-from invokeai.backend.models.diffusion import InvokeAICrossAttentionMixin
-from ldm.modules.diffusionmodules.util import checkpoint
+from ..models.diffusion import InvokeAICrossAttentionMixin
+from .diffusionmodules.util import checkpoint
 
 def exists(val):
     return val is not None
diff --git a/ldm/modules/diffusionmodules/__init__.py b/invokeai/backend/ldm/modules/diffusionmodules/__init__.py
similarity index 100%
rename from ldm/modules/diffusionmodules/__init__.py
rename to invokeai/backend/ldm/modules/diffusionmodules/__init__.py
diff --git a/ldm/modules/diffusionmodules/model.py b/invokeai/backend/ldm/modules/diffusionmodules/model.py
similarity index 99%
rename from ldm/modules/diffusionmodules/model.py
rename to invokeai/backend/ldm/modules/diffusionmodules/model.py
index c7288e7a7b..94b0dfc4c7 100644
--- a/ldm/modules/diffusionmodules/model.py
+++ b/invokeai/backend/ldm/modules/diffusionmodules/model.py
@@ -7,8 +7,8 @@ from torch.nn.functional import silu
 import numpy as np
 from einops import rearrange
 
-from ldm.util import instantiate_from_config
-from ldm.modules.attention import LinearAttention
+from ...util import instantiate_from_config
+from ..attention import LinearAttention
 
 import psutil
diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/invokeai/backend/ldm/modules/diffusionmodules/openaimodel.py
similarity index 100%
rename from ldm/modules/diffusionmodules/openaimodel.py
rename to invokeai/backend/ldm/modules/diffusionmodules/openaimodel.py
diff --git a/ldm/modules/diffusionmodules/util.py b/invokeai/backend/ldm/modules/diffusionmodules/util.py
similarity index 99%
rename from ldm/modules/diffusionmodules/util.py
rename to invokeai/backend/ldm/modules/diffusionmodules/util.py
index e93cf49a4d..a943e51d9e 100644
--- a/ldm/modules/diffusionmodules/util.py
+++ b/invokeai/backend/ldm/modules/diffusionmodules/util.py
@@ -15,7 +15,7 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat
 
-from ldm.util import instantiate_from_config
+from ...util import instantiate_from_config
 
 
 def make_beta_schedule(
diff --git a/ldm/modules/distributions/__init__.py b/invokeai/backend/ldm/modules/distributions/__init__.py
similarity index 100%
rename from ldm/modules/distributions/__init__.py
rename to invokeai/backend/ldm/modules/distributions/__init__.py
diff --git a/ldm/modules/distributions/distributions.py b/invokeai/backend/ldm/modules/distributions/distributions.py
similarity index 100%
rename from ldm/modules/distributions/distributions.py
rename to invokeai/backend/ldm/modules/distributions/distributions.py
diff --git a/ldm/modules/ema.py b/invokeai/backend/ldm/modules/ema.py
similarity index 100%
rename from ldm/modules/ema.py
rename to invokeai/backend/ldm/modules/ema.py
diff --git a/ldm/modules/embedding_manager.py b/invokeai/backend/ldm/modules/embedding_manager.py
similarity index 100%
rename from ldm/modules/embedding_manager.py
rename to invokeai/backend/ldm/modules/embedding_manager.py
diff --git a/ldm/modules/encoders/__init__.py b/invokeai/backend/ldm/modules/encoders/__init__.py
similarity index 100%
rename from ldm/modules/encoders/__init__.py
rename to invokeai/backend/ldm/modules/encoders/__init__.py
diff --git a/ldm/modules/encoders/modules.py b/invokeai/backend/ldm/modules/encoders/modules.py
similarity index 100%
rename from ldm/modules/encoders/modules.py
rename to invokeai/backend/ldm/modules/encoders/modules.py
diff --git a/ldm/modules/image_degradation/__init__.py b/invokeai/backend/ldm/modules/image_degradation/__init__.py
similarity index 100%
rename from ldm/modules/image_degradation/__init__.py
rename to invokeai/backend/ldm/modules/image_degradation/__init__.py
diff --git a/ldm/modules/image_degradation/bsrgan.py b/invokeai/backend/ldm/modules/image_degradation/bsrgan.py
similarity index 100%
rename from ldm/modules/image_degradation/bsrgan.py
rename to invokeai/backend/ldm/modules/image_degradation/bsrgan.py
diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/invokeai/backend/ldm/modules/image_degradation/bsrgan_light.py
similarity index 100%
rename from ldm/modules/image_degradation/bsrgan_light.py
rename to invokeai/backend/ldm/modules/image_degradation/bsrgan_light.py
diff --git a/ldm/modules/image_degradation/utils/test.png b/invokeai/backend/ldm/modules/image_degradation/utils/test.png
similarity index 100%
rename from ldm/modules/image_degradation/utils/test.png
rename to invokeai/backend/ldm/modules/image_degradation/utils/test.png
diff --git a/ldm/modules/image_degradation/utils_image.py b/invokeai/backend/ldm/modules/image_degradation/utils_image.py
similarity index 100%
rename from ldm/modules/image_degradation/utils_image.py
rename to invokeai/backend/ldm/modules/image_degradation/utils_image.py
diff --git a/ldm/modules/losses/__init__.py b/invokeai/backend/ldm/modules/losses/__init__.py
similarity index 100%
rename from ldm/modules/losses/__init__.py
rename to invokeai/backend/ldm/modules/losses/__init__.py
diff --git a/ldm/modules/losses/contperceptual.py b/invokeai/backend/ldm/modules/losses/contperceptual.py
similarity index 100%
rename from ldm/modules/losses/contperceptual.py
rename to invokeai/backend/ldm/modules/losses/contperceptual.py
diff --git a/ldm/modules/losses/vqperceptual.py b/invokeai/backend/ldm/modules/losses/vqperceptual.py
similarity index 100%
rename from ldm/modules/losses/vqperceptual.py
rename to invokeai/backend/ldm/modules/losses/vqperceptual.py
diff --git a/ldm/modules/textual_inversion_manager.py b/invokeai/backend/ldm/modules/textual_inversion_manager.py
similarity index 99%
rename from ldm/modules/textual_inversion_manager.py
rename to invokeai/backend/ldm/modules/textual_inversion_manager.py
index c3ca69e992..2c5721ea7b 100644
--- a/ldm/modules/textual_inversion_manager.py
+++ b/invokeai/backend/ldm/modules/textual_inversion_manager.py
@@ -9,7 +9,7 @@ from picklescan.scanner import scan_file_path
 from transformers import CLIPTextModel, CLIPTokenizer
 
 from compel.embeddings_provider import BaseTextualInversionManager
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
+from ..concepts_lib import HuggingFaceConceptsLibrary
 
 
 @dataclass
diff --git a/ldm/modules/x_transformer.py b/invokeai/backend/ldm/modules/x_transformer.py
similarity index 100%
rename from ldm/modules/x_transformer.py
rename to invokeai/backend/ldm/modules/x_transformer.py
diff --git a/ldm/invoke/offloading.py b/invokeai/backend/ldm/offloading.py
similarity index 100%
rename from ldm/invoke/offloading.py
rename to invokeai/backend/ldm/offloading.py
diff --git a/ldm/util.py b/invokeai/backend/ldm/util.py
similarity index 99%
rename from ldm/util.py
rename to invokeai/backend/ldm/util.py
index 34075613b6..0aac014ef9 100644
--- a/ldm/util.py
+++ b/invokeai/backend/ldm/util.py
@@ -15,7 +15,7 @@
 import torch
 from PIL import Image, ImageDraw, ImageFont
 from tqdm import tqdm
-from ldm.invoke.devices import torch_dtype
+from .devices import torch_dtype
 
 
 def log_txt_as_img(wh, xc, size=10):
diff --git a/invokeai/configs/stable-diffusion/v1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-finetune.yaml
index 9fea4ae01f..96e7dd338a 100644
--- a/invokeai/configs/stable-diffusion/v1-finetune.yaml
+++ b/invokeai/configs/stable-diffusion/v1-finetune.yaml
@@ -1,6 +1,6 @@
 model:
   base_learning_rate: 5.0e-03
-  target: invokeai.models.diffusion.ddpm.LatentDiffusion
+  target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
   params:
     linear_start: 0.00085
     linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
     embedding_reg_weight: 0.0
 
     personalization_config:
-      target: ldm.modules.embedding_manager.EmbeddingManager
+      target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
         initializer_words: ["sculpture"]
@@ -28,7 +28,7 @@ model:
         progressive_words: False
 
     unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
         image_size: 32 # unused
         in_channels: 4
@@ -68,7 +68,7 @@ model:
           target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+      target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 
 data:
   target: main.DataModuleFromConfig
@@ -77,14 +77,14 @@ data:
     num_workers: 2
     wrap: false
     train:
-      target: ldm.data.personalized.PersonalizedBase
+      target: invokeai.backend.ldm.data.personalized.PersonalizedBase
       params:
         size: 512
         set: train
         per_image_tokens: false
         repeats: 100
     validation:
-      target: ldm.data.personalized.PersonalizedBase
+      target: invokeai.backend.ldm.data.personalized.PersonalizedBase
       params:
         size: 512
         set: val
diff --git a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml
index fdecca9b72..7433390ce3 100644
--- a/invokeai/configs/stable-diffusion/v1-finetune_style.yaml
+++ b/invokeai/configs/stable-diffusion/v1-finetune_style.yaml
@@ -1,6 +1,6 @@
 model:
   base_learning_rate: 5.0e-03
-  target: invokeai.models.diffusion.ddpm.LatentDiffusion
+  target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
   params:
     linear_start: 0.00085
     linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
     embedding_reg_weight: 0.0
 
     personalization_config:
-      target: ldm.modules.embedding_manager.EmbeddingManager
+      target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
         initializer_words: ["painting"]
@@ -27,7 +27,7 @@ model:
         num_vectors_per_token: 1
 
     unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
         image_size: 32 # unused
         in_channels: 4
@@ -67,7 +67,7 @@ model:
           target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+      target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 
 data:
   target: main.DataModuleFromConfig
@@ -76,14 +76,14 @@ data:
     num_workers: 16
     wrap: false
     train:
-      target: ldm.data.personalized_style.PersonalizedBase
+      target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
       params:
         size: 512
         set: train
         per_image_tokens: false
         repeats: 100
     validation:
-      target: ldm.data.personalized_style.PersonalizedBase
+      target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
       params:
         size: 512
         set: val
diff --git a/invokeai/configs/stable-diffusion/v1-inference.yaml b/invokeai/configs/stable-diffusion/v1-inference.yaml
index 913cbbf310..911c756003 100644
--- a/invokeai/configs/stable-diffusion/v1-inference.yaml
+++ b/invokeai/configs/stable-diffusion/v1-inference.yaml
@@ -1,6 +1,6 @@
 model:
   base_learning_rate: 1.0e-04
-  target: invokeai.models.diffusion.ddpm.LatentDiffusion
+  target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
   params:
     linear_start: 0.00085
     linear_end: 0.0120
@@ -18,7 +18,7 @@ model:
     use_ema: False
 
     scheduler_config: # 10000 warmup steps
-      target: ldm.lr_scheduler.LambdaLinearScheduler
+      target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
       params:
         warm_up_steps: [ 10000 ]
         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@@ -27,7 +27,7 @@
         f_min: [ 1. ]
 
     personalization_config:
-      target: ldm.modules.embedding_manager.EmbeddingManager
+      target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
         initializer_words: ['sculpture']
@@ -36,7 +36,7 @@ model:
         progressive_words: False
 
     unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
         image_size: 32 # unused
         in_channels: 4
@@ -76,4 +76,4 @@ model:
           target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
+      target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
diff --git a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml
index 78458a7e54..76ef8d2750 100644
--- a/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml
+++ b/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml
@@ -1,6 +1,6 @@
 model:
   base_learning_rate: 7.5e-05
-  target: invokeai.models.diffusion.ddpm.LatentInpaintDiffusion
+  target: invokeai.backend.ldm.models.diffusion.ddpm.LatentInpaintDiffusion
   params:
     linear_start: 0.00085
     linear_end: 0.0120
@@ -18,7 +18,7 @@ model:
     finetune_keys: null
 
     scheduler_config: # 10000 warmup steps
-      target: ldm.lr_scheduler.LambdaLinearScheduler
+      target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
       params:
         warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@@ -27,7 +27,7 @@
         f_min: [ 1. ]
 
     personalization_config:
-      target: ldm.modules.embedding_manager.EmbeddingManager
+      target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
         initializer_words: ['sculpture']
@@ -36,7 +36,7 @@ model:
         progressive_words: False
 
     unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
         image_size: 32 # unused
         in_channels: 9 # 4 data + 4 downscaled image + 1 mask
@@ -76,4 +76,4 @@ model:
           target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
+      target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
diff --git a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml
index e6db3ac067..d9533d9b4b 100644
--- a/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml
+++ b/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml
@@ -1,6 +1,6 @@
 model:
   base_learning_rate: 5.0e-03
-  target: invokeai.models.diffusion.ddpm.LatentDiffusion
+  target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
   params:
     linear_start: 0.00085
     linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
     embedding_reg_weight: 0.0
 
     personalization_config:
-      target: ldm.modules.embedding_manager.EmbeddingManager
+      target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
         initializer_words: ['sculpture']
@@ -28,7 +28,7 @@ model:
         progressive_words: False
 
     unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
         image_size: 32 # unused
         in_channels: 4
@@ -68,7 +68,7 @@ model:
          target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+      target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 
 data:
   target: main.DataModuleFromConfig
@@ -77,14 +77,14 @@ data:
     num_workers: 2
     wrap: false
     train:
-      target: ldm.data.personalized.PersonalizedBase
+      target: invokeai.backend.ldm.data.personalized.PersonalizedBase
       params:
         size: 512
         set: train
         per_image_tokens: false
         repeats: 100
     validation:
-      target: ldm.data.personalized.PersonalizedBase
+      target: invokeai.backend.ldm.data.personalized.PersonalizedBase
       params:
         size: 512
         set: val
diff --git a/invokeai/configs/stable-diffusion/v2-inference-v.yaml b/invokeai/configs/stable-diffusion/v2-inference-v.yaml
index 6b6828fbe7..2a426483eb 100644
--- a/invokeai/configs/stable-diffusion/v2-inference-v.yaml
+++ b/invokeai/configs/stable-diffusion/v2-inference-v.yaml
@@ -1,6 +1,6 @@
 model:
   base_learning_rate: 1.0e-4
-  target: invokeai.models.diffusion.ddpm.LatentDiffusion
+  target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
   params:
     parameterization: "v"
     linear_start: 0.00085
@@ -19,7 +19,7 @@ model:
     use_ema: False # we set this to false because this is an inference only config
 
     unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
       params:
         use_checkpoint: True
         use_fp16: True
@@ -62,7 +62,7 @@ model:
           target: torch.nn.Identity
 
     cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+      target: invokeai.backend.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
       params:
         freeze: True
         layer: "penultimate"
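Note: every YAML hunk above rewrites a target: string, and those strings only bind at runtime. The stock ldm loader resolves them with instantiate_from_config, which reduces to roughly the following (a paraphrase of the helper this diff moves into invokeai/backend/ldm/util.py, not a verbatim copy):

    import importlib

    def get_obj_from_str(string: str):
        # "invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion"
        # splits into a module path and an attribute name.
        module, cls = string.rsplit(".", 1)
        return getattr(importlib.import_module(module), cls)

    def instantiate_from_config(config: dict):
        # Import the class named by "target", construct it with "params".
        return get_obj_from_str(config["target"])(**config.get("params", dict()))

Because resolution is a plain importlib.import_module on the dotted prefix, a target that still points into a renamed package fails with ModuleNotFoundError the moment the config loads, which is why these configs must move in lockstep with the Python tree.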
diff --git a/ldm/generate.py b/ldm/generate.py
index 00c94b6a83..536a7f2df8 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -25,14 +25,14 @@ from omegaconf import OmegaConf
 from PIL import Image, ImageOps
 from pytorch_lightning import logging, seed_everything
 
-import ldm.invoke.conditioning
+import invokeai.backend.ldm.conditioning
 
-from invokeai.backend.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler)
+from invokeai.backend.ldm.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler)
 from invokeai.backend.generator import infill_methods
 from ldm.invoke.args import metadata_from_png
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
-from ldm.invoke.conditioning import get_uc_and_c_and_ec
-from ldm.invoke.devices import choose_precision, choose_torch_device
+from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary
+from invokeai.backend.ldm.conditioning import get_uc_and_c_and_ec
+from invokeai.backend.ldm.devices import choose_precision, choose_torch_device
 from ldm.invoke.globals import Globals, global_cache_dir
 from ldm.invoke.image_util import InitImageResizer
 from ldm.invoke.pngwriter import PngWriter
diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py
index 42fe6638aa..16f64e3bd1 100644
--- a/ldm/invoke/CLI.py
+++ b/ldm/invoke/CLI.py
@@ -25,10 +25,10 @@ from invokeai.backend.generator import PipelineIntermediateState
 from .globals import Globals
 from .image_util import make_grid
 from .log import write_log
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
-from ..util import url_attachment_name
+from invokeai.backend.ldm.util import url_attachment_name
 
 # global used in multiple functions (fix)
 infile = None
diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py
index 1a5dbe334a..17bb8005cb 100644
--- a/ldm/invoke/args.py
+++ b/ldm/invoke/args.py
@@ -97,7 +97,7 @@ from typing import List
 
 import ldm.invoke
 import ldm.invoke.pngwriter
-from ldm.invoke.conditioning import split_weighted_subprompts
+from invokeai.backend.ldm.conditioning import split_weighted_subprompts
 from ldm.invoke.globals import Globals
 
 
diff --git a/ldm/invoke/config/model_install.py b/ldm/invoke/config/model_install.py
index 287283ca27..9dd22d2ac7 100644
--- a/ldm/invoke/config/model_install.py
+++ b/ldm/invoke/config/model_install.py
@@ -22,7 +22,7 @@ from npyscreen import widget
 from omegaconf import OmegaConf
 from shutil import get_terminal_size
 
-from ..devices import choose_precision, choose_torch_device
+from invokeai.backend.ldm.devices import choose_precision, choose_torch_device
 from ..globals import Globals, global_config_dir
 from .model_install_backend import (Dataset_path, default_config_file,
                                     default_dataset, get_root,
diff --git a/ldm/invoke/config/model_install_backend.py b/ldm/invoke/config/model_install_backend.py
index 428a46b96b..c50e5ebc3c 100644
--- a/ldm/invoke/config/model_install_backend.py
+++ b/ldm/invoke/config/model_install_backend.py
@@ -20,7 +20,7 @@ from typing import List
 
 import invokeai.configs as configs
 from invokeai.backend.generator import StableDiffusionGeneratorPipeline
 from ..globals import Globals, global_cache_dir, global_config_dir
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 
 warnings.filterwarnings("ignore")
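Note: the ldm/ call sites above all follow one mechanical substitution: ldm.invoke.* and invokeai.backend.models prefixes become the corresponding invokeai.backend.ldm.* paths. A rewrite of this shape is usually scripted; a minimal sketch of such a codemod (illustrative only, not the tool used for this change, and deliberately limited to the prefixes visible in these hunks):

    import pathlib
    import re

    # old import prefix -> new import prefix, per the hunks above
    REWRITES = [
        (re.compile(r"\bldm\.invoke\.concepts_lib\b"), "invokeai.backend.ldm.concepts_lib"),
        (re.compile(r"\bldm\.invoke\.conditioning\b"), "invokeai.backend.ldm.conditioning"),
        (re.compile(r"\bldm\.invoke\.devices\b"), "invokeai.backend.ldm.devices"),
        (re.compile(r"\binvokeai\.backend\.models\b"), "invokeai.backend.ldm.models"),
    ]

    for path in pathlib.Path("ldm").rglob("*.py"):
        text = path.read_text()
        for pattern, replacement in REWRITES:
            text = pattern.sub(replacement, text)
        path.write_text(text)

A blind textual pass cannot produce the relative imports (..ldm.models, ...devices) used inside the moved packages themselves, so hunks like the earlier invokeai/backend/ ones still need per-package attention.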
diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py
index 16e5340e8f..28abb5d432 100644
--- a/ldm/invoke/merge_diffusers.py
+++ b/ldm/invoke/merge_diffusers.py
@@ -23,7 +23,7 @@ from omegaconf import OmegaConf
 
 from ldm.invoke.config.widgets import FloatTitleSlider
 from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
                                 global_models_dir, global_set_root)
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 
 DEST_MERGED_MODEL_DIR = "merged_models"
diff --git a/ldm/invoke/readline.py b/ldm/invoke/readline.py
index 542bdeeaed..a605358a9b 100644
--- a/ldm/invoke/readline.py
+++ b/ldm/invoke/readline.py
@@ -12,7 +12,7 @@ import os
 import re
 import atexit
 from ldm.invoke.args import Args
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
+from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary
 from ldm.invoke.globals import Globals
 
 # ---------------readline utilities---------------------
diff --git a/pyproject.toml b/pyproject.toml
index 4b5a5d5fda..22e6f9282c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -130,7 +130,7 @@ version = { attr = "ldm.invoke.__version__" }
 
 [tool.setuptools.packages.find]
 "where" = ["."]
 "include" = [
-    "invokeai.assets.web*", "invokeai.models*",
+    "invokeai.assets.web*",
     "invokeai.generator*","invokeai.backend*",
     "invokeai.frontend.dist*", "invokeai.configs*", "ldm*"
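Note: the widened invokeai.backend* glob is what actually packages the relocated tree, since setuptools expands each include pattern against the packages it discovers under "where". A quick sanity check, runnable from the repo root (it assumes every moved directory carries an __init__.py, because find_packages only discovers regular packages):

    from setuptools import find_packages

    pkgs = find_packages(where=".", include=["invokeai.backend*"])
    # The relocated subpackages should all be matched by the new glob.
    assert "invokeai.backend.ldm" in pkgs
    assert "invokeai.backend.ldm.models.diffusion" in pkgs
    print(f"{len(pkgs)} invokeai.backend packages discovered")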