move models and modules under invokeai/backend/ldm

This commit is contained in:
Lincoln Stein 2023-03-01 18:24:18 -05:00
parent 2c7928b163
commit 850d1ee984
67 changed files with 79 additions and 79 deletions
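In short: code that previously lived under the top-level `ldm` package (and under `invokeai.backend.models`) now lives under `invokeai.backend.ldm`, so absolute `ldm.*` imports of the moved modules become `invokeai.backend.ldm.*`, while modules inside the moved package switch to relative imports. As a rough orientation only, here is a hypothetical downstream caller updated across this commit, using paths that appear in the hunks below (`ldm.invoke.globals` is left alone because that module is not moved here):

# Hypothetical downstream script, before this commit:
#   from ldm.util import instantiate_from_config
#   from ldm.invoke.devices import choose_torch_device
#   from invokeai.backend.models import ModelManager
#   from ldm.invoke.globals import Globals   # not moved by this commit

# The same imports after the move:
from invokeai.backend.ldm.util import instantiate_from_config
from invokeai.backend.ldm.devices import choose_torch_device
from invokeai.backend.ldm.models import ModelManager
from ldm.invoke.globals import Globals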

View File

@@ -22,8 +22,8 @@ from pytorch_lightning import seed_everything
 from tqdm import trange
 import invokeai.assets.web as web_assets
-from ..models.diffusion.ddpm import DiffusionWrapper
-from ldm.util import rand_perlin_2d
+from ..ldm.models.diffusion.ddpm import DiffusionWrapper
+from ..ldm.util import rand_perlin_2d
 downsampling = 8
 CAUTION_IMG = 'caution.png'

View File

@@ -27,11 +27,11 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 from typing_extensions import ParamSpec
 from ldm.invoke.globals import Globals
-from ..models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
-from ldm.modules.textual_inversion_manager import TextualInversionManager
-from ldm.invoke.devices import normalize_device, CPU_DEVICE
-from ldm.invoke.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
-from ..models.diffusion import AttentionMapSaver
+from ..ldm.models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
+from ..ldm.modules.textual_inversion_manager import TextualInversionManager
+from ..ldm.devices import normalize_device, CPU_DEVICE
+from ..ldm.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
+from ..ldm.models.diffusion import AttentionMapSaver
 from compel import EmbeddingsProvider
 @dataclass

View File

@@ -7,7 +7,7 @@ from diffusers import logging
 from .base import Generator
 from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData
-from ..models.diffusion import PostprocessingSettings
+from ..ldm.models.diffusion import PostprocessingSettings
 class Img2Img(Generator):
 def __init__(self, model, precision):

View File

@@ -15,7 +15,7 @@ from .diffusers_pipeline import image_resized_to_grid_as_tensor, StableDiffusion
 ConditioningData
 from .img2img import Img2Img
 from ldm.invoke.patchmatch import PatchMatch
-from ldm.util import debug_image
+from ..ldm.util import debug_image
 def infill_methods()->list[str]:

View File

@@ -6,7 +6,7 @@ import torch
 from .base import Generator
 from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData
-from ..models import PostprocessingSettings
+from ..ldm.models import PostprocessingSettings
 class Txt2Img(Generator):
 def __init__(self, model, precision):

View File

@@ -14,7 +14,7 @@ from transformers import CLIPTokenizer, CLIPTextModel
 from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
 from .devices import torch_dtype
-from invokeai.backend.models import InvokeAIDiffuserComponent
+from .models import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
 def get_tokenizer(model) -> CLIPTokenizer:

View File

@@ -5,12 +5,12 @@ from contextlib import contextmanager
 from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
-from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import (
+from ..modules.diffusionmodules.model import Encoder, Decoder
+from ..modules.distributions.distributions import (
 DiagonalGaussianDistribution,
 )
-from ldm.util import instantiate_from_config
+from ..util import instantiate_from_config
 class VQModel(pl.LightningModule):

View File

@@ -15,7 +15,7 @@ from torch import nn
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
 from diffusers.models.cross_attention import AttnProcessor
-from ldm.invoke.devices import torch_dtype
+from ...devices import torch_dtype
 class CrossAttentionType(enum.Enum):

View File

@@ -3,7 +3,7 @@
 import torch
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from .sampler import Sampler
-from ldm.modules.diffusionmodules.util import noise_like
+from ...modules.diffusionmodules.util import noise_like
 class DDIMSampler(Sampler):
 def __init__(self, model, schedule='linear', device=None, **kwargs):

View File

@@ -22,8 +22,8 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
 from omegaconf import ListConfig
 import urllib
-from ldm.modules.textual_inversion_manager import TextualInversionManager
-from ldm.util import (
+from ...modules.textual_inversion_manager import TextualInversionManager
+from ...util import (
 log_txt_as_img,
 exists,
 default,
@@ -33,8 +33,8 @@ from ldm.util import (
 count_params,
 instantiate_from_config,
 )
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import (
+from ...modules.ema import LitEma
+from ...modules.distributions.distributions import (
 normal_kl,
 DiagonalGaussianDistribution,
 )
@@ -43,7 +43,7 @@ from ..autoencoder import (
 IdentityFirstStage,
 AutoencoderKL,
 )
-from ldm.modules.diffusionmodules.util import (
+from ...modules.diffusionmodules.util import (
 make_beta_schedule,
 extract_into_tensor,
 noise_like,

View File

@@ -4,10 +4,10 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ldm.invoke.devices import choose_torch_device
+from ...devices import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from .sampler import Sampler
-from ldm.modules.diffusionmodules.util import noise_like
+from ...modules.diffusionmodules.util import noise_like
 class PLMSSampler(Sampler):

View File

@@ -7,10 +7,10 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ldm.invoke.devices import choose_torch_device
+from ...devices import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
-from ldm.modules.diffusionmodules.util import (
+from ...modules.diffusionmodules.util import (
 make_ddim_sampling_parameters,
 make_ddim_timesteps,
 noise_like,

View File

@@ -31,14 +31,14 @@ from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
-from ldm.invoke.devices import CPU_DEVICE
+from ..devices import CPU_DEVICE
 from ldm.invoke.globals import Globals, global_cache_dir
-from ldm.util import (
+from ..util import (
 ask_user,
 download_with_resume,
 url_attachment_name,
 )
-from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
+from ...generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 class SDLegacyType(Enum):

View File

@@ -7,8 +7,8 @@ import torch.nn.functional as F
 from torch import nn, einsum
 from einops import rearrange, repeat
-from invokeai.backend.models.diffusion import InvokeAICrossAttentionMixin
-from ldm.modules.diffusionmodules.util import checkpoint
+from ..models.diffusion import InvokeAICrossAttentionMixin
+from .diffusionmodules.util import checkpoint
 def exists(val):
 return val is not None

View File

@@ -7,8 +7,8 @@ from torch.nn.functional import silu
 import numpy as np
 from einops import rearrange
-from ldm.util import instantiate_from_config
-from ldm.modules.attention import LinearAttention
+from ...util import instantiate_from_config
+from ..attention import LinearAttention
 import psutil

View File

@@ -15,7 +15,7 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat
-from ldm.util import instantiate_from_config
+from ...util import instantiate_from_config
 def make_beta_schedule(

View File

[Binary image file changed: 431 KiB before, 431 KiB after; preview omitted.]

View File

@@ -9,7 +9,7 @@ from picklescan.scanner import scan_file_path
 from transformers import CLIPTextModel, CLIPTokenizer
 from compel.embeddings_provider import BaseTextualInversionManager
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
+from ..concepts_lib import HuggingFaceConceptsLibrary
 @dataclass

View File

@@ -15,7 +15,7 @@ import torch
 from PIL import Image, ImageDraw, ImageFont
 from tqdm import tqdm
-from ldm.invoke.devices import torch_dtype
+from .devices import torch_dtype
 def log_txt_as_img(wh, xc, size=10):

View File

@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 5.0e-03
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
 embedding_reg_weight: 0.0
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ["sculpture"]
@@ -28,7 +28,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -68,7 +68,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 data:
 target: main.DataModuleFromConfig
@@ -77,14 +77,14 @@ data:
 num_workers: 2
 wrap: false
 train:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: train
 per_image_tokens: false
 repeats: 100
 validation:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: val

View File

@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 5.0e-03
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
 embedding_reg_weight: 0.0
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ["painting"]
@@ -27,7 +27,7 @@ model:
 num_vectors_per_token: 1
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -67,7 +67,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 data:
 target: main.DataModuleFromConfig
@@ -76,14 +76,14 @@ data:
 num_workers: 16
 wrap: false
 train:
-target: ldm.data.personalized_style.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
 params:
 size: 512
 set: train
 per_image_tokens: false
 repeats: 100
 validation:
-target: ldm.data.personalized_style.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
 params:
 size: 512
 set: val

View File

@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 1.0e-04
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -18,7 +18,7 @@ model:
 use_ema: False
 scheduler_config: # 10000 warmup steps
-target: ldm.lr_scheduler.LambdaLinearScheduler
+target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
 params:
 warm_up_steps: [ 10000 ]
 cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@@ -27,7 +27,7 @@ model:
 f_min: [ 1. ]
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ['sculpture']
@@ -36,7 +36,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -76,4 +76,4 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder

View File

@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 7.5e-05
-target: invokeai.models.diffusion.ddpm.LatentInpaintDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentInpaintDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -18,7 +18,7 @@ model:
 finetune_keys: null
 scheduler_config: # 10000 warmup steps
-target: ldm.lr_scheduler.LambdaLinearScheduler
+target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
 params:
 warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
 cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@@ -27,7 +27,7 @@ model:
 f_min: [ 1. ]
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ['sculpture']
@@ -36,7 +36,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 9 # 4 data + 4 downscaled image + 1 mask
@@ -76,4 +76,4 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder

View File

@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 5.0e-03
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
 embedding_reg_weight: 0.0
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ['sculpture']
@@ -28,7 +28,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -68,7 +68,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 data:
 target: main.DataModuleFromConfig
@@ -77,14 +77,14 @@ data:
 num_workers: 2
 wrap: false
 train:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: train
 per_image_tokens: false
 repeats: 100
 validation:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: val

View File

@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 1.0e-4
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 parameterization: "v"
 linear_start: 0.00085
@@ -19,7 +19,7 @@ model:
 use_ema: False # we set this to false because this is an inference only config
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 use_checkpoint: True
 use_fp16: True
@@ -62,7 +62,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
 params:
 freeze: True
 layer: "penultimate"

View File

@@ -25,14 +25,14 @@ from omegaconf import OmegaConf
 from PIL import Image, ImageOps
 from pytorch_lightning import logging, seed_everything
-import ldm.invoke.conditioning
+import invokeai.backend.ldm.conditioning
-from invokeai.backend.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler)
+from invokeai.backend.ldm.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler)
 from invokeai.backend.generator import infill_methods
 from ldm.invoke.args import metadata_from_png
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
-from ldm.invoke.conditioning import get_uc_and_c_and_ec
-from ldm.invoke.devices import choose_precision, choose_torch_device
+from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary
+from invokeai.backend.ldm.conditioning import get_uc_and_c_and_ec
+from invokeai.backend.ldm.devices import choose_precision, choose_torch_device
 from ldm.invoke.globals import Globals, global_cache_dir
 from ldm.invoke.image_util import InitImageResizer
 from ldm.invoke.pngwriter import PngWriter

View File

@@ -25,10 +25,10 @@ from invokeai.backend.generator import PipelineIntermediateState
 from .globals import Globals
 from .image_util import make_grid
 from .log import write_log
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
-from ..util import url_attachment_name
+from invokeai.backend.ldm.util import url_attachment_name
 # global used in multiple functions (fix)
 infile = None

View File

@@ -97,7 +97,7 @@ from typing import List
 import ldm.invoke
 import ldm.invoke.pngwriter
-from ldm.invoke.conditioning import split_weighted_subprompts
+from invokeai.backend.ldm.conditioning import split_weighted_subprompts
 from ldm.invoke.globals import Globals

View File

@@ -22,7 +22,7 @@ from npyscreen import widget
 from omegaconf import OmegaConf
 from shutil import get_terminal_size
-from ..devices import choose_precision, choose_torch_device
+from invokeai.backend.ldm.devices import choose_precision, choose_torch_device
 from ..globals import Globals, global_config_dir
 from .model_install_backend import (Dataset_path, default_config_file,
 default_dataset, get_root,

View File

@@ -20,7 +20,7 @@ from typing import List
 import invokeai.configs as configs
 from invokeai.backend.generator import StableDiffusionGeneratorPipeline
 from ..globals import Globals, global_cache_dir, global_config_dir
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 warnings.filterwarnings("ignore")

View File

@@ -23,7 +23,7 @@ from omegaconf import OmegaConf
 from ldm.invoke.config.widgets import FloatTitleSlider
 from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
 global_models_dir, global_set_root)
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 DEST_MERGED_MODEL_DIR = "merged_models"

View File

@@ -12,7 +12,7 @@ import os
 import re
 import atexit
 from ldm.invoke.args import Args
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
+from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary
 from ldm.invoke.globals import Globals
 # ---------------readline utilities---------------------

View File

@@ -130,7 +130,7 @@ version = { attr = "ldm.invoke.__version__" }
 [tool.setuptools.packages.find]
 "where" = ["."]
 "include" = [
-"invokeai.assets.web*", "invokeai.models*",
+"invokeai.assets.web*",
 "invokeai.generator*","invokeai.backend*",
 "invokeai.frontend.dist*", "invokeai.configs*",
 "ldm*"