move models and modules under invokeai/backend/ldm

Lincoln Stein 2023-03-01 18:24:18 -05:00
parent 2c7928b163
commit 850d1ee984
67 changed files with 79 additions and 79 deletions


@@ -22,8 +22,8 @@ from pytorch_lightning import seed_everything
 from tqdm import trange
 import invokeai.assets.web as web_assets
-from ..models.diffusion.ddpm import DiffusionWrapper
-from ldm.util import rand_perlin_2d
+from ..ldm.models.diffusion.ddpm import DiffusionWrapper
+from ..ldm.util import rand_perlin_2d
 downsampling = 8
 CAUTION_IMG = 'caution.png'


@@ -27,11 +27,11 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 from typing_extensions import ParamSpec
 from ldm.invoke.globals import Globals
-from ..models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
-from ldm.modules.textual_inversion_manager import TextualInversionManager
-from ldm.invoke.devices import normalize_device, CPU_DEVICE
-from ldm.invoke.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
-from ..models.diffusion import AttentionMapSaver
+from ..ldm.models.diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
+from ..ldm.modules.textual_inversion_manager import TextualInversionManager
+from ..ldm.devices import normalize_device, CPU_DEVICE
+from ..ldm.offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
+from ..ldm.models.diffusion import AttentionMapSaver
 from compel import EmbeddingsProvider
 @dataclass


@@ -7,7 +7,7 @@ from diffusers import logging
 from .base import Generator
 from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData
-from ..models.diffusion import PostprocessingSettings
+from ..ldm.models.diffusion import PostprocessingSettings
 class Img2Img(Generator):
 def __init__(self, model, precision):


@@ -15,7 +15,7 @@ from .diffusers_pipeline import image_resized_to_grid_as_tensor, StableDiffusion
 ConditioningData
 from .img2img import Img2Img
 from ldm.invoke.patchmatch import PatchMatch
-from ldm.util import debug_image
+from ..ldm.util import debug_image
 def infill_methods()->list[str]:


@@ -6,7 +6,7 @@ import torch
 from .base import Generator
 from .diffusers_pipeline import StableDiffusionGeneratorPipeline, ConditioningData
-from ..models import PostprocessingSettings
+from ..ldm.models import PostprocessingSettings
 class Txt2Img(Generator):
 def __init__(self, model, precision):


@@ -14,7 +14,7 @@ from transformers import CLIPTokenizer, CLIPTextModel
 from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
 from .devices import torch_dtype
-from invokeai.backend.models import InvokeAIDiffuserComponent
+from .models import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
 def get_tokenizer(model) -> CLIPTokenizer:


@@ -5,12 +5,12 @@ from contextlib import contextmanager
 from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
-from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import (
+from ..modules.diffusionmodules.model import Encoder, Decoder
+from ..modules.distributions.distributions import (
 DiagonalGaussianDistribution,
 )
-from ldm.util import instantiate_from_config
+from ..util import instantiate_from_config
 class VQModel(pl.LightningModule):


@@ -15,7 +15,7 @@ from torch import nn
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
 from diffusers.models.cross_attention import AttnProcessor
-from ldm.invoke.devices import torch_dtype
+from ...devices import torch_dtype
 class CrossAttentionType(enum.Enum):


@@ -3,7 +3,7 @@
 import torch
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from .sampler import Sampler
-from ldm.modules.diffusionmodules.util import noise_like
+from ...modules.diffusionmodules.util import noise_like
 class DDIMSampler(Sampler):
 def __init__(self, model, schedule='linear', device=None, **kwargs):


@@ -22,8 +22,8 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
 from omegaconf import ListConfig
 import urllib
-from ldm.modules.textual_inversion_manager import TextualInversionManager
-from ldm.util import (
+from ...modules.textual_inversion_manager import TextualInversionManager
+from ...util import (
 log_txt_as_img,
 exists,
 default,
@@ -33,8 +33,8 @@ from ldm.util import (
 count_params,
 instantiate_from_config,
 )
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import (
+from ...modules.ema import LitEma
+from ...modules.distributions.distributions import (
 normal_kl,
 DiagonalGaussianDistribution,
 )
@@ -43,7 +43,7 @@ from ..autoencoder import (
 IdentityFirstStage,
 AutoencoderKL,
 )
-from ldm.modules.diffusionmodules.util import (
+from ...modules.diffusionmodules.util import (
 make_beta_schedule,
 extract_into_tensor,
 noise_like,


@@ -4,10 +4,10 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ldm.invoke.devices import choose_torch_device
+from ...devices import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from .sampler import Sampler
-from ldm.modules.diffusionmodules.util import noise_like
+from ...modules.diffusionmodules.util import noise_like
 class PLMSSampler(Sampler):


@@ -7,10 +7,10 @@ import torch
 import numpy as np
 from tqdm import tqdm
 from functools import partial
-from ldm.invoke.devices import choose_torch_device
+from ...devices import choose_torch_device
 from .shared_invokeai_diffusion import InvokeAIDiffuserComponent
-from ldm.modules.diffusionmodules.util import (
+from ...modules.diffusionmodules.util import (
 make_ddim_sampling_parameters,
 make_ddim_timesteps,
 noise_like,


@@ -31,14 +31,14 @@ from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
-from ldm.invoke.devices import CPU_DEVICE
+from ..devices import CPU_DEVICE
 from ldm.invoke.globals import Globals, global_cache_dir
-from ldm.util import (
+from ..util import (
 ask_user,
 download_with_resume,
 url_attachment_name,
 )
-from ..generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
+from ...generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 class SDLegacyType(Enum):


@@ -7,8 +7,8 @@ import torch.nn.functional as F
 from torch import nn, einsum
 from einops import rearrange, repeat
-from invokeai.backend.models.diffusion import InvokeAICrossAttentionMixin
-from ldm.modules.diffusionmodules.util import checkpoint
+from ..models.diffusion import InvokeAICrossAttentionMixin
+from .diffusionmodules.util import checkpoint
 def exists(val):
 return val is not None


@@ -7,8 +7,8 @@ from torch.nn.functional import silu
 import numpy as np
 from einops import rearrange
-from ldm.util import instantiate_from_config
-from ldm.modules.attention import LinearAttention
+from ...util import instantiate_from_config
+from ..attention import LinearAttention
 import psutil


@@ -15,7 +15,7 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat
-from ldm.util import instantiate_from_config
+from ...util import instantiate_from_config
 def make_beta_schedule(


(binary image asset: 431 KiB before, 431 KiB after)


@@ -9,7 +9,7 @@ from picklescan.scanner import scan_file_path
 from transformers import CLIPTextModel, CLIPTokenizer
 from compel.embeddings_provider import BaseTextualInversionManager
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
+from ..concepts_lib import HuggingFaceConceptsLibrary
 @dataclass


@@ -15,7 +15,7 @@ import torch
 from PIL import Image, ImageDraw, ImageFont
 from tqdm import tqdm
-from ldm.invoke.devices import torch_dtype
+from .devices import torch_dtype
 def log_txt_as_img(wh, xc, size=10):


@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 5.0e-03
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
 embedding_reg_weight: 0.0
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ["sculpture"]
@@ -28,7 +28,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -68,7 +68,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 data:
 target: main.DataModuleFromConfig
@@ -77,14 +77,14 @@ data:
 num_workers: 2
 wrap: false
 train:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: train
 per_image_tokens: false
 repeats: 100
 validation:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: val


@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 5.0e-03
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
 embedding_reg_weight: 0.0
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ["painting"]
@@ -27,7 +27,7 @@ model:
 num_vectors_per_token: 1
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -67,7 +67,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 data:
 target: main.DataModuleFromConfig
@@ -76,14 +76,14 @@ data:
 num_workers: 16
 wrap: false
 train:
-target: ldm.data.personalized_style.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
 params:
 size: 512
 set: train
 per_image_tokens: false
 repeats: 100
 validation:
-target: ldm.data.personalized_style.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
 params:
 size: 512
 set: val


@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 1.0e-04
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -18,7 +18,7 @@ model:
 use_ema: False
 scheduler_config: # 10000 warmup steps
-target: ldm.lr_scheduler.LambdaLinearScheduler
+target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
 params:
 warm_up_steps: [ 10000 ]
 cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@@ -27,7 +27,7 @@ model:
 f_min: [ 1. ]
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ['sculpture']
@@ -36,7 +36,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -76,4 +76,4 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder


@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 7.5e-05
-target: invokeai.models.diffusion.ddpm.LatentInpaintDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentInpaintDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -18,7 +18,7 @@ model:
 finetune_keys: null
 scheduler_config: # 10000 warmup steps
-target: ldm.lr_scheduler.LambdaLinearScheduler
+target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
 params:
 warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
 cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@@ -27,7 +27,7 @@ model:
 f_min: [ 1. ]
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ['sculpture']
@@ -36,7 +36,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 9 # 4 data + 4 downscaled image + 1 mask
@@ -76,4 +76,4 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder


@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 5.0e-03
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 linear_start: 0.00085
 linear_end: 0.0120
@@ -19,7 +19,7 @@ model:
 embedding_reg_weight: 0.0
 personalization_config:
-target: ldm.modules.embedding_manager.EmbeddingManager
+target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
 params:
 placeholder_strings: ["*"]
 initializer_words: ['sculpture']
@@ -28,7 +28,7 @@ model:
 progressive_words: False
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 image_size: 32 # unused
 in_channels: 4
@@ -68,7 +68,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
 data:
 target: main.DataModuleFromConfig
@@ -77,14 +77,14 @@ data:
 num_workers: 2
 wrap: false
 train:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: train
 per_image_tokens: false
 repeats: 100
 validation:
-target: ldm.data.personalized.PersonalizedBase
+target: invokeai.backend.ldm.data.personalized.PersonalizedBase
 params:
 size: 512
 set: val


@@ -1,6 +1,6 @@
 model:
 base_learning_rate: 1.0e-4
-target: invokeai.models.diffusion.ddpm.LatentDiffusion
+target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
 params:
 parameterization: "v"
 linear_start: 0.00085
@@ -19,7 +19,7 @@ model:
 use_ema: False # we set this to false because this is an inference only config
 unet_config:
-target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
 params:
 use_checkpoint: True
 use_fp16: True
@@ -62,7 +62,7 @@ model:
 target: torch.nn.Identity
 cond_stage_config:
-target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+target: invokeai.backend.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
 params:
 freeze: True
 layer: "penultimate"


@@ -25,14 +25,14 @@ from omegaconf import OmegaConf
 from PIL import Image, ImageOps
 from pytorch_lightning import logging, seed_everything
-import ldm.invoke.conditioning
-from invokeai.backend.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler)
+import invokeai.backend.ldm.conditioning
+from invokeai.backend.ldm.models import (ModelManager,DDIMSampler, KSampler, PLMSSampler)
 from invokeai.backend.generator import infill_methods
 from ldm.invoke.args import metadata_from_png
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
-from ldm.invoke.conditioning import get_uc_and_c_and_ec
-from ldm.invoke.devices import choose_precision, choose_torch_device
+from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary
+from invokeai.backend.ldm.conditioning import get_uc_and_c_and_ec
+from invokeai.backend.ldm.devices import choose_precision, choose_torch_device
 from ldm.invoke.globals import Globals, global_cache_dir
 from ldm.invoke.image_util import InitImageResizer
 from ldm.invoke.pngwriter import PngWriter


@@ -25,10 +25,10 @@ from invokeai.backend.generator import PipelineIntermediateState
 from .globals import Globals
 from .image_util import make_grid
 from .log import write_log
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
-from ..util import url_attachment_name
+from invokeai.backend.ldm.util import url_attachment_name
 # global used in multiple functions (fix)
 infile = None


@@ -97,7 +97,7 @@ from typing import List
 import ldm.invoke
 import ldm.invoke.pngwriter
-from ldm.invoke.conditioning import split_weighted_subprompts
+from invokeai.backend.ldm.conditioning import split_weighted_subprompts
 from ldm.invoke.globals import Globals


@@ -22,7 +22,7 @@ from npyscreen import widget
 from omegaconf import OmegaConf
 from shutil import get_terminal_size
-from ..devices import choose_precision, choose_torch_device
+from invokeai.backend.ldm.devices import choose_precision, choose_torch_device
 from ..globals import Globals, global_config_dir
 from .model_install_backend import (Dataset_path, default_config_file,
 default_dataset, get_root,


@@ -20,7 +20,7 @@ from typing import List
 import invokeai.configs as configs
 from invokeai.backend.generator import StableDiffusionGeneratorPipeline
 from ..globals import Globals, global_cache_dir, global_config_dir
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 warnings.filterwarnings("ignore")


@@ -23,7 +23,7 @@ from omegaconf import OmegaConf
 from ldm.invoke.config.widgets import FloatTitleSlider
 from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
 global_models_dir, global_set_root)
-from invokeai.backend.models import ModelManager
+from invokeai.backend.ldm.models import ModelManager
 DEST_MERGED_MODEL_DIR = "merged_models"


@@ -12,7 +12,7 @@ import os
 import re
 import atexit
 from ldm.invoke.args import Args
-from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
+from invokeai.backend.ldm.concepts_lib import HuggingFaceConceptsLibrary
 from ldm.invoke.globals import Globals
 # ---------------readline utilities---------------------


@@ -130,7 +130,7 @@ version = { attr = "ldm.invoke.__version__" }
 [tool.setuptools.packages.find]
 "where" = ["."]
 "include" = [
-"invokeai.assets.web*", "invokeai.models*",
+"invokeai.assets.web*",
 "invokeai.generator*","invokeai.backend*",
 "invokeai.frontend.dist*", "invokeai.configs*",
 "ldm*"