Merge branch 'main' into feat/ip-adapter

Ryan Dick
2023-09-15 13:15:25 -04:00
266 changed files with 8450 additions and 2829 deletions

View File

@@ -1,5 +1,5 @@
 """
 Initialization file for invokeai.backend
 """
-from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo  # noqa: F401
+from .model_management import BaseModelType, ModelCache, ModelInfo, ModelManager, ModelType, SubModelType  # noqa: F401
 from .model_management.models import SilenceWarnings  # noqa: F401

View File

@@ -3,12 +3,13 @@ This module defines a singleton object, "invisible_watermark" that
 wraps the invisible watermark model. It respects the global "invisible_watermark"
 configuration variable, that allows the watermarking to be suppressed.
 """
-import numpy as np
 import cv2
-from PIL import Image
+import numpy as np
 from imwatermark import WatermarkEncoder
-from invokeai.app.services.config import InvokeAIAppConfig
+from PIL import Image
+
 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
 
 config = InvokeAIAppConfig.get_config()

View File

@@ -5,6 +5,7 @@ wraps the actual patchmatch object. It respects the global
 be suppressed or deferred
 """
 import numpy as np
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig

View File

@@ -5,10 +5,11 @@ configuration variable, that allows the checker to be suppressed.
 """
 import numpy as np
 from PIL import Image
-from invokeai.backend import SilenceWarnings
-from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.util.devices import choose_torch_device
+
 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend import SilenceWarnings
+from invokeai.backend.util.devices import choose_torch_device
 
 config = InvokeAIAppConfig.get_config()

View File

@@ -2,9 +2,8 @@
 Check that the invokeai_root is correctly configured and exit if not.
 """
 import sys
-from invokeai.app.services.config import (
-    InvokeAIAppConfig,
-)
+from invokeai.app.services.config import InvokeAIAppConfig
 
 def check_invokeai_root(config: InvokeAIAppConfig):

View File

@@ -6,68 +6,56 @@
 #
 # Coauthor: Kevin Turner http://github.com/keturn
 #
-import sys
 import argparse
 import io
 import os
-import psutil
 import shutil
+import sys
 import textwrap
-import torch
 import traceback
-import yaml
 import warnings
 from argparse import Namespace
 from enum import Enum
 from pathlib import Path
 from shutil import get_terminal_size
-from typing import get_type_hints, get_args, Any
+from typing import Any, get_args, get_type_hints
 from urllib import request
 
 import npyscreen
-import transformers
 import omegaconf
+import psutil
+import torch
+import transformers
+import yaml
 from diffusers import AutoencoderKL
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from huggingface_hub import HfFolder
 from huggingface_hub import login as hf_hub_login
 from omegaconf import OmegaConf
+from pydantic.error_wrappers import ValidationError
 from tqdm import tqdm
-from transformers import (
-    CLIPTextModel,
-    CLIPTextConfig,
-    CLIPTokenizer,
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-)
-import invokeai.configs as configs
-from invokeai.app.services.config import (
-    InvokeAIAppConfig,
-)
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
+
+import invokeai.configs as configs
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend.install.legacy_arg_parsing import legacy_parser
+from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, hf_download_from_pretrained
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelType
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
 
 # TO DO - Move all the frontend code into invokeai.frontend.install
 from invokeai.frontend.install.widgets import (
-    SingleSelectColumnsSimple,
-    MultiSelectColumns,
-    CenteredButtonPress,
-    FileBox,
-    set_min_terminal_size,
-    CyclingForm,
     MIN_COLS,
     MIN_LINES,
+    CenteredButtonPress,
+    CyclingForm,
+    FileBox,
+    MultiSelectColumns,
+    SingleSelectColumnsSimple,
+    WindowTooSmallException,
+    set_min_terminal_size,
 )
-from invokeai.backend.install.legacy_arg_parsing import legacy_parser
-from invokeai.backend.install.model_install_backend import (
-    hf_download_from_pretrained,
-    InstallSelections,
-    ModelInstall,
-)
-from invokeai.backend.model_management.model_probe import ModelType, BaseModelType
-from pydantic.error_wrappers import ValidationError
 
 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()
@@ -507,7 +495,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
                 scroll_exit=True,
             )
         else:
-            self.vram_cache_size = DummyWidgetValue.zero
+            self.vram = DummyWidgetValue.zero
         self.nextrely += 1
         self.outdir = self.add_widget_intelligent(
             FileBox,
@@ -605,7 +593,8 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
             "vram",
             "outdir",
         ]:
-            setattr(new_opts, attr, getattr(self, attr).value)
+            if hasattr(self, attr):
+                setattr(new_opts, attr, getattr(self, attr).value)
 
         for attr in self.autoimport_dirs:
             directory = Path(self.autoimport_dirs[attr].value)
View File

@@ -3,33 +3,26 @@ Migrate the models directory and models.yaml file from an existing
 InvokeAI 2.3 installation to 3.0.0.
 """
-import os
 import argparse
+import os
 import shutil
-import yaml
-import transformers
-import diffusers
 import warnings
 from dataclasses import dataclass
 from pathlib import Path
-from omegaconf import OmegaConf, DictConfig
 from typing import Union
-from diffusers import StableDiffusionPipeline, AutoencoderKL
+
+import diffusers
+import transformers
+import yaml
+from diffusers import AutoencoderKL, StableDiffusionPipeline
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from transformers import (
-    CLIPTextModel,
-    CLIPTokenizer,
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-)
+from omegaconf import DictConfig, OmegaConf
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer
 
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.model_management import ModelManager
-from invokeai.backend.model_management.model_probe import ModelProbe, ModelType, BaseModelType, ModelProbeInfo
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelProbe, ModelProbeInfo, ModelType
 
 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()

View File

@@ -19,18 +19,8 @@ from tqdm import tqdm
 
 import invokeai.configs as configs
 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_management import (
-    AddModelResult,
-    BaseModelType,
-    ModelManager,
-    ModelType,
-    ModelVariantType,
-)
-from invokeai.backend.model_management.model_probe import (
-    ModelProbe,
-    ModelProbeInfo,
-    SchedulerPredictionType,
-)
+from invokeai.backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
+from invokeai.backend.model_management.model_probe import ModelProbe, ModelProbeInfo, SchedulerPredictionType
 from invokeai.backend.util import download_with_resume
 from invokeai.backend.util.devices import choose_torch_device, torch_dtype

View File

@@ -1,15 +1,19 @@
 """
 Initialization file for invokeai.backend.model_management
 """
-from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType  # noqa: F401
-from .model_cache import ModelCache  # noqa: F401
+# This import must be first
+from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType  # noqa: F401 isort: split
+
 from .lora import ModelPatcher, ONNXModelPatcher  # noqa: F401
+from .model_cache import ModelCache  # noqa: F401
 from .models import (  # noqa: F401
     BaseModelType,
-    ModelType,
-    SubModelType,
-    ModelVariantType,
-    ModelNotFoundException,
     DuplicateModelException,
+    ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SubModelType,
 )
-from .model_merge import ModelMerger, MergeInterpolationMethod  # noqa: F401
+
+# This import must be last
+from .model_merge import ModelMerger, MergeInterpolationMethod  # noqa: F401 isort: split
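The trailing `isort: split` comments are what let this file keep its required ordering through future isort runs: the marker ends one sorting block and begins another, so the pinned first and last imports stay put while everything between them is sorted normally. A minimal sketch with hypothetical module names, assuming the trailing form of the comment behaves as it does in the lines above:

from app.bootstrap import configure  # isort: split

# sorted independently of the pinned import above
import alpha
import zulu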

View File

@@ -25,12 +25,7 @@ from typing import Optional, Union
 
 import requests
 import torch
-from diffusers.models import (
-    AutoencoderKL,
-    ControlNetModel,
-    PriorTransformer,
-    UNet2DConditionModel,
-)
+from diffusers.models import AutoencoderKL, ControlNetModel, PriorTransformer, UNet2DConditionModel
 from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
 from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline
@@ -64,6 +59,7 @@ from transformers import (
 
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.util.logging import InvokeAILogger
+
 from .models import BaseModelType, ModelVariantType
 
 try:
@@ -1203,8 +1199,8 @@ def download_from_original_stable_diffusion_ckpt(
         StableDiffusionControlNetPipeline,
         StableDiffusionInpaintPipeline,
         StableDiffusionPipeline,
-        StableDiffusionXLPipeline,
         StableDiffusionXLImg2ImgPipeline,
+        StableDiffusionXLPipeline,
         StableUnCLIPImg2ImgPipeline,
         StableUnCLIPPipeline,
     )

View File

@@ -2,8 +2,8 @@ from __future__ import annotations
 
 import copy
 from contextlib import contextmanager
-from typing import Optional, Dict, Tuple, Any, Union, List
 from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -14,7 +14,6 @@ from transformers import CLIPTextModel, CLIPTokenizer
 
 from .models.lora import LoRAModel
 
 """
 loras = [
     (lora_model1, 0.7),
@@ -307,9 +306,10 @@ class TextualInversionManager(BaseTextualInversionManager):
 
 class ONNXModelPatcher:
-    from .models.base import IAIOnnxRuntimeModel
     from diffusers import OnnxRuntimeModel
+
+    from .models.base import IAIOnnxRuntimeModel
 
     @classmethod
     @contextmanager
     def apply_lora_unet(

View File

@@ -17,18 +17,23 @@ context. Use like this:
 """
 import gc
+import hashlib
 import os
 import sys
-import hashlib
 from contextlib import suppress
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Dict, Union, types, Optional, Type, Any
+from typing import Any, Dict, Optional, Type, Union, types
 
 import torch
 
 import invokeai.backend.util.logging as logger
 
-from .models import BaseModelType, ModelType, SubModelType, ModelBase
 from ..util.devices import choose_torch_device
+from .models import BaseModelType, ModelBase, ModelType, SubModelType
+
+if choose_torch_device() == torch.device("mps"):
+    from torch import mps
 
 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
@@ -405,6 +410,8 @@ class ModelCache(object):
         gc.collect()
         torch.cuda.empty_cache()
+        if choose_torch_device() == torch.device("mps"):
+            mps.empty_cache()
 
         self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}")
@@ -425,6 +432,8 @@
         gc.collect()
         torch.cuda.empty_cache()
+        if choose_torch_device() == torch.device("mps"):
+            mps.empty_cache()
 
     def _local_model_hash(self, model_path: Union[str, Path]) -> str:
         sha = hashlib.sha256()
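The same two-line MPS guard now appears in both cleanup paths above. If it grew a third call site, one option — a sketch, not something this commit does — would be a small device-aware helper:

import gc

import torch

def empty_device_cache(device: torch.device) -> None:
    # Drop unreferenced Python objects first, then release cached allocator blocks.
    gc.collect()
    if device.type == "cuda":
        torch.cuda.empty_cache()
    elif device.type == "mps":
        from torch import mps  # present on macOS builds of recent torch

        mps.empty_cache()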

View File

@@ -9,13 +9,14 @@ Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
 import warnings
 from enum import Enum
 from pathlib import Path
+from typing import List, Optional, Union
 
 from diffusers import DiffusionPipeline
 from diffusers import logging as dlogging
-from typing import List, Union, Optional
 
 import invokeai.backend.util.logging as logger
 
-from ...backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
+from ...backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
 
 class MergeInterpolationMethod(str, Enum):

View File

@@ -8,18 +8,7 @@ from abc import ABCMeta, abstractmethod
 from contextlib import suppress
 from enum import Enum
 from pathlib import Path
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    Generic,
-    List,
-    Literal,
-    Optional,
-    Type,
-    TypeVar,
-    Union,
-)
+from typing import Any, Callable, Dict, Generic, List, Literal, Optional, Type, TypeVar, Union
 
 import numpy as np
 import onnx

View File

@@ -1,23 +1,26 @@
 import os
-import torch
 from enum import Enum
 from pathlib import Path
-from typing import Optional, Literal
+from typing import Literal, Optional
+
+import torch
+
+import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
+
 from .base import (
+    BaseModelType,
+    EmptyConfigLoader,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
-    BaseModelType,
+    ModelNotFoundException,
     ModelType,
     SubModelType,
-    EmptyConfigLoader,
-    calc_model_size_by_fs,
     calc_model_size_by_data,
+    calc_model_size_by_fs,
     classproperty,
-    InvalidModelException,
-    ModelNotFoundException,
 )
-from invokeai.app.services.config import InvokeAIAppConfig
-import invokeai.backend.util.logging as logger
 
 class ControlNetModelFormat(str, Enum):

View File

@@ -1,19 +1,21 @@
-import os
 import json
+import os
 from enum import Enum
-from pydantic import Field
 from typing import Literal, Optional
+
+from omegaconf import OmegaConf
+from pydantic import Field
+
 from .base import (
-    ModelConfigBase,
     BaseModelType,
+    DiffusersModel,
+    InvalidModelException,
+    ModelConfigBase,
     ModelType,
     ModelVariantType,
-    DiffusersModel,
-    read_checkpoint_meta,
     classproperty,
-    InvalidModelException,
+    read_checkpoint_meta,
 )
-from omegaconf import OmegaConf
 
 class StableDiffusionXLModelFormat(str, Enum):

View File

@@ -1,26 +1,29 @@
-import os
 import json
+import os
 from enum import Enum
-from pydantic import Field
 from pathlib import Path
 from typing import Literal, Optional, Union
+
 from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
-from .base import (
-    ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    ModelVariantType,
-    DiffusersModel,
-    SilenceWarnings,
-    read_checkpoint_meta,
-    classproperty,
-    InvalidModelException,
-    ModelNotFoundException,
-)
-from .sdxl import StableDiffusionXLModel
+from omegaconf import OmegaConf
+from pydantic import Field
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
-from omegaconf import OmegaConf
+
+from .base import (
+    BaseModelType,
+    DiffusersModel,
+    InvalidModelException,
+    ModelConfigBase,
+    ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SilenceWarnings,
+    classproperty,
+    read_checkpoint_meta,
+)
+from .sdxl import StableDiffusionXLModel
 
 class StableDiffusion1ModelFormat(str, Enum):
@@ -272,8 +275,8 @@ def _convert_ckpt_and_cache(
         return output_path
 
     # to avoid circular import errors
-    from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
     from ...util.devices import choose_torch_device, torch_dtype
+    from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
 
     model_base_to_model_type = {
         BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder",
View File

@@ -2,15 +2,16 @@ from enum import Enum
 from typing import Literal
 
 from diffusers import OnnxRuntimeModel
+
 from .base import (
-    ModelConfigBase,
     BaseModelType,
+    DiffusersModel,
+    IAIOnnxRuntimeModel,
+    ModelConfigBase,
     ModelType,
     ModelVariantType,
-    DiffusersModel,
     SchedulerPredictionType,
     classproperty,
-    IAIOnnxRuntimeModel,
 )

View File

@@ -1,19 +1,20 @@
 import os
-import torch
 from typing import Optional
-from .base import (
-    ModelBase,
-    ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    SubModelType,
-    classproperty,
-    ModelNotFoundException,
-    InvalidModelException,
-)
+
+import torch
+
 # TODO: naming
 from ..lora import TextualInversionModel as TextualInversionModelRaw
+from .base import (
+    BaseModelType,
+    InvalidModelException,
+    ModelBase,
+    ModelConfigBase,
+    ModelNotFoundException,
+    ModelType,
+    SubModelType,
+    classproperty,
+)
 
 class TextualInversionModel(ModelBase):

View File

@@ -8,19 +8,20 @@ import torch
 from omegaconf import OmegaConf
 
 from invokeai.app.services.config import InvokeAIAppConfig
+
 from .base import (
+    BaseModelType,
+    EmptyConfigLoader,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    SubModelType,
-    ModelVariantType,
-    EmptyConfigLoader,
-    calc_model_size_by_fs,
-    calc_model_size_by_data,
-    classproperty,
-    InvalidModelException,
     ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SubModelType,
+    calc_model_size_by_data,
+    calc_model_size_by_fs,
+    classproperty,
 )

View File

@@ -1,9 +1,6 @@
 """
 Initialization file for the invokeai.backend.stable_diffusion package
 """
-from .diffusers_pipeline import (  # noqa: F401
-    PipelineIntermediateState,
-    StableDiffusionGeneratorPipeline,
-)
+from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline  # noqa: F401
 from .diffusion import InvokeAIDiffuserComponent  # noqa: F401
 from .diffusion.cross_attention_map_saving import AttentionMapSaver  # noqa: F401

View File

@@ -12,12 +12,8 @@ import torchvision.transforms as T
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.models.controlnet import ControlNetModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
-    StableDiffusionPipeline,
-)
-from diffusers.pipelines.stable_diffusion.safety_checker import (
-    StableDiffusionSafetyChecker,
-)
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
 from diffusers.utils.import_utils import is_xformers_available
@@ -27,9 +23,7 @@ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
-    ConditioningData,
-)
+from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningData
 
 from ..util import auto_detect_slice_size, normalize_device
 from .diffusion import AttentionMapSaver, InvokeAIDiffuserComponent

View File

@@ -11,12 +11,7 @@ import diffusers
 import psutil
 import torch
 from compel.cross_attention_control import Arguments
-from diffusers.models.attention_processor import (
-    Attention,
-    AttentionProcessor,
-    AttnProcessor,
-    SlicedAttnProcessor,
-)
+from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
 from torch import nn

View File

@@ -1,18 +1,18 @@
 from diffusers import (
     DDIMScheduler,
+    DDPMScheduler,
+    DEISMultistepScheduler,
     DPMSolverMultistepScheduler,
-    KDPM2DiscreteScheduler,
-    KDPM2AncestralDiscreteScheduler,
-    EulerDiscreteScheduler,
+    DPMSolverSDEScheduler,
+    DPMSolverSinglestepScheduler,
     EulerAncestralDiscreteScheduler,
+    EulerDiscreteScheduler,
     HeunDiscreteScheduler,
+    KDPM2AncestralDiscreteScheduler,
+    KDPM2DiscreteScheduler,
     LMSDiscreteScheduler,
     PNDMScheduler,
     UniPCMultistepScheduler,
-    DPMSolverSinglestepScheduler,
-    DEISMultistepScheduler,
-    DDPMScheduler,
-    DPMSolverSDEScheduler,
 )
 
 SCHEDULER_MAP = dict(

View File

@@ -24,13 +24,8 @@ import torch.utils.checkpoint
 import transformers
 from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import set_seed, ProjectConfiguration
-from diffusers import (
-    AutoencoderKL,
-    DDPMScheduler,
-    StableDiffusionPipeline,
-    UNet2DConditionModel,
-)
+from accelerate.utils import ProjectConfiguration, set_seed
+from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
 from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
 from diffusers.utils.import_utils import is_xformers_available

View File

@@ -1,6 +1,7 @@
 """
 Initialization file for invokeai.backend.util
 """
+from .attention import auto_detect_slice_size  # noqa: F401
 from .devices import (  # noqa: F401
     CPU_DEVICE,
     CUDA_DEVICE,
@@ -10,11 +11,4 @@ from .devices import (  # noqa: F401
     normalize_device,
     torch_dtype,
 )
-from .util import (  # noqa: F401
-    ask_user,
-    download_with_resume,
-    instantiate_from_config,
-    url_attachment_name,
-    Chdir,
-)
-from .attention import auto_detect_slice_size  # noqa: F401
+from .util import Chdir, ask_user, download_with_resume, instantiate_from_config, url_attachment_name  # noqa: F401

View File

@@ -3,8 +3,8 @@
 Utility routine used for autodetection of optimal slice size
 for attention mechanism.
 """
-import torch
 import psutil
+import torch
 
 def auto_detect_slice_size(latents: torch.Tensor) -> str:

View File

@@ -1,12 +1,13 @@
 from __future__ import annotations
 
-from contextlib import nullcontext
-from packaging import version
 import platform
+from contextlib import nullcontext
+from typing import Union
 
 import torch
+from packaging import version
 from torch import autocast
-from typing import Union
 
 from invokeai.app.services.config import InvokeAIAppConfig
 
 CPU_DEVICE = torch.device("cpu")

View File

@@ -772,11 +772,13 @@ diffusers.models.controlnet.ControlNetModel = ControlNetModel
 
 # NOTE: with this patch, torch.compile crashes on 2.0 torch (already fixed in nightly)
 # https://github.com/huggingface/diffusers/pull/4315
 # https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/lora.py#L96C18-L96C18
-def new_LoRACompatibleConv_forward(self, x):
+def new_LoRACompatibleConv_forward(self, hidden_states, scale: float = 1.0):
     if self.lora_layer is None:
-        return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x)
+        return super(diffusers.models.lora.LoRACompatibleConv, self).forward(hidden_states)
     else:
-        return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x) + self.lora_layer(x)
+        return super(diffusers.models.lora.LoRACompatibleConv, self).forward(hidden_states) + (
+            scale * self.lora_layer(hidden_states)
+        )
 
 diffusers.models.lora.LoRACompatibleConv.forward = new_LoRACompatibleConv_forward
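For context on the signature change: newer diffusers releases pass a `scale` argument into LoRACompatibleConv.forward, so the monkey-patch must accept and apply it. A quick sanity check of the patched layer — a sketch assuming a diffusers version that still ships diffusers.models.lora.LoRACompatibleConv:

import diffusers
import torch

# With no lora_layer attached, the patched forward falls through to the plain Conv2d path.
conv = diffusers.models.lora.LoRACompatibleConv(4, 8, kernel_size=3, padding=1)
out = conv(torch.randn(1, 4, 16, 16))
assert out.shape == (1, 8, 16, 16)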

View File

@@ -178,7 +178,6 @@ InvokeAI:
 
 import logging.handlers
 import socket
 import urllib.parse
-from abc import abstractmethod
 from pathlib import Path

View File

@@ -1,11 +1,10 @@
+import base64
 import importlib
+import io
 import math
 import multiprocessing as mp
 import os
 import re
-import io
-import base64
 from collections import abc
 from inspect import isfunction
 from pathlib import Path
@@ -19,6 +18,7 @@ from PIL import Image, ImageDraw, ImageFont
 from tqdm import tqdm
 
 import invokeai.backend.util.logging as logger
+
 from .devices import torch_dtype