fix(config): use new get_config across the app, use correct settings

psychedelicious
2024-03-11 23:01:48 +11:00
parent 7b1f9409bc
commit 897fe497dc
18 changed files with 36 additions and 32 deletions
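
Every call site below replaces the classmethod lookup `InvokeAIAppConfig.get_config()` with a module-level `get_config()` accessor imported from `invokeai.app.services.config.config_default`. As a rough sketch of the pattern only (the accessor's actual implementation is not shown in this diff), such a function typically caches one settings instance per process:

# Illustrative sketch of a cached settings accessor -- assumed shape only;
# the real get_config lives in invokeai.app.services.config.config_default.
from dataclasses import dataclass
from functools import lru_cache


@dataclass
class AppConfigSketch:
    # Hypothetical subset of the settings used in the hunks below.
    precision: str = "auto"
    attention_type: str = "auto"


@lru_cache(maxsize=1)
def get_config() -> AppConfigSketch:
    # Build the settings object once; every caller shares the same instance.
    return AppConfigSketch()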

View File

@@ -9,14 +9,14 @@ from einops import repeat
 from PIL import Image
 from torchvision.transforms import Compose

-from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
 from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
 from invokeai.backend.util.devices import choose_torch_device
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.backend.util.util import download_with_progress_bar

-config = InvokeAIAppConfig.get_config()
+config = get_config()
 logger = InvokeAILogger.get_logger(config=config)

 DEPTH_ANYTHING_MODELS = {

View File

@@ -6,7 +6,7 @@ import pathlib
 import numpy as np
 import onnxruntime as ort

-from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.util.devices import choose_torch_device
 from invokeai.backend.util.util import download_with_progress_bar
@@ -24,7 +24,7 @@ DWPOSE_MODELS = {
     },
 }

-config = InvokeAIAppConfig.get_config()
+config = get_config()


 class Wholebody:

View File

@@ -6,9 +6,11 @@ import torch
 from PIL import Image

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.util.devices import choose_torch_device

+config = get_config()
+

 def norm_img(np_img):
     if len(np_img.shape) == 2:
@@ -29,7 +31,7 @@ def load_jit_model(url_or_path, device):
 class LaMA:
     def __call__(self, input_image: Image.Image, *args: Any, **kwds: Any) -> Any:
         device = choose_torch_device()
-        model_location = get_invokeai_config().models_path / "core/misc/lama/lama.pt"
+        model_location = get_config().models_path / "core/misc/lama/lama.pt"
         model = load_jit_model(model_location, device)

         image = np.asarray(input_image.convert("RGB"))

View File

@@ -8,9 +8,9 @@ be suppressed or deferred
 import numpy as np

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

-config = InvokeAIAppConfig.get_config()
+config = get_config()


 class PatchMatch:

View File

@@ -33,11 +33,11 @@ from PIL import Image, ImageOps
 from transformers import AutoProcessor, CLIPSegForImageSegmentation

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

 CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352

-config = InvokeAIAppConfig.get_config()
+config = get_config()


 class SegmentedGrayscale(object):

View File

@@ -118,7 +118,7 @@ class ModelMerger(object):
         config = self._installer.app_config
         store = self._installer.record_store
         base_models: Set[BaseModelType] = set()
-        variant = None if self._installer.app_config.full_precision else "fp16"
+        variant = None if self._installer.app_config.precision == "float32" else "fp16"

         assert (
             len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
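
This hunk is the "use correct settings" half of the commit title: the old boolean `full_precision` setting gives way to a string-valued `precision` setting, so the fp16 merge variant is now requested unless precision is explicitly "float32". A minimal sketch of the selection logic (the helper name is illustrative, not the project's code):

from typing import Optional


def pick_variant(precision: str) -> Optional[str]:
    # Mirrors the changed line: full float32 installs take the default
    # weights; anything else downloads the "fp16" variant.
    return None if precision == "float32" else "fp16"


assert pick_variant("float32") is None
assert pick_variant("float16") == "fp16"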

View File

@@ -21,7 +21,7 @@ from diffusers.utils.outputs import BaseOutput
 from pydantic import Field
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
 from invokeai.backend.ip_adapter.unet_patcher import UNetPatcher
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningData
@@ -251,7 +251,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         """
         if xformers is available, use it, otherwise use sliced attention.
         """
-        config = InvokeAIAppConfig.get_config()
+        config = get_config()

         if config.attention_type == "xformers":
             self.enable_xformers_memory_efficient_attention()
             return
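
Per the docstring in this hunk, the method prefers xformers when configured and otherwise falls back to sliced attention; only the xformers branch is visible above. A hedged sketch of that dispatch, with the fallback reconstructed from the docstring (the project's exact fallback call may differ):

def adjust_attention_sketch(pipeline, config) -> None:
    # Sketch of the dispatch described by the docstring above.
    if config.attention_type == "xformers":
        pipeline.enable_xformers_memory_efficient_attention()
        return
    # Assumed fallback per the docstring: diffusers' built-in sliced attention.
    pipeline.enable_attention_slicing(slice_size="auto")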

View File

@@ -8,7 +8,7 @@ import torch
 from diffusers import UNet2DConditionModel
 from typing_extensions import TypeAlias

-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
     ConditioningData,
     ExtraConditioningInfo,
@@ -54,7 +54,7 @@ class InvokeAIDiffuserComponent:
         :param model: the unet model to pass through to cross attention control
         :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning)
         """
-        config = InvokeAIAppConfig.get_config()
+        config = get_config()
         self.conditioning = None
         self.model = model
         self.model_forward_callback = model_forward_callback
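
The docstring above defines the `model_forward_callback` contract: a callable taking `(x, sigma, conditioning_to_apply)` that is invoked repeatedly and usually just delegates to the UNet. A hedged usage sketch; the keyword mapping to the UNet's forward pass and the constructor call are assumptions for illustration:

from diffusers import UNet2DConditionModel

# Assumed: a loaded UNet2DConditionModel to wrap.
unet = UNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet"
)


def model_forward_callback(x, sigma, conditioning_to_apply):
    # Per the docstring: simply call the model's forward pass.
    return unet(x, sigma, encoder_hidden_states=conditioning_to_apply).sample


component = InvokeAIDiffuserComponent(
    model=unet,
    model_forward_callback=model_forward_callback,
)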

View File

@@ -7,11 +7,12 @@ import torch
 from torch import autocast

-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

 CPU_DEVICE = torch.device("cpu")
 CUDA_DEVICE = torch.device("cuda")
 MPS_DEVICE = torch.device("mps")
-config = InvokeAIAppConfig.get_config()
+config = get_config()


 def choose_torch_device() -> torch.device:

View File

@@ -181,6 +181,7 @@ from pathlib import Path
 from typing import Any, Dict, Optional

 from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

 try:
     import syslog
@@ -339,7 +340,7 @@ class InvokeAILogger(object): # noqa D102
     @classmethod
     def get_logger(
-        cls, name: str = "InvokeAI", config: InvokeAIAppConfig = InvokeAIAppConfig.get_config()
+        cls, name: str = "InvokeAI", config: InvokeAIAppConfig = get_config()
     ) -> logging.Logger: # noqa D102
         if name in cls.loggers:
             return cls.loggers[name]
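
One behavior worth noting on the changed signature: `config: InvokeAIAppConfig = get_config()` is a call in a default argument, so it is evaluated once at import time, and every caller that omits `config` shares that single settings object, which suits a process-wide configuration. Typical call-site usage is unchanged:

# Callers that omit `config` pick up the shared default created at import time.
logger = InvokeAILogger.get_logger("my_module")
logger.info("using the shared application configuration")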