mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
fix(config): use new get_config across the app, use correct settings
parent 7b1f9409bc
commit 897fe497dc
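The change is mechanical throughout the diff: the InvokeAIAppConfig.get_config() classmethod is replaced by the standalone get_config() accessor from invokeai.app.services.config.config_default, and a handful of renamed settings fields are picked up along the way. A minimal before/after sketch of the pattern (both import lines appear verbatim in the hunks below):

    # Before: fetch the config singleton through a classmethod on the settings class.
    from invokeai.app.services.config.config_default import InvokeAIAppConfig

    config = InvokeAIAppConfig.get_config()

    # After: a module-level accessor returns the application config object.
    from invokeai.app.services.config.config_default import get_config

    config = get_config()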
@@ -65,7 +65,7 @@ class ApiDependencies:
 logger.info(f"InvokeAI version {__version__}")
 logger.info(f"Root directory = {str(config.root_path)}")

-output_folder = config.output_path
+output_folder = config.outputs_path
 if output_folder is None:
 raise ValueError("Output folder is not set")

@@ -3,9 +3,9 @@ import sys
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path

-from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

-custom_nodes_path = Path(InvokeAIAppConfig.get_config().custom_nodes_path.resolve())
+custom_nodes_path = Path(get_config().custom_nodes_path.resolve())
 custom_nodes_path.mkdir(parents=True, exist_ok=True)

 custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
@@ -33,7 +33,7 @@ from invokeai.app.invocations.fields import (
 FieldKind,
 Input,
 )
-from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.util.metaenum import MetaEnum
 from invokeai.app.util.misc import uuid_string
@@ -191,7 +191,7 @@ class BaseInvocation(ABC, BaseModel):
 @classmethod
 def get_invocations(cls) -> Iterable[BaseInvocation]:
 """Gets all invocations, respecting the allowlist and denylist."""
-app_config = InvokeAIAppConfig.get_config()
+app_config = get_config()
 allowed_invocations: set[BaseInvocation] = set()
 for sc in cls._invocation_classes:
 invocation_type = sc.get_type()
@@ -866,7 +866,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
 vae.to(dtype=torch.float16)
 latents = latents.half()

-if self.tiled or context.config.get().tiled_decode:
+if self.tiled or context.config.get().force_tiled_decode:
 vae.enable_tiling()
 else:
 vae.disable_tiling()
@@ -279,9 +279,9 @@ class ModelInstallService(ModelInstallServiceBase):
 def sync_to_config(self) -> None:
 """Synchronize models on disk to those in the config record store database."""
 self._scan_models_directory()
-if autoimport := self._app_config.autoimport_dir:
+if self._app_config.autoimport_path:
 self._logger.info("Scanning autoimport directory for new models")
-installed = self.scan_directory(self._app_config.root_path / autoimport)
+installed = self.scan_directory(self._app_config.autoimport_path)
 self._logger.info(f"{len(installed)} new models registered")
 self._logger.info("Model installer (re)initialized")

@@ -365,7 +365,7 @@ class ModelInstallService(ModelInstallServiceBase):
 ) -> Path:
 """Download the model file located at source to the models cache and return its Path."""
 model_hash = sha256(str(source).encode("utf-8")).hexdigest()[0:32]
-model_path = self._app_config.models_convert_cache_path / model_hash
+model_path = self._app_config.convert_cache_path / model_hash

 # We expect the cache directory to contain one and only one downloaded file.
 # We don't know the file's name in advance, as it is set by the download
@@ -591,7 +591,7 @@ class ModelInstallService(ModelInstallServiceBase):

 # add 'main' specific fields
 if isinstance(info, CheckpointConfigBase):
-legacy_conf = (self.app_config.root_dir / self.app_config.legacy_conf_dir / info.config_path).resolve()
+legacy_conf = (self.app_config.legacy_conf_path / info.config_path).resolve()
 info.config_path = legacy_conf.as_posix()
 self.record_store.add_model(info)
 return info.key
@@ -78,13 +78,13 @@ class ModelManagerService(ModelManagerServiceBase):
 logger.setLevel(app_config.log_level.upper())

 ram_cache = ModelCache(
-max_cache_size=app_config.ram_cache_size,
-max_vram_cache_size=app_config.vram_cache_size,
+max_cache_size=app_config.ram,
+max_vram_cache_size=app_config.vram,
 logger=logger,
 execution_device=execution_device,
 )
 convert_cache = ModelConvertCache(
-cache_path=app_config.models_convert_cache_path, max_size=app_config.convert_cache_size
+cache_path=app_config.convert_cache_path, max_size=app_config.convert_cache
 )
 loader = ModelLoadService(
 app_config=app_config,
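For reference, the ModelCache and ModelConvertCache sizes above are now read from the renamed settings fields (ram, vram, convert_cache, convert_cache_path) instead of the old *_cache_size names. A hedged sketch of reading the same values, assuming they are plain attributes on the config object:

    from invokeai.app.services.config.config_default import get_config

    config = get_config()

    # Field names come from the hunk above; exact types (floats in GB, Path) are assumed.
    print(config.ram)                 # was config.ram_cache_size
    print(config.vram)                # was config.vram_cache_size
    print(config.convert_cache)       # was config.convert_cache_size
    print(config.convert_cache_path)  # was config.models_convert_cache_path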
@@ -151,7 +151,7 @@ class SqliteSessionQueue(SessionQueueBase):

 # TODO: how does this work in a multi-user scenario?
 current_queue_size = self._get_current_queue_size(queue_id)
-max_queue_size = self.__invoker.services.configuration.get_config().max_queue_size
+max_queue_size = self.__invoker.services.configuration.max_queue_size
 max_new_queue_items = max_queue_size - current_queue_size

 priority = 0
@@ -423,7 +423,7 @@ class ConfigInterface(InvocationContextInterface):
 The app's config.
 """

-return self._services.configuration.get_config()
+return self._services.configuration


 class UtilInterface(InvocationContextInterface):
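ConfigInterface now hands back the configuration service object directly rather than calling get_config() on it; invocation code keeps reaching it through context.config.get(), as in the LatentsToImageInvocation hunk above. A small illustrative sketch (the helper below is hypothetical, not code from the repository):

    from invokeai.app.services.shared.invocation_context import InvocationContext


    def wants_tiled_decode(context: InvocationContext, tiled: bool) -> bool:
        # Hypothetical helper mirroring the check in LatentsToImageInvocation:
        # context.config.get() returns the app config, whose force_tiled_decode
        # field replaces the old tiled_decode setting.
        return tiled or context.config.get().force_tiled_decode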
@@ -9,14 +9,14 @@ from einops import repeat
 from PIL import Image
 from torchvision.transforms import Compose

-from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
 from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
 from invokeai.backend.util.devices import choose_torch_device
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.backend.util.util import download_with_progress_bar

-config = InvokeAIAppConfig.get_config()
+config = get_config()
 logger = InvokeAILogger.get_logger(config=config)

 DEPTH_ANYTHING_MODELS = {
@@ -6,7 +6,7 @@ import pathlib
 import numpy as np
 import onnxruntime as ort

-from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.util.devices import choose_torch_device
 from invokeai.backend.util.util import download_with_progress_bar

@@ -24,7 +24,7 @@ DWPOSE_MODELS = {
 },
 }

-config = InvokeAIAppConfig.get_config()
+config = get_config


 class Wholebody:
@@ -6,9 +6,11 @@ import torch
 from PIL import Image

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.util.devices import choose_torch_device

+config = get_config()
+

 def norm_img(np_img):
 if len(np_img.shape) == 2:
@@ -29,7 +31,7 @@ def load_jit_model(url_or_path, device):
 class LaMA:
 def __call__(self, input_image: Image.Image, *args: Any, **kwds: Any) -> Any:
 device = choose_torch_device()
-model_location = get_invokeai_config().models_path / "core/misc/lama/lama.pt"
+model_location = get_config().models_path / "core/misc/lama/lama.pt"
 model = load_jit_model(model_location, device)

 image = np.asarray(input_image.convert("RGB"))
@@ -8,9 +8,9 @@ be suppressed or deferred
 import numpy as np

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

-config = InvokeAIAppConfig.get_config()
+config = get_config()


 class PatchMatch:
@@ -33,11 +33,11 @@ from PIL import Image, ImageOps
 from transformers import AutoProcessor, CLIPSegForImageSegmentation

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

 CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352
-config = InvokeAIAppConfig.get_config()
+config = get_config()


 class SegmentedGrayscale(object):
@@ -118,7 +118,7 @@ class ModelMerger(object):
 config = self._installer.app_config
 store = self._installer.record_store
 base_models: Set[BaseModelType] = set()
-variant = None if self._installer.app_config.full_precision else "fp16"
+variant = None if self._installer.app_config.precision == "float32" else "fp16"

 assert (
 len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
@@ -21,7 +21,7 @@ from diffusers.utils.outputs import BaseOutput
 from pydantic import Field
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
 from invokeai.backend.ip_adapter.unet_patcher import UNetPatcher
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningData
@@ -251,7 +251,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
 """
 if xformers is available, use it, otherwise use sliced attention.
 """
-config = InvokeAIAppConfig.get_config()
+config = get_config()
 if config.attention_type == "xformers":
 self.enable_xformers_memory_efficient_attention()
 return
@@ -8,7 +8,7 @@ import torch
 from diffusers import UNet2DConditionModel
 from typing_extensions import TypeAlias

-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
 ConditioningData,
 ExtraConditioningInfo,
@@ -54,7 +54,7 @@ class InvokeAIDiffuserComponent:
 :param model: the unet model to pass through to cross attention control
 :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning)
 """
-config = InvokeAIAppConfig.get_config()
+config = get_config()
 self.conditioning = None
 self.model = model
 self.model_forward_callback = model_forward_callback
@@ -7,11 +7,12 @@ import torch
 from torch import autocast

 from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

 CPU_DEVICE = torch.device("cpu")
 CUDA_DEVICE = torch.device("cuda")
 MPS_DEVICE = torch.device("mps")
-config = InvokeAIAppConfig.get_config()
+config = get_config()


 def choose_torch_device() -> torch.device:
@@ -181,6 +181,7 @@ from pathlib import Path
 from typing import Any, Dict, Optional

 from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config.config_default import get_config

 try:
 import syslog
@@ -339,7 +340,7 @@ class InvokeAILogger(object): # noqa D102

 @classmethod
 def get_logger(
-cls, name: str = "InvokeAI", config: InvokeAIAppConfig = InvokeAIAppConfig.get_config()
+cls, name: str = "InvokeAI", config: InvokeAIAppConfig = get_config()
 ) -> logging.Logger: # noqa D102
 if name in cls.loggers:
 return cls.loggers[name]
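One side effect of the get_logger signature above is that the config=get_config() default argument is evaluated once, when the method is defined at import time, not on every call. Callers that want the current config can pass it explicitly; a hedged sketch, assuming the signature shown in the hunk:

    from invokeai.app.services.config.config_default import get_config
    from invokeai.backend.util.logging import InvokeAILogger

    # Pass the config explicitly instead of relying on the import-time default.
    logger = InvokeAILogger.get_logger("InvokeAI", config=get_config())
    logger.info("logger configured from the current app config")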