Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
fixes to env parsing, textual inversion & help text
- Make environment variable settings case InSenSiTive: the INVOKEAI_MAX_LOADED_MODELS and InvokeAI_Max_Loaded_Models environment variables will both set `max_loaded_models`.
- Updated realesrgan to use the new config system.
- Updated textual_inversion_training to use the new config system.
- Discovered a race condition when InvokeAIAppConfig is created at module load time, which makes it impossible to customize or replace the help message produced with --help on the command line. To fix this, moved all instances of get_invokeai_config() from module load time to object initialization time. Makes the code cleaner, too.
- Added a `--from_file` argument to `invokeai-node-cli` and changed the github action to match. CI tests will hopefully work now.
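For readers skimming the diff below: the pattern being removed is a module-level `config = get_invokeai_config()` that is evaluated on import, and the pattern being added fetches the config inside the function or `__init__` that needs it. The following is a minimal, hypothetical sketch of why the deferred lookup matters; the `AppSettings`, `get_config`, and `ModelCache` names are illustrative only and are not InvokeAI's actual API.

```python
import functools
import os


class AppSettings:
    """Hypothetical stand-in for an InvokeAIAppConfig-style settings object."""

    def __init__(self) -> None:
        # Case-insensitive environment lookup: INVOKEAI_MAX_LOADED_MODELS and
        # InvokeAI_Max_Loaded_Models both populate max_loaded_models.
        env = {k.lower(): v for k, v in os.environ.items()}
        self.max_loaded_models = int(env.get("invokeai_max_loaded_models", 2))


# Anti-pattern: building the settings at module import time.  Every importer
# gets a value frozen at import order, and a CLI entry point that wants to
# customize parsing or help text afterwards is already too late.
# config = AppSettings()


@functools.lru_cache(maxsize=1)
def get_config() -> AppSettings:
    """Build (and cache) the settings lazily, at first use instead of at import."""
    return AppSettings()


class ModelCache:
    def __init__(self) -> None:
        # Pattern used throughout the diff below: fetch the config inside
        # __init__ (or inside the function that needs it), not at module scope.
        self.globals = get_config()
        self.max_loaded_models = self.globals.max_loaded_models


if __name__ == "__main__":
    os.environ["InvokeAI_Max_Loaded_Models"] = "4"
    print(ModelCache().max_loaded_models)  # -> 4
```

With the cached accessor, an entry point can still adjust argument parsing (for example, its own --help text or a `--from_file` option) before the first `get_config()` call, which a module-scope singleton prevents.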
@@ -389,8 +389,8 @@ class editOptsForm(npyscreen.FormMultiPage):
 )
 self.nextrely += 1
 for i in [
-"If you have an account at HuggingFace you may paste your access token here",
-'to allow InvokeAI to download styles & subjects from the "Concept Library".',
+"If you have an account at HuggingFace you may optionally paste your access token here",
+'to allow InvokeAI to download restricted styles & subjects from the "Concept Library".',
 "See https://huggingface.co/settings/tokens",
 ]:
 self.add_widget_intelligent(
@@ -593,6 +593,9 @@ class editOptsForm(npyscreen.FormMultiPage):
 new_opts.hf_token = self.hf_token.value
 new_opts.license_acceptance = self.license_acceptance.value
 new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]

+# widget library workaround to make max_loaded_models an int rather than a float
+new_opts.max_loaded_models = int(new_opts.max_loaded_models)
+
 return new_opts
@@ -8,8 +8,6 @@ import numpy as np
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config

-config = get_invokeai_config()
-
 class PatchMatch:
 """
 Thin class wrapper around the patchmatch function.
@@ -23,6 +21,7 @@ class PatchMatch:

 @classmethod
 def _load_patch_match(self):
+config = get_invokeai_config()
 if self.tried_load:
 return
 if config.try_patchmatch:
@@ -37,7 +37,6 @@ from invokeai.app.services.config import get_invokeai_config

 CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352
-config = get_invokeai_config()

 class SegmentedGrayscale(object):
 def __init__(self, image: Image, heatmap: torch.Tensor):
@@ -84,6 +83,7 @@ class Txt2Mask(object):

 def __init__(self, device="cpu", refined=False):
 logger.info("Initializing clipseg model for text to mask inference")
+config = get_invokeai_config()

 # BUG: we are not doing anything with the device option at this time
 self.device = device
@@ -74,8 +74,6 @@ from transformers import (

 from ..stable_diffusion import StableDiffusionGeneratorPipeline

-config = get_invokeai_config()
-
 def shave_segments(path, n_shave_prefix_segments=1):
 """
 Removes segments. Positive values shave the first segments, negative shave the last segments.
@@ -844,7 +842,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config):

 def convert_ldm_clip_checkpoint(checkpoint):
 text_model = CLIPTextModel.from_pretrained(
-"openai/clip-vit-large-patch14", cache_dir=config.cache_dir
+"openai/clip-vit-large-patch14", cache_dir=get_invokeai_config().cache_dir
 )

 keys = list(checkpoint.keys())
@@ -899,7 +897,7 @@ textenc_pattern = re.compile("|".join(protected.keys()))


 def convert_paint_by_example_checkpoint(checkpoint):
-cache_dir = config.cache_dir
+cache_dir = get_invokeai_config().cache_dir
 config = CLIPVisionConfig.from_pretrained(
 "openai/clip-vit-large-patch14", cache_dir=cache_dir
 )
@@ -971,7 +969,7 @@ def convert_paint_by_example_checkpoint(checkpoint):


 def convert_open_clip_checkpoint(checkpoint):
-cache_dir = config.cache_dir
+cache_dir = get_invokeai_config().cache_dir
 text_model = CLIPTextModel.from_pretrained(
 "stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir
 )
@@ -1094,7 +1092,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
 :param vae: A diffusers VAE to load into the pipeline.
 :param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline.
 """
-
+config = get_invokeai_config()
 with warnings.catch_warnings():
 warnings.simplefilter("ignore")
 verbosity = dlogging.get_verbosity()
@@ -68,7 +68,6 @@ class SDModelComponent(Enum):
 feature_extractor="feature_extractor"

 DEFAULT_MAX_MODELS = 2
-config = get_invokeai_config()

 class ModelManager(object):
 """
@@ -99,6 +98,7 @@ class ModelManager(object):
 if not isinstance(config, DictConfig):
 config = OmegaConf.load(config)
 self.config = config
+self.globals = get_invokeai_config()
 self.precision = precision
 self.device = torch.device(device_type)
 self.max_loaded_models = max_loaded_models
@@ -291,7 +291,7 @@ class ModelManager(object):
 """
 # if we are converting legacy files automatically, then
 # there are no legacy ckpts!
-if config.ckpt_convert:
+if self.globals.ckpt_convert:
 return False
 info = self.model_info(model_name)
 if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")):
@@ -501,13 +501,13 @@ class ModelManager(object):

 # TODO: scan weights maybe?
 pipeline_args: dict[str, Any] = dict(
-safety_checker=None, local_files_only=not config.internet_available
+safety_checker=None, local_files_only=not self.globals.internet_available
 )
 if "vae" in mconfig and mconfig["vae"] is not None:
 if vae := self._load_vae(mconfig["vae"]):
 pipeline_args.update(vae=vae)
 if not isinstance(name_or_path, Path):
-pipeline_args.update(cache_dir=config.cache_dir)
+pipeline_args.update(cache_dir=self.globals.cache_dir)
 if using_fp16:
 pipeline_args.update(torch_dtype=torch.float16)
 fp_args_list = [{"revision": "fp16"}, {}]
@@ -559,10 +559,9 @@ class ModelManager(object):
 width = mconfig.width
 height = mconfig.height

-if not os.path.isabs(config):
-config = os.path.join(config.root, config)
-if not os.path.isabs(weights):
-weights = os.path.normpath(os.path.join(config.root, weights))
+root_dir = self.globals.root_dir
+config = str(root_dir / config)
+weights = str(root_dir / weights)

 # Convert to diffusers and return a diffusers pipeline
 self.logger.info(f"Converting legacy checkpoint {model_name} into a diffusers model...")
@@ -577,11 +576,7 @@ class ModelManager(object):

 vae_path = None
 if vae:
-vae_path = (
-vae
-if os.path.isabs(vae)
-else os.path.normpath(os.path.join(config.root, vae))
-)
+vae_path = str(root_dir / vae)
 if self._has_cuda():
 torch.cuda.empty_cache()
 pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
@@ -613,9 +608,7 @@ class ModelManager(object):
 )

 if "path" in mconfig and mconfig["path"] is not None:
-path = Path(mconfig["path"])
-if not path.is_absolute():
-path = Path(config.root, path).resolve()
+path = self.globals.root_dir / Path(mconfig["path"])
 return path
 elif "repo_id" in mconfig:
 return mconfig["repo_id"]
@@ -863,16 +856,16 @@ class ModelManager(object):
 model_type = self.probe_model_type(checkpoint)
 if model_type == SDLegacyType.V1:
 self.logger.debug("SD-v1 model detected")
-model_config_file = config.legacy_conf_path / "v1-inference.yaml"
+model_config_file = self.globals.legacy_conf_path / "v1-inference.yaml"
 elif model_type == SDLegacyType.V1_INPAINT:
 self.logger.debug("SD-v1 inpainting model detected")
-model_config_file = config.legacy_conf_path / "v1-inpainting-inference.yaml",
+model_config_file = self.globals.legacy_conf_path / "v1-inpainting-inference.yaml",
 elif model_type == SDLegacyType.V2_v:
 self.logger.debug("SD-v2-v model detected")
-model_config_file = config.legacy_conf_path / "v2-inference-v.yaml"
+model_config_file = self.globals.legacy_conf_path / "v2-inference-v.yaml"
 elif model_type == SDLegacyType.V2_e:
 self.logger.debug("SD-v2-e model detected")
-model_config_file = config.legacy_conf_path / "v2-inference.yaml"
+model_config_file = self.globals.legacy_conf_path / "v2-inference.yaml"
 elif model_type == SDLegacyType.V2:
 self.logger.warning(
 f"{thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
@@ -899,7 +892,7 @@ class ModelManager(object):
 self.logger.debug(f"Using VAE file {vae_path.name}")
 vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")

-diffuser_path = config.root / "models/converted_ckpts" / model_path.stem
+diffuser_path = self.globals.root_dir / "models/converted_ckpts" / model_path.stem
 model_name = self.convert_and_import(
 model_path,
 diffusers_path=diffuser_path,
@@ -1032,7 +1025,7 @@ class ModelManager(object):
 """
 yaml_str = OmegaConf.to_yaml(self.config)
 if not os.path.isabs(config_file_path):
-config_file_path = config.model_conf_path
+config_file_path = self.globals.model_conf_path
 tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp")
 with open(tmpfile, "w", encoding="utf-8") as outfile:
 outfile.write(self.preamble())
@@ -1064,7 +1057,8 @@ class ModelManager(object):
 """
 # Three transformer models to check: bert, clip and safety checker, and
 # the diffusers as well
-models_dir = config.root / "models"
+config = get_invokeai_config()
+models_dir = config.root_dir / "models"
 legacy_locations = [
 Path(
 models_dir,
@@ -1138,13 +1132,12 @@ class ModelManager(object):
 if str(source).startswith(("http:", "https:", "ftp:")):
 dest_directory = Path(dest_directory)
 if not dest_directory.is_absolute():
-dest_directory = config.root / dest_directory
+dest_directory = self.globals.root_dir / dest_directory
 dest_directory.mkdir(parents=True, exist_ok=True)
 resolved_path = download_with_resume(str(source), dest_directory)
 else:
-if not os.path.isabs(source):
-source = config.root / source
-resolved_path = Path(source)
+source = self.globals.root_dir / source
+resolved_path = source
 return resolved_path

 def _invalidate_cached_model(self, model_name: str) -> None:
@@ -1194,7 +1187,7 @@ class ModelManager(object):
 path = name_or_path
 else:
 owner, repo = name_or_path.split("/")
-path = Path(config.cache_dir / f"models--{owner}--{repo}")
+path = self.globals.cache_dir / f"models--{owner}--{repo}"
 if not path.exists():
 return None
 hashpath = path / "checksum.sha256"
@@ -1255,8 +1248,8 @@ class ModelManager(object):
 using_fp16 = self.precision == "float16"

 vae_args.update(
-cache_dir=config.cache_dir,
-local_files_only=not config.internet_available,
+cache_dir=self.globals.cache_dir,
+local_files_only=not self.globals.internet_available,
 )

 self.logger.debug(f"Loading diffusers VAE from {name_or_path}")
@@ -1294,7 +1287,7 @@ class ModelManager(object):

 @classmethod
 def _delete_model_from_cache(cls,repo_id):
-cache_info = scan_cache_dir(config.cache_dir)
+cache_info = scan_cache_dir(get_invokeai_config().cache_dir)

 # I'm sure there is a way to do this with comprehensions
 # but the code quickly became incomprehensible!
@@ -1311,9 +1304,10 @@ class ModelManager(object):

 @staticmethod
 def _abs_path(path: str | Path) -> Path:
+globals = get_invokeai_config()
 if path is None or Path(path).is_absolute():
 return path
-return Path(config.root, path).resolve()
+return Path(globals.root_dir, path).resolve()

 @staticmethod
 def _is_huggingface_hub_directory_present() -> bool:
@@ -25,8 +25,6 @@ from invokeai.app.services.config import get_invokeai_config
 from ..stable_diffusion import InvokeAIDiffuserComponent
 from ..util import torch_dtype

-config = get_invokeai_config()
-
 def get_uc_and_c_and_ec(prompt_string,
 model: InvokeAIDiffuserComponent,
 log_tokens=False, skip_normalize_legacy_blend=False):
@@ -40,6 +38,8 @@ def get_uc_and_c_and_ec(prompt_string,
 dtype_for_device_getter=torch_dtype,
 truncate_long_prompts=False,
 )

+config = get_invokeai_config()
+
 # get rid of any newline characters
 prompt_string = prompt_string.replace("\n", " ")
@@ -7,7 +7,6 @@ import torch

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
-config = get_invokeai_config()

 pretrained_model_url = (
 "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
@@ -18,11 +17,11 @@ class CodeFormerRestoration:
 def __init__(
 self, codeformer_dir="models/codeformer", codeformer_model_path="codeformer.pth"
 ) -> None:
-if not os.path.isabs(codeformer_dir):
-codeformer_dir = os.path.join(config.root, codeformer_dir)

-self.model_path = os.path.join(codeformer_dir, codeformer_model_path)
-self.codeformer_model_exists = os.path.isfile(self.model_path)
+self.globals = get_invokeai_config()
+codeformer_dir = self.globals.root_dir / codeformer_dir
+self.model_path = codeformer_dir / codeformer_model_path
+self.codeformer_model_exists = self.model_path.exists()

 if not self.codeformer_model_exists:
 logger.error("NOT FOUND: CodeFormer model not found at " + self.model_path)
@@ -72,9 +71,7 @@ class CodeFormerRestoration:
 upscale_factor=1,
 use_parse=True,
 device=device,
-model_rootpath=os.path.join(
-config.root, "models", "gfpgan", "weights"
-),
+model_rootpath = self.globals.root_dir / "gfpgan" / "weights"
 )
 face_helper.clean_all()
 face_helper.read_image(bgr_image_array)
@@ -8,14 +8,12 @@ from PIL import Image

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
-config = get_invokeai_config()

 class GFPGAN:
 def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None:
+self.globals = get_invokeai_config()
 if not os.path.isabs(gfpgan_model_path):
-gfpgan_model_path = os.path.abspath(
-os.path.join(config.root, gfpgan_model_path)
-)
+gfpgan_model_path = self.globals.root_dir / gfpgan_model_path
 self.model_path = gfpgan_model_path
 self.gfpgan_model_exists = os.path.isfile(self.model_path)
@@ -34,7 +32,7 @@ class GFPGAN:
 warnings.filterwarnings("ignore", category=DeprecationWarning)
 warnings.filterwarnings("ignore", category=UserWarning)
 cwd = os.getcwd()
-os.chdir(os.path.join(config.root, "models"))
+os.chdir(self.globals.root_dir / 'models')
 try:
 from gfpgan import GFPGANer
@@ -1,4 +1,3 @@
-import os
 import warnings

 import numpy as np
@@ -7,7 +6,8 @@ from PIL import Image
 from PIL.Image import Image as ImageType

 import invokeai.backend.util.logging as logger
-from invokeai.backend.globals import Globals
+from invokeai.app.services.config import get_invokeai_config
+config = get_invokeai_config()

 class ESRGAN:
 def __init__(self, bg_tile_size=400) -> None:
@@ -30,12 +30,8 @@ class ESRGAN:
 upscale=4,
 act_type="prelu",
 )
-model_path = os.path.join(
-Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
-)
-wdn_model_path = os.path.join(
-Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
-)
+model_path = config.root_dir / "models/realesrgan/realesr-general-x4v3.pth"
+wdn_model_path = config.root_dir / "models/realesrgan/realesr-general-wdn-x4v3.pth"
 scale = 4

 bg_upsampler = RealESRGANer(
@@ -18,8 +18,6 @@ import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
 from .util import CPU_DEVICE

-config = get_invokeai_config()
-
 class SafetyChecker(object):
 CAUTION_IMG = "caution.png"
@@ -28,7 +26,8 @@ class SafetyChecker(object):
 caution = Image.open(path)
 self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
 self.device = device

+config = get_invokeai_config()

 try:
 safety_model_id = "CompVis/stable-diffusion-safety-checker"
 safety_model_path = config.cache_dir
@@ -19,14 +19,14 @@ from huggingface_hub import (

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
-config = get_invokeai_config()

 class HuggingFaceConceptsLibrary(object):
 def __init__(self, root=None):
 """
 Initialize the Concepts object. May optionally pass a root directory.
 """
-self.root = root or config.root
+self.config = get_invokeai_config()
+self.root = root or self.config.root
 self.hf_api = HfApi()
 self.local_concepts = dict()
 self.concept_list = None
@@ -58,7 +58,7 @@ class HuggingFaceConceptsLibrary(object):
 self.concept_list.extend(list(local_concepts_to_add))
 return self.concept_list
 return self.concept_list
-elif config.internet_available is True:
+elif self.config.internet_available is True:
 try:
 models = self.hf_api.list_models(
 filter=ModelFilter(model_name="sd-concepts-library/")
@@ -43,8 +43,6 @@ from .diffusion import (
 from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup
 from .textual_inversion_manager import TextualInversionManager

-config = get_invokeai_config()
-
 @dataclass
 class PipelineIntermediateState:
 run_id: str
@@ -348,6 +346,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
 """
 if xformers is available, use it, otherwise use sliced attention.
 """
+config = get_invokeai_config()
 if (
 torch.cuda.is_available()
 and is_xformers_available()
@@ -32,8 +32,6 @@ ModelForwardCallback: TypeAlias = Union[
 Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
 ]

-config = get_invokeai_config()
-
 @dataclass(frozen=True)
 class PostprocessingSettings:
 threshold: float
@@ -74,6 +72,7 @@ class InvokeAIDiffuserComponent:
 :param model: the unet model to pass through to cross attention control
 :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning)
 """
+config = get_invokeai_config()
 self.conditioning = None
 self.model = model
 self.is_running_diffusers = is_running_diffusers
@@ -7,7 +7,6 @@
 This is the backend to "textual_inversion.py"
 """

-import argparse
 import logging
 import math
 import os
@@ -47,7 +46,7 @@ from tqdm.auto import tqdm
 from transformers import CLIPTextModel, CLIPTokenizer

 # invokeai stuff
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import InvokeAIAppConfig,PagingArgumentParser

 if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
 PIL_INTERPOLATION = {
@@ -89,10 +88,9 @@ def save_progress(


 def parse_args():
-config = InvokeAIAppConfig()
-
+config = InvokeAIAppConfig(argv=[])
 parser = PagingArgumentParser(
-description="Textual inversion training", formatter_class=ArgFormatter
+description="Textual inversion training"
 )
 general_group = parser.add_argument_group("General")
 model_group = parser.add_argument_group("Models and Paths")
@@ -529,6 +527,7 @@ def get_full_repo_name(


 def do_textual_inversion_training(
+config: InvokeAIAppConfig,
 model: str,
 train_data_dir: Path,
 output_dir: Path,
@@ -629,7 +628,7 @@ def do_textual_inversion_training(
 elif output_dir is not None:
 os.makedirs(output_dir, exist_ok=True)

-models_conf = OmegaConf.load(os.path.join(config.root, "configs/models.yaml"))
+models_conf = OmegaConf.load(config.model_conf_path)
 model_conf = models_conf.get(model, None)
 assert model_conf is not None, f"Unknown model: {model}"
 assert (
@@ -641,7 +640,7 @@ def do_textual_inversion_training(
 assert (
 pretrained_model_name_or_path
 ), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}"
-pipeline_args = dict(cache_dir=config.cache_dir())
+pipeline_args = dict(cache_dir=config.cache_dir)

 # Load tokenizer
 if tokenizer_name:
@@ -9,10 +9,10 @@ from invokeai.app.services.config import get_invokeai_config
 CPU_DEVICE = torch.device("cpu")
 CUDA_DEVICE = torch.device("cuda")
 MPS_DEVICE = torch.device("mps")
-config = get_invokeai_config()

 def choose_torch_device() -> torch.device:
 """Convenience routine for guessing which GPU device to run model on"""
+config = get_invokeai_config()
 if config.always_use_cpu:
 return CPU_DEVICE
 if torch.cuda.is_available():
@@ -32,6 +32,7 @@ def choose_precision(device: torch.device) -> str:


 def torch_dtype(device: torch.device) -> torch.dtype:
+config = get_invokeai_config()
 if config.full_precision:
 return torch.float32
 if choose_precision(device) == "float16":