Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
fix relative model paths to be against config.models_path, not root (#4061)
## What type of PR is this? (check all applicable)

- [x] Bug Fix

## Have you discussed this change with the InvokeAI team?

- [x] Yes - bug discovered by jpphoto
- [ ] No, because:

## Have you updated all relevant documentation?

- [ ] Yes
- [x] Not needed

## Description

The user can customize the location of the models directory by setting the configuration variable `models_dir`. However, the model manager and the TUI installer were both treating model-relative paths as relative to the InvokeAI root rather than the designated models directory. This has been fixed by changing path-resolution calls from `config.root_path` to `config.models_path`.

Unfortunately there were many call sites that needed replacement, so this needs a bit of functional testing: try adding models, removing models, renaming them, converting checkpoints, etc.
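To make the intent concrete, here is a minimal sketch of the before/after behavior, assuming a config whose `models_dir` points outside the runtime root. The directory names below are invented for illustration and are not taken from any real install:

```python
from pathlib import Path

# Invented example locations; real values come from invokeai.yaml.
root_path = Path("/opt/invokeai")
models_path = Path("/data/invokeai-models")   # customized via the models_dir setting

stored_path = "sd-1/main/stable-diffusion-v1-5"  # relative path as recorded in models.yaml

# Before: relative model paths were resolved against the runtime root,
# which breaks when models_dir is customized.
broken = root_path / stored_path    # /opt/invokeai/sd-1/main/stable-diffusion-v1-5

# After: the model manager and TUI installer resolve against models_path.
fixed = models_path / stored_path   # /data/invokeai-models/sd-1/main/stable-diffusion-v1-5
```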
Commit f17ad227cf
@@ -171,7 +171,6 @@ from pydantic import BaseSettings, Field, parse_obj_as
 from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args

 INIT_FILE = Path("invokeai.yaml")
-MODEL_CORE = Path("models/core")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")

@@ -357,7 +356,7 @@ def _find_root() -> Path:
     venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
     if os.environ.get("INVOKEAI_ROOT"):
         root = Path(os.environ.get("INVOKEAI_ROOT")).resolve()
-    elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE, MODEL_CORE]]):
+    elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]):
         root = (venv.parent).resolve()
     else:
         root = Path("~/invokeai").expanduser().resolve()
@@ -181,7 +181,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th


 def download_conversion_models():
-    target_dir = config.root_path / "models/core/convert"
+    target_dir = config.models_path / "core/convert"
     kwargs = dict()  # for future use
     try:
         logger.info("Downloading core tokenizers and text encoders")
@@ -128,7 +128,9 @@ class ModelInstall(object):
            model_dict[key] = ModelLoadInfo(**value)

        # supplement with entries in models.yaml
-        installed_models = self.mgr.list_models()
+        installed_models = [x for x in self.mgr.list_models()]
+        # suppresses autoloaded models
+        # installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)]

        for md in installed_models:
            base = md["base_model"]
@@ -147,6 +149,17 @@ class ModelInstall(object):
            )
        return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())}

+    def _is_autoloaded(self, model_info: dict) -> bool:
+        path = model_info.get("path")
+        if not path:
+            return False
+        for autodir in ["autoimport_dir", "lora_dir", "embedding_dir", "controlnet_dir"]:
+            if autodir_path := getattr(self.config, autodir):
+                autodir_path = self.config.root_path / autodir_path
+                if Path(path).is_relative_to(autodir_path):
+                    return True
+        return False
+
    def list_models(self, model_type):
        installed = self.mgr.list_models(model_type=model_type)
        print(f"Installed models of type `{model_type}`:")
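For readers unfamiliar with `Path.is_relative_to` (available since Python 3.9), the autoload check above boils down to a containment test like the sketch below. The directory names are illustrative, not actual config values:

```python
from pathlib import Path

# Hypothetical directories standing in for config.autoimport_dir etc.
root = Path("/home/user/invokeai")
autoimport_dir = root / "autoimport/main"

candidate = Path("/home/user/invokeai/autoimport/main/some-model.safetensors")

# is_relative_to() returns True when `candidate` lives under `autoimport_dir`
print(candidate.is_relative_to(autoimport_dir))   # True
print(candidate.is_relative_to(root / "models"))  # False
```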
@@ -273,6 +286,7 @@ class ModelInstall(object):
                logger.error(f"Unable to download {url}. Skipping.")
            info = ModelProbe().heuristic_probe(location)
            dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name
+            dest.parent.mkdir(parents=True, exist_ok=True)
            models_path = shutil.move(location, dest)

        # staged version will be garbage-collected at this time
@@ -346,7 +360,7 @@ class ModelInstall(object):
        if key in self.datasets:
            description = self.datasets[key].get("description") or description

-        rel_path = self.relative_to_root(path)
+        rel_path = self.relative_to_root(path, self.config.models_path)

        attributes = dict(
            path=str(rel_path),
@@ -386,8 +400,8 @@ class ModelInstall(object):
            attributes.update(dict(config=str(legacy_conf)))
        return attributes

-    def relative_to_root(self, path: Path) -> Path:
-        root = self.config.root_path
+    def relative_to_root(self, path: Path, root: None) -> Path:
+        root = root or self.config.root_path
        if path.is_relative_to(root):
            return path.relative_to(root)
        else:
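As a standalone sketch (not the library code itself), the reworked helper behaves as below. Note that the committed annotation `root: None` would more conventionally be written `Optional[Path] = None`, which is what this sketch assumes; the paths are made up:

```python
from pathlib import Path
from typing import Optional

DEFAULT_ROOT = Path("/opt/invokeai")  # stands in for config.root_path

def relative_to_root(path: Path, root: Optional[Path] = None) -> Path:
    # Fall back to the configured root when no explicit base is given.
    root = root or DEFAULT_ROOT
    if path.is_relative_to(root):
        return path.relative_to(root)
    return path

models_path = Path("/opt/invokeai/models")
print(relative_to_root(Path("/opt/invokeai/models/sd-1/main/foo"), models_path))  # sd-1/main/foo
```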
@@ -63,7 +63,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

 from invokeai.backend.util.logging import InvokeAILogger
-from invokeai.app.services.config import InvokeAIAppConfig, MODEL_CORE
+from invokeai.app.services.config import InvokeAIAppConfig

 from picklescan.scanner import scan_file_path
 from .models import BaseModelType, ModelVariantType
@@ -81,7 +81,7 @@ if is_accelerate_available():
     from accelerate.utils import set_module_tensor_to_device

 logger = InvokeAILogger.getLogger(__name__)
-CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().root_path / MODEL_CORE / "convert"
+CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().models_path / "core/convert"


 def shave_segments(path, n_shave_prefix_segments=1):
@@ -187,7 +187,9 @@ class ModelCache(object):
        # TODO: lock for no copies on simultaneous calls?
        cache_entry = self._cached_models.get(key, None)
        if cache_entry is None:
-            self.logger.info(f"Loading model {model_path}, type {base_model}:{model_type}:{submodel}")
+            self.logger.info(
+                f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}"
+            )

            # this will remove older cached models until
            # there is sufficient room to load the requested model
@@ -423,7 +423,7 @@ class ModelManager(object):
        return (model_name, base_model, model_type)

    def _get_model_cache_path(self, model_path):
-        return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest()
+        return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest())

    @classmethod
    def initialize_model_config(cls, config_path: Path):
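Either way, the cache location ends up being a hidden `.cache` folder under the models directory, keyed by an MD5 hash of the model path. A quick illustration of what such a key looks like, with made-up paths:

```python
import hashlib
from pathlib import Path

models_path = Path("/opt/invokeai/models")        # assumed models_path
model_path = models_path / "sd-1/main/my-model"   # assumed model location

cache_key = hashlib.md5(str(model_path).encode()).hexdigest()
cache_path = models_path / ".cache" / cache_key
print(cache_path)  # /opt/invokeai/models/.cache/<32-char hex digest>
```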
@@ -456,7 +456,7 @@ class ModelManager(object):
            raise ModelNotFoundException(f"Model not found - {model_key}")

        model_config = self.models[model_key]
-        model_path = self.app_config.root_path / model_config.path
+        model_path = self.resolve_model_path(model_config.path)

        if not model_path.exists():
            if model_class.save_to_config:
@@ -586,7 +586,7 @@ class ModelManager(object):

            # expose paths as absolute to help web UI
            if path := model_dict.get("path"):
-                model_dict["path"] = str(self.app_config.root_path / path)
+                model_dict["path"] = str(self.resolve_model_path(path))

            models.append(model_dict)

        return models
@@ -623,7 +623,7 @@ class ModelManager(object):
            self.cache.uncache_model(cache_id)

        # if model inside invoke models folder - delete files
-        model_path = self.app_config.root_path / model_cfg.path
+        model_path = self.resolve_model_path(model_cfg.path)
        cache_path = self._get_model_cache_path(model_path)
        if cache_path.exists():
            rmtree(str(cache_path))
@@ -654,10 +654,9 @@ class ModelManager(object):
        The returned dict has the same format as the dict returned by
        model_info().
        """
-        # relativize paths as they go in - this makes it easier to move the root directory around
+        # relativize paths as they go in - this makes it easier to move the models directory around
        if path := model_attributes.get("path"):
-            if Path(path).is_relative_to(self.app_config.root_path):
-                model_attributes["path"] = str(Path(path).relative_to(self.app_config.root_path))
+            model_attributes["path"] = str(self.relative_model_path(Path(path)))

        model_class = MODEL_CLASSES[base_model][model_type]
        model_config = model_class.create_config(**model_attributes)
@@ -715,7 +714,7 @@ class ModelManager(object):
        if not model_cfg:
            raise ModelNotFoundException(f"Unknown model: {model_key}")

-        old_path = self.app_config.root_path / model_cfg.path
+        old_path = self.resolve_model_path(model_cfg.path)
        new_name = new_name or model_name
        new_base = new_base or base_model
        new_key = self.create_key(new_name, new_base, model_type)
@@ -724,15 +723,15 @@ class ModelManager(object):

        # if this is a model file/directory that we manage ourselves, we need to move it
        if old_path.is_relative_to(self.app_config.models_path):
-            new_path = (
-                self.app_config.root_path
-                / "models"
-                / BaseModelType(new_base).value
-                / ModelType(model_type).value
-                / new_name
+            new_path = self.resolve_model_path(
+                Path(
+                    BaseModelType(new_base).value,
+                    ModelType(model_type).value,
+                    new_name,
+                )
            )
            move(old_path, new_path)
-            model_cfg.path = str(new_path.relative_to(self.app_config.root_path))
+            model_cfg.path = str(new_path.relative_to(self.app_config.models_path))

        # clean up caches
        old_model_cache = self._get_model_cache_path(old_path)
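The rename logic no longer hard-codes `root / "models"`; the destination is assembled from base/type/name segments and resolved against the models directory, and the path written back to models.yaml is relative to `models_path`. A rough standalone equivalent, with illustrative paths and enum values:

```python
from pathlib import Path

models_path = Path("/opt/invokeai/models")  # assumed config.models_path

def resolve_model_path(path) -> Path:
    # mirrors the manager helper: relative paths are anchored at models_path
    return models_path / path

new_base, model_type, new_name = "sd-1", "main", "my-renamed-model"
new_path = resolve_model_path(Path(new_base, model_type, new_name))
print(new_path)                           # /opt/invokeai/models/sd-1/main/my-renamed-model
print(new_path.relative_to(models_path))  # sd-1/main/my-renamed-model (what gets recorded)
```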
@@ -782,7 +781,7 @@ class ModelManager(object):
                **submodel,
            )
            checkpoint_path = self.app_config.root_path / info["path"]
-            old_diffusers_path = self.app_config.models_path / model.location
+            old_diffusers_path = self.resolve_model_path(model.location)
            new_diffusers_path = (
                dest_directory or self.app_config.models_path / base_model.value / model_type.value
            ) / model_name
@@ -795,7 +794,7 @@ class ModelManager(object):
            info["path"] = (
                str(new_diffusers_path)
                if dest_directory
-                else str(new_diffusers_path.relative_to(self.app_config.root_path))
+                else str(new_diffusers_path.relative_to(self.app_config.models_path))
            )
            info.pop("config")

@@ -810,6 +809,15 @@ class ModelManager(object):

        return result

+    def resolve_model_path(self, path: Union[Path, str]) -> Path:
+        """return relative paths based on configured models_path"""
+        return self.app_config.models_path / path
+
+    def relative_model_path(self, model_path: Path) -> Path:
+        if model_path.is_relative_to(self.app_config.models_path):
+            model_path = model_path.relative_to(self.app_config.models_path)
+        return model_path
+
    def search_models(self, search_folder):
        self.logger.info(f"Finding Models In: {search_folder}")
        models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
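These two helpers are the heart of the PR: every relative path in models.yaml is now interpreted against `models_path`, and absolute paths under `models_path` are relativized before being written back. A minimal round-trip sketch under an assumed models directory:

```python
from pathlib import Path
from typing import Union

models_path = Path("/data/invokeai/models")  # assumed value of config.models_path

def resolve_model_path(path: Union[Path, str]) -> Path:
    # relative -> absolute, anchored at the models directory
    return models_path / path

def relative_model_path(model_path: Path) -> Path:
    # absolute -> relative, only if it actually lives under models_path
    if model_path.is_relative_to(models_path):
        model_path = model_path.relative_to(models_path)
    return model_path

stored = Path("sd-1/main/deliberate")          # as recorded in models.yaml
on_disk = resolve_model_path(stored)           # /data/invokeai/models/sd-1/main/deliberate
assert relative_model_path(on_disk) == stored  # round-trips cleanly
```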
@@ -883,10 +891,17 @@ class ModelManager(object):
        new_models_found = False

        self.logger.info(f"Scanning {self.app_config.models_path} for new models")
-        with Chdir(self.app_config.root_path):
+        with Chdir(self.app_config.models_path):
            for model_key, model_config in list(self.models.items()):
                model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
-                model_path = self.app_config.root_path.absolute() / model_config.path
+
+                # Patch for relative path bug in older models.yaml - paths should not
+                # be starting with a hard-coded 'models'. This will also fix up
+                # models.yaml when committed.
+                if model_config.path.startswith("models"):
+                    model_config.path = str(Path(*Path(model_config.path).parts[1:]))
+
+                model_path = self.resolve_model_path(model_config.path).absolute()
                if not model_path.exists():
                    model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
                    if model_class.save_to_config:
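The legacy-path patch strips a leading `models/` segment so that old entries such as `models/sd-1/main/foo` resolve correctly once paths are anchored at `models_path`. For illustration:

```python
from pathlib import Path

legacy = "models/sd-1/main/foo"
# Drop the first path segment ("models") and keep the rest.
fixed = str(Path(*Path(legacy).parts[1:]))
print(fixed)  # sd-1/main/foo
```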
@@ -905,7 +920,7 @@ class ModelManager(object):
                if model_type is not None and cur_model_type != model_type:
                    continue
                model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
-                models_dir = self.app_config.models_path / cur_base_model.value / cur_model_type.value
+                models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value))

                if not models_dir.exists():
                    continue  # TODO: or create all folders?
@@ -919,9 +934,7 @@ class ModelManager(object):
                        if model_key in self.models:
                            raise DuplicateModelException(f"Model with key {model_key} added twice")

-                        if model_path.is_relative_to(self.app_config.root_path):
-                            model_path = model_path.relative_to(self.app_config.root_path)
-
+                        model_path = self.relative_model_path(model_path)
                        model_config: ModelConfigBase = model_class.probe_config(str(model_path))
                        self.models[model_key] = model_config
                        new_models_found = True
@@ -932,12 +945,11 @@ class ModelManager(object):
            except NotImplementedError as e:
                self.logger.warning(e)

-        imported_models = self.autoimport()
-
+        imported_models = self.scan_autoimport_directory()
        if (new_models_found or imported_models) and self.config_path:
            self.commit()

-    def autoimport(self) -> Dict[str, AddModelResult]:
+    def scan_autoimport_directory(self) -> Dict[str, AddModelResult]:
        """
        Scan the autoimport directory (if defined) and import new models, delete defunct models.
        """
@@ -971,7 +983,7 @@ class ModelManager(object):
        # LS: hacky
        # Patch in the SD VAE from core so that it is available for use by the UI
        try:
-            self.heuristic_import({config.root_path / "models/core/convert/sd-vae-ft-mse"})
+            self.heuristic_import({self.resolve_model_path("core/convert/sd-vae-ft-mse")})
        except:
            pass

@@ -260,7 +260,7 @@ def _convert_ckpt_and_cache(
    """
    app_config = InvokeAIAppConfig.get_config()

-    weights = app_config.root_path / model_config.path
+    weights = app_config.models_path / model_config.path
    config_file = app_config.root_path / model_config.config
    output_path = Path(output_path)

@@ -156,7 +156,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
            BufferBox,
            name="Log Messages",
            editable=False,
-            max_height=8,
+            max_height=15,
        )

        self.nextrely += 1
@@ -417,7 +417,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        self.ok_button.hidden = True
        self.display()

-        # for communication with the subprocess
+        # TO DO: Spawn a worker thread, not a subprocess
        parent_conn, child_conn = Pipe()
        p = Process(
            target=process_and_execute,
@@ -432,7 +432,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
        self.subprocess_connection = parent_conn
        self.subprocess = p
        app.install_selections = InstallSelections()
-        # process_and_execute(app.opt, app.install_selections)

    def on_back(self):
        self.parentApp.switchFormPrevious()
@@ -507,8 +506,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):

        # rebuild the form, saving and restoring some of the fields that need to be preserved.
        saved_messages = self.monitor.entry_widget.values
-        # autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value)
-        # autoscan = self.pipeline_models['autoscan_on_startup'].value

        app.main_form = app.addForm(
            "MAIN",
@@ -562,12 +559,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
            if downloads := section.get("download_ids"):
                selections.install_models.extend(downloads.value.split())

-        # load directory and whether to scan on startup
-        # if self.parentApp.autoload_pending:
-        #     selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value)
-        #     self.parentApp.autoload_pending = False
-        # selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value
-

class AddModelApplication(npyscreen.NPSAppManaged):
    def __init__(self, opt):
@@ -657,6 +648,10 @@ def process_and_execute(
    selections: InstallSelections,
    conn_out: Connection = None,
):
+    # need to reinitialize config in subprocess
+    config = InvokeAIAppConfig.get_config()
+    config.parse_args()
+
    # set up so that stderr is sent to conn_out
    if conn_out:
        translator = StderrToMessage(conn_out)
@@ -674,38 +669,11 @@ def process_and_execute(
        conn_out.close()


-def do_listings(opt) -> bool:
-    """List installed models of various sorts, and return
-    True if any were requested."""
-    model_manager = ModelManager(config.model_conf_path)
-    if opt.list_models == "diffusers":
-        print("Diffuser models:")
-        model_manager.print_models()
-    elif opt.list_models == "controlnets":
-        print("Installed Controlnet Models:")
-        cnm = model_manager.list_controlnet_models()
-        print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
-    elif opt.list_models == "loras":
-        print("Installed LoRA/LyCORIS Models:")
-        cnm = model_manager.list_lora_models()
-        print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
-    elif opt.list_models == "tis":
-        print("Installed Textual Inversion Embeddings:")
-        cnm = model_manager.list_ti_models()
-        print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
-    else:
-        return False
-    return True
-
-
# --------------------------------------------------------
def select_and_download_models(opt: Namespace):
    precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
    config.precision = precision
    helper = lambda x: ask_user_for_prediction_type(x)
-    # if do_listings(opt):
-    #     pass
-
    installer = ModelInstall(config, prediction_type_helper=helper)
    if opt.list_models:
        installer.list_models(opt.list_models)
@@ -724,8 +692,6 @@ def select_and_download_models(opt: Namespace):
        # needed to support the probe() method running under a subprocess
        torch.multiprocessing.set_start_method("spawn")

-        # the third argument is needed in the Windows 11 environment in
-        # order to launch and resize a console window running this program
        set_min_terminal_size(MIN_COLS, MIN_LINES)
        installApp = AddModelApplication(opt)
        try: