add support for an autoimport models directory scanned at startup time

Lincoln Stein 2023-06-25 18:50:15 -04:00
parent c91d1eacba
commit 160b5d7992
7 changed files with 177 additions and 104 deletions

View File

@@ -15,7 +15,7 @@ InvokeAI:
     conf_path: configs/models.yaml
     legacy_conf_dir: configs/stable-diffusion
     outdir: outputs
-    autoconvert_dir: null
+    autoimport_dir: null
   Models:
     model: stable-diffusion-1.5
     embeddings: true
@@ -367,17 +367,17 @@ setting environment variables INVOKEAI_<setting>.

     always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
     free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
-    max_loaded_models : int = Field(default=2, gt=0, description="Maximum number of models to keep in memory for rapid switching", category='Memory/Performance')
+    max_loaded_models : int = Field(default=3, gt=0, description="Maximum number of models to keep in memory for rapid switching", category='Memory/Performance')
     precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16',description='Floating point precision', category='Memory/Performance')
     sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
     xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
     tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance')

     root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths')
-    autoimport_dir : Path = Field(default='models/autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths')
+    autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths')
     autoconvert_dir : Path = Field(default=None, description='Deprecated configuration option.', category='Paths')
     conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths')
-    models_dir : Path = Field(default='./models', description='Path to the models directory', category='Paths')
+    models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths')
     legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
     db_dir : Path = Field(default='databases', description='Path to InvokeAI databases directory', category='Paths')
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
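
Note the direction of the path defaults in this hunk: autoimport_dir loses its models/ prefix and models_dir its ./ prefix, so both now read as bare names resolved against the runtime root. A minimal sketch of that resolution, assuming the usual InvokeAIAppConfig singleton accessor and a hypothetical root:

    from invokeai.app.services.config import InvokeAIAppConfig

    config = InvokeAIAppConfig.get_config()     # assumed accessor
    root = config.root_path                     # e.g. ~/invokeai (hypothetical)
    autoimport = root / config.autoimport_dir   # -> <root>/autoimport with the new default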

View File

@@ -605,6 +605,7 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
     for name in (
         "models",
         "databases",
+        "autoimport",
         "text-inversion-output",
         "text-inversion-training-data",
         "configs"

View File

@@ -183,61 +183,67 @@ class ModelInstall(object):
         else:
             update_autoimport_dir(None)

-    def heuristic_install(self, model_path_id_or_url: Union[str,Path]):
+    def heuristic_install(self,
+                          model_path_id_or_url: Union[str,Path],
+                          models_installed: Set[Path]=None)->Set[Path]:
+
+        if not models_installed:
+            models_installed = set()

         # A little hack to allow nested routines to retrieve info on the requested ID
         self.current_id = model_path_id_or_url
         path = Path(model_path_id_or_url)

-        # checkpoint file, or similar
-        if path.is_file():
-            self._install_path(path)
-            return
+        try:
+            # checkpoint file, or similar
+            if path.is_file():
+                models_installed.add(self._install_path(path))

-        # folders style or similar
-        if path.is_dir() and any([(path/x).exists() for x in ['config.json','model_index.json','learned_embeds.bin']]):
-            self._install_path(path)
-            return
+            # folders style or similar
+            elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+                models_installed.add(self._install_path(path))

-        # recursive scan
-        if path.is_dir():
-            for child in path.iterdir():
-                self.heuristic_install(child)
-            return
+            # recursive scan
+            elif path.is_dir():
+                for child in path.iterdir():
+                    self.heuristic_install(child, models_installed=models_installed)

-        # huggingface repo
-        parts = str(path).split('/')
-        if len(parts) == 2:
-            self._install_repo(str(path))
-            return
+            # huggingface repo
+            elif len(str(path).split('/')) == 2:
+                models_installed.add(self._install_repo(str(path)))

-        # a URL
-        if model_path_id_or_url.startswith(("http:", "https:", "ftp:")):
-            self._install_url(model_path_id_or_url)
-            return
+            # a URL
+            elif model_path_id_or_url.startswith(("http:", "https:", "ftp:")):
+                models_installed.add(self._install_url(model_path_id_or_url))

-        logger.warning(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping')
+            else:
+                logger.warning(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping')
+
+        except ValueError as e:
+            logger.error(str(e))
+
+        return models_installed

     # install a model from a local path. The optional info parameter is there to prevent
     # the model from being probed twice in the event that it has already been probed.
-    def _install_path(self, path: Path, info: ModelProbeInfo=None):
+    def _install_path(self, path: Path, info: ModelProbeInfo=None)->Path:
         try:
             logger.info(f'Probing {path}')
             info = info or ModelProbe().heuristic_probe(path,self.prediction_helper)
-            if info.model_type == ModelType.Main:
-                model_name = path.stem if info.format=='checkpoint' else path.name
-                if self.mgr.model_exists(model_name, info.base_type, info.model_type):
-                    raise Exception(f'A model named "{model_name}" is already installed.')
+            model_name = path.stem if info.format=='checkpoint' else path.name
+            if self.mgr.model_exists(model_name, info.base_type, info.model_type):
+                raise ValueError(f'A model named "{model_name}" is already installed.')
             attributes = self._make_attributes(path,info)
             self.mgr.add_model(model_name = model_name,
                                base_model = info.base_type,
                                model_type = info.model_type,
                                model_attributes = attributes
                                )
         except Exception as e:
             logger.warning(f'{str(e)} Skipping registration.')
+        return path

-    def _install_url(self, url: str):
+    def _install_url(self, url: str)->Path:
         # copy to a staging area, probe, import and delete
         with TemporaryDirectory(dir=self.config.models_path) as staging:
             location = download_with_resume(url,Path(staging))
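
The reworked heuristic_install() threads a models_installed set through every branch (including the recursive directory case) and returns it, instead of bailing out with a bare return at each branch; a ValueError such as the duplicate-name error from _install_path is logged rather than aborting the whole scan. A hedged usage sketch, with hypothetical paths:

    from pathlib import Path

    installer = ModelInstall(config=config, model_manager=mgr)          # mgr: an existing ModelManager
    installed = installer.heuristic_install(Path('/downloads/models'))  # recurses into the folder
    for p in installed:
        print(f'registered: {p}')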
@@ -248,19 +254,9 @@ class ModelInstall(object):
             models_path = shutil.move(location,dest)

         # staged version will be garbage-collected at this time
-        self._install_path(Path(models_path), info)
+        return self._install_path(Path(models_path), info)

-    def _get_model_name(self,path_name: str, location: Path)->str:
-        '''
-        Calculate a name for the model - primitive implementation.
-        '''
-        if key := self.reverse_paths.get(path_name):
-            (name, base, mtype) = ModelManager.parse_key(key)
-            return name
-        else:
-            return location.stem
-
-    def _install_repo(self, repo_id: str):
+    def _install_repo(self, repo_id: str)->Path:
         hinfo = HfApi().model_info(repo_id)

         # we try to figure out how to download this most economically
@@ -300,7 +296,17 @@ class ModelInstall(object):
             if dest.exists():
                 shutil.rmtree(dest)
             shutil.copytree(location,dest)
-            self._install_path(dest, info)
+            return self._install_path(dest, info)
+
+    def _get_model_name(self,path_name: str, location: Path)->str:
+        '''
+        Calculate a name for the model - primitive implementation.
+        '''
+        if key := self.reverse_paths.get(path_name):
+            (name, base, mtype) = ModelManager.parse_key(key)
+            return name
+        else:
+            return location.stem

     def _make_attributes(self, path: Path, info: ModelProbeInfo)->dict:
         # convoluted way to retrieve the description from datasets
@@ -308,9 +314,11 @@ class ModelInstall(object):
         if key := self.reverse_paths.get(self.current_id):
             if key in self.datasets:
                 description = self.datasets[key]['description']

+        rel_path = self.relative_to_root(path)
+
         attributes = dict(
-            path = str(path),
+            path = str(rel_path),
             description = str(description),
             model_format = info.format,
         )
@@ -318,18 +326,30 @@ class ModelInstall(object):
             attributes.update(dict(variant = info.variant_type,))
         if info.format=="checkpoint":
             try:
-                legacy_conf = LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type] if info.base_type == BaseModelType.StableDiffusion2 \
-                              else LEGACY_CONFIGS[info.base_type][info.variant_type]
+                possible_conf = path.with_suffix('.yaml')
+                if possible_conf.exists():
+                    legacy_conf = str(self.relative_to_root(possible_conf))
+                elif info.base_type == BaseModelType.StableDiffusion2:
+                    legacy_conf = Path(self.config.legacy_conf_dir, LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type])
+                else:
+                    legacy_conf = Path(self.config.legacy_conf_dir, LEGACY_CONFIGS[info.base_type][info.variant_type])
             except KeyError:
-                legacy_conf = 'v1-inference.yaml' # best guess
+                legacy_conf = Path(self.config.legacy_conf_dir, 'v1-inference.yaml') # best guess

             attributes.update(
                 dict(
-                    config = str(self.config.legacy_conf_path / legacy_conf)
+                    config = str(legacy_conf)
                 )
             )
         return attributes

+    def relative_to_root(self, path: Path)->Path:
+        root = self.config.root_path
+        if path.is_relative_to(root):
+            return path.relative_to(root)
+        else:
+            return path
+
     def _download_hf_pipeline(self, repo_id: str, staging: Path)->Path:
         '''
         This retrieves a StableDiffusion model from cache or remote and then
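
The new relative_to_root() helper relies on Path.is_relative_to(), which exists only on Python 3.9 and later; paths outside the root are deliberately left absolute. A small behavior sketch with hypothetical paths:

    from pathlib import Path

    root = Path('/home/user/invokeai')
    inside = Path('/home/user/invokeai/models/foo.ckpt')
    outside = Path('/mnt/shared/bar.ckpt')

    print(inside.relative_to(root))   # models/foo.ckpt -- stored relative in models.yaml
    print(outside)                    # stays absolute; relative_to() would raise ValueError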
@@ -379,6 +399,9 @@ def update_autoimport_dir(autodir: Path):
     '''
     Update the "autoimport_dir" option in invokeai.yaml
     '''
+    with open('log.txt','a') as f:
+        print(f'autodir = {autodir}',file=f)
+
     invokeai_config_path = config.init_file_path
     conf = OmegaConf.load(invokeai_config_path)
     conf.InvokeAI.Paths.autoimport_dir = str(autodir) if autodir else None
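
Apart from the new log.txt trace, update_autoimport_dir() is an OmegaConf load-modify-save round trip on the init file. A minimal sketch of the same pattern, assuming the standard invokeai.yaml file name:

    from omegaconf import OmegaConf

    conf = OmegaConf.load('invokeai.yaml')
    conf.InvokeAI.Paths.autoimport_dir = 'autoimport'   # or None to disable startup scanning
    OmegaConf.save(config=conf, f='invokeai.yaml')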

View File

@@ -227,7 +227,7 @@ from pydantic import BaseModel

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.util import CUDA_DEVICE
+from invokeai.backend.util import CUDA_DEVICE, Chdir
 from .model_cache import ModelCache, ModelLocker
 from .models import (
     BaseModelType, ModelType, SubModelType,
@@ -488,11 +488,6 @@ class ModelManager(object):
     ) -> list[dict]:
         """
         Return a list of models.
-
-        Please use model_manager.models() to get all the model names,
-        model_manager.model_info('model-name') to get the stanza for the model
-        named 'model-name', and model_manager.config to get the full OmegaConf
-        object derived from models.yaml
         """

         models = []
@@ -659,44 +654,82 @@ class ModelManager(object):
     def scan_models_directory(self):
         loaded_files = set()
         new_models_found = False

-        for model_key, model_config in list(self.models.items()):
-            model_name, base_model, model_type = self.parse_key(model_key)
-            model_path = str(self.globals.root_path / model_config.path)
-            if not os.path.exists(model_path):
-                model_class = MODEL_CLASSES[base_model][model_type]
-                if model_class.save_to_config:
-                    model_config.error = ModelError.NotFound
-                else:
-                    self.models.pop(model_key, None)
-            else:
-                loaded_files.add(model_path)
-
-        for base_model in BaseModelType:
-            for model_type in ModelType:
-                model_class = MODEL_CLASSES[base_model][model_type]
-                models_dir = os.path.join(self.globals.models_path, base_model, model_type)
-
-                if not os.path.exists(models_dir):
-                    continue # TODO: or create all folders?
-
-                for entry_name in os.listdir(models_dir):
-                    model_path = os.path.join(models_dir, entry_name)
-                    if model_path not in loaded_files: # TODO: check
-                        model_path = Path(model_path)
-                        model_name = model_path.name if model_path.is_dir() else model_path.stem
-                        model_key = self.create_key(model_name, base_model, model_type)
-
-                        if model_key in self.models:
-                            raise Exception(f"Model with key {model_key} added twice")
-
-                        model_config: ModelConfigBase = model_class.probe_config(str(model_path))
-                        self.models[model_key] = model_config
-                        new_models_found = True
-
-        if new_models_found and self.config_path:
+        with Chdir(self.globals.root_path):
+            for model_key, model_config in list(self.models.items()):
+                model_name, base_model, model_type = self.parse_key(model_key)
+                model_path = str(model_config.path)
+                if not os.path.exists(model_path):
+                    model_class = MODEL_CLASSES[base_model][model_type]
+                    if model_class.save_to_config:
+                        model_config.error = ModelError.NotFound
+                    else:
+                        self.models.pop(model_key, None)
+                else:
+                    loaded_files.add(model_path)
+
+            for base_model in BaseModelType:
+                for model_type in ModelType:
+                    model_class = MODEL_CLASSES[base_model][model_type]
+                    models_dir = os.path.join(self.globals.models_dir, base_model, model_type)
+
+                    if not os.path.exists(models_dir):
+                        continue # TODO: or create all folders?
+
+                    for entry_name in os.listdir(models_dir):
+                        model_path = os.path.join(models_dir, entry_name)
+                        if model_path not in loaded_files: # TODO: check
+                            model_path = Path(model_path)
+                            model_name = model_path.name if model_path.is_dir() else model_path.stem
+                            model_key = self.create_key(model_name, base_model, model_type)
+
+                            if model_key in self.models:
+                                raise Exception(f"Model with key {model_key} added twice")
+
+                            model_config: ModelConfigBase = model_class.probe_config(str(model_path))
+                            self.models[model_key] = model_config
+                            new_models_found = True
+
+            imported_models = self.autoimport()
+
+        if (new_models_found or imported_models) and self.config_path:
             self.commit()

+    def autoimport(self):
+        '''
+        Scan the autoimport directory (if defined) and import new models, delete defunct models.
+        '''
+        # avoid circular import
+        from invokeai.backend.install.model_install_backend import ModelInstall
+        installer = ModelInstall(config = self.globals,
+                                 model_manager = self)
+        installed = set()
+        if not self.globals.autoimport_dir:
+            return installed
+
+        autodir = self.globals.root_path / self.globals.autoimport_dir
+        if not (autodir and autodir.exists()):
+            return installed
+
+        known_paths = {(self.globals.root_path / x['path']).resolve() for x in self.list_models()}
+        scanned_dirs = set()
+        for root, dirs, files in os.walk(autodir):
+            for d in dirs:
+                path = Path(root) / d
+                if path in known_paths:
+                    continue
+                if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+                    installed.update(installer.heuristic_install(path))
+                    scanned_dirs.add(path)
+
+            for f in files:
+                path = Path(root) / f
+                if path in known_paths or path.parent in scanned_dirs:
+                    continue
+                if path.suffix in {'.ckpt','.bin','.pth','.safetensors'}:
+                    installed.update(installer.heuristic_install(path))
+        return installed
+
     def heuristic_import(self,
                          items_to_import: Set[str],
@@ -724,8 +757,8 @@ class ModelManager(object):
                                  model_manager = self)
         for thing in items_to_import:
             try:
-                installer.heuristic_install(thing)
-                successfully_installed.add(thing)
+                installed = installer.heuristic_install(thing)
+                successfully_installed.update(installed)
             except Exception as e:
                 self.logger.warning(f'{thing} could not be imported: {str(e)}')
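
The walk inside autoimport() avoids double imports in two ways: a diffusers-style folder is installed once and remembered in scanned_dirs so the weight files inside it are skipped, and anything already registered is filtered out through known_paths. A toy sketch of the same pruning idea, with a hypothetical directory layout:

    import os
    from pathlib import Path

    scanned_dirs = set()
    for root, dirs, files in os.walk('autoimport'):
        for d in dirs:
            path = Path(root) / d
            if (path / 'model_index.json').exists():
                scanned_dirs.add(path)         # whole folder counts as one model
        for f in files:
            path = Path(root) / f
            if path.parent in scanned_dirs:
                continue                       # already covered by its parent folder
            if path.suffix in {'.ckpt', '.safetensors'}:
                print(f'would import {path}')  # loose checkpoint, imported individually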

View File

@@ -16,6 +16,7 @@ from .util import (
     download_with_resume,
     instantiate_from_config,
     url_attachment_name,
+    Chdir
 )

View File

@@ -381,3 +381,18 @@ def image_to_dataURL(image: Image.Image, image_format: str = "PNG") -> str:
         buffered.getvalue()
     ).decode("UTF-8")
     return image_base64
+
+class Chdir(object):
+    '''Context manager to chdir to desired directory and change back after context exits:
+    Args:
+        path (Path): The path to the cwd
+    '''
+    def __init__(self, path: Path):
+        self.path = path
+        self.original = Path().absolute()
+
+    def __enter__(self):
+        os.chdir(self.path)
+
+    def __exit__(self,*args):
+        os.chdir(self.original)
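
Chdir is a plain context manager around os.chdir(); scan_models_directory() uses it so that the relative model paths now stored in models.yaml resolve against the runtime root. Usage sketch:

    from pathlib import Path

    with Chdir(Path('/tmp')):
        ...   # relative paths resolve against /tmp here
    # the previous working directory is restored on exit, even if the body raised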

View File

@@ -65,8 +65,8 @@ def make_printable(s:str)->str:
     return s.translate(NOPRINT_TRANS_TABLE)

 class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
-    # for responsive resizing - disabled
-    # FIX_MINIMUM_SIZE_WHEN_CREATED = False
+    # for responsive resizing set to False, but this seems to cause a crash!
+    FIX_MINIMUM_SIZE_WHEN_CREATED = True

     # for persistence
     current_tab = 0
@@ -323,7 +323,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             FileBox,
             max_height=3,
             name=label,
-            value=str(config.autoimport_dir) if config.autoimport_dir else None,
+            value=str(config.root_path / config.autoimport_dir) if config.autoimport_dir else None,
             select_dir=True,
             must_exist=True,
             use_two_lines=False,
@@ -501,7 +501,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         # rebuild the form, saving and restoring some of the fields that need to be preserved.
         saved_messages = self.monitor.entry_widget.values
-        autoload_dir = self.pipeline_models['autoload_directory'].value
+        autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value)
         autoscan = self.pipeline_models['autoscan_on_startup'].value

         app.main_form = app.addForm(
@@ -547,7 +547,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         # load directory and whether to scan on startup
         if self.parentApp.autoload_pending:
-            selections.scan_directory = self.pipeline_models['autoload_directory'].value
+            selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value)
             self.parentApp.autoload_pending = False
         selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value