mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

Merge branch 'main' into maryhipp/optional-version
commit 71a07ee5a7
@@ -328,6 +328,25 @@ class ModelCache(object):
         refs = sys.getrefcount(cache_entry.model)
 
+        # manually clear local variable references of just-finished function calls
+        # (for some reason Python doesn't collect them, even via an immediate gc.collect())
+        if refs > 2:
+            while True:
+                cleared = False
+                for referrer in gc.get_referrers(cache_entry.model):
+                    if type(referrer).__name__ == "frame":
+                        # RuntimeError: cannot clear an executing frame
+                        with suppress(RuntimeError):
+                            referrer.clear()
+                            cleared = True
+                            # break
+
+                # repeat if the referrers changed (due to frame clearing), else exit the loop
+                if cleared:
+                    gc.collect()
+                else:
+                    break
+
         device = cache_entry.model.device if hasattr(cache_entry.model, "device") else None
         self.logger.debug(f"Model: {model_key}, locks: {cache_entry._locks}, device: {device}, loaded: {cache_entry.loaded}, refs: {refs}")
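The frame-clearing loop above works because a finished call's frame can linger (for example, pinned by a traceback) while its locals keep the model referenced. A minimal standalone sketch of the mechanism, independent of this diff:

    import gc
    import sys
    from contextlib import suppress

    class Big:
        pass

    def fail(obj):
        local = obj              # this frame's locals keep `obj` alive
        raise ValueError("boom")

    big = Big()
    try:
        fail(big)
    except ValueError:
        tb = sys.exc_info()[2]   # the traceback pins fail()'s finished frame

    print(sys.getrefcount(big))  # typically 4: module global, two frame
                                 # locals, plus getrefcount's own argument

    for referrer in gc.get_referrers(big):
        if type(referrer).__name__ == "frame":
            with suppress(RuntimeError):  # "cannot clear an executing frame"
                referrer.clear()          # drop the frame's local references

    print(sys.getrefcount(big))  # typically 2 once the frame is cleared

This also suggests why the cache code treats refs > 2 as "someone else still holds the model": getrefcount() itself accounts for one reference and the cache entry presumably for another, so anything above 2 points to an outside holder.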
@@ -363,6 +382,9 @@ class ModelCache(object):
                 self.logger.debug(f'GPU VRAM freed: {(mem.vram_used/GIG):.2f} GB')
                 vram_in_use += mem.vram_used  # note vram_used is negative
         self.logger.debug(f'{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB')
 
+        gc.collect()
+        torch.cuda.empty_cache()
+
     def _local_model_hash(self, model_path: Union[str, Path]) -> str:
         sha = hashlib.sha256()
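For reference, a minimal sketch of the freeing idiom added here; `release_cached_vram` is a hypothetical helper name, and a CUDA-enabled PyTorch build is assumed:

    import gc

    import torch

    def release_cached_vram() -> None:
        # Collect first so dead tensors are actually freed...
        gc.collect()
        # ...then return PyTorch's cached-but-unused blocks to the driver.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

Note that empty_cache() only releases memory the allocator has cached; tensors still referenced somewhere stay resident, which is why the garbage-collection pass comes first.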
@@ -106,16 +106,16 @@ providing information about a model defined in models.yaml. For example:
 
 >>> models = mgr.list_models()
 >>> json.dumps(models[0])
 {"path": "/home/lstein/invokeai-main/models/sd-1/controlnet/canny",
  "model_format": "diffusers",
  "name": "canny",
  "base_model": "sd-1",
  "type": "controlnet"
 }
 
 You can filter by model type and base model as shown here:
 
 controlnets = mgr.list_models(model_type=ModelType.ControlNet,
                               base_model=BaseModelType.StableDiffusion1)
 for c in controlnets:
@@ -140,14 +140,14 @@ Layout of the `models` directory:
 
 models
 ├── sd-1
 │   ├── controlnet
 │   ├── lora
 │   ├── main
 │   └── embedding
 ├── sd-2
 │   ├── controlnet
 │   ├── lora
 │   ├── main
 │   └── embedding
 └── core
     ├── face_reconstruction
@@ -195,7 +195,7 @@ name, base model, type and a dict of model attributes. See
 `invokeai/backend/model_management/models` for the attributes required
 by each model type.
 
 A model can be deleted using `del_model()`, providing the same
 identifying information as `get_model()`.
 
 The `heuristic_import()` method will take a set of strings
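A hedged usage sketch of the del_model() call described in this hunk; the parameter names are an assumption, mirrored from get_model()'s documented signature:

    # Hypothetical invocation; identifiers mirror the docstring examples above.
    mgr.del_model(
        "canny",
        base_model=BaseModelType.StableDiffusion1,
        model_type=ModelType.ControlNet,
    )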
@@ -304,7 +304,7 @@ class ModelManager(object):
         logger: types.ModuleType = logger,
     ):
         """
         Initialize with the path to the models.yaml config file.
         Optional parameters are the torch device type, precision, max_models,
         and sequential_offload boolean. Note that the default device
         type and precision are set up for a CUDA system running at half precision.
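A minimal construction sketch, assuming only what the docstring states; the config path is illustrative, not from the diff:

    # Defaults target a CUDA system at half precision, per the docstring.
    mgr = ModelManager("/home/lstein/invokeai-main/configs/models.yaml")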
@@ -323,7 +323,7 @@ class ModelManager(object):
         self.config_meta = ConfigMeta(**config.pop("__metadata__"))
         # TODO: metadata not found
         # TODO: version check
 
         self.app_config = InvokeAIAppConfig.get_config()
         self.logger = logger
         self.cache = ModelCache(
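A sketch of the metadata-pop pattern in the first line of this hunk. The models.yaml schema shown is an assumption; only the `__metadata__` key itself appears in the diff:

    import yaml  # PyYAML, assumed available since the config is YAML

    config = yaml.safe_load('__metadata__: {version: "3.0.0"}\nmy-model: {}')
    meta = config.pop("__metadata__")  # e.g. {"version": "3.0.0"}
    print(meta, list(config))          # remaining keys are model stanzas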
@@ -431,7 +431,7 @@ class ModelManager(object):
         :param model_name: symbolic name of the model in models.yaml
         :param model_type: ModelType enum indicating the type of model to return
         :param base_model: BaseModelType enum indicating the base model used by this model
         :param submodel_type: a ModelType enum indicating the portion of
                the model to retrieve (e.g. ModelType.Vae)
         """
         model_class = MODEL_CLASSES[base_model][model_type]
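A hedged call sketch matching the documented parameters; the model name is illustrative, and ModelType.Vae for the submodel follows the docstring's own example:

    model_info = mgr.get_model(
        model_name="stable-diffusion-1.5",  # illustrative
        base_model=BaseModelType.StableDiffusion1,
        model_type=ModelType.Main,
        submodel_type=ModelType.Vae,
    )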
@@ -456,7 +456,7 @@ class ModelManager(object):
             raise ModelNotFoundException(f"Model not found - {model_key}")
 
         # vae/movq override
         # TODO:
         if submodel_type is not None and hasattr(model_config, submodel_type):
             override_path = getattr(model_config, submodel_type)
             if override_path:
@@ -489,7 +489,7 @@ class ModelManager(object):
         self.cache_keys[model_key].add(model_context.key)
 
         model_hash = "<NO_HASH>"  # TODO:
 
         return ModelInfo(
             context=model_context,
             name=model_name,
@@ -518,7 +518,7 @@ class ModelManager(object):
 
     def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]:
         """
         Return a list of (str, BaseModelType, ModelType) corresponding to all models
         known to the configuration.
         """
         return [(self.parse_key(x)) for x in self.models.keys()]
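For context, a sketch of a create_key()/parse_key() pair consistent with this return type. The "base/type/name" key format is an assumption; the key helpers' bodies are not part of this diff:

    def create_key(model_name: str, base_model: str, model_type: str) -> str:
        return f"{base_model}/{model_type}/{model_name}"

    def parse_key(key: str) -> tuple:
        base_model, model_type, model_name = key.split("/", 2)
        return (model_name, base_model, model_type)  # matches the docstring order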
@@ -692,12 +692,12 @@ class ModelManager(object):
         if new_name is None and new_base is None:
             self.logger.error(f"rename_model() called with neither a new_name nor a new_base. {model_name} unchanged.")
             return
 
         model_key = self.create_key(model_name, base_model, model_type)
         model_cfg = self.models.get(model_key, None)
         if not model_cfg:
             raise ModelNotFoundException(f"Unknown model: {model_key}")
 
         old_path = self.app_config.root_path / model_cfg.path
         new_name = new_name or model_name
         new_base = new_base or base_model
@@ -726,7 +726,7 @@ class ModelManager(object):
         self.models.pop(model_key, None)  # delete
         self.models[new_key] = model_cfg
         self.commit()
 
     def convert_model(
         self,
         model_name: str,
@@ -776,12 +776,12 @@ class ModelManager(object):
                 # something went wrong, so don't leave a dangling diffusers model in the directory, or it will cause a duplicate-model error!
                 rmtree(new_diffusers_path)
                 raise
 
             if checkpoint_path.exists() and checkpoint_path.is_relative_to(self.app_config.models_path):
                 checkpoint_path.unlink()
 
         return result
 
     def search_models(self, search_folder):
         self.logger.info(f"Finding Models In: {search_folder}")
         models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
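A standalone sketch of the recursive search seen here, extended to the other common checkpoint suffix (.safetensors is an assumption; only the .ckpt glob appears in this hunk):

    from pathlib import Path

    def find_checkpoints(search_folder: str) -> list:
        root = Path(search_folder)
        # "**/" makes glob() recurse into subdirectories
        return sorted([*root.glob("**/*.ckpt"), *root.glob("**/*.safetensors")])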
@@ -824,10 +824,14 @@ class ModelManager(object):
         assert config_file_path is not None, "no config file path to write to"
         config_file_path = self.app_config.root_path / config_file_path
         tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp")
-        with open(tmpfile, "w", encoding="utf-8") as outfile:
-            outfile.write(self.preamble())
-            outfile.write(yaml_str)
-        os.replace(tmpfile, config_file_path)
+        try:
+            with open(tmpfile, "w", encoding="utf-8") as outfile:
+                outfile.write(self.preamble())
+                outfile.write(yaml_str)
+            os.replace(tmpfile, config_file_path)
+        except OSError as err:
+            self.logger.warning(f"Could not modify the config file at {config_file_path}")
+            self.logger.warning(err)
 
     def preamble(self) -> str:
         """
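The new try/except wraps a write-then-rename idiom worth calling out. A standalone sketch, with hypothetical names:

    import os

    def atomic_write(path: str, text: str) -> None:
        # Write to a sibling temp file, then swap it into place, so readers
        # never observe a half-written config.
        tmp = os.path.join(os.path.dirname(path) or ".", "new_config.tmp")
        with open(tmp, "w", encoding="utf-8") as f:
            f.write(text)
        os.replace(tmp, path)  # atomic within a single filesystem

The temp file lives in the destination's directory precisely so that os.replace() stays on one filesystem and remains atomic.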
@@ -977,13 +981,12 @@ class ModelManager(object):
         # avoid circular import here
         from invokeai.backend.install.model_install_backend import ModelInstall
         successfully_installed = dict()
 
         installer = ModelInstall(config=self.app_config,
                                  prediction_type_helper=prediction_type_helper,
                                  model_manager=self)
         for thing in items_to_import:
             installed = installer.heuristic_import(thing)
             successfully_installed.update(installed)
         self.commit()
         return successfully_installed
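A hedged invocation sketch of the import loop above; the repo id is illustrative:

    # heuristic_import() takes a set of strings and, per the surrounding code,
    # returns a dict of what was successfully installed.
    installed = mgr.heuristic_import({"runwayml/stable-diffusion-v1-5"})
    print(installed)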