chore(backend): rename ModelInfo -> LoadedModelInfo

We have two different classes named `ModelInfo`, both of which may need to be used by API consumers. We need to export both, so we have to resolve this naming collision.

The `ModelInfo` I've renamed here is the one that is returned when a model is loaded. It's the object least likely to be used by API consumers.
This commit is contained in:
psychedelicious 2024-02-11 09:27:57 +11:00 committed by Brandon Rising
parent 08636e42af
commit 5927ab9c36
9 changed files with 38 additions and 30 deletions

View File

@ -11,7 +11,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
SessionQueueStatus,
)
from invokeai.app.util.misc import get_timestamp
from invokeai.backend.model_management.model_manager import ModelInfo
from invokeai.backend.model_management.model_manager import LoadedModelInfo
from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
@ -201,7 +201,7 @@ class EventServiceBase:
base_model: BaseModelType,
model_type: ModelType,
submodel: SubModelType,
model_info: ModelInfo,
loaded_model_info: LoadedModelInfo,
) -> None:
"""Emitted when a model is correctly loaded (returns model info)"""
self.__emit_queue_event(
@ -215,9 +215,9 @@ class EventServiceBase:
"base_model": base_model,
"model_type": model_type,
"submodel": submodel,
"hash": model_info.hash,
"location": str(model_info.location),
"precision": str(model_info.precision),
"hash": loaded_model_info.hash,
"location": str(loaded_model_info.location),
"precision": str(loaded_model_info.precision),
},
)

View File

@ -14,8 +14,8 @@ from invokeai.app.services.shared.invocation_context import InvocationContextDat
from invokeai.backend.model_management import (
AddModelResult,
BaseModelType,
LoadedModelInfo,
MergeInterpolationMethod,
ModelInfo,
ModelType,
SchedulerPredictionType,
SubModelType,
@ -48,7 +48,7 @@ class ModelManagerServiceBase(ABC):
model_type: ModelType,
submodel: Optional[SubModelType] = None,
context_data: Optional[InvocationContextData] = None,
) -> ModelInfo:
) -> LoadedModelInfo:
"""Retrieve the indicated model with name and type.
submodel can be used to get a part (such as the vae)
of a diffusers pipeline."""

View File

@ -16,8 +16,8 @@ from invokeai.app.services.shared.invocation_context import InvocationContextDat
from invokeai.backend.model_management import (
AddModelResult,
BaseModelType,
LoadedModelInfo,
MergeInterpolationMethod,
ModelInfo,
ModelManager,
ModelMerger,
ModelNotFoundException,
@ -98,7 +98,7 @@ class ModelManagerService(ModelManagerServiceBase):
model_type: ModelType,
submodel: Optional[SubModelType] = None,
context_data: Optional[InvocationContextData] = None,
) -> ModelInfo:
) -> LoadedModelInfo:
"""
Retrieve the indicated model. submodel can be used to get a
part (such as the vae) of a diffusers model.
@ -114,7 +114,7 @@ class ModelManagerService(ModelManagerServiceBase):
submodel=submodel,
)
model_info = self.mgr.get_model(
loaded_model_info = self.mgr.get_model(
model_name,
base_model,
model_type,
@ -128,10 +128,10 @@ class ModelManagerService(ModelManagerServiceBase):
base_model=base_model,
model_type=model_type,
submodel=submodel,
model_info=model_info,
loaded_model_info=loaded_model_info,
)
return model_info
return loaded_model_info
def model_exists(
self,
@ -273,7 +273,7 @@ class ModelManagerService(ModelManagerServiceBase):
base_model: BaseModelType,
model_type: ModelType,
submodel: Optional[SubModelType] = None,
model_info: Optional[ModelInfo] = None,
loaded_model_info: Optional[LoadedModelInfo] = None,
):
if self._invoker is None:
return
@ -281,7 +281,7 @@ class ModelManagerService(ModelManagerServiceBase):
if self._invoker.services.queue.is_canceled(context_data.session_id):
raise CanceledException()
if model_info:
if loaded_model_info:
self._invoker.services.events.emit_model_load_completed(
queue_id=context_data.queue_id,
queue_item_id=context_data.queue_item_id,
@ -291,7 +291,7 @@ class ModelManagerService(ModelManagerServiceBase):
base_model=base_model,
model_type=model_type,
submodel=submodel,
model_info=model_info,
loaded_model_info=loaded_model_info,
)
else:
self._invoker.services.events.emit_model_load_started(

View File

@ -13,7 +13,7 @@ from invokeai.app.services.images.images_common import ImageDTO
from invokeai.app.services.invocation_services import InvocationServices
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_management.model_manager import ModelInfo
from invokeai.backend.model_management.model_manager import LoadedModelInfo
from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
@ -272,14 +272,15 @@ class ModelsInterface(InvocationContextInterface):
def load(
self, model_name: str, base_model: BaseModelType, model_type: ModelType, submodel: Optional[SubModelType] = None
) -> ModelInfo:
) -> LoadedModelInfo:
"""
Loads a model, returning its `ModelInfo` object.
Loads a model.
:param model_name: The name of the model to get.
:param base_model: The base model of the model to get.
:param model_type: The type of the model to get.
:param submodel: The submodel of the model to get.
:returns: An object representing the loaded model.
"""
# The model manager emits events as it loads the model. It needs the context data to build

View File

@ -1,5 +1,12 @@
"""
Initialization file for invokeai.backend
"""
from .model_management import BaseModelType, ModelCache, ModelInfo, ModelManager, ModelType, SubModelType # noqa: F401
from .model_management import ( # noqa: F401
BaseModelType,
LoadedModelInfo,
ModelCache,
ModelManager,
ModelType,
SubModelType,
)
from .model_management.models import SilenceWarnings # noqa: F401

View File

@ -3,7 +3,7 @@
Initialization file for invokeai.backend.model_management
"""
# This import must be first
from .model_manager import AddModelResult, ModelInfo, ModelManager, SchedulerPredictionType
from .model_manager import AddModelResult, LoadedModelInfo, ModelManager, SchedulerPredictionType
from .lora import ModelPatcher, ONNXModelPatcher
from .model_cache import ModelCache

View File

@ -271,7 +271,7 @@ CONFIG_FILE_VERSION = "3.0.0"
@dataclass
class ModelInfo:
class LoadedModelInfo:
context: ModelLocker
name: str
base_model: BaseModelType
@ -450,7 +450,7 @@ class ModelManager(object):
base_model: BaseModelType,
model_type: ModelType,
submodel_type: Optional[SubModelType] = None,
) -> ModelInfo:
) -> LoadedModelInfo:
"""Given a model identified in models.yaml, return
a LoadedModelInfo object describing it.
:param model_name: symbolic name of the model in models.yaml
@ -508,7 +508,7 @@ class ModelManager(object):
model_hash = "<NO_HASH>" # TODO:
return ModelInfo(
return LoadedModelInfo(
context=model_context,
name=model_name,
base_model=base_model,

View File

@ -7,7 +7,7 @@ import torch
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.backend.install.model_install_backend import ModelInstall
from invokeai.backend.model_management.model_manager import ModelInfo
from invokeai.backend.model_management.model_manager import LoadedModelInfo
from invokeai.backend.model_management.models.base import BaseModelType, ModelNotFoundException, ModelType, SubModelType
@ -34,8 +34,8 @@ def install_and_load_model(
base_model: BaseModelType,
model_type: ModelType,
submodel_type: Optional[SubModelType] = None,
) -> ModelInfo:
"""Install a model if it is not already installed, then get the ModelInfo for that model.
) -> LoadedModelInfo:
"""Install a model if it is not already installed, then get the LoadedModelInfo for that model.
This is intended as a utility function for tests.
@ -49,9 +49,9 @@ def install_and_load_model(
submodel_type (Optional[SubModelType]): The submodel type, forwarded to ModelManager.get_model(...).
Returns:
ModelInfo
LoadedModelInfo
"""
# If the requested model is already installed, return its ModelInfo.
# If the requested model is already installed, return its LoadedModelInfo.
with contextlib.suppress(ModelNotFoundException):
return model_installer.mgr.get_model(model_name, base_model, model_type, submodel_type)

View File

@ -52,7 +52,7 @@ from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.services.image_records.image_records_common import ImageCategory
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
from invokeai.backend.model_management.model_manager import ModelInfo
from invokeai.backend.model_management.model_manager import LoadedModelInfo
from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
@ -121,7 +121,7 @@ __all__ = [
# invokeai.app.services.config.config_default
"InvokeAIAppConfig",
# invokeai.backend.model_management.model_manager
"ModelInfo",
"LoadedModelInfo",
# invokeai.backend.model_management.models.base
"BaseModelType",
"ModelType",