mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
3d6d89feb4
* pass model config to _load_model
* make conversion work again
* do not write diffusers to disk when convert_cache set to 0
* adding same model to cache twice is a no-op, not an assertion error
* fix issues identified by psychedelicious during pr review
* following conversion, avoid redundant read of cached submodels
* fix error introduced while merging

Co-authored-by: Lincoln Stein <lstein@gmail.com>
36 lines
748 B
Python
36 lines
748 B
Python
"""Re-export frequently-used symbols from the Model Manager backend."""
|
|
|
|
from .config import (
|
|
AnyModel,
|
|
AnyModelConfig,
|
|
BaseModelType,
|
|
InvalidModelConfigException,
|
|
ModelConfigFactory,
|
|
ModelFormat,
|
|
ModelRepoVariant,
|
|
ModelType,
|
|
ModelVariantType,
|
|
SchedulerPredictionType,
|
|
SubModelType,
|
|
)
|
|
from .load import LoadedModel
|
|
from .probe import ModelProbe
|
|
from .search import ModelSearch
|
|
|
|
__all__ = [
|
|
"AnyModel",
|
|
"AnyModelConfig",
|
|
"BaseModelType",
|
|
"ModelRepoVariant",
|
|
"InvalidModelConfigException",
|
|
"LoadedModel",
|
|
"ModelConfigFactory",
|
|
"ModelFormat",
|
|
"ModelProbe",
|
|
"ModelSearch",
|
|
"ModelType",
|
|
"ModelVariantType",
|
|
"SchedulerPredictionType",
|
|
"SubModelType",
|
|
]
|