mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

add the import model router

This commit is contained in:
parent 0988725c1b
commit 96bf92ead4
@@ -2,17 +2,17 @@

 from typing import Literal, Optional, Union

-from fastapi import Query
+from fastapi import Query, Body
 from fastapi.routing import APIRouter, HTTPException
 from pydantic import BaseModel, Field, parse_obj_as
 from ..dependencies import ApiDependencies
 from invokeai.backend import BaseModelType, ModelType
+from invokeai.backend.model_management import AddModelResult
 from invokeai.backend.model_management.models import OPENAPI_MODEL_CONFIGS, SchedulerPredictionType

 MODEL_CONFIGS = Union[tuple(OPENAPI_MODEL_CONFIGS)]

 models_router = APIRouter(prefix="/v1/models", tags=["models"])


 class VaeRepo(BaseModel):
     repo_id: str = Field(description="The repo ID to use for this VAE")
     path: Optional[str] = Field(description="The path to the VAE")
@@ -51,9 +51,12 @@ class CreateModelResponse(BaseModel):
     info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info")
     status: str = Field(description="The status of the API response")

-class ImportModelRequest(BaseModel):
-    name: str = Field(description="A model path, repo_id or URL to import")
-    prediction_type: Optional[Literal['epsilon','v_prediction','sample']] = Field(description='Prediction type for SDv2 checkpoint files')
+class ImportModelResponse(BaseModel):
+    name: str = Field(description="The name of the imported model")
+    # base_model: str = Field(description="The base model")
+    # model_type: str = Field(description="The model type")
+    info: AddModelResult = Field(description="The model info")
+    status: str = Field(description="The status of the API response")

 class ConversionRequest(BaseModel):
     name: str = Field(description="The name of the new model")
@@ -86,7 +89,6 @@ async def list_models(
     models = parse_obj_as(ModelsList, { "models": models_raw })
     return models

-
 @models_router.post(
     "/",
     operation_id="update_model",
@@ -109,27 +111,38 @@ async def update_model(
     return model_response

 @models_router.post(
-    "/",
+    "/import",
     operation_id="import_model",
-    responses={200: {"status": "success"}},
+    responses= {
+        201: {"description" : "The model imported successfully"},
+        404: {"description" : "The model could not be found"},
+    },
+    status_code=201,
+    response_model=ImportModelResponse
 )
 async def import_model(
-    model_request: ImportModelRequest
-) -> None:
-    """ Add Model """
-    items_to_import = set([model_request.name])
+    name: str = Query(description="A model path, repo_id or URL to import"),
+    prediction_type: Optional[Literal['v_prediction','epsilon','sample']] = Query(description='Prediction type for SDv2 checkpoint files', default="v_prediction"),
+) -> ImportModelResponse:
+    """ Add a model using its local path, repo_id, or remote URL """
+    items_to_import = {name}
     prediction_types = { x.value: x for x in SchedulerPredictionType }
     logger = ApiDependencies.invoker.services.logger

     installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import(
         items_to_import = items_to_import,
-        prediction_type_helper = lambda x: prediction_types.get(model_request.prediction_type)
+        prediction_type_helper = lambda x: prediction_types.get(prediction_type)
     )
-    if len(installed_models) > 0:
-        logger.info(f'Successfully imported {model_request.name}')
+    if info := installed_models.get(name):
+        logger.info(f'Successfully imported {name}, got {info}')
+        return ImportModelResponse(
+            name = name,
+            info = info,
+            status = "success",
+        )
     else:
-        logger.error(f'Model {model_request.name} not imported')
-        raise HTTPException(status_code=500, detail=f'Model {model_request.name} not imported')
+        logger.error(f'Model {name} not imported')
+        raise HTTPException(status_code=404, detail=f'Model {name} not found')

 @models_router.delete(
     "/{model_name}",
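For context, a client-side sketch of calling the new route. This is illustrative only, not part of the commit: it assumes the models router is mounted under /api with the server on local port 9090 (both assumptions), and the model name and the requests library are likewise just examples.

# Hypothetical client call against the new endpoint (URL prefix, port, and model name are assumptions).
import requests

resp = requests.post(
    "http://localhost:9090/api/v1/models/import",
    params={
        # a local path, HuggingFace repo_id, or URL
        "name": "stabilityai/stable-diffusion-2-1",
        # only meaningful for SDv2 checkpoint files
        "prediction_type": "v_prediction",
    },
)
resp.raise_for_status()    # the route returns 404 if the model could not be found
payload = resp.json()      # ImportModelResponse: name, info, status
print(payload["status"], payload["name"])

A successful import returns HTTP 201 with the AddModelResult serialized in the info field.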
@@ -135,6 +135,29 @@ class ModelManagerServiceBase(ABC):
         """
         pass

+    @abstractmethod
+    def heuristic_import(self,
+                         items_to_import: Set[str],
+                         prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None,
+                         )->Dict[str, AddModelResult]:
+        '''Import a list of paths, repo_ids or URLs. Returns the set of
+        successfully imported items.
+        :param items_to_import: Set of strings corresponding to models to be imported.
+        :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType.
+
+        The prediction type helper is necessary to distinguish between
+        models based on Stable Diffusion 2 Base (requiring
+        SchedulerPredictionType.Epsilon) and Stable Diffusion 768
+        (requiring SchedulerPredictionType.VPrediction). It is
+        generally impossible to do this programmatically, so the
+        prediction_type_helper usually asks the user to choose.
+
+        The result is a set of successfully installed models. Each element
+        of the set is a dict corresponding to the newly-created OmegaConf stanza for
+        that model.
+        '''
+        pass
+
     @abstractmethod
     def commit(self, conf_file: Path = None) -> None:
         """
@@ -361,3 +384,24 @@ class ModelManagerService(ModelManagerServiceBase):
     def logger(self):
         return self.mgr.logger

+    def heuristic_import(self,
+                         items_to_import: Set[str],
+                         prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None,
+                         )->Dict[str, AddModelResult]:
+        '''Import a list of paths, repo_ids or URLs. Returns the set of
+        successfully imported items.
+        :param items_to_import: Set of strings corresponding to models to be imported.
+        :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType.
+
+        The prediction type helper is necessary to distinguish between
+        models based on Stable Diffusion 2 Base (requiring
+        SchedulerPredictionType.Epsilon) and Stable Diffusion 768
+        (requiring SchedulerPredictionType.VPrediction). It is
+        generally impossible to do this programmatically, so the
+        prediction_type_helper usually asks the user to choose.
+
+        The result is a set of successfully installed models. Each element
+        of the set is a dict corresponding to the newly-created OmegaConf stanza for
+        that model.
+        '''
+        return self.mgr.heuristic_import(items_to_import, prediction_type_helper)
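A usage sketch of this pass-through service method follows. The helper, the inputs, and mgr_service (standing in for an initialized ModelManagerService) are all hypothetical; SchedulerPredictionType members come from the docstring above.

# Hypothetical caller; mgr_service stands in for an initialized ModelManagerService.
from pathlib import Path
from invokeai.backend.model_management.models import SchedulerPredictionType

def choose_prediction_type(checkpoint_path: Path) -> SchedulerPredictionType:
    # A real helper would prompt the user; here we assume an SD 2.x 768 model.
    return SchedulerPredictionType.VPrediction

results = mgr_service.heuristic_import(
    {"/models/my-model.safetensors", "stabilityai/stable-diffusion-2-1"},
    prediction_type_helper=choose_prediction_type,
)
for source, result in results.items():
    # each value is an AddModelResult with name, model_type, base_model, config
    print(f"{source} -> {result.name}")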
@@ -18,7 +18,7 @@ from tqdm import tqdm
 import invokeai.configs as configs

 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType
+from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
 from invokeai.backend.model_management.model_probe import ModelProbe, SchedulerPredictionType, ModelProbeInfo
 from invokeai.backend.util import download_with_resume
 from ..util.logging import InvokeAILogger
@@ -166,17 +166,22 @@ class ModelInstall(object):
         # add requested models
         for path in selections.install_models:
             logger.info(f'Installing {path} [{job}/{jobs}]')
-            self.heuristic_install(path)
+            self.heuristic_import(path)
             job += 1

         self.mgr.commit()

-    def heuristic_install(self,
+    def heuristic_import(self,
                           model_path_id_or_url: Union[str,Path],
-                          models_installed: Set[Path]=None)->Set[Path]:
+                          models_installed: Set[Path]=None)->Dict[str, AddModelResult]:
+        '''
+        :param model_path_id_or_url: A Path to a local model to import, or a string representing its repo_id or URL
+        :param models_installed: Set of installed models, used for recursive invocation
+        Returns a set of dict objects corresponding to newly-created stanzas in models.yaml.
+        '''

         if not models_installed:
-            models_installed = set()
+            models_installed = dict()

         # A little hack to allow nested routines to retrieve info on the requested ID
         self.current_id = model_path_id_or_url
@@ -185,24 +190,24 @@ class ModelInstall(object):
         try:
             # checkpoint file, or similar
             if path.is_file():
-                models_installed.add(self._install_path(path))
+                models_installed.update(self._install_path(path))

             # folders style or similar
             elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
-                models_installed.add(self._install_path(path))
+                models_installed.update(self._install_path(path))

             # recursive scan
             elif path.is_dir():
                 for child in path.iterdir():
-                    self.heuristic_install(child, models_installed=models_installed)
+                    self.heuristic_import(child, models_installed=models_installed)

             # huggingface repo
             elif len(str(path).split('/')) == 2:
-                models_installed.add(self._install_repo(str(path)))
+                models_installed.update(self._install_repo(str(path)))

             # a URL
             elif model_path_id_or_url.startswith(("http:", "https:", "ftp:")):
-                models_installed.add(self._install_url(model_path_id_or_url))
+                models_installed.update(self._install_url(model_path_id_or_url))

             else:
                 logger.warning(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping')
@@ -214,24 +219,25 @@ class ModelInstall(object):

     # install a model from a local path. The optional info parameter is there to prevent
     # the model from being probed twice in the event that it has already been probed.
-    def _install_path(self, path: Path, info: ModelProbeInfo=None)->Path:
+    def _install_path(self, path: Path, info: ModelProbeInfo=None)->Dict[str, AddModelResult]:
         try:
-            # logger.debug(f'Probing {path}')
+            model_result = None
             info = info or ModelProbe().heuristic_probe(path,self.prediction_helper)
             model_name = path.stem if info.format=='checkpoint' else path.name
             if self.mgr.model_exists(model_name, info.base_type, info.model_type):
                 raise ValueError(f'A model named "{model_name}" is already installed.')
             attributes = self._make_attributes(path,info)
-            self.mgr.add_model(model_name = model_name,
+            model_result = self.mgr.add_model(model_name = model_name,
                                base_model = info.base_type,
                                model_type = info.model_type,
                                model_attributes = attributes,
                                )
         except Exception as e:
             logger.warning(f'{str(e)} Skipping registration.')
-            return path
+            return {}
+        return {str(path): model_result}

-    def _install_url(self, url: str)->Path:
+    def _install_url(self, url: str)->dict:
         # copy to a staging area, probe, import and delete
         with TemporaryDirectory(dir=self.config.models_path) as staging:
             location = download_with_resume(url,Path(staging))
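The return-type change gives _install_path a simple contract: a failed import yields an empty dict, so the caller's update() is a no-op, while a success yields a single-entry {path: AddModelResult} mapping. A minimal illustration of the accumulation pattern used by heuristic_import above (values are placeholders, not real results):

# Illustration of the dict-merge accumulation; string values stand in for AddModelResult.
models_installed = dict()
models_installed.update({"/models/a.safetensors": "<AddModelResult for a>"})  # success
models_installed.update({})                                                   # failed import: no-op
models_installed.update({"/models/b": "<AddModelResult for b>"})              # success
assert list(models_installed) == ["/models/a.safetensors", "/models/b"]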
@@ -244,7 +250,7 @@ class ModelInstall(object):
             # staged version will be garbage-collected at this time
             return self._install_path(Path(models_path), info)

-    def _install_repo(self, repo_id: str)->Path:
+    def _install_repo(self, repo_id: str)->dict:
         hinfo = HfApi().model_info(repo_id)

         # we try to figure out how to download this most economically
@@ -1,7 +1,7 @@
 """
 Initialization file for invokeai.backend.model_management
 """
-from .model_manager import ModelManager, ModelInfo
+from .model_manager import ModelManager, ModelInfo, AddModelResult
 from .model_cache import ModelCache
 from .models import BaseModelType, ModelType, SubModelType, ModelVariantType

@@ -233,14 +233,14 @@ import hashlib
 import textwrap
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Optional, List, Tuple, Union, Set, Callable, types
+from typing import Optional, List, Tuple, Union, Dict, Set, Callable, types
 from shutil import rmtree

 import torch
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig

-from pydantic import BaseModel
+from pydantic import BaseModel, Field

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
@@ -278,8 +278,13 @@ class InvalidModelError(Exception):
     "Raised when an invalid model is requested"
     pass

-MAX_CACHE_SIZE = 6.0 # GB
+class AddModelResult(BaseModel):
+    name: str = Field(description="The name of the model after import")
+    model_type: ModelType = Field(description="The type of model")
+    base_model: BaseModelType = Field(description="The base model")
+    config: ModelConfigBase = Field(description="The configuration of the model")
+
+MAX_CACHE_SIZE = 6.0 # GB

 class ConfigMeta(BaseModel):
     version: str
@@ -571,13 +576,16 @@ class ModelManager(object):
         model_type: ModelType,
         model_attributes: dict,
         clobber: bool = False,
-    ) -> None:
+    ) -> AddModelResult:
         """
         Update the named model with a dictionary of attributes. Will fail with an
         assertion error if the name already exists. Pass clobber=True to overwrite.
         On a successful update, the config will be changed in memory and the
         method will return True. Will fail with an assertion error if provided
         attributes are incorrect or the model name is missing.
+
+        The returned dict has the same format as the dict returned by
+        model_info().
         """

         model_class = MODEL_CLASSES[base_model][model_type]
@@ -601,12 +609,18 @@ class ModelManager(object):
             old_model_cache.unlink()

         # remove in-memory cache
-        # note: it not garantie to release memory(model can has other references)
+        # note: this is not guaranteed to release memory (the model can have other references)
         cache_ids = self.cache_keys.pop(model_key, [])
         for cache_id in cache_ids:
             self.cache.uncache_model(cache_id)

         self.models[model_key] = model_config
+        return AddModelResult(
+            name = model_name,
+            model_type = model_type,
+            base_model = base_model,
+            config = model_config,
+        )

     def search_models(self, search_folder):
         self.logger.info(f"Finding Models In: {search_folder}")
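With this change add_model reports exactly what it registered. A hedged caller sketch (mgr, attrs, and the enum values are stand-ins, not from this commit):

# Hypothetical caller of the updated add_model(); all names here are illustrative.
result = mgr.add_model(
    model_name="my-model",
    base_model=base_model,        # a BaseModelType member
    model_type=model_type,        # a ModelType member
    model_attributes=attrs,       # a valid attribute dict for that model class
)
print(f"registered {result.name}: {result.base_model}/{result.model_type}")
print(result.config)              # the newly created ModelConfigBase stanza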
@@ -729,7 +743,7 @@ class ModelManager(object):
         if (new_models_found or imported_models) and self.config_path:
             self.commit()

-    def autoimport(self)->set[Path]:
+    def autoimport(self)->Dict[str, AddModelResult]:
         '''
         Scan the autoimport directory (if defined) and import new models, delete defunct models.
         '''
@@ -742,7 +756,6 @@ class ModelManager(object):
             prediction_type_helper = ask_user_for_prediction_type,
         )

-        installed = set()
         scanned_dirs = set()

         config = self.app_config
@@ -756,13 +769,14 @@ class ModelManager(object):
                 continue

            self.logger.info(f'Scanning {autodir} for models to import')
+            installed = dict()

            autodir = self.app_config.root_path / autodir
            if not autodir.exists():
                continue

            items_scanned = 0
-            new_models_found = set()
+            new_models_found = dict()

            for root, dirs, files in os.walk(autodir):
                items_scanned += len(dirs) + len(files)
@@ -772,7 +786,7 @@ class ModelManager(object):
                     scanned_dirs.add(path)
                     continue
                 if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
-                    new_models_found.update(installer.heuristic_install(path))
+                    new_models_found.update(installer.heuristic_import(path))
                     scanned_dirs.add(path)

             for f in files:
@@ -780,7 +794,7 @@ class ModelManager(object):
                 if path in known_paths or path.parent in scanned_dirs:
                     continue
                 if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}:
-                    new_models_found.update(installer.heuristic_install(path))
+                    new_models_found.update(installer.heuristic_import(path))

             self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models')
             installed.update(new_models_found)
@@ -790,7 +804,7 @@ class ModelManager(object):
     def heuristic_import(self,
                          items_to_import: Set[str],
                          prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None,
-                         )->Set[str]:
+                         )->Dict[str, AddModelResult]:
         '''Import a list of paths, repo_ids or URLs. Returns the set of
         successfully imported items.
         :param items_to_import: Set of strings corresponding to models to be imported.
@@ -803,17 +817,20 @@ class ModelManager(object):
         generally impossible to do this programmatically, so the
         prediction_type_helper usually asks the user to choose.

+        The result is a set of successfully installed models. Each element
+        of the set is a dict corresponding to the newly-created OmegaConf stanza for
+        that model.
         '''
         # avoid circular import here
         from invokeai.backend.install.model_install_backend import ModelInstall
-        successfully_installed = set()
+        successfully_installed = dict()

         installer = ModelInstall(config = self.app_config,
                                  prediction_type_helper = prediction_type_helper,
                                  model_manager = self)
         for thing in items_to_import:
             try:
-                installed = installer.heuristic_install(thing)
+                installed = installer.heuristic_import(thing)
                 successfully_installed.update(installed)
             except Exception as e:
                 self.logger.warning(f'{thing} could not be imported: {str(e)}')
invokeai/frontend/web/dist/index.html (vendored, 2 lines changed)
@@ -12,7 +12,7 @@
       margin: 0;
     }
   </style>
-  <script type="module" crossorigin src="./assets/index-8a3e9251.js"></script>
+  <script type="module" crossorigin src="./assets/index-c0367e37.js"></script>
 </head>

 <body dir="ltr">
invokeai/frontend/web/dist/locales/en.json (vendored, 17 lines changed)
@@ -24,16 +24,13 @@
   },
   "common": {
     "hotkeysLabel": "Hotkeys",
-    "themeLabel": "Theme",
+    "darkMode": "Dark Mode",
+    "lightMode": "Light Mode",
     "languagePickerLabel": "Language",
     "reportBugLabel": "Report Bug",
     "githubLabel": "Github",
     "discordLabel": "Discord",
     "settingsLabel": "Settings",
-    "darkTheme": "Dark",
-    "lightTheme": "Light",
-    "greenTheme": "Green",
-    "oceanTheme": "Ocean",
     "langArabic": "العربية",
     "langEnglish": "English",
     "langDutch": "Nederlands",
@@ -55,6 +52,7 @@
     "unifiedCanvas": "Unified Canvas",
     "linear": "Linear",
     "nodes": "Node Editor",
+    "modelmanager": "Model Manager",
     "postprocessing": "Post Processing",
     "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
     "postProcessing": "Post Processing",
@@ -336,6 +334,7 @@
   "modelManager": {
     "modelManager": "Model Manager",
     "model": "Model",
+    "vae": "VAE",
     "allModels": "All Models",
     "checkpointModels": "Checkpoints",
     "diffusersModels": "Diffusers",
@@ -351,6 +350,7 @@
     "scanForModels": "Scan For Models",
     "addManually": "Add Manually",
     "manual": "Manual",
+    "baseModel": "Base Model",
     "name": "Name",
     "nameValidationMsg": "Enter a name for your model",
     "description": "Description",
@@ -363,6 +363,7 @@
     "repoIDValidationMsg": "Online repository of your model",
     "vaeLocation": "VAE Location",
     "vaeLocationValidationMsg": "Path to where your VAE is located.",
+    "variant": "Variant",
    "vaeRepoID": "VAE Repo ID",
    "vaeRepoIDValidationMsg": "Online repository of your VAE",
    "width": "Width",
@@ -524,7 +525,8 @@
     "initialImage": "Initial Image",
     "showOptionsPanel": "Show Options Panel",
     "hidePreview": "Hide Preview",
-    "showPreview": "Show Preview"
+    "showPreview": "Show Preview",
+    "controlNetControlMode": "Control Mode"
   },
   "settings": {
     "models": "Models",
@@ -547,7 +549,8 @@
     "general": "General",
     "generation": "Generation",
     "ui": "User Interface",
-    "availableSchedulers": "Available Schedulers"
+    "favoriteSchedulers": "Favorite Schedulers",
+    "favoriteSchedulersPlaceholder": "No schedulers favorited"
   },
   "toast": {
     "serverError": "Server Error",
File diff suppressed because one or more lines are too long