diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index dcbdbec82d..2ea287cae3 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -116,33 +116,41 @@ async def update_model( responses= { 201: {"description" : "The model imported successfully"}, 404: {"description" : "The model could not be found"}, + 409: {"description" : "There is already a model corresponding to this path or repo_id"}, }, status_code=201, response_model=ImportModelResponse ) async def import_model( - name: str = Query(description="A model path, repo_id or URL to import"), - prediction_type: Optional[Literal['v_prediction','epsilon','sample']] = Query(description='Prediction type for SDv2 checkpoint files', default="v_prediction"), + name: str = Body(description="A model path, repo_id or URL to import"), + prediction_type: Optional[Literal['v_prediction','epsilon','sample']] = \ + Body(description='Prediction type for SDv2 checkpoint files', default="v_prediction"), ) -> ImportModelResponse: """ Add a model using its local path, repo_id, or remote URL """ + items_to_import = {name} prediction_types = { x.value: x for x in SchedulerPredictionType } logger = ApiDependencies.invoker.services.logger - - installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import( - items_to_import = items_to_import, - prediction_type_helper = lambda x: prediction_types.get(prediction_type) - ) - if info := installed_models.get(name): - logger.info(f'Successfully imported {name}, got {info}') - return ImportModelResponse( - name = name, - info = info, - status = "success", + + try: + installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import( + items_to_import = items_to_import, + prediction_type_helper = lambda x: prediction_types.get(prediction_type) ) - else: - logger.error(f'Model {name} not imported') - raise HTTPException(status_code=404, detail=f'Model {name} not found') + if info := 
installed_models.get(name): + logger.info(f'Successfully imported {name}, got {info}') + return ImportModelResponse( + name = name, + info = info, + status = "success", + ) + except KeyError as e: + logger.error(str(e)) + raise HTTPException(status_code=404, detail=str(e)) + except ValueError as e: + logger.error(str(e)) + raise HTTPException(status_code=409, detail=str(e)) + @models_router.delete( "/{model_name}", diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index a10fa852c0..78dc341b63 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -166,14 +166,18 @@ class ModelInstall(object): # add requested models for path in selections.install_models: logger.info(f'Installing {path} [{job}/{jobs}]') - self.heuristic_import(path) + try: + self.heuristic_import(path) + except (ValueError, KeyError) as e: + logger.error(str(e)) job += 1 self.mgr.commit() def heuristic_import(self, - model_path_id_or_url: Union[str,Path], - models_installed: Set[Path]=None)->Dict[str, AddModelResult]: + model_path_id_or_url: Union[str,Path], + models_installed: Set[Path]=None, + )->Dict[str, AddModelResult]: ''' :param model_path_id_or_url: A Path to a local model to import, or a string representing its repo_id or URL :param models_installed: Set of installed models, used for recursive invocation @@ -187,57 +191,48 @@ class ModelInstall(object): self.current_id = model_path_id_or_url path = Path(model_path_id_or_url) - try: - # checkpoint file, or similar - if path.is_file(): - models_installed.update(self._install_path(path)) + # checkpoint file, or similar + if path.is_file(): + models_installed.update({str(path):self._install_path(path)}) - # folders style or similar - elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]): - models_installed.update(self._install_path(path)) + # folders style 
or similar + elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]): + models_installed.update({str(path): self._install_path(path)}) - # recursive scan - elif path.is_dir(): - for child in path.iterdir(): - self.heuristic_import(child, models_installed=models_installed) + # recursive scan + elif path.is_dir(): + for child in path.iterdir(): + self.heuristic_import(child, models_installed=models_installed) - # huggingface repo - elif len(str(path).split('/')) == 2: - models_installed.update(self._install_repo(str(path))) + # huggingface repo + elif len(str(model_path_id_or_url).split('/')) == 2: + models_installed.update({str(model_path_id_or_url): self._install_repo(str(model_path_id_or_url))}) - # a URL - elif model_path_id_or_url.startswith(("http:", "https:", "ftp:")): - models_installed.update(self._install_url(model_path_id_or_url)) + # a URL + elif str(model_path_id_or_url).startswith(("http:", "https:", "ftp:")): + models_installed.update({str(model_path_id_or_url): self._install_url(model_path_id_or_url)}) - else: - logger.warning(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping') - - except ValueError as e: - logger.error(str(e)) + else: + raise KeyError(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping') return models_installed # install a model from a local path. The optional info parameter is there to prevent # the model from being probed twice in the event that it has already been probed. 
- def _install_path(self, path: Path, info: ModelProbeInfo=None)->Dict[str, AddModelResult]: - try: - model_result = None - info = info or ModelProbe().heuristic_probe(path,self.prediction_helper) - model_name = path.stem if info.format=='checkpoint' else path.name - if self.mgr.model_exists(model_name, info.base_type, info.model_type): - raise ValueError(f'A model named "{model_name}" is already installed.') - attributes = self._make_attributes(path,info) - model_result = self.mgr.add_model(model_name = model_name, - base_model = info.base_type, - model_type = info.model_type, - model_attributes = attributes, - ) - except Exception as e: - logger.warning(f'{str(e)} Skipping registration.') - return {} - return {str(path): model_result} + def _install_path(self, path: Path, info: ModelProbeInfo=None)->AddModelResult: + model_result = None + info = info or ModelProbe().heuristic_probe(path,self.prediction_helper) + model_name = path.stem if info.format=='checkpoint' else path.name + if self.mgr.model_exists(model_name, info.base_type, info.model_type): + raise ValueError(f'A model named "{model_name}" is already installed.') + attributes = self._make_attributes(path,info) + return self.mgr.add_model(model_name = model_name, + base_model = info.base_type, + model_type = info.model_type, + model_attributes = attributes, + ) - def _install_url(self, url: str)->dict: + def _install_url(self, url: str)->AddModelResult: # copy to a staging area, probe, import and delete with TemporaryDirectory(dir=self.config.models_path) as staging: location = download_with_resume(url,Path(staging)) @@ -250,7 +245,7 @@ class ModelInstall(object): # staged version will be garbage-collected at this time return self._install_path(Path(models_path), info) - def _install_repo(self, repo_id: str)->dict: + def _install_repo(self, repo_id: str)->AddModelResult: hinfo = HfApi().model_info(repo_id) # we try to figure out how to download this most economically diff --git 
a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index a8cbb50474..ffa4f2ae82 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -820,6 +820,10 @@ class ModelManager(object): The result is a set of successfully installed models. Each element of the set is a dict corresponding to the newly-created OmegaConf stanza for that model. + + May raise the following exceptions: + - KeyError - one or more of the items to import is not a valid path, repo_id or URL + - ValueError - a corresponding model already exists ''' # avoid circular import here from invokeai.backend.install.model_install_backend import ModelInstall @@ -829,11 +833,7 @@ class ModelManager(object): prediction_type_helper = prediction_type_helper, model_manager = self) for thing in items_to_import: - try: - installed = installer.heuristic_import(thing) - successfully_installed.update(installed) - except Exception as e: - self.logger.warning(f'{thing} could not be imported: {str(e)}') - + installed = installer.heuristic_import(thing) + successfully_installed.update(installed) self.commit() return successfully_installed