Support CLIP Vision image encoder downloading

This commit is contained in:
Lincoln Stein
2023-10-07 19:13:41 -04:00
parent 7f68f58cf7
commit 51060543dc
7 changed files with 64 additions and 25 deletions

View File

@ -11,6 +11,7 @@ from typing import Dict, List, Literal, get_args, get_origin, get_type_hints
import invokeai.backend.util.logging as logger
from invokeai.backend.model_manager import ModelType
from ..invocations.baseinvocation import BaseInvocation
from ..services.invocation_services import InvocationServices
from ..services.model_record_service import ModelRecordServiceBase

View File

@ -22,16 +22,16 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
from pydantic.fields import Field
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
from invokeai.app.services.session_queue.session_queue_sqlite import SqliteSessionQueue
from invokeai.app.services.board_image_record_storage import SqliteBoardImageRecordStorage
from invokeai.app.services.board_images import BoardImagesService, BoardImagesServiceDependencies
from invokeai.app.services.board_record_storage import SqliteBoardRecordStorage
from invokeai.app.services.boards import BoardService, BoardServiceDependencies
from invokeai.app.services.session_processor.session_processor_default import DefaultSessionProcessor
from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
from invokeai.app.services.images import ImageService, ImageServiceDependencies
from invokeai.app.services.invocation_stats import InvocationStatsService
from invokeai.app.services.resource_name import SimpleNameService
from invokeai.app.services.session_processor.session_processor_default import DefaultSessionProcessor
from invokeai.app.services.session_queue.session_queue_sqlite import SqliteSessionQueue
from invokeai.app.services.urls import LocalUrlService
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.version.invokeai_version import __version__
@ -49,7 +49,6 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
LibraryGraph,
are_connection_types_compatible,
)
from .services.thread import lock
from .services.image_file_storage import DiskImageFileStorage
from .services.invocation_queue import MemoryInvocationQueue
from .services.invocation_services import InvocationServices
@ -60,6 +59,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
from .services.model_record_service import ModelRecordServiceBase
from .services.processor import DefaultInvocationProcessor
from .services.sqlite import SqliteItemStorage
from .services.thread import lock
if torch.backends.mps.is_available():
import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import)
@ -266,7 +266,9 @@ def invoke_cli():
model_loader = ModelLoadService(config, model_record_store)
model_installer = ModelInstallService(config, model_record_store, events)
graph_execution_manager = SqliteItemStorage[GraphExecutionState](conn=db_conn, table_name="graph_executions", lock=lock)
graph_execution_manager = SqliteItemStorage[GraphExecutionState](
conn=db_conn, table_name="graph_executions", lock=lock
)
urls = LocalUrlService()
image_record_storage = SqliteImageRecordStorage(conn=db_conn, lock=lock)

View File

@ -426,6 +426,7 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
processed_image = zoe_depth_processor(image)
return processed_image
@invocation(
"mediapipe_face_processor",
title="Mediapipe Face Processor",

View File

@ -29,6 +29,7 @@ class UnifiedModelInfo(BaseModel):
recommended: bool = False
installed: bool = False
default: bool = False
requires: Optional[List[str]] = Field(default_factory=list)
@dataclass
@ -116,6 +117,7 @@ class InstallHelper(object):
description=self._initial_models[key].get("description"),
recommended=self._initial_models[key].get("recommended", False),
default=self._initial_models[key].get("default", False),
requires=list(self._initial_models[key].get("requires", [])),
)
self.all_models[key] = info
if not self.default_model:
@ -147,8 +149,21 @@ class InstallHelper(object):
def _to_model(self, key: str) -> UnifiedModelInfo:
    """Return the catalogue entry registered under *key*."""
    model_info = self.all_models[key]
    return model_info
def _add_required_models(self, model_list: List[UnifiedModelInfo]):
installed = {x.source for x in self.installed_models()}
reverse_source = {x.source: x for x in self.all_models.values()}
additional_models = []
for model_info in model_list:
print(f"DEBUG: model_info={model_info}")
for requirement in model_info.requires:
if requirement not in installed:
print(f"DEBUG: installing {requirement}")
additional_models.append(reverse_source.get(requirement))
model_list.extend(additional_models)
def add_or_delete(self, selections: InstallSelections):
installer = self._installer
self._add_required_models(selections.install_models)
for model in selections.install_models:
metadata = ModelSourceMetadata(description=model.description, name=model.name)
installer.install(

View File

@ -11,7 +11,7 @@ import shutil
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Union, Optional
from typing import Optional, Union
import diffusers
import transformers
@ -23,8 +23,8 @@ from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel,
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_record_service import ModelRecordServiceBase
from invokeai.app.services.model_install_service import ModelInstallService
from invokeai.app.services.model_record_service import ModelRecordServiceBase
from invokeai.backend.model_manager import BaseModelType, ModelProbe, ModelProbeInfo, ModelType
warnings.filterwarnings("ignore")
@ -474,7 +474,6 @@ def do_migrate(config: InvokeAIAppConfig, src_directory: Path, dest_directory: P
migrator.migrate()
print("Migration successful.")
(dest_directory / "configs" / "models.yaml").replace(src_directory / "configs" / "models.yaml.orig")
print(f"Original models.yaml file moved to {dest_directory}/configs/models.yaml.orig")

View File

@ -239,6 +239,12 @@ class IPAdapterConfig(ModelConfigBase):
model_format: Literal[ModelFormat.InvokeAI]
class CLIPVisionDiffusersConfig(ModelConfigBase):
    """Model config for ClipVision."""

    # The Literal restricts CLIP Vision models to the diffusers folder format;
    # the factory registers this config under ModelFormat.Diffusers only.
    model_format: Literal[ModelFormat.Diffusers]
AnyModelConfig = Union[
MainCheckpointConfig,
MainDiffusersConfig,
@ -268,6 +274,7 @@ class ModelConfigFactory(object):
ModelType.Lora: LoRAConfig,
ModelType.Vae: VaeDiffusersConfig,
ModelType.ControlNet: ControlNetDiffusersConfig,
ModelType.CLIPVision: CLIPVisionDiffusersConfig,
},
ModelFormat.Lycoris: {
ModelType.Lora: LoRAConfig,

View File

@ -227,13 +227,24 @@ class ModelProbe(ModelProbeBase):
if config_path:
with open(config_path, "r") as file:
conf = json.load(file)
class_name = conf["_class_name"]
if "_class_name" in conf:
class_name = conf["_class_name"]
elif "architectures" in conf:
class_name = conf["architectures"][0]
else:
class_name = None
else:
error_hint = f"No model_index.json or config.json found in {folder_path}."
if class_name and (type := cls.CLASS2TYPE.get(class_name)):
return type
else:
error_hint = f"class {class_name} is not one of the supported classes [{', '.join(cls.CLASS2TYPE.keys())}]"
# give up
raise InvalidModelException(f"Unable to determine model type for {folder_path}")
raise InvalidModelException(
f"Unable to determine model type for {folder_path}" + (f"; {error_hint}" if error_hint else "")
)
@classmethod
def _scan_and_load_checkpoint(cls, model: Path) -> dict:
@ -675,22 +686,25 @@ class T2IAdapterFolderProbe(FolderProbeBase):
############## register probe classes ######
# Probes are looked up by (model format, model type).  Build the two common
# format keys once instead of repeating a string literal on every line, and
# register each probe exactly once (the block previously carried duplicate
# string-keyed registrations alongside the ModelFormat-keyed ones).
diffusers = ModelFormat("diffusers")
checkpoint = ModelFormat("checkpoint")

# Folder-based (diffusers-format) probes.
ModelProbe.register_probe(diffusers, ModelType.Main, PipelineFolderProbe)
ModelProbe.register_probe(diffusers, ModelType.Vae, VaeFolderProbe)
ModelProbe.register_probe(diffusers, ModelType.Lora, LoRAFolderProbe)
ModelProbe.register_probe(diffusers, ModelType.TextualInversion, TextualInversionFolderProbe)
ModelProbe.register_probe(diffusers, ModelType.ControlNet, ControlNetFolderProbe)
ModelProbe.register_probe(diffusers, ModelType.IPAdapter, IPAdapterFolderProbe)
ModelProbe.register_probe(diffusers, ModelType.CLIPVision, CLIPVisionFolderProbe)
ModelProbe.register_probe(diffusers, ModelType.T2IAdapter, T2IAdapterFolderProbe)

# Single-file (checkpoint-format) probes.
ModelProbe.register_probe(checkpoint, ModelType.Main, PipelineCheckpointProbe)
ModelProbe.register_probe(checkpoint, ModelType.Vae, VaeCheckpointProbe)
ModelProbe.register_probe(checkpoint, ModelType.Lora, LoRACheckpointProbe)
ModelProbe.register_probe(checkpoint, ModelType.TextualInversion, TextualInversionCheckpointProbe)
ModelProbe.register_probe(checkpoint, ModelType.ControlNet, ControlNetCheckpointProbe)
ModelProbe.register_probe(checkpoint, ModelType.IPAdapter, IPAdapterCheckpointProbe)
ModelProbe.register_probe(checkpoint, ModelType.CLIPVision, CLIPVisionCheckpointProbe)
ModelProbe.register_probe(checkpoint, ModelType.T2IAdapter, T2IAdapterCheckpointProbe)

ModelProbe.register_probe(ModelFormat("onnx"), ModelType.ONNX, ONNXFolderProbe)