add back source URL validation to download job hierarchy

This commit is contained in:
Lincoln Stein
2023-10-11 22:42:07 -04:00
parent 76aa19a0f7
commit e079cc9f07
5 changed files with 12 additions and 7 deletions

View File

@@ -130,7 +130,7 @@ class ApiDependencies:
)
)
download_queue = DownloadQueueService(event_bus=events, config=config)
download_queue = DownloadQueueService(event_bus=events)
model_record_store = ModelRecordServiceBase.open(config, conn=db_conn, lock=lock)
model_loader = ModelLoadService(config, model_record_store)
model_installer = ModelInstallService(config, queue=download_queue, store=model_record_store, event_bus=events)

View File

@@ -241,8 +241,8 @@ class InvokeAIAppConfig(InvokeAISettings):
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
# CACHE
ram : Union[float, Literal["auto"]] = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
vram : Union[float, Literal["auto"]] = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (floating point number or 'auto')", category="Model Cache", )
ram : float = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by model cache for rapid switching", category="Model Cache", )
vram : float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage", category="Model Cache", )
disk : float = Field(default=DEFAULT_MAX_DISK_CACHE, ge=0, description="Maximum size (in GB) for the disk-based diffusers model conversion cache", category="Model Cache", )
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", )
@@ -443,7 +443,7 @@ class InvokeAIAppConfig(InvokeAISettings):
return self.disk
@property
def vram_cache_size(self) -> Union[Literal["auto"], float]:
def vram_cache_size(self) -> float:
return self.max_vram_cache_size or self.vram
@property

View File

@@ -25,6 +25,7 @@ from invokeai.backend.model_manager.config import (
from invokeai.backend.model_manager.download.model_queue import (
HTTP_RE,
REPO_ID_WITH_OPTIONAL_SUBFOLDER_RE,
DownloadJobMetadataURL,
DownloadJobRepoID,
DownloadJobWithMetadata,
)
@@ -60,7 +61,7 @@ class ModelInstallJob(DownloadJobBase):
)
class ModelInstallURLJob(DownloadJobWithMetadata, ModelInstallJob):
class ModelInstallURLJob(DownloadJobMetadataURL, ModelInstallJob):
"""Job for installing URLs."""

View File

@@ -7,7 +7,7 @@ from pydantic import BaseModel, Field, parse_obj_as, validator
from pydantic.networks import AnyHttpUrl
from .base import DownloadEventHandler, DownloadJobBase, DownloadJobStatus, DownloadQueueBase
from .queue import HTTP_RE, DownloadJobRemoteSource, DownloadQueue
from .queue import HTTP_RE, DownloadJobRemoteSource, DownloadJobURL, DownloadQueue
# regular expressions used to dispatch appropriate downloaders and metadata retrievers
# endpoint for civitai get-model API
@@ -40,6 +40,10 @@ class DownloadJobWithMetadata(DownloadJobRemoteSource):
)
class DownloadJobMetadataURL(DownloadJobWithMetadata, DownloadJobURL):
"""DownloadJobWithMetadata with validation of the source URL."""
class DownloadJobRepoID(DownloadJobWithMetadata):
"""Download repo ids."""

View File

@@ -50,7 +50,7 @@ def test_install(datadir: Path):
)
event_bus = DummyEventService()
mm_store = ModelRecordServiceBase.get_impl(config)
mm_store = ModelRecordServiceBase.open(config)
mm_load = ModelLoadService(config, mm_store)
mm_install = ModelInstallService(config=config, store=mm_store, event_bus=event_bus)