diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml
index 17673de937..071232e06e 100644
--- a/.github/workflows/test-invoke-pip.yml
+++ b/.github/workflows/test-invoke-pip.yml
@@ -125,6 +125,7 @@ jobs:
             --no-nsfw_checker
             --precision=float32
             --always_use_cpu
+            --use_memory_db
             --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
             --from_file ${{ env.TEST_PROMPTS }}
diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py
index 073a8f569b..de543d2d85 100644
--- a/invokeai/app/cli_app.py
+++ b/invokeai/app/cli_app.py
@@ -13,10 +13,13 @@ from typing import (
 from pydantic import BaseModel, ValidationError
 from pydantic.fields import Field
 
+from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
+from invokeai.app.services.images import ImageService
+from invokeai.app.services.metadata import CoreMetadataService
+from invokeai.app.services.urls import LocalUrlService
 import invokeai.backend.util.logging as logger
 
-from invokeai.app.services.metadata import PngMetadataService
 from .services.default_graphs import create_system_graphs
 from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@@ -188,6 +191,9 @@ def invoke_all(context: CliContext):
         raise SessionError()
 
 
+logger = logger.InvokeAILogger.getLogger()
+
+
 def invoke_cli():
     # this gets the basic configuration
     config = get_invokeai_config()
@@ -206,24 +212,43 @@ def invoke_cli():
     events = EventServiceBase()
     output_folder = config.output_path
-    metadata = PngMetadataService()
 
     # TODO: build a file/path manager?
-    db_location = os.path.join(output_folder, "invokeai.db")
+    if config.use_memory_db:
+        db_location = ":memory:"
+    else:
+        db_location = os.path.join(output_folder, "invokeai.db")
+
+    logger.info(f'InvokeAI database location is "{db_location}"')
+
+    graph_execution_manager = SqliteItemStorage[GraphExecutionState](
+        filename=db_location, table_name="graph_executions"
+    )
+
+    urls = LocalUrlService()
+    metadata = CoreMetadataService()
+    image_record_storage = SqliteImageRecordStorage(db_location)
+    image_file_storage = DiskImageFileStorage(f"{output_folder}/images")
+
+    images = ImageService(
+        image_record_storage=image_record_storage,
+        image_file_storage=image_file_storage,
+        metadata=metadata,
+        url=urls,
+        logger=logger,
+        graph_execution_manager=graph_execution_manager,
+    )
 
     services = InvocationServices(
         model_manager=model_manager,
         events=events,
         latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents')),
-        images=DiskImageFileStorage(f'{output_folder}/images', metadata_service=metadata),
-        metadata=metadata,
+        images=images,
         queue=MemoryInvocationQueue(),
         graph_library=SqliteItemStorage[LibraryGraph](
             filename=db_location, table_name="graphs"
         ),
-        graph_execution_manager=SqliteItemStorage[GraphExecutionState](
-            filename=db_location, table_name="graph_executions"
-        ),
+        graph_execution_manager=graph_execution_manager,
         processor=DefaultInvocationProcessor(),
         restoration=RestorationServices(config,logger=logger),
         logger=logger,
diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py
index 49f3ed1aa7..5623eaf3e0 100644
--- a/invokeai/app/services/config.py
+++ b/invokeai/app/services/config.py
@@ -352,6 +352,7 @@ setting environment variables INVOKEAI_.
     sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
     xformers_enabled    : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
 
+    root                : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths')
     autoconvert_dir     : Path = Field(default=None, description='Path to a directory of ckpt files to be converted into diffusers and imported on startup.', category='Paths')
     conf_path           : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths')
@@ -361,6 +362,7 @@ setting environment variables INVOKEAI_.
     lora_dir            : Path = Field(default='loras', description='Path to InvokeAI LoRA model directory', category='Paths')
     outdir              : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
     from_file           : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
+    use_memory_db       : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')
 
     model               : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
     embeddings          : bool = Field(default=True, description='Load contents of embeddings directory', category='Models')
@@ -515,7 +517,7 @@ class PagingArgumentParser(argparse.ArgumentParser):
         text = self.format_help()
         pydoc.pager(text)
 
-def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAISettings:
+def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAIAppConfig:
     '''
     This returns a singleton InvokeAIAppConfig configuration object.
     '''
diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py
index bcbe95a41f..1f910253e5 100644
--- a/invokeai/app/services/invocation_services.py
+++ b/invokeai/app/services/invocation_services.py
@@ -1,18 +1,17 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
+from __future__ import annotations
 
 from typing import TYPE_CHECKING
-from logging import Logger
-
-from invokeai.app.services.images import ImageService
-from invokeai.backend import ModelManager
-from .events import EventServiceBase
-from .latent_storage import LatentsStorageBase
-from .restoration_services import RestorationServices
-from .invocation_queue import InvocationQueueABC
-from .item_storage import ItemStorageABC
-from .config import InvokeAISettings
-
 if TYPE_CHECKING:
+    from logging import Logger
+    from invokeai.app.services.images import ImageService
+    from invokeai.backend import ModelManager
+    from invokeai.app.services.events import EventServiceBase
+    from invokeai.app.services.latent_storage import LatentsStorageBase
+    from invokeai.app.services.restoration_services import RestorationServices
+    from invokeai.app.services.invocation_queue import InvocationQueueABC
+    from invokeai.app.services.item_storage import ItemStorageABC
+    from invokeai.app.services.config import InvokeAISettings
     from invokeai.app.services.graph import GraphExecutionState, LibraryGraph
     from invokeai.app.services.invoker import InvocationProcessorABC
@@ -20,32 +19,33 @@ if TYPE_CHECKING:
 
 class InvocationServices:
     """Services that can be used by invocations"""
-    events: EventServiceBase
-    latents: LatentsStorageBase
-    queue: InvocationQueueABC
-    model_manager: ModelManager
-    restoration: RestorationServices
-    configuration: InvokeAISettings
-    images: ImageService
+    # TODO: Just forward-declared everything due to circular dependencies. Fix structure.
+    events: "EventServiceBase"
+    latents: "LatentsStorageBase"
+    queue: "InvocationQueueABC"
+    model_manager: "ModelManager"
+    restoration: "RestorationServices"
+    configuration: "InvokeAISettings"
+    images: "ImageService"
 
     # NOTE: we must forward-declare any types that include invocations, since invocations can use services
-    graph_library: ItemStorageABC["LibraryGraph"]
-    graph_execution_manager: ItemStorageABC["GraphExecutionState"]
+    graph_library: "ItemStorageABC"["LibraryGraph"]
+    graph_execution_manager: "ItemStorageABC"["GraphExecutionState"]
     processor: "InvocationProcessorABC"
 
     def __init__(
         self,
-        model_manager: ModelManager,
-        events: EventServiceBase,
-        logger: Logger,
-        latents: LatentsStorageBase,
-        images: ImageService,
-        queue: InvocationQueueABC,
-        graph_library: ItemStorageABC["LibraryGraph"],
-        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
+        model_manager: "ModelManager",
+        events: "EventServiceBase",
+        logger: "Logger",
+        latents: "LatentsStorageBase",
+        images: "ImageService",
+        queue: "InvocationQueueABC",
+        graph_library: "ItemStorageABC"["LibraryGraph"],
+        graph_execution_manager: "ItemStorageABC"["GraphExecutionState"],
         processor: "InvocationProcessorABC",
-        restoration: RestorationServices,
-        configuration: InvokeAISettings = None,
+        restoration: "RestorationServices",
+        configuration: "InvokeAISettings",
     ):
         self.model_manager = model_manager
         self.events = events
diff --git a/invokeai/app/services/processor.py b/invokeai/app/services/processor.py
index cdd9db85de..9e3b5a0a30 100644
--- a/invokeai/app/services/processor.py
+++ b/invokeai/app/services/processor.py
@@ -1,10 +1,7 @@
 import time
 import traceback
 from threading import Event, Thread, BoundedSemaphore
-from typing import Any, TypeGuard
 
-from invokeai.app.invocations.image import ImageOutput
-from invokeai.app.models.image import ImageType
 from ..invocations.baseinvocation import InvocationContext
 from .invocation_queue import InvocationQueueItem
 from .invoker import InvocationProcessorABC, Invoker
diff --git a/tests/nodes/test_graph_execution_state.py b/tests/nodes/test_graph_execution_state.py
index d4631ec735..9f433aa330 100644
--- a/tests/nodes/test_graph_execution_state.py
+++ b/tests/nodes/test_graph_execution_state.py
@@ -35,6 +35,7 @@ def mock_services():
         graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'),
         processor = DefaultInvocationProcessor(),
         restoration = None, # type: ignore
+        configuration = None, # type: ignore
     )
 
 def invoke_next(g: GraphExecutionState, services: InvocationServices) -> tuple[BaseInvocation, BaseInvocationOutput]:
diff --git a/tests/nodes/test_invoker.py b/tests/nodes/test_invoker.py
index 80ed427485..6e1dde716c 100644
--- a/tests/nodes/test_invoker.py
+++ b/tests/nodes/test_invoker.py
@@ -33,6 +33,7 @@ def mock_services() -> InvocationServices:
         graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'),
         processor = DefaultInvocationProcessor(),
         restoration = None, # type: ignore
+        configuration = None, # type: ignore
     )
 
 @pytest.fixture()
diff --git a/tests/nodes/test_nodes.py b/tests/nodes/test_nodes.py
index e334953d7e..d16d67d815 100644
--- a/tests/nodes/test_nodes.py
+++ b/tests/nodes/test_nodes.py
@@ -49,7 +49,7 @@ class ImageTestInvocation(BaseInvocation):
     prompt: str = Field(default = "")
 
     def invoke(self, context: InvocationContext) -> ImageTestInvocationOutput:
-        return ImageTestInvocationOutput(image=ImageField(image_name=self.id, width=512, height=512, mode="", info={}))
+        return ImageTestInvocationOutput(image=ImageField(image_name=self.id))
 
 class PromptCollectionTestInvocationOutput(BaseInvocationOutput):
     type: Literal['test_prompt_collection_output'] = 'test_prompt_collection_output'
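
Note: the two sketches below are editorial illustrations of the changes above, not part of the patch.

A minimal sketch of how the new use_memory_db setting is expected to resolve the database location, mirroring the logic added to invoke_cli(); only get_invokeai_config, use_memory_db, and output_path come from the patch, the standalone snippet itself is illustrative:

    import os
    from invokeai.app.services.config import get_invokeai_config

    config = get_invokeai_config()  # singleton InvokeAIAppConfig
    if config.use_memory_db:
        # SQLite keeps the records in RAM and writes nothing to disk; this is
        # the path the CI workflow exercises by passing --use_memory_db.
        db_location = ":memory:"
    else:
        db_location = os.path.join(config.output_path, "invokeai.db")

The invocation_services.py change relies on the standard pattern for breaking circular imports: with `from __future__ import annotations`, annotations are never evaluated at runtime, so the service imports can live under `if TYPE_CHECKING:` and are seen only by type checkers. A self-contained sketch of the pattern, using generic placeholder names rather than modules from the patch:

    from __future__ import annotations
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Imported only for static analysis; the import never runs at runtime,
        # so a circular import between the two modules cannot occur.
        from heavy_module import HeavyService

    class Consumer:
        def __init__(self, service: HeavyService) -> None:
            self.service = service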