Merge branch 'lstein/logging-improvements' of github.com:invoke-ai/InvokeAI into lstein/logging-improvements

Lincoln Stein 2023-05-25 09:39:56 -04:00
commit 7f5992d6a5
8 changed files with 70 additions and 43 deletions

View File

@@ -125,6 +125,7 @@ jobs:
 --no-nsfw_checker
 --precision=float32
 --always_use_cpu
+--use_memory_db
 --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
 --from_file ${{ env.TEST_PROMPTS }}
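
The new --use_memory_db switch keeps these CI matrix runs from persisting an invokeai.db under the shared output directory. InvokeAI derives its CLI switches from the pydantic settings model (see PagingArgumentParser further down), but a minimal hand-rolled equivalent of this one flag, for illustration only, would be:

    import argparse

    parser = argparse.ArgumentParser(prog="invokeai")
    # Boolean switch: present -> True, absent -> the settings default (False).
    parser.add_argument("--use_memory_db", action="store_true",
                        help="Use in-memory database for storing image metadata")

    args = parser.parse_args(["--use_memory_db"])
    print(args.use_memory_db)  # True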

View File

@@ -13,10 +13,13 @@ from typing import (
 from pydantic import BaseModel, ValidationError
 from pydantic.fields import Field
+from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
+from invokeai.app.services.images import ImageService
+from invokeai.app.services.metadata import CoreMetadataService
+from invokeai.app.services.urls import LocalUrlService
 import invokeai.backend.util.logging as logger
-from invokeai.app.services.metadata import PngMetadataService
 from .services.default_graphs import create_system_graphs
 from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@@ -188,6 +191,9 @@ def invoke_all(context: CliContext):
        raise SessionError()

+logger = logger.InvokeAILogger.getLogger()
+
 def invoke_cli():
     # this gets the basic configuration
     config = get_invokeai_config()
@@ -206,24 +212,43 @@ def invoke_cli():
     events = EventServiceBase()
     output_folder = config.output_path
-    metadata = PngMetadataService()

     # TODO: build a file/path manager?
-    db_location = os.path.join(output_folder, "invokeai.db")
+    if config.use_memory_db:
+        db_location = ":memory:"
+    else:
+        db_location = os.path.join(output_folder, "invokeai.db")
+
+    logger.info(f'InvokeAI database location is "{db_location}"')
+
+    graph_execution_manager = SqliteItemStorage[GraphExecutionState](
+        filename=db_location, table_name="graph_executions"
+    )
+
+    urls = LocalUrlService()
+    metadata = CoreMetadataService()
+    image_record_storage = SqliteImageRecordStorage(db_location)
+    image_file_storage = DiskImageFileStorage(f"{output_folder}/images")
+    images = ImageService(
+        image_record_storage=image_record_storage,
+        image_file_storage=image_file_storage,
+        metadata=metadata,
+        url=urls,
+        logger=logger,
+        graph_execution_manager=graph_execution_manager,
+    )

     services = InvocationServices(
         model_manager=model_manager,
         events=events,
         latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents')),
-        images=DiskImageFileStorage(f'{output_folder}/images', metadata_service=metadata),
-        metadata=metadata,
+        images=images,
         queue=MemoryInvocationQueue(),
         graph_library=SqliteItemStorage[LibraryGraph](
             filename=db_location, table_name="graphs"
         ),
-        graph_execution_manager=SqliteItemStorage[GraphExecutionState](
-            filename=db_location, table_name="graph_executions"
-        ),
+        graph_execution_manager=graph_execution_manager,
         processor=DefaultInvocationProcessor(),
         restoration=RestorationServices(config,logger=logger),
         logger=logger,
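
The only real branch in the new wiring is the database location: SQLite reserves the special name ":memory:" for a private, transient in-process database, so a run with use_memory_db leaves no invokeai.db behind. A stand-alone sketch of the same pattern using only the stdlib (table name borrowed from the diff; the helper is illustrative, not InvokeAI API):

    import os
    import sqlite3

    def open_db(output_folder: str, use_memory_db: bool = False) -> sqlite3.Connection:
        # ":memory:" tells SQLite to keep the whole database in RAM for this
        # process; otherwise persist next to the other outputs, as invoke_cli() does.
        db_location = ":memory:" if use_memory_db else os.path.join(output_folder, "invokeai.db")
        conn = sqlite3.connect(db_location)
        conn.execute("CREATE TABLE IF NOT EXISTS graph_executions (id TEXT PRIMARY KEY, item TEXT)")
        return conn

    conn = open_db("outputs", use_memory_db=True)  # nothing touches the filesystem

Note also that SqliteItemStorage for graph executions is now constructed once and shared: both ImageService and InvocationServices receive the same graph_execution_manager instead of each building their own.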

View File

@@ -352,6 +352,7 @@ setting environment variables INVOKEAI_<setting>.
     sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
     xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
     root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths')
     autoconvert_dir : Path = Field(default=None, description='Path to a directory of ckpt files to be converted into diffusers and imported on startup.', category='Paths')
     conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths')
@@ -361,6 +362,7 @@ setting environment variables INVOKEAI_<setting>.
     lora_dir : Path = Field(default='loras', description='Path to InvokeAI LoRA model directory', category='Paths')
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
     from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
+    use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')
     model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
     embeddings : bool = Field(default=True, description='Load contents of embeddings directory', category='Models')
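
As the docstring fragment above says, any of these settings can be overridden via an INVOKEAI_<setting> environment variable. A reduced sketch of that mechanism with pydantic v1's BaseSettings (matching the pydantic imports elsewhere in this diff; the two fields are an illustrative subset, and the real config also carries the custom category extra used to group CLI flags):

    import os
    from pydantic import BaseSettings, Field

    class MiniConfig(BaseSettings):
        # Illustrative subset of InvokeAIAppConfig.
        use_memory_db: bool = Field(default=False, description="Use in-memory database for storing image metadata")
        outdir: str = Field(default="outputs", description="Default folder for output images")

        class Config:
            env_prefix = "INVOKEAI_"  # maps INVOKEAI_USE_MEMORY_DB -> use_memory_db

    os.environ["INVOKEAI_USE_MEMORY_DB"] = "true"
    print(MiniConfig().use_memory_db)  # True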
@@ -515,7 +517,7 @@ class PagingArgumentParser(argparse.ArgumentParser):
         text = self.format_help()
         pydoc.pager(text)

-def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAISettings:
+def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAIAppConfig:
     '''
     This returns a singleton InvokeAIAppConfig configuration object.
     '''
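
The signature change in the last hunk is a pure typing fix: the function already returned the cached InvokeAIAppConfig, but callers annotated against the InvokeAISettings base class could not see app-level fields such as use_memory_db without a cast. A self-contained sketch of the singleton in play (stub classes; the real cache lives inside the config module):

    from typing import Optional

    class InvokeAISettings:                      # stand-in for the real base class
        pass

    class InvokeAIAppConfig(InvokeAISettings):   # stand-in for the real subclass
        use_memory_db: bool = False

    _config: Optional[InvokeAIAppConfig] = None

    def get_invokeai_config(**kwargs) -> InvokeAIAppConfig:
        # Annotating the concrete subclass lets type checkers resolve fields
        # like use_memory_db on the result without a cast.
        global _config
        if _config is None:
            _config = InvokeAIAppConfig(**kwargs)
        return _config

    print(get_invokeai_config().use_memory_db)  # False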

View File

@@ -1,18 +1,17 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
+from __future__ import annotations
 from typing import TYPE_CHECKING
-from logging import Logger
-from invokeai.app.services.images import ImageService
-from invokeai.backend import ModelManager
-from .events import EventServiceBase
-from .latent_storage import LatentsStorageBase
-from .restoration_services import RestorationServices
-from .invocation_queue import InvocationQueueABC
-from .item_storage import ItemStorageABC
-from .config import InvokeAISettings

 if TYPE_CHECKING:
+    from logging import Logger
+    from invokeai.app.services.images import ImageService
+    from invokeai.backend import ModelManager
+    from invokeai.app.services.events import EventServiceBase
+    from invokeai.app.services.latent_storage import LatentsStorageBase
+    from invokeai.app.services.restoration_services import RestorationServices
+    from invokeai.app.services.invocation_queue import InvocationQueueABC
+    from invokeai.app.services.item_storage import ItemStorageABC
+    from invokeai.app.services.config import InvokeAISettings
     from invokeai.app.services.graph import GraphExecutionState, LibraryGraph
     from invokeai.app.services.invoker import InvocationProcessorABC
@@ -20,32 +19,33 @@ if TYPE_CHECKING:
 class InvocationServices:
     """Services that can be used by invocations"""

-    events: EventServiceBase
-    latents: LatentsStorageBase
-    queue: InvocationQueueABC
-    model_manager: ModelManager
-    restoration: RestorationServices
-    configuration: InvokeAISettings
-    images: ImageService
+    # TODO: Just forward-declared everything due to circular dependencies. Fix structure.
+    events: "EventServiceBase"
+    latents: "LatentsStorageBase"
+    queue: "InvocationQueueABC"
+    model_manager: "ModelManager"
+    restoration: "RestorationServices"
+    configuration: "InvokeAISettings"
+    images: "ImageService"

     # NOTE: we must forward-declare any types that include invocations, since invocations can use services
-    graph_library: ItemStorageABC["LibraryGraph"]
-    graph_execution_manager: ItemStorageABC["GraphExecutionState"]
+    graph_library: "ItemStorageABC"["LibraryGraph"]
+    graph_execution_manager: "ItemStorageABC"["GraphExecutionState"]
     processor: "InvocationProcessorABC"

     def __init__(
         self,
-        model_manager: ModelManager,
-        events: EventServiceBase,
-        logger: Logger,
-        latents: LatentsStorageBase,
-        images: ImageService,
-        queue: InvocationQueueABC,
-        graph_library: ItemStorageABC["LibraryGraph"],
-        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
+        model_manager: "ModelManager",
+        events: "EventServiceBase",
+        logger: "Logger",
+        latents: "LatentsStorageBase",
+        images: "ImageService",
+        queue: "InvocationQueueABC",
+        graph_library: "ItemStorageABC"["LibraryGraph"],
+        graph_execution_manager: "ItemStorageABC"["GraphExecutionState"],
         processor: "InvocationProcessorABC",
-        restoration: RestorationServices,
-        configuration: InvokeAISettings = None,
+        restoration: "RestorationServices",
+        configuration: "InvokeAISettings",
     ):
         self.model_manager = model_manager
         self.events = events
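
This restructuring is the usual recipe for breaking an import cycle: the imports run only under typing.TYPE_CHECKING (always False at runtime), and every annotation becomes a string so it is never evaluated when the class body executes. A two-module idiom compressed into one illustrative file:

    # services.py -- imported by images.py, yet needs ImageService for typing.
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Seen only by static type checkers, so no import cycle at runtime.
        from images import ImageService

    class InvocationServices:
        # Quoted annotation: stored as a string, resolved lazily (or never).
        images: "ImageService"

        def __init__(self, images: "ImageService"):
            self.images = images

The from __future__ import annotations added at the top of the file makes all annotations lazy strings automatically, which is presumably what keeps the subscripted "ItemStorageABC"["LibraryGraph"] forms from raising a TypeError at class-creation time.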

View File

@@ -1,10 +1,7 @@
 import time
 import traceback
 from threading import Event, Thread, BoundedSemaphore
-from typing import Any, TypeGuard
-from invokeai.app.invocations.image import ImageOutput
-from invokeai.app.models.image import ImageType

 from ..invocations.baseinvocation import InvocationContext
 from .invocation_queue import InvocationQueueItem
 from .invoker import InvocationProcessorABC, Invoker

View File

@@ -35,6 +35,7 @@ def mock_services():
         graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'),
         processor = DefaultInvocationProcessor(),
         restoration = None, # type: ignore
+        configuration = None, # type: ignore
     )

 def invoke_next(g: GraphExecutionState, services: InvocationServices) -> tuple[BaseInvocation, BaseInvocationOutput]:
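
configuration also lost its None default in InvocationServices.__init__, so construction sites like these fixtures must now pass it explicitly and silence the checker per-argument. A toy illustration of the trade-off (not the real class):

    from typing import Optional

    class Services:
        # No default: forgetting `configuration` at a call site is now an
        # immediate TypeError rather than a silently-None configuration.
        def __init__(self, restoration: Optional[object], configuration: Optional[object]):
            self.restoration = restoration
            self.configuration = configuration

    Services(restoration=None, configuration=None)  # tests opt out explicitly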

View File

@@ -33,6 +33,7 @@ def mock_services() -> InvocationServices:
         graph_execution_manager = SqliteItemStorage[GraphExecutionState](filename = sqlite_memory, table_name = 'graph_executions'),
         processor = DefaultInvocationProcessor(),
         restoration = None, # type: ignore
+        configuration = None, # type: ignore
     )

 @pytest.fixture()

View File

@@ -49,7 +49,7 @@ class ImageTestInvocation(BaseInvocation):
     prompt: str = Field(default = "")

     def invoke(self, context: InvocationContext) -> ImageTestInvocationOutput:
-        return ImageTestInvocationOutput(image=ImageField(image_name=self.id, width=512, height=512, mode="", info={}))
+        return ImageTestInvocationOutput(image=ImageField(image_name=self.id))

 class PromptCollectionTestInvocationOutput(BaseInvocationOutput):
     type: Literal['test_prompt_collection_output'] = 'test_prompt_collection_output'
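
The test shrinks because ImageField now carries only a reference to the image; dimensions and metadata are looked up through the new image services instead of traveling with the field. A sketch of the reduced model as implied by this call site (field set inferred from the diff, not the full upstream definition):

    from pydantic import BaseModel, Field

    class ImageField(BaseModel):
        """A reference to an image; everything else lives in the image record store."""
        image_name: str = Field(description="The name of the image")

    ImageField(image_name="some-invocation-id")  # all the test must now supply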