diff --git a/invokeai/app/cli/commands.py b/invokeai/app/cli/commands.py
index bffb2988dc..10d1ead677 100644
--- a/invokeai/app/cli/commands.py
+++ b/invokeai/app/cli/commands.py
@@ -47,7 +47,7 @@ def add_parsers(
     commands: list[type],
     command_field: str = "type",
     exclude_fields: list[str] = ["id", "type"],
-    add_arguments: Union[Callable[[argparse.ArgumentParser], None],None] = None
+    add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
 ):
     """Adds parsers for each command to the subparsers"""
 
@@ -72,7 +72,7 @@ def add_parsers(
 def add_graph_parsers(
     subparsers,
     graphs: list[LibraryGraph],
-    add_arguments: Union[Callable[[argparse.ArgumentParser], None], None] = None
+    add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
 ):
     for graph in graphs:
         command_parser = subparsers.add_parser(graph.name, help=graph.description)
diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py
index 1f734b1f4f..07193c8500 100644
--- a/invokeai/app/cli_app.py
+++ b/invokeai/app/cli_app.py
@@ -1,6 +1,7 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
 
 import argparse
+import os
 import re
 import shlex
 import sys
@@ -347,7 +348,7 @@ def invoke_cli():
 
             # Parse invocation
             command: CliCommand = None  # type:ignore
-            system_graph: Union[LibraryGraph,None] = None
+            system_graph: LibraryGraph|None = None
             if args['type'] in system_graph_names:
                 system_graph = next(filter(lambda g: g.name == args['type'], system_graphs))
                 invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id))
diff --git a/invokeai/app/services/board_images.py b/invokeai/app/services/board_images.py
index aea8a53876..072effbfae 100644
--- a/invokeai/app/services/board_images.py
+++ b/invokeai/app/services/board_images.py
@@ -132,7 +132,7 @@ class BoardImagesService(BoardImagesServiceABC):
 
 
 def board_record_to_dto(
-    board_record: BoardRecord, cover_image_name: Union[str, None], image_count: int
+    board_record: BoardRecord, cover_image_name: str | None, image_count: int
 ) -> BoardDTO:
     """Converts a board record to a board DTO."""
     return BoardDTO(
diff --git a/invokeai/app/services/events.py b/invokeai/app/services/events.py
index 61acf1f499..e578a24006 100644
--- a/invokeai/app/services/events.py
+++ b/invokeai/app/services/events.py
@@ -1,6 +1,6 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
 
-from typing import Any, Union
+from typing import Any
 from invokeai.app.models.image import ProgressImage
 from invokeai.app.util.misc import get_timestamp
 from invokeai.app.services.model_manager_service import BaseModelType, ModelType, SubModelType, ModelInfo
@@ -28,7 +28,7 @@ class EventServiceBase:
         graph_execution_state_id: str,
         node: dict,
         source_node_id: str,
-        progress_image: Union[ProgressImage, None],
+        progress_image: ProgressImage | None,
         step: int,
         total_steps: int,
     ) -> None:
diff --git a/invokeai/app/services/graph.py b/invokeai/app/services/graph.py
index f019ab1f70..e3cd3d47ce 100644
--- a/invokeai/app/services/graph.py
+++ b/invokeai/app/services/graph.py
@@ -3,6 +3,7 @@
 import copy
 import itertools
 import uuid
+from types import NoneType
 from typing import (
     Annotated,
     Any,
@@ -25,8 +26,6 @@ from ..invocations.baseinvocation import (
     InvocationContext,
 )
 
-# in 3.10 this would be "from types import NoneType"
-NoneType = type(None)
 
 class EdgeConnection(BaseModel):
     node_id: str = Field(description="The id of the node for this edge connection")
@@ -847,7 +846,7 @@ class GraphExecutionState(BaseModel):
             ]
         }
 
-    def next(self) -> Union[BaseInvocation, None]:
+    def next(self) -> BaseInvocation | None:
         """Gets the next node ready to execute."""
 
         # TODO: enable multiple nodes to execute simultaneously by tracking currently executing nodes
diff --git a/invokeai/app/services/image_file_storage.py b/invokeai/app/services/image_file_storage.py
index 23ea6fca7e..f30499ea26 100644
--- a/invokeai/app/services/image_file_storage.py
+++ b/invokeai/app/services/image_file_storage.py
@@ -2,7 +2,7 @@
 from abc import ABC, abstractmethod
 from pathlib import Path
 from queue import Queue
-from typing import Dict, Optional, Union
+from typing import Dict, Optional
 
 from PIL.Image import Image as PILImageType
 from PIL import Image, PngImagePlugin
@@ -80,7 +80,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
     __cache: Dict[Path, PILImageType]
     __max_cache_size: int
 
-    def __init__(self, output_folder: Union[str, Path]):
+    def __init__(self, output_folder: str | Path):
         self.__cache = dict()
         self.__cache_ids = Queue()
         self.__max_cache_size = 10  # TODO: get this from config
@@ -164,7 +164,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
 
         return path
 
-    def validate_path(self, path: Union[str, Path]) -> bool:
+    def validate_path(self, path: str | Path) -> bool:
         """Validates the path given for an image or thumbnail."""
         path = path if isinstance(path, Path) else Path(path)
         return path.exists()
@@ -175,7 +175,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
         for folder in folders:
             folder.mkdir(parents=True, exist_ok=True)
 
-    def __get_cache(self, image_name: Path) -> Union[PILImageType, None]:
+    def __get_cache(self, image_name: Path) -> PILImageType | None:
         return None if image_name not in self.__cache else self.__cache[image_name]
 
     def __set_cache(self, image_name: Path, image: PILImageType):
diff --git a/invokeai/app/services/image_record_storage.py b/invokeai/app/services/image_record_storage.py
index dc975fc9fc..066e6f8d5f 100644
--- a/invokeai/app/services/image_record_storage.py
+++ b/invokeai/app/services/image_record_storage.py
@@ -116,7 +116,7 @@ class ImageRecordStorageBase(ABC):
         pass
 
     @abstractmethod
-    def get_most_recent_image_for_board(self, board_id: str) -> Union[ImageRecord, None]:
+    def get_most_recent_image_for_board(self, board_id: str) -> ImageRecord | None:
         """Gets the most recent image for a board."""
         pass
 
diff --git a/invokeai/app/services/invocation_queue.py b/invokeai/app/services/invocation_queue.py
index d100161bc1..acfda6b90b 100644
--- a/invokeai/app/services/invocation_queue.py
+++ b/invokeai/app/services/invocation_queue.py
@@ -5,7 +5,6 @@ from abc import ABC, abstractmethod
 from queue import Queue
 
 from pydantic import BaseModel, Field
-from typing import Union
 
 
 class InvocationQueueItem(BaseModel):
@@ -23,7 +22,7 @@ class InvocationQueueABC(ABC):
         pass
 
     @abstractmethod
-    def put(self, item: Union[InvocationQueueItem, None]) -> None:
+    def put(self, item: InvocationQueueItem | None) -> None:
         pass
 
     @abstractmethod
@@ -58,7 +57,7 @@ class MemoryInvocationQueue(InvocationQueueABC):
 
         return item
 
-    def put(self, item: Union[InvocationQueueItem, None]) -> None:
+    def put(self, item: InvocationQueueItem | None) -> None:
         self.__queue.put(item)
 
     def cancel(self, graph_execution_state_id: str) -> None:
diff --git a/invokeai/app/services/invoker.py b/invokeai/app/services/invoker.py
index c8883a057e..f12ba79c15 100644
--- a/invokeai/app/services/invoker.py
+++ b/invokeai/app/services/invoker.py
@@ -2,7 +2,6 @@
 
 from abc import ABC
 from threading import Event, Thread
-from typing import Union
 
 from ..invocations.baseinvocation import InvocationContext
 from .graph import Graph, GraphExecutionState
@@ -22,7 +21,7 @@ class Invoker:
 
     def invoke(
         self, graph_execution_state: GraphExecutionState, invoke_all: bool = False
-    ) -> Union[str, None]:
+    ) -> str | None:
         """Determines the next node to invoke and enqueues it, preparing if needed.
         Returns the id of the queued node, or `None` if there are no nodes left to enqueue."""
 
@@ -46,7 +45,7 @@ class Invoker:
 
         return invocation.id
 
-    def create_execution_state(self, graph: Union[Graph, None] = None) -> GraphExecutionState:
+    def create_execution_state(self, graph: Graph | None = None) -> GraphExecutionState:
         """Creates a new execution state for the given graph"""
         new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
         self.services.graph_execution_manager.set(new_state)
diff --git a/invokeai/app/services/latent_storage.py b/invokeai/app/services/latent_storage.py
index 9836460319..17d35d7c33 100644
--- a/invokeai/app/services/latent_storage.py
+++ b/invokeai/app/services/latent_storage.py
@@ -3,7 +3,7 @@
 from abc import ABC, abstractmethod
 from pathlib import Path
 from queue import Queue
-from typing import Dict, Union
+from typing import Dict
 
 import torch
 
@@ -55,7 +55,7 @@ class ForwardCacheLatentsStorage(LatentsStorageBase):
         if name in self.__cache:
             del self.__cache[name]
 
-    def __get_cache(self, name: str) -> Union[torch.Tensor, None]:
+    def __get_cache(self, name: str) -> torch.Tensor|None:
         return None if name not in self.__cache else self.__cache[name]
 
     def __set_cache(self, name: str, data: torch.Tensor):
@@ -69,9 +69,9 @@ class ForwardCacheLatentsStorage(LatentsStorageBase):
 
 class DiskLatentsStorage(LatentsStorageBase):
     """Stores latents in a folder on disk without caching"""
 
-    __output_folder: Union[str, Path]
+    __output_folder: str | Path
 
-    def __init__(self, output_folder: Union[str, Path]):
+    def __init__(self, output_folder: str | Path):
         self.__output_folder = output_folder if isinstance(output_folder, Path) else Path(output_folder)
         self.__output_folder.mkdir(parents=True, exist_ok=True)
@@ -91,4 +91,4 @@ class DiskLatentsStorage(LatentsStorageBase):
 
     def get_path(self, name: str) -> Path:
         return self.__output_folder / name
-    
+    
\ No newline at end of file
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index 485a99412c..462b1a4f4b 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -21,7 +21,7 @@ from PIL import Image, ImageChops, ImageFilter
 from accelerate.utils import set_seed
 from diffusers import DiffusionPipeline
 from tqdm import trange
-from typing import Callable, List, Iterator, Optional, Type, Union
+from typing import Callable, List, Iterator, Optional, Type
 from dataclasses import dataclass, field
 from diffusers.schedulers import SchedulerMixin as Scheduler
 
@@ -178,7 +178,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
 # ------------------------------------
 class Img2Img(InvokeAIGenerator):
     def generate(self,
-                 init_image: Union[Image.Image, torch.FloatTensor],
+                 init_image: Image.Image | torch.FloatTensor,
                  strength: float=0.75,
                  **keyword_args
                  )->Iterator[InvokeAIGeneratorOutput]:
@@ -195,7 +195,7 @@ class Img2Img(InvokeAIGenerator):
 # Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
 class Inpaint(Img2Img):
     def generate(self,
-                 mask_image: Union[Image.Image, torch.FloatTensor],
+                 mask_image: Image.Image | torch.FloatTensor,
                  # Seam settings - when 0, doesn't fill seam
                  seam_size: int = 96,
                  seam_blur: int = 16,
diff --git a/invokeai/backend/generator/inpaint.py b/invokeai/backend/generator/inpaint.py
index fc9b9d4373..eaf4047109 100644
--- a/invokeai/backend/generator/inpaint.py
+++ b/invokeai/backend/generator/inpaint.py
@@ -203,8 +203,8 @@ class Inpaint(Img2Img):
         cfg_scale,
         ddim_eta,
         conditioning,
-        init_image: Union[Image.Image, torch.FloatTensor],
-        mask_image: Union[Image.Image, torch.FloatTensor],
+        init_image: Image.Image | torch.FloatTensor,
+        mask_image: Image.Image | torch.FloatTensor,
         strength: float,
         mask_blur_radius: int = 8,
         # Seam settings - when 0, doesn't fill seam
diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py
index 10884b6a84..87b0ad3c4e 100644
--- a/invokeai/backend/model_management/models/__init__.py
+++ b/invokeai/backend/model_management/models/__init__.py
@@ -68,11 +68,7 @@ def get_model_config_enums():
     enums = list()
 
     for model_config in MODEL_CONFIGS:
-
-        if hasattr(inspect,'get_annotations'):
-            fields = inspect.get_annotations(model_config)
-        else:
-            fields = model_config.__annotations__
+        fields = inspect.get_annotations(model_config)
         try:
             field = fields["model_format"]
         except:
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 457f4edbb0..8493b4286f 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -7,7 +7,7 @@ import secrets
 from collections.abc import Sequence
 from dataclasses import dataclass, field
 from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union
-from pydantic import Field
+from pydantic import BaseModel, Field
 
 import einops
 import PIL.Image
@@ -17,11 +17,12 @@ import psutil
 import torch
 import torchvision.transforms as T
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.models.controlnet import ControlNetModel
+from diffusers.models.controlnet import ControlNetModel, ControlNetOutput
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
     StableDiffusionPipeline,
 )
+from diffusers.pipelines.controlnet import MultiControlNetModel
 
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import (
     StableDiffusionImg2ImgPipeline,
@@ -45,7 +46,7 @@ from .diffusion import (
     InvokeAIDiffuserComponent,
     PostprocessingSettings,
 )
-from .offloading import FullyLoadedModelGroup, ModelGroup
+from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup
 
 @dataclass
 class PipelineIntermediateState:
@@ -104,7 +105,7 @@ class AddsMaskGuidance:
     _debug: Optional[Callable] = None
 
     def __call__(
-        self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning
+        self, step_output: BaseOutput | SchedulerOutput, t: torch.Tensor, conditioning
     ) -> BaseOutput:
         output_class = step_output.__class__  # We'll create a new one with masked data.
diff --git a/invokeai/backend/stable_diffusion/offloading.py b/invokeai/backend/stable_diffusion/offloading.py
index d36b65872a..5fc3f765ae 100644
--- a/invokeai/backend/stable_diffusion/offloading.py
+++ b/invokeai/backend/stable_diffusion/offloading.py
@@ -4,7 +4,7 @@ import warnings
 import weakref
 from abc import ABCMeta, abstractmethod
 from collections.abc import MutableMapping
-from typing import Callable, Union
+from typing import Callable
 
 import torch
 from accelerate.utils import send_to_device
@@ -117,7 +117,7 @@ class LazilyLoadedModelGroup(ModelGroup):
     """
 
     _hooks: MutableMapping[torch.nn.Module, RemovableHandle]
-    _current_model_ref: Callable[[], Union[torch.nn.Module, _NoModel]]
+    _current_model_ref: Callable[[], torch.nn.Module | _NoModel]
 
     def __init__(self, execution_device: torch.device):
         super().__init__(execution_device)
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index de808972f0..615209d98d 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -4,7 +4,6 @@ from contextlib import nullcontext
 import torch
 from torch import autocast
-from typing import Union
 
 from invokeai.app.services.config import InvokeAIAppConfig
 
 CPU_DEVICE = torch.device("cpu")
@@ -50,7 +49,7 @@ def choose_autocast(precision):
     return nullcontext
 
 
-def normalize_device(device: Union[str, torch.device]) -> torch.device:
+def normalize_device(device: str | torch.device) -> torch.device:
     """Ensure device has a device index defined, if appropriate."""
     device = torch.device(device)
     if device.index is None: