resolve merge conflict

Lincoln Stein 2023-07-03 12:19:11 -04:00
commit d6de11bd56
25 changed files with 75 additions and 95 deletions
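Nearly every hunk below makes the same change: the Union[X, None] spelling of a nullable type is replaced with the equivalent Optional[X], and imports left unused after the merge are dropped. The two spellings are interchangeable; a minimal sketch of the equivalence (illustrative, not from the diff):

    from typing import Optional, Union

    # Optional[X] is defined as Union[X, None], so the two annotations
    # compare equal and behave identically for type checkers.
    assert Optional[int] == Union[int, None]
    assert Optional[int] == Union[None, int]  # argument order is irrelevant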

View File

@@ -5,7 +5,7 @@ import re
import shlex
import sys
import time
-from typing import Union, get_type_hints
+from typing import Union, get_type_hints, Optional
from pydantic import BaseModel, ValidationError
from pydantic.fields import Field
@@ -347,7 +347,7 @@ def invoke_cli():
# Parse invocation
command: CliCommand = None # type:ignore
-system_graph: Union[LibraryGraph,None] = None
+system_graph: Optional[LibraryGraph] = None
if args['type'] in system_graph_names:
system_graph = next(filter(lambda g: g.name == args['type'], system_graphs))
invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id))
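A side note on the guarded lookup above: next() raises StopIteration when the filter matches nothing, which the membership test on system_graph_names prevents. Passing a default of None to next() expresses the same Optional contract directly (a hypothetical sketch, not the CLI's code):

    from typing import Optional

    names = ["text_to_image", "image_to_image"]

    # next() with a None default yields an Optional result without
    # needing a separate membership check beforehand
    match: Optional[str] = next((n for n in names if n == "upscale"), None)
    assert match is None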

View File

@@ -1,6 +1,5 @@
from typing import Literal, Optional, Union
from pydantic import BaseModel, Field
-from contextlib import ExitStack
import re
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
@@ -8,7 +7,7 @@ from .model import ClipField
from ...backend.util.devices import torch_dtype
from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
-from ...backend.model_management import BaseModelType, ModelType, SubModelType
+from ...backend.model_management import ModelType
from ...backend.model_management.lora import ModelPatcher
from compel import Compel

View File

@@ -6,7 +6,7 @@ from builtins import float, bool
import cv2
import numpy as np
from typing import Literal, Optional, Union, List, Dict
-from PIL import Image, ImageFilter, ImageOps
+from PIL import Image
from pydantic import BaseModel, Field, validator
from ..models.image import ImageField, ImageCategory, ResourceOrigin
@@ -422,9 +422,9 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation, PILInvoca
# Inputs
detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection")
image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image")
-h: Union[int, None] = Field(default=512, ge=0, description="Content shuffle `h` parameter")
-w: Union[int, None] = Field(default=512, ge=0, description="Content shuffle `w` parameter")
-f: Union[int, None] = Field(default=256, ge=0, description="Content shuffle `f` parameter")
+h: Optional[int] = Field(default=512, ge=0, description="Content shuffle `h` parameter")
+w: Optional[int] = Field(default=512, ge=0, description="Content shuffle `w` parameter")
+f: Optional[int] = Field(default=256, ge=0, description="Content shuffle `f` parameter")
# fmt: on
def run_processor(self, image):
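The h, w, and f fields above keep their runtime behavior across this change: under pydantic v1 (the version this codebase appears to use, judging by the validator and pydantic.generics imports elsewhere in the diff), Optional[int] with a non-None default accepts both ints and an explicit None. A small hypothetical model illustrating that:

    from typing import Optional
    from pydantic import BaseModel, Field

    class ShuffleParams(BaseModel):
        # Optional admits None; ge=0 is enforced only for actual ints
        h: Optional[int] = Field(default=512, ge=0)

    assert ShuffleParams().h == 512         # default used when omitted
    assert ShuffleParams(h=None).h is None  # explicit None passes validation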

View File

@@ -1,11 +1,10 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from functools import partial
-from typing import Literal, Optional, Union, get_args
+from typing import Literal, Optional, get_args
import torch
from diffusers import ControlNetModel
-from pydantic import BaseModel, Field
+from pydantic import Field
from invokeai.app.models.image import (ColorField, ImageCategory, ImageField,
ResourceOrigin)
@@ -18,7 +17,6 @@ from ..util.step_callback import stable_diffusion_step_callback
from .baseinvocation import BaseInvocation, InvocationConfig, InvocationContext
from .image import ImageOutput
-import re
from ...backend.model_management.lora import ModelPatcher
from ...backend.stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
from .model import UNetField, VaeField
@@ -76,7 +74,7 @@ class InpaintInvocation(BaseInvocation):
vae: VaeField = Field(default=None, description="Vae model")
# Inputs
-image: Union[ImageField, None] = Field(description="The input image")
+image: Optional[ImageField] = Field(description="The input image")
strength: float = Field(
default=0.75, gt=0, le=1, description="The strength of the original image"
)
@@ -86,7 +84,7 @@
)
# Inputs
-mask: Union[ImageField, None] = Field(description="The mask")
+mask: Optional[ImageField] = Field(description="The mask")
seam_size: int = Field(default=96, ge=1, description="The seam inpaint size (px)")
seam_blur: int = Field(
default=16, ge=0, description="The seam inpaint blur radius (px)"

View File

@@ -1,7 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import io
-from typing import Literal, Optional, Union
+from typing import Literal, Optional
import numpy
from PIL import Image, ImageFilter, ImageOps, ImageChops
@@ -67,7 +66,7 @@ class LoadImageInvocation(BaseInvocation):
type: Literal["load_image"] = "load_image"
# Inputs
-image: Union[ImageField, None] = Field(
+image: Optional[ImageField] = Field(
default=None, description="The image to load"
)
# fmt: on
@@ -87,7 +86,7 @@ class ShowImageInvocation(BaseInvocation):
type: Literal["show_image"] = "show_image"
# Inputs
-image: Union[ImageField, None] = Field(
+image: Optional[ImageField] = Field(
default=None, description="The image to show"
)
@@ -112,7 +111,7 @@ class ImageCropInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_crop"] = "img_crop"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to crop")
+image: Optional[ImageField] = Field(default=None, description="The image to crop")
x: int = Field(default=0, description="The left x coordinate of the crop rectangle")
y: int = Field(default=0, description="The top y coordinate of the crop rectangle")
width: int = Field(default=512, gt=0, description="The width of the crop rectangle")
@@ -150,8 +149,8 @@ class ImagePasteInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_paste"] = "img_paste"
# Inputs
-base_image: Union[ImageField, None] = Field(default=None, description="The base image")
-image: Union[ImageField, None] = Field(default=None, description="The image to paste")
+base_image: Optional[ImageField] = Field(default=None, description="The base image")
+image: Optional[ImageField] = Field(default=None, description="The image to paste")
mask: Optional[ImageField] = Field(default=None, description="The mask to use when pasting")
x: int = Field(default=0, description="The left x coordinate at which to paste the image")
y: int = Field(default=0, description="The top y coordinate at which to paste the image")
@@ -203,7 +202,7 @@ class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["tomask"] = "tomask"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to create the mask from")
+image: Optional[ImageField] = Field(default=None, description="The image to create the mask from")
invert: bool = Field(default=False, description="Whether or not to invert the mask")
# fmt: on
@@ -237,8 +236,8 @@ class ImageMultiplyInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_mul"] = "img_mul"
# Inputs
-image1: Union[ImageField, None] = Field(default=None, description="The first image to multiply")
-image2: Union[ImageField, None] = Field(default=None, description="The second image to multiply")
+image1: Optional[ImageField] = Field(default=None, description="The first image to multiply")
+image2: Optional[ImageField] = Field(default=None, description="The second image to multiply")
# fmt: on
def invoke(self, context: InvocationContext) -> ImageOutput:
@@ -273,7 +272,7 @@ class ImageChannelInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_chan"] = "img_chan"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to get the channel from")
+image: Optional[ImageField] = Field(default=None, description="The image to get the channel from")
channel: IMAGE_CHANNELS = Field(default="A", description="The channel to get")
# fmt: on
@@ -308,7 +307,7 @@ class ImageConvertInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_conv"] = "img_conv"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to convert")
+image: Optional[ImageField] = Field(default=None, description="The image to convert")
mode: IMAGE_MODES = Field(default="L", description="The mode to convert to")
# fmt: on
@@ -340,7 +339,7 @@ class ImageBlurInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_blur"] = "img_blur"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to blur")
+image: Optional[ImageField] = Field(default=None, description="The image to blur")
radius: float = Field(default=8.0, ge=0, description="The blur radius")
blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur")
# fmt: on
@@ -398,7 +397,7 @@ class ImageResizeInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_resize"] = "img_resize"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to resize")
+image: Optional[ImageField] = Field(default=None, description="The image to resize")
width: int = Field(ge=64, multiple_of=8, description="The width to resize to (px)")
height: int = Field(ge=64, multiple_of=8, description="The height to resize to (px)")
resample_mode: PIL_RESAMPLING_MODES = Field(default="bicubic", description="The resampling mode")
@@ -437,7 +436,7 @@ class ImageScaleInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_scale"] = "img_scale"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to scale")
+image: Optional[ImageField] = Field(default=None, description="The image to scale")
scale_factor: float = Field(gt=0, description="The factor by which to scale the image")
resample_mode: PIL_RESAMPLING_MODES = Field(default="bicubic", description="The resampling mode")
# fmt: on
@@ -477,7 +476,7 @@ class ImageLerpInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_lerp"] = "img_lerp"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to lerp")
+image: Optional[ImageField] = Field(default=None, description="The image to lerp")
min: int = Field(default=0, ge=0, le=255, description="The minimum output value")
max: int = Field(default=255, ge=0, le=255, description="The maximum output value")
# fmt: on
@@ -513,7 +512,7 @@ class ImageInverseLerpInvocation(BaseInvocation, PILInvocationConfig):
type: Literal["img_ilerp"] = "img_ilerp"
# Inputs
-image: Union[ImageField, None] = Field(default=None, description="The image to lerp")
+image: Optional[ImageField] = Field(default=None, description="The image to lerp")
min: int = Field(default=0, ge=0, le=255, description="The minimum input value")
max: int = Field(default=255, ge=0, le=255, description="The maximum input value")
# fmt: on
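One subtlety runs through the image fields in this file and the ones below: some are declared with default=None and some, like the earlier InpaintInvocation fields, give no default at all. Under pydantic v1 the two forms behave the same, because a field annotated Optional[X] implicitly defaults to None when no default is supplied (pydantic v2 later made such fields required). A hypothetical sketch:

    from typing import Optional
    from pydantic import BaseModel, Field

    class Node(BaseModel):
        # No default given, but the Optional annotation makes pydantic v1
        # treat the field as not required, defaulting it to None
        image: Optional[str] = Field(description="The input image")

    assert Node().image is None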

View File

@@ -1,6 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
-from typing import Literal, Union, get_args
+from typing import Literal, Optional, get_args
import numpy as np
import math
@@ -68,7 +68,7 @@ def get_tile_images(image: np.ndarray, width=8, height=8):
def tile_fill_missing(
-im: Image.Image, tile_size: int = 16, seed: Union[int, None] = None
+im: Image.Image, tile_size: int = 16, seed: Optional[int] = None
) -> Image.Image:
# Only fill if there's an alpha layer
if im.mode != "RGBA":
@@ -125,7 +125,7 @@ class InfillColorInvocation(BaseInvocation):
"""Infills transparent areas of an image with a solid color"""
type: Literal["infill_rgba"] = "infill_rgba"
-image: Union[ImageField, None] = Field(
+image: Optional[ImageField] = Field(
default=None, description="The image to infill"
)
color: ColorField = Field(
@@ -162,7 +162,7 @@ class InfillTileInvocation(BaseInvocation):
type: Literal["infill_tile"] = "infill_tile"
-image: Union[ImageField, None] = Field(
+image: Optional[ImageField] = Field(
default=None, description="The image to infill"
)
tile_size: int = Field(default=32, ge=1, description="The tile size (px)")
@@ -202,7 +202,7 @@ class InfillPatchMatchInvocation(BaseInvocation):
type: Literal["infill_patchmatch"] = "infill_patchmatch"
-image: Union[ImageField, None] = Field(
+image: Optional[ImageField] = Field(
default=None, description="The image to infill"
)

View File

@@ -1,21 +1,18 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
-from contextlib import ExitStack
from typing import List, Literal, Optional, Union
import einops
from pydantic import BaseModel, Field, validator
import torch
-from diffusers import ControlNetModel, DPMSolverMultistepScheduler
+from diffusers import ControlNetModel
from diffusers.image_processor import VaeImageProcessor
from diffusers.schedulers import SchedulerMixin as Scheduler
-from invokeai.app.util.misc import SEED_MAX, get_random_seed
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from ..models.image import ImageCategory, ImageField, ResourceOrigin
-from ...backend.image_util.seamless import configure_model_padding
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (
ConditioningData, ControlNetData, StableDiffusionGeneratorPipeline,
@@ -546,7 +543,7 @@ class ImageToLatentsInvocation(BaseInvocation):
type: Literal["i2l"] = "i2l"
# Inputs
-image: Union[ImageField, None] = Field(description="The image to encode")
+image: Optional[ImageField] = Field(description="The image to encode")
vae: VaeField = Field(default=None, description="Vae submodel")
tiled: bool = Field(default=False, description="Encode latents by overlaping tiles(less memory consumption)")

View File

@@ -1,4 +1,4 @@
-from typing import Literal, Union
+from typing import Literal, Optional
from pydantic import Field
@@ -15,7 +15,7 @@ class RestoreFaceInvocation(BaseInvocation):
type: Literal["restore_face"] = "restore_face"
# Inputs
-image: Union[ImageField, None] = Field(description="The input image")
+image: Optional[ImageField] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the restoration" )
# fmt: on

View File

@@ -1,6 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from typing import Literal, Union
+from typing import Literal, Optional
from pydantic import Field
@@ -16,7 +16,7 @@ class UpscaleInvocation(BaseInvocation):
type: Literal["upscale"] = "upscale"
# Inputs
-image: Union[ImageField, None] = Field(description="The input image", default=None)
+image: Optional[ImageField] = Field(description="The input image", default=None)
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2, 4] = Field(default=2, description="The upscale level")
# fmt: on

View File

@@ -1,8 +1,7 @@
from abc import ABC, abstractmethod
import sqlite3
import threading
-from typing import Union, cast
-from invokeai.app.services.board_record_storage import BoardRecord
+from typing import Optional, cast
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
from invokeai.app.services.models.image_record import (
@@ -44,7 +43,7 @@ class BoardImageRecordStorageBase(ABC):
def get_board_for_image(
self,
image_name: str,
-) -> Union[str, None]:
+) -> Optional[str]:
"""Gets an image's board id, if it has one."""
pass
@@ -215,7 +214,7 @@ class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase):
def get_board_for_image(
self,
image_name: str,
-) -> Union[str, None]:
+) -> Optional[str]:
try:
self._lock.acquire()
self._cursor.execute(

View File

@@ -1,6 +1,6 @@
from abc import ABC, abstractmethod
from logging import Logger
-from typing import List, Union
+from typing import List, Union, Optional
from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase
from invokeai.app.services.board_record_storage import (
BoardRecord,
@@ -49,7 +49,7 @@ class BoardImagesServiceABC(ABC):
def get_board_for_image(
self,
image_name: str,
-) -> Union[str, None]:
+) -> Optional[str]:
"""Gets an image's board id, if it has one."""
pass
@@ -126,13 +126,13 @@ class BoardImagesService(BoardImagesServiceABC):
def get_board_for_image(
self,
image_name: str,
-) -> Union[str, None]:
+) -> Optional[str]:
board_id = self._services.board_image_records.get_board_for_image(image_name)
return board_id
def board_record_to_dto(
-board_record: BoardRecord, cover_image_name: Union[str, None], image_count: int
+board_record: BoardRecord, cover_image_name: Optional[str], image_count: int
) -> BoardDTO:
"""Converts a board record to a board DTO."""
return BoardDTO(

View File

@@ -1,10 +1,9 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from typing import Any, Union
+from typing import Any, Optional
from invokeai.app.models.image import ProgressImage
from invokeai.app.util.misc import get_timestamp
from invokeai.app.services.model_manager_service import BaseModelType, ModelType, SubModelType, ModelInfo
-from invokeai.app.models.exceptions import CanceledException
class EventServiceBase:
session_event: str = "session_event"
@@ -28,7 +27,7 @@
graph_execution_state_id: str,
node: dict,
source_node_id: str,
-progress_image: Union[ProgressImage, None],
+progress_image: Optional[ProgressImage],
step: int,
total_steps: int,
) -> None:

View File

@@ -61,8 +61,6 @@ def get_input_field(node: BaseInvocation, field: str) -> Any:
node_input_field = node_inputs.get(field) or None
return node_input_field
-from typing import Optional, Union, List, get_args
def is_union_subtype(t1, t2):
t1_args = get_args(t1)
t2_args = get_args(t2)
@@ -847,7 +845,7 @@ class GraphExecutionState(BaseModel):
]
}
-def next(self) -> Union[BaseInvocation, None]:
+def next(self) -> Optional[BaseInvocation]:
"""Gets the next node ready to execute."""
# TODO: enable multiple nodes to execute simultaneously by tracking currently executing nodes

View File

@@ -8,7 +8,6 @@ from PIL.Image import Image as PILImageType
from PIL import Image, PngImagePlugin
from send2trash import send2trash
-from invokeai.app.models.image import ResourceOrigin
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
@@ -175,7 +174,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
for folder in folders:
folder.mkdir(parents=True, exist_ok=True)
-def __get_cache(self, image_name: Path) -> Union[PILImageType, None]:
+def __get_cache(self, image_name: Path) -> Optional[PILImageType]:
return None if image_name not in self.__cache else self.__cache[image_name]
def __set_cache(self, image_name: Path, image: PILImageType):

View File

@@ -3,7 +3,6 @@ from datetime import datetime
from typing import Generic, Optional, TypeVar, cast
import sqlite3
import threading
-from typing import Optional, Union
from pydantic import BaseModel, Field
from pydantic.generics import GenericModel
@@ -116,7 +115,7 @@ class ImageRecordStorageBase(ABC):
pass
@abstractmethod
-def get_most_recent_image_for_board(self, board_id: str) -> Union[ImageRecord, None]:
+def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
"""Gets the most recent image for a board."""
pass
@@ -208,7 +207,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
"""
)
-def get(self, image_name: str) -> Union[ImageRecord, None]:
+def get(self, image_name: str) -> Optional[ImageRecord]:
try:
self._lock.acquire()
@@ -220,7 +219,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
(image_name,),
)
-result = cast(Union[sqlite3.Row, None], self._cursor.fetchone())
+result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordNotFoundException from e
@@ -475,7 +474,7 @@
def get_most_recent_image_for_board(
self, board_id: str
-) -> Union[ImageRecord, None]:
+) -> Optional[ImageRecord]:
try:
self._lock.acquire()
self._cursor.execute(
@@ -490,7 +489,7 @@
(board_id,),
)
-result = cast(Union[sqlite3.Row, None], self._cursor.fetchone())
+result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
finally:
self._lock.release()
if result is None:
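The cast(Optional[sqlite3.Row], ...) above matches what the sqlite3 module actually returns: Cursor.fetchone() yields one row, or None when the result set is empty, so the Optional annotation lets the None check that follows type-check cleanly. A self-contained sketch of the same pattern (table and column names are made up):

    import sqlite3
    from typing import Optional, cast

    def get_row(conn: sqlite3.Connection, name: str) -> Optional[sqlite3.Row]:
        # fetchone() returns None when no row matches the query
        cursor = conn.execute(
            "SELECT * FROM images WHERE image_name = ?;", (name,)
        )
        return cast(Optional[sqlite3.Row], cursor.fetchone())

    conn = sqlite3.connect(":memory:")
    conn.row_factory = sqlite3.Row
    conn.execute("CREATE TABLE images (image_name TEXT);")
    assert get_row(conn, "missing") is None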

View File

@@ -370,7 +370,7 @@ class ImageService(ImageServiceABC):
def _get_metadata(
self, session_id: Optional[str] = None, node_id: Optional[str] = None
-) -> Union[ImageMetadata, None]:
+) -> Optional[ImageMetadata]:
"""Get the metadata for a node."""
metadata = None

View File

@@ -5,8 +5,7 @@ from abc import ABC, abstractmethod
from queue import Queue
from pydantic import BaseModel, Field
-from typing import Union
+from typing import Optional
class InvocationQueueItem(BaseModel):
graph_execution_state_id: str = Field(description="The ID of the graph execution state")
@@ -23,7 +22,7 @@ class InvocationQueueABC(ABC):
pass
@abstractmethod
-def put(self, item: Union[InvocationQueueItem, None]) -> None:
+def put(self, item: Optional[InvocationQueueItem]) -> None:
pass
@abstractmethod
@@ -58,7 +57,7 @@ class MemoryInvocationQueue(InvocationQueueABC):
return item
-def put(self, item: Union[InvocationQueueItem, None]) -> None:
+def put(self, item: Optional[InvocationQueueItem]) -> None:
self.__queue.put(item)
def cancel(self, graph_execution_state_id: str) -> None:

View File

@@ -1,15 +1,11 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from abc import ABC
-from threading import Event, Thread
-from typing import Union
+from typing import Optional
from ..invocations.baseinvocation import InvocationContext
from .graph import Graph, GraphExecutionState
-from .invocation_queue import InvocationQueueABC, InvocationQueueItem
+from .invocation_queue import InvocationQueueItem
from .invocation_services import InvocationServices
-from .item_storage import ItemStorageABC
class Invoker:
"""The invoker, used to execute invocations"""
@@ -22,7 +18,7 @@ class Invoker:
def invoke(
self, graph_execution_state: GraphExecutionState, invoke_all: bool = False
-) -> Union[str, None]:
+) -> Optional[str]:
"""Determines the next node to invoke and enqueues it, preparing if needed.
Returns the id of the queued node, or `None` if there are no nodes left to enqueue."""
@@ -46,7 +42,7 @@
return invocation.id
-def create_execution_state(self, graph: Union[Graph, None] = None) -> GraphExecutionState:
+def create_execution_state(self, graph: Optional[Graph] = None) -> GraphExecutionState:
"""Creates a new execution state for the given graph"""
new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
self.services.graph_execution_manager.set(new_state)

View File

@@ -3,7 +3,7 @@
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
-from typing import Dict, Union
+from typing import Dict, Union, Optional
import torch
@@ -55,7 +55,7 @@ class ForwardCacheLatentsStorage(LatentsStorageBase):
if name in self.__cache:
del self.__cache[name]
-def __get_cache(self, name: str) -> Union[torch.Tensor, None]:
+def __get_cache(self, name: str) -> Optional[torch.Tensor]:
return None if name not in self.__cache else self.__cache[name]
def __set_cache(self, name: str, data: torch.Tensor):
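The __get_cache conditional above (and its twin in the image file storage) is equivalent to a plain dict.get, which already returns None for a missing key. A sketch under that assumption:

    from typing import Dict, Optional

    cache: Dict[str, bytes] = {}

    def get_cached(name: str) -> Optional[bytes]:
        # dict.get returns None for absent keys, matching the
        # `None if name not in cache else cache[name]` form
        return cache.get(name)

    assert get_cached("missing") is None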

View File

@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
-from typing import Any, Union
+from typing import Any, Optional
import networkx as nx
from invokeai.app.models.metadata import ImageMetadata
@@ -34,7 +34,7 @@ class CoreMetadataService(MetadataServiceBase):
return metadata
-def _find_nearest_ancestor(self, G: nx.DiGraph, node_id: str) -> Union[str, None]:
+def _find_nearest_ancestor(self, G: nx.DiGraph, node_id: str) -> Optional[str]:
"""
Finds the id of the nearest ancestor (of a valid type) of a given node.
@@ -65,7 +65,7 @@ class CoreMetadataService(MetadataServiceBase):
def _get_additional_metadata(
self, graph: Graph, node_id: str
-) -> Union[dict[str, Any], None]:
+) -> Optional[dict[str, Any]]:
"""
Returns additional metadata for a given node.

View File

@@ -88,7 +88,7 @@ class ImageUrlsDTO(BaseModel):
class ImageDTO(ImageRecord, ImageUrlsDTO):
"""Deserialized image record, enriched for the frontend."""
-board_id: Union[str, None] = Field(
+board_id: Optional[str] = Field(
description="The id of the board the image belongs to, if one exists."
)
"""The id of the board the image belongs to, if one exists."""
@@ -96,7 +96,7 @@
def image_record_to_dto(
-image_record: ImageRecord, image_url: str, thumbnail_url: str, board_id: Union[str, None]
+image_record: ImageRecord, image_url: str, thumbnail_url: str, board_id: Optional[str]
) -> ImageDTO:
"""Converts an image record to an image DTO."""
return ImageDTO(

View File

@@ -1,6 +1,6 @@
import sqlite3
from threading import Lock
-from typing import Generic, TypeVar, Union, get_args
+from typing import Generic, TypeVar, Optional, Union, get_args
from pydantic import BaseModel, parse_raw_as
@@ -63,7 +63,7 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
self._lock.release()
self._on_changed(item)
-def get(self, id: str) -> Union[T, None]:
+def get(self, id: str) -> Optional[T]:
try:
self._lock.acquire()
self._cursor.execute(

View File

@@ -4,11 +4,10 @@ invokeai.backend.generator.inpaint descends from .generator
from __future__ import annotations
import math
-from typing import Tuple, Union
+from typing import Tuple, Union, Optional
import cv2
import numpy as np
-import PIL
import torch
from PIL import Image, ImageChops, ImageFilter, ImageOps
@@ -76,7 +75,7 @@ class Inpaint(Img2Img):
return im_patched
def tile_fill_missing(
-self, im: Image.Image, tile_size: int = 16, seed: Union[int, None] = None
+self, im: Image.Image, tile_size: int = 16, seed: Optional[int] = None
) -> Image.Image:
# Only fill if there's an alpha layer
if im.mode != "RGBA":

View File

@@ -306,7 +306,6 @@ class ModelManager(object):
and sequential_offload boolean. Note that the default device
type and precision are set up for a CUDA system running at half precision.
"""
-self.config_path = None
if isinstance(config, (str, Path)):
self.config_path = Path(config)

View File

@@ -6,7 +6,7 @@ from dataclasses import dataclass
from diffusers import ModelMixin, ConfigMixin
from pathlib import Path
-from typing import Callable, Literal, Union, Dict
+from typing import Callable, Literal, Union, Dict, Optional
from picklescan.scanner import scan_file_path
from .models import (
@@ -64,7 +64,7 @@ class ModelProbe(object):
@classmethod
def probe(cls,
model_path: Path,
-model: Union[Dict, ModelMixin] = None,
+model: Optional[Union[Dict, ModelMixin]],
prediction_type_helper: Callable[[Path],SchedulerPredictionType] = None)->ModelProbeInfo:
'''
Probe the model at model_path and return sufficient information about it
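A behavioral note on this last hunk: the old signature defaulted model to None, while the new Optional[Union[Dict, ModelMixin]] annotation drops the default, so model becomes a required argument that may still explicitly be None. In plain Python terms:

    from typing import Optional

    def old(model: Optional[dict] = None): ...  # caller may omit model
    def new(model: Optional[dict]): ...         # caller must pass model

    old()      # fine
    new(None)  # fine: None must be passed explicitly
    # new()    # TypeError: missing required positional argument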