mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
Compare commits
1 Commit
feat/save-
...
feat/nodes
Author | SHA1 | Date
---|---|---
| 8918501869 |
@@ -45,17 +45,13 @@ async def upload_image(
    if not file.content_type.startswith("image"):
        raise HTTPException(status_code=415, detail="Not an image")

    metadata: Optional[str] = None
    workflow: Optional[str] = None

    contents = await file.read()

    try:
        pil_image = Image.open(io.BytesIO(contents))
        if crop_visible:
            bbox = pil_image.getbbox()
            pil_image = pil_image.crop(bbox)
        metadata = pil_image.info.get("invokeai_metadata", None)
        workflow = pil_image.info.get("invokeai_workflow", None)
    except Exception:
        # Error opening the image
        raise HTTPException(status_code=415, detail="Failed to read image")

@@ -67,8 +63,6 @@ async def upload_image(
        image_category=image_category,
        session_id=session_id,
        board_id=board_id,
        metadata=metadata,
        workflow=workflow,
        is_intermediate=is_intermediate,
    )
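For context on `crop_visible`: PIL's `Image.getbbox()` returns the bounding box of the non-zero region (for RGBA, the non-transparent pixels), so cropping to it trims empty borders from an upload. A minimal standalone sketch of that step, independent of the router code:

from PIL import Image

# A transparent 100x100 canvas with a small opaque square in the middle.
image = Image.new("RGBA", (100, 100), (0, 0, 0, 0))
image.paste((255, 0, 0, 255), (40, 40, 60, 60))

bbox = image.getbbox()   # bounding box of the non-zero pixels
print(bbox)              # (40, 40, 60, 60)
cropped = image.crop(bbox)
print(cropped.size)      # (20, 20)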
@@ -645,13 +645,19 @@ class ColorCorrectInvocation(BaseInvocation):
    mask_blur_radius: float = InputField(default=8, description="Mask blur radius")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        result = context.services.images.get_pil_image(self.image.image_name).convert("RGBA")

        init_image = context.services.images.get_pil_image(self.reference.image_name)
        # fit reference image to the input image
        if init_image.size != result.size:
            init_image = init_image.resize((result.width, result.height), Image.BILINEAR)

        pil_init_mask = None
        if self.mask is not None:
            pil_init_mask = context.services.images.get_pil_image(self.mask.image_name).convert("L")

        init_image = context.services.images.get_pil_image(self.reference.image_name)

        result = context.services.images.get_pil_image(self.image.image_name).convert("RGBA")
        # fit mask to the input image
        if pil_init_mask.size != result.size:
            pil_init_mask = pil_init_mask.resize((result.width, result.height), Image.BILINEAR)

        # if init_image is None or init_mask is None:
        #     return result
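Both resize steps in this hunk follow the same fit-to-input pattern: color correction needs the reference image and the mask to share the result's dimensions before compositing. A minimal standalone sketch with plain PIL images (the `fit_to` helper is illustrative, not part of the codebase):

from PIL import Image

def fit_to(image: Image.Image, target: Image.Image) -> Image.Image:
    """Resize `image` to match `target`'s dimensions, if they differ."""
    if image.size != target.size:
        return image.resize((target.width, target.height), Image.BILINEAR)
    return image

result = Image.new("RGBA", (512, 512))
reference = Image.new("RGB", (256, 256))
mask = Image.new("L", (256, 256))

reference = fit_to(reference, result)  # (256, 256) -> (512, 512)
mask = fit_to(mask, result)            # same, so sizes now all agree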
@@ -77,6 +77,25 @@ DEFAULT_PRECISION = choose_precision(choose_torch_device())
SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]


def fit_latents(latents: torch.Tensor, max_latents_size: int, device: torch.device) -> torch.Tensor:
    if max_latents_size == 0:
        return latents

    latents_area = latents.shape[2] * latents.shape[3]
    if latents_area <= max_latents_size:
        return latents

    scale_factor = np.sqrt(max_latents_size / latents_area)
    scaled_latents = torch.nn.functional.interpolate(
        latents.to(device),
        scale_factor=scale_factor,
        mode="bilinear",
        antialias=True,
    )

    return scaled_latents
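In effect, `max_latents_size` caps the latent area (`H * W` of the latent tensor), and anything larger is bilinearly downsampled so its area lands back under the cap; scaling both sides by `sqrt(cap / area)` is what preserves the aspect ratio. A worked example of the arithmetic, as a standalone sketch assuming a 4-channel Stable Diffusion latent:

import numpy as np
import torch

# A 768x768-pixel image corresponds to a 96x96 latent (1/8 scale per side).
latents = torch.randn(1, 4, 96, 96)

max_latents_size = 64 * 64  # cap: the latent area of a 512x512 image
latents_area = latents.shape[2] * latents.shape[3]  # 9216 > 4096, so resize

scale_factor = np.sqrt(max_latents_size / latents_area)  # sqrt(4096/9216) ~= 0.667
scaled = torch.nn.functional.interpolate(
    latents, scale_factor=scale_factor, mode="bilinear", antialias=True
)
print(scaled.shape)  # torch.Size([1, 4, 64, 64]) -- area back at the cap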

@invocation_output("scheduler_output")
class SchedulerOutput(BaseInvocationOutput):
    scheduler: SAMPLER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler)
@@ -500,14 +519,20 @@ class DenoiseLatentsInvocation(BaseInvocation):
        with SilenceWarnings():  # this quenches NSFW nag from diffusers
            seed = None
            noise = None
            max_image_size = context.services.configuration.max_image_size

            if self.noise is not None:
                noise = context.services.latents.get(self.noise.latents_name)
                seed = self.noise.seed
                noise = fit_latents(latents=noise, max_latents_size=max_image_size // 64, device=choose_torch_device())

            if self.latents is not None:
                latents = context.services.latents.get(self.latents.latents_name)
                if seed is None:
                    seed = self.latents.seed
                latents = fit_latents(
                    latents=latents, max_latents_size=max_image_size // 64, device=choose_torch_device()
                )

            if noise is not None and noise.shape[1:] != latents.shape[1:]:
                raise Exception(f"Incompatable 'noise' and 'latents' shapes: {latents.shape=} {noise.shape=}")
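The `max_image_size // 64` conversion follows from Stable Diffusion's 8x VAE downsampling: each latent cell covers an 8x8 pixel block, so a pixel-area budget divides by 64 to become a latent-area budget. A quick check of the arithmetic with the config default:

max_image_size = 512 * 512               # default config value, in pixels
max_latents_size = max_image_size // 64  # 4096 latent cells

# A 64x64 latent decodes to a 512x512 image, exactly at the cap.
assert max_latents_size == 64 * 64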
@@ -532,8 +557,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
        def _lora_loader():
            for lora in self.unet.loras:
                lora_info = context.services.model_manager.get_model(
                    **lora.dict(exclude={"weight"}),
                    context=context,
                    **lora.dict(exclude={"weight"}), context=context
                )
                yield (lora_info.context.model, lora.weight)
                del lora_info
@@ -85,8 +85,11 @@ class CoreMetadata(BaseModelExcludeNull):
class ImageMetadata(BaseModelExcludeNull):
    """An image's generation metadata"""

    metadata: Optional[dict] = Field(default=None, description="The metadata associated with the image")
    workflow: Optional[dict] = Field(default=None, description="The workflow associated with the image")
    metadata: Optional[dict] = Field(
        default=None,
        description="The image's core metadata, if it was created in the Linear or Canvas UI",
    )
    graph: Optional[dict] = Field(default=None, description="The graph that created the image")


@invocation_output("metadata_accumulator_output")
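`ImageMetadata` extends `BaseModelExcludeNull`, which, going by its name, serializes with `None` fields dropped so optional metadata and graph keys don't clutter API responses. The class body below is an assumption from that name (pydantic v1 style), not the repository's actual implementation:

from typing import Any, Optional

from pydantic import BaseModel, Field


class BaseModelExcludeNull(BaseModel):
    """Sketch: a base model whose dict() drops fields that are None."""

    def dict(self, **kwargs: Any) -> dict:
        kwargs["exclude_none"] = True
        return super().dict(**kwargs)


class ImageMetadata(BaseModelExcludeNull):
    metadata: Optional[dict] = Field(default=None)
    graph: Optional[dict] = Field(default=None)


print(ImageMetadata(metadata={"seed": 123}).dict())  # {'metadata': {'seed': 123}}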
@@ -255,6 +255,7 @@ class InvokeAIAppConfig(InvokeAISettings):
    attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', category="Generation", )
    force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)
    force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)
    max_image_size : int = Field(default=512 * 512, description="The maximum size of images, in pixels. The maximum size for latents is inferred from this evaluating `max_image_size // 8`. If the size is exceeded during denoising, the latents will be resized.", category="Generation", )

    # QUEUE
    max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", category="Queue", )
@@ -59,7 +59,7 @@ class ImageFileStorageBase(ABC):
        self,
        image: PILImageType,
        image_name: str,
        metadata: Optional[Union[str, dict]] = None,
        metadata: Optional[dict] = None,
        workflow: Optional[str] = None,
        thumbnail_size: int = 256,
    ) -> None:
@@ -109,7 +109,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
        self,
        image: PILImageType,
        image_name: str,
        metadata: Optional[Union[str, dict]] = None,
        metadata: Optional[dict] = None,
        workflow: Optional[str] = None,
        thumbnail_size: int = 256,
    ) -> None:
@@ -119,10 +119,20 @@ class DiskImageFileStorage(ImageFileStorageBase):
        pnginfo = PngImagePlugin.PngInfo()

        if metadata is not None:
            pnginfo.add_text("invokeai_metadata", json.dumps(metadata) if type(metadata) is dict else metadata)
        if workflow is not None:
            pnginfo.add_text("invokeai_workflow", workflow)
        if metadata is not None or workflow is not None:
            if metadata is not None:
                pnginfo.add_text("invokeai_metadata", json.dumps(metadata))
            if workflow is not None:
                pnginfo.add_text("invokeai_workflow", workflow)
        else:
            # For uploaded images, we want to retain metadata. PIL strips it on save; manually add it back
            # TODO: retain non-invokeai metadata on save...
            original_metadata = image.info.get("invokeai_metadata", None)
            if original_metadata is not None:
                pnginfo.add_text("invokeai_metadata", original_metadata)
            original_workflow = image.info.get("invokeai_workflow", None)
            if original_workflow is not None:
                pnginfo.add_text("invokeai_workflow", original_workflow)

        image.save(image_path, "PNG", pnginfo=pnginfo)
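This is the write half of the PNG tEXt-chunk round trip that the upload endpoint reads back via `pil_image.info`. A self-contained sketch of the full cycle with plain PIL (the file name and payload are illustrative):

import json
from PIL import Image, PngImagePlugin

# Write: attach text chunks, save as PNG.
image = Image.new("RGB", (64, 64))
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text("invokeai_metadata", json.dumps({"seed": 123}))
image.save("example.png", "PNG", pnginfo=pnginfo)

# Read: text chunks come back via the image's `info` dict.
reloaded = Image.open("example.png")
metadata = json.loads(reloaded.info["invokeai_metadata"])
print(metadata["seed"])  # 123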
@@ -3,12 +3,11 @@ import sqlite3
import threading
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Generic, Optional, TypeVar, Union, cast
from typing import Generic, Optional, TypeVar, cast

from pydantic import BaseModel, Field
from pydantic.generics import GenericModel

from invokeai.app.invocations.metadata import ImageMetadata
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from invokeai.app.services.models.image_record import ImageRecord, ImageRecordChanges, deserialize_image_record
@@ -82,7 +81,7 @@ class ImageRecordStorageBase(ABC):
        pass

    @abstractmethod
    def get_metadata(self, image_name: str) -> ImageMetadata:
    def get_metadata(self, image_name: str) -> Optional[dict]:
        """Gets an image's metadata'."""
        pass
@@ -135,8 +134,7 @@ class ImageRecordStorageBase(ABC):
        height: int,
        session_id: Optional[str],
        node_id: Optional[str],
        metadata: Optional[Union[str, dict]],
        workflow: Optional[str],
        metadata: Optional[dict],
        is_intermediate: bool = False,
        starred: bool = False,
    ) -> datetime:
@@ -206,13 +204,6 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
            """
        )

        if "workflow" not in columns:
            self._cursor.execute(
                """--sql
                ALTER TABLE images ADD COLUMN workflow TEXT;
                """
            )

        # Create the `images` table indices.
        self._cursor.execute(
            """--sql
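The removed block follows a common ad-hoc SQLite migration pattern: inspect the table's existing columns, then `ALTER TABLE` only when the column is missing, so startup is idempotent. A minimal self-contained sketch of that pattern (the `CREATE TABLE` here is a stand-in, not the real schema):

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE images (image_name TEXT PRIMARY KEY, metadata TEXT);")

# PRAGMA table_info returns one row per column; the name is the second field.
cursor.execute("PRAGMA table_info(images);")
columns = [row[1] for row in cursor.fetchall()]

if "workflow" not in columns:
    cursor.execute("ALTER TABLE images ADD COLUMN workflow TEXT;")
conn.commit()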
@@ -278,31 +269,22 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        return deserialize_image_record(dict(result))

    def get_metadata(self, image_name: str) -> ImageMetadata:
    def get_metadata(self, image_name: str) -> Optional[dict]:
        try:
            self._lock.acquire()

            self._cursor.execute(
                """--sql
                SELECT metadata, workflow FROM images
                SELECT images.metadata FROM images
                WHERE image_name = ?;
                """,
                (image_name,),
            )

            result = cast(Optional[sqlite3.Row], self._cursor.fetchone())

            if not result:
                return ImageMetadata()

            as_dict = dict(result)
            metadata_raw = cast(Optional[str], as_dict.get("metadata", None))
            workflow_raw = cast(Optional[str], as_dict.get("workflow", None))

            return ImageMetadata(
                metadata=json.loads(metadata_raw) if metadata_raw is not None else None,
                workflow=json.loads(workflow_raw) if workflow_raw is not None else None,
            )
            if not result or not result[0]:
                return None
            return json.loads(result[0])
        except sqlite3.Error as e:
            self._conn.rollback()
            raise ImageRecordNotFoundException from e
@@ -537,15 +519,12 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
        width: int,
        height: int,
        node_id: Optional[str],
        metadata: Optional[Union[str, dict]],
        workflow: Optional[str],
        metadata: Optional[dict],
        is_intermediate: bool = False,
        starred: bool = False,
    ) -> datetime:
        try:
            metadata_json: Optional[str] = None
            if metadata is not None:
                metadata_json = metadata if type(metadata) is str else json.dumps(metadata)
            metadata_json = None if metadata is None else json.dumps(metadata)
            self._lock.acquire()
            self._cursor.execute(
                """--sql
@@ -558,11 +537,10 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
                    node_id,
                    session_id,
                    metadata,
                    workflow,
                    is_intermediate,
                    starred
                )
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
                """,
                (
                    image_name,
@@ -573,7 +551,6 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
                    node_id,
                    session_id,
                    metadata_json,
                    workflow,
                    is_intermediate,
                    starred,
                ),
@@ -1,6 +1,6 @@
from abc import ABC, abstractmethod
from logging import Logger
from typing import TYPE_CHECKING, Callable, Optional, Union
from typing import TYPE_CHECKING, Callable, Optional

from PIL.Image import Image as PILImageType

@@ -29,6 +29,7 @@ from invokeai.app.services.item_storage import ItemStorageABC
from invokeai.app.services.models.image_record import ImageDTO, ImageRecord, ImageRecordChanges, image_record_to_dto
from invokeai.app.services.resource_name import NameServiceBase
from invokeai.app.services.urls import UrlServiceBase
from invokeai.app.util.metadata import get_metadata_graph_from_raw_session

if TYPE_CHECKING:
    from invokeai.app.services.graph import GraphExecutionState
@@ -70,7 +71,7 @@ class ImageServiceABC(ABC):
        session_id: Optional[str] = None,
        board_id: Optional[str] = None,
        is_intermediate: bool = False,
        metadata: Optional[Union[str, dict]] = None,
        metadata: Optional[dict] = None,
        workflow: Optional[str] = None,
    ) -> ImageDTO:
        """Creates an image, storing the file and its metadata."""
@@ -195,7 +196,7 @@ class ImageService(ImageServiceABC):
        session_id: Optional[str] = None,
        board_id: Optional[str] = None,
        is_intermediate: bool = False,
        metadata: Optional[Union[str, dict]] = None,
        metadata: Optional[dict] = None,
        workflow: Optional[str] = None,
    ) -> ImageDTO:
        if image_origin not in ResourceOrigin:
@@ -233,7 +234,6 @@ class ImageService(ImageServiceABC):
                # Nullable fields
                node_id=node_id,
                metadata=metadata,
                workflow=workflow,
                session_id=session_id,
            )
            if board_id is not None:
@@ -311,7 +311,23 @@ class ImageService(ImageServiceABC):

    def get_metadata(self, image_name: str) -> Optional[ImageMetadata]:
        try:
            return self._services.image_records.get_metadata(image_name)
            image_record = self._services.image_records.get(image_name)
            metadata = self._services.image_records.get_metadata(image_name)

            if not image_record.session_id:
                return ImageMetadata(metadata=metadata)

            session_raw = self._services.graph_execution_manager.get_raw(image_record.session_id)
            graph = None

            if session_raw:
                try:
                    graph = get_metadata_graph_from_raw_session(session_raw)
                except Exception as e:
                    self._services.logger.warn(f"Failed to parse session graph: {e}")
                    graph = None

            return ImageMetadata(graph=graph, metadata=metadata)
        except ImageRecordNotFoundException:
            self._services.logger.error("Image record not found")
            raise
@@ -28,7 +28,7 @@ import {
  setShouldShowImageDetails,
  setShouldShowProgressInViewer,
} from 'features/ui/store/uiSlice';
import { memo, useCallback } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import {
@@ -41,10 +41,9 @@ import {
import { FaCircleNodes, FaEllipsis } from 'react-icons/fa6';
import {
  useGetImageDTOQuery,
  useGetImageMetadataQuery,
  useGetImageMetadataFromFileQuery,
} from 'services/api/endpoints/images';
import { menuListMotionProps } from 'theme/components/menu';
import { useDebounce } from 'use-debounce';
import { sentImageToImg2Img } from '../../store/actions';
import SingleSelectionMenuItems from '../ImageContextMenu/SingleSelectionMenuItems';

@@ -93,6 +92,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
    shouldShowImageDetails,
    lastSelectedImage,
    shouldShowProgressInViewer,
    shouldFetchMetadataFromApi,
  } = useAppSelector(currentImageButtonsSelector);

  const isUpscalingEnabled = useFeatureStatus('upscaling').isFeatureEnabled;
@@ -107,10 +107,16 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
    lastSelectedImage?.image_name ?? skipToken
  );

  const [debouncedImageName] = useDebounce(lastSelectedImage?.image_name, 300);
  const getMetadataArg = useMemo(() => {
    if (lastSelectedImage) {
      return { image: lastSelectedImage, shouldFetchMetadataFromApi };
    } else {
      return skipToken;
    }
  }, [lastSelectedImage, shouldFetchMetadataFromApi]);

  const { metadata, workflow, isLoading } = useGetImageMetadataQuery(
    debouncedImageName ?? skipToken,
  const { metadata, workflow, isLoading } = useGetImageMetadataFromFileQuery(
    getMetadataArg,
    {
      selectFromResult: (res) => ({
        isLoading: res.isFetching,
@@ -1,9 +1,8 @@
import { Flex, MenuItem, Spinner } from '@chakra-ui/react';
import { useStore } from '@nanostores/react';
import { skipToken } from '@reduxjs/toolkit/dist/query';
import { useAppToaster } from 'app/components/Toaster';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { useAppDispatch } from 'app/store/storeHooks';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import {
  imagesToChangeSelected,
@@ -33,12 +32,12 @@ import {
import { FaCircleNodes } from 'react-icons/fa6';
import { MdStar, MdStarBorder } from 'react-icons/md';
import {
  useGetImageMetadataQuery,
  useGetImageMetadataFromFileQuery,
  useStarImagesMutation,
  useUnstarImagesMutation,
} from 'services/api/endpoints/images';
import { ImageDTO } from 'services/api/types';
import { useDebounce } from 'use-debounce';
import { configSelector } from '../../../system/store/configSelectors';
import { sentImageToCanvas, sentImageToImg2Img } from '../../store/actions';

type SingleSelectionMenuItemsProps = {
@@ -54,12 +53,11 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
  const toaster = useAppToaster();

  const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled;
  const { shouldFetchMetadataFromApi } = useAppSelector(configSelector);
  const customStarUi = useStore($customStarUI);

  const [debouncedImageName] = useDebounce(imageDTO.image_name, 300);

  const { metadata, workflow, isLoading } = useGetImageMetadataQuery(
    debouncedImageName ?? skipToken,
  const { metadata, workflow, isLoading } = useGetImageMetadataFromFileQuery(
    { image: imageDTO, shouldFetchMetadataFromApi },
    {
      selectFromResult: (res) => ({
        isLoading: res.isFetching,
@@ -9,15 +9,15 @@ import {
  Tabs,
  Text,
} from '@chakra-ui/react';
import { skipToken } from '@reduxjs/toolkit/dist/query';
import { IAINoContentFallback } from 'common/components/IAIImageFallback';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { useGetImageMetadataQuery } from 'services/api/endpoints/images';
import { useGetImageMetadataFromFileQuery } from 'services/api/endpoints/images';
import { ImageDTO } from 'services/api/types';
import { useDebounce } from 'use-debounce';
import DataViewer from './DataViewer';
import ImageMetadataActions from './ImageMetadataActions';
import { useAppSelector } from '../../../../app/store/storeHooks';
import { configSelector } from '../../../system/store/configSelectors';
import { useTranslation } from 'react-i18next';

type ImageMetadataViewerProps = {
  image: ImageDTO;
@@ -31,10 +31,10 @@ const ImageMetadataViewer = ({ image }: ImageMetadataViewerProps) => {
  // });
  const { t } = useTranslation();

  const [debouncedImageName] = useDebounce(image.image_name, 300);
  const { shouldFetchMetadataFromApi } = useAppSelector(configSelector);

  const { metadata, workflow } = useGetImageMetadataQuery(
    debouncedImageName ?? skipToken,
  const { metadata, workflow } = useGetImageMetadataFromFileQuery(
    { image, shouldFetchMetadataFromApi },
    {
      selectFromResult: (res) => ({
        metadata: res?.currentData?.metadata,
@@ -10,7 +10,6 @@ import {
import {
  ImageMetadataAndWorkflow,
  zCoreMetadata,
  zWorkflow,
} from 'features/nodes/types/types';
import { getMetadataAndWorkflowFromImageBlob } from 'features/nodes/util/getMetadataAndWorkflowFromImageBlob';
import { keyBy } from 'lodash-es';
@@ -24,6 +23,7 @@ import {
  ListImagesArgs,
  OffsetPaginatedResults_ImageDTO_,
  PostUploadAction,
  UnsafeImageMetadata,
} from '../types';
import {
  getCategories,
@@ -33,7 +33,6 @@ import {
  imagesSelectors,
} from '../util';
import { boardsApi } from './boards';
import { logger } from 'app/logging/logger';

export const imagesApi = api.injectEndpoints({
  endpoints: (build) => ({
@@ -114,33 +113,11 @@ export const imagesApi = api.injectEndpoints({
      ],
      keepUnusedDataFor: 86400, // 24 hours
    }),
    getImageMetadata: build.query<ImageMetadataAndWorkflow, string>({
    getImageMetadata: build.query<UnsafeImageMetadata, string>({
      query: (image_name) => ({ url: `images/i/${image_name}/metadata` }),
      providesTags: (result, error, image_name) => [
        { type: 'ImageMetadata', id: image_name },
      ],
      transformResponse: (
        response: paths['/api/v1/images/i/{image_name}/metadata']['get']['responses']['200']['content']['application/json']
      ) => {
        const imageMetadataAndWorkflow: ImageMetadataAndWorkflow = {};
        if (response?.metadata) {
          const metadataResult = zCoreMetadata.safeParse(response.metadata);
          if (metadataResult.success) {
            imageMetadataAndWorkflow.metadata = metadataResult.data;
          } else {
            logger('images').warn('Problem parsing metadata');
          }
        }
        if (response?.workflow) {
          const workflowResult = zWorkflow.safeParse(response.workflow);
          if (workflowResult.success) {
            imageMetadataAndWorkflow.workflow = workflowResult.data;
          } else {
            logger('images').warn('Problem parsing workflow');
          }
        }
        return imageMetadataAndWorkflow;
      },
      keepUnusedDataFor: 86400, // 24 hours
    }),
    getImageMetadataFromFile: build.query<
@@ -1287,11 +1287,6 @@ export type components = {
     * @default true
     */
    use_cache?: boolean;
    /**
     * CLIP
     * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
     */
    clip?: components["schemas"]["ClipField"];
    /**
     * Skipped Layers
     * @description Number of layers to skip in text encoder
@@ -1304,6 +1299,11 @@ export type components = {
     * @enum {string}
     */
    type: "clip_skip";
    /**
     * CLIP
     * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count
     */
    clip?: components["schemas"]["ClipField"];
  };
  /**
   * ClipSkipInvocationOutput
@@ -3916,14 +3916,14 @@ export type components = {
  ImageMetadata: {
    /**
     * Metadata
     * @description The metadata associated with the image
     * @description The image's core metadata, if it was created in the Linear or Canvas UI
     */
    metadata?: Record<string, never>;
    /**
     * Workflow
     * @description The workflow associated with the image
     * Graph
     * @description The graph that created the image
     */
    workflow?: Record<string, never>;
    graph?: Record<string, never>;
  };
  /**
   * Multiply Images
@@ -7550,11 +7550,6 @@ export type components = {
     * @default false
     */
    use_cache?: boolean;
    /**
     * Image
     * @description The image to load
     */
    image?: components["schemas"]["ImageField"];
    /**
     * Metadata
     * @description Optional core metadata to be written to image
@@ -7566,6 +7561,11 @@ export type components = {
     * @enum {string}
     */
    type: "save_image";
    /**
     * Image
     * @description The image to load
     */
    image?: components["schemas"]["ImageField"];
  };
  /**
   * Scale Latents
@@ -7862,6 +7862,16 @@ export type components = {
     * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed.
     */
    session_id: string;
    /**
     * Field Values
     * @description The field values that were used for this queue item
     */
    field_values?: components["schemas"]["NodeFieldValue"][];
    /**
     * Queue Id
     * @description The id of the queue with which this item is associated
     */
    queue_id: string;
    /**
     * Error
     * @description The error message if this queue item errored
@@ -7887,16 +7897,6 @@ export type components = {
     * @description When this queue item was completed
     */
    completed_at?: string;
    /**
     * Queue Id
     * @description The id of the queue with which this item is associated
     */
    queue_id: string;
    /**
     * Field Values
     * @description The field values that were used for this queue item
     */
    field_values?: components["schemas"]["NodeFieldValue"][];
    /**
     * Session
     * @description The fully-populated session to be executed
@@ -7936,6 +7936,16 @@ export type components = {
     * @description The ID of the session associated with this queue item. The session doesn't exist in graph_executions until the queue item is executed.
     */
    session_id: string;
    /**
     * Field Values
     * @description The field values that were used for this queue item
     */
    field_values?: components["schemas"]["NodeFieldValue"][];
    /**
     * Queue Id
     * @description The id of the queue with which this item is associated
     */
    queue_id: string;
    /**
     * Error
     * @description The error message if this queue item errored
@@ -7961,16 +7971,6 @@ export type components = {
     * @description When this queue item was completed
     */
    completed_at?: string;
    /**
     * Queue Id
     * @description The id of the queue with which this item is associated
     */
    queue_id: string;
    /**
     * Field Values
     * @description The field values that were used for this queue item
     */
    field_values?: components["schemas"]["NodeFieldValue"][];
  };
  /** SessionQueueStatus */
  SessionQueueStatus: {
@@ -9095,12 +9095,6 @@ export type components = {
    /** Ui Order */
    ui_order?: number;
  };
  /**
   * ControlNetModelFormat
   * @description An enumeration.
   * @enum {string}
   */
  ControlNetModelFormat: "checkpoint" | "diffusers";
  /**
   * StableDiffusionOnnxModelFormat
   * @description An enumeration.
@@ -9113,18 +9107,18 @@ export type components = {
   * @enum {string}
   */
  StableDiffusion2ModelFormat: "checkpoint" | "diffusers";
  /**
   * StableDiffusion1ModelFormat
   * @description An enumeration.
   * @enum {string}
   */
  StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
  /**
   * CLIPVisionModelFormat
   * @description An enumeration.
   * @enum {string}
   */
  CLIPVisionModelFormat: "diffusers";
  /**
   * StableDiffusion1ModelFormat
   * @description An enumeration.
   * @enum {string}
   */
  StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
  /**
   * StableDiffusionXLModelFormat
   * @description An enumeration.
@@ -9137,6 +9131,12 @@ export type components = {
   * @enum {string}
   */
  IPAdapterModelFormat: "invokeai";
  /**
   * ControlNetModelFormat
   * @description An enumeration.
   * @enum {string}
   */
  ControlNetModelFormat: "checkpoint" | "diffusers";
};
responses: never;
parameters: never;