Merge branch 'main' into bugfix/prevent-cli-crash

commit 4fa5c963a1
Author: Lincoln Stein
Date: 2023-04-25 03:10:51 +01:00 (committed via GitHub)
388 changed files with 15353 additions and 1933 deletions

.github/CODEOWNERS

@@ -1,16 +1,16 @@
 # continuous integration
-/.github/workflows/ @mauwii @lstein @blessedcoolant
+/.github/workflows/ @lstein @blessedcoolant
 
 # documentation
-/docs/ @lstein @mauwii @tildebyte @blessedcoolant
-/mkdocs.yml @lstein @mauwii @blessedcoolant
+/docs/ @lstein @tildebyte @blessedcoolant
+/mkdocs.yml @lstein @blessedcoolant
 
 # nodes
 /invokeai/app/ @Kyle0654 @blessedcoolant
 
 # installation and configuration
-/pyproject.toml @mauwii @lstein @blessedcoolant
-/docker/ @mauwii @lstein @blessedcoolant
+/pyproject.toml @lstein @blessedcoolant
+/docker/ @lstein @blessedcoolant
 /scripts/ @ebr @lstein
 /installer/ @lstein @ebr
 /invokeai/assets @lstein @ebr
@@ -22,11 +22,11 @@
 /invokeai/backend @blessedcoolant @psychedelicious @lstein
 
 # generation, model management, postprocessing
-/invokeai/backend @keturn @damian0815 @lstein @blessedcoolant @jpphoto
+/invokeai/backend @damian0815 @lstein @blessedcoolant @jpphoto @gregghelt2
 
 # front ends
 /invokeai/frontend/CLI @lstein
-/invokeai/frontend/install @lstein @ebr @mauwii
+/invokeai/frontend/install @lstein @ebr
 /invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
 /invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
 /invokeai/frontend/web @psychedelicious @blessedcoolant

.gitignore

@@ -9,6 +9,8 @@ models/ldm/stable-diffusion-v1/model.ckpt
 configs/models.user.yaml
 config/models.user.yml
 invokeai.init
+.version
+.last_model
 
 # ignore the Anaconda/Miniconda installer used while building Docker image
 anaconda.sh

(file path not shown)

@@ -148,6 +148,11 @@ not supported.
 pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
 ```
 
+_For non-GPU systems:_
+```terminal
+pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
+```
+
 _For Macintoshes, either Intel or M1/M2:_
 ```sh

(file path not shown)

@@ -32,7 +32,7 @@ turned on and off on the command line using `--nsfw_checker` and
 At installation time, InvokeAI will ask whether the checker should be
 activated by default (neither argument given on the command line). The
 response is stored in the InvokeAI initialization file (usually
-`.invokeai` in your home directory). You can change the default at any
+`invokeai.init` in your home directory). You can change the default at any
 time by opening this file in a text editor and commenting or
 uncommenting the line `--nsfw_checker`.

(file path not shown)

@@ -3,6 +3,8 @@
 import os
 from argparse import Namespace
 
+from invokeai.app.services.metadata import PngMetadataService, MetadataServiceBase
+
 from ..services.default_graphs import create_system_graphs
 from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@@ -60,7 +62,9 @@ class ApiDependencies:
         latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents'))
-        images = DiskImageStorage(f'{output_folder}/images')
+
+        metadata = PngMetadataService()
+        images = DiskImageStorage(f'{output_folder}/images', metadata_service=metadata)
 
         # TODO: build a file/path manager?
         db_location = os.path.join(output_folder, "invokeai.db")
@@ -70,6 +74,7 @@ class ApiDependencies:
             events=events,
             latents=latents,
            images=images,
+            metadata=metadata,
             queue=MemoryInvocationQueue(),
             graph_library=SqliteItemStorage[LibraryGraph](
                 filename=db_location, table_name="graphs"

(file path not shown)

@@ -45,7 +45,7 @@ class FastAPIEventService(EventServiceBase):
                 )
 
             except Empty:
-                await asyncio.sleep(0.001)
+                await asyncio.sleep(0.1)
                 pass
 
             except asyncio.CancelledError as e:

(file path not shown)

@@ -1,7 +1,19 @@
+from typing import Optional
+
 from pydantic import BaseModel, Field
 
 from invokeai.app.models.image import ImageType
-from invokeai.app.models.metadata import ImageMetadata
+from invokeai.app.services.metadata import InvokeAIMetadata
+
+
+class ImageResponseMetadata(BaseModel):
+    """An image's metadata. Used only in HTTP responses."""
+
+    created: int = Field(description="The creation timestamp of the image")
+    width: int = Field(description="The width of the image in pixels")
+    height: int = Field(description="The height of the image in pixels")
+    invokeai: Optional[InvokeAIMetadata] = Field(
+        description="The image's InvokeAI-specific metadata"
+    )
 
 
 class ImageResponse(BaseModel):
@@ -11,4 +23,12 @@ class ImageResponse(BaseModel):
     image_name: str = Field(description="The name of the image")
     image_url: str = Field(description="The url of the image")
     thumbnail_url: str = Field(description="The url of the image's thumbnail")
-    metadata: ImageMetadata = Field(description="The image's metadata")
+    metadata: ImageResponseMetadata = Field(description="The image's metadata")
+
+
+class ProgressImage(BaseModel):
+    """The progress image sent intermittently during processing"""
+
+    width: int = Field(description="The effective width of the image in pixels")
+    height: int = Field(description="The effective height of the image in pixels")
+    dataURL: str = Field(description="The image data as a b64 data URL")
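For reference, a minimal sketch of the new response shape defined above (all field values here are made up; the only behaviour taken from this hunk is that the InvokeAI-specific metadata is optional):

```python
# Hypothetical illustration of the models added in this hunk.
from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
from invokeai.app.models.image import ImageType

response = ImageResponse(
    image_type=ImageType.UPLOAD,
    image_name="example.png",                       # made-up name
    image_url="api/v1/images/uploads/example.png",
    thumbnail_url="api/v1/images/uploads/thumbnails/example.webp",
    metadata=ImageResponseMetadata(
        created=1682388000,                         # unix timestamp
        width=512,
        height=512,
        invokeai=None,                              # InvokeAI-specific metadata is optional
    ),
)
print(response.json())
```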

(file path not shown)

@@ -1,13 +1,17 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
+import io
 from datetime import datetime, timezone
+import json
+import os
+from typing import Any
 import uuid
 
-from fastapi import Path, Query, Request, UploadFile
+from fastapi import HTTPException, Path, Query, Request, UploadFile
 from fastapi.responses import FileResponse, Response
 from fastapi.routing import APIRouter
 from PIL import Image
 
-from invokeai.app.api.models.images import ImageResponse
+from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
+from invokeai.app.services.metadata import InvokeAIMetadata
 from invokeai.app.services.item_storage import PaginatedResults
-
 from ...services.image_storage import ImageType
@@ -15,70 +19,110 @@ from ..dependencies import ApiDependencies
 images_router = APIRouter(prefix="/v1/images", tags=["images"])
 
 
 @images_router.get("/{image_type}/{image_name}", operation_id="get_image")
 async def get_image(
     image_type: ImageType = Path(description="The type of image to get"),
     image_name: str = Path(description="The name of the image to get"),
-):
+) -> FileResponse | Response:
     """Gets a result"""
-    # TODO: This is not really secure at all. At least make sure only output results are served
-    filename = ApiDependencies.invoker.services.images.get_path(image_type, image_name)
-    return FileResponse(filename)
 
+    path = ApiDependencies.invoker.services.images.get_path(
+        image_type=image_type, image_name=image_name
+    )
 
-@images_router.get("/{image_type}/thumbnails/{image_name}", operation_id="get_thumbnail")
+    if ApiDependencies.invoker.services.images.validate_path(path):
+        return FileResponse(path)
+    else:
+        raise HTTPException(status_code=404)
+
+
+@images_router.get(
+    "/{image_type}/thumbnails/{image_name}", operation_id="get_thumbnail"
+)
 async def get_thumbnail(
     image_type: ImageType = Path(description="The type of image to get"),
     image_name: str = Path(description="The name of the image to get"),
-):
+) -> FileResponse | Response:
     """Gets a thumbnail"""
-    # TODO: This is not really secure at all. At least make sure only output results are served
-    filename = ApiDependencies.invoker.services.images.get_path(image_type, 'thumbnails/' + image_name)
-    return FileResponse(filename)
+
+    path = ApiDependencies.invoker.services.images.get_path(
+        image_type=image_type, image_name=image_name, is_thumbnail=True
+    )
+
+    if ApiDependencies.invoker.services.images.validate_path(path):
+        return FileResponse(path)
+    else:
+        raise HTTPException(status_code=404)
 
 
 @images_router.post(
     "/uploads/",
     operation_id="upload_image",
     responses={
-        201: {"description": "The image was uploaded successfully"},
-        404: {"description": "Session not found"},
+        201: {
+            "description": "The image was uploaded successfully",
+            "model": ImageResponse,
+        },
+        415: {"description": "Image upload failed"},
     },
+    status_code=201,
 )
-async def upload_image(file: UploadFile, request: Request):
+async def upload_image(
+    file: UploadFile, request: Request, response: Response
+) -> ImageResponse:
     if not file.content_type.startswith("image"):
-        return Response(status_code=415)
+        raise HTTPException(status_code=415, detail="Not an image")
 
     contents = await file.read()
     try:
-        im = Image.open(contents)
+        img = Image.open(io.BytesIO(contents))
     except:
         # Error opening the image
-        return Response(status_code=415)
+        raise HTTPException(status_code=415, detail="Failed to read image")
 
     filename = f"{uuid.uuid4()}_{str(int(datetime.now(timezone.utc).timestamp()))}.png"
-    ApiDependencies.invoker.services.images.save(ImageType.UPLOAD, filename, im)
-    return Response(
-        status_code=201,
-        headers={
-            "Location": request.url_for(
-                "get_image", image_type=ImageType.UPLOAD.value, image_name=filename
-            )
-        },
+
+    (image_path, thumbnail_path, ctime) = ApiDependencies.invoker.services.images.save(
+        ImageType.UPLOAD, filename, img
     )
 
+    invokeai_metadata = ApiDependencies.invoker.services.metadata.get_metadata(img)
+
+    res = ImageResponse(
+        image_type=ImageType.UPLOAD,
+        image_name=filename,
+        image_url=f"api/v1/images/{ImageType.UPLOAD.value}/{filename}",
+        thumbnail_url=f"api/v1/images/{ImageType.UPLOAD.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
+        metadata=ImageResponseMetadata(
+            created=ctime,
+            width=img.width,
+            height=img.height,
+            invokeai=invokeai_metadata,
+        ),
+    )
+
+    response.status_code = 201
+    response.headers["Location"] = request.url_for(
+        "get_image", image_type=ImageType.UPLOAD.value, image_name=filename
+    )
+
+    return res
+
 
 @images_router.get(
     "/",
     operation_id="list_images",
     responses={200: {"model": PaginatedResults[ImageResponse]}},
 )
 async def list_images(
-    image_type: ImageType = Query(default=ImageType.RESULT, description="The type of images to get"),
+    image_type: ImageType = Query(
+        default=ImageType.RESULT, description="The type of images to get"
+    ),
     page: int = Query(default=0, description="The page of images to get"),
     per_page: int = Query(default=10, description="The number of images per page"),
 ) -> PaginatedResults[ImageResponse]:
     """Gets a list of images"""
-    result = ApiDependencies.invoker.services.images.list(
-        image_type, page, per_page
-    )
+
+    result = ApiDependencies.invoker.services.images.list(image_type, page, per_page)
+
     return result
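A hedged usage sketch of the reworked upload endpoint: on success it now returns 201 with an `ImageResponse` body plus a `Location` header, and a non-image payload is rejected with 415. The host, port, and file name below are assumptions.

```python
import requests  # any HTTP client works; requests is only an example

with open("photo.png", "rb") as f:  # assumed local file
    resp = requests.post(
        "http://127.0.0.1:9090/api/v1/images/uploads/",  # assumed host/port
        files={"file": ("photo.png", f, "image/png")},
    )

if resp.status_code == 201:
    body = resp.json()  # an ImageResponse
    print(body["image_url"], body["metadata"]["width"], body["metadata"]["height"])
    print("Location:", resp.headers["Location"])
else:
    print("upload rejected:", resp.status_code)
```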

(file path not shown)

@@ -13,6 +13,8 @@ from typing import (
 from pydantic import BaseModel
 from pydantic.fields import Field
 
+from invokeai.app.services.metadata import PngMetadataService
+
 from .services.default_graphs import create_system_graphs
 from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@@ -200,6 +202,8 @@ def invoke_cli():
     events = EventServiceBase()
 
+    metadata = PngMetadataService()
+
     output_folder = os.path.abspath(
         os.path.join(os.path.dirname(__file__), "../../../outputs")
     )
@@ -211,7 +215,8 @@ def invoke_cli():
         model_manager=model_manager,
         events=events,
         latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents')),
-        images=DiskImageStorage(f'{output_folder}/images'),
+        images=DiskImageStorage(f'{output_folder}/images', metadata_service=metadata),
+        metadata=metadata,
         queue=MemoryInvocationQueue(),
         graph_library=SqliteItemStorage[LibraryGraph](
             filename=db_location, table_name="graphs"

(file path not shown)

@@ -95,7 +95,7 @@ class UIConfig(TypedDict, total=False):
         ],
     ]
     tags: List[str]
+    title: str
 
 
 class CustomisedSchemaExtra(TypedDict):
     ui: UIConfig

(file path not shown)

@@ -1,16 +1,17 @@
 # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
 
-from typing import Literal
+from typing import Literal, Optional
 
-import cv2 as cv
 import numpy as np
 import numpy.random
-from PIL import Image, ImageOps
 from pydantic import Field
 
-from ..services.image_storage import ImageType
-from .baseinvocation import BaseInvocation, InvocationContext, BaseInvocationOutput
-from .image import ImageField, ImageOutput
+from .baseinvocation import (
+    BaseInvocation,
+    InvocationConfig,
+    InvocationContext,
+    BaseInvocationOutput,
+)
 
 
 class IntCollectionOutput(BaseInvocationOutput):
@@ -33,7 +34,9 @@ class RangeInvocation(BaseInvocation):
     step: int = Field(default=1, description="The step of the range")
 
     def invoke(self, context: InvocationContext) -> IntCollectionOutput:
-        return IntCollectionOutput(collection=list(range(self.start, self.stop, self.step)))
+        return IntCollectionOutput(
+            collection=list(range(self.start, self.stop, self.step))
+        )
 
 
 class RandomRangeInvocation(BaseInvocation):
@@ -43,8 +46,19 @@ class RandomRangeInvocation(BaseInvocation):
     # Inputs
     low: int = Field(default=0, description="The inclusive low value")
-    high: int = Field(default=np.iinfo(np.int32).max, description="The exclusive high value")
+    high: int = Field(
+        default=np.iinfo(np.int32).max, description="The exclusive high value"
+    )
     size: int = Field(default=1, description="The number of values to generate")
+    seed: Optional[int] = Field(
+        ge=0,
+        le=np.iinfo(np.int32).max,
+        description="The seed for the RNG",
+        default_factory=lambda: numpy.random.randint(0, np.iinfo(np.int32).max),
+    )
 
     def invoke(self, context: InvocationContext) -> IntCollectionOutput:
-        return IntCollectionOutput(collection=list(numpy.random.randint(self.low, self.high, size=self.size)))
+        rng = np.random.default_rng(self.seed)
+        return IntCollectionOutput(
+            collection=list(rng.integers(low=self.low, high=self.high, size=self.size))
+        )
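The new `seed` field makes `RandomRangeInvocation` reproducible. A minimal sketch of the RNG behaviour it relies on, using plain NumPy outside of the node graph:

```python
import numpy as np

def random_range(low: int, high: int, size: int, seed: int) -> list:
    # Mirrors the invoke() body above: a seeded Generator gives the same
    # collection every time the same seed is supplied.
    rng = np.random.default_rng(seed)
    return list(rng.integers(low=low, high=high, size=size))

assert random_range(0, 10, 5, seed=42) == random_range(0, 10, 5, seed=42)
print(random_range(0, 10, 5, seed=42))
```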

(file path not shown)

@@ -9,7 +9,7 @@ from pydantic import BaseModel, Field
 from invokeai.app.models.image import ImageField, ImageType
 from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
-from .image import ImageOutput
+from .image import ImageOutput, build_image_output
 
 
 class CvInvocationConfig(BaseModel):
@@ -56,7 +56,14 @@ class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, image_inpainted)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, image_inpainted, metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=image_inpainted,
         )

(file path not shown)

@@ -9,13 +9,12 @@ from torch import Tensor
 from pydantic import BaseModel, Field
 
 from invokeai.app.models.image import ImageField, ImageType
-from invokeai.app.invocations.util.get_model import choose_model
+from invokeai.app.invocations.util.choose_model import choose_model
 from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
-from .image import ImageOutput
+from .image import ImageOutput, build_image_output
 from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
 from ...backend.stable_diffusion import PipelineIntermediateState
-from ..models.exceptions import CanceledException
-from ..util.step_callback import diffusers_step_callback_adapter
+from ..util.step_callback import stable_diffusion_step_callback
 
 SAMPLER_NAME_VALUES = Literal[tuple(InvokeAIGenerator.schedulers())]
@@ -58,28 +57,31 @@ class TextToImageInvocation(BaseInvocation, SDImageInvocation):
     # TODO: pass this an emitter method or something? or a session for dispatching?
     def dispatch_progress(
-        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+        self,
+        context: InvocationContext,
+        source_node_id: str,
+        intermediate_state: PipelineIntermediateState,
     ) -> None:
-        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
-            raise CanceledException
-
-        step = intermediate_state.step
-        if intermediate_state.predicted_original is not None:
-            # Some schedulers report not only the noisy latents at the current timestep,
-            # but also their estimate so far of what the de-noised latents will be.
-            sample = intermediate_state.predicted_original
-        else:
-            sample = intermediate_state.latents
-
-        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+        stable_diffusion_step_callback(
+            context=context,
+            intermediate_state=intermediate_state,
+            node=self.dict(),
+            source_node_id=source_node_id,
+        )
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         # Handle invalid model parameter
         model = choose_model(context.services.model_manager, self.model)
 
+        # Get the source node id (we are invoking the prepared node)
+        graph_execution_state = context.services.graph_execution_manager.get(
+            context.graph_execution_state_id
+        )
+        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
+
         outputs = Txt2Img(model).generate(
             prompt=self.prompt,
-            step_callback=partial(self.dispatch_progress, context),
+            step_callback=partial(self.dispatch_progress, context, source_node_id),
             **self.dict(
                 exclude={"prompt"}
             ),  # Shorthand for passing all of the parameters above manually
@@ -95,9 +97,18 @@ class TextToImageInvocation(BaseInvocation, SDImageInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, generate_output.image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(
+            image_type, image_name, generate_output.image, metadata
+        )
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=generate_output.image,
         )
@@ -117,20 +128,17 @@ class ImageToImageInvocation(TextToImageInvocation):
     )
 
     def dispatch_progress(
-        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+        self,
+        context: InvocationContext,
+        source_node_id: str,
+        intermediate_state: PipelineIntermediateState,
     ) -> None:
-        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
-            raise CanceledException
-
-        step = intermediate_state.step
-        if intermediate_state.predicted_original is not None:
-            # Some schedulers report not only the noisy latents at the current timestep,
-            # but also their estimate so far of what the de-noised latents will be.
-            sample = intermediate_state.predicted_original
-        else:
-            sample = intermediate_state.latents
-
-        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+        stable_diffusion_step_callback(
+            context=context,
+            intermediate_state=intermediate_state,
+            node=self.dict(),
+            source_node_id=source_node_id,
+        )
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = (
@@ -145,15 +153,21 @@ class ImageToImageInvocation(TextToImageInvocation):
         # Handle invalid model parameter
         model = choose_model(context.services.model_manager, self.model)
 
+        # Get the source node id (we are invoking the prepared node)
+        graph_execution_state = context.services.graph_execution_manager.get(
+            context.graph_execution_state_id
+        )
+        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
+
         outputs = Img2Img(model).generate(
             prompt=self.prompt,
             init_image=image,
             init_mask=mask,
-            step_callback=partial(self.dispatch_progress, context),
+            step_callback=partial(self.dispatch_progress, context, source_node_id),
             **self.dict(
                 exclude={"prompt", "image", "mask"}
             ),  # Shorthand for passing all of the parameters above manually
         )
 
         # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
         # each time it is called. We only need the first one.
@@ -168,11 +182,19 @@ class ImageToImageInvocation(TextToImageInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, result_image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
-        )
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, result_image, metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=result_image,
+        )
 
 
 class InpaintInvocation(ImageToImageInvocation):
     """Generates an image using inpaint."""
@@ -188,20 +210,17 @@ class InpaintInvocation(ImageToImageInvocation):
     )
 
     def dispatch_progress(
-        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+        self,
+        context: InvocationContext,
+        source_node_id: str,
+        intermediate_state: PipelineIntermediateState,
    ) -> None:
-        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
-            raise CanceledException
-
-        step = intermediate_state.step
-        if intermediate_state.predicted_original is not None:
-            # Some schedulers report not only the noisy latents at the current timestep,
-            # but also their estimate so far of what the de-noised latents will be.
-            sample = intermediate_state.predicted_original
-        else:
-            sample = intermediate_state.latents
-
-        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+        stable_diffusion_step_callback(
+            context=context,
+            intermediate_state=intermediate_state,
+            node=self.dict(),
+            source_node_id=source_node_id,
+        )
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = (
@@ -220,15 +239,21 @@ class InpaintInvocation(ImageToImageInvocation):
         # Handle invalid model parameter
         model = choose_model(context.services.model_manager, self.model)
 
+        # Get the source node id (we are invoking the prepared node)
+        graph_execution_state = context.services.graph_execution_manager.get(
+            context.graph_execution_state_id
+        )
+        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
+
         outputs = Inpaint(model).generate(
             prompt=self.prompt,
             init_img=image,
             init_mask=mask,
-            step_callback=partial(self.dispatch_progress, context),
+            step_callback=partial(self.dispatch_progress, context, source_node_id),
             **self.dict(
                 exclude={"prompt", "image", "mask"}
             ),  # Shorthand for passing all of the parameters above manually
         )
 
         # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
         # each time it is called. We only need the first one.
@@ -243,7 +268,14 @@ class InpaintInvocation(ImageToImageInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, result_image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, result_image, metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=result_image,
         )
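The inline cancellation/progress logic removed above is now centralised in `stable_diffusion_step_callback`. Its implementation is not part of this diff; the sketch below is an assumption reconstructed from the deleted code and the new `emit_generator_progress` signature, and the real helper in `invokeai.app.util.step_callback` may differ (for example, it converts the sample into an actual `ProgressImage`).

```python
# Assumed shape of the shared step callback; not the code from this commit.
from invokeai.app.models.exceptions import CanceledException

def stable_diffusion_step_callback(context, intermediate_state, node, source_node_id):
    # Stop generating as soon as the session has been cancelled.
    if context.services.queue.is_canceled(context.graph_execution_state_id):
        raise CanceledException

    # Some schedulers also report their current estimate of the de-noised
    # latents; prefer that for the preview when it is available.
    if intermediate_state.predicted_original is not None:
        sample = intermediate_state.predicted_original
    else:
        sample = intermediate_state.latents

    context.services.events.emit_generator_progress(
        graph_execution_state_id=context.graph_execution_state_id,
        node=node,
        source_node_id=source_node_id,
        progress_image=None,  # the real helper turns `sample` into a ProgressImage
        step=intermediate_state.step,
        total_steps=node["steps"],  # assumes the node dict carries its `steps` field
    )
```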

(file path not shown)

@@ -1,6 +1,5 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
 
-from datetime import datetime, timezone
 from typing import Literal, Optional
 
 import numpy
@@ -8,8 +7,12 @@ from PIL import Image, ImageFilter, ImageOps
 from pydantic import BaseModel, Field
 
 from ..models.image import ImageField, ImageType
-from ..services.invocation_services import InvocationServices
-from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
+from .baseinvocation import (
+    BaseInvocation,
+    BaseInvocationOutput,
+    InvocationContext,
+    InvocationConfig,
+)
 
 
 class PILInvocationConfig(BaseModel):
@@ -22,50 +25,73 @@ class PILInvocationConfig(BaseModel):
         },
     }
 
 
 class ImageOutput(BaseInvocationOutput):
     """Base class for invocations that output an image"""
-    #fmt: off
+
+    # fmt: off
     type: Literal["image"] = "image"
     image: ImageField = Field(default=None, description="The output image")
-    #fmt: on
+    width: Optional[int] = Field(default=None, description="The width of the image in pixels")
+    height: Optional[int] = Field(default=None, description="The height of the image in pixels")
+    # fmt: on
 
     class Config:
         schema_extra = {
-            'required': [
-                'type',
-                'image',
-            ]
+            "required": ["type", "image", "width", "height", "mode"]
         }
 
 
+def build_image_output(
+    image_type: ImageType, image_name: str, image: Image.Image
+) -> ImageOutput:
+    """Builds an ImageOutput and its ImageField"""
+    image_field = ImageField(
+        image_name=image_name,
+        image_type=image_type,
+    )
+    return ImageOutput(
+        image=image_field,
+        width=image.width,
+        height=image.height,
+        mode=image.mode,
+    )
+
+
 class MaskOutput(BaseInvocationOutput):
     """Base class for invocations that output a mask"""
-    #fmt: off
+
+    # fmt: off
     type: Literal["mask"] = "mask"
     mask: ImageField = Field(default=None, description="The output mask")
-    #fmt: on
+    # fmt: on
 
     class Config:
         schema_extra = {
-            'required': [
-                'type',
-                'mask',
+            "required": [
+                "type",
+                "mask",
             ]
         }
 
 
+# TODO: this isn't really necessary anymore
 class LoadImageInvocation(BaseInvocation):
-    """Load an image from a filename and provide it as output."""
-    #fmt: off
+    """Load an image and provide it as output."""
+
+    # fmt: off
     type: Literal["load_image"] = "load_image"
 
     # Inputs
     image_type: ImageType = Field(description="The type of the image")
     image_name: str = Field(description="The name of the image")
-    #fmt: on
+    # fmt: on
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
-        return ImageOutput(
-            image=ImageField(image_type=self.image_type, image_name=self.image_name)
+        image = context.services.images.get(self.image_type, self.image_name)
+
+        return build_image_output(
+            image_type=self.image_type,
+            image_name=self.image_name,
+            image=image,
         )
@@ -86,16 +112,17 @@ class ShowImageInvocation(BaseInvocation):
 
         # TODO: how to handle failure?
 
-        return ImageOutput(
-            image=ImageField(
-                image_type=self.image.image_type, image_name=self.image.image_name
-            )
+        return build_image_output(
+            image_type=self.image.image_type,
+            image_name=self.image.image_name,
+            image=image,
         )
 
 
 class CropImageInvocation(BaseInvocation, PILInvocationConfig):
     """Crops an image to a specified box. The box can be outside of the image."""
-    #fmt: off
+
+    # fmt: off
     type: Literal["crop"] = "crop"
 
     # Inputs
@@ -104,7 +131,7 @@ class CropImageInvocation(BaseInvocation, PILInvocationConfig):
     y: int = Field(default=0, description="The top y coordinate of the crop rectangle")
     width: int = Field(default=512, gt=0, description="The width of the crop rectangle")
     height: int = Field(default=512, gt=0, description="The height of the crop rectangle")
-    #fmt: on
+    # fmt: on
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get(
@@ -120,15 +147,23 @@ class CropImageInvocation(BaseInvocation, PILInvocationConfig):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, image_crop)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, image_crop, metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=image_crop,
         )
 
 
 class PasteImageInvocation(BaseInvocation, PILInvocationConfig):
     """Pastes an image into another image."""
-    #fmt: off
+
+    # fmt: off
     type: Literal["paste"] = "paste"
 
     # Inputs
@@ -137,7 +172,7 @@ class PasteImageInvocation(BaseInvocation, PILInvocationConfig):
     mask: Optional[ImageField] = Field(default=None, description="The mask to use when pasting")
     x: int = Field(default=0, description="The left x coordinate at which to paste the image")
     y: int = Field(default=0, description="The top y coordinate at which to paste the image")
-    #fmt: on
+    # fmt: on
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         base_image = context.services.images.get(
@@ -170,21 +205,29 @@ class PasteImageInvocation(BaseInvocation, PILInvocationConfig):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, new_image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, new_image, metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=new_image,
         )
 
 
 class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
     """Extracts the alpha channel of an image as a mask."""
-    #fmt: off
+
+    # fmt: off
     type: Literal["tomask"] = "tomask"
 
     # Inputs
     image: ImageField = Field(default=None, description="The image to create the mask from")
     invert: bool = Field(default=False, description="Whether or not to invert the mask")
-    #fmt: on
+    # fmt: on
 
     def invoke(self, context: InvocationContext) -> MaskOutput:
         image = context.services.images.get(
@@ -199,21 +242,26 @@ class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, image_mask)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, image_mask, metadata)
+
         return MaskOutput(mask=ImageField(image_type=image_type, image_name=image_name))
 
 
 class BlurInvocation(BaseInvocation, PILInvocationConfig):
     """Blurs an image"""
-    #fmt: off
+
+    # fmt: off
     type: Literal["blur"] = "blur"
 
     # Inputs
     image: ImageField = Field(default=None, description="The image to blur")
     radius: float = Field(default=8.0, ge=0, description="The blur radius")
     blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur")
-    #fmt: on
+    # fmt: on
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get(
@@ -231,22 +279,28 @@ class BlurInvocation(BaseInvocation, PILInvocationConfig):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, blur_image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, blur_image, metadata)
+
+        return build_image_output(
+            image_type=image_type, image_name=image_name, image=blur_image
         )
 
 
 class LerpInvocation(BaseInvocation, PILInvocationConfig):
     """Linear interpolation of all pixels of an image"""
-    #fmt: off
+
+    # fmt: off
     type: Literal["lerp"] = "lerp"
 
     # Inputs
     image: ImageField = Field(default=None, description="The image to lerp")
     min: int = Field(default=0, ge=0, le=255, description="The minimum output value")
     max: int = Field(default=255, ge=0, le=255, description="The maximum output value")
-    #fmt: on
+    # fmt: on
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get(
@@ -262,22 +316,28 @@ class LerpInvocation(BaseInvocation, PILInvocationConfig):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, lerp_image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, lerp_image, metadata)
+
+        return build_image_output(
+            image_type=image_type, image_name=image_name, image=lerp_image
        )
 
 
 class InverseLerpInvocation(BaseInvocation, PILInvocationConfig):
     """Inverse linear interpolation of all pixels of an image"""
-    #fmt: off
+
+    # fmt: off
     type: Literal["ilerp"] = "ilerp"
 
     # Inputs
     image: ImageField = Field(default=None, description="The image to lerp")
     min: int = Field(default=0, ge=0, le=255, description="The minimum input value")
     max: int = Field(default=255, ge=0, le=255, description="The maximum input value")
-    #fmt: on
+    # fmt: on
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = context.services.images.get(
@@ -298,7 +358,12 @@ class InverseLerpInvocation(BaseInvocation, PILInvocationConfig):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, ilerp_image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, ilerp_image, metadata)
+
+        return build_image_output(
+            image_type=image_type, image_name=image_name, image=ilerp_image
         )

(file path not shown)

@@ -5,9 +5,9 @@ from typing import Literal, Optional
 from pydantic import BaseModel, Field
 import torch
 
-from invokeai.app.models.exceptions import CanceledException
-from invokeai.app.invocations.util.get_model import choose_model
+from invokeai.app.invocations.util.choose_model import choose_model
 
-from invokeai.app.util.step_callback import diffusers_step_callback_adapter
+from invokeai.app.util.step_callback import stable_diffusion_step_callback
 
 from ...backend.model_management.model_manager import ModelManager
 from ...backend.util.devices import choose_torch_device, torch_dtype
@@ -19,7 +19,7 @@ from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationCont
 import numpy as np
 from ..services.image_storage import ImageType
 from .baseinvocation import BaseInvocation, InvocationContext
-from .image import ImageField, ImageOutput
+from .image import ImageField, ImageOutput, build_image_output
 from ...backend.stable_diffusion import PipelineIntermediateState
 from diffusers.schedulers import SchedulerMixin as Scheduler
 import diffusers
@@ -31,6 +31,8 @@ class LatentsField(BaseModel):
     latents_name: Optional[str] = Field(default=None, description="The name of the latents")
 
+    class Config:
+        schema_extra = {"required": ["latents_name"]}
 
 class LatentsOutput(BaseInvocationOutput):
     """Base class for invocations that output latents"""
@@ -170,21 +172,14 @@ class TextToLatentsInvocation(BaseInvocation):
     # TODO: pass this an emitter method or something? or a session for dispatching?
     def dispatch_progress(
-        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+        self, context: InvocationContext, source_node_id: str, intermediate_state: PipelineIntermediateState
     ) -> None:
-        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
-            raise CanceledException
-
-        step = intermediate_state.step
-        if intermediate_state.predicted_original is not None:
-            # Some schedulers report not only the noisy latents at the current timestep,
-            # but also their estimate so far of what the de-noised latents will be.
-            sample = intermediate_state.predicted_original
-        else:
-            sample = intermediate_state.latents
-
-        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+        stable_diffusion_step_callback(
+            context=context,
+            intermediate_state=intermediate_state,
+            node=self.dict(),
+            source_node_id=source_node_id,
+        )
 
     def get_model(self, model_manager: ModelManager) -> StableDiffusionGeneratorPipeline:
         model_info = choose_model(model_manager, self.model)
@@ -231,8 +226,12 @@ class TextToLatentsInvocation(BaseInvocation):
     def invoke(self, context: InvocationContext) -> LatentsOutput:
         noise = context.services.latents.get(self.noise.latents_name)
 
+        # Get the source node id (we are invoking the prepared node)
+        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
+        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
+
         def step_callback(state: PipelineIntermediateState):
-            self.dispatch_progress(context, state)
+            self.dispatch_progress(context, source_node_id, state)
 
         model = self.get_model(context.services.model_manager)
         conditioning_data = self.get_conditioning_data(model)
@@ -281,58 +280,12 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
         noise = context.services.latents.get(self.noise.latents_name)
         latent = context.services.latents.get(self.latents.latents_name)
 
-        def step_callback(state: PipelineIntermediateState):
-            self.dispatch_progress(context, state)
-
-        model = self.get_model(context.services.model_manager)
-        conditioning_data = self.get_conditioning_data(model)
-
-        # TODO: Verify the noise is the right size
-
-        initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
-            latent, device=model.device, dtype=latent.dtype
-        )
-
-        timesteps, _ = model.get_img2img_timesteps(
-            self.steps,
-            self.strength,
-            device=model.device,
-        )
-
-        result_latents, result_attention_map_saver = model.latents_from_embeddings(
-            latents=initial_latents,
-            timesteps=timesteps,
-            noise=noise,
-            num_inference_steps=self.steps,
-            conditioning_data=conditioning_data,
-            callback=step_callback
-        )
-
-        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
-        torch.cuda.empty_cache()
-
-        name = f'{context.graph_execution_state_id}__{self.id}'
-        context.services.latents.set(name, result_latents)
-        return LatentsOutput(
-            latents=LatentsField(latents_name=name)
-        )
-
-
-class LatentsToLatentsInvocation(TextToLatentsInvocation):
-    """Generates latents using latents as base image."""
-
-    type: Literal["l2l"] = "l2l"
-
-    # Inputs
-    latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
-    strength: float = Field(default=0.5, description="The strength of the latents to use")
-
-    def invoke(self, context: InvocationContext) -> LatentsOutput:
-        noise = context.services.latents.get(self.noise.latents_name)
-        latent = context.services.latents.get(self.latents.latents_name)
+        # Get the source node id (we are invoking the prepared node)
+        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
+        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
 
         def step_callback(state: PipelineIntermediateState):
-            self.dispatch_progress(context, state)
+            self.dispatch_progress(context, source_node_id, state)
 
         model = self.get_model(context.services.model_manager)
         conditioning_data = self.get_conditioning_data(model)
@@ -405,7 +358,14 @@ class LatentsToImageInvocation(BaseInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, image)
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, image, metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=image
         )

(file path not shown)

@@ -1,12 +1,11 @@
-from datetime import datetime, timezone
 from typing import Literal, Union
 
 from pydantic import Field
 
 from invokeai.app.models.image import ImageField, ImageType
-from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
-from .image import ImageOutput
+from .image import ImageOutput, build_image_output
 
 
 class RestoreFaceInvocation(BaseInvocation):
     """Restores faces in an image."""
@@ -44,7 +43,14 @@ class RestoreFaceInvocation(BaseInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
        )
-        context.services.images.save(image_type, image_name, results[0][0])
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, results[0][0], metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=results[0][0]
         )

(file path not shown)

@@ -1,14 +1,12 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
 
-from datetime import datetime, timezone
 from typing import Literal, Union
 
 from pydantic import Field
 
 from invokeai.app.models.image import ImageField, ImageType
-from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
-from .image import ImageOutput
+from .image import ImageOutput, build_image_output
 
 
 class UpscaleInvocation(BaseInvocation):
@@ -49,7 +47,14 @@ class UpscaleInvocation(BaseInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, results[0][0])
-        return ImageOutput(
-            image=ImageField(image_type=image_type, image_name=image_name)
+
+        metadata = context.services.metadata.build_metadata(
+            session_id=context.graph_execution_state_id, node=self
+        )
+
+        context.services.images.save(image_type, image_name, results[0][0], metadata)
+
+        return build_image_output(
+            image_type=image_type,
+            image_name=image_name,
+            image=results[0][0]
         )

(file path not shown)

@@ -1,11 +1,14 @@
-from invokeai.app.invocations.baseinvocation import InvocationContext
 from invokeai.backend.model_management.model_manager import ModelManager
 
 
 def choose_model(model_manager: ModelManager, model_name: str):
     """Returns the default model if the `model_name` not a valid model, else returns the selected model."""
     if model_manager.valid_model(model_name):
-        return model_manager.get_model(model_name)
+        model = model_manager.get_model(model_name)
     else:
-        print(f"* Warning: '{model_name}' is not a valid model name. Using default model instead.")
-        return model_manager.get_model()
+        model = model_manager.get_model()
+        print(
+            f"* Warning: '{model_name}' is not a valid model name. Using default model \'{model['model_name']}\' instead."
+        )
+
+    return model

(file path not shown)

@@ -9,6 +9,14 @@ class ImageType(str, Enum):
     UPLOAD = "uploads"
 
 
+def is_image_type(obj):
+    try:
+        ImageType(obj)
+    except ValueError:
+        return False
+    return True
+
+
 class ImageField(BaseModel):
     """An image field used for passing image objects between invocations"""
@@ -18,9 +26,4 @@ class ImageField(BaseModel):
     image_name: Optional[str] = Field(default=None, description="The name of the image")
 
     class Config:
-        schema_extra = {
-            "required": [
-                "image_type",
-                "image_name",
-            ]
-        }
+        schema_extra = {"required": ["image_type", "image_name"]}
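A quick illustration of the new `is_image_type` helper: it accepts an enum value (or member) and returns False instead of raising for anything else. The import path is assumed from the other imports of `ImageType` in this diff.

```python
from invokeai.app.models.image import ImageType, is_image_type

assert is_image_type("uploads")          # the string value of ImageType.UPLOAD
assert is_image_type(ImageType.UPLOAD)   # enum members are accepted as well
assert not is_image_type("not-a-type")   # invalid input returns False rather than raising
```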

(file path not shown)

@@ -1,11 +0,0 @@
-from typing import Optional
-
-from pydantic import BaseModel, Field
-
-
-class ImageMetadata(BaseModel):
-    """An image's metadata"""
-    timestamp: float = Field(description="The creation timestamp of the image")
-    width: int = Field(description="The width of the image in pixels")
-    height: int = Field(description="The height of the image in pixels")
-    # TODO: figure out metadata
-    sd_metadata: Optional[dict] = Field(default={}, description="The image's SD-specific metadata")

View File

@ -1,10 +1,9 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from typing import Any, Dict, TypedDict from typing import Any
from invokeai.app.api.models.images import ProgressImage
from invokeai.app.util.misc import get_timestamp
ProgressImage = TypedDict(
"ProgressImage", {"dataURL": str, "width": int, "height": int}
)
class EventServiceBase: class EventServiceBase:
session_event: str = "session_event" session_event: str = "session_event"
@ -14,7 +13,8 @@ class EventServiceBase:
def dispatch(self, event_name: str, payload: Any) -> None: def dispatch(self, event_name: str, payload: Any) -> None:
pass pass
def __emit_session_event(self, event_name: str, payload: Dict) -> None: def __emit_session_event(self, event_name: str, payload: dict) -> None:
payload["timestamp"] = get_timestamp()
self.dispatch( self.dispatch(
event_name=EventServiceBase.session_event, event_name=EventServiceBase.session_event,
payload=dict(event=event_name, data=payload), payload=dict(event=event_name, data=payload),
@ -25,7 +25,8 @@ class EventServiceBase:
def emit_generator_progress( def emit_generator_progress(
self, self,
graph_execution_state_id: str, graph_execution_state_id: str,
invocation_id: str, node: dict,
source_node_id: str,
progress_image: ProgressImage | None, progress_image: ProgressImage | None,
step: int, step: int,
total_steps: int, total_steps: int,
@ -35,48 +36,60 @@ class EventServiceBase:
event_name="generator_progress", event_name="generator_progress",
payload=dict( payload=dict(
graph_execution_state_id=graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id, node=node,
progress_image=progress_image, source_node_id=source_node_id,
progress_image=progress_image.dict() if progress_image is not None else None,
step=step, step=step,
total_steps=total_steps, total_steps=total_steps,
), ),
) )
def emit_invocation_complete( def emit_invocation_complete(
self, graph_execution_state_id: str, invocation_id: str, result: Dict self,
graph_execution_state_id: str,
result: dict,
node: dict,
source_node_id: str,
) -> None: ) -> None:
"""Emitted when an invocation has completed""" """Emitted when an invocation has completed"""
self.__emit_session_event( self.__emit_session_event(
event_name="invocation_complete", event_name="invocation_complete",
payload=dict( payload=dict(
graph_execution_state_id=graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id, node=node,
source_node_id=source_node_id,
result=result, result=result,
), ),
) )
def emit_invocation_error( def emit_invocation_error(
self, graph_execution_state_id: str, invocation_id: str, error: str self,
graph_execution_state_id: str,
node: dict,
source_node_id: str,
error: str,
) -> None: ) -> None:
"""Emitted when an invocation has completed""" """Emitted when an invocation has completed"""
self.__emit_session_event( self.__emit_session_event(
event_name="invocation_error", event_name="invocation_error",
payload=dict( payload=dict(
graph_execution_state_id=graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id, node=node,
source_node_id=source_node_id,
error=error, error=error,
), ),
) )
def emit_invocation_started( def emit_invocation_started(
self, graph_execution_state_id: str, invocation_id: str self, graph_execution_state_id: str, node: dict, source_node_id: str
) -> None: ) -> None:
"""Emitted when an invocation has started""" """Emitted when an invocation has started"""
self.__emit_session_event( self.__emit_session_event(
event_name="invocation_started", event_name="invocation_started",
payload=dict( payload=dict(
graph_execution_state_id=graph_execution_state_id, graph_execution_state_id=graph_execution_state_id,
invocation_id=invocation_id, node=node,
source_node_id=source_node_id,
), ),
) )
@ -84,5 +97,7 @@ class EventServiceBase:
"""Emitted when a session has completed all invocations""" """Emitted when a session has completed all invocations"""
self.__emit_session_event( self.__emit_session_event(
event_name="graph_execution_state_complete", event_name="graph_execution_state_complete",
payload=dict(graph_execution_state_id=graph_execution_state_id), payload=dict(
graph_execution_state_id=graph_execution_state_id,
),
) )

View File

@ -2,7 +2,6 @@
import copy import copy
import itertools import itertools
import traceback
import uuid import uuid
from types import NoneType from types import NoneType
from typing import ( from typing import (
@ -26,7 +25,6 @@ from ..invocations.baseinvocation import (
BaseInvocationOutput, BaseInvocationOutput,
InvocationContext, InvocationContext,
) )
from .invocation_services import InvocationServices
class EdgeConnection(BaseModel): class EdgeConnection(BaseModel):
@ -215,7 +213,7 @@ InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()]
class Graph(BaseModel): class Graph(BaseModel):
id: str = Field(description="The id of this graph", default_factory=uuid.uuid4) id: str = Field(description="The id of this graph", default_factory=lambda: uuid.uuid4().__str__())
# TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me # TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me
nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field( nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field(
description="The nodes in this graph", default_factory=dict description="The nodes in this graph", default_factory=dict
@ -750,9 +748,7 @@ class Graph(BaseModel):
class GraphExecutionState(BaseModel): class GraphExecutionState(BaseModel):
"""Tracks the state of a graph execution""" """Tracks the state of a graph execution"""
id: str = Field( id: str = Field(description="The id of the execution state", default_factory=lambda: uuid.uuid4().__str__())
description="The id of the execution state", default_factory=uuid.uuid4
)
# TODO: Store a reference to the graph instead of the actual graph? # TODO: Store a reference to the graph instead of the actual graph?
graph: Graph = Field(description="The graph being executed") graph: Graph = Field(description="The graph being executed")

View File

@ -1,24 +1,24 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import datetime
import os import os
from glob import glob from glob import glob
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path from pathlib import Path
from queue import Queue from queue import Queue
from typing import Callable, Dict, List from typing import Dict, List, Tuple
from PIL.Image import Image from PIL.Image import Image
import PIL.Image as PILImage import PIL.Image as PILImage
from pydantic import BaseModel from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
from invokeai.app.api.models.images import ImageResponse from invokeai.app.models.image import ImageType
from invokeai.app.models.image import ImageField, ImageType from invokeai.app.services.metadata import (
from invokeai.app.models.metadata import ImageMetadata InvokeAIMetadata,
MetadataServiceBase,
build_invokeai_metadata_pnginfo,
)
from invokeai.app.services.item_storage import PaginatedResults from invokeai.app.services.item_storage import PaginatedResults
from invokeai.app.util.save_thumbnail import save_thumbnail from invokeai.app.util.misc import get_timestamp
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
from invokeai.backend.image_util import PngWriter
class ImageStorageBase(ABC): class ImageStorageBase(ABC):
@ -26,12 +26,14 @@ class ImageStorageBase(ABC):
@abstractmethod @abstractmethod
def get(self, image_type: ImageType, image_name: str) -> Image: def get(self, image_type: ImageType, image_name: str) -> Image:
"""Retrieves an image as PIL Image."""
pass pass
@abstractmethod @abstractmethod
def list( def list(
self, image_type: ImageType, page: int = 0, per_page: int = 10 self, image_type: ImageType, page: int = 0, per_page: int = 10
) -> PaginatedResults[ImageResponse]: ) -> PaginatedResults[ImageResponse]:
"""Gets a paginated list of images."""
pass pass
# TODO: make this a bit more flexible for e.g. cloud storage # TODO: make this a bit more flexible for e.g. cloud storage
@ -39,35 +41,51 @@ class ImageStorageBase(ABC):
def get_path( def get_path(
self, image_type: ImageType, image_name: str, is_thumbnail: bool = False self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
) -> str: ) -> str:
"""Gets the path to an image or its thumbnail."""
pass
# TODO: make this a bit more flexible for e.g. cloud storage
@abstractmethod
def validate_path(self, path: str) -> bool:
"""Validates an image path."""
pass pass
@abstractmethod @abstractmethod
def save(self, image_type: ImageType, image_name: str, image: Image) -> None: def save(
self,
image_type: ImageType,
image_name: str,
image: Image,
metadata: InvokeAIMetadata | None = None,
) -> Tuple[str, str, int]:
"""Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image path, thumbnail path, and created timestamp."""
pass pass
@abstractmethod @abstractmethod
def delete(self, image_type: ImageType, image_name: str) -> None: def delete(self, image_type: ImageType, image_name: str) -> None:
"""Deletes an image and its thumbnail (if one exists)."""
pass pass
def create_name(self, context_id: str, node_id: str) -> str: def create_name(self, context_id: str, node_id: str) -> str:
return f"{context_id}_{node_id}_{str(int(datetime.datetime.now(datetime.timezone.utc).timestamp()))}.png" """Creates a unique contextual image filename."""
return f"{context_id}_{node_id}_{str(get_timestamp())}.png"
class DiskImageStorage(ImageStorageBase): class DiskImageStorage(ImageStorageBase):
"""Stores images on disk""" """Stores images on disk"""
__output_folder: str __output_folder: str
__pngWriter: PngWriter
__cache_ids: Queue # TODO: this is an incredibly naive cache __cache_ids: Queue # TODO: this is an incredibly naive cache
__cache: Dict[str, Image] __cache: Dict[str, Image]
__max_cache_size: int __max_cache_size: int
__metadata_service: MetadataServiceBase
def __init__(self, output_folder: str): def __init__(self, output_folder: str, metadata_service: MetadataServiceBase):
self.__output_folder = output_folder self.__output_folder = output_folder
self.__pngWriter = PngWriter(output_folder)
self.__cache = dict() self.__cache = dict()
self.__cache_ids = Queue() self.__cache_ids = Queue()
self.__max_cache_size = 10 # TODO: get this from config self.__max_cache_size = 10 # TODO: get this from config
self.__metadata_service = metadata_service
Path(output_folder).mkdir(parents=True, exist_ok=True) Path(output_folder).mkdir(parents=True, exist_ok=True)
@ -100,6 +118,9 @@ class DiskImageStorage(ImageStorageBase):
for path in page_of_image_paths: for path in page_of_image_paths:
filename = os.path.basename(path) filename = os.path.basename(path)
img = PILImage.open(path) img = PILImage.open(path)
invokeai_metadata = self.__metadata_service.get_metadata(img)
page_of_images.append( page_of_images.append(
ImageResponse( ImageResponse(
image_type=image_type.value, image_type=image_type.value,
@ -107,11 +128,12 @@ class DiskImageStorage(ImageStorageBase):
# TODO: DiskImageStorage should not be building URLs...? # TODO: DiskImageStorage should not be building URLs...?
image_url=f"api/v1/images/{image_type.value}/{filename}", image_url=f"api/v1/images/{image_type.value}/{filename}",
thumbnail_url=f"api/v1/images/{image_type.value}/thumbnails/{os.path.splitext(filename)[0]}.webp", thumbnail_url=f"api/v1/images/{image_type.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
# TODO: Creation of this object should happen elsewhere, just making it fit here so it works # TODO: Creation of this object should happen elsewhere (?), just making it fit here so it works
metadata=ImageMetadata( metadata=ImageResponseMetadata(
timestamp=os.path.getctime(path), created=int(os.path.getctime(path)),
width=img.width, width=img.width,
height=img.height, height=img.height,
invokeai=invokeai_metadata,
), ),
) )
) )
@ -142,26 +164,50 @@ class DiskImageStorage(ImageStorageBase):
def get_path( def get_path(
self, image_type: ImageType, image_name: str, is_thumbnail: bool = False self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
) -> str: ) -> str:
# strip out any relative path shenanigans
basename = os.path.basename(image_name)
if is_thumbnail: if is_thumbnail:
path = os.path.join( path = os.path.join(
self.__output_folder, image_type, "thumbnails", image_name self.__output_folder, image_type, "thumbnails", basename
) )
else: else:
path = os.path.join(self.__output_folder, image_type, image_name) path = os.path.join(self.__output_folder, image_type, basename)
return path return path
def save(self, image_type: ImageType, image_name: str, image: Image) -> None: def validate_path(self, path: str) -> bool:
image_subpath = os.path.join(image_type, image_name) try:
self.__pngWriter.save_image_and_prompt_to_png( os.stat(path)
image, "", image_subpath, None return True
) # TODO: just pass full path to png writer except Exception:
save_thumbnail( return False
image=image,
filename=image_name, def save(
path=os.path.join(self.__output_folder, image_type, "thumbnails"), self,
) image_type: ImageType,
image_name: str,
image: Image,
metadata: InvokeAIMetadata | None = None,
) -> Tuple[str, str, int]:
image_path = self.get_path(image_type, image_name) image_path = self.get_path(image_type, image_name)
# TODO: Reading the image and then saving it strips the metadata...
if metadata:
pnginfo = build_invokeai_metadata_pnginfo(metadata=metadata)
image.save(image_path, "PNG", pnginfo=pnginfo)
else:
image.save(image_path) # this saved image has an empty info
thumbnail_name = get_thumbnail_name(image_name)
thumbnail_path = self.get_path(image_type, thumbnail_name, is_thumbnail=True)
thumbnail_image = make_thumbnail(image)
thumbnail_image.save(thumbnail_path)
self.__set_cache(image_path, image) self.__set_cache(image_path, image)
self.__set_cache(thumbnail_path, thumbnail_image)
return (image_path, thumbnail_path, int(os.path.getctime(image_path)))
def delete(self, image_type: ImageType, image_name: str) -> None: def delete(self, image_type: ImageType, image_name: str) -> None:
image_path = self.get_path(image_type, image_name) image_path = self.get_path(image_type, image_name)

View File

@ -1,30 +1,17 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import time
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from queue import Queue from queue import Queue
import time
from pydantic import BaseModel, Field
# TODO: make this serializable class InvocationQueueItem(BaseModel):
class InvocationQueueItem: graph_execution_state_id: str = Field(description="The ID of the graph execution state")
# session_id: str invocation_id: str = Field(description="The ID of the node being invoked")
graph_execution_state_id: str invoke_all: bool = Field(default=False)
invocation_id: str timestamp: float = Field(default_factory=time.time)
invoke_all: bool
timestamp: float
def __init__(
self,
# session_id: str,
graph_execution_state_id: str,
invocation_id: str,
invoke_all: bool = False,
):
# self.session_id = session_id
self.graph_execution_state_id = graph_execution_state_id
self.invocation_id = invocation_id
self.invoke_all = invoke_all
self.timestamp = time.time()
class InvocationQueueABC(ABC): class InvocationQueueABC(ABC):

View File

@ -1,4 +1,5 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from invokeai.app.services.metadata import MetadataServiceBase
from invokeai.backend import ModelManager from invokeai.backend import ModelManager
from .events import EventServiceBase from .events import EventServiceBase
@ -14,6 +15,7 @@ class InvocationServices:
events: EventServiceBase events: EventServiceBase
latents: LatentsStorageBase latents: LatentsStorageBase
images: ImageStorageBase images: ImageStorageBase
metadata: MetadataServiceBase
queue: InvocationQueueABC queue: InvocationQueueABC
model_manager: ModelManager model_manager: ModelManager
restoration: RestorationServices restoration: RestorationServices
@ -29,6 +31,7 @@ class InvocationServices:
events: EventServiceBase, events: EventServiceBase,
latents: LatentsStorageBase, latents: LatentsStorageBase,
images: ImageStorageBase, images: ImageStorageBase,
metadata: MetadataServiceBase,
queue: InvocationQueueABC, queue: InvocationQueueABC,
graph_library: ItemStorageABC["LibraryGraph"], graph_library: ItemStorageABC["LibraryGraph"],
graph_execution_manager: ItemStorageABC["GraphExecutionState"], graph_execution_manager: ItemStorageABC["GraphExecutionState"],
@ -39,6 +42,7 @@ class InvocationServices:
self.events = events self.events = events
self.latents = latents self.latents = latents
self.images = images self.images = images
self.metadata = metadata
self.queue = queue self.queue = queue
self.graph_library = graph_library self.graph_library = graph_library
self.graph_execution_manager = graph_execution_manager self.graph_execution_manager = graph_execution_manager

View File

@ -0,0 +1,96 @@
import json
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, TypedDict
from PIL import Image, PngImagePlugin
from pydantic import BaseModel
from invokeai.app.models.image import ImageType, is_image_type
class MetadataImageField(TypedDict):
"""Pydantic-less ImageField, used for metadata parsing."""
image_type: ImageType
image_name: str
class MetadataLatentsField(TypedDict):
"""Pydantic-less LatentsField, used for metadata parsing."""
latents_name: str
# TODO: This is a placeholder for `InvocationsUnion` pending resolution of circular imports
NodeMetadata = Dict[
str, str | int | float | bool | MetadataImageField | MetadataLatentsField
]
class InvokeAIMetadata(TypedDict, total=False):
"""InvokeAI-specific metadata format."""
session_id: Optional[str]
node: Optional[NodeMetadata]
def build_invokeai_metadata_pnginfo(
metadata: InvokeAIMetadata | None,
) -> PngImagePlugin.PngInfo:
"""Builds a PngInfo object with key `"invokeai"` and value `metadata`"""
pnginfo = PngImagePlugin.PngInfo()
if metadata is not None:
pnginfo.add_text("invokeai", json.dumps(metadata))
return pnginfo
class MetadataServiceBase(ABC):
@abstractmethod
def get_metadata(self, image: Image.Image) -> InvokeAIMetadata | None:
"""Gets the InvokeAI metadata from a PIL Image, skipping invalid values"""
pass
@abstractmethod
def build_metadata(
self, session_id: str, node: BaseModel
) -> InvokeAIMetadata | None:
"""Builds an InvokeAIMetadata object"""
pass
class PngMetadataService(MetadataServiceBase):
"""Handles loading and building metadata for images."""
# TODO: Use `InvocationsUnion` to **validate** metadata as representing a fully-functioning node
def _load_metadata(self, image: Image.Image) -> dict | None:
"""Loads a specific info entry from a PIL Image."""
try:
info = image.info.get("invokeai")
if type(info) is not str:
return None
loaded_metadata = json.loads(info)
if type(loaded_metadata) is not dict:
return None
if len(loaded_metadata.items()) == 0:
return None
return loaded_metadata
except:
return None
def get_metadata(self, image: Image.Image) -> dict | None:
"""Retrieves an image's metadata as a dict"""
loaded_metadata = self._load_metadata(image)
return loaded_metadata
def build_metadata(self, session_id: str, node: BaseModel) -> InvokeAIMetadata:
metadata = InvokeAIMetadata(session_id=session_id, node=node.dict())
return metadata

View File

@ -43,10 +43,14 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
queue_item.invocation_id queue_item.invocation_id
) )
# get the source node id to provide to clients (the prepared node id is not as useful)
source_node_id = graph_execution_state.prepared_source_mapping[invocation.id]
# Send starting event # Send starting event
self.__invoker.services.events.emit_invocation_started( self.__invoker.services.events.emit_invocation_started(
graph_execution_state_id=graph_execution_state.id, graph_execution_state_id=graph_execution_state.id,
invocation_id=invocation.id, node=invocation.dict(),
source_node_id=source_node_id
) )
# Invoke # Invoke
@ -75,7 +79,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
# Send complete event # Send complete event
self.__invoker.services.events.emit_invocation_complete( self.__invoker.services.events.emit_invocation_complete(
graph_execution_state_id=graph_execution_state.id, graph_execution_state_id=graph_execution_state.id,
invocation_id=invocation.id, node=invocation.dict(),
source_node_id=source_node_id,
result=outputs.dict(), result=outputs.dict(),
) )
@ -99,7 +104,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
# Send error event # Send error event
self.__invoker.services.events.emit_invocation_error( self.__invoker.services.events.emit_invocation_error(
graph_execution_state_id=graph_execution_state.id, graph_execution_state_id=graph_execution_state.id,
invocation_id=invocation.id, node=invocation.dict(),
source_node_id=source_node_id,
error=error, error=error,
) )

View File

@ -35,7 +35,8 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
self._create_table() self._create_table()
def _create_table(self): def _create_table(self):
with self._lock: try:
self._lock.acquire()
self._cursor.execute( self._cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self._table_name} ( f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
item TEXT, item TEXT,
@ -44,27 +45,34 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
self._cursor.execute( self._cursor.execute(
f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);""" f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
) )
self._conn.commit() finally:
self._lock.release()
def _parse_item(self, item: str) -> T: def _parse_item(self, item: str) -> T:
item_type = get_args(self.__orig_class__)[0] item_type = get_args(self.__orig_class__)[0]
return parse_raw_as(item_type, item) return parse_raw_as(item_type, item)
def set(self, item: T): def set(self, item: T):
with self._lock: try:
self._lock.acquire()
self._cursor.execute( self._cursor.execute(
f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""", f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
(item.json(),), (item.json(),),
) )
self._conn.commit() self._conn.commit()
finally:
self._lock.release()
self._on_changed(item) self._on_changed(item)
def get(self, id: str) -> Union[T, None]: def get(self, id: str) -> Union[T, None]:
with self._lock: try:
self._lock.acquire()
self._cursor.execute( self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),) f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),)
) )
result = self._cursor.fetchone() result = self._cursor.fetchone()
finally:
self._lock.release()
if not result: if not result:
return None return None
@ -72,15 +80,19 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
return self._parse_item(result[0]) return self._parse_item(result[0])
def delete(self, id: str): def delete(self, id: str):
with self._lock: try:
self._lock.acquire()
self._cursor.execute( self._cursor.execute(
f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),) f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),)
) )
self._conn.commit() self._conn.commit()
finally:
self._lock.release()
self._on_deleted(id) self._on_deleted(id)
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]: def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
with self._lock: try:
self._lock.acquire()
self._cursor.execute( self._cursor.execute(
f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""", f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
(per_page, page * per_page), (per_page, page * per_page),
@ -91,6 +103,8 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""") self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
count = self._cursor.fetchone()[0] count = self._cursor.fetchone()[0]
finally:
self._lock.release()
pageCount = int(count / per_page) + 1 pageCount = int(count / per_page) + 1
@ -101,7 +115,8 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
def search( def search(
self, query: str, page: int = 0, per_page: int = 10 self, query: str, page: int = 0, per_page: int = 10
) -> PaginatedResults[T]: ) -> PaginatedResults[T]:
with self._lock: try:
self._lock.acquire()
self._cursor.execute( self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""", f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
(f"%{query}%", per_page, page * per_page), (f"%{query}%", per_page, page * per_page),
@ -115,6 +130,8 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
(f"%{query}%",), (f"%{query}%",),
) )
count = self._cursor.fetchone()[0] count = self._cursor.fetchone()[0]
finally:
self._lock.release()
pageCount = int(count / per_page) + 1 pageCount = int(count / per_page) + 1

View File

@ -0,0 +1,5 @@
import datetime
def get_timestamp():
return int(datetime.datetime.now(datetime.timezone.utc).timestamp())

View File

@ -1,25 +0,0 @@
import os
from PIL import Image
def save_thumbnail(
image: Image.Image,
filename: str,
path: str,
size: int = 256,
) -> str:
"""
Saves a thumbnail of an image, returning its path.
"""
base_filename = os.path.splitext(filename)[0]
thumbnail_path = os.path.join(path, base_filename + ".webp")
if os.path.exists(thumbnail_path):
return thumbnail_path
image_copy = image.copy()
image_copy.thumbnail(size=(size, size))
image_copy.save(thumbnail_path, "WEBP")
return thumbnail_path

View File

@ -1,16 +1,41 @@
import torch from invokeai.app.api.models.images import ProgressImage
from invokeai.app.models.exceptions import CanceledException
from ..invocations.baseinvocation import InvocationContext from ..invocations.baseinvocation import InvocationContext
from ...backend.util.util import image_to_dataURL from ...backend.util.util import image_to_dataURL
from ...backend.generator.base import Generator from ...backend.generator.base import Generator
from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion import PipelineIntermediateState
def fast_latents_step_callback(
sample: torch.Tensor, def stable_diffusion_step_callback(
step: int,
steps: int,
id: str,
context: InvocationContext, context: InvocationContext,
intermediate_state: PipelineIntermediateState,
node: dict,
source_node_id: str,
): ):
if context.services.queue.is_canceled(context.graph_execution_state_id):
raise CanceledException
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be. Use
# that estimate if it is available.
if intermediate_state.predicted_original is not None:
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
# TODO: This does not seem to be needed any more?
# # txt2img provides a Tensor in the step_callback
# # img2img provides a PipelineIntermediateState
# if isinstance(sample, PipelineIntermediateState):
# # this was an img2img
# print('img2img')
# latents = sample.latents
# step = sample.step
# else:
# print('txt2img')
# latents = sample
# step = intermediate_state.step
# TODO: only output a preview image when requested # TODO: only output a preview image when requested
image = Generator.sample_to_lowres_estimated_image(sample) image = Generator.sample_to_lowres_estimated_image(sample)
@ -21,23 +46,10 @@ def fast_latents_step_callback(
dataURL = image_to_dataURL(image, image_format="JPEG") dataURL = image_to_dataURL(image, image_format="JPEG")
context.services.events.emit_generator_progress( context.services.events.emit_generator_progress(
context.graph_execution_state_id, graph_execution_state_id=context.graph_execution_state_id,
id, node=node,
{"width": width, "height": height, "dataURL": dataURL}, source_node_id=source_node_id,
step, progress_image=ProgressImage(width=width, height=height, dataURL=dataURL),
steps, step=intermediate_state.step,
total_steps=node["steps"],
) )
def diffusers_step_callback_adapter(*cb_args, **kwargs):
"""
txt2img gives us a Tensor in the step_callbak, while img2img gives us a PipelineIntermediateState.
This adapter grabs the needed data and passes it along to the callback function.
"""
if isinstance(cb_args[0], PipelineIntermediateState):
progress_state: PipelineIntermediateState = cb_args[0]
return fast_latents_step_callback(
progress_state.latents, progress_state.step, **kwargs
)
else:
return fast_latents_step_callback(*cb_args, **kwargs)

View File

@ -0,0 +1,15 @@
import os
from PIL import Image
def get_thumbnail_name(image_name: str) -> str:
"""Formats given an image name, returns the appropriate thumbnail image name"""
thumbnail_name = os.path.splitext(image_name)[0] + ".webp"
return thumbnail_name
def make_thumbnail(image: Image.Image, size: int = 256) -> Image.Image:
"""Makes a thumbnail from a PIL Image"""
thumbnail = image.copy()
thumbnail.thumbnail(size=(size, size))
return thumbnail

View File

@ -57,7 +57,7 @@ class HuggingFaceConceptsLibrary(object):
self.concept_list.extend(list(local_concepts_to_add)) self.concept_list.extend(list(local_concepts_to_add))
return self.concept_list return self.concept_list
return self.concept_list return self.concept_list
else: elif Globals.internet_available is True:
try: try:
models = self.hf_api.list_models( models = self.hf_api.list_models(
filter=ModelFilter(model_name="sd-concepts-library/") filter=ModelFilter(model_name="sd-concepts-library/")
@ -73,6 +73,8 @@ class HuggingFaceConceptsLibrary(object):
" ** You may load .bin and .pt file(s) manually using the --embedding_directory argument." " ** You may load .bin and .pt file(s) manually using the --embedding_directory argument."
) )
return self.concept_list return self.concept_list
else:
return self.concept_list
def get_concept_model_path(self, concept_name: str) -> str: def get_concept_model_path(self, concept_name: str) -> str:
""" """

View File

@ -6,3 +6,5 @@ stats.html
index.html index.html
.yarn/ .yarn/
*.scss *.scss
src/services/api/
src/services/fixtures/*

View File

@ -3,4 +3,8 @@ dist/
node_modules/ node_modules/
patches/ patches/
stats.html stats.html
index.html
.yarn/ .yarn/
*.scss
src/services/api/
src/services/fixtures/*

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,4 +1,4 @@
import{j as y,cN as Ie,r as _,cO as bt,q as Lr,cP as o,cQ as b,cR as v,cS as S,cT as Vr,cU as ut,cV as vt,cM as ft,cW as mt,n as gt,cX as ht,E as pt}from"./index-f7f41e1f.js";import{d as yt,i as St,T as xt,j as $t,h as kt}from"./storeHooks-eaf47ae3.js";var Or=` import{j as y,cO as Ie,r as _,cP as bt,q as Lr,cQ as o,cR as b,cS as v,cT as S,cU as Vr,cV as ut,cW as vt,cN as ft,cX as mt,n as gt,cY as ht,E as pt}from"./index-e53e8108.js";import{d as yt,i as St,T as xt,j as $t,h as kt}from"./storeHooks-5cde7d31.js";var Or=`
:root { :root {
--chakra-vh: 100vh; --chakra-vh: 100vh;
} }

View File

@ -12,7 +12,7 @@
margin: 0; margin: 0;
} }
</style> </style>
<script type="module" crossorigin src="./assets/index-f7f41e1f.js"></script> <script type="module" crossorigin src="./assets/index-e53e8108.js"></script>
<link rel="stylesheet" href="./assets/index-5483945c.css"> <link rel="stylesheet" href="./assets/index-5483945c.css">
</head> </head>

View File

@ -8,7 +8,6 @@
"darkTheme": "داكن", "darkTheme": "داكن",
"lightTheme": "فاتح", "lightTheme": "فاتح",
"greenTheme": "أخضر", "greenTheme": "أخضر",
"text2img": "نص إلى صورة",
"img2img": "صورة إلى صورة", "img2img": "صورة إلى صورة",
"unifiedCanvas": "لوحة موحدة", "unifiedCanvas": "لوحة موحدة",
"nodes": "عقد", "nodes": "عقد",

View File

@ -7,7 +7,6 @@
"darkTheme": "Dunkel", "darkTheme": "Dunkel",
"lightTheme": "Hell", "lightTheme": "Hell",
"greenTheme": "Grün", "greenTheme": "Grün",
"text2img": "Text zu Bild",
"img2img": "Bild zu Bild", "img2img": "Bild zu Bild",
"nodes": "Knoten", "nodes": "Knoten",
"langGerman": "Deutsch", "langGerman": "Deutsch",

View File

@ -505,7 +505,9 @@
"info": "Info", "info": "Info",
"deleteImage": "Delete Image", "deleteImage": "Delete Image",
"initialImage": "Initial Image", "initialImage": "Initial Image",
"showOptionsPanel": "Show Options Panel" "showOptionsPanel": "Show Options Panel",
"hidePreview": "Hide Preview",
"showPreview": "Show Preview"
}, },
"settings": { "settings": {
"models": "Models", "models": "Models",

View File

@ -8,7 +8,6 @@
"darkTheme": "Oscuro", "darkTheme": "Oscuro",
"lightTheme": "Claro", "lightTheme": "Claro",
"greenTheme": "Verde", "greenTheme": "Verde",
"text2img": "Texto a Imagen",
"img2img": "Imagen a Imagen", "img2img": "Imagen a Imagen",
"unifiedCanvas": "Lienzo Unificado", "unifiedCanvas": "Lienzo Unificado",
"nodes": "Nodos", "nodes": "Nodos",
@ -70,7 +69,11 @@
"langHebrew": "Hebreo", "langHebrew": "Hebreo",
"pinOptionsPanel": "Pin del panel de opciones", "pinOptionsPanel": "Pin del panel de opciones",
"loading": "Cargando", "loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA" "loadingInvokeAI": "Cargando invocar a la IA",
"postprocessing": "Tratamiento posterior",
"txt2img": "De texto a imagen",
"accept": "Aceptar",
"cancel": "Cancelar"
}, },
"gallery": { "gallery": {
"generations": "Generaciones", "generations": "Generaciones",
@ -404,7 +407,8 @@
"none": "ninguno", "none": "ninguno",
"pickModelType": "Elige el tipo de modelo", "pickModelType": "Elige el tipo de modelo",
"v2_768": "v2 (768px)", "v2_768": "v2 (768px)",
"addDifference": "Añadir una diferencia" "addDifference": "Añadir una diferencia",
"scanForModels": "Buscar modelos"
}, },
"parameters": { "parameters": {
"images": "Imágenes", "images": "Imágenes",
@ -574,7 +578,7 @@
"autoSaveToGallery": "Guardar automáticamente en galería", "autoSaveToGallery": "Guardar automáticamente en galería",
"saveBoxRegionOnly": "Guardar solo región dentro de la caja", "saveBoxRegionOnly": "Guardar solo región dentro de la caja",
"limitStrokesToBox": "Limitar trazos a la caja", "limitStrokesToBox": "Limitar trazos a la caja",
"showCanvasDebugInfo": "Mostrar información de depuración de lienzo", "showCanvasDebugInfo": "Mostrar la información adicional del lienzo",
"clearCanvasHistory": "Limpiar historial de lienzo", "clearCanvasHistory": "Limpiar historial de lienzo",
"clearHistory": "Limpiar historial", "clearHistory": "Limpiar historial",
"clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.", "clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",

View File

@ -8,7 +8,6 @@
"darkTheme": "Sombre", "darkTheme": "Sombre",
"lightTheme": "Clair", "lightTheme": "Clair",
"greenTheme": "Vert", "greenTheme": "Vert",
"text2img": "Texte en image",
"img2img": "Image en image", "img2img": "Image en image",
"unifiedCanvas": "Canvas unifié", "unifiedCanvas": "Canvas unifié",
"nodes": "Nœuds", "nodes": "Nœuds",
@ -47,7 +46,19 @@
"statusLoadingModel": "Chargement du modèle", "statusLoadingModel": "Chargement du modèle",
"statusModelChanged": "Modèle changé", "statusModelChanged": "Modèle changé",
"discordLabel": "Discord", "discordLabel": "Discord",
"githubLabel": "Github" "githubLabel": "Github",
"accept": "Accepter",
"statusMergingModels": "Mélange des modèles",
"loadingInvokeAI": "Chargement de Invoke AI",
"cancel": "Annuler",
"langEnglish": "Anglais",
"statusConvertingModel": "Conversion du modèle",
"statusModelConverted": "Modèle converti",
"loading": "Chargement",
"pinOptionsPanel": "Épingler la page d'options",
"statusMergedModels": "Modèles mélangés",
"txt2img": "Texte vers image",
"postprocessing": "Post-Traitement"
}, },
"gallery": { "gallery": {
"generations": "Générations", "generations": "Générations",
@ -518,5 +529,15 @@
"betaDarkenOutside": "Assombrir à l'extérieur", "betaDarkenOutside": "Assombrir à l'extérieur",
"betaLimitToBox": "Limiter à la boîte", "betaLimitToBox": "Limiter à la boîte",
"betaPreserveMasked": "Conserver masqué" "betaPreserveMasked": "Conserver masqué"
},
"accessibility": {
"uploadImage": "Charger une image",
"reset": "Réinitialiser",
"nextImage": "Image suivante",
"previousImage": "Image précédente",
"useThisParameter": "Utiliser ce paramètre",
"zoomIn": "Zoom avant",
"zoomOut": "Zoom arrière",
"showOptionsPanel": "Montrer la page d'options"
} }
} }

View File

@ -125,7 +125,6 @@
"langSimplifiedChinese": "סינית", "langSimplifiedChinese": "סינית",
"langUkranian": "אוקראינית", "langUkranian": "אוקראינית",
"langSpanish": "ספרדית", "langSpanish": "ספרדית",
"text2img": "טקסט לתמונה",
"img2img": "תמונה לתמונה", "img2img": "תמונה לתמונה",
"unifiedCanvas": "קנבס מאוחד", "unifiedCanvas": "קנבס מאוחד",
"nodes": "צמתים", "nodes": "צמתים",

View File

@ -8,7 +8,6 @@
"darkTheme": "Scuro", "darkTheme": "Scuro",
"lightTheme": "Chiaro", "lightTheme": "Chiaro",
"greenTheme": "Verde", "greenTheme": "Verde",
"text2img": "Testo a Immagine",
"img2img": "Immagine a Immagine", "img2img": "Immagine a Immagine",
"unifiedCanvas": "Tela unificata", "unifiedCanvas": "Tela unificata",
"nodes": "Nodi", "nodes": "Nodi",
@ -70,7 +69,11 @@
"loading": "Caricamento in corso", "loading": "Caricamento in corso",
"oceanTheme": "Oceano", "oceanTheme": "Oceano",
"langHebrew": "Ebraico", "langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI" "loadingInvokeAI": "Caricamento Invoke AI",
"postprocessing": "Post Elaborazione",
"txt2img": "Testo a Immagine",
"accept": "Accetta",
"cancel": "Annulla"
}, },
"gallery": { "gallery": {
"generations": "Generazioni", "generations": "Generazioni",
@ -404,7 +407,8 @@
"v2_768": "v2 (768px)", "v2_768": "v2 (768px)",
"none": "niente", "none": "niente",
"addDifference": "Aggiungi differenza", "addDifference": "Aggiungi differenza",
"pickModelType": "Scegli il tipo di modello" "pickModelType": "Scegli il tipo di modello",
"scanForModels": "Cerca modelli"
}, },
"parameters": { "parameters": {
"images": "Immagini", "images": "Immagini",
@ -574,7 +578,7 @@
"autoSaveToGallery": "Salvataggio automatico nella Galleria", "autoSaveToGallery": "Salvataggio automatico nella Galleria",
"saveBoxRegionOnly": "Salva solo l'area di selezione", "saveBoxRegionOnly": "Salva solo l'area di selezione",
"limitStrokesToBox": "Limita i tratti all'area di selezione", "limitStrokesToBox": "Limita i tratti all'area di selezione",
"showCanvasDebugInfo": "Mostra informazioni di debug della Tela", "showCanvasDebugInfo": "Mostra ulteriori informazioni sulla Tela",
"clearCanvasHistory": "Cancella cronologia Tela", "clearCanvasHistory": "Cancella cronologia Tela",
"clearHistory": "Cancella la cronologia", "clearHistory": "Cancella la cronologia",
"clearCanvasHistoryMessage": "La cancellazione della cronologia della tela lascia intatta la tela corrente, ma cancella in modo irreversibile la cronologia degli annullamenti e dei ripristini.", "clearCanvasHistoryMessage": "La cancellazione della cronologia della tela lascia intatta la tela corrente, ma cancella in modo irreversibile la cronologia degli annullamenti e dei ripristini.",
@ -612,7 +616,7 @@
"copyMetadataJson": "Copia i metadati JSON", "copyMetadataJson": "Copia i metadati JSON",
"exitViewer": "Esci dal visualizzatore", "exitViewer": "Esci dal visualizzatore",
"zoomIn": "Zoom avanti", "zoomIn": "Zoom avanti",
"zoomOut": "Zoom Indietro", "zoomOut": "Zoom indietro",
"rotateCounterClockwise": "Ruotare in senso antiorario", "rotateCounterClockwise": "Ruotare in senso antiorario",
"rotateClockwise": "Ruotare in senso orario", "rotateClockwise": "Ruotare in senso orario",
"flipHorizontally": "Capovolgi orizzontalmente", "flipHorizontally": "Capovolgi orizzontalmente",

View File

@ -11,7 +11,6 @@
"langArabic": "العربية", "langArabic": "العربية",
"langEnglish": "English", "langEnglish": "English",
"langDutch": "Nederlands", "langDutch": "Nederlands",
"text2img": "텍스트->이미지",
"unifiedCanvas": "통합 캔버스", "unifiedCanvas": "통합 캔버스",
"langFrench": "Français", "langFrench": "Français",
"langGerman": "Deutsch", "langGerman": "Deutsch",

View File

@ -8,7 +8,6 @@
"darkTheme": "Donker", "darkTheme": "Donker",
"lightTheme": "Licht", "lightTheme": "Licht",
"greenTheme": "Groen", "greenTheme": "Groen",
"text2img": "Tekst naar afbeelding",
"img2img": "Afbeelding naar afbeelding", "img2img": "Afbeelding naar afbeelding",
"unifiedCanvas": "Centraal canvas", "unifiedCanvas": "Centraal canvas",
"nodes": "Knooppunten", "nodes": "Knooppunten",

View File

@ -8,7 +8,6 @@
"darkTheme": "Ciemny", "darkTheme": "Ciemny",
"lightTheme": "Jasny", "lightTheme": "Jasny",
"greenTheme": "Zielony", "greenTheme": "Zielony",
"text2img": "Tekst na obraz",
"img2img": "Obraz na obraz", "img2img": "Obraz na obraz",
"unifiedCanvas": "Tryb uniwersalny", "unifiedCanvas": "Tryb uniwersalny",
"nodes": "Węzły", "nodes": "Węzły",

View File

@ -20,7 +20,6 @@
"langSpanish": "Espanhol", "langSpanish": "Espanhol",
"langRussian": "Русский", "langRussian": "Русский",
"langUkranian": "Украї́нська", "langUkranian": "Украї́нська",
"text2img": "Texto para Imagem",
"img2img": "Imagem para Imagem", "img2img": "Imagem para Imagem",
"unifiedCanvas": "Tela Unificada", "unifiedCanvas": "Tela Unificada",
"nodes": "Nós", "nodes": "Nós",

View File

@ -8,7 +8,6 @@
"darkTheme": "Noite", "darkTheme": "Noite",
"lightTheme": "Dia", "lightTheme": "Dia",
"greenTheme": "Verde", "greenTheme": "Verde",
"text2img": "Texto Para Imagem",
"img2img": "Imagem Para Imagem", "img2img": "Imagem Para Imagem",
"unifiedCanvas": "Tela Unificada", "unifiedCanvas": "Tela Unificada",
"nodes": "Nódulos", "nodes": "Nódulos",

View File

@ -8,7 +8,6 @@
"darkTheme": "Темная", "darkTheme": "Темная",
"lightTheme": "Светлая", "lightTheme": "Светлая",
"greenTheme": "Зеленая", "greenTheme": "Зеленая",
"text2img": "Изображение из текста (text2img)",
"img2img": "Изображение в изображение (img2img)", "img2img": "Изображение в изображение (img2img)",
"unifiedCanvas": "Универсальный холст", "unifiedCanvas": "Универсальный холст",
"nodes": "Ноды", "nodes": "Ноды",

View File

@ -8,7 +8,6 @@
"darkTheme": "Темна", "darkTheme": "Темна",
"lightTheme": "Світла", "lightTheme": "Світла",
"greenTheme": "Зелена", "greenTheme": "Зелена",
"text2img": "Зображення із тексту (text2img)",
"img2img": "Зображення із зображення (img2img)", "img2img": "Зображення із зображення (img2img)",
"unifiedCanvas": "Універсальне полотно", "unifiedCanvas": "Універсальне полотно",
"nodes": "Вузли", "nodes": "Вузли",

View File

@ -8,7 +8,6 @@
"darkTheme": "暗色", "darkTheme": "暗色",
"lightTheme": "亮色", "lightTheme": "亮色",
"greenTheme": "绿色", "greenTheme": "绿色",
"text2img": "文字到图像",
"img2img": "图像到图像", "img2img": "图像到图像",
"unifiedCanvas": "统一画布", "unifiedCanvas": "统一画布",
"nodes": "节点", "nodes": "节点",

View File

@ -33,7 +33,6 @@
"langBrPortuguese": "巴西葡萄牙語", "langBrPortuguese": "巴西葡萄牙語",
"langRussian": "俄語", "langRussian": "俄語",
"langSpanish": "西班牙語", "langSpanish": "西班牙語",
"text2img": "文字到圖像",
"unifiedCanvas": "統一畫布" "unifiedCanvas": "統一畫布"
} }
} }

View File

@ -0,0 +1,87 @@
# Generated axios API client
- [Generated axios API client](#generated-axios-api-client)
- [Generation](#generation)
- [Generate the API client from the nodes web server](#generate-the-api-client-from-the-nodes-web-server)
- [Generate the API client from JSON](#generate-the-api-client-from-json)
- [Getting the JSON from the nodes web server](#getting-the-json-from-the-nodes-web-server)
- [Getting the JSON with a python script](#getting-the-json-with-a-python-script)
- [Generate the API client](#generate-the-api-client)
- [The generated client](#the-generated-client)
- [API client customisation](#api-client-customisation)
This API client is generated by an [openapi code generator](https://github.com/ferdikoomen/openapi-typescript-codegen).
All files in `invokeai/frontend/web/src/services/api/` are made by the generator.
## Generation
The axios client may be generated from the OpenAPI schema served by the nodes web server, or from a JSON file.
### Generate the API client from the nodes web server
We need to start the nodes web server, which serves the OpenAPI schema to the generator.
1. Start the nodes web server.
```bash
# from the repo root
python scripts/invoke-new.py --web
```
2. Generate the API client.
```bash
# from invokeai/frontend/web/
yarn api:web
```
### Generate the API client from JSON
The JSON can be acquired from the nodes web server, or with a python script.
#### Getting the JSON from the nodes web server
Start the nodes web server as described above, then download the file.
```bash
# from invokeai/frontend/web/
curl http://localhost:9090/openapi.json -o openapi.json
```
#### Getting the JSON with a python script
Run this python script from the repo root, so it can access the nodes server modules.
The script will output `openapi.json` in the repo root. Then we need to move it to `invokeai/frontend/web/src/services/fixtures/`.
```bash
# from the repo root
python invokeai/app/util/generate_openapi_json.py
mv invokeai/app/util/openapi.json invokeai/frontend/web/src/services/fixtures/
```
#### Generate the API client
Now we can generate the API client from the JSON.
```bash
# from invokeai/frontend/web/
yarn api:file
```
## The generated client
The client will be written to `invokeai/frontend/web/src/services/api/`:
- `axios` client
- TS types
- An easily parseable schema, which we can use to generate UI
## API client customisation
The generator has a default `request.ts` file that implements a base `axios` client. The generated client uses this base client.
One shortcoming of this base client is that it does not provide response headers unless the response body is empty. To fix this, we provide our own lightly-patched `request.ts`.
To access the headers, call `getHeaders(response)` on any response from the generated api client. This function is exported from `invokeai/frontend/web/src/services/util/getHeaders.ts`.
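As a rough illustration (a sketch only - `SessionsService` and `createSession` are hypothetical stand-ins for whatever the generator actually emits, and the import paths assume the project's path aliases; only `getHeaders` is named above):

```ts
import { getHeaders } from 'services/util/getHeaders'; // path per the note above
import { SessionsService } from 'services/api'; // hypothetical generated service

export const createSessionAndReadHeaders = async () => {
  const session = await SessionsService.createSession({ requestBody: { nodes: {}, edges: [] } });

  // With the patched `request.ts`, headers are retrievable even though the
  // response body is non-empty.
  const headers = getHeaders(session);
  console.log(headers.location);
};
```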

View File

@ -0,0 +1,21 @@
# Events
Events via `socket.io`
## `actions.ts`
Redux actions for all socket events. Payloads all include a timestamp, and optionally some other data.
Any reducer (or middleware) can respond to the actions.
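A minimal sketch of what one of these actions could look like (the event name and payload shape are illustrative, not copied from `actions.ts`):

```ts
import { createAction } from '@reduxjs/toolkit';

// Hypothetical socket event action - every payload carries at least a timestamp.
export const socketInvocationComplete = createAction<{
  timestamp: number;
  data: Record<string, unknown>;
}>('socket/invocationComplete');
```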
## `middleware.ts`
Redux middleware for events.
Handles dispatching the event actions. Only put logic here if it can't really go anywhere else.
For example, on connect we want to load images to the gallery if it's not populated. This requires dispatching a thunk, so we need to directly dispatch this in the middleware.
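A minimal sketch of this pattern (the action and connection details are stand-ins; the real middleware handles many more events and also dispatches the gallery thunk described above):

```ts
import { io } from 'socket.io-client';
import { createAction } from '@reduxjs/toolkit';
import type { Middleware } from '@reduxjs/toolkit';

// Local stand-in; the real actions live in `actions.ts`.
const socketConnected = createAction<{ timestamp: number }>('socket/connected');

export const socketMiddleware = (): Middleware => (storeApi) => {
  const socket = io(); // connection options omitted

  socket.on('connect', () => {
    storeApi.dispatch(socketConnected({ timestamp: Date.now() }));
    // The real middleware also dispatches a thunk here to populate the
    // gallery when it is empty, as described above.
  });

  return (next) => (action) => next(action);
};
```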
## `types.ts`
Hand-written types for the socket events. These cannot be generated from the server, but fortunately they are few and simple.
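For instance, the `generator_progress` event emitted by the Python event service earlier in this diff could be typed roughly like this (field names follow the `emit_generator_progress` payload; the actual names in `types.ts` may differ):

```ts
// Illustrative shape only; see `types.ts` for the real definitions.
export type ProgressImage = { width: number; height: number; dataURL: string };

export type GeneratorProgressEvent = {
  graph_execution_state_id: string;
  node: Record<string, unknown>;
  source_node_id: string;
  progress_image: ProgressImage | null;
  step: number;
  total_steps: number;
  timestamp: number; // added by the event service when the event is emitted
};
```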

View File

@ -0,0 +1,17 @@
# Node Editor Design
WIP
nodes: everything in `src/features/nodes/` - have a look at `state.nodes.invocation`
- on socket connect, if no schema saved, fetch `localhost:9090/openapi.json`, save JSON to `state.nodes.schema`
- on fulfilled schema fetch, `parseSchema()` the schema. This outputs a `Record<string, Invocation>` which is saved to `state.nodes.invocations` - `Invocation` is like a template for the node
- when you add a node, the `Invocation` template is passed to `InvocationComponent.tsx` to build the UI component for that node
- inputs/outputs have field types - and each field type gets a `FieldComponent` which includes a dispatcher to write state changes to redux `nodesSlice`
- `reactflow` sends changes to nodes/edges to redux
- to invoke, `buildNodesGraph()` builds the graph from state, then this is sent to the server (see the sketch after this list)
- the Invoke button's onClick actions were changed to build the graph first; once it is built, the actual network request to create the session is dispatched - see `session.ts`
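A very rough sketch of the idea behind `buildNodesGraph()` - mapping `reactflow` nodes and edges from state into the graph shape the sessions API expects (the type and field names are illustrative, not the real implementation):

```ts
type FlowNode = {
  id: string;
  data: { type: string; inputs: Record<string, { value: unknown }> };
};
type FlowEdge = {
  source: string;
  sourceHandle: string;
  target: string;
  targetHandle: string;
};

export const buildNodesGraph = (nodes: FlowNode[], edges: FlowEdge[]) => ({
  nodes: Object.fromEntries(
    nodes.map((n) => [
      n.id,
      {
        id: n.id,
        type: n.data.type,
        // Flatten each input field down to its current value.
        ...Object.fromEntries(
          Object.entries(n.data.inputs).map(([name, field]) => [name, field.value])
        ),
      },
    ])
  ),
  edges: edges.map((e) => ({
    source: { node_id: e.source, field: e.sourceHandle },
    destination: { node_id: e.target, field: e.targetHandle },
  })),
});
```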

View File

@ -0,0 +1,29 @@
# Package Scripts
WIP walkthrough of `package.json` scripts.
## `theme` & `theme:watch`
These run the Chakra CLI to generate types for the theme, or watch for code changes and re-generate the types.
The CLI essentially monkeypatches Chakra's files in `node_modules`.
## `postinstall`
The `postinstall` script patches a few packages and runs the Chakra CLI to generate types for the theme.
### Patch `@chakra-ui/cli`
See: <https://github.com/chakra-ui/chakra-ui/issues/7394>
### Patch `redux-persist`
We want to persist the canvas state to `localStorage` but many canvas operations change data very quickly, so we need to debounce the writes to `localStorage`.
`redux-persist` is unfortunately unmaintained. The repo's current code is nonfunctional, and the last release's code depends on a package that was removed from `npm` for being malware, so we cannot simply fork it.
So, we have to patch it directly. Perhaps a better way would be to write a debounced storage adapter, but I couldn't figure out how to do that.
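For reference, such a debounced storage adapter could look roughly like this (a sketch assuming batched `setItem` writes are acceptable - this is not what the project ships; it patches `redux-persist` instead):

```ts
import storage from 'redux-persist/lib/storage'; // localStorage-backed storage

export const debouncedStorage = (ms = 300) => {
  let timer: ReturnType<typeof setTimeout> | undefined;
  let pending: Record<string, string> = {};

  return {
    getItem: (key: string) => storage.getItem(key),
    removeItem: (key: string) => storage.removeItem(key),
    setItem: (key: string, value: string): Promise<void> => {
      // Buffer writes and flush them once things quiet down.
      pending[key] = value;
      if (timer) clearTimeout(timer);
      timer = setTimeout(() => {
        Object.entries(pending).forEach(([k, v]) => storage.setItem(k, v));
        pending = {};
      }, ms);
      return Promise.resolve();
    },
  };
};
```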
### Patch `redux-deep-persist`
This package makes blacklisting and whitelisting persist configs very simple, but we have to patch it to match `redux-persist` for the types to work.

View File

@ -1,10 +1,16 @@
# InvokeAI Web UI # InvokeAI Web UI
- [InvokeAI Web UI](#invokeai-web-ui)
- [Stack](#stack)
- [Contributing](#contributing)
- [Dev Environment](#dev-environment)
- [Production builds](#production-builds)
The UI is a fairly straightforward Typescript React app. The only really fancy stuff is the Unified Canvas. The UI is a fairly straightforward Typescript React app. The only really fancy stuff is the Unified Canvas.
Code in `invokeai/frontend/web/` if you want to have a look. Code in `invokeai/frontend/web/` if you want to have a look.
## Details ## Stack
State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). Communication with server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a custom redux middleware to help). State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). Communication with server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a custom redux middleware to help).
@ -32,7 +38,7 @@ Start everything in dev mode:
1. Start the dev server: `yarn dev` 1. Start the dev server: `yarn dev`
2. Start the InvokeAI UI per usual: `invokeai --web` 2. Start the InvokeAI UI per usual: `invokeai --web`
3. Point your browser to the dev server address e.g. `http://localhost:5173/` 3. Point your browser to the dev server address e.g. <http://localhost:5173/>
### Production builds ### Production builds

View File

@ -1,6 +1,7 @@
import React, { PropsWithChildren } from 'react'; import React, { PropsWithChildren } from 'react';
import { IAIPopoverProps } from '../web/src/common/components/IAIPopover'; import { IAIPopoverProps } from '../web/src/common/components/IAIPopover';
import { IAIIconButtonProps } from '../web/src/common/components/IAIIconButton'; import { IAIIconButtonProps } from '../web/src/common/components/IAIIconButton';
import { InvokeTabName } from 'features/ui/store/tabMap';
export {}; export {};
@ -64,9 +65,26 @@ declare module '@invoke-ai/invoke-ai-ui' {
declare class SettingsModal extends React.Component<SettingsModalProps> { declare class SettingsModal extends React.Component<SettingsModalProps> {
public constructor(props: SettingsModalProps); public constructor(props: SettingsModalProps);
} }
declare class StatusIndicator extends React.Component<StatusIndicatorProps> {
public constructor(props: StatusIndicatorProps);
}
declare class ModelSelect extends React.Component<ModelSelectProps> {
public constructor(props: ModelSelectProps);
}
} }
declare function Invoke(props: PropsWithChildren): JSX.Element; interface InvokeProps extends PropsWithChildren {
apiUrl?: string;
disabledPanels?: string[];
disabledTabs?: InvokeTabName[];
token?: string;
shouldTransformUrls?: boolean;
shouldFetchImages?: boolean;
}
declare function Invoke(props: InvokeProps): JSX.Element;
export { export {
ThemeChanger, ThemeChanger,
@ -74,5 +92,7 @@ export {
IAIPopover, IAIPopover,
IAIIconButton, IAIIconButton,
SettingsModal, SettingsModal,
StatusIndicator,
ModelSelect,
}; };
export = Invoke; export = Invoke;

View File

@ -5,7 +5,11 @@
"scripts": { "scripts": {
"prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky", "prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
"dev": "concurrently \"vite dev\" \"yarn run theme:watch\"", "dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
"dev:nodes": "concurrently \"vite dev --mode nodes\" \"yarn run theme:watch\"",
"dev:host": "concurrently \"vite dev --host\" \"yarn run theme:watch\"",
"build": "yarn run lint && vite build", "build": "yarn run lint && vite build",
"api:web": "openapi -i http://localhost:9090/openapi.json -o src/services/api --client axios --useOptions --useUnionTypes --exportSchemas true --indent 2 --request src/services/fixtures/request.ts",
"api:file": "openapi -i src/services/fixtures/openapi.json -o src/services/api --client axios --useOptions --useUnionTypes --exportSchemas true --indent 2 --request src/services/fixtures/request.ts",
"preview": "vite preview", "preview": "vite preview",
"lint:madge": "madge --circular src/main.tsx", "lint:madge": "madge --circular src/main.tsx",
"lint:eslint": "eslint --max-warnings=0 .", "lint:eslint": "eslint --max-warnings=0 .",
@ -41,13 +45,16 @@
"@chakra-ui/react": "^2.5.1", "@chakra-ui/react": "^2.5.1",
"@chakra-ui/styled-system": "^2.6.1", "@chakra-ui/styled-system": "^2.6.1",
"@chakra-ui/theme-tools": "^2.0.16", "@chakra-ui/theme-tools": "^2.0.16",
"@dagrejs/graphlib": "^2.1.12",
"@emotion/react": "^11.10.6", "@emotion/react": "^11.10.6",
"@emotion/styled": "^11.10.6", "@emotion/styled": "^11.10.6",
"@reduxjs/toolkit": "^1.9.2", "@fontsource/inter": "^4.5.15",
"@reduxjs/toolkit": "^1.9.3",
"chakra-ui-contextmenu": "^1.0.5", "chakra-ui-contextmenu": "^1.0.5",
"dateformat": "^5.0.3", "dateformat": "^5.0.3",
"formik": "^2.2.9", "formik": "^2.2.9",
"framer-motion": "^9.0.4", "framer-motion": "^9.0.4",
"fuse.js": "^6.6.2",
"i18next": "^22.4.10", "i18next": "^22.4.10",
"i18next-browser-languagedetector": "^7.0.1", "i18next-browser-languagedetector": "^7.0.1",
"i18next-http-backend": "^2.1.1", "i18next-http-backend": "^2.1.1",
@ -67,15 +74,17 @@
"react-redux": "^8.0.5", "react-redux": "^8.0.5",
"react-transition-group": "^4.4.5", "react-transition-group": "^4.4.5",
"react-zoom-pan-pinch": "^2.6.1", "react-zoom-pan-pinch": "^2.6.1",
"reactflow": "^11.7.0",
"redux-deep-persist": "^1.0.7", "redux-deep-persist": "^1.0.7",
"redux-dynamic-middlewares": "^2.2.0",
"redux-persist": "^6.0.0", "redux-persist": "^6.0.0",
"socket.io-client": "^4.6.0", "socket.io-client": "^4.6.0",
"use-image": "^1.1.0", "use-image": "^1.1.0",
"uuid": "^9.0.0" "uuid": "^9.0.0"
}, },
"devDependencies": { "devDependencies": {
"@fontsource/inter": "^4.5.15",
"@types/dateformat": "^5.0.0", "@types/dateformat": "^5.0.0",
"@types/lodash": "^4.14.194",
"@types/react": "^18.0.28", "@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11", "@types/react-dom": "^18.0.11",
"@types/react-transition-group": "^4.4.5", "@types/react-transition-group": "^4.4.5",
@ -83,6 +92,7 @@
"@typescript-eslint/eslint-plugin": "^5.52.0", "@typescript-eslint/eslint-plugin": "^5.52.0",
"@typescript-eslint/parser": "^5.52.0", "@typescript-eslint/parser": "^5.52.0",
"@vitejs/plugin-react-swc": "^3.2.0", "@vitejs/plugin-react-swc": "^3.2.0",
"axios": "^1.3.4",
"babel-plugin-transform-imports": "^2.0.0", "babel-plugin-transform-imports": "^2.0.0",
"concurrently": "^7.6.0", "concurrently": "^7.6.0",
"eslint": "^8.34.0", "eslint": "^8.34.0",
@ -90,13 +100,17 @@
"eslint-plugin-prettier": "^4.2.1", "eslint-plugin-prettier": "^4.2.1",
"eslint-plugin-react": "^7.32.2", "eslint-plugin-react": "^7.32.2",
"eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-hooks": "^4.6.0",
"form-data": "^4.0.0",
"husky": "^8.0.3", "husky": "^8.0.3",
"lint-staged": "^13.1.2", "lint-staged": "^13.1.2",
"madge": "^6.0.0", "madge": "^6.0.0",
"openapi-types": "^12.1.0",
"openapi-typescript-codegen": "^0.23.0",
"postinstall-postinstall": "^2.1.0", "postinstall-postinstall": "^2.1.0",
"prettier": "^2.8.4", "prettier": "^2.8.4",
"rollup-plugin-visualizer": "^5.9.0", "rollup-plugin-visualizer": "^5.9.0",
"terser": "^5.16.4", "terser": "^5.16.4",
"typescript": "4.9.5",
"vite": "^4.1.2", "vite": "^4.1.2",
"vite-plugin-eslint": "^1.8.1", "vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.0.5", "vite-tsconfig-paths": "^4.0.5",

View File

@ -18,7 +18,7 @@
"training": "Training", "training": "Training",
"trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.", "trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.",
"trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.", "trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.",
"upload": "Upload", "upload": "Hochladen",
"close": "Schließen", "close": "Schließen",
"load": "Laden", "load": "Laden",
"statusConnected": "Verbunden", "statusConnected": "Verbunden",
@ -41,12 +41,34 @@
"statusUpscaling": "Hochskalierung", "statusUpscaling": "Hochskalierung",
"statusUpscalingESRGAN": "Hochskalierung (ESRGAN)", "statusUpscalingESRGAN": "Hochskalierung (ESRGAN)",
"statusLoadingModel": "Laden des Modells", "statusLoadingModel": "Laden des Modells",
"statusModelChanged": "Modell Geändert" "statusModelChanged": "Modell Geändert",
"cancel": "Abbruch",
"accept": "Annehmen",
"back": "Zurück",
"langEnglish": "Englisch",
"langDutch": "Niederländisch",
"langFrench": "Französisch",
"oceanTheme": "Ozean",
"langItalian": "Italienisch",
"langPortuguese": "Portogisisch",
"langRussian": "Russisch",
"langUkranian": "Ukrainisch",
"hotkeysLabel": "Tastenkombinationen",
"githubLabel": "Github",
"discordLabel": "Discord",
"txt2img": "Text zu Bild",
"postprocessing": "Nachbearbeitung",
"langPolish": "Polnisch",
"langJapanese": "Japanisch",
"langArabic": "Arabisch",
"langKorean": "Koreanisch",
"langHebrew": "Hebräisch",
"langSpanish": "Spanisch"
}, },
"gallery": { "gallery": {
"generations": "Erzeugungen", "generations": "Erzeugungen",
"showGenerations": "Zeige Erzeugnisse", "showGenerations": "Zeige Erzeugnisse",
"uploads": "Uploads", "uploads": "Hochgelades",
"showUploads": "Zeige Uploads", "showUploads": "Zeige Uploads",
"galleryImageSize": "Bildgröße", "galleryImageSize": "Bildgröße",
"galleryImageResetSize": "Größe zurücksetzen", "galleryImageResetSize": "Größe zurücksetzen",
@ -312,7 +334,11 @@
"deleteModel": "Model löschen", "deleteModel": "Model löschen",
"deleteConfig": "Konfiguration löschen", "deleteConfig": "Konfiguration löschen",
"deleteMsg1": "Möchten Sie diesen Model-Eintrag wirklich aus InvokeAI löschen?", "deleteMsg1": "Möchten Sie diesen Model-Eintrag wirklich aus InvokeAI löschen?",
"deleteMsg2": "Dadurch wird die Modellprüfpunktdatei nicht von Ihrer Festplatte gelöscht. Sie können sie bei Bedarf erneut hinzufügen." "deleteMsg2": "Dadurch wird die Modellprüfpunktdatei nicht von Ihrer Festplatte gelöscht. Sie können sie bei Bedarf erneut hinzufügen.",
"customConfig": "Benutzerdefinierte Konfiguration",
"invokeRoot": "InvokeAI Ordner",
"formMessageDiffusersVAELocationDesc": "Falls nicht angegeben, sucht InvokeAI nach der VAE-Datei innerhalb des oben angegebenen Modell Speicherortes.",
"checkpointModels": "Kontrollpunkte"
}, },
"parameters": { "parameters": {
"images": "Bilder", "images": "Bilder",
@ -370,7 +396,10 @@
"useInitImg": "Ausgangsbild verwenden", "useInitImg": "Ausgangsbild verwenden",
"deleteImage": "Bild löschen", "deleteImage": "Bild löschen",
"initialImage": "Ursprüngliches Bild", "initialImage": "Ursprüngliches Bild",
"showOptionsPanel": "Optionsleiste zeigen" "showOptionsPanel": "Optionsleiste zeigen",
"cancel": {
"setType": "Abbruchart festlegen"
}
}, },
"settings": { "settings": {
"displayInProgress": "Bilder in Bearbeitung anzeigen", "displayInProgress": "Bilder in Bearbeitung anzeigen",
@ -489,5 +518,25 @@
"betaDarkenOutside": "Außen abdunkeln", "betaDarkenOutside": "Außen abdunkeln",
"betaLimitToBox": "Begrenzung auf das Feld", "betaLimitToBox": "Begrenzung auf das Feld",
"betaPreserveMasked": "Maskiertes bewahren" "betaPreserveMasked": "Maskiertes bewahren"
},
"accessibility": {
"modelSelect": "Model Auswahl",
"uploadImage": "Bild hochladen",
"previousImage": "Voriges Bild",
"useThisParameter": "Benutze diesen Parameter",
"copyMetadataJson": "Kopiere metadata JSON",
"zoomIn": "Vergrößern",
"rotateClockwise": "Im Uhrzeigersinn drehen",
"flipHorizontally": "Horizontal drehen",
"flipVertically": "Vertikal drehen",
"modifyConfig": "Optionen einstellen",
"toggleAutoscroll": "Auroscroll ein/ausschalten",
"toggleLogViewer": "Log Betrachter ein/ausschalten",
"showGallery": "Zeige Galerie",
"showOptionsPanel": "Zeige Optionen",
"reset": "Zurücksetzen",
"nextImage": "Nächstes Bild",
"zoomOut": "Verkleinern",
"rotateCounterClockwise": "Gegen den Uhrzeigersinn verdrehen"
} }
} }

View File

@ -8,7 +8,7 @@
"nextImage": "Next Image", "nextImage": "Next Image",
"useThisParameter": "Use this parameter", "useThisParameter": "Use this parameter",
"copyMetadataJson": "Copy metadata JSON", "copyMetadataJson": "Copy metadata JSON",
"exitViewer": "ExitViewer", "exitViewer": "Exit Viewer",
"zoomIn": "Zoom In", "zoomIn": "Zoom In",
"zoomOut": "Zoom Out", "zoomOut": "Zoom Out",
"rotateCounterClockwise": "Rotate Counter-Clockwise", "rotateCounterClockwise": "Rotate Counter-Clockwise",
@ -19,7 +19,8 @@
"toggleAutoscroll": "Toggle autoscroll", "toggleAutoscroll": "Toggle autoscroll",
"toggleLogViewer": "Toggle Log Viewer", "toggleLogViewer": "Toggle Log Viewer",
"showGallery": "Show Gallery", "showGallery": "Show Gallery",
"showOptionsPanel": "Show Options Panel" "showOptionsPanel": "Show Options Panel",
"menu": "Menu"
}, },
"common": { "common": {
"hotkeysLabel": "Hotkeys", "hotkeysLabel": "Hotkeys",
@ -52,6 +53,7 @@
"txt2img": "Text To Image", "txt2img": "Text To Image",
"img2img": "Image To Image", "img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas", "unifiedCanvas": "Unified Canvas",
"linear": "Linear",
"nodes": "Nodes", "nodes": "Nodes",
"postprocessing": "Post Processing", "postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.", "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
@ -505,7 +507,9 @@
"info": "Info", "info": "Info",
"deleteImage": "Delete Image", "deleteImage": "Delete Image",
"initialImage": "Initial Image", "initialImage": "Initial Image",
"showOptionsPanel": "Show Options Panel" "showOptionsPanel": "Show Options Panel",
"hidePreview": "Hide Preview",
"showPreview": "Show Preview"
}, },
"settings": { "settings": {
"models": "Models", "models": "Models",
@ -522,6 +526,10 @@
"resetComplete": "Web UI has been reset. Refresh the page to reload." "resetComplete": "Web UI has been reset. Refresh the page to reload."
}, },
"toast": { "toast": {
"serverError": "Server Error",
"disconnected": "Disconnected from Server",
"connected": "Connected to Server",
"canceled": "Processing Canceled",
"tempFoldersEmptied": "Temp Folder Emptied", "tempFoldersEmptied": "Temp Folder Emptied",
"uploadFailed": "Upload failed", "uploadFailed": "Upload failed",
"uploadFailedMultipleImagesDesc": "Multiple images pasted, may only upload one image at a time", "uploadFailedMultipleImagesDesc": "Multiple images pasted, may only upload one image at a time",

View File

@ -73,7 +73,8 @@
"postprocessing": "Tratamiento posterior", "postprocessing": "Tratamiento posterior",
"txt2img": "De texto a imagen", "txt2img": "De texto a imagen",
"accept": "Aceptar", "accept": "Aceptar",
"cancel": "Cancelar" "cancel": "Cancelar",
"linear": "Lineal"
}, },
"gallery": { "gallery": {
"generations": "Generaciones", "generations": "Generaciones",
@ -483,7 +484,9 @@
"negativePrompts": "Preguntas negativas", "negativePrompts": "Preguntas negativas",
"imageToImage": "Imagen a imagen", "imageToImage": "Imagen a imagen",
"denoisingStrength": "Intensidad de la eliminación del ruido", "denoisingStrength": "Intensidad de la eliminación del ruido",
"hiresStrength": "Alta resistencia" "hiresStrength": "Alta resistencia",
"showPreview": "Mostrar la vista previa",
"hidePreview": "Ocultar la vista previa"
}, },
"settings": { "settings": {
"models": "Modelos", "models": "Modelos",
@ -529,7 +532,11 @@
"metadataLoadFailed": "Error al cargar metadatos", "metadataLoadFailed": "Error al cargar metadatos",
"initialImageSet": "Imágen inicial establecida", "initialImageSet": "Imágen inicial establecida",
"initialImageNotSet": "Imagen inicial no establecida", "initialImageNotSet": "Imagen inicial no establecida",
"initialImageNotSetDesc": "Error al establecer la imágen inicial" "initialImageNotSetDesc": "Error al establecer la imágen inicial",
"serverError": "Error en el servidor",
"disconnected": "Desconectado del servidor",
"canceled": "Procesando la cancelación",
"connected": "Conectado al servidor"
}, },
"tooltip": { "tooltip": {
"feature": { "feature": {
@ -625,6 +632,7 @@
"toggleAutoscroll": "Activar el autodesplazamiento", "toggleAutoscroll": "Activar el autodesplazamiento",
"toggleLogViewer": "Alternar el visor de registros", "toggleLogViewer": "Alternar el visor de registros",
"showGallery": "Mostrar galería", "showGallery": "Mostrar galería",
"showOptionsPanel": "Mostrar el panel de opciones" "showOptionsPanel": "Mostrar el panel de opciones",
"menu": "Menú"
} }
} }

View File

@ -0,0 +1,122 @@
{
"accessibility": {
"reset": "Resetoi",
"useThisParameter": "Käytä tätä parametria",
"modelSelect": "Mallin Valinta",
"exitViewer": "Poistu katselimesta",
"uploadImage": "Lataa kuva",
"copyMetadataJson": "Kopioi metadata JSON:iin",
"invokeProgressBar": "Invoken edistymispalkki",
"nextImage": "Seuraava kuva",
"previousImage": "Edellinen kuva",
"zoomIn": "Lähennä",
"flipHorizontally": "Käännä vaakasuoraan",
"zoomOut": "Loitonna",
"rotateCounterClockwise": "Kierrä vastapäivään",
"rotateClockwise": "Kierrä myötäpäivään",
"flipVertically": "Käännä pystysuoraan",
"showGallery": "Näytä galleria",
"modifyConfig": "Muokkaa konfiguraatiota",
"toggleAutoscroll": "Kytke automaattinen vieritys",
"toggleLogViewer": "Kytke lokin katselutila",
"showOptionsPanel": "Näytä asetukset"
},
"common": {
"postProcessDesc2": "Erillinen käyttöliittymä tullaan julkaisemaan helpottaaksemme työnkulkua jälkikäsittelyssä.",
"training": "Kouluta",
"statusLoadingModel": "Ladataan mallia",
"statusModelChanged": "Malli vaihdettu",
"statusConvertingModel": "Muunnetaan mallia",
"statusModelConverted": "Malli muunnettu",
"langFrench": "Ranska",
"langItalian": "Italia",
"languagePickerLabel": "Kielen valinta",
"hotkeysLabel": "Pikanäppäimet",
"reportBugLabel": "Raportoi Bugista",
"langPolish": "Puola",
"themeLabel": "Teema",
"langDutch": "Hollanti",
"settingsLabel": "Asetukset",
"githubLabel": "Github",
"darkTheme": "Tumma",
"lightTheme": "Vaalea",
"greenTheme": "Vihreä",
"langGerman": "Saksa",
"langPortuguese": "Portugali",
"discordLabel": "Discord",
"langEnglish": "Englanti",
"oceanTheme": "Meren sininen",
"langRussian": "Venäjä",
"langUkranian": "Ukraina",
"langSpanish": "Espanja",
"upload": "Lataa",
"statusMergedModels": "Mallit yhdistelty",
"img2img": "Kuva kuvaksi",
"nodes": "Solmut",
"nodesDesc": "Solmupohjainen järjestelmä kuvien generoimiseen on parhaillaan kehitteillä. Pysy kuulolla päivityksistä tähän uskomattomaan ominaisuuteen liittyen.",
"postProcessDesc1": "Invoke AI tarjoaa monenlaisia jälkikäsittelyominaisuukisa. Kuvan laadun skaalaus sekä kasvojen korjaus ovat jo saatavilla WebUI:ssä. Voit ottaa ne käyttöön lisäasetusten valikosta teksti kuvaksi sekä kuva kuvaksi -välilehdiltä. Voit myös suoraan prosessoida kuvia käyttämällä kuvan toimintapainikkeita nykyisen kuvan yläpuolella tai tarkastelussa.",
"postprocessing": "Jälkikäsitellään",
"postProcessing": "Jälkikäsitellään",
"cancel": "Peruuta",
"close": "Sulje",
"accept": "Hyväksy",
"statusConnected": "Yhdistetty",
"statusError": "Virhe",
"statusProcessingComplete": "Prosessointi valmis",
"load": "Lataa",
"back": "Takaisin",
"statusGeneratingTextToImage": "Generoidaan tekstiä kuvaksi",
"trainingDesc2": "InvokeAI tukee jo mukautettujen upotusten kouluttamista tekstin inversiolla käyttäen pääskriptiä.",
"statusDisconnected": "Yhteys katkaistu",
"statusPreparing": "Valmistellaan",
"statusIterationComplete": "Iteraatio valmis",
"statusMergingModels": "Yhdistellään malleja",
"statusProcessingCanceled": "Valmistelu peruutettu",
"statusSavingImage": "Tallennetaan kuvaa",
"statusGeneratingImageToImage": "Generoidaan kuvaa kuvaksi",
"statusRestoringFacesGFPGAN": "Korjataan kasvoja (GFPGAN)",
"statusRestoringFacesCodeFormer": "Korjataan kasvoja (CodeFormer)",
"statusGeneratingInpainting": "Generoidaan sisällemaalausta",
"statusGeneratingOutpainting": "Generoidaan ulosmaalausta",
"statusRestoringFaces": "Korjataan kasvoja",
"pinOptionsPanel": "Kiinnitä asetukset -paneeli",
"loadingInvokeAI": "Ladataan Invoke AI:ta",
"loading": "Ladataan",
"statusGenerating": "Generoidaan",
"txt2img": "Teksti kuvaksi",
"trainingDesc1": "Erillinen työnkulku omien upotusten ja tarkastuspisteiden kouluttamiseksi käyttäen tekstin inversiota ja dreamboothia selaimen käyttöliittymässä.",
"postProcessDesc3": "Invoke AI:n komentorivi tarjoaa paljon muita ominaisuuksia, kuten esimerkiksi Embiggenin.",
"unifiedCanvas": "Yhdistetty kanvas",
"statusGenerationComplete": "Generointi valmis"
},
"gallery": {
"uploads": "Lataukset",
"showUploads": "Näytä lataukset",
"galleryImageResetSize": "Resetoi koko",
"maintainAspectRatio": "Säilytä kuvasuhde",
"galleryImageSize": "Kuvan koko",
"pinGallery": "Kiinnitä galleria",
"showGenerations": "Näytä generaatiot",
"singleColumnLayout": "Yhden sarakkeen asettelu",
"generations": "Generoinnit",
"gallerySettings": "Gallerian asetukset",
"autoSwitchNewImages": "Vaihda uusiin kuviin automaattisesti",
"allImagesLoaded": "Kaikki kuvat ladattu",
"noImagesInGallery": "Ei kuvia galleriassa",
"loadMore": "Lataa lisää"
},
"hotkeys": {
"keyboardShortcuts": "näppäimistön pikavalinnat",
"appHotkeys": "Sovelluksen pikanäppäimet",
"generalHotkeys": "Yleiset pikanäppäimet",
"galleryHotkeys": "Gallerian pikanäppäimet",
"unifiedCanvasHotkeys": "Yhdistetyn kanvaan pikanäppäimet",
"cancel": {
"desc": "Peruuta kuvan luominen",
"title": "Peruuta"
},
"invoke": {
"desc": "Luo kuva"
}
}
}

View File

@ -73,7 +73,8 @@
"postprocessing": "Post Elaborazione", "postprocessing": "Post Elaborazione",
"txt2img": "Testo a Immagine", "txt2img": "Testo a Immagine",
"accept": "Accetta", "accept": "Accetta",
"cancel": "Annulla" "cancel": "Annulla",
"linear": "Lineare"
}, },
"gallery": { "gallery": {
"generations": "Generazioni", "generations": "Generazioni",
@ -483,7 +484,9 @@
}, },
"hSymmetryStep": "Passi Simmetria Orizzontale", "hSymmetryStep": "Passi Simmetria Orizzontale",
"vSymmetryStep": "Passi Simmetria Verticale", "vSymmetryStep": "Passi Simmetria Verticale",
"symmetry": "Simmetria" "symmetry": "Simmetria",
"hidePreview": "Nascondi l'anteprima",
"showPreview": "Mostra l'anteprima"
}, },
"settings": { "settings": {
"models": "Modelli", "models": "Modelli",
@ -529,7 +532,11 @@
"metadataLoadFailed": "Impossibile caricare i metadati", "metadataLoadFailed": "Impossibile caricare i metadati",
"initialImageSet": "Immagine iniziale impostata", "initialImageSet": "Immagine iniziale impostata",
"initialImageNotSet": "Immagine iniziale non impostata", "initialImageNotSet": "Immagine iniziale non impostata",
"initialImageNotSetDesc": "Impossibile caricare l'immagine iniziale" "initialImageNotSetDesc": "Impossibile caricare l'immagine iniziale",
"serverError": "Errore del Server",
"disconnected": "Disconnesso dal Server",
"connected": "Connesso al Server",
"canceled": "Elaborazione annullata"
}, },
"tooltip": { "tooltip": {
"feature": { "feature": {
@ -625,6 +632,7 @@
"showOptionsPanel": "Mostra il pannello opzioni", "showOptionsPanel": "Mostra il pannello opzioni",
"flipVertically": "Capovolgi verticalmente", "flipVertically": "Capovolgi verticalmente",
"toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico", "toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico",
"modifyConfig": "Modifica configurazione" "modifyConfig": "Modifica configurazione",
"menu": "Menu"
} }
} }

View File

@ -37,7 +37,43 @@
"statusUpscaling": "アップスケーリング", "statusUpscaling": "アップスケーリング",
"statusUpscalingESRGAN": "アップスケーリング (ESRGAN)", "statusUpscalingESRGAN": "アップスケーリング (ESRGAN)",
"statusLoadingModel": "モデルを読み込む", "statusLoadingModel": "モデルを読み込む",
"statusModelChanged": "モデルを変更" "statusModelChanged": "モデルを変更",
"cancel": "キャンセル",
"accept": "同意",
"langBrPortuguese": "Português do Brasil",
"langRussian": "Русский",
"langSimplifiedChinese": "简体中文",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"img2img": "img2img",
"unifiedCanvas": "Unified Canvas",
"statusMergingModels": "モデルのマージ",
"statusModelConverted": "変換済モデル",
"statusGeneratingInpainting": "Inpaintingを生成",
"statusIterationComplete": "Iteration Complete",
"statusGeneratingOutpainting": "Outpaintingを生成",
"loading": "ロード中",
"loadingInvokeAI": "Invoke AIをロード中",
"statusConvertingModel": "モデルの変換",
"statusMergedModels": "マージ済モデル",
"pinOptionsPanel": "オプションパネルを固定",
"githubLabel": "Github",
"hotkeysLabel": "ホットキー",
"langHebrew": "עברית",
"discordLabel": "Discord",
"langItalian": "Italiano",
"langEnglish": "English",
"oceanTheme": "オーシャン",
"langArabic": "アラビア語",
"langDutch": "Nederlands",
"langFrench": "Français",
"langGerman": "Deutsch",
"langPortuguese": "Português",
"nodes": "ノード",
"langKorean": "한국어",
"langPolish": "Polski",
"txt2img": "txt2img",
"postprocessing": "Post Processing"
}, },
"gallery": { "gallery": {
"uploads": "アップロード", "uploads": "アップロード",
@ -46,11 +82,14 @@
"galleryImageResetSize": "サイズをリセット", "galleryImageResetSize": "サイズをリセット",
"gallerySettings": "ギャラリーの設定", "gallerySettings": "ギャラリーの設定",
"maintainAspectRatio": "アスペクト比を維持", "maintainAspectRatio": "アスペクト比を維持",
"singleColumnLayout": "シングルカラムレイアウト", "singleColumnLayout": "1カラムレイアウト",
"pinGallery": "ギャラリーにピン留め", "pinGallery": "ギャラリーにピン留め",
"allImagesLoaded": "すべての画像を読み込む", "allImagesLoaded": "すべての画像を読み込む",
"loadMore": "さらに読み込む", "loadMore": "さらに読み込む",
"noImagesInGallery": "ギャラリーに画像がありません" "noImagesInGallery": "ギャラリーに画像がありません",
"generations": "生成",
"showGenerations": "生成過程を見る",
"autoSwitchNewImages": "新しい画像に自動切替"
}, },
"hotkeys": { "hotkeys": {
"keyboardShortcuts": "キーボードショートカット", "keyboardShortcuts": "キーボードショートカット",
@ -59,14 +98,16 @@
"galleryHotkeys": "ギャラリーのホットキー", "galleryHotkeys": "ギャラリーのホットキー",
"unifiedCanvasHotkeys": "Unified Canvasのホットキー", "unifiedCanvasHotkeys": "Unified Canvasのホットキー",
"invoke": { "invoke": {
"desc": "画像を生成" "desc": "画像を生成",
"title": "Invoke"
}, },
"cancel": { "cancel": {
"title": "キャンセル", "title": "キャンセル",
"desc": "画像の生成をキャンセル" "desc": "画像の生成をキャンセル"
}, },
"focusPrompt": { "focusPrompt": {
"desc": "プロンプトテキストボックスにフォーカス" "desc": "プロンプトテキストボックスにフォーカス",
"title": "プロジェクトにフォーカス"
}, },
"toggleOptions": { "toggleOptions": {
"title": "オプションパネルのトグル", "title": "オプションパネルのトグル",
@ -410,5 +451,27 @@
"accept": "同意", "accept": "同意",
"showHide": "表示/非表示", "showHide": "表示/非表示",
"discardAll": "すべて破棄" "discardAll": "すべて破棄"
},
"accessibility": {
"modelSelect": "モデルを選択",
"invokeProgressBar": "進捗バー",
"reset": "リセット",
"uploadImage": "画像をアップロード",
"previousImage": "前の画像",
"nextImage": "次の画像",
"useThisParameter": "このパラメータを使用する",
"copyMetadataJson": "メタデータをコピー(JSON)",
"zoomIn": "ズームイン",
"exitViewer": "ExitViewer",
"zoomOut": "ズームアウト",
"rotateCounterClockwise": "反時計回りに回転",
"rotateClockwise": "時計回りに回転",
"flipHorizontally": "水平方向に反転",
"flipVertically": "垂直方向に反転",
"toggleAutoscroll": "自動スクロールの切替",
"modifyConfig": "Modify Config",
"toggleLogViewer": "Log Viewerの切替",
"showGallery": "ギャラリーを表示",
"showOptionsPanel": "オプションパネルを表示"
} }
} }

View File

@ -0,0 +1 @@
{}

View File

@ -62,7 +62,18 @@
"statusConvertingModel": "Omzetten van model", "statusConvertingModel": "Omzetten van model",
"statusModelConverted": "Model omgezet", "statusModelConverted": "Model omgezet",
"statusMergingModels": "Samenvoegen van modellen", "statusMergingModels": "Samenvoegen van modellen",
"statusMergedModels": "Modellen samengevoegd" "statusMergedModels": "Modellen samengevoegd",
"cancel": "Annuleer",
"accept": "Akkoord",
"langPortuguese": "Português",
"pinOptionsPanel": "Zet deelscherm Opties vast",
"loading": "Bezig met laden",
"loadingInvokeAI": "Bezig met laden van Invoke AI",
"oceanTheme": "Oceaan",
"langHebrew": "עברית",
"langKorean": "한국어",
"txt2img": "Tekst naar afbeelding",
"postprocessing": "Nabewerking"
}, },
"gallery": { "gallery": {
"generations": "Gegenereerde afbeeldingen", "generations": "Gegenereerde afbeeldingen",
@ -301,7 +312,7 @@
"name": "Naam", "name": "Naam",
"nameValidationMsg": "Geef een naam voor je model", "nameValidationMsg": "Geef een naam voor je model",
"description": "Beschrijving", "description": "Beschrijving",
"descriptionValidationMsg": "Voeg een beschrijving toe voor je model.", "descriptionValidationMsg": "Voeg een beschrijving toe voor je model",
"config": "Configuratie", "config": "Configuratie",
"configValidationMsg": "Pad naar het configuratiebestand van je model.", "configValidationMsg": "Pad naar het configuratiebestand van je model.",
"modelLocation": "Locatie model", "modelLocation": "Locatie model",
@ -391,7 +402,13 @@
"modelMergeInterpAddDifferenceHelp": "In deze stand wordt model 3 eerst van model 2 afgehaald. Wat daar uitkomt wordt gemengd met model 1, gebruikmakend van de hierboven ingestelde alfawaarde.", "modelMergeInterpAddDifferenceHelp": "In deze stand wordt model 3 eerst van model 2 afgehaald. Wat daar uitkomt wordt gemengd met model 1, gebruikmakend van de hierboven ingestelde alfawaarde.",
"inverseSigmoid": "Keer Sigmoid om", "inverseSigmoid": "Keer Sigmoid om",
"sigmoid": "Sigmoid", "sigmoid": "Sigmoid",
"weightedSum": "Gewogen som" "weightedSum": "Gewogen som",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"none": "geen",
"addDifference": "Voeg verschil toe",
"scanForModels": "Scan naar modellen",
"pickModelType": "Kies modelsoort"
}, },
"parameters": { "parameters": {
"images": "Afbeeldingen", "images": "Afbeeldingen",
@ -561,7 +578,7 @@
"autoSaveToGallery": "Bewaar automatisch naar galerij", "autoSaveToGallery": "Bewaar automatisch naar galerij",
"saveBoxRegionOnly": "Bewaar alleen tekengebied", "saveBoxRegionOnly": "Bewaar alleen tekengebied",
"limitStrokesToBox": "Beperk streken tot tekenvak", "limitStrokesToBox": "Beperk streken tot tekenvak",
"showCanvasDebugInfo": "Toon foutopsporingsgegevens canvas", "showCanvasDebugInfo": "Toon aanvullende canvasgegevens",
"clearCanvasHistory": "Wis canvasgeschiedenis", "clearCanvasHistory": "Wis canvasgeschiedenis",
"clearHistory": "Wis geschiedenis", "clearHistory": "Wis geschiedenis",
"clearCanvasHistoryMessage": "Het wissen van de canvasgeschiedenis laat het huidige canvas ongemoeid, maar wist onherstelbaar de geschiedenis voor het ongedaan maken en herhalen.", "clearCanvasHistoryMessage": "Het wissen van de canvasgeschiedenis laat het huidige canvas ongemoeid, maar wist onherstelbaar de geschiedenis voor het ongedaan maken en herhalen.",
@ -587,5 +604,27 @@
"betaDarkenOutside": "Verduister buiten tekenvak", "betaDarkenOutside": "Verduister buiten tekenvak",
"betaLimitToBox": "Beperk tot tekenvak", "betaLimitToBox": "Beperk tot tekenvak",
"betaPreserveMasked": "Behoud masker" "betaPreserveMasked": "Behoud masker"
},
"accessibility": {
"exitViewer": "Stop viewer",
"zoomIn": "Zoom in",
"rotateCounterClockwise": "Draai tegen de klok in",
"modelSelect": "Modelkeuze",
"invokeProgressBar": "Voortgangsbalk Invoke",
"reset": "Herstel",
"uploadImage": "Upload afbeelding",
"previousImage": "Vorige afbeelding",
"nextImage": "Volgende afbeelding",
"useThisParameter": "Gebruik deze parameter",
"copyMetadataJson": "Kopieer metagegevens-JSON",
"zoomOut": "Zoom uit",
"rotateClockwise": "Draai met de klok mee",
"flipHorizontally": "Spiegel horizontaal",
"flipVertically": "Spiegel verticaal",
"modifyConfig": "Wijzig configuratie",
"toggleAutoscroll": "Autom. scrollen aan/uit",
"toggleLogViewer": "Logboekviewer aan/uit",
"showGallery": "Toon galerij",
"showOptionsPanel": "Toon deelscherm Opties"
} }
} }

View File

@ -9,7 +9,7 @@
"lightTheme": "Светлая", "lightTheme": "Светлая",
"greenTheme": "Зеленая", "greenTheme": "Зеленая",
"img2img": "Изображение в изображение (img2img)", "img2img": "Изображение в изображение (img2img)",
"unifiedCanvas": "Универсальный холст", "unifiedCanvas": "Единый холст",
"nodes": "Ноды", "nodes": "Ноды",
"langRussian": "Русский", "langRussian": "Русский",
"nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. Следите за новостями об этой замечательной функции.", "nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. Следите за новостями об этой замечательной функции.",
@ -53,7 +53,28 @@
"loading": "Загрузка", "loading": "Загрузка",
"loadingInvokeAI": "Загрузка Invoke AI", "loadingInvokeAI": "Загрузка Invoke AI",
"back": "Назад", "back": "Назад",
"statusConvertingModel": "Конвертация модели" "statusConvertingModel": "Конвертация модели",
"cancel": "Отменить",
"accept": "Принять",
"oceanTheme": "Океан",
"langUkranian": "Украинский",
"langEnglish": "Английский",
"postprocessing": "Постобработка",
"langArabic": "Арабский",
"langSpanish": "Испанский",
"langSimplifiedChinese": "Китайский (упрощенный)",
"langDutch": "Нидерландский",
"langFrench": "Французский",
"langGerman": "Немецкий",
"langHebrew": "Иврит",
"langItalian": "Итальянский",
"langJapanese": "Японский",
"langKorean": "Корейский",
"langPolish": "Польский",
"langPortuguese": "Португальский",
"txt2img": "Текст в изображение (txt2img)",
"langBrPortuguese": "Португальский (Бразилия)",
"linear": "Линейная обработка"
}, },
"gallery": { "gallery": {
"generations": "Генерации", "generations": "Генерации",
@ -72,11 +93,11 @@
"noImagesInGallery": "Изображений нет" "noImagesInGallery": "Изображений нет"
}, },
"hotkeys": { "hotkeys": {
"keyboardShortcuts": "Клавиатурные сокращения", "keyboardShortcuts": "Горячие клавиши",
"appHotkeys": "Горячие клавиши приложения", "appHotkeys": "Горячие клавиши приложения",
"generalHotkeys": "Общие горячие клавиши", "generalHotkeys": "Общие горячие клавиши",
"galleryHotkeys": "Горячие клавиши галереи", "galleryHotkeys": "Горячие клавиши галереи",
"unifiedCanvasHotkeys": "Горячие клавиши универсального холста", "unifiedCanvasHotkeys": "Горячие клавиши Единого холста",
"invoke": { "invoke": {
"title": "Invoke", "title": "Invoke",
"desc": "Сгенерировать изображение" "desc": "Сгенерировать изображение"
@ -266,12 +287,12 @@
"desc": "Сбросить вид холста" "desc": "Сбросить вид холста"
}, },
"previousStagingImage": { "previousStagingImage": {
"title": "Previous Staging Image", "title": "Предыдущее изображение",
"desc": "Предыдущее изображение" "desc": "Предыдущая область изображения"
}, },
"nextStagingImage": { "nextStagingImage": {
"title": "Next Staging Image", "title": "Следующее изображение",
"desc": "Следующее изображение" "desc": "Следующая область изображения"
}, },
"acceptStagingImage": { "acceptStagingImage": {
"title": "Принять изображение", "title": "Принять изображение",
@ -353,7 +374,42 @@
"modelConverted": "Модель преобразована", "modelConverted": "Модель преобразована",
"invokeRoot": "Каталог InvokeAI", "invokeRoot": "Каталог InvokeAI",
"modelsMerged": "Модели объединены", "modelsMerged": "Модели объединены",
"mergeModels": "Объединить модели" "mergeModels": "Объединить модели",
"scanForModels": "Просканировать модели",
"sigmoid": "Сигмоид",
"formMessageDiffusersModelLocation": "Расположение Diffusers-модели",
"modelThree": "Модель 3",
"modelMergeHeaderHelp2": "Только Diffusers-модели доступны для объединения. Если вы хотите объединить checkpoint-модели, сначала преобразуйте их в Diffusers.",
"pickModelType": "Выбрать тип модели",
"formMessageDiffusersVAELocation": "Расположение VAE",
"v1": "v1",
"convertToDiffusersSaveLocation": "Путь сохранения",
"customSaveLocation": "Пользовательский путь сохранения",
"alpha": "Альфа",
"diffusersModels": "Diffusers",
"customConfig": "Пользовательский конфиг",
"pathToCustomConfig": "Путь к пользовательскому конфигу",
"inpainting": "v1 Inpainting",
"sameFolder": "В ту же папку",
"modelOne": "Модель 1",
"mergedModelCustomSaveLocation": "Пользовательский путь",
"none": "пусто",
"addDifference": "Добавить разницу",
"vaeRepoIDValidationMsg": "Онлайн репозиторий VAE",
"convertToDiffusersHelpText2": "Этот процесс заменит вашу запись в Model Manager на версию той же модели в Diffusers.",
"custom": "Пользовательский",
"modelTwo": "Модель 2",
"mergedModelSaveLocation": "Путь сохранения",
"merge": "Объединить",
"interpolationType": "Тип интерполяции",
"modelMergeInterpAddDifferenceHelp": "В этом режиме Модель 3 сначала вычитается из Модели 2. Результирующая версия смешивается с Моделью 1 с установленным выше коэффициентом Альфа.",
"modelMergeHeaderHelp1": "Вы можете объединить до трех разных моделей, чтобы создать смешанную, соответствующую вашим потребностям.",
"modelMergeAlphaHelp": "Альфа влияет на силу смешивания моделей. Более низкие значения альфа приводят к меньшему влиянию второй модели.",
"inverseSigmoid": "Обратный Сигмоид",
"weightedSum": "Взвешенная сумма",
"safetensorModels": "SafeTensors",
"v2_768": "v2 (768px)",
"v2_base": "v2 (512px)"
}, },
"parameters": { "parameters": {
"images": "Изображения", "images": "Изображения",
@ -380,7 +436,7 @@
"scale": "Масштаб", "scale": "Масштаб",
"otherOptions": "Другие параметры", "otherOptions": "Другие параметры",
"seamlessTiling": "Бесшовный узор", "seamlessTiling": "Бесшовный узор",
"hiresOptim": "Высокое разрешение", "hiresOptim": "Оптимизация High Res",
"imageFit": "Уместить изображение", "imageFit": "Уместить изображение",
"codeformerFidelity": "Точность", "codeformerFidelity": "Точность",
"seamSize": "Размер шва", "seamSize": "Размер шва",
@ -397,11 +453,11 @@
"infillScalingHeader": "Заполнение и масштабирование", "infillScalingHeader": "Заполнение и масштабирование",
"img2imgStrength": "Сила обработки img2img", "img2imgStrength": "Сила обработки img2img",
"toggleLoopback": "Зациклить обработку", "toggleLoopback": "Зациклить обработку",
"invoke": "Вызвать", "invoke": "Invoke",
"promptPlaceholder": "Введите запрос здесь (на английском). [исключенные токены], (более значимые)++, (менее значимые)--, swap и blend тоже доступны (смотрите Github)", "promptPlaceholder": "Введите запрос здесь (на английском). [исключенные токены], (более значимые)++, (менее значимые)--, swap и blend тоже доступны (смотрите Github)",
"sendTo": "Отправить", "sendTo": "Отправить",
"sendToImg2Img": "Отправить в img2img", "sendToImg2Img": "Отправить в img2img",
"sendToUnifiedCanvas": "Отправить на холст", "sendToUnifiedCanvas": "Отправить на Единый холст",
"copyImageToLink": "Скопировать ссылку", "copyImageToLink": "Скопировать ссылку",
"downloadImage": "Скачать", "downloadImage": "Скачать",
"openInViewer": "Открыть в просмотрщике", "openInViewer": "Открыть в просмотрщике",
@ -413,7 +469,24 @@
"info": "Метаданные", "info": "Метаданные",
"deleteImage": "Удалить изображение", "deleteImage": "Удалить изображение",
"initialImage": "Исходное изображение", "initialImage": "Исходное изображение",
"showOptionsPanel": "Показать панель настроек" "showOptionsPanel": "Показать панель настроек",
"vSymmetryStep": "Шаг верт. симметрии",
"cancel": {
"immediate": "Отменить немедленно",
"schedule": "Отменить после текущей итерации",
"isScheduled": "Отмена",
"setType": "Установить тип отмены"
},
"general": "Основное",
"hiresStrength": "Сила High Res",
"symmetry": "Симметрия",
"hSymmetryStep": "Шаг гор. симметрии",
"hidePreview": "Скрыть предпросмотр",
"imageToImage": "Изображение в изображение",
"denoisingStrength": "Сила шумоподавления",
"copyImage": "Скопировать изображение",
"negativePrompts": "Исключающий запрос",
"showPreview": "Показать предпросмотр"
}, },
"settings": { "settings": {
"models": "Модели", "models": "Модели",
@ -423,10 +496,11 @@
"displayHelpIcons": "Показывать значки подсказок", "displayHelpIcons": "Показывать значки подсказок",
"useCanvasBeta": "Показывать инструменты слева (Beta UI)", "useCanvasBeta": "Показывать инструменты слева (Beta UI)",
"enableImageDebugging": "Включить отладку", "enableImageDebugging": "Включить отладку",
"resetWebUI": "Вернуть умолчания", "resetWebUI": "Сброс настроек Web UI",
"resetWebUIDesc1": "Сброс настроек веб-интерфейса удаляет только локальный кэш браузера с вашими изображениями и настройками. Он не удаляет изображения с диска.", "resetWebUIDesc1": "Сброс настроек веб-интерфейса удаляет только локальный кэш браузера с вашими изображениями и настройками. Он не удаляет изображения с диска.",
"resetWebUIDesc2": "Если изображения не отображаются в галерее или не работает что-то еще, пожалуйста, попробуйте сбросить настройки, прежде чем сообщать о проблеме на GitHub.", "resetWebUIDesc2": "Если изображения не отображаются в галерее или не работает что-то еще, пожалуйста, попробуйте сбросить настройки, прежде чем сообщать о проблеме на GitHub.",
"resetComplete": "Интерфейс сброшен. Обновите эту страницу." "resetComplete": "Интерфейс сброшен. Обновите эту страницу.",
"useSlidersForAll": "Использовать ползунки для всех параметров"
}, },
"toast": { "toast": {
"tempFoldersEmptied": "Временная папка очищена", "tempFoldersEmptied": "Временная папка очищена",
@ -441,7 +515,7 @@
"imageSavedToGallery": "Изображение сохранено в галерею", "imageSavedToGallery": "Изображение сохранено в галерею",
"canvasMerged": "Холст объединен", "canvasMerged": "Холст объединен",
"sentToImageToImage": "Отправить в img2img", "sentToImageToImage": "Отправить в img2img",
"sentToUnifiedCanvas": "Отправить на холст", "sentToUnifiedCanvas": "Отправлено на Единый холст",
"parametersSet": "Параметры заданы", "parametersSet": "Параметры заданы",
"parametersNotSet": "Параметры не заданы", "parametersNotSet": "Параметры не заданы",
"parametersNotSetDesc": "Не найдены метаданные изображения.", "parametersNotSetDesc": "Не найдены метаданные изображения.",
@ -458,7 +532,11 @@
"metadataLoadFailed": "Не удалось загрузить метаданные", "metadataLoadFailed": "Не удалось загрузить метаданные",
"initialImageSet": "Исходное изображение задано", "initialImageSet": "Исходное изображение задано",
"initialImageNotSet": "Исходное изображение не задано", "initialImageNotSet": "Исходное изображение не задано",
"initialImageNotSetDesc": "Не получилось загрузить исходное изображение" "initialImageNotSetDesc": "Не получилось загрузить исходное изображение",
"serverError": "Ошибка сервера",
"disconnected": "Отключено от сервера",
"connected": "Подключено к серверу",
"canceled": "Обработка отменена"
}, },
"tooltip": { "tooltip": {
"feature": { "feature": {
@ -507,7 +585,7 @@
"autoSaveToGallery": "Автосохранение в галерее", "autoSaveToGallery": "Автосохранение в галерее",
"saveBoxRegionOnly": "Сохранять только выделение", "saveBoxRegionOnly": "Сохранять только выделение",
"limitStrokesToBox": "Ограничить штрихи выделением", "limitStrokesToBox": "Ограничить штрихи выделением",
"showCanvasDebugInfo": "Показать отладку холста", "showCanvasDebugInfo": "Показать доп. информацию о холсте",
"clearCanvasHistory": "Очистить историю холста", "clearCanvasHistory": "Очистить историю холста",
"clearHistory": "Очистить историю", "clearHistory": "Очистить историю",
"clearCanvasHistoryMessage": "Очистка истории холста оставляет текущий холст нетронутым, но удаляет историю отмен и повторов.", "clearCanvasHistoryMessage": "Очистка истории холста оставляет текущий холст нетронутым, но удаляет историю отмен и повторов.",
@ -535,6 +613,26 @@
"betaPreserveMasked": "Сохранять маскируемую область" "betaPreserveMasked": "Сохранять маскируемую область"
}, },
"accessibility": { "accessibility": {
"modelSelect": "Выбор модели" "modelSelect": "Выбор модели",
"uploadImage": "Загрузить изображение",
"nextImage": "Следующее изображение",
"previousImage": "Предыдущее изображение",
"zoomIn": "Приблизить",
"zoomOut": "Отдалить",
"rotateClockwise": "Повернуть по часовой стрелке",
"rotateCounterClockwise": "Повернуть против часовой стрелки",
"flipVertically": "Перевернуть вертикально",
"flipHorizontally": "Отразить горизонтально",
"toggleAutoscroll": "Включить автопрокрутку",
"toggleLogViewer": "Показать или скрыть просмотрщик логов",
"showOptionsPanel": "Показать опции",
"showGallery": "Показать галерею",
"invokeProgressBar": "Индикатор выполнения",
"reset": "Сброс",
"modifyConfig": "Изменить конфиг",
"useThisParameter": "Использовать этот параметр",
"copyMetadataJson": "Скопировать метаданные JSON",
"exitViewer": "Закрыть просмотрщик",
"menu": "Меню"
} }
} }

View File

@ -0,0 +1,254 @@
{
"accessibility": {
"copyMetadataJson": "Kopiera metadata JSON",
"zoomIn": "Zooma in",
"exitViewer": "Avslutningsvisare",
"modelSelect": "Välj modell",
"uploadImage": "Ladda upp bild",
"invokeProgressBar": "Invoke förloppsmätare",
"nextImage": "Nästa bild",
"toggleAutoscroll": "Växla automatisk rullning",
"flipHorizontally": "Vänd vågrätt",
"flipVertically": "Vänd lodrätt",
"zoomOut": "Zooma ut",
"toggleLogViewer": "Växla logvisare",
"reset": "Starta om",
"previousImage": "Föregående bild",
"useThisParameter": "Använd denna parametern",
"showGallery": "Visa galleri",
"rotateCounterClockwise": "Rotera moturs",
"rotateClockwise": "Rotera medurs",
"modifyConfig": "Ändra konfiguration",
"showOptionsPanel": "Visa inställningspanelen"
},
"common": {
"hotkeysLabel": "Snabbtangenter",
"reportBugLabel": "Rapportera bugg",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Inställningar",
"darkTheme": "Mörk",
"lightTheme": "Ljus",
"greenTheme": "Grön",
"oceanTheme": "Hav",
"langEnglish": "Engelska",
"langDutch": "Nederländska",
"langFrench": "Franska",
"langGerman": "Tyska",
"langItalian": "Italienska",
"langArabic": "العربية",
"langHebrew": "עברית",
"langPolish": "Polski",
"langPortuguese": "Português",
"langBrPortuguese": "Português do Brasil",
"langSimplifiedChinese": "简体中文",
"langJapanese": "日本語",
"langKorean": "한국어",
"langRussian": "Русский",
"unifiedCanvas": "Förenad kanvas",
"nodesDesc": "Ett nodbaserat system för bildgenerering är under utveckling. Håll utkik för uppdateringar om denna fantastiska funktion.",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"postProcessDesc2": "Ett dedikerat användargränssnitt kommer snart att släppas för att underlätta mer avancerade arbetsflöden av efterbehandling.",
"trainingDesc1": "Ett dedikerat arbetsflöde för träning av dina egna inbäddningar och kontrollpunkter genom Textual Inversion eller Dreambooth från webbgränssnittet.",
"trainingDesc2": "InvokeAI stöder redan träning av anpassade inbäddningar med hjälp av Textual Inversion genom huvudscriptet.",
"upload": "Ladda upp",
"close": "Stäng",
"cancel": "Avbryt",
"accept": "Acceptera",
"statusDisconnected": "Frånkopplad",
"statusGeneratingTextToImage": "Genererar text till bild",
"statusGeneratingImageToImage": "Genererar Bild till bild",
"statusGeneratingInpainting": "Genererar Måla i",
"statusGenerationComplete": "Generering klar",
"statusModelConverted": "Modell konverterad",
"statusMergingModels": "Sammanfogar modeller",
"pinOptionsPanel": "Nåla fast inställningspanelen",
"loading": "Laddar",
"loadingInvokeAI": "Laddar Invoke AI",
"statusRestoringFaces": "Återskapar ansikten",
"languagePickerLabel": "Språkväljare",
"themeLabel": "Tema",
"txt2img": "Text till bild",
"nodes": "Noder",
"img2img": "Bild till bild",
"postprocessing": "Efterbehandling",
"postProcessing": "Efterbehandling",
"load": "Ladda",
"training": "Träning",
"postProcessDesc1": "Invoke AI erbjuder ett brett utbud av efterbehandlingsfunktioner. Uppskalning och ansiktsåterställning finns redan tillgängligt i webbgränssnittet. Du kommer åt dem ifrån Avancerade inställningar-menyn under Bild till bild-fliken. Du kan också behandla bilder direkt genom att använda knappen bildåtgärder ovanför nuvarande bild eller i bildvisaren.",
"postProcessDesc3": "Invoke AI's kommandotolk erbjuder många olika funktioner, bland annat \"Förstora\".",
"statusGenerating": "Genererar",
"statusError": "Fel",
"back": "Bakåt",
"statusConnected": "Ansluten",
"statusPreparing": "Förbereder",
"statusProcessingCanceled": "Bearbetning avbruten",
"statusProcessingComplete": "Bearbetning färdig",
"statusGeneratingOutpainting": "Genererar Fyll ut",
"statusIterationComplete": "Itterering klar",
"statusSavingImage": "Sparar bild",
"statusRestoringFacesGFPGAN": "Återskapar ansikten (GFPGAN)",
"statusRestoringFacesCodeFormer": "Återskapar ansikten (CodeFormer)",
"statusUpscaling": "Skala upp",
"statusUpscalingESRGAN": "Uppskalning (ESRGAN)",
"statusModelChanged": "Modell ändrad",
"statusLoadingModel": "Laddar modell",
"statusConvertingModel": "Konverterar modell",
"statusMergedModels": "Modeller sammanfogade"
},
"gallery": {
"generations": "Generationer",
"showGenerations": "Visa generationer",
"uploads": "Uppladdningar",
"showUploads": "Visa uppladdningar",
"galleryImageSize": "Bildstorlek",
"allImagesLoaded": "Alla bilder laddade",
"loadMore": "Ladda mer",
"galleryImageResetSize": "Återställ storlek",
"gallerySettings": "Galleriinställningar",
"maintainAspectRatio": "Behåll bildförhållande",
"pinGallery": "Nåla fast galleri",
"noImagesInGallery": "Inga bilder i galleriet",
"autoSwitchNewImages": "Ändra automatiskt till nya bilder",
"singleColumnLayout": "Enkolumnslayout"
},
"hotkeys": {
"generalHotkeys": "Allmänna snabbtangenter",
"galleryHotkeys": "Gallerisnabbtangenter",
"unifiedCanvasHotkeys": "Snabbtangenter för sammanslagskanvas",
"invoke": {
"title": "Anropa",
"desc": "Genererar en bild"
},
"cancel": {
"title": "Avbryt",
"desc": "Avbryt bildgenerering"
},
"focusPrompt": {
"desc": "Fokusera området för promptinmatning",
"title": "Fokusprompt"
},
"pinOptions": {
"desc": "Nåla fast alternativpanelen",
"title": "Nåla fast alternativ"
},
"toggleOptions": {
"title": "Växla inställningar",
"desc": "Öppna och stäng alternativpanelen"
},
"toggleViewer": {
"title": "Växla visaren",
"desc": "Öppna och stäng bildvisaren"
},
"toggleGallery": {
"title": "Växla galleri",
"desc": "Öppna eller stäng galleribyrån"
},
"maximizeWorkSpace": {
"title": "Maximera arbetsyta",
"desc": "Stäng paneler och maximera arbetsyta"
},
"changeTabs": {
"title": "Växla flik",
"desc": "Byt till en annan arbetsyta"
},
"consoleToggle": {
"title": "Växla konsol",
"desc": "Öppna och stäng konsol"
},
"setSeed": {
"desc": "Använd seed för nuvarande bild",
"title": "välj seed"
},
"setParameters": {
"title": "Välj parametrar",
"desc": "Använd alla parametrar från nuvarande bild"
},
"setPrompt": {
"desc": "Använd prompt för nuvarande bild",
"title": "Välj prompt"
},
"restoreFaces": {
"title": "Återskapa ansikten",
"desc": "Återskapa nuvarande bild"
},
"upscale": {
"title": "Skala upp",
"desc": "Skala upp nuvarande bild"
},
"showInfo": {
"title": "Visa info",
"desc": "Visa metadata för nuvarande bild"
},
"sendToImageToImage": {
"title": "Skicka till Bild till bild",
"desc": "Skicka nuvarande bild till Bild till bild"
},
"deleteImage": {
"title": "Radera bild",
"desc": "Radera nuvarande bild"
},
"closePanels": {
"title": "Stäng paneler",
"desc": "Stäng öppna paneler"
},
"previousImage": {
"title": "Föregående bild",
"desc": "Visa föregående bild"
},
"nextImage": {
"title": "Nästa bild",
"desc": "Visa nästa bild"
},
"toggleGalleryPin": {
"title": "Växla gallerinål",
"desc": "Nålar fast eller nålar av galleriet i gränssnittet"
},
"increaseGalleryThumbSize": {
"title": "Förstora galleriets bildstorlek",
"desc": "Förstora miniatyrbildernas storlek"
},
"decreaseGalleryThumbSize": {
"title": "Minska gelleriets bildstorlek",
"desc": "Minska miniatyrbildernas storlek i galleriet"
},
"decreaseBrushSize": {
"desc": "Förminska storleken på kanvas- pensel eller suddgummi",
"title": "Minska penselstorlek"
},
"increaseBrushSize": {
"title": "Öka penselstorlek",
"desc": "Öka stoleken på kanvas- pensel eller suddgummi"
},
"increaseBrushOpacity": {
"title": "Öka penselns opacitet",
"desc": "Öka opaciteten för kanvaspensel"
},
"decreaseBrushOpacity": {
"desc": "Minska kanvaspenselns opacitet",
"title": "Minska penselns opacitet"
},
"moveTool": {
"title": "Flytta",
"desc": "Tillåt kanvasnavigation"
},
"fillBoundingBox": {
"title": "Fyll ram",
"desc": "Fyller ramen med pensels färg"
},
"keyboardShortcuts": "Snabbtangenter",
"appHotkeys": "Appsnabbtangenter",
"selectBrush": {
"desc": "Välj kanvaspensel",
"title": "Välj pensel"
},
"selectEraser": {
"desc": "Välj kanvassuddgummi",
"title": "Välj suddgummi"
},
"eraseBoundingBox": {
"title": "Ta bort ram"
}
}
}

View File

@ -0,0 +1,64 @@
{
"accessibility": {
"invokeProgressBar": "Invoke ilerleme durumu",
"nextImage": "Sonraki Resim",
"useThisParameter": "Kullanıcı parametreleri",
"copyMetadataJson": "Metadata verilerini kopyala (JSON)",
"exitViewer": "Görüntüleme Modundan Çık",
"zoomIn": "Yakınlaştır",
"zoomOut": "Uzaklaştır",
"rotateCounterClockwise": "Döndür (Saat yönünün tersine)",
"rotateClockwise": "Döndür (Saat yönünde)",
"flipHorizontally": "Yatay Çevir",
"flipVertically": "Dikey Çevir",
"modifyConfig": "Ayarları Değiştir",
"toggleAutoscroll": "Otomatik kaydırmayı aç/kapat",
"toggleLogViewer": "Günlük Görüntüleyici Aç/Kapa",
"showOptionsPanel": "Ayarlar Panelini Göster",
"modelSelect": "Model Seçin",
"reset": "Sıfırla",
"uploadImage": "Resim Yükle",
"previousImage": "Önceki Resim",
"menu": "Menü",
"showGallery": "Galeriyi Göster"
},
"common": {
"hotkeysLabel": "Kısayol Tuşları",
"themeLabel": "Tema",
"languagePickerLabel": "Dil Seçimi",
"reportBugLabel": "Hata Bildir",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Ayarlar",
"darkTheme": "Karanlık Tema",
"lightTheme": "Aydınlık Tema",
"greenTheme": "Yeşil Tema",
"oceanTheme": "Okyanus Tema",
"langArabic": "Arapça",
"langEnglish": "İngilizce",
"langDutch": "Hollandaca",
"langFrench": "Fransızca",
"langGerman": "Almanca",
"langItalian": "İtalyanca",
"langJapanese": "Japonca",
"langPolish": "Lehçe",
"langPortuguese": "Portekizce",
"langBrPortuguese": "Portekizcr (Brezilya)",
"langRussian": "Rusça",
"langSimplifiedChinese": "Çince (Basit)",
"langUkranian": "Ukraynaca",
"langSpanish": "İspanyolca",
"txt2img": "Metinden Resime",
"img2img": "Resimden Metine",
"linear": "Çizgisel",
"nodes": "Düğümler",
"postprocessing": "İşlem Sonrası",
"postProcessing": "İşlem Sonrası",
"postProcessDesc2": "Daha gelişmiş özellikler için ve iş akışını kolaylaştırmak için özel bir kullanıcı arayüzü çok yakında yayınlanacaktır.",
"postProcessDesc3": "Invoke AI komut satırı arayüzü, bir çok yeni özellik sunmaktadır.",
"langKorean": "Korece",
"unifiedCanvas": "Akıllı Tuval",
"nodesDesc": "Görüntülerin oluşturulmasında hazırladığımız yeni bir sistem geliştirme aşamasındadır. Bu harika özellikler ve çok daha fazlası için bizi takip etmeye devam edin.",
"postProcessDesc1": "Invoke AI son kullanıcıya yönelik bir çok özellik sunar. Görüntü kalitesi yükseltme, yüz restorasyonu WebUI üzerinden kullanılabilir. Metinden resime ve resimden metne araçlarına gelişmiş seçenekler menüsünden ulaşabilirsiniz. İsterseniz mevcut görüntü ekranının üzerindeki veya görüntüleyicideki görüntüyü doğrudan düzenleyebilirsiniz."
}
}

View File

@ -16,9 +16,9 @@
"postProcessing": "Постобробка", "postProcessing": "Постобробка",
"postProcessDesc1": "Invoke AI пропонує широкий спектр функцій постобробки. Збільшення зображення (upscale) та відновлення облич вже доступні в інтерфейсі. Отримайте доступ до них з меню 'Додаткові параметри' на вкладках 'Зображення із тексту' та 'Зображення із зображення'. Обробляйте зображення безпосередньо, використовуючи кнопки дій із зображеннями над поточним зображенням або в режимі перегляду.", "postProcessDesc1": "Invoke AI пропонує широкий спектр функцій постобробки. Збільшення зображення (upscale) та відновлення облич вже доступні в інтерфейсі. Отримайте доступ до них з меню 'Додаткові параметри' на вкладках 'Зображення із тексту' та 'Зображення із зображення'. Обробляйте зображення безпосередньо, використовуючи кнопки дій із зображеннями над поточним зображенням або в режимі перегляду.",
"postProcessDesc2": "Найближчим часом буде випущено спеціальний інтерфейс для більш сучасних процесів постобробки.", "postProcessDesc2": "Найближчим часом буде випущено спеціальний інтерфейс для більш сучасних процесів постобробки.",
"postProcessDesc3": "Інтерфейс командного рядка Invoke AI пропонує різні інші функції, включаючи збільшення Embiggen", "postProcessDesc3": "Інтерфейс командного рядка Invoke AI пропонує різні інші функції, включаючи збільшення Embiggen.",
"training": "Навчання", "training": "Навчання",
"trainingDesc1": "Спеціальний інтерфейс для навчання власних моделей з використанням Textual Inversion та Dreambooth", "trainingDesc1": "Спеціальний інтерфейс для навчання власних моделей з використанням Textual Inversion та Dreambooth.",
"trainingDesc2": "InvokeAI вже підтримує навчання моделей за допомогою TI, через інтерфейс командного рядка.", "trainingDesc2": "InvokeAI вже підтримує навчання моделей за допомогою TI, через інтерфейс командного рядка.",
"upload": "Завантажити", "upload": "Завантажити",
"close": "Закрити", "close": "Закрити",
@ -43,7 +43,38 @@
"statusUpscaling": "Збільшення", "statusUpscaling": "Збільшення",
"statusUpscalingESRGAN": "Збільшення (ESRGAN)", "statusUpscalingESRGAN": "Збільшення (ESRGAN)",
"statusLoadingModel": "Завантаження моделі", "statusLoadingModel": "Завантаження моделі",
"statusModelChanged": "Модель змінено" "statusModelChanged": "Модель змінено",
"cancel": "Скасувати",
"accept": "Підтвердити",
"back": "Назад",
"postprocessing": "Постобробка",
"statusModelConverted": "Модель сконвертована",
"statusMergingModels": "Злиття моделей",
"loading": "Завантаження",
"loadingInvokeAI": "Завантаження Invoke AI",
"langHebrew": "Іврит",
"langKorean": "Корейська",
"langPortuguese": "Португальська",
"pinOptionsPanel": "Закріпити панель налаштувань",
"oceanTheme": "Океан",
"langArabic": "Арабська",
"langSimplifiedChinese": "Китайська (спрощена)",
"langSpanish": "Іспанська",
"langEnglish": "Англійська",
"langGerman": "Німецька",
"langItalian": "Італійська",
"langJapanese": "Японська",
"langPolish": "Польська",
"langBrPortuguese": "Португальська (Бразилія)",
"langRussian": "Російська",
"githubLabel": "Github",
"txt2img": "Текст в зображення (txt2img)",
"discordLabel": "Discord",
"langDutch": "Голландська",
"langFrench": "Французька",
"statusMergedModels": "Моделі об'єднані",
"statusConvertingModel": "Конвертація моделі",
"linear": "Лінійна обробка"
}, },
"gallery": { "gallery": {
"generations": "Генерації", "generations": "Генерації",
@ -284,15 +315,15 @@
"description": "Опис", "description": "Опис",
"descriptionValidationMsg": "Введіть опис моделі", "descriptionValidationMsg": "Введіть опис моделі",
"config": "Файл конфігурації", "config": "Файл конфігурації",
"configValidationMsg": "Шлях до файлу конфігурації", "configValidationMsg": "Шлях до файлу конфігурації.",
"modelLocation": "Розташування моделі", "modelLocation": "Розташування моделі",
"modelLocationValidationMsg": "Шлях до файлу з моделлю", "modelLocationValidationMsg": "Шлях до файлу з моделлю.",
"vaeLocation": "Розтышування VAE", "vaeLocation": "Розтышування VAE",
"vaeLocationValidationMsg": "Шлях до VAE", "vaeLocationValidationMsg": "Шлях до VAE.",
"width": "Ширина", "width": "Ширина",
"widthValidationMsg": "Початкова ширина зображень", "widthValidationMsg": "Початкова ширина зображень.",
"height": "Висота", "height": "Висота",
"heightValidationMsg": "Початкова висота зображень", "heightValidationMsg": "Початкова висота зображень.",
"addModel": "Додати модель", "addModel": "Додати модель",
"updateModel": "Оновити модель", "updateModel": "Оновити модель",
"availableModels": "Доступні моделі", "availableModels": "Доступні моделі",
@ -319,7 +350,66 @@
"deleteModel": "Видалити модель", "deleteModel": "Видалити модель",
"deleteConfig": "Видалити конфігурацію", "deleteConfig": "Видалити конфігурацію",
"deleteMsg1": "Ви точно хочете видалити модель із InvokeAI?", "deleteMsg1": "Ви точно хочете видалити модель із InvokeAI?",
"deleteMsg2": "Це не призведе до видалення файлу моделі з диску. Позніше ви можете додати його знову." "deleteMsg2": "Це не призведе до видалення файлу моделі з диску. Позніше ви можете додати його знову.",
"allModels": "Усі моделі",
"diffusersModels": "Diffusers",
"scanForModels": "Сканувати моделі",
"convert": "Конвертувати",
"convertToDiffusers": "Конвертувати в Diffusers",
"formMessageDiffusersVAELocationDesc": "Якщо не надано, InvokeAI буде шукати файл VAE в розташуванні моделі, вказаній вище.",
"convertToDiffusersHelpText3": "Файл моделі на диску НЕ буде видалено або змінено. Ви можете знову додати його в Model Manager, якщо потрібно.",
"customConfig": "Користувальницький конфіг",
"invokeRoot": "Каталог InvokeAI",
"custom": "Користувальницький",
"modelTwo": "Модель 2",
"modelThree": "Модель 3",
"mergedModelName": "Назва об'єднаної моделі",
"alpha": "Альфа",
"interpolationType": "Тип інтерполяції",
"mergedModelSaveLocation": "Шлях збереження",
"mergedModelCustomSaveLocation": "Користувальницький шлях",
"invokeAIFolder": "Каталог InvokeAI",
"ignoreMismatch": "Ігнорувати невідповідності між вибраними моделями",
"modelMergeHeaderHelp2": "Тільки Diffusers-моделі доступні для об'єднання. Якщо ви хочете об'єднати checkpoint-моделі, спочатку перетворіть їх на Diffusers.",
"checkpointModels": "Checkpoints",
"repo_id": "ID репозиторію",
"v2_base": "v2 (512px)",
"repoIDValidationMsg": "Онлайн-репозиторій моделі",
"formMessageDiffusersModelLocationDesc": "Вкажіть хоча б одне.",
"formMessageDiffusersModelLocation": "Шлях до Diffusers-моделі",
"v2_768": "v2 (768px)",
"formMessageDiffusersVAELocation": "Шлях до VAE",
"convertToDiffusersHelpText5": "Переконайтеся, що у вас достатньо місця на диску. Моделі зазвичай займають від 4 до 7 Гб.",
"convertToDiffusersSaveLocation": "Шлях збереження",
"v1": "v1",
"convertToDiffusersHelpText6": "Ви хочете перетворити цю модель?",
"inpainting": "v1 Inpainting",
"modelConverted": "Модель перетворено",
"sameFolder": "У ту ж папку",
"statusConverting": "Перетворення",
"merge": "Об'єднати",
"mergeModels": "Об'єднати моделі",
"modelOne": "Модель 1",
"sigmoid": "Сігмоїд",
"weightedSum": "Зважена сума",
"none": "пусто",
"addDifference": "Додати різницю",
"pickModelType": "Вибрати тип моделі",
"convertToDiffusersHelpText4": "Це одноразова дія. Вона може зайняти від 30 до 60 секунд в залежності від характеристик вашого комп'ютера.",
"pathToCustomConfig": "Шлях до конфігу користувача",
"safetensorModels": "SafeTensors",
"addCheckpointModel": "Додати модель Checkpoint/Safetensor",
"addDiffuserModel": "Додати Diffusers",
"vaeRepoID": "ID репозиторію VAE",
"vaeRepoIDValidationMsg": "Онлайн-репозиторій VAE",
"modelMergeInterpAddDifferenceHelp": "У цьому режимі Модель 3 спочатку віднімається з Моделі 2. Результуюча версія змішується з Моделью 1 із встановленим вище коефіцієнтом Альфа.",
"customSaveLocation": "Користувальницький шлях збереження",
"modelMergeAlphaHelp": "Альфа впливає силу змішування моделей. Нижчі значення альфа призводять до меншого впливу другої моделі.",
"convertToDiffusersHelpText1": "Ця модель буде конвертована в формат 🧨 Diffusers.",
"convertToDiffusersHelpText2": "Цей процес замінить ваш запис в Model Manager на версію тієї ж моделі в Diffusers.",
"modelsMerged": "Моделі об'єднані",
"modelMergeHeaderHelp1": "Ви можете об'єднати до трьох різних моделей, щоб створити змішану, що відповідає вашим потребам.",
"inverseSigmoid": "Зворотній Сігмоїд"
}, },
"parameters": { "parameters": {
"images": "Зображення", "images": "Зображення",
@ -346,7 +436,7 @@
"scale": "Масштаб", "scale": "Масштаб",
"otherOptions": "інші параметри", "otherOptions": "інші параметри",
"seamlessTiling": "Безшовний узор", "seamlessTiling": "Безшовний узор",
"hiresOptim": "Висока роздільна здатність", "hiresOptim": "Оптимізація High Res",
"imageFit": "Вмістити зображення", "imageFit": "Вмістити зображення",
"codeformerFidelity": "Точність", "codeformerFidelity": "Точність",
"seamSize": "Размір шву", "seamSize": "Размір шву",
@ -379,7 +469,24 @@
"info": "Метадані", "info": "Метадані",
"deleteImage": "Видалити зображення", "deleteImage": "Видалити зображення",
"initialImage": "Початкове зображення", "initialImage": "Початкове зображення",
"showOptionsPanel": "Показати панель налаштувань" "showOptionsPanel": "Показати панель налаштувань",
"general": "Основне",
"cancel": {
"immediate": "Скасувати негайно",
"schedule": "Скасувати після поточної ітерації",
"isScheduled": "Відміна",
"setType": "Встановити тип скасування"
},
"vSymmetryStep": "Крок верт. симетрії",
"hiresStrength": "Сила High Res",
"hidePreview": "Сховати попередній перегляд",
"showPreview": "Показати попередній перегляд",
"imageToImage": "Зображення до зображення",
"denoisingStrength": "Сила шумоподавлення",
"copyImage": "Копіювати зображення",
"symmetry": "Симетрія",
"hSymmetryStep": "Крок гор. симетрії",
"negativePrompts": "Виключний запит"
}, },
"settings": { "settings": {
"models": "Моделі", "models": "Моделі",
@ -392,7 +499,8 @@
"resetWebUI": "Повернути початкові", "resetWebUI": "Повернути початкові",
"resetWebUIDesc1": "Скидання настройок веб-інтерфейсу видаляє лише локальний кеш браузера з вашими зображеннями та налаштуваннями. Це не призводить до видалення зображень з диску.", "resetWebUIDesc1": "Скидання настройок веб-інтерфейсу видаляє лише локальний кеш браузера з вашими зображеннями та налаштуваннями. Це не призводить до видалення зображень з диску.",
"resetWebUIDesc2": "Якщо зображення не відображаються в галереї або не працює ще щось, спробуйте скинути налаштування, перш ніж повідомляти про проблему на GitHub.", "resetWebUIDesc2": "Якщо зображення не відображаються в галереї або не працює ще щось, спробуйте скинути налаштування, перш ніж повідомляти про проблему на GitHub.",
"resetComplete": "Інтерфейс скинуто. Оновіть цю сторінку." "resetComplete": "Інтерфейс скинуто. Оновіть цю сторінку.",
"useSlidersForAll": "Використовувати повзунки для всіх параметрів"
}, },
"toast": { "toast": {
"tempFoldersEmptied": "Тимчасова папка очищена", "tempFoldersEmptied": "Тимчасова папка очищена",
@ -410,21 +518,25 @@
"sentToUnifiedCanvas": "Надіслати на полотно", "sentToUnifiedCanvas": "Надіслати на полотно",
"parametersSet": "Параметри задані", "parametersSet": "Параметри задані",
"parametersNotSet": "Параметри не задані", "parametersNotSet": "Параметри не задані",
"parametersNotSetDesc": "Не знайдені метадані цього зображення", "parametersNotSetDesc": "Не знайдені метадані цього зображення.",
"parametersFailed": "Проблема із завантаженням параметрів", "parametersFailed": "Проблема із завантаженням параметрів",
"parametersFailedDesc": "Неможливо завантажити початкове зображення", "parametersFailedDesc": "Неможливо завантажити початкове зображення.",
"seedSet": "Сід заданий", "seedSet": "Сід заданий",
"seedNotSet": "Сід не заданий", "seedNotSet": "Сід не заданий",
"seedNotSetDesc": "Не вдалося знайти сід для зображення", "seedNotSetDesc": "Не вдалося знайти сід для зображення.",
"promptSet": "Запит заданий", "promptSet": "Запит заданий",
"promptNotSet": "Запит не заданий", "promptNotSet": "Запит не заданий",
"promptNotSetDesc": "Не вдалося знайти запит для зображення", "promptNotSetDesc": "Не вдалося знайти запит для зображення.",
"upscalingFailed": "Збільшення не вдалося", "upscalingFailed": "Збільшення не вдалося",
"faceRestoreFailed": "Відновлення облич не вдалося", "faceRestoreFailed": "Відновлення облич не вдалося",
"metadataLoadFailed": "Не вдалося завантажити метадані", "metadataLoadFailed": "Не вдалося завантажити метадані",
"initialImageSet": "Початкове зображення задане", "initialImageSet": "Початкове зображення задане",
"initialImageNotSet": "Початкове зображення не задане", "initialImageNotSet": "Початкове зображення не задане",
"initialImageNotSetDesc": "Не вдалося завантажити початкове зображення" "initialImageNotSetDesc": "Не вдалося завантажити початкове зображення",
"serverError": "Помилка сервера",
"disconnected": "Відключено від сервера",
"connected": "Підключено до сервера",
"canceled": "Обробку скасовано"
}, },
"tooltip": { "tooltip": {
"feature": { "feature": {
@ -473,10 +585,10 @@
"autoSaveToGallery": "Автозбереження до галереї", "autoSaveToGallery": "Автозбереження до галереї",
"saveBoxRegionOnly": "Зберiгати тiльки видiлення", "saveBoxRegionOnly": "Зберiгати тiльки видiлення",
"limitStrokesToBox": "Обмежити штрихи виділенням", "limitStrokesToBox": "Обмежити штрихи виділенням",
"showCanvasDebugInfo": "Показати налаштування полотна", "showCanvasDebugInfo": "Показати дод. інформацію про полотно",
"clearCanvasHistory": "Очистити iсторiю полотна", "clearCanvasHistory": "Очистити iсторiю полотна",
"clearHistory": "Очистити iсторiю", "clearHistory": "Очистити iсторiю",
"clearCanvasHistoryMessage": "Очищення історії полотна залишає поточне полотно незайманим, але видаляє історію скасування та повтору", "clearCanvasHistoryMessage": "Очищення історії полотна залишає поточне полотно незайманим, але видаляє історію скасування та повтору.",
"clearCanvasHistoryConfirm": "Ви впевнені, що хочете очистити історію полотна?", "clearCanvasHistoryConfirm": "Ви впевнені, що хочете очистити історію полотна?",
"emptyTempImageFolder": "Очистити тимчасову папку", "emptyTempImageFolder": "Очистити тимчасову папку",
"emptyFolder": "Очистити папку", "emptyFolder": "Очистити папку",
@ -499,5 +611,28 @@
"betaDarkenOutside": "Затемнити зовні", "betaDarkenOutside": "Затемнити зовні",
"betaLimitToBox": "Обмежити виділенням", "betaLimitToBox": "Обмежити виділенням",
"betaPreserveMasked": "Зберiгати замасковану область" "betaPreserveMasked": "Зберiгати замасковану область"
},
"accessibility": {
"nextImage": "Наступне зображення",
"modelSelect": "Вибір моделі",
"invokeProgressBar": "Індикатор виконання",
"reset": "Скинути",
"uploadImage": "Завантажити зображення",
"useThisParameter": "Використовувати цей параметр",
"exitViewer": "Вийти з переглядача",
"zoomIn": "Збільшити",
"zoomOut": "Зменшити",
"rotateCounterClockwise": "Обертати проти годинникової стрілки",
"rotateClockwise": "Обертати за годинниковою стрілкою",
"toggleAutoscroll": "Увімкнути автопрокручування",
"toggleLogViewer": "Показати або приховати переглядач журналів",
"showGallery": "Показати галерею",
"previousImage": "Попереднє зображення",
"copyMetadataJson": "Скопіювати метадані JSON",
"flipVertically": "Перевернути по вертикалі",
"flipHorizontally": "Відобразити по горизонталі",
"showOptionsPanel": "Показати опції",
"modifyConfig": "Змінити конфігурацію",
"menu": "Меню"
} }
} }

View File

@ -0,0 +1 @@
{}

View File

@ -481,5 +481,22 @@
"betaDarkenOutside": "暗化外部区域", "betaDarkenOutside": "暗化外部区域",
"betaLimitToBox": "限制在框内", "betaLimitToBox": "限制在框内",
"betaPreserveMasked": "保留遮罩层" "betaPreserveMasked": "保留遮罩层"
},
"accessibility": {
"modelSelect": "模型选择",
"invokeProgressBar": "Invoke 进度条",
"reset": "重置",
"nextImage": "下一张图片",
"useThisParameter": "使用此参数",
"uploadImage": "上传图片",
"previousImage": "上一张图片",
"copyMetadataJson": "复制JSON元数据",
"exitViewer": "退出视口ExitViewer",
"zoomIn": "放大",
"zoomOut": "缩小",
"rotateCounterClockwise": "逆时针旋转",
"rotateClockwise": "顺时针旋转",
"flipHorizontally": "水平翻转",
"flipVertically": "垂直翻转"
} }
} }

View File

@ -13,16 +13,48 @@ import { Box, Flex, Grid, Portal, useColorMode } from '@chakra-ui/react';
import { APP_HEIGHT, APP_WIDTH } from 'theme/util/constants'; import { APP_HEIGHT, APP_WIDTH } from 'theme/util/constants';
import ImageGalleryPanel from 'features/gallery/components/ImageGalleryPanel'; import ImageGalleryPanel from 'features/gallery/components/ImageGalleryPanel';
import Lightbox from 'features/lightbox/components/Lightbox'; import Lightbox from 'features/lightbox/components/Lightbox';
import { useAppSelector } from './storeHooks'; import { useAppDispatch, useAppSelector } from './storeHooks';
import { PropsWithChildren, useEffect } from 'react'; import { PropsWithChildren, useEffect } from 'react';
import { setDisabledPanels, setDisabledTabs } from 'features/ui/store/uiSlice';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { shouldTransformUrlsChanged } from 'features/system/store/systemSlice';
import { setShouldFetchImages } from 'features/gallery/store/resultsSlice';
keepGUIAlive(); keepGUIAlive();
const App = (props: PropsWithChildren) => { interface Props extends PropsWithChildren {
options: {
disabledPanels: string[];
disabledTabs: InvokeTabName[];
shouldTransformUrls?: boolean;
shouldFetchImages: boolean;
};
}
const App = (props: Props) => {
useToastWatcher(); useToastWatcher();
const currentTheme = useAppSelector((state) => state.ui.currentTheme); const currentTheme = useAppSelector((state) => state.ui.currentTheme);
const { setColorMode } = useColorMode(); const { setColorMode } = useColorMode();
const dispatch = useAppDispatch();
useEffect(() => {
dispatch(setDisabledPanels(props.options.disabledPanels));
}, [dispatch, props.options.disabledPanels]);
useEffect(() => {
dispatch(setDisabledTabs(props.options.disabledTabs));
}, [dispatch, props.options.disabledTabs]);
useEffect(() => {
dispatch(
shouldTransformUrlsChanged(Boolean(props.options.shouldTransformUrls))
);
}, [dispatch, props.options.shouldTransformUrls]);
useEffect(() => {
dispatch(setShouldFetchImages(props.options.shouldFetchImages));
}, [dispatch, props.options.shouldFetchImages]);
useEffect(() => { useEffect(() => {
setColorMode(['light'].includes(currentTheme) ? 'light' : 'dark'); setColorMode(['light'].includes(currentTheme) ? 'light' : 'dark');
@ -41,7 +73,12 @@ const App = (props: PropsWithChildren) => {
h={APP_HEIGHT} h={APP_HEIGHT}
> >
{props.children || <SiteHeader />} {props.children || <SiteHeader />}
<Flex gap={4} w="full" h="full"> <Flex
gap={4}
w={{ base: '100vw', xl: 'full' }}
h="full"
flexDir={{ base: 'column', xl: 'row' }}
>
<InvokeTabs /> <InvokeTabs />
<ImageGalleryPanel /> <ImageGalleryPanel />
</Flex> </Flex>

View File

@ -31,13 +31,13 @@ export const DIFFUSERS_SAMPLERS: Array<string> = [
]; ];
// Valid image widths // Valid image widths
export const WIDTHS: Array<number> = Array.from(Array(65)).map( export const WIDTHS: Array<number> = Array.from(Array(64)).map(
(_x, i) => i * 64 (_x, i) => (i + 1) * 64
); );
// Valid image heights // Valid image heights
export const HEIGHTS: Array<number> = Array.from(Array(65)).map( export const HEIGHTS: Array<number> = Array.from(Array(64)).map(
(_x, i) => i * 64 (_x, i) => (i + 1) * 64
); );
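
Worth spelling out what the WIDTHS/HEIGHTS change above actually does: the old expression enumerated multiples of 64 starting at index 0, so 0 px was included as a "valid" dimension, while the shifted index starts the list at 64 and keeps the same 4096 upper bound. A small sketch of the resulting values:

```typescript
// Old: Array.from(Array(65)).map((_x, i) => i * 64)
//   => [0, 64, 128, ..., 4096]  (65 entries; 0 is not a usable dimension)
// New: Array.from(Array(64)).map((_x, i) => (i + 1) * 64)
//   => [64, 128, ..., 4096]     (64 entries, starting at 64)
const widths: number[] = Array.from(Array(64)).map((_x, i) => (i + 1) * 64);
console.log(widths[0], widths[widths.length - 1]); // 64 4096
```
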
// Valid upscaling levels // Valid upscaling levels
@ -60,3 +60,5 @@ export const IN_PROGRESS_IMAGE_TYPES: Array<{
{ key: 'Fast', value: 'latents' }, { key: 'Fast', value: 'latents' },
{ key: 'Accurate', value: 'full-res' }, { key: 'Accurate', value: 'full-res' },
]; ];
export const NODE_MIN_WIDTH = 250;

View File

@ -14,6 +14,8 @@
import { InvokeTabName } from 'features/ui/store/tabMap'; import { InvokeTabName } from 'features/ui/store/tabMap';
import { IRect } from 'konva/lib/types'; import { IRect } from 'konva/lib/types';
import { ImageMetadata, ImageType } from 'services/api';
import { AnyInvocation } from 'services/events/types';
/** /**
* TODO: * TODO:
@ -113,7 +115,7 @@ export declare type Metadata = SystemGenerationMetadata & {
}; };
// An Image has a UUID, url, modified timestamp, width, height and maybe metadata // An Image has a UUID, url, modified timestamp, width, height and maybe metadata
export declare type Image = { export declare type _Image = {
uuid: string; uuid: string;
url: string; url: string;
thumbnail: string; thumbnail: string;
@ -124,11 +126,23 @@ export declare type Image = {
category: GalleryCategory; category: GalleryCategory;
isBase64?: boolean; isBase64?: boolean;
dreamPrompt?: 'string'; dreamPrompt?: 'string';
name?: string;
};
/**
* ResultImage
*/
export declare type Image = {
name: string;
type: ImageType;
url: string;
thumbnail: string;
metadata: ImageMetadata;
}; };
// GalleryImages is an array of Image. // GalleryImages is an array of Image.
export declare type GalleryImages = { export declare type GalleryImages = {
images: Array<Image>; images: Array<_Image>;
}; };
/** /**
@ -275,7 +289,7 @@ export declare type SystemStatusResponse = SystemStatus;
export declare type SystemConfigResponse = SystemConfig; export declare type SystemConfigResponse = SystemConfig;
export declare type ImageResultResponse = Omit<Image, 'uuid'> & { export declare type ImageResultResponse = Omit<_Image, 'uuid'> & {
boundingBox?: IRect; boundingBox?: IRect;
generationMode: InvokeTabName; generationMode: InvokeTabName;
}; };
@ -296,7 +310,7 @@ export declare type ErrorResponse = {
}; };
export declare type GalleryImagesResponse = { export declare type GalleryImagesResponse = {
images: Array<Omit<Image, 'uuid'>>; images: Array<Omit<_Image, 'uuid'>>;
areMoreImagesAvailable: boolean; areMoreImagesAvailable: boolean;
category: GalleryCategory; category: GalleryCategory;
}; };

View File

@ -20,6 +20,7 @@ export const readinessSelector = createSelector(
seedWeights, seedWeights,
initialImage, initialImage,
seed, seed,
isImageToImageEnabled,
} = generation; } = generation;
const { isProcessing, isConnected } = system; const { isProcessing, isConnected } = system;
@ -33,7 +34,7 @@ export const readinessSelector = createSelector(
reasonsWhyNotReady.push('Missing prompt'); reasonsWhyNotReady.push('Missing prompt');
} }
if (activeTabName === 'img2img' && !initialImage) { if (isImageToImageEnabled && !initialImage) {
isReady = false; isReady = false;
reasonsWhyNotReady.push('No initial image selected'); reasonsWhyNotReady.push('No initial image selected');
} }

View File

@ -13,9 +13,13 @@ import { InvokeTabName } from 'features/ui/store/tabMap';
export const generateImage = createAction<InvokeTabName>( export const generateImage = createAction<InvokeTabName>(
'socketio/generateImage' 'socketio/generateImage'
); );
export const runESRGAN = createAction<InvokeAI.Image>('socketio/runESRGAN'); export const runESRGAN = createAction<InvokeAI._Image>('socketio/runESRGAN');
export const runFacetool = createAction<InvokeAI.Image>('socketio/runFacetool'); export const runFacetool = createAction<InvokeAI._Image>(
export const deleteImage = createAction<InvokeAI.Image>('socketio/deleteImage'); 'socketio/runFacetool'
);
export const deleteImage = createAction<InvokeAI._Image>(
'socketio/deleteImage'
);
export const requestImages = createAction<GalleryCategory>( export const requestImages = createAction<GalleryCategory>(
'socketio/requestImages' 'socketio/requestImages'
); );

View File

@ -91,7 +91,7 @@ const makeSocketIOEmitters = (
}) })
); );
}, },
emitRunESRGAN: (imageToProcess: InvokeAI.Image) => { emitRunESRGAN: (imageToProcess: InvokeAI._Image) => {
dispatch(setIsProcessing(true)); dispatch(setIsProcessing(true));
const { const {
@ -119,7 +119,7 @@ const makeSocketIOEmitters = (
}) })
); );
}, },
emitRunFacetool: (imageToProcess: InvokeAI.Image) => { emitRunFacetool: (imageToProcess: InvokeAI._Image) => {
dispatch(setIsProcessing(true)); dispatch(setIsProcessing(true));
const { const {
@ -150,7 +150,7 @@ const makeSocketIOEmitters = (
}) })
); );
}, },
emitDeleteImage: (imageToDelete: InvokeAI.Image) => { emitDeleteImage: (imageToDelete: InvokeAI._Image) => {
const { url, uuid, category, thumbnail } = imageToDelete; const { url, uuid, category, thumbnail } = imageToDelete;
dispatch(removeImage(imageToDelete)); dispatch(removeImage(imageToDelete));
socketio.emit('deleteImage', url, thumbnail, uuid, category); socketio.emit('deleteImage', url, thumbnail, uuid, category);

View File

@ -34,8 +34,9 @@ import type { RootState } from 'app/store';
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice'; import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import { import {
clearInitialImage, clearInitialImage,
initialImageSelected,
setInfillMethod, setInfillMethod,
setInitialImage, // setInitialImage,
setMaskPath, setMaskPath,
} from 'features/parameters/store/generationSlice'; } from 'features/parameters/store/generationSlice';
import { tabMap } from 'features/ui/store/tabMap'; import { tabMap } from 'features/ui/store/tabMap';
@ -142,15 +143,17 @@ const makeSocketIOListeners = (
} }
} }
if (shouldLoopback) { // TODO: fix
const activeTabName = tabMap[activeTab]; // if (shouldLoopback) {
switch (activeTabName) { // const activeTabName = tabMap[activeTab];
case 'img2img': { // switch (activeTabName) {
dispatch(setInitialImage(newImage)); // case 'img2img': {
break; // dispatch(initialImageSelected(newImage.uuid));
} // // dispatch(setInitialImage(newImage));
} // break;
} // }
// }
// }
dispatch(clearIntermediateImage()); dispatch(clearIntermediateImage());
@ -262,7 +265,7 @@ const makeSocketIOListeners = (
*/ */
// Generate a UUID for each image // Generate a UUID for each image
const preparedImages = images.map((image): InvokeAI.Image => { const preparedImages = images.map((image): InvokeAI._Image => {
return { return {
uuid: uuidv4(), uuid: uuidv4(),
...image, ...image,
@ -334,7 +337,7 @@ const makeSocketIOListeners = (
if ( if (
initialImage === url || initialImage === url ||
(initialImage as InvokeAI.Image)?.url === url (initialImage as InvokeAI._Image)?.url === url
) { ) {
dispatch(clearInitialImage()); dispatch(clearInitialImage());
} }

View File

@ -29,6 +29,8 @@ export const socketioMiddleware = () => {
path: `${window.location.pathname}socket.io`, path: `${window.location.pathname}socket.io`,
}); });
socketio.disconnect();
let areListenersSet = false; let areListenersSet = false;
const middleware: Middleware = (store) => (next) => (action) => { const middleware: Middleware = (store) => (next) => (action) => {

View File

@ -2,18 +2,32 @@ import { combineReducers, configureStore } from '@reduxjs/toolkit';
import { persistReducer } from 'redux-persist'; import { persistReducer } from 'redux-persist';
import storage from 'redux-persist/lib/storage'; // defaults to localStorage for web import storage from 'redux-persist/lib/storage'; // defaults to localStorage for web
import dynamicMiddlewares from 'redux-dynamic-middlewares';
import { getPersistConfig } from 'redux-deep-persist'; import { getPersistConfig } from 'redux-deep-persist';
import canvasReducer from 'features/canvas/store/canvasSlice'; import canvasReducer from 'features/canvas/store/canvasSlice';
import galleryReducer from 'features/gallery/store/gallerySlice'; import galleryReducer from 'features/gallery/store/gallerySlice';
import resultsReducer from 'features/gallery/store/resultsSlice';
import uploadsReducer from 'features/gallery/store/uploadsSlice';
import lightboxReducer from 'features/lightbox/store/lightboxSlice'; import lightboxReducer from 'features/lightbox/store/lightboxSlice';
import generationReducer from 'features/parameters/store/generationSlice'; import generationReducer from 'features/parameters/store/generationSlice';
import postprocessingReducer from 'features/parameters/store/postprocessingSlice'; import postprocessingReducer from 'features/parameters/store/postprocessingSlice';
import systemReducer from 'features/system/store/systemSlice'; import systemReducer from 'features/system/store/systemSlice';
import uiReducer from 'features/ui/store/uiSlice'; import uiReducer from 'features/ui/store/uiSlice';
import modelsReducer from 'features/system/store/modelSlice';
import nodesReducer from 'features/nodes/store/nodesSlice';
import { socketioMiddleware } from './socketio/middleware'; import { socketioMiddleware } from './socketio/middleware';
import { socketMiddleware } from 'services/events/middleware';
import { canvasBlacklist } from 'features/canvas/store/canvasPersistBlacklist';
import { galleryBlacklist } from 'features/gallery/store/galleryPersistBlacklist';
import { generationBlacklist } from 'features/parameters/store/generationPersistBlacklist';
import { lightboxBlacklist } from 'features/lightbox/store/lightboxPersistBlacklist';
import { modelsBlacklist } from 'features/system/store/modelsPersistBlacklist';
import { nodesBlacklist } from 'features/nodes/store/nodesPersistBlacklist';
import { postprocessingBlacklist } from 'features/parameters/store/postprocessingPersistBlacklist';
import { systemBlacklist } from 'features/system/store/systemPersistsBlacklist';
import { uiBlacklist } from 'features/ui/store/uiPersistBlacklist';
/** /**
* redux-persist provides an easy and reliable way to persist state across reloads. * redux-persist provides an easy and reliable way to persist state across reloads.
@ -29,49 +43,18 @@ import { socketioMiddleware } from './socketio/middleware';
* The necessary nested persistors with blacklists are configured below. * The necessary nested persistors with blacklists are configured below.
*/ */
const canvasBlacklist = [
'cursorPosition',
'isCanvasInitialized',
'doesCanvasNeedScaling',
].map((blacklistItem) => `canvas.${blacklistItem}`);
const systemBlacklist = [
'currentIteration',
'currentStatus',
'currentStep',
'isCancelable',
'isConnected',
'isESRGANAvailable',
'isGFPGANAvailable',
'isProcessing',
'socketId',
'totalIterations',
'totalSteps',
'openModel',
'cancelOptions.cancelAfter',
].map((blacklistItem) => `system.${blacklistItem}`);
const galleryBlacklist = [
'categories',
'currentCategory',
'currentImage',
'currentImageUuid',
'shouldAutoSwitchToNewImages',
'intermediateImage',
].map((blacklistItem) => `gallery.${blacklistItem}`);
const lightboxBlacklist = ['isLightboxOpen'].map(
(blacklistItem) => `lightbox.${blacklistItem}`
);
const rootReducer = combineReducers({ const rootReducer = combineReducers({
generation: generationReducer,
postprocessing: postprocessingReducer,
gallery: galleryReducer,
system: systemReducer,
canvas: canvasReducer, canvas: canvasReducer,
ui: uiReducer, gallery: galleryReducer,
generation: generationReducer,
lightbox: lightboxReducer, lightbox: lightboxReducer,
models: modelsReducer,
nodes: nodesReducer,
postprocessing: postprocessingReducer,
results: resultsReducer,
system: systemReducer,
ui: uiReducer,
uploads: uploadsReducer,
}); });
const rootPersistConfig = getPersistConfig({ const rootPersistConfig = getPersistConfig({
@ -80,23 +63,40 @@ const rootPersistConfig = getPersistConfig({
rootReducer, rootReducer,
blacklist: [ blacklist: [
...canvasBlacklist, ...canvasBlacklist,
...systemBlacklist,
...galleryBlacklist, ...galleryBlacklist,
...generationBlacklist,
...lightboxBlacklist, ...lightboxBlacklist,
...modelsBlacklist,
...nodesBlacklist,
...postprocessingBlacklist,
// ...resultsBlacklist,
'results',
...systemBlacklist,
...uiBlacklist,
// ...uploadsBlacklist,
'uploads',
], ],
debounce: 300, debounce: 300,
}); });
const persistedReducer = persistReducer(rootPersistConfig, rootReducer); const persistedReducer = persistReducer(rootPersistConfig, rootReducer);
// Continue with store setup // TODO: rip the old middleware out when nodes is complete
export function buildMiddleware() {
if (import.meta.env.MODE === 'nodes' || import.meta.env.MODE === 'package') {
return socketMiddleware();
} else {
return socketioMiddleware();
}
}
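
For readers unfamiliar with `redux-dynamic-middlewares`: the socket middleware is no longer baked in at `configureStore` time; the store concatenates only the dynamic placeholder, and `component.tsx` later calls `addMiddleware(buildMiddleware())` to attach the concrete middleware for the active deployment mode. A rough, self-contained sketch of that pattern, with a stub reducer and middleware standing in for the real ones:

```typescript
import { combineReducers, configureStore, Middleware } from '@reduxjs/toolkit';
import dynamicMiddlewares, { addMiddleware } from 'redux-dynamic-middlewares';

// Placeholders standing in for the real rootReducer / socket middleware above.
const rootReducer = combineReducers({ counter: (state: number = 0) => state });
const socketMiddlewareStub: Middleware = () => (next) => (action) => next(action);

// 1) Only the dynamic placeholder is attached at store-creation time.
const store = configureStore({
  reducer: rootReducer,
  middleware: (getDefaultMiddleware) =>
    getDefaultMiddleware().concat(dynamicMiddlewares),
});

// 2) Concrete middleware is bolted on later, per deployment mode, which is
//    what `addMiddleware(buildMiddleware())` does in component.tsx.
addMiddleware(socketMiddlewareStub);

export default store;
```
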
export const store = configureStore({ export const store = configureStore({
reducer: persistedReducer, reducer: persistedReducer,
middleware: (getDefaultMiddleware) => middleware: (getDefaultMiddleware) =>
getDefaultMiddleware({ getDefaultMiddleware({
immutableCheck: false, immutableCheck: false,
serializableCheck: false, serializableCheck: false,
}).concat(socketioMiddleware()), }).concat(dynamicMiddlewares),
devTools: { devTools: {
// Uncommenting these very rapidly called actions makes the redux dev tools output much more readable // Uncommenting these very rapidly called actions makes the redux dev tools output much more readable
actionsDenylist: [ actionsDenylist: [

View File

@ -0,0 +1,8 @@
import { createAsyncThunk } from '@reduxjs/toolkit';
import { AppDispatch, RootState } from './store';
// https://redux-toolkit.js.org/usage/usage-with-typescript#defining-a-pre-typed-createasyncthunk
export const createAppAsyncThunk = createAsyncThunk.withTypes<{
state: RootState;
dispatch: AppDispatch;
}>();
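
A hypothetical example of how the pre-typed helper would be consumed; the thunk name, endpoint and payload shape below are illustrative only and not part of this diff:

```typescript
// Assumes the `createAppAsyncThunk` helper defined above is importable.
const fetchExample = createAppAsyncThunk(
  'example/fetch',
  async (id: string, { getState, rejectWithValue }) => {
    // getState() is typed as RootState here, so no manual casting is needed.
    const { system } = getState();
    if (!system.isConnected) {
      return rejectWithValue('not connected');
    }
    const res = await fetch(`/api/example/${id}`); // illustrative endpoint
    return (await res.json()) as Record<string, unknown>;
  }
);
```
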

View File

@ -44,12 +44,10 @@ export type IAIFullSliderProps = {
inputReadOnly?: boolean; inputReadOnly?: boolean;
withReset?: boolean; withReset?: boolean;
handleReset?: () => void; handleReset?: () => void;
isResetDisabled?: boolean;
isSliderDisabled?: boolean;
isInputDisabled?: boolean;
tooltipSuffix?: string; tooltipSuffix?: string;
hideTooltip?: boolean; hideTooltip?: boolean;
isCompact?: boolean; isCompact?: boolean;
isDisabled?: boolean;
sliderFormControlProps?: FormControlProps; sliderFormControlProps?: FormControlProps;
sliderFormLabelProps?: FormLabelProps; sliderFormLabelProps?: FormLabelProps;
sliderMarkProps?: Omit<SliderMarkProps, 'value'>; sliderMarkProps?: Omit<SliderMarkProps, 'value'>;
@ -80,10 +78,8 @@ const IAISlider = (props: IAIFullSliderProps) => {
withReset = false, withReset = false,
hideTooltip = false, hideTooltip = false,
isCompact = false, isCompact = false,
isDisabled = false,
handleReset, handleReset,
isResetDisabled,
isSliderDisabled,
isInputDisabled,
sliderFormControlProps, sliderFormControlProps,
sliderFormLabelProps, sliderFormLabelProps,
sliderMarkProps, sliderMarkProps,
@ -149,6 +145,7 @@ const IAISlider = (props: IAIFullSliderProps) => {
} }
: {} : {}
} }
isDisabled={isDisabled}
{...sliderFormControlProps} {...sliderFormControlProps}
> >
<FormLabel {...sliderFormLabelProps} mb={-1}> <FormLabel {...sliderFormLabelProps} mb={-1}>
@ -166,15 +163,13 @@ const IAISlider = (props: IAIFullSliderProps) => {
onMouseEnter={() => setShowTooltip(true)} onMouseEnter={() => setShowTooltip(true)}
onMouseLeave={() => setShowTooltip(false)} onMouseLeave={() => setShowTooltip(false)}
focusThumbOnChange={false} focusThumbOnChange={false}
isDisabled={isSliderDisabled} isDisabled={isDisabled}
// width={width}
{...rest} {...rest}
> >
{withSliderMarks && ( {withSliderMarks && (
<> <>
<SliderMark <SliderMark
value={min} value={min}
// insetInlineStart={0}
sx={{ sx={{
insetInlineStart: '0 !important', insetInlineStart: '0 !important',
insetInlineEnd: 'unset !important', insetInlineEnd: 'unset !important',
@ -185,7 +180,6 @@ const IAISlider = (props: IAIFullSliderProps) => {
</SliderMark> </SliderMark>
<SliderMark <SliderMark
value={max} value={max}
// insetInlineEnd={0}
sx={{ sx={{
insetInlineStart: 'unset !important', insetInlineStart: 'unset !important',
insetInlineEnd: '0 !important', insetInlineEnd: '0 !important',
@ -221,7 +215,6 @@ const IAISlider = (props: IAIFullSliderProps) => {
value={localInputValue} value={localInputValue}
onChange={handleInputChange} onChange={handleInputChange}
onBlur={handleInputBlur} onBlur={handleInputBlur}
isDisabled={isInputDisabled}
{...sliderNumberInputProps} {...sliderNumberInputProps}
> >
<NumberInputField <NumberInputField
@ -246,8 +239,8 @@ const IAISlider = (props: IAIFullSliderProps) => {
aria-label={t('accessibility.reset')} aria-label={t('accessibility.reset')}
tooltip="Reset" tooltip="Reset"
icon={<BiReset />} icon={<BiReset />}
isDisabled={isDisabled}
onClick={handleResetDisable} onClick={handleResetDisable}
isDisabled={isResetDisabled}
{...sliderIAIIconButtonProps} {...sliderIAIIconButtonProps}
/> />
)} )}

View File

@ -0,0 +1,79 @@
import { Badge, Box, ButtonGroup, Flex } from '@chakra-ui/react';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { clearInitialImage } from 'features/parameters/store/generationSlice';
import { useCallback } from 'react';
import IAIIconButton from 'common/components/IAIIconButton';
import { FaUndo, FaUpload } from 'react-icons/fa';
import { useTranslation } from 'react-i18next';
import { Image } from 'app/invokeai';
type ImageToImageOverlayProps = {
setIsLoaded: (isLoaded: boolean) => void;
image: Image;
};
const ImageToImageOverlay = ({
setIsLoaded,
image,
}: ImageToImageOverlayProps) => {
const isImageToImageEnabled = useAppSelector(
(state: RootState) => state.generation.isImageToImageEnabled
);
const dispatch = useAppDispatch();
const { t } = useTranslation();
const handleResetInitialImage = useCallback(() => {
dispatch(clearInitialImage());
setIsLoaded(false);
}, [dispatch, setIsLoaded]);
return (
<Box
sx={{
top: 0,
left: 0,
w: 'full',
h: 'full',
position: 'absolute',
}}
>
<ButtonGroup
sx={{
position: 'absolute',
top: 0,
right: 0,
p: 2,
}}
>
<IAIIconButton
size="sm"
isDisabled={!isImageToImageEnabled}
icon={<FaUndo />}
aria-label={t('accessibility.reset')}
onClick={handleResetInitialImage}
/>
<IAIIconButton
size="sm"
isDisabled={!isImageToImageEnabled}
icon={<FaUpload />}
aria-label={t('common.upload')}
/>
</ButtonGroup>
<Flex
sx={{
position: 'absolute',
bottom: 0,
left: 0,
p: 2,
alignItems: 'flex-start',
}}
>
<Badge variant="solid" colorScheme="base">
{image.metadata?.width} × {image.metadata?.height}
</Badge>
</Flex>
</Box>
);
};
export default ImageToImageOverlay;

View File

@ -2,7 +2,6 @@ import { Box, useToast } from '@chakra-ui/react';
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext'; import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import { useAppDispatch, useAppSelector } from 'app/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import useImageUploader from 'common/hooks/useImageUploader'; import useImageUploader from 'common/hooks/useImageUploader';
import { uploadImage } from 'features/gallery/store/thunks/uploadImage';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { ResourceKey } from 'i18next'; import { ResourceKey } from 'i18next';
import { import {
@ -15,6 +14,7 @@ import {
} from 'react'; } from 'react';
import { FileRejection, useDropzone } from 'react-dropzone'; import { FileRejection, useDropzone } from 'react-dropzone';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
import { imageUploaded } from 'services/thunks/image';
import ImageUploadOverlay from './ImageUploadOverlay'; import ImageUploadOverlay from './ImageUploadOverlay';
type ImageUploaderProps = { type ImageUploaderProps = {
@ -49,7 +49,7 @@ const ImageUploader = (props: ImageUploaderProps) => {
const fileAcceptedCallback = useCallback( const fileAcceptedCallback = useCallback(
async (file: File) => { async (file: File) => {
dispatch(uploadImage({ imageFile: file })); dispatch(imageUploaded({ formData: { file } }));
}, },
[dispatch] [dispatch]
); );
@ -124,7 +124,7 @@ const ImageUploader = (props: ImageUploaderProps) => {
return; return;
} }
dispatch(uploadImage({ imageFile: file })); dispatch(imageUploaded({ formData: { file } }));
}; };
document.addEventListener('paste', pasteImageListener); document.addEventListener('paste', pasteImageListener);
return () => { return () => {

View File

@ -0,0 +1,12 @@
import { Flex, Icon } from '@chakra-ui/react';
import { FaImage } from 'react-icons/fa';
const SelectImagePlaceholder = () => {
return (
<Flex sx={{ h: 36, alignItems: 'center', justifyContent: 'center' }}>
<Icon color="base.400" boxSize={32} as={FaImage}></Icon>
</Flex>
);
};
export default SelectImagePlaceholder;

View File

@ -1,27 +1,160 @@
import { Flex, Heading, Text, VStack } from '@chakra-ui/react'; // import WorkInProgress from './WorkInProgress';
import { useTranslation } from 'react-i18next'; // import ReactFlow, {
import WorkInProgress from './WorkInProgress'; // applyEdgeChanges,
// applyNodeChanges,
// Background,
// Controls,
// Edge,
// Handle,
// Node,
// NodeTypes,
// OnEdgesChange,
// OnNodesChange,
// Position,
// } from 'reactflow';
export default function NodesWIP() { // import 'reactflow/dist/style.css';
const { t } = useTranslation(); // import {
return ( // Fragment,
<WorkInProgress> // FunctionComponent,
<Flex // ReactNode,
sx={{ // useCallback,
flexDirection: 'column', // useMemo,
alignItems: 'center', // useState,
justifyContent: 'center', // } from 'react';
w: '100%', // import { OpenAPIV3 } from 'openapi-types';
h: '100%', // import { filter, map, reduce } from 'lodash';
gap: 4, // import {
textAlign: 'center', // Box,
}} // Flex,
> // FormControl,
<Heading>{t('common.nodes')}</Heading> // FormLabel,
<VStack maxW="50rem" gap={4}> // Input,
<Text>{t('common.nodesDesc')}</Text> // Select,
</VStack> // Switch,
</Flex> // Text,
</WorkInProgress> // NumberInput,
); // NumberInputField,
} // NumberInputStepper,
// NumberIncrementStepper,
// NumberDecrementStepper,
// Tooltip,
// chakra,
// Badge,
// Heading,
// VStack,
// HStack,
// Menu,
// MenuButton,
// MenuList,
// MenuItem,
// MenuItemOption,
// MenuGroup,
// MenuOptionGroup,
// MenuDivider,
// IconButton,
// } from '@chakra-ui/react';
// import { FaPlus } from 'react-icons/fa';
// import {
// FIELD_NAMES as FIELD_NAMES,
// FIELDS,
// INVOCATION_NAMES as INVOCATION_NAMES,
// INVOCATIONS,
// } from 'features/nodeEditor/constants';
// console.log('invocations', INVOCATIONS);
// const nodeTypes = reduce(
// INVOCATIONS,
// (acc, val, key) => {
// acc[key] = val.component;
// return acc;
// },
// {} as NodeTypes
// );
// console.log('nodeTypes', nodeTypes);
// // make initial nodes one of every node for now
// let n = 0;
// const initialNodes = map(INVOCATIONS, (i) => ({
// id: i.type,
// type: i.title,
// position: { x: (n += 20), y: (n += 20) },
// data: {},
// }));
// console.log('initialNodes', initialNodes);
// export default function NodesWIP() {
// const [nodes, setNodes] = useState<Node[]>([]);
// const [edges, setEdges] = useState<Edge[]>([]);
// const onNodesChange: OnNodesChange = useCallback(
// (changes) => setNodes((nds) => applyNodeChanges(changes, nds)),
// []
// );
// const onEdgesChange: OnEdgesChange = useCallback(
// (changes) => setEdges((eds: Edge[]) => applyEdgeChanges(changes, eds)),
// []
// );
// return (
// <Box
// sx={{
// position: 'relative',
// width: 'full',
// height: 'full',
// borderRadius: 'md',
// }}
// >
// <ReactFlow
// nodeTypes={nodeTypes}
// nodes={nodes}
// edges={edges}
// onNodesChange={onNodesChange}
// onEdgesChange={onEdgesChange}
// >
// <Background />
// <Controls />
// </ReactFlow>
// <HStack sx={{ position: 'absolute', top: 2, right: 2 }}>
// {FIELD_NAMES.map((field) => (
// <Badge
// key={field}
// colorScheme={FIELDS[field].color}
// sx={{ userSelect: 'none' }}
// >
// {field}
// </Badge>
// ))}
// </HStack>
// <Menu>
// <MenuButton
// as={IconButton}
// aria-label="Options"
// icon={<FaPlus />}
// sx={{ position: 'absolute', top: 2, left: 2 }}
// />
// <MenuList>
// {INVOCATION_NAMES.map((name) => {
// const invocation = INVOCATIONS[name];
// return (
// <Tooltip
// key={name}
// label={invocation.description}
// placement="end"
// hasArrow
// >
// <MenuItem>{invocation.title}</MenuItem>
// </Tooltip>
// );
// })}
// </MenuList>
// </Menu>
// </Box>
// );
// }
export default {};

View File

@ -14,6 +14,8 @@ const WorkInProgress = (props: WorkInProgressProps) => {
width: '100%', width: '100%',
height: '100%', height: '100%',
bg: 'base.850', bg: 'base.850',
borderRadius: 'base',
position: 'relative',
}} }}
> >
{children} {children}

View File

@ -0,0 +1,18 @@
import { useBreakpoint } from '@chakra-ui/react';
export default function useResolution():
| 'mobile'
| 'tablet'
| 'desktop'
| 'unknown' {
const breakpointValue = useBreakpoint();
const mobileResolutions = ['base', 'sm'];
const tabletResolutions = ['md', 'lg'];
const desktopResolutions = ['xl', '2xl'];
if (mobileResolutions.includes(breakpointValue)) return 'mobile';
if (tabletResolutions.includes(breakpointValue)) return 'tablet';
if (desktopResolutions.includes(breakpointValue)) return 'desktop';
return 'unknown';
}
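
A hypothetical consumer of the hook above; the component and import path are assumptions, since the diff viewer omits the new file's path:

```tsx
import useResolution from 'common/hooks/useResolution'; // path assumed

const ResponsivePanel = () => {
  const resolution = useResolution();

  // Hide the panel on phones, render a compact variant on tablets.
  if (resolution === 'mobile') return null;

  return <aside>{resolution === 'tablet' ? 'compact panel' : 'full panel'}</aside>;
};

export default ResponsivePanel;
```
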

View File

@ -0,0 +1,119 @@
/**
* PARTIAL ZOD IMPLEMENTATION
*
 * doesn't work well because, like most validators, zod is not built to skip invalid values.
 * it mostly works, but manual parsing seems clearer and simpler for now.
*
* in the future it would be really nice if we could use zod for some things:
* - zodios (axios + zod): https://github.com/ecyrbe/zodios
* - openapi to zodios: https://github.com/astahmer/openapi-zod-client
*/
// import { z } from 'zod';
// const zMetadataStringField = z.string();
// export type MetadataStringField = z.infer<typeof zMetadataStringField>;
// const zMetadataIntegerField = z.number().int();
// export type MetadataIntegerField = z.infer<typeof zMetadataIntegerField>;
// const zMetadataFloatField = z.number();
// export type MetadataFloatField = z.infer<typeof zMetadataFloatField>;
// const zMetadataBooleanField = z.boolean();
// export type MetadataBooleanField = z.infer<typeof zMetadataBooleanField>;
// const zMetadataImageField = z.object({
// image_type: z.union([
// z.literal('results'),
// z.literal('uploads'),
// z.literal('intermediates'),
// ]),
// image_name: z.string().min(1),
// });
// export type MetadataImageField = z.infer<typeof zMetadataImageField>;
// const zMetadataLatentsField = z.object({
// latents_name: z.string().min(1),
// });
// export type MetadataLatentsField = z.infer<typeof zMetadataLatentsField>;
// /**
// * zod Schema for any node field. Use a `transform()` to manually parse, skipping invalid values.
// */
// const zAnyMetadataField = z.any().transform((val, ctx) => {
// // Grab the field name from the path
// const fieldName = String(ctx.path[ctx.path.length - 1]);
// // `id` and `type` must be strings if they exist
// if (['id', 'type'].includes(fieldName)) {
// const reservedStringPropertyResult = zMetadataStringField.safeParse(val);
// if (reservedStringPropertyResult.success) {
// return reservedStringPropertyResult.data;
// }
// return;
// }
// // Parse the rest of the fields, only returning the data if the parsing is successful
// const stringFieldResult = zMetadataStringField.safeParse(val);
// if (stringFieldResult.success) {
// return stringFieldResult.data;
// }
// const integerFieldResult = zMetadataIntegerField.safeParse(val);
// if (integerFieldResult.success) {
// return integerFieldResult.data;
// }
// const floatFieldResult = zMetadataFloatField.safeParse(val);
// if (floatFieldResult.success) {
// return floatFieldResult.data;
// }
// const booleanFieldResult = zMetadataBooleanField.safeParse(val);
// if (booleanFieldResult.success) {
// return booleanFieldResult.data;
// }
// const imageFieldResult = zMetadataImageField.safeParse(val);
// if (imageFieldResult.success) {
// return imageFieldResult.data;
// }
// const latentsFieldResult = zMetadataImageField.safeParse(val);
// if (latentsFieldResult.success) {
// return latentsFieldResult.data;
// }
// });
// /**
// * The node metadata schema.
// */
// const zNodeMetadata = z.object({
// session_id: z.string().min(1).optional(),
// node: z.record(z.string().min(1), zAnyMetadataField).optional(),
// });
// export type NodeMetadata = z.infer<typeof zNodeMetadata>;
// const zMetadata = z.object({
// invokeai: zNodeMetadata.optional(),
// 'sd-metadata': z.record(z.string().min(1), z.any()).optional(),
// });
// export type Metadata = z.infer<typeof zMetadata>;
// export const parseMetadata = (
// metadata: Record<string, any>
// ): Metadata | undefined => {
// const result = zMetadata.safeParse(metadata);
// if (!result.success) {
// console.log(result.error.issues);
// return;
// }
// return result.data;
// };
export default {};

View File

@ -0,0 +1,6 @@
import dateFormat from 'dateformat';
/**
* Get a `now` timestamp with 1s precision, formatted as ISO datetime.
*/
export const getTimestamp = () => dateFormat(new Date(), 'isoDateTime');
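
For reference, dateformat's `isoDateTime` mask yields a local ISO datetime with UTC offset; the import path and sample value below are illustrative:

```typescript
import { getTimestamp } from 'services/util/getTimestamp'; // path assumed

// e.g. something like "2023-04-25T03:10:51+0100" (1-second precision)
const createdAt = getTimestamp();
```
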

View File

@ -0,0 +1,28 @@
import { RootState } from 'app/store';
import { useAppSelector } from 'app/storeHooks';
import { OpenAPI } from 'services/api';
export const getUrlAlt = (url: string, shouldTransformUrls: boolean) => {
if (OpenAPI.BASE && shouldTransformUrls) {
return [OpenAPI.BASE, url].join('/');
}
return url;
};
export const useGetUrl = () => {
const shouldTransformUrls = useAppSelector(
(state: RootState) => state.system.shouldTransformUrls
);
return {
shouldTransformUrls,
getUrl: (url?: string) => {
if (OpenAPI.BASE && shouldTransformUrls) {
return [OpenAPI.BASE, url].join('/');
}
return url;
},
};
};
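
A hypothetical consumer of `useGetUrl`; the component, import path and image prop shape are illustrative:

```tsx
import { useGetUrl } from 'common/util/getUrl'; // path assumed

const GalleryThumbnail = ({ image }: { image: { url: string; thumbnail: string } }) => {
  const { getUrl } = useGetUrl();

  // With shouldTransformUrls enabled (e.g. a package build pointing at a
  // remote OpenAPI.BASE), relative API paths are prefixed with the base URL.
  return <img src={getUrl(image.thumbnail)} alt="" />;
};

export default GalleryThumbnail;
```
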

View File

@ -0,0 +1,169 @@
import { forEach, size } from 'lodash';
import { ImageField, LatentsField } from 'services/api';
const OBJECT_TYPESTRING = '[object Object]';
const STRING_TYPESTRING = '[object String]';
const NUMBER_TYPESTRING = '[object Number]';
const BOOLEAN_TYPESTRING = '[object Boolean]';
const ARRAY_TYPESTRING = '[object Array]';
const isObject = (obj: unknown): obj is Record<string | number, any> =>
Object.prototype.toString.call(obj) === OBJECT_TYPESTRING;
const isString = (obj: unknown): obj is string =>
Object.prototype.toString.call(obj) === STRING_TYPESTRING;
const isNumber = (obj: unknown): obj is number =>
Object.prototype.toString.call(obj) === NUMBER_TYPESTRING;
const isBoolean = (obj: unknown): obj is boolean =>
Object.prototype.toString.call(obj) === BOOLEAN_TYPESTRING;
const isArray = (obj: unknown): obj is Array<any> =>
Object.prototype.toString.call(obj) === ARRAY_TYPESTRING;
const parseImageField = (imageField: unknown): ImageField | undefined => {
// Must be an object
if (!isObject(imageField)) {
return;
}
// An ImageField must have both `image_name` and `image_type`
if (!('image_name' in imageField && 'image_type' in imageField)) {
return;
}
// An ImageField's `image_type` must be one of the allowed values
if (
!['results', 'uploads', 'intermediates'].includes(imageField.image_type)
) {
return;
}
// An ImageField's `image_name` must be a string
if (typeof imageField.image_name !== 'string') {
return;
}
// Build a valid ImageField
return {
image_type: imageField.image_type,
image_name: imageField.image_name,
};
};
const parseLatentsField = (latentsField: unknown): LatentsField | undefined => {
// Must be an object
if (!isObject(latentsField)) {
return;
}
// A LatentsField must have a `latents_name`
if (!('latents_name' in latentsField)) {
return;
}
// A LatentsField's `latents_name` must be a string
if (typeof latentsField.latents_name !== 'string') {
return;
}
// Build a valid LatentsField
return {
latents_name: latentsField.latents_name,
};
};
type NodeMetadata = {
[key: string]: string | number | boolean | ImageField | LatentsField;
};
type InvokeAIMetadata = {
session_id?: string;
node?: NodeMetadata;
};
export const parseNodeMetadata = (
nodeMetadata: Record<string | number, any>
): NodeMetadata | undefined => {
if (!isObject(nodeMetadata)) {
return;
}
const parsed: NodeMetadata = {};
forEach(nodeMetadata, (nodeItem, nodeKey) => {
// `id` and `type` must be strings if they are present
if (['id', 'type'].includes(nodeKey)) {
if (isString(nodeItem)) {
parsed[nodeKey] = nodeItem;
}
return;
}
// the only valid object types are ImageField and LatentsField
if (isObject(nodeItem)) {
if ('image_name' in nodeItem || 'image_type' in nodeItem) {
const imageField = parseImageField(nodeItem);
if (imageField) {
parsed[nodeKey] = imageField;
}
return;
}
if ('latents_name' in nodeItem) {
const latentsField = parseLatentsField(nodeItem);
if (latentsField) {
parsed[nodeKey] = latentsField;
}
return;
}
}
// otherwise we accept any string, number or boolean
if (isString(nodeItem) || isNumber(nodeItem) || isBoolean(nodeItem)) {
parsed[nodeKey] = nodeItem;
return;
}
});
if (size(parsed) === 0) {
return;
}
return parsed;
};
export const parseInvokeAIMetadata = (
metadata: Record<string | number, any> | undefined
): InvokeAIMetadata | undefined => {
if (metadata === undefined) {
return;
}
if (!isObject(metadata)) {
return;
}
const parsed: InvokeAIMetadata = {};
forEach(metadata, (item, key) => {
if (key === 'session_id' && isString(item)) {
parsed['session_id'] = item;
}
if (key === 'node' && isObject(item)) {
const nodeMetadata = parseNodeMetadata(item);
if (nodeMetadata) {
parsed['node'] = nodeMetadata;
}
}
});
if (size(parsed) === 0) {
return;
}
return parsed;
};
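
To make the manual-parsing behaviour concrete, here is what the functions above would keep and drop for a sample payload (values illustrative):

```typescript
const raw = {
  session_id: 'abc123',
  node: {
    id: 'txt2img-1',
    type: 'txt2img',
    steps: 30,
    image: { image_type: 'results', image_name: 'foo.png' },
    junk: { unexpected: true }, // dropped: neither ImageField nor LatentsField
  },
  'sd-metadata': { ignored: true }, // dropped: only session_id / node are read
};

const parsed = parseInvokeAIMetadata(raw);
// => {
//      session_id: 'abc123',
//      node: {
//        id: 'txt2img-1',
//        type: 'txt2img',
//        steps: 30,
//        image: { image_type: 'results', image_name: 'foo.png' },
//      },
//    }
```
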

View File

@ -1,8 +1,10 @@
import React, { lazy, PropsWithChildren } from 'react'; import React, { lazy, PropsWithChildren, useEffect, useState } from 'react';
import { Provider } from 'react-redux'; import { Provider } from 'react-redux';
import { PersistGate } from 'redux-persist/integration/react'; import { PersistGate } from 'redux-persist/integration/react';
import { store } from './app/store'; import { buildMiddleware, store } from './app/store';
import { persistor } from './persistor'; import { persistor } from './persistor';
import { OpenAPI } from 'services/api';
import { InvokeTabName } from 'features/ui/store/tabMap';
import '@fontsource/inter/100.css'; import '@fontsource/inter/100.css';
import '@fontsource/inter/200.css'; import '@fontsource/inter/200.css';
import '@fontsource/inter/300.css'; import '@fontsource/inter/300.css';
@ -17,18 +19,68 @@ import Loading from './Loading';
// Localization // Localization
import './i18n'; import './i18n';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
const App = lazy(() => import('./app/App')); const App = lazy(() => import('./app/App'));
const ThemeLocaleProvider = lazy(() => import('./app/ThemeLocaleProvider')); const ThemeLocaleProvider = lazy(() => import('./app/ThemeLocaleProvider'));
export default function Component(props: PropsWithChildren) { interface Props extends PropsWithChildren {
apiUrl?: string;
disabledPanels?: string[];
disabledTabs?: InvokeTabName[];
token?: string;
shouldTransformUrls?: boolean;
shouldFetchImages?: boolean;
}
export default function Component({
apiUrl,
disabledPanels = [],
disabledTabs = [],
token,
children,
shouldTransformUrls,
shouldFetchImages = false,
}: Props) {
useEffect(() => {
// configure API client token
if (token) {
OpenAPI.TOKEN = token;
}
// configure API client base url
if (apiUrl) {
OpenAPI.BASE = apiUrl;
}
// reset dynamically added middlewares
resetMiddlewares();
// TODO: at this point, after resetting the middleware, we really ought to clean up the socket
// stuff by calling `dispatch(socketReset())`. but we cannot dispatch from here as we are
// outside the provider. it's not needed until there is the possibility that we will change
// the `apiUrl`/`token` dynamically.
// rebuild socket middleware with token and apiUrl
addMiddleware(buildMiddleware());
}, [apiUrl, token]);
return ( return (
<React.StrictMode> <React.StrictMode>
<Provider store={store}> <Provider store={store}>
<PersistGate loading={<Loading />} persistor={persistor}> <PersistGate loading={<Loading />} persistor={persistor}>
<React.Suspense fallback={<Loading showText />}> <React.Suspense fallback={<Loading showText />}>
<ThemeLocaleProvider> <ThemeLocaleProvider>
<App>{props.children}</App> <App
options={{
disabledPanels,
disabledTabs,
shouldTransformUrls,
shouldFetchImages,
}}
>
{children}
</App>
</ThemeLocaleProvider> </ThemeLocaleProvider>
</React.Suspense> </React.Suspense>
</PersistGate> </PersistGate>
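
For context, the new props make the exported component embeddable by a host application. A hypothetical mount follows; the package name, tab name and prop values are assumptions rather than anything taken from this diff:

```tsx
import { createRoot } from 'react-dom/client';
import InvokeAIUI from 'invoke-ai-ui'; // package/entry name assumed

// apiUrl/token map onto OpenAPI.BASE and OpenAPI.TOKEN in the effect above;
// the disabled tab name and URLs are illustrative only.
createRoot(document.getElementById('root')!).render(
  <InvokeAIUI
    apiUrl="http://localhost:9090"
    token="my-api-token"
    disabledTabs={['training']}
    shouldTransformUrls
    shouldFetchImages
  />
);
```
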

Some files were not shown because too many files have changed in this diff Show More