Merge branch 'main' into main-text-fixup-PR

blessedcoolant 2023-03-16 04:43:22 +13:00 committed by GitHub
commit 7cf59c1e60
190 changed files with 6762 additions and 2992 deletions


@@ -65,6 +65,16 @@ body:
placeholder: 8GB
validations:
required: false
- type: input
id: version-number
attributes:
label: What version did you experience this issue on?
description: |
Please share the version of InvokeAI on which you experienced the issue. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
placeholder: X.X.X
validations:
required: true
- type: textarea
id: what-happened


@@ -2,8 +2,6 @@ name: Close inactive issues
on:
schedule:
- cron: "00 6 * * *"
issue_comment:
types: [ "created" ]
env:
DAYS_BEFORE_ISSUE_STALE: 14
@@ -12,7 +10,6 @@ env:
jobs:
close-issues:
runs-on: ubuntu-latest
if: ${{ !github.event.issue.pull_request }}
permissions:
issues: write
pull-requests: write
@@ -21,9 +18,10 @@ jobs:
with:
days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
stale-issue-label: "stale"
stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. Please reply with a comment to keep the issue open. We recommend testing with the latest release to make sure it hasn't already been fixed."
close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please open a new one and reference issue ${{ github.event.issue.number }}."
stale-issue-label: "Inactive Issue"
stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If you are still experiencing this issue, please reply to confirm that it still occurs with the latest release."
close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please open a new issue."
days-before-pr-stale: -1
days-before-pr-close: -1
repo-token: ${{ secrets.GITHUB_TOKEN }}
operations-per-run: 500


@@ -1,12 +1,12 @@
name: Test invoke.py pip
on:
pull_request:
paths-ignore:
- 'pyproject.toml'
- 'invokeai/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- 'invokeai/frontend/web/dist/**'
paths:
- '**'
- '!pyproject.toml'
- '!invokeai/**'
- 'invokeai/frontend/web/**'
- '!invokeai/frontend/web/dist/**'
merge_group:
workflow_dispatch:


@@ -6,15 +6,13 @@ on:
paths:
- 'pyproject.toml'
- 'invokeai/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- '!invokeai/frontend/web/**'
- 'invokeai/frontend/web/dist/**'
pull_request:
paths:
- 'pyproject.toml'
- 'invokeai/**'
- 'invokeai/backend/**'
- 'invokeai/configs/**'
- '!invokeai/frontend/web/**'
- 'invokeai/frontend/web/dist/**'
types:
- 'ready_for_review'


@@ -148,7 +148,7 @@ manager, please follow these steps:
=== "CUDA (NVidia)"
```bash
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"


@@ -4,7 +4,8 @@ import os
from argparse import Namespace
from ...backend import Globals
from ..services.generate_initializer import get_generate
from ..services.model_manager_initializer import get_model_manager
from ..services.restoration_services import RestorationServices
from ..services.graph import GraphExecutionState
from ..services.image_storage import DiskImageStorage
from ..services.invocation_queue import MemoryInvocationQueue
@@ -37,18 +38,16 @@ class ApiDependencies:
invoker: Invoker = None
@staticmethod
def initialize(args, config, event_handler_id: int):
Globals.try_patchmatch = args.patchmatch
Globals.always_use_cpu = args.always_use_cpu
Globals.internet_available = args.internet_available and check_internet()
Globals.disable_xformers = not args.xformers
Globals.ckpt_convert = args.ckpt_convert
def initialize(config, event_handler_id: int):
Globals.try_patchmatch = config.patchmatch
Globals.always_use_cpu = config.always_use_cpu
Globals.internet_available = config.internet_available and check_internet()
Globals.disable_xformers = not config.xformers
Globals.ckpt_convert = config.ckpt_convert
# TODO: Use a logger
print(f">> Internet connectivity is {Globals.internet_available}")
generate = get_generate(args, config)
events = FastAPIEventService(event_handler_id)
output_folder = os.path.abspath(
@@ -61,7 +60,7 @@ class ApiDependencies:
db_location = os.path.join(output_folder, "invokeai.db")
services = InvocationServices(
generate=generate,
model_manager=get_model_manager(config),
events=events,
images=images,
queue=MemoryInvocationQueue(),
@@ -69,6 +68,7 @@ class ApiDependencies:
filename=db_location, table_name="graph_executions"
),
processor=DefaultInvocationProcessor(),
restoration=RestorationServices(config),
)
ApiDependencies.invoker = Invoker(services)


@@ -10,6 +10,7 @@ from pydantic.fields import Field
from ...invocations import *
from ...invocations.baseinvocation import BaseInvocation
from ...services.graph import (
Edge,
EdgeConnection,
Graph,
GraphExecutionState,
@@ -92,7 +93,7 @@ async def get_session(
async def add_node(
session_id: str = Path(description="The id of the session"),
node: Annotated[
Union[BaseInvocation.get_invocations()], Field(discriminator="type")
Union[BaseInvocation.get_invocations()], Field(discriminator="type") # type: ignore
] = Body(description="The node to add"),
) -> str:
"""Adds a node to the graph"""
@@ -125,7 +126,7 @@ async def update_node(
session_id: str = Path(description="The id of the session"),
node_path: str = Path(description="The path to the node in the graph"),
node: Annotated[
Union[BaseInvocation.get_invocations()], Field(discriminator="type")
Union[BaseInvocation.get_invocations()], Field(discriminator="type") # type: ignore
] = Body(description="The new node"),
) -> GraphExecutionState:
"""Updates a node in the graph and removes all linked edges"""
@@ -186,7 +187,7 @@ async def delete_node(
)
async def add_edge(
session_id: str = Path(description="The id of the session"),
edge: tuple[EdgeConnection, EdgeConnection] = Body(description="The edge to add"),
edge: Edge = Body(description="The edge to add"),
) -> GraphExecutionState:
"""Adds an edge to the graph"""
session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
@@ -228,9 +229,9 @@ async def delete_edge(
return Response(status_code=404)
try:
edge = (
EdgeConnection(node_id=from_node_id, field=from_field),
EdgeConnection(node_id=to_node_id, field=to_field),
edge = Edge(
source=EdgeConnection(node_id=from_node_id, field=from_field),
destination=EdgeConnection(node_id=to_node_id, field=to_field)
)
session.delete_edge(edge)
ApiDependencies.invoker.services.graph_execution_manager.set(
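The add_edge and delete_edge routes now exchange a structured Edge body instead of a bare tuple of EdgeConnections. A minimal sketch of building that payload (the `invokeai.app.services.graph` import path is an assumption based on this file's relative imports):

```python
# Hypothetical sketch: the Edge body now expected by the add_edge route.
# The import path is an assumption based on this file's relative imports.
from invokeai.app.services.graph import Edge, EdgeConnection

edge = Edge(
    source=EdgeConnection(node_id="node_a", field="image"),
    destination=EdgeConnection(node_id="node_b", field="image"),
)
print(edge.json())  # pydantic model -> JSON request body
```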


@@ -1,5 +1,4 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import asyncio
from inspect import signature
@@ -53,11 +52,11 @@ config = {}
# Add startup event to load dependencies
@app.on_event("startup")
async def startup_event():
args = Args()
config = args.parse_args()
config = Args()
config.parse_args()
ApiDependencies.initialize(
args=args, config=config, event_handler_id=event_handler_id
config=config, event_handler_id=event_handler_id
)
@@ -113,10 +112,8 @@ def custom_openapi():
output_type_title = output_type_titles[output_type.__name__]
invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
if "additionalProperties" not in invoker_schema:
invoker_schema["additionalProperties"] = {}
invoker_schema["additionalProperties"]["outputs"] = outputs_ref
invoker_schema["output"] = outputs_ref
app.openapi_schema = openapi_schema
return app.openapi_schema


@@ -17,8 +17,9 @@ from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_gra
from .invocations import *
from .invocations.baseinvocation import BaseInvocation
from .services.events import EventServiceBase
from .services.generate_initializer import get_generate
from .services.graph import EdgeConnection, GraphExecutionState
from .services.model_manager_initializer import get_model_manager
from .services.restoration_services import RestorationServices
from .services.graph import Edge, EdgeConnection, GraphExecutionState
from .services.image_storage import DiskImageStorage
from .services.invocation_queue import MemoryInvocationQueue
from .services.invocation_services import InvocationServices
@@ -76,7 +77,7 @@ def get_command_parser() -> argparse.ArgumentParser:
def generate_matching_edges(
a: BaseInvocation, b: BaseInvocation
) -> list[tuple[EdgeConnection, EdgeConnection]]:
) -> list[Edge]:
"""Generates all possible edges between two invocations"""
atype = type(a)
btype = type(b)
@@ -93,9 +94,9 @@ def generate_matching_edges(
matching_fields = matching_fields.difference(invalid_fields)
edges = [
(
EdgeConnection(node_id=a.id, field=field),
EdgeConnection(node_id=b.id, field=field),
Edge(
source=EdgeConnection(node_id=a.id, field=field),
destination=EdgeConnection(node_id=b.id, field=field)
)
for field in matching_fields
]
@@ -110,30 +111,24 @@ class SessionError(Exception):
def invoke_all(context: CliContext):
"""Runs all invocations in the specified session"""
context.invoker.invoke(context.session, invoke_all=True)
while not context.session.is_complete():
while not context.get_session().is_complete():
# Wait some time
session = context.get_session()
time.sleep(0.1)
# Print any errors
if context.session.has_error():
for n in context.session.errors:
print(
f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {session.errors[n]}"
f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
)
raise SessionError()
def invoke_cli():
args = Args()
config = args.parse_args()
generate = get_generate(args, config)
# NOTE: load model on first use, uncomment to load at startup
# TODO: Make this a config option?
# generate.load_model()
config = Args()
config.parse_args()
model_manager = get_model_manager(config)
events = EventServiceBase()
@@ -145,7 +140,7 @@ def invoke_cli():
db_location = os.path.join(output_folder, "invokeai.db")
services = InvocationServices(
generate=generate,
model_manager=model_manager,
events=events,
images=DiskImageStorage(output_folder),
queue=MemoryInvocationQueue(),
@@ -153,6 +148,7 @@ def invoke_cli():
filename=db_location, table_name="graph_executions"
),
processor=DefaultInvocationProcessor(),
restoration=RestorationServices(config),
)
invoker = Invoker(services)
@@ -206,7 +202,7 @@ def invoke_cli():
continue
# Pipe previous command output (if there was a previous command)
edges = []
edges: list[Edge] = list()
if len(history) > 0 or current_id != start_id:
from_id = (
history[0] if current_id == start_id else str(current_id - 1)
@@ -228,19 +224,19 @@
matching_edges = generate_matching_edges(
link_node, command.command
)
matching_destinations = [e[1] for e in matching_edges]
edges = [e for e in edges if e[1] not in matching_destinations]
matching_destinations = [e.destination for e in matching_edges]
edges = [e for e in edges if e.destination not in matching_destinations]
edges.extend(matching_edges)
if "link" in args and args["link"]:
for link in args["link"]:
edges = [e for e in edges if e[1].node_id != command.command.id and e[1].field != link[2]]
edges = [e for e in edges if e.destination.node_id != command.command.id and e.destination.field != link[2]]
edges.append(
(
EdgeConnection(node_id=link[1], field=link[0]),
EdgeConnection(
Edge(
source=EdgeConnection(node_id=link[1], field=link[0]),
destination=EdgeConnection(
node_id=command.command.id, field=link[2]
),
)
)
)


@@ -12,12 +12,12 @@ from ..services.image_storage import ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
SAMPLER_NAME_VALUES = Literal[
"ddim", "plms", "k_lms", "k_dpm_2", "k_dpm_2_a", "k_euler", "k_euler_a", "k_heun"
tuple(InvokeAIGenerator.schedulers())
]
# Text to image
class TextToImageInvocation(BaseInvocation):
"""Generates an image using text2img."""
@@ -57,19 +57,18 @@ class TextToImageInvocation(BaseInvocation):
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
if self.model is None or self.model == "":
self.model = context.services.generate.model_name
# Set the model (if already cached, this does nothing)
context.services.generate.set_model(self.model)
results = context.services.generate.prompt2image(
# (right now uses whatever model is currently set in the model manager)
model = context.services.model_manager.get_model()
outputs = Txt2Img(model).generate(
prompt=self.prompt,
step_callback=step_callback,
**self.dict(
exclude={"prompt"}
), # Shorthand for passing all of the parameters above manually
)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
# each time it is called. We only need the first one.
generate_output = next(outputs)
# Results are image and seed, unwrap for now and ignore the seed
# TODO: pre-seed?
@@ -78,7 +77,7 @@ class TextToImageInvocation(BaseInvocation):
image_name = context.services.images.create_name(
context.graph_execution_state_id, self.id
)
context.services.images.save(image_type, image_name, results[0][0])
context.services.images.save(image_type, image_name, generate_output.image)
return ImageOutput(
image=ImageField(image_type=image_type, image_name=image_name)
)
@@ -115,23 +114,20 @@ class ImageToImageInvocation(TextToImageInvocation):
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
if self.model is None or self.model == "":
self.model = context.services.generate.model_name
# Set the model (if already cached, this does nothing)
context.services.generate.set_model(self.model)
results = context.services.generate.prompt2image(
prompt=self.prompt,
init_img=image,
init_mask=mask,
step_callback=step_callback,
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
model = context.services.model_manager.get_model()
generator_output = next(
Img2Img(model).generate(
prompt=self.prompt,
init_image=image,
init_mask=mask,
step_callback=step_callback,
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
)
result_image = results[0][0]
result_image = generator_output.image
# Results are image and seed, unwrap for now and ignore the seed
# TODO: pre-seed?
@@ -145,7 +141,6 @@ class ImageToImageInvocation(TextToImageInvocation):
image=ImageField(image_type=image_type, image_name=image_name)
)
class InpaintInvocation(ImageToImageInvocation):
"""Generates an image using inpaint."""
@@ -180,23 +175,20 @@ class InpaintInvocation(ImageToImageInvocation):
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
if self.model is None or self.model == "":
self.model = context.services.generate.model_name
# Set the model (if already cached, this does nothing)
context.services.generate.set_model(self.model)
results = context.services.generate.prompt2image(
prompt=self.prompt,
init_img=image,
init_mask=mask,
step_callback=step_callback,
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
model = context.services.model_manager.get_model()
generator_output = next(
Inpaint(model).generate(
prompt=self.prompt,
init_image=image,
mask_image=mask,
step_callback=step_callback,
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
)
result_image = results[0][0]
result_image = generator_output.image
# Results are image and seed, unwrap for now and ignore the seed
# TODO: pre-seed?
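The three invocations above now share one pattern: fetch the currently loaded model from the model manager, pull a single result from the generator's infinite output iterator, and save its image. A condensed, non-verbatim sketch of that pattern:

```python
# Condensed sketch of the shared invocation pattern in this file.
model = context.services.model_manager.get_model()  # currently loaded model info
outputs = Txt2Img(model).generate(
    prompt=self.prompt,
    step_callback=step_callback,
)
generate_output = next(outputs)  # infinite iterator; take the first result
image = generate_output.image    # .seed, .model_hash and .params also available
```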


@@ -8,7 +8,6 @@ from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
class RestoreFaceInvocation(BaseInvocation):
"""Restores faces in an image."""
#fmt: off
@@ -23,7 +22,7 @@ class RestoreFaceInvocation(BaseInvocation):
image = context.services.images.get(
self.image.image_type, self.image.image_name
)
results = context.services.generate.upscale_and_reconstruct(
results = context.services.restoration.upscale_and_reconstruct(
image_list=[[image, 0]],
upscale=None,
strength=self.strength, # GFPGAN strength


@@ -26,7 +26,7 @@ class UpscaleInvocation(BaseInvocation):
image = context.services.images.get(
self.image.image_type, self.image.image_name
)
results = context.services.generate.upscale_and_reconstruct(
results = context.services.restoration.upscale_and_reconstruct(
image_list=[[image, 0]],
upscale=(self.level, self.strength),
strength=0.0, # GFPGAN strength


@@ -1,255 +0,0 @@
import os
import sys
import traceback
from argparse import Namespace
import invokeai.version
from invokeai.backend import Generate, ModelManager
from ...backend import Globals
# TODO: most of this code should be split into individual services as the Generate.py code is deprecated
def get_generate(args, config) -> Generate:
if not args.conf:
config_file = os.path.join(Globals.root, "configs", "models.yaml")
if not os.path.exists(config_file):
report_model_error(
args, FileNotFoundError(f"The file {config_file} could not be found.")
)
print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
import transformers # type: ignore
transformers.logging.set_verbosity_error()
import diffusers
diffusers.logging.set_verbosity_error()
# Loading Face Restoration and ESRGAN Modules
gfpgan, codeformer, esrgan = load_face_restoration(args)
# normalize the config directory relative to root
if not os.path.isabs(args.conf):
args.conf = os.path.normpath(os.path.join(Globals.root, args.conf))
if args.embeddings:
if not os.path.isabs(args.embedding_path):
embedding_path = os.path.normpath(
os.path.join(Globals.root, args.embedding_path)
)
else:
embedding_path = args.embedding_path
else:
embedding_path = None
# migrate legacy models
ModelManager.migrate_models()
# load the infile as a list of lines
if args.infile:
try:
if os.path.isfile(args.infile):
infile = open(args.infile, "r", encoding="utf-8")
elif args.infile == "-": # stdin
infile = sys.stdin
else:
raise FileNotFoundError(f"{args.infile} not found.")
except (FileNotFoundError, IOError) as e:
print(f"{e}. Aborting.")
sys.exit(-1)
# creating a Generate object:
try:
gen = Generate(
conf=args.conf,
model=args.model,
sampler_name=args.sampler_name,
embedding_path=embedding_path,
full_precision=args.full_precision,
precision=args.precision,
gfpgan=gfpgan,
codeformer=codeformer,
esrgan=esrgan,
free_gpu_mem=args.free_gpu_mem,
safety_checker=args.safety_checker,
max_loaded_models=args.max_loaded_models,
)
except (FileNotFoundError, TypeError, AssertionError) as e:
report_model_error(args, e)
except (IOError, KeyError) as e:
print(f"{e}. Aborting.")
sys.exit(-1)
if args.seamless:
print(">> changed to seamless tiling mode")
# preload the model
try:
gen.load_model()
except KeyError:
pass
except Exception as e:
report_model_error(args, e)
# try to autoconvert new models
# autoimport new .ckpt files
if path := args.autoconvert:
gen.model_manager.autoconvert_weights(
conf_path=args.conf,
weights_directory=path,
)
return gen
def load_face_restoration(opt):
try:
gfpgan, codeformer, esrgan = None, None, None
if opt.restore or opt.esrgan:
from invokeai.backend.restoration import Restoration
restoration = Restoration()
if opt.restore:
gfpgan, codeformer = restoration.load_face_restore_models(
opt.gfpgan_model_path
)
else:
print(">> Face restoration disabled")
if opt.esrgan:
esrgan = restoration.load_esrgan(opt.esrgan_bg_tile)
else:
print(">> Upscaling disabled")
else:
print(">> Face restoration and upscaling disabled")
except (ModuleNotFoundError, ImportError):
print(traceback.format_exc(), file=sys.stderr)
print(">> You may need to install the ESRGAN and/or GFPGAN modules")
return gfpgan, codeformer, esrgan
def report_model_error(opt: Namespace, e: Exception):
print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
print(
"** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
)
yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
if yes_to_all:
print(
"** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
)
else:
response = input(
"Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
)
if response.startswith(("n", "N")):
return
print("invokeai-configure is launching....\n")
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed
root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
config = ["--config", opt.conf] if opt.conf is not None else []
previous_args = sys.argv
sys.argv = ["invokeai-configure"]
sys.argv.extend(root_dir)
sys.argv.extend(config)
if yes_to_all is not None:
for arg in yes_to_all.split():
sys.argv.append(arg)
from invokeai.frontend.install import invokeai_configure
invokeai_configure()
# TODO: Figure out how to restart
# print('** InvokeAI will now restart')
# sys.argv = previous_args
# main() # would rather do a os.exec(), but doesn't exist?
# sys.exit(0)
# Temporary initializer for Generate until we migrate off of it
def old_get_generate(args, config) -> Generate:
# TODO: Remove the need for globals
from invokeai.backend.globals import Globals
# alert - setting globals here
Globals.root = os.path.expanduser(
args.root_dir or os.environ.get("INVOKEAI_ROOT") or os.path.abspath(".")
)
Globals.try_patchmatch = args.patchmatch
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
import transformers
transformers.logging.set_verbosity_error()
# Loading Face Restoration and ESRGAN Modules
gfpgan, codeformer, esrgan = None, None, None
try:
if config.restore or config.esrgan:
from ldm.invoke.restoration import Restoration
restoration = Restoration()
if config.restore:
gfpgan, codeformer = restoration.load_face_restore_models(
config.gfpgan_model_path
)
else:
print(">> Face restoration disabled")
if config.esrgan:
esrgan = restoration.load_esrgan(config.esrgan_bg_tile)
else:
print(">> Upscaling disabled")
else:
print(">> Face restoration and upscaling disabled")
except (ModuleNotFoundError, ImportError):
print(traceback.format_exc(), file=sys.stderr)
print(">> You may need to install the ESRGAN and/or GFPGAN modules")
# normalize the config directory relative to root
if not os.path.isabs(config.conf):
config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))
if config.embeddings:
if not os.path.isabs(config.embedding_path):
embedding_path = os.path.normpath(
os.path.join(Globals.root, config.embedding_path)
)
else:
embedding_path = None
# TODO: lazy-initialize this by wrapping it
try:
generate = Generate(
conf=config.conf,
model=config.model,
sampler_name=config.sampler_name,
embedding_path=embedding_path,
full_precision=config.full_precision,
precision=config.precision,
gfpgan=gfpgan,
codeformer=codeformer,
esrgan=esrgan,
free_gpu_mem=config.free_gpu_mem,
safety_checker=config.safety_checker,
max_loaded_models=config.max_loaded_models,
)
except (FileNotFoundError, TypeError, AssertionError):
# emergency_model_reconfigure() # TODO?
sys.exit(-1)
except (IOError, KeyError) as e:
print(f"{e}. Aborting.")
sys.exit(-1)
generate.free_gpu_mem = config.free_gpu_mem
return generate


@@ -44,6 +44,11 @@ class EdgeConnection(BaseModel):
return hash(f"{self.node_id}.{self.field}")
class Edge(BaseModel):
source: EdgeConnection = Field(description="The connection for the edge's from node and field")
destination: EdgeConnection = Field(description="The connection for the edge's to node and field")
def get_output_field(node: BaseInvocation, field: str) -> Any:
node_type = type(node)
node_outputs = get_type_hints(node_type.get_output_type())
@@ -194,7 +199,7 @@ class Graph(BaseModel):
nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field(
description="The nodes in this graph", default_factory=dict
)
edges: list[tuple[EdgeConnection, EdgeConnection]] = Field(
edges: list[Edge] = Field(
description="The connections between nodes and their fields in this graph",
default_factory=list,
)
@@ -251,7 +256,7 @@
except NodeNotFoundError:
pass # Ignore, node doesn't exist (should this throw?)
def add_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None:
def add_edge(self, edge: Edge) -> None:
"""Adds an edge to a graph
:raises InvalidEdgeError: the provided edge is invalid.
@@ -262,7 +267,7 @@
else:
raise InvalidEdgeError()
def delete_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None:
def delete_edge(self, edge: Edge) -> None:
"""Deletes an edge from a graph"""
try:
@@ -280,7 +285,7 @@
# Validate all edges reference nodes in the graph
node_ids = set(
[e[0].node_id for e in self.edges] + [e[1].node_id for e in self.edges]
[e.source.node_id for e in self.edges] + [e.destination.node_id for e in self.edges]
)
if not all((self.has_node(node_id) for node_id in node_ids)):
return False
@@ -294,10 +299,10 @@
if not all(
(
are_connections_compatible(
self.get_node(e[0].node_id),
e[0].field,
self.get_node(e[1].node_id),
e[1].field,
self.get_node(e.source.node_id),
e.source.field,
self.get_node(e.destination.node_id),
e.destination.field,
)
for e in self.edges
)
@@ -328,58 +333,58 @@
return True
def _is_edge_valid(self, edge: tuple[EdgeConnection, EdgeConnection]) -> bool:
def _is_edge_valid(self, edge: Edge) -> bool:
"""Validates that a new edge doesn't create a cycle in the graph"""
# Validate that the nodes exist (edges may contain node paths, so we can't just check for nodes directly)
try:
from_node = self.get_node(edge[0].node_id)
to_node = self.get_node(edge[1].node_id)
from_node = self.get_node(edge.source.node_id)
to_node = self.get_node(edge.destination.node_id)
except NodeNotFoundError:
return False
# Validate that an edge to this node+field doesn't already exist
input_edges = self._get_input_edges(edge[1].node_id, edge[1].field)
input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)
if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):
return False
# Validate that no cycles would be created
g = self.nx_graph_flat()
g.add_edge(edge[0].node_id, edge[1].node_id)
g.add_edge(edge.source.node_id, edge.destination.node_id)
if not nx.is_directed_acyclic_graph(g):
return False
# Validate that the field types are compatible
if not are_connections_compatible(
from_node, edge[0].field, to_node, edge[1].field
from_node, edge.source.field, to_node, edge.destination.field
):
return False
# Validate if iterator output type matches iterator input type (if this edge results in both being set)
if isinstance(to_node, IterateInvocation) and edge[1].field == "collection":
if isinstance(to_node, IterateInvocation) and edge.destination.field == "collection":
if not self._is_iterator_connection_valid(
edge[1].node_id, new_input=edge[0]
edge.destination.node_id, new_input=edge.source
):
return False
# Validate if iterator input type matches output type (if this edge results in both being set)
if isinstance(from_node, IterateInvocation) and edge[0].field == "item":
if isinstance(from_node, IterateInvocation) and edge.source.field == "item":
if not self._is_iterator_connection_valid(
edge[0].node_id, new_output=edge[1]
edge.source.node_id, new_output=edge.destination
):
return False
# Validate if collector input type matches output type (if this edge results in both being set)
if isinstance(to_node, CollectInvocation) and edge[1].field == "item":
if isinstance(to_node, CollectInvocation) and edge.destination.field == "item":
if not self._is_collector_connection_valid(
edge[1].node_id, new_input=edge[0]
edge.destination.node_id, new_input=edge.source
):
return False
# Validate if collector output type matches input type (if this edge results in both being set)
if isinstance(from_node, CollectInvocation) and edge[0].field == "collection":
if isinstance(from_node, CollectInvocation) and edge.source.field == "collection":
if not self._is_collector_connection_valid(
edge[0].node_id, new_output=edge[1]
edge.source.node_id, new_output=edge.destination
):
return False
@@ -438,15 +443,15 @@
# Remove the graph prefix from the node path
new_graph_node_path = (
new_node.id
if "." not in edge[1].node_id
else f'{edge[1].node_id[edge[1].node_id.rindex("."):]}.{new_node.id}'
if "." not in edge.destination.node_id
else f'{edge.destination.node_id[edge.destination.node_id.rindex("."):]}.{new_node.id}'
)
graph.add_edge(
(
edge[0],
EdgeConnection(
node_id=new_graph_node_path, field=edge[1].field
),
Edge(
source=edge.source,
destination=EdgeConnection(
node_id=new_graph_node_path, field=edge.destination.field
)
)
)
@@ -454,51 +459,51 @@
# Remove the graph prefix from the node path
new_graph_node_path = (
new_node.id
if "." not in edge[0].node_id
else f'{edge[0].node_id[edge[0].node_id.rindex("."):]}.{new_node.id}'
if "." not in edge.source.node_id
else f'{edge.source.node_id[edge.source.node_id.rindex("."):]}.{new_node.id}'
)
graph.add_edge(
(
EdgeConnection(
node_id=new_graph_node_path, field=edge[0].field
Edge(
source=EdgeConnection(
node_id=new_graph_node_path, field=edge.source.field
),
edge[1],
destination=edge.destination
)
)
def _get_input_edges(
self, node_path: str, field: Optional[str] = None
) -> list[tuple[EdgeConnection, EdgeConnection]]:
) -> list[Edge]:
"""Gets all input edges for a node"""
edges = self._get_input_edges_and_graphs(node_path)
# Filter to edges that match the field
filtered_edges = (e for e in edges if field is None or e[2][1].field == field)
filtered_edges = (e for e in edges if field is None or e[2].destination.field == field)
# Create full node paths for each edge
return [
(
EdgeConnection(
node_id=self._get_node_path(e[0].node_id, prefix=prefix),
field=e[0].field,
),
EdgeConnection(
node_id=self._get_node_path(e[1].node_id, prefix=prefix),
field=e[1].field,
Edge(
source=EdgeConnection(
node_id=self._get_node_path(e.source.node_id, prefix=prefix),
field=e.source.field,
),
destination=EdgeConnection(
node_id=self._get_node_path(e.destination.node_id, prefix=prefix),
field=e.destination.field,
)
)
for _, prefix, e in filtered_edges
]
def _get_input_edges_and_graphs(
self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", str, tuple[EdgeConnection, EdgeConnection]]]:
) -> list[tuple["Graph", str, Edge]]:
"""Gets all input edges for a node along with the graph they are in and the graph's path"""
edges = list()
# Return any input edges that appear in this graph
edges.extend(
[(self, prefix, e) for e in self.edges if e[1].node_id == node_path]
[(self, prefix, e) for e in self.edges if e.destination.node_id == node_path]
)
node_id = (
@@ -522,37 +527,37 @@
def _get_output_edges(
self, node_path: str, field: str
) -> list[tuple[EdgeConnection, EdgeConnection]]:
) -> list[Edge]:
"""Gets all output edges for a node"""
edges = self._get_output_edges_and_graphs(node_path)
# Filter to edges that match the field
filtered_edges = (e for e in edges if e[2][0].field == field)
filtered_edges = (e for e in edges if e[2].source.field == field)
# Create full node paths for each edge
return [
(
EdgeConnection(
node_id=self._get_node_path(e[0].node_id, prefix=prefix),
field=e[0].field,
),
EdgeConnection(
node_id=self._get_node_path(e[1].node_id, prefix=prefix),
field=e[1].field,
Edge(
source=EdgeConnection(
node_id=self._get_node_path(e.source.node_id, prefix=prefix),
field=e.source.field,
),
destination=EdgeConnection(
node_id=self._get_node_path(e.destination.node_id, prefix=prefix),
field=e.destination.field,
)
)
for _, prefix, e in filtered_edges
]
def _get_output_edges_and_graphs(
self, node_path: str, prefix: Optional[str] = None
) -> list[tuple["Graph", str, tuple[EdgeConnection, EdgeConnection]]]:
) -> list[tuple["Graph", str, Edge]]:
"""Gets all output edges for a node along with the graph they are in and the graph's path"""
edges = list()
# Return any input edges that appear in this graph
edges.extend(
[(self, prefix, e) for e in self.edges if e[0].node_id == node_path]
[(self, prefix, e) for e in self.edges if e.source.node_id == node_path]
)
node_id = (
@@ -580,8 +585,8 @@
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> bool:
inputs = list([e[0] for e in self._get_input_edges(node_path, "collection")])
outputs = list([e[1] for e in self._get_output_edges(node_path, "item")])
inputs = list([e.source for e in self._get_input_edges(node_path, "collection")])
outputs = list([e.destination for e in self._get_output_edges(node_path, "item")])
if new_input is not None:
inputs.append(new_input)
@@ -622,8 +627,8 @@
new_input: Optional[EdgeConnection] = None,
new_output: Optional[EdgeConnection] = None,
) -> bool:
inputs = list([e[0] for e in self._get_input_edges(node_path, "item")])
outputs = list([e[1] for e in self._get_output_edges(node_path, "collection")])
inputs = list([e.source for e in self._get_input_edges(node_path, "item")])
outputs = list([e.destination for e in self._get_output_edges(node_path, "collection")])
if new_input is not None:
inputs.append(new_input)
@@ -684,7 +689,7 @@
# TODO: Cache this?
g = nx.DiGraph()
g.add_nodes_from([n for n in self.nodes.keys()])
g.add_edges_from(set([(e[0].node_id, e[1].node_id) for e in self.edges]))
g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
return g
def nx_graph_flat(
@@ -711,7 +716,7 @@
# TODO: figure out if iteration nodes need to be expanded
unique_edges = set([(e[0].node_id, e[1].node_id) for e in self.edges])
unique_edges = set([(e.source.node_id, e.destination.node_id) for e in self.edges])
g.add_edges_from(
[
(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix))
@@ -841,13 +846,13 @@ class GraphExecutionState(BaseModel):
input_collection_prepared_node_id = next(
n[1]
for n in iteration_node_map
if n[0] == input_collection_edge[0].node_id
if n[0] == input_collection_edge.source.node_id
)
input_collection_prepared_node_output = self.results[
input_collection_prepared_node_id
]
input_collection = getattr(
input_collection_prepared_node_output, input_collection_edge[0].field
input_collection_prepared_node_output, input_collection_edge.source.field
)
self_iteration_count = len(input_collection)
@@ -864,11 +869,11 @@
new_edges = list()
for edge in input_edges:
for input_node_id in (
n[1] for n in iteration_node_map if n[0] == edge[0].node_id
n[1] for n in iteration_node_map if n[0] == edge.source.node_id
):
new_edge = (
EdgeConnection(node_id=input_node_id, field=edge[0].field),
EdgeConnection(node_id="", field=edge[1].field),
new_edge = Edge(
source=EdgeConnection(node_id=input_node_id, field=edge.source.field),
destination=EdgeConnection(node_id="", field=edge.destination.field),
)
new_edges.append(new_edge)
@@ -893,9 +898,9 @@
# Add new edges to execution graph
for edge in new_edges:
new_edge = (
edge[0],
EdgeConnection(node_id=new_node.id, field=edge[1].field),
new_edge = Edge(
source=edge.source,
destination=EdgeConnection(node_id=new_node.id, field=edge.destination.field),
)
self.execution_graph.add_edge(new_edge)
@@ -1043,26 +1048,26 @@
return self.execution_graph.nodes[next_node]
def _prepare_inputs(self, node: BaseInvocation):
input_edges = [e for e in self.execution_graph.edges if e[1].node_id == node.id]
input_edges = [e for e in self.execution_graph.edges if e.destination.node_id == node.id]
if isinstance(node, CollectInvocation):
output_collection = [
getattr(self.results[edge[0].node_id], edge[0].field)
getattr(self.results[edge.source.node_id], edge.source.field)
for edge in input_edges
if edge[1].field == "item"
if edge.destination.field == "item"
]
setattr(node, "collection", output_collection)
else:
for edge in input_edges:
output_value = getattr(self.results[edge[0].node_id], edge[0].field)
setattr(node, edge[1].field, output_value)
output_value = getattr(self.results[edge.source.node_id], edge.source.field)
setattr(node, edge.destination.field, output_value)
# TODO: Add API for modifying underlying graph that checks if the change will be valid given the current execution state
def _is_edge_valid(self, edge: tuple[EdgeConnection, EdgeConnection]) -> bool:
def _is_edge_valid(self, edge: Edge) -> bool:
if not self.graph._is_edge_valid(edge):
return False
# Invalid if destination has already been prepared or executed
if edge[1].node_id in self.source_prepared_mapping:
if edge.destination.node_id in self.source_prepared_mapping:
return False
# Otherwise, the edge is valid
@@ -1089,17 +1094,17 @@
)
self.graph.delete_node(node_path)
def add_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None:
if not self._is_node_updatable(edge[1].node_id):
def add_edge(self, edge: Edge) -> None:
if not self._is_node_updatable(edge.destination.node_id):
raise NodeAlreadyExecutedError(
f"Destination node {edge[1].node_id} has already been prepared or executed and cannot be linked to"
f"Destination node {edge.destination.node_id} has already been prepared or executed and cannot be linked to"
)
self.graph.add_edge(edge)
def delete_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None:
if not self._is_node_updatable(edge[1].node_id):
def delete_edge(self, edge: Edge) -> None:
if not self._is_node_updatable(edge.destination.node_id):
raise NodeAlreadyExecutedError(
f"Destination node {edge[1].node_id} has already been prepared or executed and cannot have a source edge deleted"
f"Destination node {edge.destination.node_id} has already been prepared or executed and cannot have a source edge deleted"
)
self.graph.delete_edge(edge)
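Since Edge is now a pydantic model, edges are built and inspected by field name rather than by tuple index. A minimal sketch against the Graph API above:

```python
# Minimal sketch: constructing an Edge and using it with the Graph API above.
edge = Edge(
    source=EdgeConnection(node_id="1", field="image"),
    destination=EdgeConnection(node_id="2", field="image"),
)
graph.add_edge(edge)     # raises InvalidEdgeError if the edge is invalid
graph.delete_edge(edge)  # removes it again; lookups use edge.source/.destination
```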


@@ -1,36 +1,39 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from invokeai.backend import Generate
from invokeai.backend import ModelManager
from .events import EventServiceBase
from .image_storage import ImageStorageBase
from .restoration_services import RestorationServices
from .invocation_queue import InvocationQueueABC
from .item_storage import ItemStorageABC
class InvocationServices:
"""Services that can be used by invocations"""
generate: Generate # TODO: wrap Generate, or split it up from model?
events: EventServiceBase
images: ImageStorageBase
queue: InvocationQueueABC
model_manager: ModelManager
restoration: RestorationServices
# NOTE: we must forward-declare any types that include invocations, since invocations can use services
graph_execution_manager: ItemStorageABC["GraphExecutionState"]
processor: "InvocationProcessorABC"
def __init__(
self,
generate: Generate,
events: EventServiceBase,
images: ImageStorageBase,
queue: InvocationQueueABC,
graph_execution_manager: ItemStorageABC["GraphExecutionState"],
processor: "InvocationProcessorABC",
self,
model_manager: ModelManager,
events: EventServiceBase,
images: ImageStorageBase,
queue: InvocationQueueABC,
graph_execution_manager: ItemStorageABC["GraphExecutionState"],
processor: "InvocationProcessorABC",
restoration: RestorationServices,
):
self.generate = generate
self.model_manager = model_manager
self.events = events
self.images = images
self.queue = queue
self.graph_execution_manager = graph_execution_manager
self.processor = processor
self.restoration = restoration


@@ -0,0 +1,120 @@
import os
import sys
import torch
from argparse import Namespace
from invokeai.backend import Args
from omegaconf import OmegaConf
from pathlib import Path
import invokeai.version
from ...backend import ModelManager
from ...backend.util import choose_precision, choose_torch_device
from ...backend import Globals
# TODO: Replace with an abstract base class ModelManagerBase
def get_model_manager(config: Args) -> ModelManager:
if not config.conf:
config_file = os.path.join(Globals.root, "configs", "models.yaml")
if not os.path.exists(config_file):
report_model_error(
config, FileNotFoundError(f"The file {config_file} could not be found.")
)
print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
import transformers # type: ignore
transformers.logging.set_verbosity_error()
import diffusers
diffusers.logging.set_verbosity_error()
# normalize the config directory relative to root
if not os.path.isabs(config.conf):
config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))
if config.embeddings:
if not os.path.isabs(config.embedding_path):
embedding_path = os.path.normpath(
os.path.join(Globals.root, config.embedding_path)
)
else:
embedding_path = config.embedding_path
else:
embedding_path = None
# migrate legacy models
ModelManager.migrate_models()
# creating the model manager
try:
device = torch.device(choose_torch_device())
precision = 'float16' if config.precision=='float16' \
else 'float32' if config.precision=='float32' \
else choose_precision(device)
model_manager = ModelManager(
OmegaConf.load(config.conf),
precision=precision,
device_type=device,
max_loaded_models=config.max_loaded_models,
embedding_path=Path(embedding_path) if embedding_path else None,
)
except (FileNotFoundError, TypeError, AssertionError) as e:
report_model_error(config, e)
except (IOError, KeyError) as e:
print(f"{e}. Aborting.")
sys.exit(-1)
# try to autoconvert new models
# autoimport new .ckpt files
if path := config.autoconvert:
model_manager.autoconvert_weights(
conf_path=config.conf,
weights_directory=path,
)
return model_manager
def report_model_error(opt: Namespace, e: Exception):
print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
print(
"** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
)
yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
if yes_to_all:
print(
"** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
)
else:
response = input(
"Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
)
if response.startswith(("n", "N")):
return
print("invokeai-configure is launching....\n")
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed
root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
config = ["--config", opt.conf] if opt.conf is not None else []
previous_args = sys.argv
sys.argv = ["invokeai-configure"]
sys.argv.extend(root_dir)
sys.argv.extend(config)
if yes_to_all is not None:
for arg in yes_to_all.split():
sys.argv.append(arg)
from invokeai.frontend.install import invokeai_configure
invokeai_configure()
# TODO: Figure out how to restart
# print('** InvokeAI will now restart')
# sys.argv = previous_args
# main() # would rather do a os.exec(), but doesn't exist?
# sys.exit(0)
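For reference, a hedged sketch of the startup wiring this initializer enables, mirroring invoke_cli() from earlier in this commit:

```python
# Sketch: wiring the new model manager at startup, as invoke_cli() now does.
config = Args()
config.parse_args()
model_manager = get_model_manager(config)
model = model_manager.get_model()  # dict with 'model', 'model_name', 'hash' keys
```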


@@ -0,0 +1,109 @@
import sys
import traceback
import torch
from ...backend.restoration import Restoration
from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE
# This should be a real base class for postprocessing functions,
# but right now we just instantiate the existing gfpgan, esrgan
# and codeformer functions.
class RestorationServices:
'''Face restoration and upscaling'''
def __init__(self, args):
try:
gfpgan, codeformer, esrgan = None, None, None
if args.restore or args.esrgan:
restoration = Restoration()
if args.restore:
gfpgan, codeformer = restoration.load_face_restore_models(
args.gfpgan_model_path
)
else:
print(">> Face restoration disabled")
if args.esrgan:
esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
else:
print(">> Upscaling disabled")
else:
print(">> Face restoration and upscaling disabled")
except (ModuleNotFoundError, ImportError):
print(traceback.format_exc(), file=sys.stderr)
print(">> You may need to install the ESRGAN and/or GFPGAN modules")
self.device = torch.device(choose_torch_device())
self.gfpgan = gfpgan
self.codeformer = codeformer
self.esrgan = esrgan
# note that this one method does gfpgan and codeformer reconstruction, as well as
# esrgan upscaling
# TO DO: refactor into separate methods
def upscale_and_reconstruct(
self,
image_list,
facetool="gfpgan",
upscale=None,
upscale_denoise_str=0.75,
strength=0.0,
codeformer_fidelity=0.75,
save_original=False,
image_callback=None,
prefix=None,
):
results = []
for r in image_list:
image, seed = r
try:
if strength > 0:
if self.gfpgan is not None or self.codeformer is not None:
if facetool == "gfpgan":
if self.gfpgan is None:
print(
">> GFPGAN not found. Face restoration is disabled."
)
else:
image = self.gfpgan.process(image, strength, seed)
if facetool == "codeformer":
if self.codeformer is None:
print(
">> CodeFormer not found. Face restoration is disabled."
)
else:
cf_device = (
CPU_DEVICE if self.device == MPS_DEVICE else self.device
)
image = self.codeformer.process(
image=image,
strength=strength,
device=cf_device,
seed=seed,
fidelity=codeformer_fidelity,
)
else:
print(">> Face Restoration is disabled.")
if upscale is not None:
if self.esrgan is not None:
if len(upscale) < 2:
upscale.append(0.75)
image = self.esrgan.process(
image,
upscale[1],
seed,
int(upscale[0]),
denoise_str=upscale_denoise_str,
)
else:
print(">> ESRGAN is disabled. Image not upscaled.")
except Exception as e:
print(
f">> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
)
if image_callback is not None:
image_callback(image, seed, upscaled=True, use_prefix=prefix)
else:
r[0] = image
results.append([image, seed])
return results
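Usage follows the restore-face and upscale invocations earlier in this commit; a hedged sketch, where `config` is a parsed Args object and `image` a PIL image:

```python
# Hedged sketch: driving RestorationServices as the invocations above do.
restoration = RestorationServices(config)
results = restoration.upscale_and_reconstruct(
    image_list=[[image, 0]],  # [image, seed] pairs
    upscale=(2, 0.75),        # (scale level, ESRGAN strength); None to skip
    strength=0.6,             # face-restoration strength; 0.0 to skip
)
restored_image, seed = results[0]
```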


@@ -2,6 +2,15 @@
Initialization file for invokeai.backend
"""
from .generate import Generate
from .generator import (
InvokeAIGeneratorBasicParams,
InvokeAIGenerator,
InvokeAIGeneratorOutput,
Txt2Img,
Img2Img,
Inpaint
)
from .model_management import ModelManager
from .safety_checker import SafetyChecker
from .args import Args
from .globals import Globals


@@ -490,7 +490,7 @@ class Args(object):
"-z",
type=int,
default=6,
choices=range(0, 9),
choices=range(0, 10),
dest="png_compression",
help="level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.",
)
@@ -943,7 +943,6 @@ class Args(object):
"--png_compression",
"-z",
type=int,
default=6,
choices=range(0, 10),
dest="png_compression",
help="level of PNG compression, from 0 (none) to 9 (maximum). [6]",


@@ -25,18 +25,19 @@ from accelerate.utils import set_seed
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.utils.import_utils import is_xformers_available
from omegaconf import OmegaConf
from pathlib import Path
from .args import metadata_from_png
from .generator import infill_methods
from .globals import Globals, global_cache_dir
from .image_util import InitImageResizer, PngWriter, Txt2Mask, configure_model_padding
from .model_management import ModelManager
from .safety_checker import SafetyChecker
from .prompting import get_uc_and_c_and_ec
from .prompting.conditioning import log_tokenization
from .stable_diffusion import HuggingFaceConceptsLibrary
from .util import choose_precision, choose_torch_device
def fix_func(orig):
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
@@ -222,6 +223,7 @@ class Generate:
self.precision,
max_loaded_models=max_loaded_models,
sequential_offload=self.free_gpu_mem,
embedding_path=Path(self.embedding_path),
)
# don't accept invalid models
fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME
@@ -244,31 +246,8 @@
# load safety checker if requested
if safety_checker:
try:
print(">> Initializing NSFW checker")
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from transformers import AutoFeatureExtractor
safety_model_id = "CompVis/stable-diffusion-safety-checker"
safety_model_path = global_cache_dir("hub")
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
self.safety_checker.to(self.device)
except Exception:
print(
"** An error was encountered while installing the safety checker:"
)
print(traceback.format_exc())
print(">> Initializing NSFW checker")
self.safety_checker = SafetyChecker(self.device)
else:
print(">> NSFW checker is disabled")
@@ -495,18 +474,6 @@ class Generate:
torch.cuda.reset_peak_memory_stats()
results = list()
init_image = None
mask_image = None
try:
if (
self.free_gpu_mem
and self.model.cond_stage_model.device != self.model.device
):
self.model.cond_stage_model.device = self.model.device
self.model.cond_stage_model.to(self.model.device)
except AttributeError:
pass
try:
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
@@ -535,15 +502,6 @@
generator.set_variation(self.seed, variation_amount, with_variations)
generator.use_mps_noise = use_mps_noise
checker = (
{
"checker": self.safety_checker,
"extractor": self.safety_feature_extractor,
}
if self.safety_checker
else None
)
results = generator.generate(
prompt,
iterations=iterations,
@@ -570,7 +528,7 @@
embiggen_strength=embiggen_strength,
inpaint_replace=inpaint_replace,
mask_blur_radius=mask_blur_radius,
safety_checker=checker,
safety_checker=self.safety_checker,
seam_size=seam_size,
seam_blur=seam_blur,
seam_strength=seam_strength,
@@ -952,18 +910,6 @@ class Generate:
self.generators = {}
set_seed(random.randrange(0, np.iinfo(np.uint32).max))
if self.embedding_path is not None:
print(f">> Loading embeddings from {self.embedding_path}")
for root, _, files in os.walk(self.embedding_path):
for name in files:
ti_path = os.path.join(root, name)
self.model.textual_inversion_manager.load_textual_inversion(
ti_path, defer_injecting_tokens=True
)
print(
f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
)
self.model_name = model_name
self._set_scheduler() # requires self.model_name to be set first
return self.model
@@ -1010,7 +956,7 @@
):
results = []
for r in image_list:
image, seed = r
image, seed, _ = r
try:
if strength > 0:
if self.gfpgan is not None or self.codeformer is not None:


@@ -1,5 +1,13 @@
"""
Initialization file for the invokeai.generator package
"""
from .base import Generator
from .base import (
InvokeAIGenerator,
InvokeAIGeneratorBasicParams,
InvokeAIGeneratorOutput,
Txt2Img,
Img2Img,
Inpaint,
Generator,
)
from .inpaint import infill_methods


@@ -4,11 +4,15 @@ including img2img, txt2img, and inpaint
"""
from __future__ import annotations
import itertools
import dataclasses
import diffusers
import os
import random
import traceback
from abc import ABCMeta
from argparse import Namespace
from contextlib import nullcontext
from pathlib import Path
import cv2
import numpy as np
@@ -17,12 +21,257 @@ from PIL import Image, ImageChops, ImageFilter
from accelerate.utils import set_seed
from diffusers import DiffusionPipeline
from tqdm import trange
from typing import List, Iterator, Type
from dataclasses import dataclass, field
from diffusers.schedulers import SchedulerMixin as Scheduler
import invokeai.assets.web as web_assets
from ..image_util import configure_model_padding
from ..util.util import rand_perlin_2d
from ..safety_checker import SafetyChecker
from ..prompting.conditioning import get_uc_and_c_and_ec
from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
downsampling = 8
CAUTION_IMG = "caution.png"
@dataclass
class InvokeAIGeneratorBasicParams:
seed: int=None
width: int=512
height: int=512
cfg_scale: float=7.5
steps: int=20
ddim_eta: float=0.0
scheduler: str='ddim'
precision: str='float16'
perlin: float=0.0
threshold: float=0.0
seamless: bool=False
seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y'])
h_symmetry_time_pct: float=None
v_symmetry_time_pct: float=None
variation_amount: float = 0.0
with_variations: list=field(default_factory=list)
safety_checker: SafetyChecker=None
@dataclass
class InvokeAIGeneratorOutput:
'''
InvokeAIGeneratorOutput is a dataclass that contains the outputs of a generation
operation, including the image, its seed, the model name used to generate the image
and the model hash, as well as all the generate() parameters that went into
generating the image (in .params, also available as attributes)
'''
image: Image
seed: int
model_hash: str
attention_maps_images: List[Image]
params: Namespace
# we are interposing a wrapper around the original Generator classes so that
# old code that calls Generate will continue to work.
class InvokeAIGenerator(metaclass=ABCMeta):
scheduler_map = dict(
ddim=diffusers.DDIMScheduler,
dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_dpm_2=diffusers.KDPM2DiscreteScheduler,
k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_euler=diffusers.EulerDiscreteScheduler,
k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
k_heun=diffusers.HeunDiscreteScheduler,
k_lms=diffusers.LMSDiscreteScheduler,
plms=diffusers.PNDMScheduler,
)
def __init__(self,
model_info: dict,
params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
):
self.model_info=model_info
self.params=params
def generate(self,
prompt: str='',
callback: callable=None,
step_callback: callable=None,
iterations: int=1,
**keyword_args,
)->Iterator[InvokeAIGeneratorOutput]:
'''
Return an iterator across the indicated number of generations.
Each time the iterator is called it will return an InvokeAIGeneratorOutput
object. Use like this:
outputs = txt2img.generate(prompt='banana sushi', iterations=5)
for result in outputs:
print(result.image, result.seed)
In the typical case of wanting just a single image, iterations
defaults to 1, so you can simply do:
output = next(txt2img.generate(prompt='banana sushi'))
Pass None to get an infinite iterator.
outputs = txt2img.generate(prompt='banana sushi', iterations=None)
for o in outputs:
print(o.image, o.seed)
'''
generator_args = dataclasses.asdict(self.params)
generator_args.update(keyword_args)
model_info = self.model_info
model_name = model_info['model_name']
model:StableDiffusionGeneratorPipeline = model_info['model']
model_hash = model_info['hash']
scheduler: Scheduler = self.get_scheduler(
model=model,
scheduler_name=generator_args.get('scheduler')
)
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model)
gen_class = self._generator_class()
generator = gen_class(model, self.params.precision)
if self.params.variation_amount > 0:
generator.set_variation(generator_args.get('seed'),
generator_args.get('variation_amount'),
generator_args.get('with_variations')
)
if isinstance(model, DiffusionPipeline):
for component in [model.unet, model.vae]:
configure_model_padding(component,
generator_args.get('seamless',False),
generator_args.get('seamless_axes')
)
else:
configure_model_padding(model,
generator_args.get('seamless',False),
generator_args.get('seamless_axes')
)
iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1)
for i in iteration_count:
results = generator.generate(prompt,
conditioning=(uc, c, extra_conditioning_info),
sampler=scheduler,
**generator_args,
)
output = InvokeAIGeneratorOutput(
image=results[0][0],
seed=results[0][1],
attention_maps_images=results[0][2],
model_hash = model_hash,
params=Namespace(model_name=model_name,**generator_args),
)
if callback:
callback(output)
yield output
@classmethod
def schedulers(cls)->List[str]:
'''
Return list of all the schedulers that we currently handle.
'''
return list(cls.scheduler_map.keys())
def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]):
return generator_class(model, self.params.precision)
def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
scheduler_class = self.scheduler_map.get(scheduler_name, self.scheduler_map['ddim'])
scheduler = scheduler_class.from_config(model.scheduler.config)
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False
return scheduler
@classmethod
def _generator_class(cls)->Type[Generator]:
'''
In derived classes, return the Generator class to apply.
If this is not overridden, the base Generator class is
returned, paralleling the derived generator class names.
'''
return Generator
# ------------------------------------
class Txt2Img(InvokeAIGenerator):
@classmethod
def _generator_class(cls):
from .txt2img import Txt2Img
return Txt2Img
# ------------------------------------
class Img2Img(InvokeAIGenerator):
def generate(self,
init_image: Image | torch.FloatTensor,
strength: float=0.75,
**keyword_args
)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(init_image=init_image,
strength=strength,
**keyword_args
)
@classmethod
def _generator_class(cls):
from .img2img import Img2Img
return Img2Img
# ------------------------------------
# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
class Inpaint(Img2Img):
def generate(self,
mask_image: Image | torch.FloatTensor,
# Seam settings - when 0, doesn't fill seam
seam_size: int = 0,
seam_blur: int = 0,
seam_strength: float = 0.7,
seam_steps: int = 10,
tile_size: int = 32,
inpaint_replace=False,
infill_method=None,
inpaint_width=None,
inpaint_height=None,
inpaint_fill: tuple[int, int, int, int] = (0x7F, 0x7F, 0x7F, 0xFF),
**keyword_args
)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(
mask_image=mask_image,
seam_size=seam_size,
seam_blur=seam_blur,
seam_strength=seam_strength,
seam_steps=seam_steps,
tile_size=tile_size,
inpaint_replace=inpaint_replace,
infill_method=infill_method,
inpaint_width=inpaint_width,
inpaint_height=inpaint_height,
inpaint_fill=inpaint_fill,
**keyword_args
)
@classmethod
def _generator_class(cls):
from .inpaint import Inpaint
return Inpaint
# ------------------------------------
class Embiggen(Txt2Img):
def generate(
self,
embiggen: list=None,
embiggen_tiles: list = None,
strength: float=0.75,
**kwargs)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(embiggen=embiggen,
embiggen_tiles=embiggen_tiles,
strength=strength,
**kwargs)
@classmethod
def _generator_class(cls):
from .embiggen import Embiggen
return Embiggen
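For illustration, a minimal sketch of driving the new wrapper API end to end. The import paths, the model name, the config path, and the scheduler field name are assumptions for the example, not confirmed by this diff:

# Hypothetical usage sketch; import paths and names are assumptions.
from invokeai.backend.generator import Txt2Img, InvokeAIGeneratorBasicParams
from invokeai.backend.model_management import ModelManager

manager = ModelManager('configs/models.yaml')                 # hypothetical config path
params = InvokeAIGeneratorBasicParams(scheduler='k_euler_a')  # field name assumed from scheduler_map
model_info = manager.get_model('stable-diffusion-1.5')        # hypothetical model name
txt2img = Txt2Img(model_info, params=params)
for output in txt2img.generate(prompt='banana sushi', iterations=3):
    # each InvokeAIGeneratorOutput carries the image plus its provenance
    print(output.seed, output.model_hash)
    output.image.save(f'banana-{output.seed}.png')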
class Generator:
@ -44,7 +293,6 @@ class Generator:
self.with_variations = []
self.use_mps_noise = False
self.free_gpu_mem = None
self.caution_img = None
# this is going to be overridden in img2img.py, txt2img.py and inpaint.py
def get_make_image(self, prompt, **kwargs):
@ -64,10 +312,10 @@ class Generator:
def generate(
self,
prompt,
init_image,
width,
height,
sampler,
init_image=None,
iterations=1,
seed=None,
image_callback=None,
@ -76,7 +324,7 @@ class Generator:
perlin=0.0,
h_symmetry_time_pct=None,
v_symmetry_time_pct=None,
safety_checker: dict = None,
safety_checker: SafetyChecker=None,
free_gpu_mem: bool = False,
**kwargs,
):
@ -99,7 +347,6 @@ class Generator:
h_symmetry_time_pct=h_symmetry_time_pct,
v_symmetry_time_pct=v_symmetry_time_pct,
attention_maps_callback=attention_maps_callback,
seed=seed,
**kwargs,
)
results = []
@ -127,12 +374,13 @@ class Generator:
print("** An error occurred while getting initial noise **")
print(traceback.format_exc())
image = make_image(x_T)
# Pass on the seed in case a layer beneath us needs to generate noise on its own.
image = make_image(x_T, seed)
if self.safety_checker is not None:
image = self.safety_check(image)
image = self.safety_checker.check(image)
results.append([image, seed])
results.append([image, seed, attention_maps_images])
if image_callback is not None:
attention_maps_image = (
@ -292,16 +540,6 @@ class Generator:
seed = random.randrange(0, np.iinfo(np.uint32).max)
return (seed, initial_noise)
# returns a tensor filled with random numbers from a normal distribution
def get_noise(self, width, height):
"""
Returns a tensor filled with random numbers, either form a normal distribution
(txt2img) or from the latent image (img2img, inpaint)
"""
raise NotImplementedError(
"get_noise() must be implemented in a descendent class"
)
def get_perlin_noise(self, width, height):
fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device
# limit noise to only the diffusion image channels, not the mask channels
@ -361,53 +599,6 @@ class Generator:
return v2
def safety_check(self, image: Image.Image):
"""
If the CompViz safety checker flags an NSFW image, we
blur it out.
"""
import diffusers
checker = self.safety_checker["checker"]
extractor = self.safety_checker["extractor"]
features = extractor([image], return_tensors="pt")
features.to(self.model.device)
# unfortunately checker requires the numpy version, so we have to convert back
x_image = np.array(image).astype(np.float32) / 255.0
x_image = x_image[None].transpose(0, 3, 1, 2)
diffusers.logging.set_verbosity_error()
checked_image, has_nsfw_concept = checker(
images=x_image, clip_input=features.pixel_values
)
if has_nsfw_concept[0]:
print(
"** An image with potential non-safe content has been detected. A blurred image will be returned. **"
)
return self.blur(image)
else:
return image
def blur(self, input):
blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
try:
caution = self.get_caution_img()
if caution:
blurry.paste(caution, (0, 0), caution)
except FileNotFoundError:
pass
return blurry
def get_caution_img(self):
path = None
if self.caution_img:
return self.caution_img
path = Path(web_assets.__path__[0]) / CAUTION_IMG
caution = Image.open(path)
self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
return self.caution_img
# this is a handy routine for debugging use. Given a generated sample,
# convert it into a PNG image and store it at the indicated path
def save_sample(self, sample, filepath):

View File

@ -37,7 +37,6 @@ class Img2Img(Generator):
h_symmetry_time_pct=None,
v_symmetry_time_pct=None,
attention_maps_callback=None,
seed=None,
**kwargs,
):
"""
@ -64,7 +63,7 @@ class Img2Img(Generator):
),
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T):
def make_image(x_T: torch.Tensor, seed: int):
# FIXME: use x_T for initial seeded noise
# We're not at the moment because the pipeline automatically resizes init_image if
# necessary, which the x_T input might not match.
@ -77,7 +76,7 @@ class Img2Img(Generator):
conditioning_data,
noise_func=self.get_noise_like,
callback=step_callback,
seed=seed
seed=seed,
)
if (
pipeline_output.attention_map_saver is not None
@ -88,9 +87,7 @@ class Img2Img(Generator):
return make_image
def get_noise_like(self, like: torch.Tensor, seed: Optional[int]):
if seed is not None:
set_seed(seed)
def get_noise_like(self, like: torch.Tensor):
device = like.device
if device.type == "mps":
x = torch.randn_like(like, device="cpu").to(device)

View File

@ -159,6 +159,7 @@ class Inpaint(Img2Img):
seam_size: int,
seam_blur: int,
prompt,
seed,
sampler,
steps,
cfg_scale,
@ -192,7 +193,7 @@ class Inpaint(Img2Img):
seam_noise = self.get_noise(im.width, im.height)
result = make_image(seam_noise)
result = make_image(seam_noise, seed)
return result
@ -223,7 +224,6 @@ class Inpaint(Img2Img):
inpaint_height=None,
inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF),
attention_maps_callback=None,
seed=None,
**kwargs,
):
"""
@ -311,7 +311,7 @@ class Inpaint(Img2Img):
uc, c, cfg_scale
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T):
def make_image(x_T: torch.Tensor, seed: int):
pipeline_output = pipeline.inpaint_from_embeddings(
init_image=init_image,
mask=1 - mask, # expects white means "paint here."
@ -320,7 +320,7 @@ class Inpaint(Img2Img):
conditioning_data=conditioning_data,
noise_func=self.get_noise_like,
callback=step_callback,
seed=seed
seed=seed,
)
if (
@ -343,6 +343,7 @@ class Inpaint(Img2Img):
seam_size,
seam_blur,
prompt,
seed,
sampler,
seam_steps,
cfg_scale,

View File

@ -61,7 +61,7 @@ class Txt2Img(Generator):
),
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T) -> PIL.Image.Image:
def make_image(x_T: torch.Tensor, _: int) -> PIL.Image.Image:
pipeline_output = pipeline.image_from_embeddings(
latents=torch.zeros_like(x_T, dtype=self.torch_dtype()),
noise=x_T,

View File

@ -64,7 +64,7 @@ class Txt2Img2Img(Generator):
),
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T):
def make_image(x_T: torch.Tensor, _: int):
first_pass_latent_output, _ = pipeline.latents_from_embeddings(
latents=torch.zeros_like(x_T),
num_inference_steps=steps,

View File

@ -1075,9 +1075,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
dlogging.set_verbosity_error()
checkpoint = (
load_file(checkpoint_path)
if Path(checkpoint_path).suffix == ".safetensors"
else torch.load(checkpoint_path)
torch.load(checkpoint_path)
if Path(checkpoint_path).suffix == ".ckpt"
else load_file(checkpoint_path)
)
cache_dir = global_cache_dir("hub")
pipeline_class = (
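The checkpoint loader now defaults to the safetensors reader and only falls back to torch.load for explicit .ckpt files, so unpickling never happens for unrecognized suffixes. The same logic as a standalone sketch:

from pathlib import Path
import torch
from safetensors.torch import load_file

def load_checkpoint(checkpoint_path: str):
    # torch.load only for legacy pickled .ckpt files; safetensors for everything else
    if Path(checkpoint_path).suffix == '.ckpt':
        return torch.load(checkpoint_path)
    return load_file(checkpoint_path)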
@ -1274,7 +1275,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
tokenizer=tokenizer,
unet=unet.to(precision),
scheduler=scheduler,
safety_checker=safety_checker.to(precision),
safety_checker=None if return_generator_pipeline else safety_checker.to(precision),
feature_extractor=feature_extractor,
)
else:

View File

@ -34,8 +34,7 @@ from picklescan.scanner import scan_file_path
from invokeai.backend.globals import Globals, global_cache_dir
from ..stable_diffusion import StableDiffusionGeneratorPipeline
from ..util import CPU_DEVICE, ask_user, download_with_resume
from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume
class SDLegacyType(Enum):
V1 = 1
@ -51,23 +50,29 @@ VAE_TO_REPO_ID = { # hack, see note in convert_and_import()
}
class ModelManager(object):
'''
Model manager handles loading, caching, importing, deleting, converting, and editing models.
'''
def __init__(
self,
config: OmegaConf,
device_type: torch.device = CPU_DEVICE,
precision: str = "float16",
max_loaded_models=DEFAULT_MAX_MODELS,
sequential_offload=False,
self,
config: OmegaConf|Path,
device_type: torch.device = CUDA_DEVICE,
precision: str = "float16",
max_loaded_models=DEFAULT_MAX_MODELS,
sequential_offload=False,
embedding_path: Path=None,
):
"""
Initialize with the path to the models.yaml config file,
the torch device type, and precision. The optional
min_avail_mem argument specifies how much unused system
(CPU) memory to preserve. The cache of models in RAM will
grow until this value is approached. Default is 2G.
Initialize with the path to the models.yaml config file or
an initialized OmegaConf dictionary. Optional parameters
are the torch device type, precision, max_loaded_models,
and sequential_offload boolean. Note that the default device
type and precision are set up for a CUDA system running at half precision.
"""
# prevent nasty-looking CLIP log message
transformers.logging.set_verbosity_error()
if not isinstance(config, DictConfig):
config = OmegaConf.load(config)
self.config = config
self.precision = precision
self.device = torch.device(device_type)
@ -76,6 +81,7 @@ class ModelManager(object):
self.stack = [] # this is an LRU FIFO
self.current_model = None
self.sequential_offload = sequential_offload
self.embedding_path = embedding_path
def valid_model(self, model_name: str) -> bool:
"""
@ -84,12 +90,15 @@ class ModelManager(object):
"""
return model_name in self.config
def get_model(self, model_name: str):
def get_model(self, model_name: str=None)->dict:
"""
Given a model name identified in models.yaml, return
the model object. If in RAM will load into GPU VRAM.
If on disk, will load from there.
"""
if not model_name:
return self.get_model(self.current_model) if self.current_model else self.get_model(self.default_model())
if not self.valid_model(model_name):
print(
f'** "{model_name}" is not a known model name. Please check your models.yaml file'
@ -104,7 +113,7 @@ class ModelManager(object):
if model_name in self.models:
requested_model = self.models[model_name]["model"]
print(f">> Retrieving model {model_name} from system RAM cache")
self.models[model_name]["model"] = self._model_from_cpu(requested_model)
requested_model.ready()
width = self.models[model_name]["width"]
height = self.models[model_name]["height"]
hash = self.models[model_name]["hash"]
@ -112,6 +121,7 @@ class ModelManager(object):
else: # we're about to load a new model, so potentially offload the least recently used one
requested_model, width, height, hash = self._load_model(model_name)
self.models[model_name] = {
"model_name": model_name,
"model": requested_model,
"width": width,
"height": height,
@ -121,6 +131,7 @@ class ModelManager(object):
self.current_model = model_name
self._push_newest_model(model_name)
return {
"model_name": model_name,
"model": requested_model,
"width": width,
"height": height,
@ -425,6 +436,7 @@ class ModelManager(object):
height = width
print(f" | Default image dimensions = {width} x {height}")
self._add_embeddings_to_model(pipeline)
return pipeline, width, height, model_hash
@ -499,7 +511,7 @@ class ModelManager(object):
print(f">> Offloading {model_name} to CPU")
model = self.models[model_name]["model"]
self.models[model_name]["model"] = self._model_to_cpu(model)
model.offload_all()
gc.collect()
if self._has_cuda():
@ -557,7 +569,7 @@ class ModelManager(object):
"""
model_name = model_name or Path(repo_or_path).stem
model_description = (
model_description or f"Imported diffusers model {model_name}"
description or f"Imported diffusers model {model_name}"
)
new_config = dict(
description=model_description,
@ -720,9 +732,9 @@ class ModelManager(object):
# another round of heuristics to guess the correct config file.
checkpoint = (
safetensors.torch.load_file(model_path)
if model_path.suffix == ".safetensors"
else torch.load(model_path)
torch.load(model_path)
if model_path.suffix == ".ckpt"
else safetensors.torch.load_file(model_path)
)
# additional probing needed if no config file provided
@ -1044,43 +1056,6 @@ class ModelManager(object):
self.stack.remove(model_name)
self.models.pop(model_name, None)
def _model_to_cpu(self, model):
if self.device == CPU_DEVICE:
return model
if isinstance(model, StableDiffusionGeneratorPipeline):
model.offload_all()
return model
model.cond_stage_model.device = CPU_DEVICE
model.to(CPU_DEVICE)
for submodel in ("first_stage_model", "cond_stage_model", "model"):
try:
getattr(model, submodel).to(CPU_DEVICE)
except AttributeError:
pass
return model
def _model_from_cpu(self, model):
if self.device == CPU_DEVICE:
return model
if isinstance(model, StableDiffusionGeneratorPipeline):
model.ready()
return model
model.to(self.device)
model.cond_stage_model.device = self.device
for submodel in ("first_stage_model", "cond_stage_model", "model"):
try:
getattr(model, submodel).to(self.device)
except AttributeError:
pass
return model
def _pop_oldest_model(self):
"""
Remove the first element of the FIFO, which ought
@ -1098,6 +1073,19 @@ class ModelManager(object):
self.stack.remove(model_name)
self.stack.append(model_name)
def _add_embeddings_to_model(self, model: StableDiffusionGeneratorPipeline):
if self.embedding_path is not None:
print(f">> Loading embeddings from {self.embedding_path}")
for root, _, files in os.walk(self.embedding_path):
for name in files:
ti_path = os.path.join(root, name)
model.textual_inversion_manager.load_textual_inversion(
ti_path, defer_injecting_tokens=True
)
print(
f'>> Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}'
)
def _has_cuda(self) -> bool:
return self.device.type == "cuda"

View File

@ -3,7 +3,6 @@ Initialization file for invokeai.backend.prompting
"""
from .conditioning import (
get_prompt_structure,
get_tokenizer,
get_tokens_for_prompt_object,
get_uc_and_c_and_ec,
split_weighted_subprompts,

View File

@ -7,7 +7,7 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an
"""
import re
from typing import Any, Optional, Union
from typing import Optional, Union
from compel import Compel
from compel.prompt_parser import (
@ -17,7 +17,6 @@ from compel.prompt_parser import (
Fragment,
PromptParser,
)
from transformers import CLIPTokenizer
from invokeai.backend.globals import Globals
@ -25,36 +24,6 @@ from ..stable_diffusion import InvokeAIDiffuserComponent
from ..util import torch_dtype
def get_tokenizer(model) -> CLIPTokenizer:
# TODO remove legacy ckpt fallback handling
return (
getattr(model, "tokenizer", None) # diffusers
or model.cond_stage_model.tokenizer
) # ldm
def get_text_encoder(model) -> Any:
# TODO remove legacy ckpt fallback handling
return getattr(
model, "text_encoder", None
) or UnsqueezingLDMTransformer( # diffusers
model.cond_stage_model.transformer
) # ldm
class UnsqueezingLDMTransformer:
def __init__(self, ldm_transformer):
self.ldm_transformer = ldm_transformer
@property
def device(self):
return self.ldm_transformer.device
def __call__(self, *args, **kwargs):
insufficiently_unsqueezed_tensor = self.ldm_transformer(*args, **kwargs)
return insufficiently_unsqueezed_tensor.unsqueeze(0)
def get_uc_and_c_and_ec(
prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False
):
@ -64,11 +33,10 @@ def get_uc_and_c_and_ec(
prompt_string
)
tokenizer = get_tokenizer(model)
text_encoder = get_text_encoder(model)
tokenizer = model.tokenizer
compel = Compel(
tokenizer=tokenizer,
text_encoder=text_encoder,
text_encoder=model.text_encoder,
textual_inversion_manager=model.textual_inversion_manager,
dtype_for_device_getter=torch_dtype,
truncate_long_prompts=False
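With the legacy-checkpoint fallback removed, the pipeline object itself supplies the tokenizer and text encoder, so the call site stays simple. A sketch, assuming pipeline is a loaded StableDiffusionGeneratorPipeline:

# `pipeline` must expose .tokenizer, .text_encoder and .textual_inversion_manager
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
    'banana sushi', model=pipeline, log_tokens=True
)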

View File

@ -0,0 +1,82 @@
'''
SafetyChecker class - checks images against the StabilityAI NSFW filter
and blurs images that contain potential NSFW content.
'''
import diffusers
import numpy as np
import torch
import traceback
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from pathlib import Path
from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor
import invokeai.assets.web as web_assets
from .globals import global_cache_dir
from .util import CPU_DEVICE
class SafetyChecker(object):
CAUTION_IMG = "caution.png"
def __init__(self, device: torch.device):
path = Path(web_assets.__path__[0]) / self.CAUTION_IMG
caution = Image.open(path)
self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
self.device = device
try:
safety_model_id = "CompVis/stable-diffusion-safety-checker"
safety_model_path = global_cache_dir("hub")
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
except Exception:
print(
"** An error was encountered while installing the safety checker:"
)
print(traceback.format_exc())
def check(self, image: Image.Image):
"""
Check the provided image against the StabilityAI safety checker and return
a blurred copy if potential NSFW content is detected; otherwise return the
image unchanged.
"""
self.safety_checker.to(self.device)
features = self.safety_feature_extractor([image], return_tensors="pt")
features.to(self.device)
# unfortunately checker requires the numpy version, so we have to convert back
x_image = np.array(image).astype(np.float32) / 255.0
x_image = x_image[None].transpose(0, 3, 1, 2)
diffusers.logging.set_verbosity_error()
checked_image, has_nsfw_concept = self.safety_checker(
images=x_image, clip_input=features.pixel_values
)
self.safety_checker.to(CPU_DEVICE) # offload
if has_nsfw_concept[0]:
print(
"** An image with potential non-safe content has been detected. A blurred image will be returned. **"
)
return self.blur(image)
else:
return image
def blur(self, input):
blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
try:
if caution := self.caution_img:
blurry.paste(caution, (0, 0), caution)
except FileNotFoundError:
pass
return blurry
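A short usage sketch of the new class (the module path is an assumption based on the relative imports above):

import torch
from PIL import Image
from invokeai.backend.safety_checker import SafetyChecker  # assumed module path

checker = SafetyChecker(torch.device('cuda'))
image = checker.check(Image.open('result.png'))  # original, or a blurred copy if flagged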

View File

@ -9,6 +9,7 @@ from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union
import einops
import PIL.Image
from accelerate.utils import set_seed
import psutil
import torch
import torchvision.transforms as T
@ -54,16 +55,6 @@ class PipelineIntermediateState:
attention_map_saver: Optional[AttentionMapSaver] = None
# copied from configs/stable-diffusion/v1-inference.yaml
_default_personalization_config_params = dict(
placeholder_strings=["*"],
initializer_wods=["sculpture"],
per_image_tokens=False,
num_vectors_per_token=1,
progressive_words=False,
)
@dataclass
class AddsMaskLatents:
"""Add the channels required for inpainting model input.
@ -175,7 +166,7 @@ def image_resized_to_grid_as_tensor(
:param normalize: scale the range to [-1, 1] instead of [0, 1]
:param multiple_of: resize the input so both dimensions are a multiple of this
"""
w, h = trim_to_multiple_of(*image.size)
w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of)
transformation = T.Compose(
[
T.Resize((h, w), T.InterpolationMode.LANCZOS),
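This fixes multiple_of being silently dropped: a 513x769 input with multiple_of=8 now trims to 512x768 before the resize. A sketch of the helper's assumed contract:

def trim_to_multiple_of(*args, multiple_of=8):
    # round each dimension down to the nearest multiple (assumed behavior)
    return tuple((x - x % multiple_of) for x in args)

assert trim_to_multiple_of(513, 769, multiple_of=8) == (512, 768)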
@ -290,10 +281,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offsensive or harmful.
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
@ -436,11 +427,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
"""
Ready this pipeline's models.
i.e. pre-load them to the GPU if appropriate.
i.e. preload them to the GPU if appropriate.
"""
self._model_group.ready()
def to(self, torch_device: Optional[Union[str, torch.device]] = None):
def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False):
# overridden method; types match the superclass.
if torch_device is None:
return self
@ -704,7 +695,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
device=self._model_group.device_for(self.unet),
dtype=self.unet.dtype,
)
noise = noise_func(initial_latents, seed)
if seed is not None:
set_seed(seed)
noise = noise_func(initial_latents)
return self.img2img_from_latents_and_embeddings(
initial_latents,
@ -806,7 +799,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
init_image_latents = self.non_noised_latents_from_image(
init_image, device=device, dtype=latents_dtype
)
noise = noise_func(init_image_latents, seed)
if seed is not None:
set_seed(seed)
noise = noise_func(init_image_latents)
if mask.dim() == 3:
mask = mask.unsqueeze(0)
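Seeding now happens once inside the pipeline via accelerate's set_seed rather than in each noise function, so any torch-based noise_func becomes reproducible for a given seed. A minimal sketch of the pattern:

import torch
from accelerate.utils import set_seed

def noise_like(t: torch.Tensor) -> torch.Tensor:
    return torch.randn_like(t)

set_seed(42)
a = noise_like(torch.zeros(4))
set_seed(42)
b = noise_like(torch.zeros(4))
assert torch.equal(a, b)  # identical seed yields identical noise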
@ -917,20 +912,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
device=self._model_group.device_for(self.unet),
)
@property
def cond_stage_model(self):
return self.embeddings_provider
@torch.inference_mode()
def _tokenize(self, prompt: Union[str, List[str]]):
return self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
@property
def channels(self) -> int:
"""Compatible with DiffusionWrapper"""
@ -942,11 +923,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
return super().decode_latents(latents)
def debug_latents(self, latents, msg):
from invokeai.backend.image_util import debug_image
with torch.inference_mode():
from ldm.util import debug_image
decoded = self.numpy_to_pil(self.decode_latents(latents))
for i, img in enumerate(decoded):
debug_image(
img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
)
for i, img in enumerate(decoded):
debug_image(
img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
)

View File

@ -29,7 +29,6 @@ from ..image_util import PngWriter, retrieve_metadata
from ...frontend.merge.merge_diffusers import merge_diffusion_models
from ..prompting import (
get_prompt_structure,
get_tokenizer,
get_tokens_for_prompt_object,
)
from ..stable_diffusion import PipelineIntermediateState
@ -1274,7 +1273,7 @@ class InvokeAIWebServer:
None
if type(parsed_prompt) is Blend
else get_tokens_for_prompt_object(
get_tokenizer(self.generate.model), parsed_prompt
self.generate.model.tokenizer, parsed_prompt
)
)
attention_maps_image_base64_url = (

View File

@ -35,6 +35,7 @@ module.exports = {
{ varsIgnorePattern: '^_', argsIgnorePattern: '^_' },
],
'prettier/prettier': ['error', { endOfLine: 'auto' }],
'@typescript-eslint/ban-ts-comment': 'warn',
},
settings: {
react: {

View File

@ -1 +0,0 @@
.ltr-image-gallery-css-transition-enter{transform:translate(150%)}.ltr-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-image-gallery-css-transition-exit{transform:translate(0)}.ltr-image-gallery-css-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}.rtl-image-gallery-css-transition-enter{transform:translate(-150%)}.rtl-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-image-gallery-css-transition-exit{transform:translate(0)}.rtl-image-gallery-css-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}.ltr-parameters-panel-transition-enter{transform:translate(-150%)}.ltr-parameters-panel-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-parameters-panel-transition-exit{transform:translate(0)}.ltr-parameters-panel-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}.rtl-parameters-panel-transition-enter{transform:translate(150%)}.rtl-parameters-panel-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-parameters-panel-transition-exit{transform:translate(0)}.rtl-parameters-panel-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}

View File

@ -0,0 +1 @@
.ltr-image-gallery-css-transition-enter{transform:translate(150%)}.ltr-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-image-gallery-css-transition-exit{transform:translate(0)}.ltr-image-gallery-css-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}.rtl-image-gallery-css-transition-enter{transform:translate(-150%)}.rtl-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-image-gallery-css-transition-exit{transform:translate(0)}.rtl-image-gallery-css-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -12,7 +12,7 @@
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-61f10aa8.js"></script>
<script type="module" crossorigin src="./assets/index-d64f4654.js"></script>
<link rel="stylesheet" href="./assets/index-5483945c.css">
</head>

View File

@ -1,7 +1,25 @@
{
"accessibility": {
"modelSelect": "Model Select",
"invokeProgressBar": "Invoke progress bar"
"invokeProgressBar": "Invoke progress bar",
"reset": "Reset",
"uploadImage": "Upload Image",
"previousImage": "Previous Image",
"nextImage": "Next Image",
"useThisParameter": "Use this parameter",
"copyMetadataJson": "Copy metadata JSON",
"exitViewer": "ExitViewer",
"zoomIn": "Zoom In",
"zoomOut": "Zoom Out",
"rotateCounterClockwise": "Rotate Counter-Clockwise",
"rotateClockwise": "Rotate Clockwise",
"flipHorizontally": "Flip Horizontally",
"flipVertically": "Flip Vertically",
"modifyConfig": "Modify Config",
"toggleAutoscroll": "Toggle autoscroll",
"toggleLogViewer": "Toggle Log Viewer",
"showGallery": "Show Gallery",
"showOptionsPanel": "Show Options Panel"
},
"common": {
"hotkeysLabel": "Hotkeys",
@ -31,10 +49,11 @@
"langSimplifiedChinese": "简体中文",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"text2img": "Text To Image",
"txt2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"nodes": "Nodes",
"postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
@ -578,7 +597,7 @@
"autoSaveToGallery": "Auto Save to Gallery",
"saveBoxRegionOnly": "Save Box Region Only",
"limitStrokesToBox": "Limit Strokes to Box",
"showCanvasDebugInfo": "Show Canvas Debug Info",
"showCanvasDebugInfo": "Show Additional Canvas Info",
"clearCanvasHistory": "Clear Canvas History",
"clearHistory": "Clear History",
"clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",

View File

@ -63,7 +63,14 @@
"back": "Atrás",
"statusConvertingModel": "Convertir el modelo",
"statusModelConverted": "Modelo adaptado",
"statusMergingModels": "Fusionar modelos"
"statusMergingModels": "Fusionar modelos",
"oceanTheme": "Océano",
"langPortuguese": "Portugués",
"langKorean": "Coreano",
"langHebrew": "Hebreo",
"pinOptionsPanel": "Pin del panel de opciones",
"loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA"
},
"gallery": {
"generations": "Generaciones",
@ -385,14 +392,19 @@
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.",
"modelMergeHeaderHelp1": "Puede unir hasta tres modelos diferentes para crear una combinación que se adapte a sus necesidades.",
"inverseSigmoid": "Sigmoideo inverso",
"weightedSum": "Modelo de suma ponderada",
"sigmoid": "Función sigmoide",
"allModels": "Todos los modelos",
"repo_id": "Identificador del repositorio",
"pathToCustomConfig": "Ruta a la configuración personalizada",
"customConfig": "Configuración personalizada"
"customConfig": "Configuración personalizada",
"v2_base": "v2 (512px)",
"none": "ninguno",
"pickModelType": "Elige el tipo de modelo",
"v2_768": "v2 (768px)",
"addDifference": "Añadir una diferencia"
},
"parameters": {
"images": "Imágenes",
@ -588,5 +600,27 @@
"betaDarkenOutside": "Oscurecer fuera",
"betaLimitToBox": "Limitar a caja",
"betaPreserveMasked": "Preservar área enmascarada"
},
"accessibility": {
"invokeProgressBar": "Activar la barra de progreso",
"modelSelect": "Seleccionar modelo",
"reset": "Reiniciar",
"uploadImage": "Cargar imagen",
"previousImage": "Imagen anterior",
"nextImage": "Siguiente imagen",
"useThisParameter": "Utiliza este parámetro",
"copyMetadataJson": "Copiar los metadatos JSON",
"exitViewer": "Salir del visor",
"zoomIn": "Acercar",
"zoomOut": "Alejar",
"rotateCounterClockwise": "Girar en sentido antihorario",
"rotateClockwise": "Girar en sentido horario",
"flipHorizontally": "Voltear horizontalmente",
"flipVertically": "Voltear verticalmente",
"modifyConfig": "Modificar la configuración",
"toggleAutoscroll": "Activar el autodesplazamiento",
"toggleLogViewer": "Alternar el visor de registros",
"showGallery": "Mostrar galería",
"showOptionsPanel": "Mostrar el panel de opciones"
}
}

View File

@ -63,7 +63,14 @@
"langSimplifiedChinese": "Cinese semplificato",
"langDutch": "Olandese",
"statusModelConverted": "Modello Convertito",
"statusConvertingModel": "Conversione Modello"
"statusConvertingModel": "Conversione Modello",
"langKorean": "Coreano",
"langPortuguese": "Portoghese",
"pinOptionsPanel": "Blocca il pannello Opzioni",
"loading": "Caricamento in corso",
"oceanTheme": "Oceano",
"langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI"
},
"gallery": {
"generations": "Generazioni",
@ -392,7 +399,12 @@
"customSaveLocation": "Ubicazione salvataggio personalizzata",
"weightedSum": "Somma pesata",
"sigmoid": "Sigmoide",
"inverseSigmoid": "Sigmoide inverso"
"inverseSigmoid": "Sigmoide inverso",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"none": "niente",
"addDifference": "Aggiungi differenza",
"pickModelType": "Scegli il tipo di modello"
},
"parameters": {
"images": "Immagini",
@ -588,5 +600,27 @@
"betaDarkenOutside": "Oscura all'esterno",
"betaLimitToBox": "Limita al rettangolo",
"betaPreserveMasked": "Conserva quanto mascherato"
},
"accessibility": {
"modelSelect": "Seleziona modello",
"invokeProgressBar": "Barra di avanzamento generazione",
"uploadImage": "Carica immagine",
"previousImage": "Immagine precedente",
"nextImage": "Immagine successiva",
"useThisParameter": "Usa questo parametro",
"reset": "Reimposta",
"copyMetadataJson": "Copia i metadati JSON",
"exitViewer": "Esci dal visualizzatore",
"zoomIn": "Zoom avanti",
"zoomOut": "Zoom Indietro",
"rotateCounterClockwise": "Ruotare in senso antiorario",
"rotateClockwise": "Ruotare in senso orario",
"flipHorizontally": "Capovolgi orizzontalmente",
"toggleLogViewer": "Attiva/disattiva visualizzatore registro",
"showGallery": "Mostra la galleria immagini",
"showOptionsPanel": "Mostra il pannello opzioni",
"flipVertically": "Capovolgi verticalmente",
"toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico",
"modifyConfig": "Modifica configurazione"
}
}

View File

@ -63,6 +63,560 @@
"statusGeneratingOutpainting": "Geração de Ampliação",
"statusGenerationComplete": "Geração Completa",
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados"
"statusMergedModels": "Modelos Mesclados",
"oceanTheme": "Oceano",
"pinOptionsPanel": "Fixar painel de opções",
"loading": "A carregar",
"loadingInvokeAI": "A carregar Invoke AI",
"langPortuguese": "Português"
},
"gallery": {
"galleryImageResetSize": "Resetar Imagem",
"gallerySettings": "Configurações de Galeria",
"maintainAspectRatio": "Mater Proporções",
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente",
"pinGallery": "Fixar Galeria",
"singleColumnLayout": "Disposição em Coluna Única",
"allImagesLoaded": "Todas as Imagens Carregadas",
"loadMore": "Carregar Mais",
"noImagesInGallery": "Sem Imagens na Galeria",
"generations": "Gerações",
"showGenerations": "Mostrar Gerações",
"uploads": "Enviados",
"showUploads": "Mostrar Enviados",
"galleryImageSize": "Tamanho da Imagem"
},
"hotkeys": {
"generalHotkeys": "Atalhos Gerais",
"galleryHotkeys": "Atalhos da Galeria",
"toggleViewer": {
"title": "Ativar Visualizador",
"desc": "Abrir e fechar o Visualizador de Imagens"
},
"maximizeWorkSpace": {
"desc": "Fechar painéis e maximixar área de trabalho",
"title": "Maximizar a Área de Trabalho"
},
"changeTabs": {
"title": "Mudar Guias",
"desc": "Trocar para outra área de trabalho"
},
"consoleToggle": {
"desc": "Abrir e fechar console",
"title": "Ativar Console"
},
"setPrompt": {
"title": "Definir Prompt",
"desc": "Usar o prompt da imagem atual"
},
"sendToImageToImage": {
"desc": "Manda a imagem atual para Imagem Para Imagem",
"title": "Mandar para Imagem Para Imagem"
},
"previousImage": {
"desc": "Mostra a imagem anterior na galeria",
"title": "Imagem Anterior"
},
"nextImage": {
"title": "Próxima Imagem",
"desc": "Mostra a próxima imagem na galeria"
},
"decreaseGalleryThumbSize": {
"desc": "Diminui o tamanho das thumbs na galeria",
"title": "Diminuir Tamanho da Galeria de Imagem"
},
"selectBrush": {
"title": "Selecionar Pincel",
"desc": "Seleciona o pincel"
},
"selectEraser": {
"title": "Selecionar Apagador",
"desc": "Seleciona o apagador"
},
"decreaseBrushSize": {
"title": "Diminuir Tamanho do Pincel",
"desc": "Diminui o tamanho do pincel/apagador"
},
"increaseBrushOpacity": {
"desc": "Aumenta a opacidade do pincel",
"title": "Aumentar Opacidade do Pincel"
},
"moveTool": {
"title": "Ferramenta Mover",
"desc": "Permite navegar pela tela"
},
"decreaseBrushOpacity": {
"desc": "Diminui a opacidade do pincel",
"title": "Diminuir Opacidade do Pincel"
},
"toggleSnap": {
"title": "Ativar Encaixe",
"desc": "Ativa Encaixar na Grade"
},
"quickToggleMove": {
"title": "Ativar Mover Rapidamente",
"desc": "Temporariamente ativa o modo Mover"
},
"toggleLayer": {
"title": "Ativar Camada",
"desc": "Ativa a seleção de camada de máscara/base"
},
"clearMask": {
"title": "Limpar Máscara",
"desc": "Limpa toda a máscara"
},
"hideMask": {
"title": "Esconder Máscara",
"desc": "Esconde e Revela a máscara"
},
"mergeVisible": {
"title": "Fundir Visível",
"desc": "Fundir todas as camadas visíveis das telas"
},
"downloadImage": {
"desc": "Descarregar a tela atual",
"title": "Descarregar Imagem"
},
"undoStroke": {
"title": "Desfazer Traço",
"desc": "Desfaz um traço de pincel"
},
"redoStroke": {
"title": "Refazer Traço",
"desc": "Refaz o traço de pincel"
},
"keyboardShortcuts": "Atalhos de Teclado",
"appHotkeys": "Atalhos do app",
"invoke": {
"title": "Invocar",
"desc": "Gerar uma imagem"
},
"cancel": {
"title": "Cancelar",
"desc": "Cancelar geração de imagem"
},
"focusPrompt": {
"title": "Foco do Prompt",
"desc": "Foco da área de texto do prompt"
},
"toggleOptions": {
"title": "Ativar Opções",
"desc": "Abrir e fechar o painel de opções"
},
"pinOptions": {
"title": "Fixar Opções",
"desc": "Fixar o painel de opções"
},
"closePanels": {
"title": "Fechar Painéis",
"desc": "Fecha os painéis abertos"
},
"unifiedCanvasHotkeys": "Atalhos da Tela Unificada",
"toggleGallery": {
"title": "Ativar Galeria",
"desc": "Abrir e fechar a gaveta da galeria"
},
"setSeed": {
"title": "Definir Seed",
"desc": "Usar seed da imagem atual"
},
"setParameters": {
"title": "Definir Parâmetros",
"desc": "Usar todos os parâmetros da imagem atual"
},
"restoreFaces": {
"title": "Restaurar Rostos",
"desc": "Restaurar a imagem atual"
},
"upscale": {
"title": "Redimensionar",
"desc": "Redimensionar a imagem atual"
},
"showInfo": {
"title": "Mostrar Informações",
"desc": "Mostrar metadados de informações da imagem atual"
},
"deleteImage": {
"title": "Apagar Imagem",
"desc": "Apaga a imagem atual"
},
"toggleGalleryPin": {
"title": "Ativar Fixar Galeria",
"desc": "Fixa e desafixa a galeria na interface"
},
"increaseGalleryThumbSize": {
"title": "Aumentar Tamanho da Galeria de Imagem",
"desc": "Aumenta o tamanho das thumbs na galeria"
},
"increaseBrushSize": {
"title": "Aumentar Tamanho do Pincel",
"desc": "Aumenta o tamanho do pincel/apagador"
},
"fillBoundingBox": {
"title": "Preencher Caixa Delimitadora",
"desc": "Preenche a caixa delimitadora com a cor do pincel"
},
"eraseBoundingBox": {
"title": "Apagar Caixa Delimitadora",
"desc": "Apaga a área da caixa delimitadora"
},
"colorPicker": {
"title": "Selecionar Seletor de Cor",
"desc": "Seleciona o seletor de cores"
},
"showHideBoundingBox": {
"title": "Mostrar/Esconder Caixa Delimitadora",
"desc": "Ativa a visibilidade da caixa delimitadora"
},
"saveToGallery": {
"title": "Gravara Na Galeria",
"desc": "Grava a tela atual na galeria"
},
"copyToClipboard": {
"title": "Copiar para a Área de Transferência",
"desc": "Copia a tela atual para a área de transferência"
},
"resetView": {
"title": "Resetar Visualização",
"desc": "Reseta Visualização da Tela"
},
"previousStagingImage": {
"title": "Imagem de Preparação Anterior",
"desc": "Área de Imagem de Preparação Anterior"
},
"nextStagingImage": {
"title": "Próxima Imagem de Preparação Anterior",
"desc": "Próxima Área de Imagem de Preparação Anterior"
},
"acceptStagingImage": {
"title": "Aceitar Imagem de Preparação Anterior",
"desc": "Aceitar Área de Imagem de Preparação Anterior"
}
},
"modelManager": {
"modelAdded": "Modelo Adicionado",
"modelUpdated": "Modelo Atualizado",
"modelEntryDeleted": "Entrada de modelo excluída",
"description": "Descrição",
"modelLocationValidationMsg": "Caminho para onde o seu modelo está localizado.",
"repo_id": "Repo ID",
"vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
"width": "Largura",
"widthValidationMsg": "Largura padrão do seu modelo.",
"height": "Altura",
"heightValidationMsg": "Altura padrão do seu modelo.",
"findModels": "Encontrar Modelos",
"scanAgain": "Digitalize Novamente",
"deselectAll": "Deselecionar Tudo",
"showExisting": "Mostrar Existente",
"deleteConfig": "Apagar Config",
"convertToDiffusersHelpText6": "Deseja converter este modelo?",
"mergedModelName": "Nome do modelo mesclado",
"alpha": "Alpha",
"interpolationType": "Tipo de Interpolação",
"modelMergeHeaderHelp1": "Pode mesclar até três modelos diferentes para criar uma mistura que atenda às suas necessidades.",
"modelMergeHeaderHelp2": "Apenas Diffusers estão disponíveis para mesclagem. Se deseja mesclar um modelo de checkpoint, por favor, converta-o para Diffusers primeiro.",
"modelMergeInterpAddDifferenceHelp": "Neste modo, o Modelo 3 é primeiro subtraído do Modelo 2. A versão resultante é mesclada com o Modelo 1 com a taxa alpha definida acima.",
"nameValidationMsg": "Insira um nome para o seu modelo",
"descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
"config": "Configuração",
"modelExists": "Modelo Existe",
"selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
"noModelsFound": "Nenhum Modelo Encontrado",
"v2_768": "v2 (768px)",
"inpainting": "v1 Inpainting",
"customConfig": "Configuração personalizada",
"pathToCustomConfig": "Caminho para configuração personalizada",
"statusConverting": "A converter",
"modelConverted": "Modelo Convertido",
"ignoreMismatch": "Ignorar Divergências entre Modelos Selecionados",
"addDifference": "Adicionar diferença",
"pickModelType": "Escolha o tipo de modelo",
"safetensorModels": "SafeTensors",
"cannotUseSpaces": "Não pode usar espaços",
"addNew": "Adicionar Novo",
"addManually": "Adicionar Manualmente",
"manual": "Manual",
"name": "Nome",
"configValidationMsg": "Caminho para o ficheiro de configuração do seu modelo.",
"modelLocation": "Localização do modelo",
"repoIDValidationMsg": "Repositório Online do seu Modelo",
"updateModel": "Atualizar Modelo",
"availableModels": "Modelos Disponíveis",
"load": "Carregar",
"active": "Ativado",
"notLoaded": "Não carregado",
"deleteModel": "Apagar modelo",
"deleteMsg1": "Tem certeza de que deseja apagar esta entrada do modelo de InvokeAI?",
"deleteMsg2": "Isso não vai apagar o ficheiro de modelo checkpoint do seu disco. Pode lê-los, se desejar.",
"convertToDiffusers": "Converter para Diffusers",
"convertToDiffusersHelpText1": "Este modelo será convertido ao formato 🧨 Diffusers.",
"convertToDiffusersHelpText2": "Este processo irá substituir a sua entrada de Gestor de Modelos por uma versão Diffusers do mesmo modelo.",
"convertToDiffusersHelpText3": "O seu ficheiro de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Pode adicionar o seu ponto de verificação ao Gestor de modelos novamente, se desejar.",
"convertToDiffusersSaveLocation": "Local para Gravar",
"v2_base": "v2 (512px)",
"mergeModels": "Mesclar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"mergedModelSaveLocation": "Local de Salvamento",
"merge": "Mesclar",
"modelsMerged": "Modelos mesclados",
"mergedModelCustomSaveLocation": "Caminho Personalizado",
"invokeAIFolder": "Pasta Invoke AI",
"inverseSigmoid": "Sigmóide Inversa",
"none": "nenhum",
"modelManager": "Gerente de Modelo",
"model": "Modelo",
"allModels": "Todos os Modelos",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"addNewModel": "Adicionar Novo modelo",
"addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
"addDiffuserModel": "Adicionar Diffusers",
"vaeLocation": "Localização VAE",
"vaeLocationValidationMsg": "Caminho para onde o seu VAE está localizado.",
"vaeRepoID": "VAE Repo ID",
"addModel": "Adicionar Modelo",
"search": "Procurar",
"cached": "Em cache",
"checkpointFolder": "Pasta de Checkpoint",
"clearCheckpointFolder": "Apagar Pasta de Checkpoint",
"modelsFound": "Modelos Encontrados",
"selectFolder": "Selecione a Pasta",
"selected": "Selecionada",
"selectAll": "Selecionar Tudo",
"addSelected": "Adicione Selecionado",
"delete": "Apagar",
"formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
"formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
"formMessageDiffusersVAELocation": "Localização do VAE",
"formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo ficheiro VAE dentro do local do modelo.",
"convert": "Converter",
"convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, a depender das especificações do seu computador.",
"convertToDiffusersHelpText5": "Por favor, certifique-se de que tenha espaço suficiente no disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
"v1": "v1",
"sameFolder": "Mesma pasta",
"invokeRoot": "Pasta do InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Local de salvamento personalizado",
"modelMergeAlphaHelp": "Alpha controla a força da mistura dos modelos. Valores de alpha mais baixos resultam numa influência menor do segundo modelo.",
"sigmoid": "Sigmóide",
"weightedSum": "Soma Ponderada"
},
"parameters": {
"width": "Largura",
"seed": "Seed",
"hiresStrength": "Força da Alta Resolução",
"negativePrompts": "Indicações negativas",
"general": "Geral",
"randomizeSeed": "Seed Aleatório",
"shuffle": "Embaralhar",
"noiseThreshold": "Limite de Ruído",
"perlinNoise": "Ruído de Perlin",
"variations": "Variatções",
"seedWeights": "Pesos da Seed",
"restoreFaces": "Restaurar Rostos",
"faceRestoration": "Restauração de Rosto",
"type": "Tipo",
"denoisingStrength": "A força de remoção de ruído",
"scale": "Escala",
"otherOptions": "Outras Opções",
"seamlessTiling": "Ladrilho Sem Fronteira",
"hiresOptim": "Otimização de Alta Res",
"imageFit": "Caber Imagem Inicial No Tamanho de Saída",
"codeformerFidelity": "Fidelidade",
"seamSize": "Tamanho da Fronteira",
"seamBlur": "Desfoque da Fronteira",
"seamStrength": "Força da Fronteira",
"seamSteps": "Passos da Fronteira",
"tileSize": "Tamanho do Ladrilho",
"boundingBoxHeader": "Caixa Delimitadora",
"seamCorrectionHeader": "Correção de Fronteira",
"infillScalingHeader": "Preencimento e Escala",
"img2imgStrength": "Força de Imagem Para Imagem",
"toggleLoopback": "Ativar Loopback",
"symmetry": "Simetria",
"promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)",
"sendTo": "Mandar para",
"openInViewer": "Abrir No Visualizador",
"closeViewer": "Fechar Visualizador",
"usePrompt": "Usar Prompt",
"deleteImage": "Apagar Imagem",
"initialImage": "Imagem inicial",
"showOptionsPanel": "Mostrar Painel de Opções",
"strength": "Força",
"upscaling": "Redimensionando",
"upscale": "Redimensionar",
"upscaleImage": "Redimensionar Imagem",
"scaleBeforeProcessing": "Escala Antes do Processamento",
"invoke": "Invocar",
"images": "Imagems",
"steps": "Passos",
"cfgScale": "Escala CFG",
"height": "Altura",
"sampler": "Amostrador",
"imageToImage": "Imagem para Imagem",
"variationAmount": "Quntidade de Variatções",
"scaledWidth": "L Escalada",
"scaledHeight": "A Escalada",
"infillMethod": "Método de Preenchimento",
"hSymmetryStep": "H Passo de Simetria",
"vSymmetryStep": "V Passo de Simetria",
"cancel": {
"immediate": "Cancelar imediatamente",
"schedule": "Cancelar após a iteração atual",
"isScheduled": "A cancelar",
"setType": "Definir tipo de cancelamento"
},
"sendToImg2Img": "Mandar para Imagem Para Imagem",
"sendToUnifiedCanvas": "Mandar para Tela Unificada",
"copyImage": "Copiar imagem",
"copyImageToLink": "Copiar Imagem Para a Ligação",
"downloadImage": "Descarregar Imagem",
"useSeed": "Usar Seed",
"useAll": "Usar Todos",
"useInitImg": "Usar Imagem Inicial",
"info": "Informações"
},
"settings": {
"confirmOnDelete": "Confirmar Antes de Apagar",
"displayHelpIcons": "Mostrar Ícones de Ajuda",
"useCanvasBeta": "Usar Layout de Telas Beta",
"enableImageDebugging": "Ativar Depuração de Imagem",
"useSlidersForAll": "Usar deslizadores para todas as opções",
"resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.",
"models": "Modelos",
"displayInProgress": "Mostrar Progresso de Imagens Em Andamento",
"saveSteps": "Gravar imagens a cada n passos",
"resetWebUI": "Reiniciar Interface",
"resetWebUIDesc2": "Se as imagens não estão a aparecer na galeria ou algo mais não está a funcionar, favor tentar reiniciar antes de postar um problema no GitHub.",
"resetComplete": "A interface foi reiniciada. Atualize a página para carregar."
},
"toast": {
"uploadFailed": "Envio Falhou",
"uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitido uma imagem de cada vez",
"uploadFailedUnableToLoadDesc": "Não foj possível carregar o ficheiro",
"downloadImageStarted": "Download de Imagem Começou",
"imageNotLoadedDesc": "Nenhuma imagem encontrada a enviar para o módulo de imagem para imagem",
"imageLinkCopied": "Ligação de Imagem Copiada",
"imageNotLoaded": "Nenhuma Imagem Carregada",
"parametersFailed": "Problema ao carregar parâmetros",
"parametersFailedDesc": "Não foi possível carregar imagem incial.",
"seedSet": "Seed Definida",
"upscalingFailed": "Redimensionamento Falhou",
"promptNotSet": "Prompt Não Definido",
"tempFoldersEmptied": "Pasta de Ficheiros Temporários Esvaziada",
"imageCopied": "Imagem Copiada",
"imageSavedToGallery": "Imagem Salva na Galeria",
"canvasMerged": "Tela Fundida",
"sentToImageToImage": "Mandar Para Imagem Para Imagem",
"sentToUnifiedCanvas": "Enviada para a Tela Unificada",
"parametersSet": "Parâmetros Definidos",
"parametersNotSet": "Parâmetros Não Definidos",
"parametersNotSetDesc": "Nenhum metadado foi encontrado para essa imagem.",
"seedNotSet": "Seed Não Definida",
"seedNotSetDesc": "Não foi possível achar a seed para a imagem.",
"promptSet": "Prompt Definido",
"promptNotSetDesc": "Não foi possível achar prompt para essa imagem.",
"faceRestoreFailed": "Restauração de Rosto Falhou",
"metadataLoadFailed": "Falha ao tentar carregar metadados",
"initialImageSet": "Imagem Inicial Definida",
"initialImageNotSet": "Imagem Inicial Não Definida",
"initialImageNotSetDesc": "Não foi possível carregar imagem incial"
},
"tooltip": {
"feature": {
"prompt": "Este é o campo de prompt. O prompt inclui objetos de geração e termos estilísticos. Também pode adicionar peso (importância do token) no prompt, mas comandos e parâmetros de CLI não funcionarão.",
"other": "Essas opções ativam modos alternativos de processamento para o Invoke. 'Seamless tiling' criará padrões repetidos na saída. 'High resolution' é uma geração em duas etapas com img2img: use essa configuração quando desejar uma imagem maior e mais coerente sem artefatos. Levará mais tempo do que o txt2img usual.",
"seed": "O valor da semente afeta o ruído inicial a partir do qual a imagem é formada. Pode usar as sementes já existentes de imagens anteriores. 'Limiar de ruído' é usado para mitigar artefatos em valores CFG altos (experimente a faixa de 0-10) e o Perlin para adicionar ruído Perlin durante a geração: ambos servem para adicionar variação às suas saídas.",
"imageToImage": "Image to Image carrega qualquer imagem como inicial, que é então usada para gerar uma nova junto com o prompt. Quanto maior o valor, mais a imagem resultante mudará. Valores de 0.0 a 1.0 são possíveis, a faixa recomendada é de 0.25 a 0.75",
"faceCorrection": "Correção de rosto com GFPGAN ou Codeformer: o algoritmo detecta rostos na imagem e corrige quaisquer defeitos. Um valor alto mudará mais a imagem, a resultar em rostos mais atraentes. Codeformer com uma fidelidade maior preserva a imagem original às custas de uma correção de rosto mais forte.",
"seamCorrection": "Controla o tratamento das emendas visíveis que ocorrem entre as imagens geradas no canvas.",
"gallery": "A galeria exibe as gerações da pasta de saída conforme elas são criadas. As configurações são armazenadas em ficheiros e acessadas pelo menu de contexto.",
"variations": "Experimente uma variação com um valor entre 0,1 e 1,0 para mudar o resultado para uma determinada semente. Variações interessantes da semente estão entre 0,1 e 0,3.",
"upscale": "Use o ESRGAN para ampliar a imagem imediatamente após a geração.",
"boundingBox": "A caixa delimitadora é a mesma que as configurações de largura e altura para Texto para Imagem ou Imagem para Imagem. Apenas a área na caixa será processada.",
"infillAndScaling": "Gira os métodos de preenchimento (usados em áreas mascaradas ou apagadas do canvas) e a escala (útil para tamanhos de caixa delimitadora pequenos)."
}
},
"unifiedCanvas": {
"emptyTempImagesFolderMessage": "Esvaziar a pasta de ficheiros de imagem temporários também reseta completamente a Tela Unificada. Isso inclui todo o histórico de desfazer/refazer, imagens na área de preparação e a camada base da tela.",
"scaledBoundingBox": "Caixa Delimitadora Escalada",
"boundingBoxPosition": "Posição da Caixa Delimitadora",
"next": "Próximo",
"accept": "Aceitar",
"showHide": "Mostrar/Esconder",
"discardAll": "Descartar Todos",
"betaClear": "Limpar",
"betaDarkenOutside": "Escurecer Externamente",
"base": "Base",
"brush": "Pincel",
"showIntermediates": "Mostrar Intermediários",
"showGrid": "Mostrar Grade",
"clearCanvasHistoryConfirm": "Tem certeza que quer limpar o histórico de tela?",
"boundingBox": "Caixa Delimitadora",
"canvasDimensions": "Dimensões da Tela",
"canvasPosition": "Posição da Tela",
"cursorPosition": "Posição do cursor",
"previous": "Anterior",
"betaLimitToBox": "Limitar á Caixa",
"layer": "Camada",
"mask": "Máscara",
"maskingOptions": "Opções de Mascaramento",
"enableMask": "Ativar Máscara",
"preserveMaskedArea": "Preservar Área da Máscara",
"clearMask": "Limpar Máscara",
"eraser": "Apagador",
"fillBoundingBox": "Preencher Caixa Delimitadora",
"eraseBoundingBox": "Apagar Caixa Delimitadora",
"colorPicker": "Seletor de Cor",
"brushOptions": "Opções de Pincel",
"brushSize": "Tamanho",
"move": "Mover",
"resetView": "Resetar Visualização",
"mergeVisible": "Fundir Visível",
"saveToGallery": "Gravar na Galeria",
"copyToClipboard": "Copiar para a Área de Transferência",
"downloadAsImage": "Descarregar Como Imagem",
"undo": "Desfazer",
"redo": "Refazer",
"clearCanvas": "Limpar Tela",
"canvasSettings": "Configurações de Tela",
"snapToGrid": "Encaixar na Grade",
"darkenOutsideSelection": "Escurecer Seleção Externa",
"autoSaveToGallery": "Gravar Automaticamente na Galeria",
"saveBoxRegionOnly": "Gravar Apenas a Região da Caixa",
"limitStrokesToBox": "Limitar Traços à Caixa",
"showCanvasDebugInfo": "Mostrar Informações de Depuração daTela",
"clearCanvasHistory": "Limpar o Histórico da Tela",
"clearHistory": "Limpar Históprico",
"clearCanvasHistoryMessage": "Limpar o histórico de tela deixa a sua tela atual intacta, mas limpa de forma irreversível o histórico de desfazer e refazer.",
"emptyTempImageFolder": "Esvaziar a Pasta de Ficheiros de Imagem Temporários",
"emptyFolder": "Esvaziar Pasta",
"emptyTempImagesFolderConfirm": "Tem certeza que quer esvaziar a pasta de ficheiros de imagem temporários?",
"activeLayer": "Camada Ativa",
"canvasScale": "Escala da Tela",
"betaPreserveMasked": "Preservar Máscarado"
},
"accessibility": {
"invokeProgressBar": "Invocar barra de progresso",
"reset": "Repôr",
"nextImage": "Próxima imagem",
"useThisParameter": "Usar este parâmetro",
"copyMetadataJson": "Copiar metadados JSON",
"zoomIn": "Ampliar",
"zoomOut": "Reduzir",
"rotateCounterClockwise": "Girar no sentido anti-horário",
"rotateClockwise": "Girar no sentido horário",
"flipVertically": "Espelhar verticalmente",
"modifyConfig": "Modificar config",
"toggleAutoscroll": "Alternar rolagem automática",
"showGallery": "Mostrar galeria",
"showOptionsPanel": "Mostrar painel de opções",
"uploadImage": "Enviar imagem",
"previousImage": "Imagem anterior",
"flipHorizontally": "Espelhar horizontalmente",
"toggleLogViewer": "Alternar visualizador de registo"
}
}

View File

@ -63,7 +63,10 @@
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados",
"langRussian": "Russo",
"langSpanish": "Espanhol"
"langSpanish": "Espanhol",
"pinOptionsPanel": "Fixar painel de opções",
"loadingInvokeAI": "Carregando Invoke AI",
"loading": "Carregando"
},
"gallery": {
"generations": "Gerações",

View File

@ -46,7 +46,15 @@
"statusLoadingModel": "Загрузка модели",
"statusModelChanged": "Модель изменена",
"githubLabel": "Github",
"discordLabel": "Discord"
"discordLabel": "Discord",
"statusMergingModels": "Слияние моделей",
"statusModelConverted": "Модель сконвертирована",
"statusMergedModels": "Модели объединены",
"pinOptionsPanel": "Закрепить панель настроек",
"loading": "Загрузка",
"loadingInvokeAI": "Загрузка Invoke AI",
"back": "Назад",
"statusConvertingModel": "Конвертация модели"
},
"gallery": {
"generations": "Генерации",
@ -323,7 +331,30 @@
"deleteConfig": "Удалить конфигурацию",
"deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?",
"deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.",
"repoIDValidationMsg": "Онлайн-репозиторий модели"
"repoIDValidationMsg": "Онлайн-репозиторий модели",
"convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. Модели обычно занимают 4 7 Гб.",
"invokeAIFolder": "Каталог InvokeAI",
"ignoreMismatch": "Игнорировать несоответствия между выбранными моделями",
"addCheckpointModel": "Добавить модель Checkpoint/Safetensor",
"formMessageDiffusersModelLocationDesc": "Укажите хотя бы одно.",
"convertToDiffusersHelpText3": "Файл модели на диске НЕ будет удалён или изменён. Вы сможете заново добавить его в Model Manager при необходимости.",
"vaeRepoID": "ID репозитория VAE",
"mergedModelName": "Название объединенной модели",
"checkpointModels": "Checkpoints",
"allModels": "Все модели",
"addDiffuserModel": "Добавить Diffusers",
"repo_id": "ID репозитория",
"formMessageDiffusersVAELocationDesc": "Если не указано, InvokeAI будет искать файл VAE рядом с моделью.",
"convert": "Преобразовать",
"convertToDiffusers": "Преобразовать в Diffusers",
"convertToDiffusersHelpText1": "Модель будет преобразована в формат 🧨 Diffusers.",
"convertToDiffusersHelpText4": "Это единоразовое действие. Оно может занять 30—60 секунд в зависимости от характеристик вашего компьютера.",
"convertToDiffusersHelpText6": "Вы хотите преобразовать эту модель?",
"statusConverting": "Преобразование",
"modelConverted": "Модель преобразована",
"invokeRoot": "Каталог InvokeAI",
"modelsMerged": "Модели объединены",
"mergeModels": "Объединить модели"
},
"parameters": {
"images": "Изображения",
@ -503,5 +534,8 @@
"betaDarkenOutside": "Затемнить снаружи",
"betaLimitToBox": "Ограничить выделением",
"betaPreserveMasked": "Сохранять маскируемую область"
},
"accessibility": {
"modelSelect": "Выбор модели"
}
}

View File

@ -19,6 +19,21 @@
"discordLabel": "Discord",
"nodesDesc": "使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。",
"reportBugLabel": "回報錯誤",
"githubLabel": "GitHub"
"githubLabel": "GitHub",
"langKorean": "韓語",
"langPortuguese": "葡萄牙語",
"hotkeysLabel": "快捷鍵",
"languagePickerLabel": "切換語言",
"langDutch": "荷蘭語",
"langFrench": "法語",
"langGerman": "德語",
"langItalian": "義大利語",
"langJapanese": "日語",
"langPolish": "波蘭語",
"langBrPortuguese": "巴西葡萄牙語",
"langRussian": "俄語",
"langSpanish": "西班牙語",
"text2img": "文字到圖像",
"unifiedCanvas": "統一畫布"
}
}

View File

@ -1,3 +1,5 @@
import React, { PropsWithChildren } from 'react';
export {};
declare module 'redux-socket.io-middleware';
@ -39,3 +41,18 @@ declare global {
}
/* eslint-enable @typescript-eslint/no-explicit-any */
}
declare module '@invoke-ai/invoke-ai-ui' {
declare class ThemeChanger extends React.Component<ThemeChangerProps> {
public constructor(props: ThemeChangerProps);
}
declare class InvokeAiLogoComponent extends React.Component<InvokeAILogoComponentProps> {
public constructor(props: InvokeAILogoComponentProps);
}
}
declare function Invoke(props: PropsWithChildren): JSX.Element;
export { ThemeChanger, InvokeAiLogoComponent };
export = Invoke;
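
A minimal consumption sketch for the declarations above, assuming the package name and exports exactly as declared (the Host component is hypothetical). Note that the file mixes export = with named exports, so the exact import form may differ once the typings settle:

// Hypothetical consumer of the ambient declarations above.
import React from 'react';
import Invoke, { ThemeChanger, InvokeAiLogoComponent } from '@invoke-ai/invoke-ai-ui';

const Host = () => (
  <Invoke>
    <ThemeChanger />
    <InvokeAiLogoComponent />
  </Invoke>
);

export default Host;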

View File

@ -6,9 +6,10 @@
"prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
"dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
"build": "yarn run lint && vite build",
"build:package": "vite build --mode=package",
"preview": "vite preview",
"lint:madge": "madge --circular src/main.tsx",
"lint:eslint": "eslint --max-warnings=0",
"lint:eslint": "eslint --max-warnings=0 .",
"lint:prettier": "prettier --check .",
"lint:tsc": "tsc --noEmit",
"lint": "yarn run lint:eslint && yarn run lint:prettier && yarn run lint:tsc && yarn run lint:madge",
@ -36,6 +37,7 @@
},
"dependencies": {
"@chakra-ui/anatomy": "^2.1.1",
"@chakra-ui/cli": "^2.3.0",
"@chakra-ui/icons": "^2.0.17",
"@chakra-ui/react": "^2.5.1",
"@chakra-ui/styled-system": "^2.6.1",
@ -52,6 +54,7 @@
"i18next-http-backend": "^2.1.1",
"konva": "^8.4.2",
"lodash": "^4.17.21",
"patch-package": "^6.5.1",
"re-resizable": "^6.9.9",
"react": "^18.2.0",
"react-colorful": "^5.6.1",
@ -72,7 +75,6 @@
"uuid": "^9.0.0"
},
"devDependencies": {
"@chakra-ui/cli": "^2.3.0",
"@fontsource/inter": "^4.5.15",
"@types/dateformat": "^5.0.0",
"@types/react": "^18.0.28",
@ -92,7 +94,6 @@
"husky": "^8.0.3",
"lint-staged": "^13.1.2",
"madge": "^6.0.0",
"patch-package": "^6.5.1",
"postinstall-postinstall": "^2.1.0",
"prettier": "^2.8.4",
"rollup-plugin-visualizer": "^5.9.0",

View File

@ -1,7 +1,25 @@
{
"accessibility": {
"modelSelect": "Model Select",
"invokeProgressBar": "Invoke progress bar"
"invokeProgressBar": "Invoke progress bar",
"reset": "Reset",
"uploadImage": "Upload Image",
"previousImage": "Previous Image",
"nextImage": "Next Image",
"useThisParameter": "Use this parameter",
"copyMetadataJson": "Copy metadata JSON",
"exitViewer": "ExitViewer",
"zoomIn": "Zoom In",
"zoomOut": "Zoom Out",
"rotateCounterClockwise": "Rotate Counter-Clockwise",
"rotateClockwise": "Rotate Clockwise",
"flipHorizontally": "Flip Horizontally",
"flipVertically": "Flip Vertically",
"modifyConfig": "Modify Config",
"toggleAutoscroll": "Toggle autoscroll",
"toggleLogViewer": "Toggle Log Viewer",
"showGallery": "Show Gallery",
"showOptionsPanel": "Show Options Panel"
},
"common": {
"hotkeysLabel": "Hotkeys",
@ -31,10 +49,11 @@
"langSimplifiedChinese": "简体中文",
"langUkranian": "Украї́нська",
"langSpanish": "Español",
"text2img": "Text To Image",
"txt2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"nodes": "Nodes",
"postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
@ -578,7 +597,7 @@
"autoSaveToGallery": "Auto Save to Gallery",
"saveBoxRegionOnly": "Save Box Region Only",
"limitStrokesToBox": "Limit Strokes to Box",
"showCanvasDebugInfo": "Show Canvas Debug Info",
"showCanvasDebugInfo": "Show Additional Canvas Info",
"clearCanvasHistory": "Clear Canvas History",
"clearHistory": "Clear History",
"clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",

View File

@ -63,7 +63,14 @@
"back": "Atrás",
"statusConvertingModel": "Convertir el modelo",
"statusModelConverted": "Modelo adaptado",
"statusMergingModels": "Fusionar modelos"
"statusMergingModels": "Fusionar modelos",
"oceanTheme": "Océano",
"langPortuguese": "Portugués",
"langKorean": "Coreano",
"langHebrew": "Hebreo",
"pinOptionsPanel": "Pin del panel de opciones",
"loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA"
},
"gallery": {
"generations": "Generaciones",
@ -385,14 +392,19 @@
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.",
"modelMergeHeaderHelp1": "Puede unir hasta tres modelos diferentes para crear una combinación que se adapte a sus necesidades.",
"inverseSigmoid": "Sigmoideo inverso",
"weightedSum": "Modelo de suma ponderada",
"sigmoid": "Función sigmoide",
"allModels": "Todos los modelos",
"repo_id": "Identificador del repositorio",
"pathToCustomConfig": "Ruta a la configuración personalizada",
"customConfig": "Configuración personalizada"
"customConfig": "Configuración personalizada",
"v2_base": "v2 (512px)",
"none": "ninguno",
"pickModelType": "Elige el tipo de modelo",
"v2_768": "v2 (768px)",
"addDifference": "Añadir una diferencia"
},
"parameters": {
"images": "Imágenes",
@ -588,5 +600,27 @@
"betaDarkenOutside": "Oscurecer fuera",
"betaLimitToBox": "Limitar a caja",
"betaPreserveMasked": "Preservar área enmascarada"
},
"accessibility": {
"invokeProgressBar": "Activar la barra de progreso",
"modelSelect": "Seleccionar modelo",
"reset": "Reiniciar",
"uploadImage": "Cargar imagen",
"previousImage": "Imagen anterior",
"nextImage": "Siguiente imagen",
"useThisParameter": "Utiliza este parámetro",
"copyMetadataJson": "Copiar los metadatos JSON",
"exitViewer": "Salir del visor",
"zoomIn": "Acercar",
"zoomOut": "Alejar",
"rotateCounterClockwise": "Girar en sentido antihorario",
"rotateClockwise": "Girar en sentido horario",
"flipHorizontally": "Voltear horizontalmente",
"flipVertically": "Voltear verticalmente",
"modifyConfig": "Modificar la configuración",
"toggleAutoscroll": "Activar el autodesplazamiento",
"toggleLogViewer": "Alternar el visor de registros",
"showGallery": "Mostrar galería",
"showOptionsPanel": "Mostrar el panel de opciones"
}
}

View File

@ -63,7 +63,14 @@
"langSimplifiedChinese": "Cinese semplificato",
"langDutch": "Olandese",
"statusModelConverted": "Modello Convertito",
"statusConvertingModel": "Conversione Modello"
"statusConvertingModel": "Conversione Modello",
"langKorean": "Coreano",
"langPortuguese": "Portoghese",
"pinOptionsPanel": "Blocca il pannello Opzioni",
"loading": "Caricamento in corso",
"oceanTheme": "Oceano",
"langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI"
},
"gallery": {
"generations": "Generazioni",
@ -392,7 +399,12 @@
"customSaveLocation": "Ubicazione salvataggio personalizzata",
"weightedSum": "Somma pesata",
"sigmoid": "Sigmoide",
"inverseSigmoid": "Sigmoide inverso"
"inverseSigmoid": "Sigmoide inverso",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"none": "niente",
"addDifference": "Aggiungi differenza",
"pickModelType": "Scegli il tipo di modello"
},
"parameters": {
"images": "Immagini",
@ -588,5 +600,27 @@
"betaDarkenOutside": "Oscura all'esterno",
"betaLimitToBox": "Limita al rettangolo",
"betaPreserveMasked": "Conserva quanto mascherato"
},
"accessibility": {
"modelSelect": "Seleziona modello",
"invokeProgressBar": "Barra di avanzamento generazione",
"uploadImage": "Carica immagine",
"previousImage": "Immagine precedente",
"nextImage": "Immagine successiva",
"useThisParameter": "Usa questo parametro",
"reset": "Reimposta",
"copyMetadataJson": "Copia i metadati JSON",
"exitViewer": "Esci dal visualizzatore",
"zoomIn": "Zoom avanti",
"zoomOut": "Zoom Indietro",
"rotateCounterClockwise": "Ruotare in senso antiorario",
"rotateClockwise": "Ruotare in senso orario",
"flipHorizontally": "Capovolgi orizzontalmente",
"toggleLogViewer": "Attiva/disattiva visualizzatore registro",
"showGallery": "Mostra la galleria immagini",
"showOptionsPanel": "Mostra il pannello opzioni",
"flipVertically": "Capovolgi verticalmente",
"toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico",
"modifyConfig": "Modifica configurazione"
}
}

View File

@ -63,6 +63,560 @@
"statusGeneratingOutpainting": "Geração de Ampliação",
"statusGenerationComplete": "Geração Completa",
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados"
"statusMergedModels": "Modelos Mesclados",
"oceanTheme": "Oceano",
"pinOptionsPanel": "Fixar painel de opções",
"loading": "A carregar",
"loadingInvokeAI": "A carregar Invoke AI",
"langPortuguese": "Português"
},
"gallery": {
"galleryImageResetSize": "Resetar Imagem",
"gallerySettings": "Configurações de Galeria",
"maintainAspectRatio": "Mater Proporções",
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente",
"pinGallery": "Fixar Galeria",
"singleColumnLayout": "Disposição em Coluna Única",
"allImagesLoaded": "Todas as Imagens Carregadas",
"loadMore": "Carregar Mais",
"noImagesInGallery": "Sem Imagens na Galeria",
"generations": "Gerações",
"showGenerations": "Mostrar Gerações",
"uploads": "Enviados",
"showUploads": "Mostrar Enviados",
"galleryImageSize": "Tamanho da Imagem"
},
"hotkeys": {
"generalHotkeys": "Atalhos Gerais",
"galleryHotkeys": "Atalhos da Galeria",
"toggleViewer": {
"title": "Ativar Visualizador",
"desc": "Abrir e fechar o Visualizador de Imagens"
},
"maximizeWorkSpace": {
"desc": "Fechar painéis e maximixar área de trabalho",
"title": "Maximizar a Área de Trabalho"
},
"changeTabs": {
"title": "Mudar Guias",
"desc": "Trocar para outra área de trabalho"
},
"consoleToggle": {
"desc": "Abrir e fechar console",
"title": "Ativar Console"
},
"setPrompt": {
"title": "Definir Prompt",
"desc": "Usar o prompt da imagem atual"
},
"sendToImageToImage": {
"desc": "Manda a imagem atual para Imagem Para Imagem",
"title": "Mandar para Imagem Para Imagem"
},
"previousImage": {
"desc": "Mostra a imagem anterior na galeria",
"title": "Imagem Anterior"
},
"nextImage": {
"title": "Próxima Imagem",
"desc": "Mostra a próxima imagem na galeria"
},
"decreaseGalleryThumbSize": {
"desc": "Diminui o tamanho das thumbs na galeria",
"title": "Diminuir Tamanho da Galeria de Imagem"
},
"selectBrush": {
"title": "Selecionar Pincel",
"desc": "Seleciona o pincel"
},
"selectEraser": {
"title": "Selecionar Apagador",
"desc": "Seleciona o apagador"
},
"decreaseBrushSize": {
"title": "Diminuir Tamanho do Pincel",
"desc": "Diminui o tamanho do pincel/apagador"
},
"increaseBrushOpacity": {
"desc": "Aumenta a opacidade do pincel",
"title": "Aumentar Opacidade do Pincel"
},
"moveTool": {
"title": "Ferramenta Mover",
"desc": "Permite navegar pela tela"
},
"decreaseBrushOpacity": {
"desc": "Diminui a opacidade do pincel",
"title": "Diminuir Opacidade do Pincel"
},
"toggleSnap": {
"title": "Ativar Encaixe",
"desc": "Ativa Encaixar na Grade"
},
"quickToggleMove": {
"title": "Ativar Mover Rapidamente",
"desc": "Temporariamente ativa o modo Mover"
},
"toggleLayer": {
"title": "Ativar Camada",
"desc": "Ativa a seleção de camada de máscara/base"
},
"clearMask": {
"title": "Limpar Máscara",
"desc": "Limpa toda a máscara"
},
"hideMask": {
"title": "Esconder Máscara",
"desc": "Esconde e Revela a máscara"
},
"mergeVisible": {
"title": "Fundir Visível",
"desc": "Fundir todas as camadas visíveis das telas"
},
"downloadImage": {
"desc": "Descarregar a tela atual",
"title": "Descarregar Imagem"
},
"undoStroke": {
"title": "Desfazer Traço",
"desc": "Desfaz um traço de pincel"
},
"redoStroke": {
"title": "Refazer Traço",
"desc": "Refaz o traço de pincel"
},
"keyboardShortcuts": "Atalhos de Teclado",
"appHotkeys": "Atalhos do app",
"invoke": {
"title": "Invocar",
"desc": "Gerar uma imagem"
},
"cancel": {
"title": "Cancelar",
"desc": "Cancelar geração de imagem"
},
"focusPrompt": {
"title": "Foco do Prompt",
"desc": "Foco da área de texto do prompt"
},
"toggleOptions": {
"title": "Ativar Opções",
"desc": "Abrir e fechar o painel de opções"
},
"pinOptions": {
"title": "Fixar Opções",
"desc": "Fixar o painel de opções"
},
"closePanels": {
"title": "Fechar Painéis",
"desc": "Fecha os painéis abertos"
},
"unifiedCanvasHotkeys": "Atalhos da Tela Unificada",
"toggleGallery": {
"title": "Ativar Galeria",
"desc": "Abrir e fechar a gaveta da galeria"
},
"setSeed": {
"title": "Definir Seed",
"desc": "Usar seed da imagem atual"
},
"setParameters": {
"title": "Definir Parâmetros",
"desc": "Usar todos os parâmetros da imagem atual"
},
"restoreFaces": {
"title": "Restaurar Rostos",
"desc": "Restaurar a imagem atual"
},
"upscale": {
"title": "Redimensionar",
"desc": "Redimensionar a imagem atual"
},
"showInfo": {
"title": "Mostrar Informações",
"desc": "Mostrar metadados de informações da imagem atual"
},
"deleteImage": {
"title": "Apagar Imagem",
"desc": "Apaga a imagem atual"
},
"toggleGalleryPin": {
"title": "Ativar Fixar Galeria",
"desc": "Fixa e desafixa a galeria na interface"
},
"increaseGalleryThumbSize": {
"title": "Aumentar Tamanho da Galeria de Imagem",
"desc": "Aumenta o tamanho das thumbs na galeria"
},
"increaseBrushSize": {
"title": "Aumentar Tamanho do Pincel",
"desc": "Aumenta o tamanho do pincel/apagador"
},
"fillBoundingBox": {
"title": "Preencher Caixa Delimitadora",
"desc": "Preenche a caixa delimitadora com a cor do pincel"
},
"eraseBoundingBox": {
"title": "Apagar Caixa Delimitadora",
"desc": "Apaga a área da caixa delimitadora"
},
"colorPicker": {
"title": "Selecionar Seletor de Cor",
"desc": "Seleciona o seletor de cores"
},
"showHideBoundingBox": {
"title": "Mostrar/Esconder Caixa Delimitadora",
"desc": "Ativa a visibilidade da caixa delimitadora"
},
"saveToGallery": {
"title": "Gravara Na Galeria",
"desc": "Grava a tela atual na galeria"
},
"copyToClipboard": {
"title": "Copiar para a Área de Transferência",
"desc": "Copia a tela atual para a área de transferência"
},
"resetView": {
"title": "Resetar Visualização",
"desc": "Reseta Visualização da Tela"
},
"previousStagingImage": {
"title": "Imagem de Preparação Anterior",
"desc": "Área de Imagem de Preparação Anterior"
},
"nextStagingImage": {
"title": "Próxima Imagem de Preparação Anterior",
"desc": "Próxima Área de Imagem de Preparação Anterior"
},
"acceptStagingImage": {
"title": "Aceitar Imagem de Preparação Anterior",
"desc": "Aceitar Área de Imagem de Preparação Anterior"
}
},
"modelManager": {
"modelAdded": "Modelo Adicionado",
"modelUpdated": "Modelo Atualizado",
"modelEntryDeleted": "Entrada de modelo excluída",
"description": "Descrição",
"modelLocationValidationMsg": "Caminho para onde o seu modelo está localizado.",
"repo_id": "Repo ID",
"vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
"width": "Largura",
"widthValidationMsg": "Largura padrão do seu modelo.",
"height": "Altura",
"heightValidationMsg": "Altura padrão do seu modelo.",
"findModels": "Encontrar Modelos",
"scanAgain": "Digitalize Novamente",
"deselectAll": "Deselecionar Tudo",
"showExisting": "Mostrar Existente",
"deleteConfig": "Apagar Config",
"convertToDiffusersHelpText6": "Deseja converter este modelo?",
"mergedModelName": "Nome do modelo mesclado",
"alpha": "Alpha",
"interpolationType": "Tipo de Interpolação",
"modelMergeHeaderHelp1": "Pode mesclar até três modelos diferentes para criar uma mistura que atenda às suas necessidades.",
"modelMergeHeaderHelp2": "Apenas Diffusers estão disponíveis para mesclagem. Se deseja mesclar um modelo de checkpoint, por favor, converta-o para Diffusers primeiro.",
"modelMergeInterpAddDifferenceHelp": "Neste modo, o Modelo 3 é primeiro subtraído do Modelo 2. A versão resultante é mesclada com o Modelo 1 com a taxa alpha definida acima.",
"nameValidationMsg": "Insira um nome para o seu modelo",
"descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
"config": "Configuração",
"modelExists": "Modelo Existe",
"selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
"noModelsFound": "Nenhum Modelo Encontrado",
"v2_768": "v2 (768px)",
"inpainting": "v1 Inpainting",
"customConfig": "Configuração personalizada",
"pathToCustomConfig": "Caminho para configuração personalizada",
"statusConverting": "A converter",
"modelConverted": "Modelo Convertido",
"ignoreMismatch": "Ignorar Divergências entre Modelos Selecionados",
"addDifference": "Adicionar diferença",
"pickModelType": "Escolha o tipo de modelo",
"safetensorModels": "SafeTensors",
"cannotUseSpaces": "Não pode usar espaços",
"addNew": "Adicionar Novo",
"addManually": "Adicionar Manualmente",
"manual": "Manual",
"name": "Nome",
"configValidationMsg": "Caminho para o ficheiro de configuração do seu modelo.",
"modelLocation": "Localização do modelo",
"repoIDValidationMsg": "Repositório Online do seu Modelo",
"updateModel": "Atualizar Modelo",
"availableModels": "Modelos Disponíveis",
"load": "Carregar",
"active": "Ativado",
"notLoaded": "Não carregado",
"deleteModel": "Apagar modelo",
"deleteMsg1": "Tem certeza de que deseja apagar esta entrada do modelo de InvokeAI?",
"deleteMsg2": "Isso não vai apagar o ficheiro de modelo checkpoint do seu disco. Pode lê-los, se desejar.",
"convertToDiffusers": "Converter para Diffusers",
"convertToDiffusersHelpText1": "Este modelo será convertido ao formato 🧨 Diffusers.",
"convertToDiffusersHelpText2": "Este processo irá substituir a sua entrada de Gestor de Modelos por uma versão Diffusers do mesmo modelo.",
"convertToDiffusersHelpText3": "O seu ficheiro de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Pode adicionar o seu ponto de verificação ao Gestor de modelos novamente, se desejar.",
"convertToDiffusersSaveLocation": "Local para Gravar",
"v2_base": "v2 (512px)",
"mergeModels": "Mesclar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"mergedModelSaveLocation": "Local de Salvamento",
"merge": "Mesclar",
"modelsMerged": "Modelos mesclados",
"mergedModelCustomSaveLocation": "Caminho Personalizado",
"invokeAIFolder": "Pasta Invoke AI",
"inverseSigmoid": "Sigmóide Inversa",
"none": "nenhum",
"modelManager": "Gerente de Modelo",
"model": "Modelo",
"allModels": "Todos os Modelos",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"addNewModel": "Adicionar Novo modelo",
"addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
"addDiffuserModel": "Adicionar Diffusers",
"vaeLocation": "Localização VAE",
"vaeLocationValidationMsg": "Caminho para onde o seu VAE está localizado.",
"vaeRepoID": "VAE Repo ID",
"addModel": "Adicionar Modelo",
"search": "Procurar",
"cached": "Em cache",
"checkpointFolder": "Pasta de Checkpoint",
"clearCheckpointFolder": "Apagar Pasta de Checkpoint",
"modelsFound": "Modelos Encontrados",
"selectFolder": "Selecione a Pasta",
"selected": "Selecionada",
"selectAll": "Selecionar Tudo",
"addSelected": "Adicione Selecionado",
"delete": "Apagar",
"formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
"formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
"formMessageDiffusersVAELocation": "Localização do VAE",
"formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo ficheiro VAE dentro do local do modelo.",
"convert": "Converter",
"convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, a depender das especificações do seu computador.",
"convertToDiffusersHelpText5": "Por favor, certifique-se de que tenha espaço suficiente no disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
"v1": "v1",
"sameFolder": "Mesma pasta",
"invokeRoot": "Pasta do InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Local de salvamento personalizado",
"modelMergeAlphaHelp": "Alpha controla a força da mistura dos modelos. Valores de alpha mais baixos resultam numa influência menor do segundo modelo.",
"sigmoid": "Sigmóide",
"weightedSum": "Soma Ponderada"
},
"parameters": {
"width": "Largura",
"seed": "Seed",
"hiresStrength": "Força da Alta Resolução",
"negativePrompts": "Indicações negativas",
"general": "Geral",
"randomizeSeed": "Seed Aleatório",
"shuffle": "Embaralhar",
"noiseThreshold": "Limite de Ruído",
"perlinNoise": "Ruído de Perlin",
"variations": "Variatções",
"seedWeights": "Pesos da Seed",
"restoreFaces": "Restaurar Rostos",
"faceRestoration": "Restauração de Rosto",
"type": "Tipo",
"denoisingStrength": "A força de remoção de ruído",
"scale": "Escala",
"otherOptions": "Outras Opções",
"seamlessTiling": "Ladrilho Sem Fronteira",
"hiresOptim": "Otimização de Alta Res",
"imageFit": "Caber Imagem Inicial No Tamanho de Saída",
"codeformerFidelity": "Fidelidade",
"seamSize": "Tamanho da Fronteira",
"seamBlur": "Desfoque da Fronteira",
"seamStrength": "Força da Fronteira",
"seamSteps": "Passos da Fronteira",
"tileSize": "Tamanho do Ladrilho",
"boundingBoxHeader": "Caixa Delimitadora",
"seamCorrectionHeader": "Correção de Fronteira",
"infillScalingHeader": "Preencimento e Escala",
"img2imgStrength": "Força de Imagem Para Imagem",
"toggleLoopback": "Ativar Loopback",
"symmetry": "Simetria",
"promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)",
"sendTo": "Mandar para",
"openInViewer": "Abrir No Visualizador",
"closeViewer": "Fechar Visualizador",
"usePrompt": "Usar Prompt",
"deleteImage": "Apagar Imagem",
"initialImage": "Imagem inicial",
"showOptionsPanel": "Mostrar Painel de Opções",
"strength": "Força",
"upscaling": "Redimensionando",
"upscale": "Redimensionar",
"upscaleImage": "Redimensionar Imagem",
"scaleBeforeProcessing": "Escala Antes do Processamento",
"invoke": "Invocar",
"images": "Imagems",
"steps": "Passos",
"cfgScale": "Escala CFG",
"height": "Altura",
"sampler": "Amostrador",
"imageToImage": "Imagem para Imagem",
"variationAmount": "Quntidade de Variatções",
"scaledWidth": "L Escalada",
"scaledHeight": "A Escalada",
"infillMethod": "Método de Preenchimento",
"hSymmetryStep": "H Passo de Simetria",
"vSymmetryStep": "V Passo de Simetria",
"cancel": {
"immediate": "Cancelar imediatamente",
"schedule": "Cancelar após a iteração atual",
"isScheduled": "A cancelar",
"setType": "Definir tipo de cancelamento"
},
"sendToImg2Img": "Mandar para Imagem Para Imagem",
"sendToUnifiedCanvas": "Mandar para Tela Unificada",
"copyImage": "Copiar imagem",
"copyImageToLink": "Copiar Imagem Para a Ligação",
"downloadImage": "Descarregar Imagem",
"useSeed": "Usar Seed",
"useAll": "Usar Todos",
"useInitImg": "Usar Imagem Inicial",
"info": "Informações"
},
"settings": {
"confirmOnDelete": "Confirmar Antes de Apagar",
"displayHelpIcons": "Mostrar Ícones de Ajuda",
"useCanvasBeta": "Usar Layout de Telas Beta",
"enableImageDebugging": "Ativar Depuração de Imagem",
"useSlidersForAll": "Usar deslizadores para todas as opções",
"resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.",
"models": "Modelos",
"displayInProgress": "Mostrar Progresso de Imagens Em Andamento",
"saveSteps": "Gravar imagens a cada n passos",
"resetWebUI": "Reiniciar Interface",
"resetWebUIDesc2": "Se as imagens não estão a aparecer na galeria ou algo mais não está a funcionar, favor tentar reiniciar antes de postar um problema no GitHub.",
"resetComplete": "A interface foi reiniciada. Atualize a página para carregar."
},
"toast": {
"uploadFailed": "Envio Falhou",
"uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitido uma imagem de cada vez",
"uploadFailedUnableToLoadDesc": "Não foj possível carregar o ficheiro",
"downloadImageStarted": "Download de Imagem Começou",
"imageNotLoadedDesc": "Nenhuma imagem encontrada a enviar para o módulo de imagem para imagem",
"imageLinkCopied": "Ligação de Imagem Copiada",
"imageNotLoaded": "Nenhuma Imagem Carregada",
"parametersFailed": "Problema ao carregar parâmetros",
"parametersFailedDesc": "Não foi possível carregar imagem incial.",
"seedSet": "Seed Definida",
"upscalingFailed": "Redimensionamento Falhou",
"promptNotSet": "Prompt Não Definido",
"tempFoldersEmptied": "Pasta de Ficheiros Temporários Esvaziada",
"imageCopied": "Imagem Copiada",
"imageSavedToGallery": "Imagem Salva na Galeria",
"canvasMerged": "Tela Fundida",
"sentToImageToImage": "Mandar Para Imagem Para Imagem",
"sentToUnifiedCanvas": "Enviada para a Tela Unificada",
"parametersSet": "Parâmetros Definidos",
"parametersNotSet": "Parâmetros Não Definidos",
"parametersNotSetDesc": "Nenhum metadado foi encontrado para essa imagem.",
"seedNotSet": "Seed Não Definida",
"seedNotSetDesc": "Não foi possível achar a seed para a imagem.",
"promptSet": "Prompt Definido",
"promptNotSetDesc": "Não foi possível achar prompt para essa imagem.",
"faceRestoreFailed": "Restauração de Rosto Falhou",
"metadataLoadFailed": "Falha ao tentar carregar metadados",
"initialImageSet": "Imagem Inicial Definida",
"initialImageNotSet": "Imagem Inicial Não Definida",
"initialImageNotSetDesc": "Não foi possível carregar imagem incial"
},
"tooltip": {
"feature": {
"prompt": "Este é o campo de prompt. O prompt inclui objetos de geração e termos estilísticos. Também pode adicionar peso (importância do token) no prompt, mas comandos e parâmetros de CLI não funcionarão.",
"other": "Essas opções ativam modos alternativos de processamento para o Invoke. 'Seamless tiling' criará padrões repetidos na saída. 'High resolution' é uma geração em duas etapas com img2img: use essa configuração quando desejar uma imagem maior e mais coerente sem artefatos. Levará mais tempo do que o txt2img usual.",
"seed": "O valor da semente afeta o ruído inicial a partir do qual a imagem é formada. Pode usar as sementes já existentes de imagens anteriores. 'Limiar de ruído' é usado para mitigar artefatos em valores CFG altos (experimente a faixa de 0-10) e o Perlin para adicionar ruído Perlin durante a geração: ambos servem para adicionar variação às suas saídas.",
"imageToImage": "Image to Image carrega qualquer imagem como inicial, que é então usada para gerar uma nova junto com o prompt. Quanto maior o valor, mais a imagem resultante mudará. Valores de 0.0 a 1.0 são possíveis, a faixa recomendada é de 0.25 a 0.75",
"faceCorrection": "Correção de rosto com GFPGAN ou Codeformer: o algoritmo detecta rostos na imagem e corrige quaisquer defeitos. Um valor alto mudará mais a imagem, a resultar em rostos mais atraentes. Codeformer com uma fidelidade maior preserva a imagem original às custas de uma correção de rosto mais forte.",
"seamCorrection": "Controla o tratamento das emendas visíveis que ocorrem entre as imagens geradas no canvas.",
"gallery": "A galeria exibe as gerações da pasta de saída conforme elas são criadas. As configurações são armazenadas em ficheiros e acessadas pelo menu de contexto.",
"variations": "Experimente uma variação com um valor entre 0,1 e 1,0 para mudar o resultado para uma determinada semente. Variações interessantes da semente estão entre 0,1 e 0,3.",
"upscale": "Use o ESRGAN para ampliar a imagem imediatamente após a geração.",
"boundingBox": "A caixa delimitadora é a mesma que as configurações de largura e altura para Texto para Imagem ou Imagem para Imagem. Apenas a área na caixa será processada.",
"infillAndScaling": "Gira os métodos de preenchimento (usados em áreas mascaradas ou apagadas do canvas) e a escala (útil para tamanhos de caixa delimitadora pequenos)."
}
},
"unifiedCanvas": {
"emptyTempImagesFolderMessage": "Esvaziar a pasta de ficheiros de imagem temporários também reseta completamente a Tela Unificada. Isso inclui todo o histórico de desfazer/refazer, imagens na área de preparação e a camada base da tela.",
"scaledBoundingBox": "Caixa Delimitadora Escalada",
"boundingBoxPosition": "Posição da Caixa Delimitadora",
"next": "Próximo",
"accept": "Aceitar",
"showHide": "Mostrar/Esconder",
"discardAll": "Descartar Todos",
"betaClear": "Limpar",
"betaDarkenOutside": "Escurecer Externamente",
"base": "Base",
"brush": "Pincel",
"showIntermediates": "Mostrar Intermediários",
"showGrid": "Mostrar Grade",
"clearCanvasHistoryConfirm": "Tem certeza que quer limpar o histórico de tela?",
"boundingBox": "Caixa Delimitadora",
"canvasDimensions": "Dimensões da Tela",
"canvasPosition": "Posição da Tela",
"cursorPosition": "Posição do cursor",
"previous": "Anterior",
"betaLimitToBox": "Limitar á Caixa",
"layer": "Camada",
"mask": "Máscara",
"maskingOptions": "Opções de Mascaramento",
"enableMask": "Ativar Máscara",
"preserveMaskedArea": "Preservar Área da Máscara",
"clearMask": "Limpar Máscara",
"eraser": "Apagador",
"fillBoundingBox": "Preencher Caixa Delimitadora",
"eraseBoundingBox": "Apagar Caixa Delimitadora",
"colorPicker": "Seletor de Cor",
"brushOptions": "Opções de Pincel",
"brushSize": "Tamanho",
"move": "Mover",
"resetView": "Resetar Visualização",
"mergeVisible": "Fundir Visível",
"saveToGallery": "Gravar na Galeria",
"copyToClipboard": "Copiar para a Área de Transferência",
"downloadAsImage": "Descarregar Como Imagem",
"undo": "Desfazer",
"redo": "Refazer",
"clearCanvas": "Limpar Tela",
"canvasSettings": "Configurações de Tela",
"snapToGrid": "Encaixar na Grade",
"darkenOutsideSelection": "Escurecer Seleção Externa",
"autoSaveToGallery": "Gravar Automaticamente na Galeria",
"saveBoxRegionOnly": "Gravar Apenas a Região da Caixa",
"limitStrokesToBox": "Limitar Traços à Caixa",
"showCanvasDebugInfo": "Mostrar Informações de Depuração daTela",
"clearCanvasHistory": "Limpar o Histórico da Tela",
"clearHistory": "Limpar Históprico",
"clearCanvasHistoryMessage": "Limpar o histórico de tela deixa a sua tela atual intacta, mas limpa de forma irreversível o histórico de desfazer e refazer.",
"emptyTempImageFolder": "Esvaziar a Pasta de Ficheiros de Imagem Temporários",
"emptyFolder": "Esvaziar Pasta",
"emptyTempImagesFolderConfirm": "Tem certeza que quer esvaziar a pasta de ficheiros de imagem temporários?",
"activeLayer": "Camada Ativa",
"canvasScale": "Escala da Tela",
"betaPreserveMasked": "Preservar Máscarado"
},
"accessibility": {
"invokeProgressBar": "Invocar barra de progresso",
"reset": "Repôr",
"nextImage": "Próxima imagem",
"useThisParameter": "Usar este parâmetro",
"copyMetadataJson": "Copiar metadados JSON",
"zoomIn": "Ampliar",
"zoomOut": "Reduzir",
"rotateCounterClockwise": "Girar no sentido anti-horário",
"rotateClockwise": "Girar no sentido horário",
"flipVertically": "Espelhar verticalmente",
"modifyConfig": "Modificar config",
"toggleAutoscroll": "Alternar rolagem automática",
"showGallery": "Mostrar galeria",
"showOptionsPanel": "Mostrar painel de opções",
"uploadImage": "Enviar imagem",
"previousImage": "Imagem anterior",
"flipHorizontally": "Espelhar horizontalmente",
"toggleLogViewer": "Alternar visualizador de registo"
}
}

View File

@ -63,7 +63,10 @@
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados",
"langRussian": "Russo",
"langSpanish": "Espanhol"
"langSpanish": "Espanhol",
"pinOptionsPanel": "Fixar painel de opções",
"loadingInvokeAI": "Carregando Invoke AI",
"loading": "Carregando"
},
"gallery": {
"generations": "Gerações",

View File

@ -46,7 +46,15 @@
"statusLoadingModel": "Загрузка модели",
"statusModelChanged": "Модель изменена",
"githubLabel": "Github",
"discordLabel": "Discord"
"discordLabel": "Discord",
"statusMergingModels": "Слияние моделей",
"statusModelConverted": "Модель сконвертирована",
"statusMergedModels": "Модели объединены",
"pinOptionsPanel": "Закрепить панель настроек",
"loading": "Загрузка",
"loadingInvokeAI": "Загрузка Invoke AI",
"back": "Назад",
"statusConvertingModel": "Конвертация модели"
},
"gallery": {
"generations": "Генерации",
@ -323,7 +331,30 @@
"deleteConfig": "Удалить конфигурацию",
"deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?",
"deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.",
"repoIDValidationMsg": "Онлайн-репозиторий модели"
"repoIDValidationMsg": "Онлайн-репозиторий модели",
"convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. Модели обычно занимают 4 7 Гб.",
"invokeAIFolder": "Каталог InvokeAI",
"ignoreMismatch": "Игнорировать несоответствия между выбранными моделями",
"addCheckpointModel": "Добавить модель Checkpoint/Safetensor",
"formMessageDiffusersModelLocationDesc": "Укажите хотя бы одно.",
"convertToDiffusersHelpText3": "Файл модели на диске НЕ будет удалён или изменён. Вы сможете заново добавить его в Model Manager при необходимости.",
"vaeRepoID": "ID репозитория VAE",
"mergedModelName": "Название объединенной модели",
"checkpointModels": "Checkpoints",
"allModels": "Все модели",
"addDiffuserModel": "Добавить Diffusers",
"repo_id": "ID репозитория",
"formMessageDiffusersVAELocationDesc": "Если не указано, InvokeAI будет искать файл VAE рядом с моделью.",
"convert": "Преобразовать",
"convertToDiffusers": "Преобразовать в Diffusers",
"convertToDiffusersHelpText1": "Модель будет преобразована в формат 🧨 Diffusers.",
"convertToDiffusersHelpText4": "Это единоразовое действие. Оно может занять 30—60 секунд в зависимости от характеристик вашего компьютера.",
"convertToDiffusersHelpText6": "Вы хотите преобразовать эту модель?",
"statusConverting": "Преобразование",
"modelConverted": "Модель преобразована",
"invokeRoot": "Каталог InvokeAI",
"modelsMerged": "Модели объединены",
"mergeModels": "Объединить модели"
},
"parameters": {
"images": "Изображения",
@ -503,5 +534,8 @@
"betaDarkenOutside": "Затемнить снаружи",
"betaLimitToBox": "Ограничить выделением",
"betaPreserveMasked": "Сохранять маскируемую область"
},
"accessibility": {
"modelSelect": "Выбор модели"
}
}

View File

@ -19,6 +19,21 @@
"discordLabel": "Discord",
"nodesDesc": "使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。",
"reportBugLabel": "回報錯誤",
"githubLabel": "GitHub"
"githubLabel": "GitHub",
"langKorean": "韓語",
"langPortuguese": "葡萄牙語",
"hotkeysLabel": "快捷鍵",
"languagePickerLabel": "切換語言",
"langDutch": "荷蘭語",
"langFrench": "法語",
"langGerman": "德語",
"langItalian": "義大利語",
"langJapanese": "日語",
"langPolish": "波蘭語",
"langBrPortuguese": "巴西葡萄牙語",
"langRussian": "俄語",
"langSpanish": "西班牙語",
"text2img": "文字到圖像",
"unifiedCanvas": "統一畫布"
}
}

View File

@ -9,34 +9,53 @@ import useToastWatcher from 'features/system/hooks/useToastWatcher';
import FloatingGalleryButton from 'features/ui/components/FloatingGalleryButton';
import FloatingParametersPanelButtons from 'features/ui/components/FloatingParametersPanelButtons';
import { Box, Grid } from '@chakra-ui/react';
import { APP_HEIGHT, APP_PADDING, APP_WIDTH } from 'theme/util/constants';
import { Box, Flex, Grid, Portal, useColorMode } from '@chakra-ui/react';
import { APP_HEIGHT, APP_WIDTH } from 'theme/util/constants';
import ImageGalleryPanel from 'features/gallery/components/ImageGalleryPanel';
import Lightbox from 'features/lightbox/components/Lightbox';
import { useAppSelector } from './storeHooks';
import { PropsWithChildren, useEffect } from 'react';
keepGUIAlive();
const App = () => {
const App = (props: PropsWithChildren) => {
useToastWatcher();
const currentTheme = useAppSelector((state) => state.ui.currentTheme);
const { setColorMode } = useColorMode();
useEffect(() => {
setColorMode(['light'].includes(currentTheme) ? 'light' : 'dark');
}, [setColorMode, currentTheme]);
return (
<Grid w="100vw" h="100vh">
<Lightbox />
<ImageUploader>
<ProgressBar />
<Grid
gap={4}
p={APP_PADDING}
p={4}
gridAutoRows="min-content auto"
w={APP_WIDTH}
h={APP_HEIGHT}
>
<SiteHeader />
<InvokeTabs />
{props.children || <SiteHeader />}
<Flex gap={4} w="full" h="full">
<InvokeTabs />
<ImageGalleryPanel />
</Flex>
</Grid>
<Box>
<Console />
</Box>
</ImageUploader>
<FloatingParametersPanelButtons />
<FloatingGalleryButton />
<Portal>
<FloatingParametersPanelButtons />
</Portal>
<Portal>
<FloatingGalleryButton />
</Portal>
</Grid>
);
};
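
The new useEffect above keeps Chakra's color mode in step with the selected app theme. A minimal sketch of that pattern in isolation (the hook name useSyncColorMode is hypothetical):

import { useEffect } from 'react';
import { useColorMode } from '@chakra-ui/react';

// Any theme other than 'light' is treated as a dark variant, mirroring the
// ['light'].includes(currentTheme) check in App.tsx above.
const useSyncColorMode = (currentTheme: string) => {
  const { setColorMode } = useColorMode();
  useEffect(() => {
    setColorMode(currentTheme === 'light' ? 'light' : 'dark');
  }, [setColorMode, currentTheme]);
};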

View File

@ -9,6 +9,15 @@ import { greenTeaThemeColors } from 'theme/colors/greenTea';
import { invokeAIThemeColors } from 'theme/colors/invokeAI';
import { lightThemeColors } from 'theme/colors/lightTheme';
import { oceanBlueColors } from 'theme/colors/oceanBlue';
import '@fontsource/inter/100.css';
import '@fontsource/inter/200.css';
import '@fontsource/inter/300.css';
import '@fontsource/inter/400.css';
import '@fontsource/inter/500.css';
import '@fontsource/inter/600.css';
import '@fontsource/inter/700.css';
import '@fontsource/inter/800.css';
import '@fontsource/inter/900.css';
type ThemeLocaleProviderProps = {
children: ReactNode;

View File

@ -57,10 +57,13 @@ const galleryBlacklist = [
'currentImage',
'currentImageUuid',
'shouldAutoSwitchToNewImages',
'shouldHoldGalleryOpen',
'intermediateImage',
].map((blacklistItem) => `gallery.${blacklistItem}`);
const lightboxBlacklist = ['isLightboxOpen'].map(
(blacklistItem) => `lightbox.${blacklistItem}`
);
const rootReducer = combineReducers({
generation: generationReducer,
postprocessing: postprocessingReducer,
@ -75,7 +78,12 @@ const rootPersistConfig = getPersistConfig({
key: 'root',
storage,
rootReducer,
blacklist: [...canvasBlacklist, ...systemBlacklist, ...galleryBlacklist],
blacklist: [
...canvasBlacklist,
...systemBlacklist,
...galleryBlacklist,
...lightboxBlacklist,
],
debounce: 300,
});

View File

@ -1,5 +1,6 @@
import { Box, forwardRef, Icon } from '@chakra-ui/react';
import { Feature } from 'app/features';
import { memo } from 'react';
import { IconType } from 'react-icons';
import { MdHelp } from 'react-icons/md';
import GuidePopover from './GuidePopover';
@ -19,4 +20,4 @@ const GuideIcon = forwardRef(
)
);
export default GuideIcon;
export default memo(GuideIcon);

View File

@ -11,7 +11,7 @@ import { Feature, useFeatureHelpInfo } from 'app/features';
import { useAppSelector } from 'app/storeHooks';
import { systemSelector } from 'features/system/store/systemSelectors';
import { SystemState } from 'features/system/store/systemSlice';
import { ReactElement } from 'react';
import { memo, ReactElement } from 'react';
type GuideProps = {
children: ReactElement;
@ -30,7 +30,7 @@ const GuidePopover = ({ children, feature }: GuideProps) => {
if (!shouldDisplayGuides) return null;
return (
<Popover trigger="hover">
<Popover trigger="hover" isLazy>
<PopoverTrigger>
<Box>{children}</Box>
</PopoverTrigger>
@ -46,4 +46,4 @@ const GuidePopover = ({ children, feature }: GuideProps) => {
);
};
export default GuidePopover;
export default memo(GuidePopover);

View File

@ -8,7 +8,7 @@ import {
forwardRef,
useDisclosure,
} from '@chakra-ui/react';
import { cloneElement, ReactElement, ReactNode, useRef } from 'react';
import { cloneElement, memo, ReactElement, ReactNode, useRef } from 'react';
import IAIButton from './IAIButton';
type Props = {
@ -79,4 +79,4 @@ const IAIAlertDialog = forwardRef((props: Props, ref) => {
</>
);
});
export default IAIAlertDialog;
export default memo(IAIAlertDialog);

View File

@ -5,7 +5,7 @@ import {
Tooltip,
TooltipProps,
} from '@chakra-ui/react';
import { ReactNode } from 'react';
import { memo, ReactNode } from 'react';
export interface IAIButtonProps extends ButtonProps {
tooltip?: string;
@ -25,4 +25,4 @@ const IAIButton = forwardRef((props: IAIButtonProps, forwardedRef) => {
);
});
export default IAIButton;
export default memo(IAIButton);

View File

@ -1,5 +1,5 @@
import { Checkbox, CheckboxProps } from '@chakra-ui/react';
import type { ReactNode } from 'react';
import { memo, ReactNode } from 'react';
type IAICheckboxProps = CheckboxProps & {
label: string | ReactNode;
@ -14,4 +14,4 @@ const IAICheckbox = (props: IAICheckboxProps) => {
);
};
export default IAICheckbox;
export default memo(IAICheckbox);

View File

@ -1,4 +1,5 @@
import { chakra, ChakraProps } from '@chakra-ui/react';
import { memo } from 'react';
import { RgbaColorPicker } from 'react-colorful';
import { ColorPickerBaseProps, RgbaColor } from 'react-colorful/dist/types';
@ -35,4 +36,4 @@ const IAIColorPicker = (props: IAIColorPickerProps) => {
);
};
export default IAIColorPicker;
export default memo(IAIColorPicker);

View File

@ -0,0 +1,15 @@
import { FormErrorMessage, FormErrorMessageProps } from '@chakra-ui/react';
import { ReactNode } from 'react';
type IAIFormErrorMessageProps = FormErrorMessageProps & {
children: ReactNode | string;
};
export default function IAIFormErrorMessage(props: IAIFormErrorMessageProps) {
const { children, ...rest } = props;
return (
<FormErrorMessage color="error.400" {...rest}>
{children}
</FormErrorMessage>
);
}

View File

@ -0,0 +1,15 @@
import { FormHelperText, FormHelperTextProps } from '@chakra-ui/react';
import { ReactNode } from 'react';
type IAIFormHelperTextProps = FormHelperTextProps & {
children: ReactNode | string;
};
export default function IAIFormHelperText(props: IAIFormHelperTextProps) {
const { children, ...rest } = props;
return (
<FormHelperText margin={0} color="base.400" {...rest}>
{children}
</FormHelperText>
);
}

View File

@ -5,15 +5,17 @@ import {
Tooltip,
TooltipProps,
} from '@chakra-ui/react';
import { memo } from 'react';
export type IAIIconButtonProps = IconButtonProps & {
role?: string;
tooltip?: string;
tooltipProps?: Omit<TooltipProps, 'children'>;
isChecked?: boolean;
};
const IAIIconButton = forwardRef((props: IAIIconButtonProps, forwardedRef) => {
const { tooltip = '', tooltipProps, isChecked, ...rest } = props;
const { role, tooltip = '', tooltipProps, isChecked, ...rest } = props;
return (
<Tooltip
@ -26,6 +28,7 @@ const IAIIconButton = forwardRef((props: IAIIconButtonProps, forwardedRef) => {
>
<IconButton
ref={forwardedRef}
role={role}
aria-checked={isChecked !== undefined ? isChecked : undefined}
{...rest}
/>
@ -33,4 +36,5 @@ const IAIIconButton = forwardRef((props: IAIIconButtonProps, forwardedRef) => {
);
});
export default IAIIconButton;
IAIIconButton.displayName = 'IAIIconButton';
export default memo(IAIIconButton);
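
The same memo() wrapping is applied to the shared components that follow. Because forwardRef returns an anonymous component, a displayName is assigned before memoizing so React DevTools output stays readable. A minimal sketch of the pattern using React's own forwardRef (the Example component is illustrative only):

import { forwardRef, memo } from 'react';

type ExampleProps = { label: string };

// forwardRef -> displayName -> memo, in that order.
const Example = forwardRef<HTMLButtonElement, ExampleProps>((props, ref) => (
  <button ref={ref}>{props.label}</button>
));
Example.displayName = 'Example';

export default memo(Example);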

View File

@ -5,7 +5,7 @@ import {
Input,
InputProps,
} from '@chakra-ui/react';
import { ChangeEvent } from 'react';
import { ChangeEvent, memo } from 'react';
interface IAIInputProps extends InputProps {
label?: string;
@ -15,7 +15,7 @@ interface IAIInputProps extends InputProps {
formControlProps?: Omit<FormControlProps, 'isInvalid' | 'isDisabled'>;
}
export default function IAIInput(props: IAIInputProps) {
const IAIInput = (props: IAIInputProps) => {
const {
label = '',
isDisabled = false,
@ -34,4 +34,6 @@ export default function IAIInput(props: IAIInputProps) {
<Input {...rest} />
</FormControl>
);
}
};
export default memo(IAIInput);

View File

@ -16,7 +16,7 @@ import {
} from '@chakra-ui/react';
import { clamp } from 'lodash';
import { FocusEvent, useEffect, useState } from 'react';
import { FocusEvent, memo, useEffect, useState } from 'react';
const numberStringRegex = /^-?(0\.)?\.?$/;
@ -139,4 +139,4 @@ const IAINumberInput = (props: Props) => {
);
};
export default IAINumberInput;
export default memo(IAINumberInput);

View File

@ -0,0 +1,18 @@
import { useToken } from '@chakra-ui/react';
import { ReactNode } from 'react';
type IAIOptionProps = {
children: ReactNode | string | number;
value: string | number;
};
export default function IAIOption(props: IAIOptionProps) {
const { children, value } = props;
const [base800, base200] = useToken('colors', ['base.800', 'base.200']);
return (
<option value={value} style={{ background: base800, color: base200 }}>
{children}
</option>
);
}

View File

@ -6,7 +6,7 @@ import {
PopoverProps,
PopoverTrigger,
} from '@chakra-ui/react';
import { ReactNode } from 'react';
import { memo, ReactNode } from 'react';
type IAIPopoverProps = PopoverProps & {
triggerComponent: ReactNode;
@ -35,4 +35,4 @@ const IAIPopover = (props: IAIPopoverProps) => {
);
};
export default IAIPopover;
export default memo(IAIPopover);

View File

@ -6,7 +6,8 @@ import {
Tooltip,
TooltipProps,
} from '@chakra-ui/react';
import { MouseEvent } from 'react';
import { memo, MouseEvent } from 'react';
import IAIOption from './IAIOption';
type IAISelectProps = SelectProps & {
label?: string;
@ -37,13 +38,13 @@ const IAISelect = (props: IAISelectProps) => {
<Select {...rest}>
{validValues.map((opt) => {
return typeof opt === 'string' || typeof opt === 'number' ? (
<option key={opt} value={opt}>
<IAIOption key={opt} value={opt}>
{opt}
</option>
</IAIOption>
) : (
<option key={opt.value} value={opt.value}>
<IAIOption key={opt.value} value={opt.value}>
{opt.key}
</option>
</IAIOption>
);
})}
</Select>
@ -52,4 +53,4 @@ const IAISelect = (props: IAISelectProps) => {
);
};
export default IAISelect;
export default memo(IAISelect);

View File

@ -11,7 +11,7 @@ import {
IconButtonProps,
ButtonProps,
} from '@chakra-ui/react';
import { MouseEventHandler, ReactNode } from 'react';
import { memo, MouseEventHandler, ReactNode } from 'react';
import { MdArrowDropDown, MdArrowDropUp } from 'react-icons/md';
interface IAIMenuItem {
@ -31,7 +31,7 @@ interface IAIMenuProps {
menuItemProps?: MenuItemProps;
}
export default function IAISimpleMenu(props: IAIMenuProps) {
const IAISimpleMenu = (props: IAIMenuProps) => {
const {
menuType = 'icon',
iconTooltip,
@ -83,4 +83,6 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
)}
</Menu>
);
}
};
export default memo(IAISimpleMenu);

View File

@ -25,7 +25,8 @@ import {
} from '@chakra-ui/react';
import { clamp } from 'lodash';
import { FocusEvent, useEffect, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { FocusEvent, memo, useEffect, useMemo, useState } from 'react';
import { BiReset } from 'react-icons/bi';
import IAIIconButton, { IAIIconButtonProps } from './IAIIconButton';
@ -61,7 +62,7 @@ export type IAIFullSliderProps = {
sliderIAIIconButtonProps?: IAIIconButtonProps;
};
export default function IAISlider(props: IAIFullSliderProps) {
const IAISlider = (props: IAIFullSliderProps) => {
const [showTooltip, setShowTooltip] = useState(false);
const {
label,
@ -96,6 +97,8 @@ export default function IAISlider(props: IAIFullSliderProps) {
...rest
} = props;
const { t } = useTranslation();
const [localInputValue, setLocalInputValue] = useState<
string | number | undefined
>(String(value));
@ -171,16 +174,22 @@ export default function IAISlider(props: IAIFullSliderProps) {
<>
<SliderMark
value={min}
insetInlineStart={0}
sx={{ insetInlineStart: 'unset !important' }}
// insetInlineStart={0}
sx={{
insetInlineStart: '0 !important',
insetInlineEnd: 'unset !important',
}}
{...sliderMarkProps}
>
{min}
</SliderMark>
<SliderMark
value={max}
insetInlineEnd={0}
sx={{ insetInlineStart: 'unset !important' }}
// insetInlineEnd={0}
sx={{
insetInlineStart: 'unset !important',
insetInlineEnd: '0 !important',
}}
{...sliderMarkProps}
>
{max}
@ -234,7 +243,7 @@ export default function IAISlider(props: IAIFullSliderProps) {
{withReset && (
<IAIIconButton
size="sm"
aria-label="Reset"
aria-label={t('accessibility.reset')}
tooltip="Reset"
icon={<BiReset />}
onClick={handleResetDisable}
@ -245,4 +254,6 @@ export default function IAISlider(props: IAIFullSliderProps) {
</HStack>
</FormControl>
);
}
};
export default memo(IAISlider);

View File

@ -6,6 +6,7 @@ import {
Switch,
SwitchProps,
} from '@chakra-ui/react';
import { memo } from 'react';
interface Props extends SwitchProps {
label?: string;
@ -44,4 +45,4 @@ const IAISwitch = (props: Props) => {
);
};
export default IAISwitch;
export default memo(IAISwitch);

View File

@ -3,10 +3,11 @@ import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerCo
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import useImageUploader from 'common/hooks/useImageUploader';
import { uploadImage } from 'features/gallery/store/thunks/uploadImage';
import { tabDict } from 'features/ui/components/InvokeTabs';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { ResourceKey } from 'i18next';
import {
KeyboardEvent,
memo,
ReactNode,
useCallback,
useEffect,
@ -134,7 +135,7 @@ const ImageUploader = (props: ImageUploaderProps) => {
const overlaySecondaryText = ['img2img', 'unifiedCanvas'].includes(
activeTabName
)
? ` to ${tabDict[activeTabName as keyof typeof tabDict].tooltip}`
? ` to ${String(t(`common.${activeTabName}` as ResourceKey))}`
: ``;
return (
@ -161,4 +162,4 @@ const ImageUploader = (props: ImageUploaderProps) => {
);
};
export default ImageUploader;
export default memo(ImageUploader);

View File

@ -1,14 +1,16 @@
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import { useContext } from 'react';
import { useTranslation } from 'react-i18next';
import { FaUpload } from 'react-icons/fa';
import IAIIconButton from './IAIIconButton';
const ImageUploaderIconButton = () => {
const { t } = useTranslation();
const openImageUploader = useContext(ImageUploaderTriggerContext);
return (
<IAIIconButton
aria-label="Upload Image"
aria-label={t('accessibility.uploadImage')}
tooltip="Upload Image"
icon={<FaUpload />}
onClick={openImageUploader || undefined}

View File

@ -1,4 +1,4 @@
import React, { lazy } from 'react';
import React, { lazy, PropsWithChildren } from 'react';
import { Provider } from 'react-redux';
import { PersistGate } from 'redux-persist/integration/react';
import { store } from './app/store';
@ -21,14 +21,14 @@ import './i18n';
const App = lazy(() => import('./app/App'));
const ThemeLocaleProvider = lazy(() => import('./app/ThemeLocaleProvider'));
export default function Component() {
export default function Component(props: PropsWithChildren) {
return (
<React.StrictMode>
<Provider store={store}>
<PersistGate loading={<Loading />} persistor={persistor}>
<React.Suspense fallback={<Loading showText />}>
<ThemeLocaleProvider>
<App />
<App>{props.children}</App>
</ThemeLocaleProvider>
</React.Suspense>
</PersistGate>

View File

@ -0,0 +1,7 @@
import Component from './component';
import InvokeAiLogoComponent from './features/system/components/InvokeAILogoComponent';
import ThemeChanger from './features/system/components/ThemeChanger';
export default Component;
export { InvokeAiLogoComponent, ThemeChanger };
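
With component.tsx now accepting children and this index re-exporting the logo and theme components, the package can be embedded with a custom header. A hedged sketch (EmbeddedInvoke and CustomHeader are hypothetical; children replace the default SiteHeader, per the props.children || <SiteHeader /> fallback in App.tsx):

import InvokeUI, { InvokeAiLogoComponent, ThemeChanger } from '.';

const CustomHeader = () => (
  <header>
    <InvokeAiLogoComponent />
    <ThemeChanger />
  </header>
);

export const EmbeddedInvoke = () => (
  <InvokeUI>
    <CustomHeader />
  </InvokeUI>
);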

View File

@ -1,5 +1,6 @@
// Grid drawing adapted from https://longviewcoder.com/2021/12/08/konva-a-better-grid/
import { useToken } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { useAppSelector } from 'app/storeHooks';
@ -22,13 +23,6 @@ const selector = createSelector(
}
);
const gridLinesColor = {
dark: 'rgba(255, 255, 255, 0.2)',
green: 'rgba(255, 255, 255, 0.2)',
light: 'rgba(0, 0, 0, 0.2)',
ocean: 'rgba(136, 148, 184, 0.2)',
};
const IAICanvasGrid = () => {
const currentTheme = useAppSelector(
(state: RootState) => state.ui.currentTheme
@ -37,6 +31,8 @@ const IAICanvasGrid = () => {
useAppSelector(selector);
const [gridLines, setGridLines] = useState<ReactNode[]>([]);
const [gridLineColor] = useToken('colors', ['gridLineColor']);
const unscale = useCallback(
(value: number) => {
return value / stageScale;
@ -45,9 +41,6 @@ const IAICanvasGrid = () => {
);
useLayoutEffect(() => {
const gridLineColor =
gridLinesColor[currentTheme as keyof typeof gridLinesColor];
const { width, height } = stageDimensions;
const { x, y } = stageCoordinates;
@ -112,7 +105,14 @@ const IAICanvasGrid = () => {
));
setGridLines(xLines.concat(yLines));
}, [stageScale, stageCoordinates, stageDimensions, currentTheme, unscale]);
}, [
stageScale,
stageCoordinates,
stageDimensions,
currentTheme,
unscale,
gridLineColor,
]);
return <Group>{gridLines}</Group>;
};
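
The hard-coded per-theme color map is replaced by a gridLineColor token resolved through useToken, so each theme supplies its own value. A sketch of how such a token could be registered (illustrative; the actual theme wiring in this repo may differ, and the rgba value shown is the one removed from the ocean entry above):

import { extendTheme } from '@chakra-ui/react';

// Each theme defines the 'gridLineColor' color token that
// useToken('colors', ['gridLineColor']) resolves at render time.
const oceanTheme = extendTheme({
  colors: {
    gridLineColor: 'rgba(136, 148, 184, 0.2)',
  },
});

export default oceanTheme;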

View File

@ -104,7 +104,7 @@ const IAICanvasStatusText = () => {
margin: 1,
borderRadius: 'base',
pointerEvents: 'none',
bg: 'blackAlpha.500',
bg: 'base.800',
}}
>
<Box

View File

@ -0,0 +1,16 @@
import { AppDispatch, AppGetState } from 'app/store';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { debounce } from 'lodash';
import { setDoesCanvasNeedScaling } from '../canvasSlice';
const debouncedCanvasScale = debounce((dispatch: AppDispatch) => {
dispatch(setDoesCanvasNeedScaling(true));
}, 300);
export const requestCanvasRescale =
() => (dispatch: AppDispatch, getState: AppGetState) => {
const activeTabName = activeTabNameSelector(getState());
if (activeTabName === 'unifiedCanvas') {
debouncedCanvasScale(dispatch);
}
};
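requestCanvasRescale wraps the dispatch in a trailing-edge lodash debounce: bursts of calls (pin toggles, drawer drags) collapse into one setDoesCanvasNeedScaling(true) 300 ms after the last call, and only while the unified canvas tab is active. The debounce behavior it relies on, in isolation:

import { debounce } from 'lodash';

// Trailing-edge by default: the wrapped function runs once,
// 300 ms after the most recent call in a burst.
const rescale = debounce((reason: string) => console.log(reason), 300);
rescale('pin');
rescale('resize');
rescale('toggle'); // only 'toggle' is logged, ~300 ms from now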


@ -7,10 +7,7 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIButton from 'common/components/IAIButton';
import IAIIconButton from 'common/components/IAIIconButton';
import IAIPopover from 'common/components/IAIPopover';
import {
setDoesCanvasNeedScaling,
setInitialCanvasImage,
} from 'features/canvas/store/canvasSlice';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import { GalleryState } from 'features/gallery/store/gallerySlice';
import { lightboxSelector } from 'features/lightbox/store/lightboxSelectors';
import { setIsLightboxOpen } from 'features/lightbox/store/lightboxSlice';
@ -52,6 +49,7 @@ import { gallerySelector } from '../store/gallerySelectors';
import DeleteImageModal from './DeleteImageModal';
import { useCallback } from 'react';
import useSetBothPrompts from 'features/parameters/hooks/usePrompt';
import { requestCanvasRescale } from 'features/canvas/store/thunks/requestCanvasScale';
const currentImageButtonsSelector = createSelector(
[
@ -361,7 +359,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
if (isLightboxOpen) dispatch(setIsLightboxOpen(false));
dispatch(setInitialCanvasImage(currentImage));
dispatch(setDoesCanvasNeedScaling(true));
dispatch(requestCanvasRescale());
if (activeTabName !== 'unifiedCanvas') {
dispatch(setActiveTab('unifiedCanvas'));
@ -419,7 +417,6 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
sx={{
flexDirection: 'column',
rowGap: 2,
w: 52,
}}
>
<IAIButton


@ -1,9 +1,10 @@
import { Flex, Image } from '@chakra-ui/react';
import { Box, Flex, Image } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/storeHooks';
import { GalleryState } from 'features/gallery/store/gallerySlice';
import { uiSelector } from 'features/ui/store/uiSelectors';
import { isEqual } from 'lodash';
import { APP_METADATA_HEIGHT } from 'theme/util/constants';
import { gallerySelector } from '../store/gallerySelectors';
import ImageMetadataViewer from './ImageMetaDataViewer/ImageMetadataViewer';
@ -45,6 +46,8 @@ export default function CurrentImagePreview() {
{imageToDisplay && (
<Image
src={imageToDisplay.url}
width={imageToDisplay.width}
height={imageToDisplay.height}
sx={{
objectFit: 'contain',
maxWidth: '100%',
@ -54,18 +57,23 @@ export default function CurrentImagePreview() {
imageRendering: isIntermediate ? 'pixelated' : 'initial',
borderRadius: 'base',
}}
{...(isIntermediate && {
width: imageToDisplay.width,
height: imageToDisplay.height,
})}
/>
)}
{!shouldShowImageDetails && <NextPrevImageButtons />}
{shouldShowImageDetails && imageToDisplay && (
<ImageMetadataViewer
image={imageToDisplay}
styleClass="current-image-metadata"
/>
<Box
sx={{
position: 'absolute',
top: '0',
width: '100%',
height: '100%',
borderRadius: 'base',
overflow: 'scroll',
maxHeight: APP_METADATA_HEIGHT,
}}
>
<ImageMetadataViewer image={imageToDisplay} />
</Box>
)}
</Flex>
);


@ -0,0 +1,257 @@
import { ButtonGroup, Flex, Grid, Icon, Text } from '@chakra-ui/react';
import { requestImages } from 'app/socketio/actions';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIButton from 'common/components/IAIButton';
import IAICheckbox from 'common/components/IAICheckbox';
import IAIIconButton from 'common/components/IAIIconButton';
import IAIPopover from 'common/components/IAIPopover';
import IAISlider from 'common/components/IAISlider';
import { imageGallerySelector } from 'features/gallery/store/gallerySelectors';
import {
setCurrentCategory,
setGalleryImageMinimumWidth,
setGalleryImageObjectFit,
setShouldAutoSwitchToNewImages,
setShouldUseSingleGalleryColumn,
} from 'features/gallery/store/gallerySlice';
import { togglePinGalleryPanel } from 'features/ui/store/uiSlice';
import { ChangeEvent, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { BsPinAngle, BsPinAngleFill } from 'react-icons/bs';
import { FaImage, FaUser, FaWrench } from 'react-icons/fa';
import { MdPhotoLibrary } from 'react-icons/md';
import HoverableImage from './HoverableImage';
import Scrollable from 'features/ui/components/common/Scrollable';
import { requestCanvasRescale } from 'features/canvas/store/thunks/requestCanvasScale';
const GALLERY_SHOW_BUTTONS_MIN_WIDTH = 290;
const ImageGalleryContent = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const resizeObserverRef = useRef<HTMLDivElement>(null);
const [shouldShowIconButtons, setShouldShowIconButtons] = useState(true);
const {
images,
currentCategory,
currentImageUuid,
shouldPinGallery,
galleryImageMinimumWidth,
galleryGridTemplateColumns,
galleryImageObjectFit,
shouldAutoSwitchToNewImages,
areMoreImagesAvailable,
shouldUseSingleGalleryColumn,
} = useAppSelector(imageGallerySelector);
const handleClickLoadMore = () => {
dispatch(requestImages(currentCategory));
};
const handleChangeGalleryImageMinimumWidth = (v: number) => {
dispatch(setGalleryImageMinimumWidth(v));
};
const handleSetShouldPinGallery = () => {
dispatch(togglePinGalleryPanel());
dispatch(requestCanvasRescale());
};
useEffect(() => {
if (!resizeObserverRef.current) {
return;
}
const resizeObserver = new ResizeObserver(() => {
if (!resizeObserverRef.current) {
return;
}
if (
resizeObserverRef.current.clientWidth < GALLERY_SHOW_BUTTONS_MIN_WIDTH
) {
setShouldShowIconButtons(true);
return;
}
setShouldShowIconButtons(false);
});
resizeObserver.observe(resizeObserverRef.current);
return () => resizeObserver.disconnect(); // clean up
}, []);
return (
<Flex flexDirection="column" w="full" h="full" gap={4}>
<Flex
ref={resizeObserverRef}
alignItems="center"
justifyContent="space-between"
>
<ButtonGroup
size="sm"
isAttached
w="max-content"
justifyContent="stretch"
>
{shouldShowIconButtons ? (
<>
<IAIIconButton
aria-label={t('gallery.showGenerations')}
tooltip={t('gallery.showGenerations')}
isChecked={currentCategory === 'result'}
role="radio"
icon={<FaImage />}
onClick={() => dispatch(setCurrentCategory('result'))}
/>
<IAIIconButton
aria-label={t('gallery.showUploads')}
tooltip={t('gallery.showUploads')}
role="radio"
isChecked={currentCategory === 'user'}
icon={<FaUser />}
onClick={() => dispatch(setCurrentCategory('user'))}
/>
</>
) : (
<>
<IAIButton
size="sm"
isChecked={currentCategory === 'result'}
onClick={() => dispatch(setCurrentCategory('result'))}
flexGrow={1}
>
{t('gallery.generations')}
</IAIButton>
<IAIButton
size="sm"
isChecked={currentCategory === 'user'}
onClick={() => dispatch(setCurrentCategory('user'))}
flexGrow={1}
>
{t('gallery.uploads')}
</IAIButton>
</>
)}
</ButtonGroup>
<Flex gap={2}>
<IAIPopover
triggerComponent={
<IAIIconButton
size="sm"
aria-label={t('gallery.gallerySettings')}
icon={<FaWrench />}
/>
}
>
<Flex direction="column" gap={2}>
<IAISlider
value={galleryImageMinimumWidth}
onChange={handleChangeGalleryImageMinimumWidth}
min={32}
max={256}
hideTooltip={true}
label={t('gallery.galleryImageSize')}
withReset
handleReset={() => dispatch(setGalleryImageMinimumWidth(64))}
/>
<IAICheckbox
label={t('gallery.maintainAspectRatio')}
isChecked={galleryImageObjectFit === 'contain'}
onChange={() =>
dispatch(
setGalleryImageObjectFit(
galleryImageObjectFit === 'contain' ? 'cover' : 'contain'
)
)
}
/>
<IAICheckbox
label={t('gallery.autoSwitchNewImages')}
isChecked={shouldAutoSwitchToNewImages}
onChange={(e: ChangeEvent<HTMLInputElement>) =>
dispatch(setShouldAutoSwitchToNewImages(e.target.checked))
}
/>
<IAICheckbox
label={t('gallery.singleColumnLayout')}
isChecked={shouldUseSingleGalleryColumn}
onChange={(e: ChangeEvent<HTMLInputElement>) =>
dispatch(setShouldUseSingleGalleryColumn(e.target.checked))
}
/>
</Flex>
</IAIPopover>
<IAIIconButton
size="sm"
aria-label={t('gallery.pinGallery')}
tooltip={`${t('gallery.pinGallery')} (Shift+G)`}
onClick={handleSetShouldPinGallery}
icon={shouldPinGallery ? <BsPinAngleFill /> : <BsPinAngle />}
/>
</Flex>
</Flex>
<Scrollable>
<Flex direction="column" gap={2} h="full">
{images.length || areMoreImagesAvailable ? (
<>
<Grid
gap={2}
style={{ gridTemplateColumns: galleryGridTemplateColumns }}
>
{images.map((image) => {
const { uuid } = image;
const isSelected = currentImageUuid === uuid;
return (
<HoverableImage
key={uuid}
image={image}
isSelected={isSelected}
/>
);
})}
</Grid>
<IAIButton
onClick={handleClickLoadMore}
isDisabled={!areMoreImagesAvailable}
flexShrink={0}
>
{areMoreImagesAvailable
? t('gallery.loadMore')
: t('gallery.allImagesLoaded')}
</IAIButton>
</>
) : (
<Flex
sx={{
flexDirection: 'column',
alignItems: 'center',
justifyContent: 'center',
gap: 2,
padding: 8,
h: '100%',
w: '100%',
color: 'base.500',
}}
>
<Icon
as={MdPhotoLibrary}
sx={{
w: 16,
h: 16,
}}
/>
<Text textAlign="center">{t('gallery.noImagesInGallery')}</Text>
</Flex>
)}
</Flex>
</Scrollable>
</Flex>
);
};
ImageGalleryContent.displayName = 'ImageGalleryContent';
export default ImageGalleryContent;
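The effect above observes the header row and switches to compact icon buttons whenever the container drops below GALLERY_SHOW_BUTTONS_MIN_WIDTH (290 px), disconnecting the observer on unmount. The same pattern as a reusable hook — a hypothetical generalization, not part of this codebase:

import { useEffect, useRef, useState } from 'react';

// Report whether an observed container is narrower than a breakpoint,
// disconnecting the ResizeObserver when the component unmounts.
function useIsNarrow(breakpoint: number) {
  const ref = useRef<HTMLDivElement>(null);
  const [isNarrow, setIsNarrow] = useState(false);

  useEffect(() => {
    const el = ref.current;
    if (!el) return;
    const observer = new ResizeObserver(() => {
      setIsNarrow(el.clientWidth < breakpoint);
    });
    observer.observe(el);
    return () => observer.disconnect();
  }, [breakpoint]);

  return { ref, isNarrow };
}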


@ -0,0 +1,202 @@
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { gallerySelector } from 'features/gallery/store/gallerySelectors';
import {
selectNextImage,
selectPrevImage,
setGalleryImageMinimumWidth,
} from 'features/gallery/store/gallerySlice';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { clamp, isEqual } from 'lodash';
import { useHotkeys } from 'react-hotkeys-hook';
import './ImageGallery.css';
import ImageGalleryContent from './ImageGalleryContent';
import ResizableDrawer from 'features/ui/components/common/ResizableDrawer/ResizableDrawer';
import {
setShouldShowGallery,
toggleGalleryPanel,
togglePinGalleryPanel,
} from 'features/ui/store/uiSlice';
import { createSelector } from '@reduxjs/toolkit';
import {
activeTabNameSelector,
uiSelector,
} from 'features/ui/store/uiSelectors';
import { isStagingSelector } from 'features/canvas/store/canvasSelectors';
import { requestCanvasRescale } from 'features/canvas/store/thunks/requestCanvasScale';
import { lightboxSelector } from 'features/lightbox/store/lightboxSelectors';
const GALLERY_TAB_WIDTHS: Record<
InvokeTabName,
{ galleryMinWidth: number; galleryMaxWidth: number }
> = {
txt2img: { galleryMinWidth: 200, galleryMaxWidth: 500 },
img2img: { galleryMinWidth: 200, galleryMaxWidth: 500 },
unifiedCanvas: { galleryMinWidth: 200, galleryMaxWidth: 200 },
nodes: { galleryMinWidth: 200, galleryMaxWidth: 500 },
postprocessing: { galleryMinWidth: 200, galleryMaxWidth: 500 },
training: { galleryMinWidth: 200, galleryMaxWidth: 500 },
};
const galleryPanelSelector = createSelector(
[
activeTabNameSelector,
uiSelector,
gallerySelector,
isStagingSelector,
lightboxSelector,
],
(activeTabName, ui, gallery, isStaging, lightbox) => {
const { shouldPinGallery, shouldShowGallery } = ui;
const { galleryImageMinimumWidth } = gallery;
const { isLightboxOpen } = lightbox;
return {
activeTabName,
isStaging,
shouldPinGallery,
shouldShowGallery,
galleryImageMinimumWidth,
isResizable: activeTabName !== 'unifiedCanvas',
isLightboxOpen,
};
},
{
memoizeOptions: {
resultEqualityCheck: isEqual,
},
}
);
export default function ImageGalleryPanel() {
const dispatch = useAppDispatch();
const {
shouldPinGallery,
shouldShowGallery,
galleryImageMinimumWidth,
activeTabName,
isStaging,
isResizable,
isLightboxOpen,
} = useAppSelector(galleryPanelSelector);
const handleSetShouldPinGallery = () => {
dispatch(togglePinGalleryPanel());
dispatch(requestCanvasRescale());
};
const handleToggleGallery = () => {
dispatch(toggleGalleryPanel());
shouldPinGallery && dispatch(requestCanvasRescale());
};
const handleCloseGallery = () => {
dispatch(setShouldShowGallery(false));
shouldPinGallery && dispatch(requestCanvasRescale());
};
useHotkeys(
'g',
() => {
handleToggleGallery();
},
[shouldPinGallery]
);
useHotkeys(
'left',
() => {
dispatch(selectPrevImage());
},
{
enabled: !isStaging || activeTabName !== 'unifiedCanvas',
},
[isStaging, activeTabName]
);
useHotkeys(
'right',
() => {
dispatch(selectNextImage());
},
{
enabled: !isStaging || activeTabName !== 'unifiedCanvas',
},
[isStaging, activeTabName]
);
useHotkeys(
'shift+g',
() => {
handleSetShouldPinGallery();
},
[shouldPinGallery]
);
useHotkeys(
'esc',
() => {
dispatch(setShouldShowGallery(false));
},
{
enabled: () => !shouldPinGallery,
preventDefault: true,
},
[shouldPinGallery]
);
const IMAGE_SIZE_STEP = 32;
useHotkeys(
'shift+up',
() => {
if (galleryImageMinimumWidth < 256) {
const newMinWidth = clamp(
galleryImageMinimumWidth + IMAGE_SIZE_STEP,
32,
256
);
dispatch(setGalleryImageMinimumWidth(newMinWidth));
}
},
[galleryImageMinimumWidth]
);
useHotkeys(
'shift+down',
() => {
if (galleryImageMinimumWidth > 32) {
const newMinWidth = clamp(
galleryImageMinimumWidth - IMAGE_SIZE_STEP,
32,
256
);
dispatch(setGalleryImageMinimumWidth(newMinWidth));
}
},
[galleryImageMinimumWidth]
);
return (
<ResizableDrawer
direction="right"
isResizable={isResizable || !shouldPinGallery}
isOpen={shouldShowGallery}
onClose={handleCloseGallery}
isPinned={shouldPinGallery && !isLightboxOpen}
minWidth={
shouldPinGallery
? GALLERY_TAB_WIDTHS[activeTabName].galleryMinWidth
: 200
}
maxWidth={
shouldPinGallery
? GALLERY_TAB_WIDTHS[activeTabName].galleryMaxWidth
: undefined
}
>
<ImageGalleryContent />
</ResizableDrawer>
);
}
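The shift+up/shift+down hotkeys step the thumbnail width in 32 px increments and clamp the result to the 32–256 range; listing galleryImageMinimumWidth in each hook's dependency array keeps the handler's closure current. The arithmetic, factored out as a hypothetical helper:

import { clamp } from 'lodash';

const IMAGE_SIZE_STEP = 32;

// Equivalent to the handlers above: step, then clamp to [32, 256].
const stepImageWidth = (current: number, direction: 1 | -1): number =>
  clamp(current + direction * IMAGE_SIZE_STEP, 32, 256);

stepImageWidth(240, 1);  // 256 — clamped at the maximum
stepImageWidth(32, -1);  // 32 — already at the minimum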


@ -42,9 +42,9 @@ import {
import { setShouldShowImageDetails } from 'features/ui/store/uiSlice';
import { memo } from 'react';
import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next';
import { FaCopy } from 'react-icons/fa';
import { IoArrowUndoCircleOutline } from 'react-icons/io5';
import { APP_METADATA_HEIGHT } from 'theme/util/constants';
type MetadataItemProps = {
isLink?: boolean;
@ -66,12 +66,14 @@ const MetadataItem = ({
labelPosition,
withCopy = false,
}: MetadataItemProps) => {
const { t } = useTranslation();
return (
<Flex gap={2}>
{onClick && (
<Tooltip label={`Recall ${label}`}>
<IconButton
aria-label="Use this parameter"
aria-label={t('accessibility.useThisParameter')}
icon={<IoArrowUndoCircleOutline />}
size="xs"
variant="ghost"
@ -112,7 +114,6 @@ const MetadataItem = ({
type ImageMetadataViewerProps = {
image: InvokeAI.Image;
styleClass?: string;
};
// TODO: I don't know if this is needed.
@ -127,343 +128,328 @@ const memoEqualityCheck = (
* Image metadata viewer overlays currently selected image and provides
* access to any of its metadata for use in processing.
*/
const ImageMetadataViewer = memo(
({ image, styleClass }: ImageMetadataViewerProps) => {
const dispatch = useAppDispatch();
const ImageMetadataViewer = memo(({ image }: ImageMetadataViewerProps) => {
const dispatch = useAppDispatch();
const setBothPrompts = useSetBothPrompts();
const setBothPrompts = useSetBothPrompts();
useHotkeys('esc', () => {
dispatch(setShouldShowImageDetails(false));
});
useHotkeys('esc', () => {
dispatch(setShouldShowImageDetails(false));
});
const metadata = image?.metadata?.image || {};
const dreamPrompt = image?.dreamPrompt;
const metadata = image?.metadata?.image || {};
const dreamPrompt = image?.dreamPrompt;
const {
cfg_scale,
fit,
height,
hires_fix,
init_image_path,
mask_image_path,
orig_path,
perlin,
postprocessing,
prompt,
sampler,
seamless,
seed,
steps,
strength,
threshold,
type,
variations,
width,
} = metadata;
const {
cfg_scale,
fit,
height,
hires_fix,
init_image_path,
mask_image_path,
orig_path,
perlin,
postprocessing,
prompt,
sampler,
seamless,
seed,
steps,
strength,
threshold,
type,
variations,
width,
} = metadata;
const metadataJSON = JSON.stringify(image.metadata, null, 2);
const { t } = useTranslation();
return (
<Box
className={styleClass}
sx={{
position: 'absolute',
top: '0',
width: '100%',
borderRadius: 'base',
padding: 4,
overflow: 'scroll',
maxHeight: APP_METADATA_HEIGHT,
height: '100%',
zIndex: '10',
backdropFilter: 'blur(10px)',
const metadataJSON = JSON.stringify(image.metadata, null, 2);
return (
<Flex
sx={{
padding: 4,
gap: 1,
flexDirection: 'column',
width: 'full',
height: 'full',
backdropFilter: 'blur(20px)',
bg: 'whiteAlpha.600',
_dark: {
bg: 'blackAlpha.600',
}}
>
<Flex gap={1} direction="column" width="100%">
<Flex gap={2}>
<Text fontWeight="semibold">File:</Text>
<Link href={image.url} isExternal maxW="calc(100% - 3rem)">
{image.url.length > 64
? image.url.substring(0, 64).concat('...')
: image.url}
<ExternalLinkIcon mx="2px" />
</Link>
</Flex>
{Object.keys(metadata).length > 0 ? (
<>
{type && <MetadataItem label="Generation type" value={type} />}
{image.metadata?.model_weights && (
<MetadataItem
label="Model"
value={image.metadata.model_weights}
/>
)}
{['esrgan', 'gfpgan'].includes(type) && (
<MetadataItem label="Original image" value={orig_path} />
)}
{prompt && (
<MetadataItem
label="Prompt"
labelPosition="top"
value={
typeof prompt === 'string' ? prompt : promptToString(prompt)
}
onClick={() => setBothPrompts(prompt)}
/>
)}
{seed !== undefined && (
<MetadataItem
label="Seed"
value={seed}
onClick={() => dispatch(setSeed(seed))}
/>
)}
{threshold !== undefined && (
<MetadataItem
label="Noise Threshold"
value={threshold}
onClick={() => dispatch(setThreshold(threshold))}
/>
)}
{perlin !== undefined && (
<MetadataItem
label="Perlin Noise"
value={perlin}
onClick={() => dispatch(setPerlin(perlin))}
/>
)}
{sampler && (
<MetadataItem
label="Sampler"
value={sampler}
onClick={() => dispatch(setSampler(sampler))}
/>
)}
{steps && (
<MetadataItem
label="Steps"
value={steps}
onClick={() => dispatch(setSteps(steps))}
/>
)}
{cfg_scale !== undefined && (
<MetadataItem
label="CFG scale"
value={cfg_scale}
onClick={() => dispatch(setCfgScale(cfg_scale))}
/>
)}
{variations && variations.length > 0 && (
<MetadataItem
label="Seed-weight pairs"
value={seedWeightsToString(variations)}
onClick={() =>
dispatch(setSeedWeights(seedWeightsToString(variations)))
}
/>
)}
{seamless && (
<MetadataItem
label="Seamless"
value={seamless}
onClick={() => dispatch(setSeamless(seamless))}
/>
)}
{hires_fix && (
<MetadataItem
label="High Resolution Optimization"
value={hires_fix}
onClick={() => dispatch(setHiresFix(hires_fix))}
/>
)}
{width && (
<MetadataItem
label="Width"
value={width}
onClick={() => dispatch(setWidth(width))}
/>
)}
{height && (
<MetadataItem
label="Height"
value={height}
onClick={() => dispatch(setHeight(height))}
/>
)}
{init_image_path && (
<MetadataItem
label="Initial image"
value={init_image_path}
isLink
onClick={() => dispatch(setInitialImage(init_image_path))}
/>
)}
{mask_image_path && (
<MetadataItem
label="Mask image"
value={mask_image_path}
isLink
onClick={() => dispatch(setMaskPath(mask_image_path))}
/>
)}
{type === 'img2img' && strength && (
<MetadataItem
label="Image to image strength"
value={strength}
onClick={() => dispatch(setImg2imgStrength(strength))}
/>
)}
{fit && (
<MetadataItem
label="Image to image fit"
value={fit}
onClick={() => dispatch(setShouldFitToWidthHeight(fit))}
/>
)}
{postprocessing && postprocessing.length > 0 && (
<>
<Heading size="sm">Postprocessing</Heading>
{postprocessing.map(
(
postprocess: InvokeAI.PostProcessedImageMetadata,
i: number
) => {
if (postprocess.type === 'esrgan') {
const { scale, strength, denoise_str } = postprocess;
return (
<Flex key={i} pl={8} gap={1} direction="column">
<Text size="md">{`${
i + 1
}: Upscale (ESRGAN)`}</Text>
<MetadataItem
label="Scale"
value={scale}
onClick={() => dispatch(setUpscalingLevel(scale))}
/>
<MetadataItem
label="Strength"
value={strength}
onClick={() =>
dispatch(setUpscalingStrength(strength))
}
/>
{denoise_str !== undefined && (
<MetadataItem
label="Denoising strength"
value={denoise_str}
onClick={() =>
dispatch(setUpscalingDenoising(denoise_str))
}
/>
)}
</Flex>
);
} else if (postprocess.type === 'gfpgan') {
const { strength } = postprocess;
return (
<Flex key={i} pl={8} gap={1} direction="column">
<Text size="md">{`${
i + 1
}: Face restoration (GFPGAN)`}</Text>
<MetadataItem
label="Strength"
value={strength}
onClick={() => {
dispatch(setFacetoolStrength(strength));
dispatch(setFacetoolType('gfpgan'));
}}
/>
</Flex>
);
} else if (postprocess.type === 'codeformer') {
const { strength, fidelity } = postprocess;
return (
<Flex key={i} pl={8} gap={1} direction="column">
<Text size="md">{`${
i + 1
}: Face restoration (Codeformer)`}</Text>
<MetadataItem
label="Strength"
value={strength}
onClick={() => {
dispatch(setFacetoolStrength(strength));
dispatch(setFacetoolType('codeformer'));
}}
/>
{fidelity && (
<MetadataItem
label="Fidelity"
value={fidelity}
onClick={() => {
dispatch(setCodeformerFidelity(fidelity));
dispatch(setFacetoolType('codeformer'));
}}
/>
)}
</Flex>
);
}
}
)}
</>
)}
{dreamPrompt && (
<MetadataItem
withCopy
label="Dream Prompt"
value={dreamPrompt}
/>
)}
<Flex gap={2} direction="column">
<Flex gap={2}>
<Tooltip label="Copy metadata JSON">
<IconButton
aria-label="Copy metadata JSON"
icon={<FaCopy />}
size="xs"
variant="ghost"
fontSize={14}
onClick={() =>
navigator.clipboard.writeText(metadataJSON)
}
/>
</Tooltip>
<Text fontWeight="semibold">Metadata JSON:</Text>
</Flex>
<Box
sx={{
mt: 0,
mr: 2,
mb: 4,
ml: 2,
padding: 4,
borderRadius: 'base',
overflowX: 'scroll',
wordBreak: 'break-all',
bg: 'whiteAlpha.100',
}}
>
<pre>{metadataJSON}</pre>
</Box>
</Flex>
</>
) : (
<Center width="100%" pt={10}>
<Text fontSize="lg" fontWeight="semibold">
No metadata available
</Text>
</Center>
},
}}
>
<Flex gap={2}>
<Text fontWeight="semibold">File:</Text>
<Link href={image.url} isExternal maxW="calc(100% - 3rem)">
{image.url.length > 64
? image.url.substring(0, 64).concat('...')
: image.url}
<ExternalLinkIcon mx="2px" />
</Link>
</Flex>
{Object.keys(metadata).length > 0 ? (
<>
{type && <MetadataItem label="Generation type" value={type} />}
{image.metadata?.model_weights && (
<MetadataItem label="Model" value={image.metadata.model_weights} />
)}
</Flex>
</Box>
);
},
memoEqualityCheck
);
{['esrgan', 'gfpgan'].includes(type) && (
<MetadataItem label="Original image" value={orig_path} />
)}
{prompt && (
<MetadataItem
label="Prompt"
labelPosition="top"
value={
typeof prompt === 'string' ? prompt : promptToString(prompt)
}
onClick={() => setBothPrompts(prompt)}
/>
)}
{seed !== undefined && (
<MetadataItem
label="Seed"
value={seed}
onClick={() => dispatch(setSeed(seed))}
/>
)}
{threshold !== undefined && (
<MetadataItem
label="Noise Threshold"
value={threshold}
onClick={() => dispatch(setThreshold(threshold))}
/>
)}
{perlin !== undefined && (
<MetadataItem
label="Perlin Noise"
value={perlin}
onClick={() => dispatch(setPerlin(perlin))}
/>
)}
{sampler && (
<MetadataItem
label="Sampler"
value={sampler}
onClick={() => dispatch(setSampler(sampler))}
/>
)}
{steps && (
<MetadataItem
label="Steps"
value={steps}
onClick={() => dispatch(setSteps(steps))}
/>
)}
{cfg_scale !== undefined && (
<MetadataItem
label="CFG scale"
value={cfg_scale}
onClick={() => dispatch(setCfgScale(cfg_scale))}
/>
)}
{variations && variations.length > 0 && (
<MetadataItem
label="Seed-weight pairs"
value={seedWeightsToString(variations)}
onClick={() =>
dispatch(setSeedWeights(seedWeightsToString(variations)))
}
/>
)}
{seamless && (
<MetadataItem
label="Seamless"
value={seamless}
onClick={() => dispatch(setSeamless(seamless))}
/>
)}
{hires_fix && (
<MetadataItem
label="High Resolution Optimization"
value={hires_fix}
onClick={() => dispatch(setHiresFix(hires_fix))}
/>
)}
{width && (
<MetadataItem
label="Width"
value={width}
onClick={() => dispatch(setWidth(width))}
/>
)}
{height && (
<MetadataItem
label="Height"
value={height}
onClick={() => dispatch(setHeight(height))}
/>
)}
{init_image_path && (
<MetadataItem
label="Initial image"
value={init_image_path}
isLink
onClick={() => dispatch(setInitialImage(init_image_path))}
/>
)}
{mask_image_path && (
<MetadataItem
label="Mask image"
value={mask_image_path}
isLink
onClick={() => dispatch(setMaskPath(mask_image_path))}
/>
)}
{type === 'img2img' && strength && (
<MetadataItem
label="Image to image strength"
value={strength}
onClick={() => dispatch(setImg2imgStrength(strength))}
/>
)}
{fit && (
<MetadataItem
label="Image to image fit"
value={fit}
onClick={() => dispatch(setShouldFitToWidthHeight(fit))}
/>
)}
{postprocessing && postprocessing.length > 0 && (
<>
<Heading size="sm">Postprocessing</Heading>
{postprocessing.map(
(
postprocess: InvokeAI.PostProcessedImageMetadata,
i: number
) => {
if (postprocess.type === 'esrgan') {
const { scale, strength, denoise_str } = postprocess;
return (
<Flex key={i} pl={8} gap={1} direction="column">
<Text size="md">{`${i + 1}: Upscale (ESRGAN)`}</Text>
<MetadataItem
label="Scale"
value={scale}
onClick={() => dispatch(setUpscalingLevel(scale))}
/>
<MetadataItem
label="Strength"
value={strength}
onClick={() =>
dispatch(setUpscalingStrength(strength))
}
/>
{denoise_str !== undefined && (
<MetadataItem
label="Denoising strength"
value={denoise_str}
onClick={() =>
dispatch(setUpscalingDenoising(denoise_str))
}
/>
)}
</Flex>
);
} else if (postprocess.type === 'gfpgan') {
const { strength } = postprocess;
return (
<Flex key={i} pl={8} gap={1} direction="column">
<Text size="md">{`${
i + 1
}: Face restoration (GFPGAN)`}</Text>
<MetadataItem
label="Strength"
value={strength}
onClick={() => {
dispatch(setFacetoolStrength(strength));
dispatch(setFacetoolType('gfpgan'));
}}
/>
</Flex>
);
} else if (postprocess.type === 'codeformer') {
const { strength, fidelity } = postprocess;
return (
<Flex key={i} pl={8} gap={1} direction="column">
<Text size="md">{`${
i + 1
}: Face restoration (Codeformer)`}</Text>
<MetadataItem
label="Strength"
value={strength}
onClick={() => {
dispatch(setFacetoolStrength(strength));
dispatch(setFacetoolType('codeformer'));
}}
/>
{fidelity && (
<MetadataItem
label="Fidelity"
value={fidelity}
onClick={() => {
dispatch(setCodeformerFidelity(fidelity));
dispatch(setFacetoolType('codeformer'));
}}
/>
)}
</Flex>
);
}
}
)}
</>
)}
{dreamPrompt && (
<MetadataItem withCopy label="Dream Prompt" value={dreamPrompt} />
)}
<Flex gap={2} direction="column">
<Flex gap={2}>
<Tooltip label="Copy metadata JSON">
<IconButton
aria-label={t('accessibility.copyMetadataJson')}
icon={<FaCopy />}
size="xs"
variant="ghost"
fontSize={14}
onClick={() => navigator.clipboard.writeText(metadataJSON)}
/>
</Tooltip>
<Text fontWeight="semibold">Metadata JSON:</Text>
</Flex>
<Box
sx={{
mt: 0,
mr: 2,
mb: 4,
ml: 2,
padding: 4,
borderRadius: 'base',
overflowX: 'scroll',
wordBreak: 'break-all',
bg: 'whiteAlpha.500',
_dark: { bg: 'blackAlpha.500' },
}}
>
<pre>{metadataJSON}</pre>
</Box>
</Flex>
</>
) : (
<Center width="100%" pt={10}>
<Text fontSize="lg" fontWeight="semibold">
No metadata available
</Text>
</Center>
)}
</Flex>
);
}, memoEqualityCheck);
ImageMetadataViewer.displayName = 'ImageMetadataViewer';
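ImageMetadataViewer is wrapped in React.memo with a custom memoEqualityCheck (its body sits outside this hunk), and displayName is set explicitly because memo-wrapped components otherwise show up anonymous in React DevTools. A typical shape for such a check — hypothetical, since the real comparison is not shown here:

import * as InvokeAI from 'app/invokeai';

// Hypothetical: skip re-rendering unless a different image is displayed.
const memoEqualityCheckSketch = (
  prev: { image: InvokeAI.Image },
  next: { image: InvokeAI.Image }
) => prev.image.uuid === next.image.uuid;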


@ -3,6 +3,7 @@ import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { isEqual } from 'lodash';
import { useState } from 'react';
import { useTranslation } from 'react-i18next';
import { FaAngleLeft, FaAngleRight } from 'react-icons/fa';
import { gallerySelector } from '../store/gallerySelectors';
import {
@ -51,6 +52,7 @@ export const nextPrevImageButtonsSelector = createSelector(
const NextPrevImageButtons = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const { isOnFirstImage, isOnLastImage } = useAppSelector(
nextPrevImageButtonsSelector
@ -95,7 +97,7 @@ const NextPrevImageButtons = () => {
>
{shouldShowNextPrevButtons && !isOnFirstImage && (
<IconButton
aria-label="Previous image"
aria-label={t('accessibility.previousImage')}
icon={<FaAngleLeft size={64} />}
variant="unstyled"
onClick={handleClickPrevButton}
@ -114,7 +116,7 @@ const NextPrevImageButtons = () => {
>
{shouldShowNextPrevButtons && !isOnLastImage && (
<IconButton
aria-label="Next image"
aria-label={t('accessibility.nextImage')}
icon={<FaAngleRight size={64} />}
variant="unstyled"
onClick={handleClickNextButton}


@ -1,53 +1,47 @@
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { isStagingSelector } from 'features/canvas/store/canvasSelectors';
import { lightboxSelector } from 'features/lightbox/store/lightboxSelectors';
import { systemSelector } from 'features/system/store/systemSelectors';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import {
activeTabNameSelector,
uiSelector,
} from 'features/ui/store/uiSelectors';
import { isEqual } from 'lodash';
import { GalleryState } from './gallerySlice';
export const gallerySelector = (state: RootState) => state.gallery;
export const imageGallerySelector = createSelector(
[gallerySelector, lightboxSelector, isStagingSelector, activeTabNameSelector],
(gallery: GalleryState, lightbox, isStaging, activeTabName) => {
[gallerySelector, uiSelector, lightboxSelector, activeTabNameSelector],
(gallery, ui, lightbox, activeTabName) => {
const {
categories,
currentCategory,
currentImageUuid,
shouldPinGallery,
shouldShowGallery,
galleryImageMinimumWidth,
galleryImageObjectFit,
shouldHoldGalleryOpen,
shouldAutoSwitchToNewImages,
galleryWidth,
shouldUseSingleGalleryColumn,
} = gallery;
const { shouldPinGallery } = ui;
const { isLightboxOpen } = lightbox;
return {
currentImageUuid,
shouldPinGallery,
shouldShowGallery,
galleryImageMinimumWidth,
galleryImageObjectFit,
galleryGridTemplateColumns: shouldUseSingleGalleryColumn
? 'auto'
: `repeat(auto-fill, minmax(${galleryImageMinimumWidth}px, auto))`,
activeTabName,
shouldHoldGalleryOpen,
shouldAutoSwitchToNewImages,
currentCategory,
images: categories[currentCategory].images,
areMoreImagesAvailable:
categories[currentCategory].areMoreImagesAvailable,
currentCategory,
galleryWidth,
isLightboxOpen,
isStaging,
shouldEnableResize:
isLightboxOpen ||
(activeTabName === 'unifiedCanvas' && shouldPinGallery)
@ -65,7 +59,7 @@ export const imageGallerySelector = createSelector(
export const hoverableImageSelector = createSelector(
[gallerySelector, systemSelector, lightboxSelector, activeTabNameSelector],
(gallery: GalleryState, system, lightbox, activeTabName) => {
(gallery, system, lightbox, activeTabName) => {
return {
mayDeleteImage: system.isConnected && !system.isProcessing,
galleryImageObjectFit: gallery.galleryImageObjectFit,
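Dropping the explicit GalleryState annotation lets the result-function parameter types flow from the input selectors. The resultEqualityCheck: isEqual option used by these selectors tells reselect to return the previously cached object whenever a recomputed result is deep-equal, so useAppSelector subscribers are spared re-renders for structurally identical output. The option in isolation:

import { createSelector } from '@reduxjs/toolkit';
import { isEqual } from 'lodash';

type State = { gallery: { width: number } };

// Deep-equal results reuse the cached reference across calls.
const selectGalleryLayout = createSelector(
  [(state: State) => state.gallery],
  (gallery) => ({ width: gallery.width }),
  { memoizeOptions: { resultEqualityCheck: isEqual } }
);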


@ -29,11 +29,8 @@ export interface GalleryState {
boundingBox?: IRect;
generationMode?: InvokeTabName;
};
shouldPinGallery: boolean;
shouldShowGallery: boolean;
galleryImageMinimumWidth: number;
galleryImageObjectFit: GalleryImageObjectFitType;
shouldHoldGalleryOpen: boolean;
shouldAutoSwitchToNewImages: boolean;
categories: {
user: Gallery;
@ -46,11 +43,8 @@ export interface GalleryState {
const initialState: GalleryState = {
currentImageUuid: '',
shouldPinGallery: true,
shouldShowGallery: true,
galleryImageMinimumWidth: 64,
galleryImageObjectFit: 'cover',
shouldHoldGalleryOpen: false,
shouldAutoSwitchToNewImages: true,
currentCategory: 'result',
categories: {
@ -233,13 +227,6 @@ export const gallerySlice = createSlice({
areMoreImagesAvailable;
}
},
setShouldPinGallery: (state, action: PayloadAction<boolean>) => {
state.shouldPinGallery = action.payload;
},
setShouldShowGallery: (state, action: PayloadAction<boolean>) => {
state.shouldShowGallery = action.payload;
},
setGalleryImageMinimumWidth: (state, action: PayloadAction<number>) => {
state.galleryImageMinimumWidth = action.payload;
},
@ -249,9 +236,6 @@ export const gallerySlice = createSlice({
) => {
state.galleryImageObjectFit = action.payload;
},
setShouldHoldGalleryOpen: (state, action: PayloadAction<boolean>) => {
state.shouldHoldGalleryOpen = action.payload;
},
setShouldAutoSwitchToNewImages: (state, action: PayloadAction<boolean>) => {
state.shouldAutoSwitchToNewImages = action.payload;
},
@ -279,11 +263,8 @@ export const {
setIntermediateImage,
selectNextImage,
selectPrevImage,
setShouldPinGallery,
setShouldShowGallery,
setGalleryImageMinimumWidth,
setGalleryImageObjectFit,
setShouldHoldGalleryOpen,
setShouldAutoSwitchToNewImages,
setCurrentCategory,
setGalleryWidth,
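The panel flags (shouldPinGallery, shouldShowGallery, shouldHoldGalleryOpen) leave gallerySlice: panel visibility is now the ui slice's concern, matching the togglePinGalleryPanel, toggleGalleryPanel, and setShouldShowGallery actions imported from uiSlice elsewhere in this diff. A sketch of the receiving side, assuming this reducer shape:

import { createSlice, PayloadAction } from '@reduxjs/toolkit';

// Assumed ui-slice fields; names match the selectors and actions above.
const uiSlice = createSlice({
  name: 'ui',
  initialState: { shouldPinGallery: true, shouldShowGallery: true },
  reducers: {
    setShouldShowGallery: (state, action: PayloadAction<boolean>) => {
      state.shouldShowGallery = action.payload;
    },
    togglePinGalleryPanel: (state) => {
      state.shouldPinGallery = !state.shouldPinGallery;
    },
    toggleGalleryPanel: (state) => {
      state.shouldShowGallery = !state.shouldShowGallery;
    },
  },
});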


@ -1,19 +1,20 @@
import { Box, Flex, Grid } from '@chakra-ui/react';
import { Box, Flex } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import CurrentImageButtons from 'features/gallery/components/CurrentImageButtons';
import ImageGallery from 'features/gallery/components/ImageGallery';
import ImageMetadataViewer from 'features/gallery/components/ImageMetaDataViewer/ImageMetadataViewer';
import NextPrevImageButtons from 'features/gallery/components/NextPrevImageButtons';
import { gallerySelector } from 'features/gallery/store/gallerySelectors';
import { setIsLightboxOpen } from 'features/lightbox/store/lightboxSlice';
import { uiSelector } from 'features/ui/store/uiSelectors';
import { AnimatePresence, motion } from 'framer-motion';
import { isEqual } from 'lodash';
import { useHotkeys } from 'react-hotkeys-hook';
import { BiExit } from 'react-icons/bi';
import { TransformWrapper } from 'react-zoom-pan-pinch';
import { PROGRESS_BAR_THICKNESS } from 'theme/util/constants';
import useImageTransform from '../hooks/useImageTransform';
import ReactPanZoomButtons from './ReactPanZoomButtons';
import ReactPanZoomImage from './ReactPanZoomImage';
@ -65,106 +66,102 @@ export default function Lightbox() {
);
return (
<TransformWrapper
centerOnInit
minScale={0.1}
initialPositionX={50}
initialPositionY={50}
>
<Box
sx={{
width: '100%',
height: '100%',
overflow: 'hidden',
position: 'absolute',
insetInlineStart: 0,
top: 0,
zIndex: 30,
animation: 'popIn 0.3s ease-in',
bg: 'base.800',
}}
>
<Flex
sx={{
flexDir: 'column',
position: 'absolute',
top: 4,
insetInlineStart: 4,
gap: 4,
zIndex: 3,
<AnimatePresence>
{isLightBoxOpen && (
<motion.div
key="lightbox"
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
exit={{ opacity: 0 }}
transition={{ duration: 0.15, ease: 'easeInOut' }}
style={{
display: 'flex',
width: '100vw',
height: `calc(100vh - ${PROGRESS_BAR_THICKNESS * 4}px)`,
position: 'fixed',
top: `${PROGRESS_BAR_THICKNESS * 4}px`,
background: 'var(--invokeai-colors-base-900)',
zIndex: 99,
}}
>
<IAIIconButton
icon={<BiExit />}
aria-label="Exit Viewer"
onClick={() => {
dispatch(setIsLightboxOpen(false));
}}
fontSize={20}
/>
<ReactPanZoomButtons
flipHorizontally={flipHorizontally}
flipVertically={flipVertically}
rotateCounterClockwise={rotateCounterClockwise}
rotateClockwise={rotateClockwise}
reset={reset}
/>
</Flex>
<Flex>
<Grid
sx={{
overflow: 'hidden',
gridTemplateColumns: 'auto max-content',
placeItems: 'center',
width: '100vw',
height: '100vh',
bg: 'base.850',
}}
<TransformWrapper
centerOnInit
minScale={0.1}
initialPositionX={50}
initialPositionY={50}
>
<Flex
sx={{
flexDir: 'column',
position: 'absolute',
insetInlineStart: 4,
gap: 4,
zIndex: 3,
top: 4,
}}
>
<IAIIconButton
icon={<BiExit />}
aria-label="Exit Viewer"
className="lightbox-close-btn"
onClick={() => {
dispatch(setIsLightboxOpen(false));
}}
fontSize={20}
/>
<ReactPanZoomButtons
flipHorizontally={flipHorizontally}
flipVertically={flipVertically}
rotateCounterClockwise={rotateCounterClockwise}
rotateClockwise={rotateClockwise}
reset={reset}
/>
</Flex>
<Flex
sx={{
position: 'absolute',
top: 4,
zIndex: 3,
insetInlineStart: '50%',
transform: 'translate(-50%, 0)',
}}
>
<CurrentImageButtons />
</Flex>
{viewerImageToDisplay && (
<>
<ReactPanZoomImage
rotation={rotation}
scaleX={scaleX}
scaleY={scaleY}
image={viewerImageToDisplay.url}
image={viewerImageToDisplay}
styleClass="lightbox-image"
/>
{shouldShowImageDetails && (
<ImageMetadataViewer image={viewerImageToDisplay} />
)}
{!shouldShowImageDetails && (
<Box
sx={{
position: 'absolute',
top: 0,
insetInlineStart: 0,
w: '100vw',
h: '100vh',
px: 16,
pointerEvents: 'none',
}}
>
<NextPrevImageButtons />
</Box>
)}
</>
)}
{!shouldShowImageDetails && (
<Box
sx={{
position: 'absolute',
top: 0,
insetInlineStart: 0,
w: `calc(100vw - ${8 * 2 * 4}px)`,
h: '100vh',
mx: 8,
pointerEvents: 'none',
}}
>
<NextPrevImageButtons />
</Box>
)}
<Box
sx={{
position: 'absolute',
top: 4,
}}
>
<CurrentImageButtons />
</Box>
</Grid>
<ImageGallery />
</Flex>
</Box>
</TransformWrapper>
</TransformWrapper>
</motion.div>
)}
</AnimatePresence>
);
}
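Rendering the lightbox inside AnimatePresence lets framer-motion play the exit fade before the conditional child unmounts; the key on the motion.div is what allows AnimatePresence to track that child for exit. The mount/unmount pattern reduced to its essentials:

import { AnimatePresence, motion } from 'framer-motion';

// Minimal sketch of the fade used above: `key` lets AnimatePresence
// track the child so the exit animation runs before removal.
const Fade = ({ isOpen }: { isOpen: boolean }) => (
  <AnimatePresence>
    {isOpen && (
      <motion.div
        key="fade"
        initial={{ opacity: 0 }}
        animate={{ opacity: 1 }}
        exit={{ opacity: 0 }}
        transition={{ duration: 0.15, ease: 'easeInOut' }}
      />
    )}
  </AnimatePresence>
);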


@ -1,5 +1,6 @@
import { ButtonGroup } from '@chakra-ui/react';
import IAIIconButton from 'common/components/IAIIconButton';
import { useTranslation } from 'react-i18next';
import {
BiReset,
BiRotateLeft,
@ -26,12 +27,13 @@ const ReactPanZoomButtons = ({
reset,
}: ReactPanZoomButtonsProps) => {
const { zoomIn, zoomOut, resetTransform } = useTransformContext();
const { t } = useTranslation();
return (
<ButtonGroup isAttached orientation="vertical">
<IAIIconButton
icon={<BiZoomIn />}
aria-label="Zoom In"
aria-label={t('accessibility.zoomIn')}
tooltip="Zoom In"
onClick={() => zoomIn()}
fontSize={20}
@ -39,7 +41,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiZoomOut />}
aria-label="Zoom Out"
aria-label={t('accessibility.zoomOut')}
tooltip="Zoom Out"
onClick={() => zoomOut()}
fontSize={20}
@ -47,7 +49,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiRotateLeft />}
aria-label="Rotate Counter-Clockwise"
aria-label={t('accessibility.rotateCounterClockwise')}
tooltip="Rotate Counter-Clockwise"
onClick={rotateCounterClockwise}
fontSize={20}
@ -55,7 +57,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiRotateRight />}
aria-label="Rotate Clockwise"
aria-label={t('accessibility.rotateClockwise')}
tooltip="Rotate Clockwise"
onClick={rotateClockwise}
fontSize={20}
@ -63,7 +65,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<MdFlip />}
aria-label="Flip Horizontally"
aria-label={t('accessibility.flipHorizontally')}
tooltip="Flip Horizontally"
onClick={flipHorizontally}
fontSize={20}
@ -71,7 +73,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<MdFlip style={{ transform: 'rotate(90deg)' }} />}
aria-label="Flip Vertically"
aria-label={t('accessibility.flipVertically')}
tooltip="Flip Vertically"
onClick={flipVertically}
fontSize={20}
@ -79,7 +81,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiReset />}
aria-label="Reset"
aria-label={t('accessibility.reset')}
tooltip="Reset"
onClick={() => {
resetTransform();


@ -1,8 +1,9 @@
import * as React from 'react';
import { TransformComponent, useTransformContext } from 'react-zoom-pan-pinch';
import * as InvokeAI from 'app/invokeai';
type ReactPanZoomProps = {
image: string;
image: InvokeAI.Image;
styleClass?: string;
alt?: string;
ref?: React.Ref<HTMLImageElement>;
@ -34,7 +35,7 @@ export default function ReactPanZoomImage({
transform: `rotate(${rotation}deg) scaleX(${scaleX}) scaleY(${scaleY})`,
width: '100%',
}}
src={image}
src={image.url}
alt={alt}
ref={ref}
className={styleClass ? styleClass : ''}

Some files were not shown because too many files have changed in this diff.