Merge branch 'main' into tests

Lincoln Stein 2023-03-23 23:22:27 -04:00 committed by GitHub
commit cafa108f69
214 changed files with 7401 additions and 3417 deletions

View File

@@ -66,6 +66,16 @@ body:
     validations:
       required: false
 
+  - type: input
+    id: version-number
+    attributes:
+      label: What version did you experience this issue on?
+      description: |
+        Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
+      placeholder: X.X.X
+    validations:
+      required: true
+
   - type: textarea
     id: what-happened
     attributes:

View File

@@ -24,3 +24,4 @@ jobs:
           days-before-pr-stale: -1
           days-before-pr-close: -1
           repo-token: ${{ secrets.GITHUB_TOKEN }}
+          operations-per-run: 500

View File

@@ -1,12 +1,12 @@
 name: Test invoke.py pip
 on:
   pull_request:
-    paths-ignore:
-      - 'pyproject.toml'
-      - 'invokeai/**'
-      - 'invokeai/backend/**'
-      - 'invokeai/configs/**'
-      - 'invokeai/frontend/web/dist/**'
+    paths:
+      - '**'
+      - '!pyproject.toml'
+      - '!invokeai/**'
+      - 'invokeai/frontend/web/**'
+      - '!invokeai/frontend/web/dist/**'
   merge_group:
   workflow_dispatch:

View File

@@ -6,15 +6,13 @@ on:
     paths:
       - 'pyproject.toml'
      - 'invokeai/**'
-      - 'invokeai/backend/**'
-      - 'invokeai/configs/**'
+      - '!invokeai/frontend/web/**'
       - 'invokeai/frontend/web/dist/**'
   pull_request:
     paths:
       - 'pyproject.toml'
       - 'invokeai/**'
-      - 'invokeai/backend/**'
-      - 'invokeai/configs/**'
+      - '!invokeai/frontend/web/**'
       - 'invokeai/frontend/web/dist/**'
     types:
       - 'ready_for_review'

View File

@@ -17,7 +17,7 @@ notebooks.
 
 You will need a GPU to perform training in a reasonable length of
 time, and at least 12 GB of VRAM. We recommend using the [`xformers`
-library](../installation/070_INSTALL_XFORMERS) to accelerate the
+library](../installation/070_INSTALL_XFORMERS.md) to accelerate the
 training process further. During training, about ~8 GB is temporarily
 needed in order to store intermediate models, checkpoints and logs.

View File

@@ -148,7 +148,7 @@ manager, please follow these steps:
 
     === "CUDA (NVidia)"
         ```bash
-        pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+        pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
         ```
 
     === "ROCm (AMD)"

View File

@@ -24,7 +24,7 @@ You need to have opencv installed so that pypatchmatch can be built:
 brew install opencv
 ```
 
-The next time you start `invoke`, after sucesfully installing opencv, pypatchmatch will be built.
+The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.
 
 ## Linux
 
@@ -56,7 +56,7 @@ Prior to installing PyPatchMatch, you need to take the following steps:
 
 5. Confirm that pypatchmatch is installed. At the command-line prompt enter
    `python`, and then at the `>>>` line type
-   `from patchmatch import patch_match`: It should look like the follwing:
+   `from patchmatch import patch_match`: It should look like the following:
 
    ```py
    Python 3.9.5 (default, Nov 23 2021, 15:27:38)
 
@@ -108,4 +108,4 @@ Prior to installing PyPatchMatch, you need to take the following steps:
 
 [**Next, Follow Steps 4-6 from the Debian Section above**](#linux)
 
-If you see no errors, then you're ready to go!
+If you see no errors you're ready to go!

View File

@@ -4,7 +4,8 @@ import os
 from argparse import Namespace
 
 from ...backend import Globals
-from ..services.generate_initializer import get_generate
+from ..services.model_manager_initializer import get_model_manager
+from ..services.restoration_services import RestorationServices
 from ..services.graph import GraphExecutionState
 from ..services.image_storage import DiskImageStorage
 from ..services.invocation_queue import MemoryInvocationQueue
@@ -37,18 +38,16 @@ class ApiDependencies:
     invoker: Invoker = None
 
     @staticmethod
-    def initialize(args, config, event_handler_id: int):
-        Globals.try_patchmatch = args.patchmatch
-        Globals.always_use_cpu = args.always_use_cpu
-        Globals.internet_available = args.internet_available and check_internet()
-        Globals.disable_xformers = not args.xformers
-        Globals.ckpt_convert = args.ckpt_convert
+    def initialize(config, event_handler_id: int):
+        Globals.try_patchmatch = config.patchmatch
+        Globals.always_use_cpu = config.always_use_cpu
+        Globals.internet_available = config.internet_available and check_internet()
+        Globals.disable_xformers = not config.xformers
+        Globals.ckpt_convert = config.ckpt_convert
 
         # TODO: Use a logger
         print(f">> Internet connectivity is {Globals.internet_available}")
 
-        generate = get_generate(args, config)
-
         events = FastAPIEventService(event_handler_id)
 
         output_folder = os.path.abspath(
@@ -61,7 +60,7 @@ class ApiDependencies:
         db_location = os.path.join(output_folder, "invokeai.db")
 
         services = InvocationServices(
-            generate=generate,
+            model_manager=get_model_manager(config),
             events=events,
             images=images,
             queue=MemoryInvocationQueue(),
@@ -69,6 +68,7 @@ class ApiDependencies:
                 filename=db_location, table_name="graph_executions"
             ),
             processor=DefaultInvocationProcessor(),
+            restoration=RestorationServices(config),
         )
 
         ApiDependencies.invoker = Invoker(services)

View File

@@ -10,6 +10,7 @@ from pydantic.fields import Field
 from ...invocations import *
 from ...invocations.baseinvocation import BaseInvocation
 from ...services.graph import (
+    Edge,
     EdgeConnection,
     Graph,
     GraphExecutionState,
@@ -92,7 +93,7 @@ async def get_session(
 async def add_node(
     session_id: str = Path(description="The id of the session"),
     node: Annotated[
-        Union[BaseInvocation.get_invocations()], Field(discriminator="type")
+        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
     ] = Body(description="The node to add"),
 ) -> str:
     """Adds a node to the graph"""
@@ -125,7 +126,7 @@ async def update_node(
     session_id: str = Path(description="The id of the session"),
     node_path: str = Path(description="The path to the node in the graph"),
     node: Annotated[
-        Union[BaseInvocation.get_invocations()], Field(discriminator="type")
+        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
     ] = Body(description="The new node"),
 ) -> GraphExecutionState:
     """Updates a node in the graph and removes all linked edges"""
@@ -186,7 +187,7 @@ async def delete_node(
 )
 async def add_edge(
     session_id: str = Path(description="The id of the session"),
-    edge: tuple[EdgeConnection, EdgeConnection] = Body(description="The edge to add"),
+    edge: Edge = Body(description="The edge to add"),
 ) -> GraphExecutionState:
     """Adds an edge to the graph"""
     session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
@@ -228,9 +229,9 @@ async def delete_edge(
         return Response(status_code=404)
 
     try:
-        edge = (
-            EdgeConnection(node_id=from_node_id, field=from_field),
-            EdgeConnection(node_id=to_node_id, field=to_field),
+        edge = Edge(
+            source=EdgeConnection(node_id=from_node_id, field=from_field),
+            destination=EdgeConnection(node_id=to_node_id, field=to_field)
         )
         session.delete_edge(edge)
         ApiDependencies.invoker.services.graph_execution_manager.set(
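Throughout this commit, bare `(EdgeConnection, EdgeConnection)` tuples are replaced by the named `Edge` model defined in `services/graph.py` (see the hunk further down). A condensed sketch of the shape, with field descriptions elided and hypothetical node ids:

```py
from pydantic import BaseModel

class EdgeConnection(BaseModel):
    node_id: str
    field: str

class Edge(BaseModel):
    source: EdgeConnection
    destination: EdgeConnection

# e.source / e.destination replace the old positional e[0] / e[1]
e = Edge(
    source=EdgeConnection(node_id="t2i_1", field="image"),
    destination=EdgeConnection(node_id="upscale_1", field="image"),
)
print(e.source.node_id, "->", e.destination.node_id)
```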

View File

@@ -1,5 +1,4 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-
 import asyncio
 from inspect import signature
 
@@ -53,11 +52,11 @@ config = {}
 
 # Add startup event to load dependencies
 @app.on_event("startup")
 async def startup_event():
-    args = Args()
-    config = args.parse_args()
+    config = Args()
+    config.parse_args()
 
     ApiDependencies.initialize(
-        args=args, config=config, event_handler_id=event_handler_id
+        config=config, event_handler_id=event_handler_id
     )
 
@@ -113,10 +112,8 @@ def custom_openapi():
         output_type_title = output_type_titles[output_type.__name__]
         invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
         outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
-        if "additionalProperties" not in invoker_schema:
-            invoker_schema["additionalProperties"] = {}
-
-        invoker_schema["additionalProperties"]["outputs"] = outputs_ref
+        invoker_schema["output"] = outputs_ref
 
     app.openapi_schema = openapi_schema
     return app.openapi_schema
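For context, `custom_openapi` here follows FastAPI's documented pattern: generate the schema once, mutate it, and cache it on `app.openapi_schema`. A stripped-down sketch of that pattern, with hypothetical titles and schema keys standing in for the real invocation schemas:

```py
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi

app = FastAPI()

def custom_openapi():
    if app.openapi_schema:  # serve the cached copy after the first build
        return app.openapi_schema
    schema = get_openapi(title="Example API", version="0.1.0", routes=app.routes)
    # Attach a vendor-specific key, much as the diff does with invoker_schema["output"]
    schema.setdefault("components", {}).setdefault("schemas", {})
    schema["components"]["schemas"]["ExampleInvocation"] = {
        "output": {"$ref": "#/components/schemas/ExampleOutput"}
    }
    app.openapi_schema = schema
    return app.openapi_schema

app.openapi = custom_openapi  # FastAPI calls this when /openapi.json is requested
```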

View File

@@ -17,8 +17,9 @@ from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_graph_execution_history
 from .invocations import *
 from .invocations.baseinvocation import BaseInvocation
 from .services.events import EventServiceBase
-from .services.generate_initializer import get_generate
-from .services.graph import EdgeConnection, GraphExecutionState
+from .services.model_manager_initializer import get_model_manager
+from .services.restoration_services import RestorationServices
+from .services.graph import Edge, EdgeConnection, GraphExecutionState
 from .services.image_storage import DiskImageStorage
 from .services.invocation_queue import MemoryInvocationQueue
 from .services.invocation_services import InvocationServices
@@ -76,7 +77,7 @@ def get_command_parser() -> argparse.ArgumentParser:
 
 def generate_matching_edges(
     a: BaseInvocation, b: BaseInvocation
-) -> list[tuple[EdgeConnection, EdgeConnection]]:
+) -> list[Edge]:
     """Generates all possible edges between two invocations"""
     atype = type(a)
     btype = type(b)
@@ -93,24 +94,41 @@ def generate_matching_edges(
     matching_fields = matching_fields.difference(invalid_fields)
 
     edges = [
-        (
-            EdgeConnection(node_id=a.id, field=field),
-            EdgeConnection(node_id=b.id, field=field),
-        )
+        Edge(
+            source=EdgeConnection(node_id=a.id, field=field),
+            destination=EdgeConnection(node_id=b.id, field=field)
+        )
         for field in matching_fields
     ]
 
     return edges
 
 
+class SessionError(Exception):
+    """Raised when a session error has occurred"""
+    pass
+
+
+def invoke_all(context: CliContext):
+    """Runs all invocations in the specified session"""
+    context.invoker.invoke(context.session, invoke_all=True)
+
+    while not context.get_session().is_complete():
+        # Wait some time
+        time.sleep(0.1)
+
+    # Print any errors
+    if context.session.has_error():
+        for n in context.session.errors:
+            print(
+                f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
+            )
+
+        raise SessionError()
+
 
 def invoke_cli():
-    args = Args()
-    config = args.parse_args()
+    config = Args()
+    config.parse_args()
+    model_manager = get_model_manager(config)
 
-    generate = get_generate(args, config)
-
-    # NOTE: load model on first use, uncomment to load at startup
-    # TODO: Make this a config option?
-    # generate.load_model()
 
     events = EventServiceBase()
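The new `invoke_all` helper centralizes the run-and-wait loop that was previously inlined at the bottom of the REPL (see the later hunk in this file). The pattern itself is simple polling; a generic, self-contained sketch, assuming only a `get_session()` callable that returns an object with `is_complete()`:

```py
import time

def wait_for_completion(get_session, poll_interval: float = 0.1) -> None:
    """Block until the session reports completion, re-fetching state each poll."""
    while not get_session().is_complete():
        time.sleep(poll_interval)
```

Re-fetching the session on every iteration matters: the invocation processor persists updated state as nodes finish, so polling a stale reference would never observe `is_complete()` flip to true.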
@@ -122,7 +140,7 @@ def invoke_cli():
     db_location = os.path.join(output_folder, "invokeai.db")
 
     services = InvocationServices(
-        generate=generate,
+        model_manager=model_manager,
         events=events,
         images=DiskImageStorage(output_folder),
         queue=MemoryInvocationQueue(),
@@ -130,11 +148,11 @@ def invoke_cli():
             filename=db_location, table_name="graph_executions"
         ),
         processor=DefaultInvocationProcessor(),
+        restoration=RestorationServices(config),
     )
 
     invoker = Invoker(services)
     session: GraphExecutionState = invoker.create_execution_state()
-
     parser = get_command_parser()
 
     # Uncomment to print out previous sessions at startup
@@ -151,8 +169,7 @@ def invoke_cli():
         try:
             # Refresh the state of the session
-            session = invoker.services.graph_execution_manager.get(session.id)
-            history = list(get_graph_execution_history(session))
+            history = list(get_graph_execution_history(context.session))
 
             # Split the command for piping
             cmds = cmd_input.split("|")
@@ -164,7 +181,7 @@ def invoke_cli():
                     raise InvalidArgs("Empty command")
 
                 # Parse args to create invocation
-                args = vars(parser.parse_args(shlex.split(cmd.strip())))
+                args = vars(context.parser.parse_args(shlex.split(cmd.strip())))
 
                 # Override defaults
                 for field_name, field_default in context.defaults.items():
@@ -176,16 +193,16 @@ def invoke_cli():
                 command = CliCommand(command=args)
 
                 # Run any CLI commands immediately
-                # TODO: this won't behave as expected if piping and using e.g. history,
-                # since invocations are gathered and then run together at the end.
-                # This is more efficient if the CLI is running against a distributed
-                # backend, so it's preferable not to change that behavior.
                 if isinstance(command.command, BaseCommand):
+                    # Invoke all current nodes to preserve operation order
+                    invoke_all(context)
+
+                    # Run the command
                     command.command.run(context)
                     continue
 
                 # Pipe previous command output (if there was a previous command)
-                edges = []
+                edges: list[Edge] = list()
                 if len(history) > 0 or current_id != start_id:
                     from_id = (
                         history[0] if current_id == start_id else str(current_id - 1)
@@ -193,7 +210,7 @@ def invoke_cli():
                     from_node = (
                         next(filter(lambda n: n[0].id == from_id, new_invocations))[0]
                         if current_id != start_id
-                        else session.graph.get_node(from_id)
+                        else context.session.graph.get_node(from_id)
                     )
                     matching_edges = generate_matching_edges(
                         from_node, command.command
@@ -203,23 +220,23 @@ def invoke_cli():
                 # Parse provided links
                 if "link_node" in args and args["link_node"]:
                     for link in args["link_node"]:
-                        link_node = session.graph.get_node(link)
+                        link_node = context.session.graph.get_node(link)
                         matching_edges = generate_matching_edges(
                             link_node, command.command
                         )
-                        matching_destinations = [e[1] for e in matching_edges]
-                        edges = [e for e in edges if e[1] not in matching_destinations]
+                        matching_destinations = [e.destination for e in matching_edges]
+                        edges = [e for e in edges if e.destination not in matching_destinations]
                         edges.extend(matching_edges)
 
                 if "link" in args and args["link"]:
                     for link in args["link"]:
-                        edges = [e for e in edges if e[1].node_id != command.command.id and e[1].field != link[2]]
+                        edges = [e for e in edges if e.destination.node_id != command.command.id and e.destination.field != link[2]]
                         edges.append(
-                            (
-                                EdgeConnection(node_id=link[1], field=link[0]),
-                                EdgeConnection(
+                            Edge(
+                                source=EdgeConnection(node_id=link[1], field=link[0]),
+                                destination=EdgeConnection(
                                     node_id=command.command.id, field=link[2]
-                                ),
-                            )
+                                )
+                            )
                         )
 
@@ -227,37 +244,24 @@ def invoke_cli():
                 current_id = current_id + 1
 
-                # Command line was parsed successfully
-                # Add the invocations to the session
-                for invocation in new_invocations:
-                    session.add_node(invocation[0])
-                    for edge in invocation[1]:
-                        print(edge)
-                        session.add_edge(edge)
+                # Add the node to the session
+                context.session.add_node(command.command)
+                for edge in edges:
+                    print(edge)
+                    context.session.add_edge(edge)
 
-                # Execute all available invocations
-                invoker.invoke(session, invoke_all=True)
-                while not session.is_complete():
-                    # Wait some time
-                    session = context.get_session()
-                    time.sleep(0.1)
-
-                # Print any errors
-                if session.has_error():
-                    for n in session.errors:
-                        print(
-                            f"Error in node {n} (source node {session.prepared_source_mapping[n]}): {session.errors[n]}"
-                        )
-
-                    # Start a new session
-                    print("Creating a new session")
-                    session = invoker.create_execution_state()
-                    context.session = session
+                # Execute all remaining nodes
+                invoke_all(context)
 
             except InvalidArgs:
                 print('Invalid command, use "help" to list commands')
                 continue
 
+            except SessionError:
+                # Start a new session
+                print("Session error: creating a new session")
+                context.session = context.invoker.create_execution_state()
+
             except ExitCli:
                 break

View File

@@ -4,6 +4,8 @@ from datetime import datetime, timezone
 from typing import Any, Literal, Optional, Union
 
 import numpy as np
+from torch import Tensor
 from PIL import Image
 from pydantic import Field
 from skimage.exposure.histogram_matching import match_histograms
@@ -12,12 +14,14 @@ from ..services.image_storage import ImageType
 from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext
 from .image import ImageField, ImageOutput
+from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator, Generator
+from ...backend.stable_diffusion import PipelineIntermediateState
+from ...backend.util.util import image_to_dataURL
 
 SAMPLER_NAME_VALUES = Literal[
-    "ddim", "plms", "k_lms", "k_dpm_2", "k_dpm_2_a", "k_euler", "k_euler_a", "k_heun"
+    tuple(InvokeAIGenerator.schedulers())
 ]
 
+
 # Text to image
 class TextToImageInvocation(BaseInvocation):
     """Generates an image using text2img."""
@@ -41,35 +45,48 @@ class TextToImageInvocation(BaseInvocation):
 
     # TODO: pass this an emitter method or something? or a session for dispatching?
     def dispatch_progress(
-        self, context: InvocationContext, sample: Any = None, step: int = 0
+        self, context: InvocationContext, sample: Tensor, step: int
     ) -> None:
+        # TODO: only output a preview image when requested
+        image = Generator.sample_to_lowres_estimated_image(sample)
+
+        (width, height) = image.size
+        width *= 8
+        height *= 8
+
+        dataURL = image_to_dataURL(image, image_format="JPEG")
+
         context.services.events.emit_generator_progress(
             context.graph_execution_state_id,
             self.id,
+            {
+                "width": width,
+                "height": height,
+                "dataURL": dataURL
+            },
             step,
-            float(step) / float(self.steps),
+            self.steps,
         )
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
-        def step_callback(sample, step=0):
-            self.dispatch_progress(context, sample, step)
+        def step_callback(state: PipelineIntermediateState):
+            self.dispatch_progress(context, state.latents, state.step)
 
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
-
-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
+        # (right now uses whatever current model is set in model manager)
+        model = context.services.model_manager.get_model()
+        outputs = Txt2Img(model).generate(
             prompt=self.prompt,
             step_callback=step_callback,
             **self.dict(
                 exclude={"prompt"}
             ),  # Shorthand for passing all of the parameters above manually
         )
 
+        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
+        # each time it is called. We only need the first one.
+        generate_output = next(outputs)
 
         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
@@ -78,7 +95,7 @@ class TextToImageInvocation(BaseInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, results[0][0])
+        context.services.images.save(image_type, image_name, generate_output.image)
         return ImageOutput(
             image=ImageField(image_type=image_type, image_name=image_name)
         )
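The ×8 scaling in `dispatch_progress` reflects Stable Diffusion's VAE downsampling factor: the low-res estimate decoded from latents is 1/8 of the final resolution in each dimension, so the reported dimensions are scaled back up. A self-contained sketch of the data-URL step, with a hypothetical `image_to_data_url` standing in for the repo's `image_to_dataURL` helper:

```py
import base64
import io

from PIL import Image

def image_to_data_url(image: Image.Image, image_format: str = "JPEG") -> str:
    # Hypothetical stand-in for invokeai's image_to_dataURL helper
    buf = io.BytesIO()
    image.save(buf, format=image_format)
    encoded = base64.b64encode(buf.getvalue()).decode("ascii")
    return f"data:image/{image_format.lower()};base64,{encoded}"

preview = Image.new("RGB", (64, 64))           # e.g. the low-res estimate for a 512x512 render
width, height = (d * 8 for d in preview.size)  # 64 * 8 = 512: report full-res dimensions
print(width, height, image_to_data_url(preview)[:40])
```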
@@ -115,23 +132,20 @@ class ImageToImageInvocation(TextToImageInvocation):
 
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
-
-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
-            prompt=self.prompt,
-            init_img=image,
-            init_mask=mask,
-            step_callback=step_callback,
-            **self.dict(
-                exclude={"prompt", "image", "mask"}
-            ),  # Shorthand for passing all of the parameters above manually
+        model = context.services.model_manager.get_model()
+        generator_output = next(
+            Img2Img(model).generate(
+                prompt=self.prompt,
+                init_image=image,
+                init_mask=mask,
+                step_callback=step_callback,
+                **self.dict(
+                    exclude={"prompt", "image", "mask"}
+                ),  # Shorthand for passing all of the parameters above manually
+            )
         )
 
-        result_image = results[0][0]
+        result_image = generator_output.image
 
         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
@@ -145,7 +159,6 @@ class ImageToImageInvocation(TextToImageInvocation):
             image=ImageField(image_type=image_type, image_name=image_name)
         )
 
-
 class InpaintInvocation(ImageToImageInvocation):
     """Generates an image using inpaint."""
@@ -180,23 +193,20 @@ class InpaintInvocation(ImageToImageInvocation):
 
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
-
-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
-            prompt=self.prompt,
-            init_img=image,
-            init_mask=mask,
-            step_callback=step_callback,
-            **self.dict(
-                exclude={"prompt", "image", "mask"}
-            ),  # Shorthand for passing all of the parameters above manually
+        model = context.services.model_manager.get_model()
+        generator_output = next(
+            Inpaint(model).generate(
+                prompt=self.prompt,
+                init_image=image,
+                mask_image=mask,
+                step_callback=step_callback,
+                **self.dict(
+                    exclude={"prompt", "image", "mask"}
+                ),  # Shorthand for passing all of the parameters above manually
+            )
         )
 
-        result_image = results[0][0]
+        result_image = generator_output.image
 
         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?

View File

@@ -8,7 +8,6 @@ from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext
 from .image import ImageField, ImageOutput
 
-
 class RestoreFaceInvocation(BaseInvocation):
     """Restores faces in an image."""
     #fmt: off
@@ -23,7 +22,7 @@ class RestoreFaceInvocation(BaseInvocation):
         image = context.services.images.get(
             self.image.image_type, self.image.image_name
         )
-        results = context.services.generate.upscale_and_reconstruct(
+        results = context.services.restoration.upscale_and_reconstruct(
             image_list=[[image, 0]],
             upscale=None,
             strength=self.strength,  # GFPGAN strength

View File

@@ -26,7 +26,7 @@ class UpscaleInvocation(BaseInvocation):
         image = context.services.images.get(
             self.image.image_type, self.image.image_name
         )
-        results = context.services.generate.upscale_and_reconstruct(
+        results = context.services.restoration.upscale_and_reconstruct(
             image_list=[[image, 0]],
             upscale=(self.level, self.strength),
             strength=0.0,  # GFPGAN strength

View File

@@ -1,7 +1,10 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
 
-from typing import Any, Dict
+from typing import Any, Dict, TypedDict
+
+ProgressImage = TypedDict(
+    "ProgressImage", {"dataURL": str, "width": int, "height": int}
+)
 
 class EventServiceBase:
     session_event: str = "session_event"
@@ -23,8 +26,9 @@ class EventServiceBase:
         self,
         graph_execution_state_id: str,
         invocation_id: str,
+        progress_image: ProgressImage | None,
         step: int,
-        percent: float,
+        total_steps: int,
     ) -> None:
         """Emitted when there is generation progress"""
         self.__emit_session_event(
@@ -32,8 +36,9 @@ class EventServiceBase:
             payload=dict(
                 graph_execution_state_id=graph_execution_state_id,
                 invocation_id=invocation_id,
+                progress_image=progress_image,
                 step=step,
-                percent=percent,
+                total_steps=total_steps,
             ),
         )
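The progress event now carries a structured `ProgressImage` plus the raw `step`/`total_steps` pair instead of a pre-computed `percent`, so consumers can derive the ratio themselves. A small sketch of the functional `TypedDict` form used above, with hypothetical values:

```py
from typing import TypedDict

ProgressImage = TypedDict("ProgressImage", {"dataURL": str, "width": int, "height": int})

progress: ProgressImage = {
    "dataURL": "data:image/jpeg;base64,...",  # truncated for illustration
    "width": 512,
    "height": 512,
}

step, total_steps = 10, 50
percent = step / total_steps  # consumers can still recover the old `percent` value
print(percent)
```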

View File

@@ -1,255 +0,0 @@
import os
import sys
import traceback
from argparse import Namespace

import invokeai.version
from invokeai.backend import Generate, ModelManager

from ...backend import Globals


# TODO: most of this code should be split into individual services as the Generate.py code is deprecated
def get_generate(args, config) -> Generate:
    if not args.conf:
        config_file = os.path.join(Globals.root, "configs", "models.yaml")
        if not os.path.exists(config_file):
            report_model_error(
                args, FileNotFoundError(f"The file {config_file} could not be found.")
            )

    print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
    print(f'>> InvokeAI runtime directory is "{Globals.root}"')

    # these two lines prevent a horrible warning message from appearing
    # when the frozen CLIP tokenizer is imported
    import transformers  # type: ignore

    transformers.logging.set_verbosity_error()
    import diffusers

    diffusers.logging.set_verbosity_error()

    # Loading Face Restoration and ESRGAN Modules
    gfpgan, codeformer, esrgan = load_face_restoration(args)

    # normalize the config directory relative to root
    if not os.path.isabs(args.conf):
        args.conf = os.path.normpath(os.path.join(Globals.root, args.conf))

    if args.embeddings:
        if not os.path.isabs(args.embedding_path):
            embedding_path = os.path.normpath(
                os.path.join(Globals.root, args.embedding_path)
            )
        else:
            embedding_path = args.embedding_path
    else:
        embedding_path = None

    # migrate legacy models
    ModelManager.migrate_models()

    # load the infile as a list of lines
    if args.infile:
        try:
            if os.path.isfile(args.infile):
                infile = open(args.infile, "r", encoding="utf-8")
            elif args.infile == "-":  # stdin
                infile = sys.stdin
            else:
                raise FileNotFoundError(f"{args.infile} not found.")
        except (FileNotFoundError, IOError) as e:
            print(f"{e}. Aborting.")
            sys.exit(-1)

    # creating a Generate object:
    try:
        gen = Generate(
            conf=args.conf,
            model=args.model,
            sampler_name=args.sampler_name,
            embedding_path=embedding_path,
            full_precision=args.full_precision,
            precision=args.precision,
            gfpgan=gfpgan,
            codeformer=codeformer,
            esrgan=esrgan,
            free_gpu_mem=args.free_gpu_mem,
            safety_checker=args.safety_checker,
            max_loaded_models=args.max_loaded_models,
        )
    except (FileNotFoundError, TypeError, AssertionError) as e:
        report_model_error(args, e)
    except (IOError, KeyError) as e:
        print(f"{e}. Aborting.")
        sys.exit(-1)

    if args.seamless:
        print(">> changed to seamless tiling mode")

    # preload the model
    try:
        gen.load_model()
    except KeyError:
        pass
    except Exception as e:
        report_model_error(args, e)

    # try to autoconvert new models
    # autoimport new .ckpt files
    if path := args.autoconvert:
        gen.model_manager.autoconvert_weights(
            conf_path=args.conf,
            weights_directory=path,
        )

    return gen


def load_face_restoration(opt):
    try:
        gfpgan, codeformer, esrgan = None, None, None
        if opt.restore or opt.esrgan:
            from invokeai.backend.restoration import Restoration

            restoration = Restoration()
            if opt.restore:
                gfpgan, codeformer = restoration.load_face_restore_models(
                    opt.gfpgan_model_path
                )
            else:
                print(">> Face restoration disabled")
            if opt.esrgan:
                esrgan = restoration.load_esrgan(opt.esrgan_bg_tile)
            else:
                print(">> Upscaling disabled")
        else:
            print(">> Face restoration and upscaling disabled")
    except (ModuleNotFoundError, ImportError):
        print(traceback.format_exc(), file=sys.stderr)
        print(">> You may need to install the ESRGAN and/or GFPGAN modules")
    return gfpgan, codeformer, esrgan


def report_model_error(opt: Namespace, e: Exception):
    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
    print(
        "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
    )
    yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
    if yes_to_all:
        print(
            "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
        )
    else:
        response = input(
            "Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
        )
        if response.startswith(("n", "N")):
            return

    print("invokeai-configure is launching....\n")

    # Match arguments that were set on the CLI
    # only the arguments accepted by the configuration script are parsed
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    previous_args = sys.argv
    sys.argv = ["invokeai-configure"]
    sys.argv.extend(root_dir)
    sys.argv.extend(config)
    if yes_to_all is not None:
        for arg in yes_to_all.split():
            sys.argv.append(arg)

    from invokeai.frontend.install import invokeai_configure

    invokeai_configure()
    # TODO: Figure out how to restart
    # print('** InvokeAI will now restart')
    # sys.argv = previous_args
    # main()  # would rather do a os.exec(), but doesn't exist?
    # sys.exit(0)


# Temporary initializer for Generate until we migrate off of it
def old_get_generate(args, config) -> Generate:
    # TODO: Remove the need for globals
    from invokeai.backend.globals import Globals

    # alert - setting globals here
    Globals.root = os.path.expanduser(
        args.root_dir or os.environ.get("INVOKEAI_ROOT") or os.path.abspath(".")
    )
    Globals.try_patchmatch = args.patchmatch

    print(f'>> InvokeAI runtime directory is "{Globals.root}"')

    # these two lines prevent a horrible warning message from appearing
    # when the frozen CLIP tokenizer is imported
    import transformers

    transformers.logging.set_verbosity_error()

    # Loading Face Restoration and ESRGAN Modules
    gfpgan, codeformer, esrgan = None, None, None
    try:
        if config.restore or config.esrgan:
            from ldm.invoke.restoration import Restoration

            restoration = Restoration()
            if config.restore:
                gfpgan, codeformer = restoration.load_face_restore_models(
                    config.gfpgan_model_path
                )
            else:
                print(">> Face restoration disabled")
            if config.esrgan:
                esrgan = restoration.load_esrgan(config.esrgan_bg_tile)
            else:
                print(">> Upscaling disabled")
        else:
            print(">> Face restoration and upscaling disabled")
    except (ModuleNotFoundError, ImportError):
        print(traceback.format_exc(), file=sys.stderr)
        print(">> You may need to install the ESRGAN and/or GFPGAN modules")

    # normalize the config directory relative to root
    if not os.path.isabs(config.conf):
        config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))

    if config.embeddings:
        if not os.path.isabs(config.embedding_path):
            embedding_path = os.path.normpath(
                os.path.join(Globals.root, config.embedding_path)
            )
        else:
            embedding_path = config.embedding_path
    else:
        embedding_path = None

    # TODO: lazy-initialize this by wrapping it
    try:
        generate = Generate(
            conf=config.conf,
            model=config.model,
            sampler_name=config.sampler_name,
            embedding_path=embedding_path,
            full_precision=config.full_precision,
            precision=config.precision,
            gfpgan=gfpgan,
            codeformer=codeformer,
            esrgan=esrgan,
            free_gpu_mem=config.free_gpu_mem,
            safety_checker=config.safety_checker,
            max_loaded_models=config.max_loaded_models,
        )
    except (FileNotFoundError, TypeError, AssertionError):
        # emergency_model_reconfigure()  # TODO?
        sys.exit(-1)
    except (IOError, KeyError) as e:
        print(f"{e}. Aborting.")
        sys.exit(-1)

    generate.free_gpu_mem = config.free_gpu_mem

    return generate

View File

@@ -44,6 +44,11 @@ class EdgeConnection(BaseModel):
         return hash(f"{self.node_id}.{self.field}")
 
+
+class Edge(BaseModel):
+    source: EdgeConnection = Field(description="The connection for the edge's from node and field")
+    destination: EdgeConnection = Field(description="The connection for the edge's to node and field")
+
 
 def get_output_field(node: BaseInvocation, field: str) -> Any:
     node_type = type(node)
     node_outputs = get_type_hints(node_type.get_output_type())
@@ -194,7 +199,7 @@ class Graph(BaseModel):
     nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field(
         description="The nodes in this graph", default_factory=dict
     )
-    edges: list[tuple[EdgeConnection, EdgeConnection]] = Field(
+    edges: list[Edge] = Field(
         description="The connections between nodes and their fields in this graph",
         default_factory=list,
     )
@@ -251,7 +256,7 @@ class Graph(BaseModel):
         except NodeNotFoundError:
             pass  # Ignore, node doesn't exist (should this throw?)
 
-    def add_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None:
+    def add_edge(self, edge: Edge) -> None:
         """Adds an edge to a graph
 
         :raises InvalidEdgeError: the provided edge is invalid.
@@ -262,7 +267,7 @@ class Graph(BaseModel):
         else:
             raise InvalidEdgeError()
 
-    def delete_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None:
+    def delete_edge(self, edge: Edge) -> None:
         """Deletes an edge from a graph"""
 
         try:
@@ -280,7 +285,7 @@ class Graph(BaseModel):
 
         # Validate all edges reference nodes in the graph
         node_ids = set(
-            [e[0].node_id for e in self.edges] + [e[1].node_id for e in self.edges]
+            [e.source.node_id for e in self.edges] + [e.destination.node_id for e in self.edges]
         )
         if not all((self.has_node(node_id) for node_id in node_ids)):
             return False
@@ -294,10 +299,10 @@ class Graph(BaseModel):
         if not all(
             (
                 are_connections_compatible(
-                    self.get_node(e[0].node_id),
-                    e[0].field,
-                    self.get_node(e[1].node_id),
-                    e[1].field,
+                    self.get_node(e.source.node_id),
+                    e.source.field,
+                    self.get_node(e.destination.node_id),
+                    e.destination.field,
                 )
                 for e in self.edges
             )
@@ -328,58 +333,58 @@ class Graph(BaseModel):
 
         return True
 
-    def _is_edge_valid(self, edge: tuple[EdgeConnection, EdgeConnection]) -> bool:
+    def _is_edge_valid(self, edge: Edge) -> bool:
         """Validates that a new edge doesn't create a cycle in the graph"""
 
         # Validate that the nodes exist (edges may contain node paths, so we can't just check for nodes directly)
         try:
-            from_node = self.get_node(edge[0].node_id)
-            to_node = self.get_node(edge[1].node_id)
+            from_node = self.get_node(edge.source.node_id)
+            to_node = self.get_node(edge.destination.node_id)
         except NodeNotFoundError:
             return False
 
         # Validate that an edge to this node+field doesn't already exist
-        input_edges = self._get_input_edges(edge[1].node_id, edge[1].field)
+        input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)
         if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):
             return False
 
         # Validate that no cycles would be created
         g = self.nx_graph_flat()
-        g.add_edge(edge[0].node_id, edge[1].node_id)
+        g.add_edge(edge.source.node_id, edge.destination.node_id)
         if not nx.is_directed_acyclic_graph(g):
             return False
 
         # Validate that the field types are compatible
         if not are_connections_compatible(
-            from_node, edge[0].field, to_node, edge[1].field
+            from_node, edge.source.field, to_node, edge.destination.field
         ):
             return False
 
         # Validate if iterator output type matches iterator input type (if this edge results in both being set)
-        if isinstance(to_node, IterateInvocation) and edge[1].field == "collection":
+        if isinstance(to_node, IterateInvocation) and edge.destination.field == "collection":
             if not self._is_iterator_connection_valid(
-                edge[1].node_id, new_input=edge[0]
+                edge.destination.node_id, new_input=edge.source
             ):
                 return False
 
         # Validate if iterator input type matches output type (if this edge results in both being set)
-        if isinstance(from_node, IterateInvocation) and edge[0].field == "item":
+        if isinstance(from_node, IterateInvocation) and edge.source.field == "item":
             if not self._is_iterator_connection_valid(
-                edge[0].node_id, new_output=edge[1]
+                edge.source.node_id, new_output=edge.destination
             ):
                 return False
 
         # Validate if collector input type matches output type (if this edge results in both being set)
-        if isinstance(to_node, CollectInvocation) and edge[1].field == "item":
+        if isinstance(to_node, CollectInvocation) and edge.destination.field == "item":
            if not self._is_collector_connection_valid(
-                edge[1].node_id, new_input=edge[0]
+                edge.destination.node_id, new_input=edge.source
            ):
                return False
 
         # Validate if collector output type matches input type (if this edge results in both being set)
-        if isinstance(from_node, CollectInvocation) and edge[0].field == "collection":
+        if isinstance(from_node, CollectInvocation) and edge.source.field == "collection":
            if not self._is_collector_connection_valid(
-                edge[0].node_id, new_output=edge[1]
+                edge.source.node_id, new_output=edge.destination
            ):
                return False
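`_is_edge_valid` tests the cycle constraint by flattening the graph to a networkx `DiGraph`, adding the candidate edge, and checking acyclicity. The core of that check as a standalone sketch, with hypothetical node ids:

```py
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("txt2img", "upscale"), ("upscale", "save")])

candidate = ("save", "txt2img")  # would feed a node's output back into the start
g.add_edge(*candidate)

# The candidate edge is rejected because the graph is no longer a DAG
print(nx.is_directed_acyclic_graph(g))  # False
```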
@@ -438,15 +443,15 @@ class Graph(BaseModel):
                 # Remove the graph prefix from the node path
                 new_graph_node_path = (
                     new_node.id
-                    if "." not in edge[1].node_id
-                    else f'{edge[1].node_id[edge[1].node_id.rindex("."):]}.{new_node.id}'
+                    if "." not in edge.destination.node_id
+                    else f'{edge.destination.node_id[edge.destination.node_id.rindex("."):]}.{new_node.id}'
                 )
                 graph.add_edge(
-                    (
-                        edge[0],
-                        EdgeConnection(
-                            node_id=new_graph_node_path, field=edge[1].field
-                        ),
+                    Edge(
+                        source=edge.source,
+                        destination=EdgeConnection(
+                            node_id=new_graph_node_path, field=edge.destination.field
+                        )
                     )
                 )
@@ -454,51 +459,51 @@ class Graph(BaseModel):
                 # Remove the graph prefix from the node path
                 new_graph_node_path = (
                     new_node.id
-                    if "." not in edge[0].node_id
-                    else f'{edge[0].node_id[edge[0].node_id.rindex("."):]}.{new_node.id}'
+                    if "." not in edge.source.node_id
+                    else f'{edge.source.node_id[edge.source.node_id.rindex("."):]}.{new_node.id}'
                 )
                 graph.add_edge(
-                    (
-                        EdgeConnection(
-                            node_id=new_graph_node_path, field=edge[0].field
-                        ),
-                        edge[1],
+                    Edge(
+                        source=EdgeConnection(
+                            node_id=new_graph_node_path, field=edge.source.field
+                        ),
+                        destination=edge.destination
                     )
                 )
 
     def _get_input_edges(
         self, node_path: str, field: Optional[str] = None
-    ) -> list[tuple[EdgeConnection, EdgeConnection]]:
+    ) -> list[Edge]:
         """Gets all input edges for a node"""
         edges = self._get_input_edges_and_graphs(node_path)
 
         # Filter to edges that match the field
-        filtered_edges = (e for e in edges if field is None or e[2][1].field == field)
+        filtered_edges = (e for e in edges if field is None or e[2].destination.field == field)
 
         # Create full node paths for each edge
         return [
-            (
-                EdgeConnection(
-                    node_id=self._get_node_path(e[0].node_id, prefix=prefix),
-                    field=e[0].field,
-                ),
-                EdgeConnection(
-                    node_id=self._get_node_path(e[1].node_id, prefix=prefix),
-                    field=e[1].field,
-                ),
+            Edge(
+                source=EdgeConnection(
+                    node_id=self._get_node_path(e.source.node_id, prefix=prefix),
+                    field=e.source.field,
+                ),
+                destination=EdgeConnection(
+                    node_id=self._get_node_path(e.destination.node_id, prefix=prefix),
+                    field=e.destination.field,
+                )
             )
             for _, prefix, e in filtered_edges
         ]
 
     def _get_input_edges_and_graphs(
         self, node_path: str, prefix: Optional[str] = None
-    ) -> list[tuple["Graph", str, tuple[EdgeConnection, EdgeConnection]]]:
+    ) -> list[tuple["Graph", str, Edge]]:
         """Gets all input edges for a node along with the graph they are in and the graph's path"""
         edges = list()
 
         # Return any input edges that appear in this graph
         edges.extend(
-            [(self, prefix, e) for e in self.edges if e[1].node_id == node_path]
+            [(self, prefix, e) for e in self.edges if e.destination.node_id == node_path]
         )
 
         node_id = (
@@ -522,37 +527,37 @@ class Graph(BaseModel):
     def _get_output_edges(
         self, node_path: str, field: str
-    ) -> list[tuple[EdgeConnection, EdgeConnection]]:
+    ) -> list[Edge]:
         """Gets all output edges for a node"""
         edges = self._get_output_edges_and_graphs(node_path)
 
         # Filter to edges that match the field
-        filtered_edges = (e for e in edges if e[2][0].field == field)
+        filtered_edges = (e for e in edges if e[2].source.field == field)
 
         # Create full node paths for each edge
         return [
-            (
-                EdgeConnection(
-                    node_id=self._get_node_path(e[0].node_id, prefix=prefix),
-                    field=e[0].field,
-                ),
-                EdgeConnection(
-                    node_id=self._get_node_path(e[1].node_id, prefix=prefix),
-                    field=e[1].field,
-                ),
+            Edge(
+                source=EdgeConnection(
+                    node_id=self._get_node_path(e.source.node_id, prefix=prefix),
+                    field=e.source.field,
+                ),
+                destination=EdgeConnection(
+                    node_id=self._get_node_path(e.destination.node_id, prefix=prefix),
+                    field=e.destination.field,
+                )
            )
            for _, prefix, e in filtered_edges
        ]
 
     def _get_output_edges_and_graphs(
         self, node_path: str, prefix: Optional[str] = None
-    ) -> list[tuple["Graph", str, tuple[EdgeConnection, EdgeConnection]]]:
+    ) -> list[tuple["Graph", str, Edge]]:
         """Gets all output edges for a node along with the graph they are in and the graph's path"""
         edges = list()
 
         # Return any input edges that appear in this graph
         edges.extend(
-            [(self, prefix, e) for e in self.edges if e[0].node_id == node_path]
+            [(self, prefix, e) for e in self.edges if e.source.node_id == node_path]
         )
 
         node_id = (
@@ -580,8 +585,8 @@ class Graph(BaseModel):
         new_input: Optional[EdgeConnection] = None,
         new_output: Optional[EdgeConnection] = None,
     ) -> bool:
-        inputs = list([e[0] for e in self._get_input_edges(node_path, "collection")])
-        outputs = list([e[1] for e in self._get_output_edges(node_path, "item")])
+        inputs = list([e.source for e in self._get_input_edges(node_path, "collection")])
+        outputs = list([e.destination for e in self._get_output_edges(node_path, "item")])
 
         if new_input is not None:
             inputs.append(new_input)
@@ -622,8 +627,8 @@ class Graph(BaseModel):
         new_input: Optional[EdgeConnection] = None,
         new_output: Optional[EdgeConnection] = None,
     ) -> bool:
-        inputs = list([e[0] for e in self._get_input_edges(node_path, "item")])
-        outputs = list([e[1] for e in self._get_output_edges(node_path, "collection")])
+        inputs = list([e.source for e in self._get_input_edges(node_path, "item")])
+        outputs = list([e.destination for e in self._get_output_edges(node_path, "collection")])
 
         if new_input is not None:
             inputs.append(new_input)
@@ -684,7 +689,7 @@ class Graph(BaseModel):
         # TODO: Cache this?
         g = nx.DiGraph()
         g.add_nodes_from([n for n in self.nodes.keys()])
-        g.add_edges_from(set([(e[0].node_id, e[1].node_id) for e in self.edges]))
+        g.add_edges_from(set([(e.source.node_id, e.destination.node_id) for e in self.edges]))
         return g
 
     def nx_graph_flat(
@@ -711,7 +716,7 @@ class Graph(BaseModel):
         # TODO: figure out if iteration nodes need to be expanded
 
-        unique_edges = set([(e[0].node_id, e[1].node_id) for e in self.edges])
+        unique_edges = set([(e.source.node_id, e.destination.node_id) for e in self.edges])
         g.add_edges_from(
             [
                 (self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix))
@@ -768,6 +773,24 @@ class GraphExecutionState(BaseModel):
         default_factory=dict,
     )
 
+    # Declare all fields as required; necessary for OpenAPI schema generation build.
+    # Technically only fields without a `default_factory` need to be listed here.
+    # See: https://github.com/pydantic/pydantic/discussions/4577
+    class Config:
+        schema_extra = {
+            'required': [
+                'id',
+                'graph',
+                'execution_graph',
+                'executed',
+                'executed_history',
+                'results',
+                'errors',
+                'prepared_source_mapping',
+                'source_prepared_mapping',
+            ]
+        }
+
     def next(self) -> BaseInvocation | None:
         """Gets the next node ready to execute."""
@@ -841,13 +864,13 @@ class GraphExecutionState(BaseModel):
             input_collection_prepared_node_id = next(
                 n[1]
                 for n in iteration_node_map
-                if n[0] == input_collection_edge[0].node_id
+                if n[0] == input_collection_edge.source.node_id
             )
             input_collection_prepared_node_output = self.results[
                 input_collection_prepared_node_id
             ]
             input_collection = getattr(
-                input_collection_prepared_node_output, input_collection_edge[0].field
+                input_collection_prepared_node_output, input_collection_edge.source.field
             )
             self_iteration_count = len(input_collection)
 
@@ -864,11 +887,11 @@ class GraphExecutionState(BaseModel):
         new_edges = list()
         for edge in input_edges:
             for input_node_id in (
-                n[1] for n in iteration_node_map if n[0] == edge[0].node_id
+                n[1] for n in iteration_node_map if n[0] == edge.source.node_id
             ):
-                new_edge = (
-                    EdgeConnection(node_id=input_node_id, field=edge[0].field),
-                    EdgeConnection(node_id="", field=edge[1].field),
+                new_edge = Edge(
+                    source=EdgeConnection(node_id=input_node_id, field=edge.source.field),
+                    destination=EdgeConnection(node_id="", field=edge.destination.field),
                 )
                 new_edges.append(new_edge)
 
@@ -893,9 +916,9 @@ class GraphExecutionState(BaseModel):
         # Add new edges to execution graph
         for edge in new_edges:
-            new_edge = (
-                edge[0],
-                EdgeConnection(node_id=new_node.id, field=edge[1].field),
+            new_edge = Edge(
+                source=edge.source,
+                destination=EdgeConnection(node_id=new_node.id, field=edge.destination.field),
             )
             self.execution_graph.add_edge(new_edge)
 
@@ -1043,26 +1066,26 @@ class GraphExecutionState(BaseModel):
         return self.execution_graph.nodes[next_node]
 
     def _prepare_inputs(self, node: BaseInvocation):
-        input_edges = [e for e in self.execution_graph.edges if e[1].node_id == node.id]
+        input_edges = [e for e in self.execution_graph.edges if e.destination.node_id == node.id]
         if isinstance(node, CollectInvocation):
             output_collection = [
-                getattr(self.results[edge[0].node_id], edge[0].field)
+                getattr(self.results[edge.source.node_id], edge.source.field)
                 for edge in input_edges
-                if edge[1].field == "item"
+                if edge.destination.field == "item"
             ]
             setattr(node, "collection", output_collection)
         else:
             for edge in input_edges:
-                output_value = getattr(self.results[edge[0].node_id], edge[0].field)
-                setattr(node, edge[1].field, output_value)
+                output_value = getattr(self.results[edge.source.node_id], edge.source.field)
+                setattr(node, edge.destination.field, output_value)
 
     # TODO: Add API for modifying underlying graph that checks if the change will be valid given the current execution state
-    def _is_edge_valid(self, edge: tuple[EdgeConnection, EdgeConnection]) -> bool:
+    def _is_edge_valid(self, edge: Edge) -> bool:
         if not self._is_edge_valid(edge):
             return False
 
         # Invalid if destination has already been prepared or executed
-        if edge[1].node_id in self.source_prepared_mapping:
+        if edge.destination.node_id in self.source_prepared_mapping:
             return False
 
         # Otherwise, the edge is valid
@ -1089,17 +1112,17 @@ class GraphExecutionState(BaseModel):
) )
self.graph.delete_node(node_path) self.graph.delete_node(node_path)
def add_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None: def add_edge(self, edge: Edge) -> None:
if not self._is_node_updatable(edge[1].node_id): if not self._is_node_updatable(edge.destination.node_id):
raise NodeAlreadyExecutedError( raise NodeAlreadyExecutedError(
f"Destination node {edge[1].node_id} has already been prepared or executed and cannot be linked to" f"Destination node {edge.destination.node_id} has already been prepared or executed and cannot be linked to"
) )
self.graph.add_edge(edge) self.graph.add_edge(edge)
def delete_edge(self, edge: tuple[EdgeConnection, EdgeConnection]) -> None: def delete_edge(self, edge: Edge) -> None:
if not self._is_node_updatable(edge[1].node_id): if not self._is_node_updatable(edge.destination.node_id):
raise NodeAlreadyExecutedError( raise NodeAlreadyExecutedError(
f"Destination node {edge[1].node_id} has already been prepared or executed and cannot have a source edge deleted" f"Destination node {edge.destination.node_id} has already been prepared or executed and cannot have a source edge deleted"
) )
self.graph.delete_edge(edge) self.graph.delete_edge(edge)
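For orientation, a minimal sketch of the tuple-to-dataclass change above; the import path is inferred from context and is not part of this diff.

from invokeai.app.services.graph import Edge, EdgeConnection  # path assumed

# Edges were previously bare (source, destination) tuples indexed as
# edge[0]/edge[1]; they are now a typed Edge with named connections.
edge = Edge(
    source=EdgeConnection(node_id="txt2img_1", field="image"),
    destination=EdgeConnection(node_id="upscale_1", field="image"),
)
assert edge.destination.node_id == "upscale_1"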

View File

@ -1,36 +1,39 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from invokeai.backend import Generate from invokeai.backend import ModelManager
from .events import EventServiceBase from .events import EventServiceBase
from .image_storage import ImageStorageBase from .image_storage import ImageStorageBase
from .restoration_services import RestorationServices
from .invocation_queue import InvocationQueueABC from .invocation_queue import InvocationQueueABC
from .item_storage import ItemStorageABC from .item_storage import ItemStorageABC
class InvocationServices: class InvocationServices:
"""Services that can be used by invocations""" """Services that can be used by invocations"""
generate: Generate # TODO: wrap Generate, or split it up from model?
events: EventServiceBase events: EventServiceBase
images: ImageStorageBase images: ImageStorageBase
queue: InvocationQueueABC queue: InvocationQueueABC
model_manager: ModelManager
restoration: RestorationServices
# NOTE: we must forward-declare any types that include invocations, since invocations can use services # NOTE: we must forward-declare any types that include invocations, since invocations can use services
graph_execution_manager: ItemStorageABC["GraphExecutionState"] graph_execution_manager: ItemStorageABC["GraphExecutionState"]
processor: "InvocationProcessorABC" processor: "InvocationProcessorABC"
def __init__( def __init__(
self, self,
generate: Generate, model_manager: ModelManager,
events: EventServiceBase, events: EventServiceBase,
images: ImageStorageBase, images: ImageStorageBase,
queue: InvocationQueueABC, queue: InvocationQueueABC,
graph_execution_manager: ItemStorageABC["GraphExecutionState"], graph_execution_manager: ItemStorageABC["GraphExecutionState"],
processor: "InvocationProcessorABC", processor: "InvocationProcessorABC",
restoration: RestorationServices,
): ):
self.generate = generate self.model_manager = model_manager
self.events = events self.events = events
self.images = images self.images = images
self.queue = queue self.queue = queue
self.graph_execution_manager = graph_execution_manager self.graph_execution_manager = graph_execution_manager
self.processor = processor self.processor = processor
self.restoration = restoration
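A hedged sketch of the new wiring; only the InvocationServices signature is taken from this diff, and the five service parameters stand in for objects created during application bootstrap.

def build_services(config, events, images, queue, graph_store, processor):
    # config: the parsed Args object; the remaining services are assumed
    # to be constructed elsewhere at startup.
    return InvocationServices(
        model_manager=get_model_manager(config),  # replaces the old Generate
        events=events,
        images=images,
        queue=queue,
        graph_execution_manager=graph_store,
        processor=processor,
        restoration=RestorationServices(config),
    )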

View File

@ -0,0 +1,120 @@
import os
import sys
import torch
from argparse import Namespace
from invokeai.backend import Args
from omegaconf import OmegaConf
from pathlib import Path
import invokeai.version
from ...backend import ModelManager
from ...backend.util import choose_precision, choose_torch_device
from ...backend import Globals
# TODO: Replace with an abstract class base ModelManagerBase
def get_model_manager(config: Args) -> ModelManager:
if not config.conf:
config_file = os.path.join(Globals.root, "configs", "models.yaml")
if not os.path.exists(config_file):
report_model_error(
config, FileNotFoundError(f"The file {config_file} could not be found.")
)
print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
import transformers # type: ignore
transformers.logging.set_verbosity_error()
import diffusers
diffusers.logging.set_verbosity_error()
# normalize the config directory relative to root
if not os.path.isabs(config.conf):
config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))
if config.embeddings:
if not os.path.isabs(config.embedding_path):
embedding_path = os.path.normpath(
os.path.join(Globals.root, config.embedding_path)
)
else:
embedding_path = config.embedding_path
else:
embedding_path = None
# migrate legacy models
ModelManager.migrate_models()
# creating the model manager
try:
device = torch.device(choose_torch_device())
precision = 'float16' if config.precision=='float16' \
else 'float32' if config.precision=='float32' \
else choose_precision(device)
model_manager = ModelManager(
OmegaConf.load(config.conf),
precision=precision,
device_type=device,
max_loaded_models=config.max_loaded_models,
embedding_path = Path(embedding_path) if embedding_path else None,
)
except (FileNotFoundError, TypeError, AssertionError) as e:
report_model_error(config, e)
except (IOError, KeyError) as e:
print(f"{e}. Aborting.")
sys.exit(-1)
# try to autoconvert new models
# autoimport new .ckpt files
if path := config.autoconvert:
model_manager.autoconvert_weights(
conf_path=config.conf,
weights_directory=path,
)
return model_manager
def report_model_error(opt: Namespace, e: Exception):
print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
print(
"** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
)
yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
if yes_to_all:
print(
"** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
)
else:
response = input(
"Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
)
if response.startswith(("n", "N")):
return
print("invokeai-configure is launching....\n")
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed
root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
config = ["--config", opt.conf] if opt.conf is not None else []
previous_config = sys.argv
sys.argv = ["invokeai-configure"]
sys.argv.extend(root_dir)
sys.argv.extend(config)  # config is already a list of CLI tokens
if yes_to_all is not None:
for arg in yes_to_all.split():
sys.argv.append(arg)
from invokeai.frontend.install import invokeai_configure
invokeai_configure()
# TODO: Figure out how to restart
# print('** InvokeAI will now restart')
# sys.argv = previous_args
# main() # would rather do a os.exec(), but doesn't exist?
# sys.exit(0)
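A usage sketch, assuming the standard Args object exported from invokeai.backend (flag names as defined there):

from invokeai.backend import Args

opt = Args()
opt.parse_args([])                 # or sys.argv[1:] in a real CLI
manager = get_model_manager(opt)
model_info = manager.get_model()   # loads the current/default model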

View File

@ -0,0 +1,109 @@
import sys
import traceback
import torch
from ...backend.restoration import Restoration
from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE
# This should be a real base class for postprocessing functions,
# but right now we just instantiate the existing gfpgan, esrgan
# and codeformer functions.
class RestorationServices:
'''Face restoration and upscaling'''
def __init__(self,args):
try:
gfpgan, codeformer, esrgan = None, None, None
if args.restore or args.esrgan:
restoration = Restoration()
if args.restore:
gfpgan, codeformer = restoration.load_face_restore_models(
args.gfpgan_model_path
)
else:
print(">> Face restoration disabled")
if args.esrgan:
esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
else:
print(">> Upscaling disabled")
else:
print(">> Face restoration and upscaling disabled")
except (ModuleNotFoundError, ImportError):
print(traceback.format_exc(), file=sys.stderr)
print(">> You may need to install the ESRGAN and/or GFPGAN modules")
self.device = torch.device(choose_torch_device())
self.gfpgan = gfpgan
self.codeformer = codeformer
self.esrgan = esrgan
# note that this one method does gfpgan and codeformer reconstruction, as well as
# esrgan upscaling
# TO DO: refactor into separate methods
def upscale_and_reconstruct(
self,
image_list,
facetool="gfpgan",
upscale=None,
upscale_denoise_str=0.75,
strength=0.0,
codeformer_fidelity=0.75,
save_original=False,
image_callback=None,
prefix=None,
):
results = []
for r in image_list:
image, seed = r
try:
if strength > 0:
if self.gfpgan is not None or self.codeformer is not None:
if facetool == "gfpgan":
if self.gfpgan is None:
print(
">> GFPGAN not found. Face restoration is disabled."
)
else:
image = self.gfpgan.process(image, strength, seed)
if facetool == "codeformer":
if self.codeformer is None:
print(
">> CodeFormer not found. Face restoration is disabled."
)
else:
cf_device = (
CPU_DEVICE if self.device == MPS_DEVICE else self.device
)
image = self.codeformer.process(
image=image,
strength=strength,
device=cf_device,
seed=seed,
fidelity=codeformer_fidelity,
)
else:
print(">> Face Restoration is disabled.")
if upscale is not None:
if self.esrgan is not None:
if len(upscale) < 2:
upscale.append(0.75)
image = self.esrgan.process(
image,
upscale[1],
seed,
int(upscale[0]),
denoise_str=upscale_denoise_str,
)
else:
print(">> ESRGAN is disabled. Image not upscaled.")
except Exception as e:
print(
f">> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
)
if image_callback is not None:
image_callback(image, seed, upscaled=True, use_prefix=prefix)
else:
r[0] = image
results.append([image, seed])
return results
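A hedged usage sketch; `opt` stands in for the parsed Args with the restore/esrgan flags set, and `pil_image`/`seed` for a generation result:

def postprocess(opt, pil_image, seed):
    restoration = RestorationServices(opt)
    results = restoration.upscale_and_reconstruct(
        [[pil_image, seed]],
        facetool="codeformer",
        strength=0.6,   # face restoration strength; 0 disables it
        upscale=[2],    # scale factor; ESRGAN strength defaults to 0.75
    )
    return results[0][0]  # the processed PIL image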

View File

@ -2,6 +2,15 @@
Initialization file for invokeai.backend Initialization file for invokeai.backend
""" """
from .generate import Generate from .generate import Generate
from .generator import (
InvokeAIGeneratorBasicParams,
InvokeAIGenerator,
InvokeAIGeneratorOutput,
Txt2Img,
Img2Img,
Inpaint
)
from .model_management import ModelManager from .model_management import ModelManager
from .safety_checker import SafetyChecker
from .args import Args from .args import Args
from .globals import Globals from .globals import Globals

View File

@ -490,7 +490,7 @@ class Args(object):
"-z", "-z",
type=int, type=int,
default=6, default=6,
choices=range(0, 9), choices=range(0, 10),
dest="png_compression", dest="png_compression",
help="level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.", help="level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.",
) )
@ -943,7 +943,6 @@ class Args(object):
"--png_compression", "--png_compression",
"-z", "-z",
type=int, type=int,
default=6,
choices=range(0, 10), choices=range(0, 10),
dest="png_compression", dest="png_compression",
help="level of PNG compression, from 0 (none) to 9 (maximum). [6]", help="level of PNG compression, from 0 (none) to 9 (maximum). [6]",

View File

@ -25,18 +25,19 @@ from accelerate.utils import set_seed
from diffusers.pipeline_utils import DiffusionPipeline from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.import_utils import is_xformers_available
from omegaconf import OmegaConf from omegaconf import OmegaConf
from pathlib import Path
from .args import metadata_from_png from .args import metadata_from_png
from .generator import infill_methods from .generator import infill_methods
from .globals import Globals, global_cache_dir from .globals import Globals, global_cache_dir
from .image_util import InitImageResizer, PngWriter, Txt2Mask, configure_model_padding from .image_util import InitImageResizer, PngWriter, Txt2Mask, configure_model_padding
from .model_management import ModelManager from .model_management import ModelManager
from .safety_checker import SafetyChecker
from .prompting import get_uc_and_c_and_ec from .prompting import get_uc_and_c_and_ec
from .prompting.conditioning import log_tokenization from .prompting.conditioning import log_tokenization
from .stable_diffusion import HuggingFaceConceptsLibrary from .stable_diffusion import HuggingFaceConceptsLibrary
from .util import choose_precision, choose_torch_device from .util import choose_precision, choose_torch_device
def fix_func(orig): def fix_func(orig):
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
@ -222,6 +223,7 @@ class Generate:
self.precision, self.precision,
max_loaded_models=max_loaded_models, max_loaded_models=max_loaded_models,
sequential_offload=self.free_gpu_mem, sequential_offload=self.free_gpu_mem,
embedding_path=Path(self.embedding_path),
) )
# don't accept invalid models # don't accept invalid models
fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME
@ -244,31 +246,8 @@ class Generate:
# load safety checker if requested # load safety checker if requested
if safety_checker: if safety_checker:
try: print(">> Initializing NSFW checker")
print(">> Initializing NSFW checker") self.safety_checker = SafetyChecker(self.device)
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from transformers import AutoFeatureExtractor
safety_model_id = "CompVis/stable-diffusion-safety-checker"
safety_model_path = global_cache_dir("hub")
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
self.safety_checker.to(self.device)
except Exception:
print(
"** An error was encountered while installing the safety checker:"
)
print(traceback.format_exc())
else: else:
print(">> NSFW checker is disabled") print(">> NSFW checker is disabled")
@ -495,18 +474,6 @@ class Generate:
torch.cuda.reset_peak_memory_stats() torch.cuda.reset_peak_memory_stats()
results = list() results = list()
init_image = None
mask_image = None
try:
if (
self.free_gpu_mem
and self.model.cond_stage_model.device != self.model.device
):
self.model.cond_stage_model.device = self.model.device
self.model.cond_stage_model.to(self.model.device)
except AttributeError:
pass
try: try:
uc, c, extra_conditioning_info = get_uc_and_c_and_ec( uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
@ -535,15 +502,6 @@ class Generate:
generator.set_variation(self.seed, variation_amount, with_variations) generator.set_variation(self.seed, variation_amount, with_variations)
generator.use_mps_noise = use_mps_noise generator.use_mps_noise = use_mps_noise
checker = (
{
"checker": self.safety_checker,
"extractor": self.safety_feature_extractor,
}
if self.safety_checker
else None
)
results = generator.generate( results = generator.generate(
prompt, prompt,
iterations=iterations, iterations=iterations,
@ -570,7 +528,7 @@ class Generate:
embiggen_strength=embiggen_strength, embiggen_strength=embiggen_strength,
inpaint_replace=inpaint_replace, inpaint_replace=inpaint_replace,
mask_blur_radius=mask_blur_radius, mask_blur_radius=mask_blur_radius,
safety_checker=checker, safety_checker=self.safety_checker,
seam_size=seam_size, seam_size=seam_size,
seam_blur=seam_blur, seam_blur=seam_blur,
seam_strength=seam_strength, seam_strength=seam_strength,
@ -952,18 +910,6 @@ class Generate:
self.generators = {} self.generators = {}
set_seed(random.randrange(0, np.iinfo(np.uint32).max)) set_seed(random.randrange(0, np.iinfo(np.uint32).max))
if self.embedding_path is not None:
print(f">> Loading embeddings from {self.embedding_path}")
for root, _, files in os.walk(self.embedding_path):
for name in files:
ti_path = os.path.join(root, name)
self.model.textual_inversion_manager.load_textual_inversion(
ti_path, defer_injecting_tokens=True
)
print(
f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
)
self.model_name = model_name self.model_name = model_name
self._set_scheduler() # requires self.model_name to be set first self._set_scheduler() # requires self.model_name to be set first
return self.model return self.model
@ -1010,7 +956,7 @@ class Generate:
): ):
results = [] results = []
for r in image_list: for r in image_list:
image, seed = r image, seed, _ = r
try: try:
if strength > 0: if strength > 0:
if self.gfpgan is not None or self.codeformer is not None: if self.gfpgan is not None or self.codeformer is not None:

View File

@ -1,5 +1,13 @@
""" """
Initialization file for the invokeai.generator package Initialization file for the invokeai.generator package
""" """
from .base import Generator from .base import (
InvokeAIGenerator,
InvokeAIGeneratorBasicParams,
InvokeAIGeneratorOutput,
Txt2Img,
Img2Img,
Inpaint,
Generator,
)
from .inpaint import infill_methods from .inpaint import infill_methods

View File

@ -4,11 +4,15 @@ including img2img, txt2img, and inpaint
""" """
from __future__ import annotations from __future__ import annotations
import itertools
import dataclasses
import diffusers
import os import os
import random import random
import traceback import traceback
from abc import ABCMeta
from argparse import Namespace
from contextlib import nullcontext from contextlib import nullcontext
from pathlib import Path
import cv2 import cv2
import numpy as np import numpy as np
@ -17,12 +21,258 @@ from PIL import Image, ImageChops, ImageFilter
from accelerate.utils import set_seed from accelerate.utils import set_seed
from diffusers import DiffusionPipeline from diffusers import DiffusionPipeline
from tqdm import trange from tqdm import trange
from typing import List, Iterator, Type
from dataclasses import dataclass, field
from diffusers.schedulers import SchedulerMixin as Scheduler
import invokeai.assets.web as web_assets from ..image_util import configure_model_padding
from ..util.util import rand_perlin_2d from ..util.util import rand_perlin_2d
from ..safety_checker import SafetyChecker
from ..prompting.conditioning import get_uc_and_c_and_ec
from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
downsampling = 8 downsampling = 8
CAUTION_IMG = "caution.png"
@dataclass
class InvokeAIGeneratorBasicParams:
seed: int=None
width: int=512
height: int=512
cfg_scale: float=7.5
steps: int=20
ddim_eta: float=0.0
scheduler: str='ddim'
precision: str='float16'
perlin: float=0.0
threshold: float=0.0
seamless: bool=False
seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y'])
h_symmetry_time_pct: float=None
v_symmetry_time_pct: float=None
variation_amount: float = 0.0
with_variations: list=field(default_factory=list)
safety_checker: SafetyChecker=None
@dataclass
class InvokeAIGeneratorOutput:
'''
InvokeAIGeneratorOutput is a dataclass that contains the outputs of a generation
operation, including the image, its seed, the model name used to generate the image
and the model hash, as well as all the generate() parameters that went into
generating the image (in .params, also available as attributes)
'''
image: Image
seed: int
model_hash: str
attention_maps_images: List[Image]
params: Namespace
# we are interposing a wrapper around the original Generator classes so that
# old code that calls Generate will continue to work.
class InvokeAIGenerator(metaclass=ABCMeta):
scheduler_map = dict(
ddim=diffusers.DDIMScheduler,
dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_dpm_2=diffusers.KDPM2DiscreteScheduler,
k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_euler=diffusers.EulerDiscreteScheduler,
k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
k_heun=diffusers.HeunDiscreteScheduler,
k_lms=diffusers.LMSDiscreteScheduler,
plms=diffusers.PNDMScheduler,
)
def __init__(self,
model_info: dict,
params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
):
self.model_info=model_info
self.params=params
def generate(self,
prompt: str='',
callback: callable=None,
step_callback: callable=None,
iterations: int=1,
**keyword_args,
)->Iterator[InvokeAIGeneratorOutput]:
'''
Return an iterator across the indicated number of generations.
Each time the iterator is called it will return an InvokeAIGeneratorOutput
object. Use like this:
outputs = txt2img.generate(prompt='banana sushi', iterations=5)
for result in outputs:
print(result.image, result.seed)
In the typical case of wanting just a single image, iterations
defaults to 1, so you can simply call:
output = next(txt2img.generate(prompt='banana sushi'))
Pass None to get an infinite iterator.
outputs = txt2img.generate(prompt='banana sushi', iterations=None)
for o in outputs:
print(o.image, o.seed)
'''
generator_args = dataclasses.asdict(self.params)
generator_args.update(keyword_args)
model_info = self.model_info
model_name = model_info['model_name']
model:StableDiffusionGeneratorPipeline = model_info['model']
model_hash = model_info['hash']
scheduler: Scheduler = self.get_scheduler(
model=model,
scheduler_name=generator_args.get('scheduler')
)
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt,model=model)
gen_class = self._generator_class()
generator = gen_class(model, self.params.precision)
if self.params.variation_amount > 0:
generator.set_variation(generator_args.get('seed'),
generator_args.get('variation_amount'),
generator_args.get('with_variations')
)
if isinstance(model, DiffusionPipeline):
for component in [model.unet, model.vae]:
configure_model_padding(component,
generator_args.get('seamless',False),
generator_args.get('seamless_axes')
)
else:
configure_model_padding(model,
generator_args.get('seamless',False),
generator_args.get('seamless_axes')
)
iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1)
for i in iteration_count:
results = generator.generate(prompt,
conditioning=(uc, c, extra_conditioning_info),
step_callback=step_callback,
sampler=scheduler,
**generator_args,
)
output = InvokeAIGeneratorOutput(
image=results[0][0],
seed=results[0][1],
attention_maps_images=results[0][2],
model_hash = model_hash,
params=Namespace(model_name=model_name,**generator_args),
)
if callback:
callback(output)
yield output
@classmethod
def schedulers(cls)->List[str]:
'''
Return list of all the schedulers that we currently handle.
'''
return list(cls.scheduler_map.keys())
def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]):
return generator_class(model, self.params.precision)
def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
scheduler_class = self.scheduler_map.get(scheduler_name, self.scheduler_map['ddim'])
scheduler = scheduler_class.from_config(model.scheduler.config)
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False
return scheduler
@classmethod
def _generator_class(cls)->Type[Generator]:
'''
In derived classes, return the generator class to apply. If not
overridden, the base Generator class is returned; otherwise the
wrapper and generator class names parallel each other.
'''
return Generator
# ------------------------------------
class Txt2Img(InvokeAIGenerator):
@classmethod
def _generator_class(cls):
from .txt2img import Txt2Img
return Txt2Img
# ------------------------------------
class Img2Img(InvokeAIGenerator):
def generate(self,
init_image: Image | torch.FloatTensor,
strength: float=0.75,
**keyword_args
)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(init_image=init_image,
strength=strength,
**keyword_args
)
@classmethod
def _generator_class(cls):
from .img2img import Img2Img
return Img2Img
# ------------------------------------
# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
class Inpaint(Img2Img):
def generate(self,
mask_image: Image | torch.FloatTensor,
# Seam settings - when 0, doesn't fill seam
seam_size: int = 0,
seam_blur: int = 0,
seam_strength: float = 0.7,
seam_steps: int = 10,
tile_size: int = 32,
inpaint_replace=False,
infill_method=None,
inpaint_width=None,
inpaint_height=None,
inpaint_fill: tuple[int, int, int, int] = (0x7F, 0x7F, 0x7F, 0xFF),
**keyword_args
)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(
mask_image=mask_image,
seam_size=seam_size,
seam_blur=seam_blur,
seam_strength=seam_strength,
seam_steps=seam_steps,
tile_size=tile_size,
inpaint_replace=inpaint_replace,
infill_method=infill_method,
inpaint_width=inpaint_width,
inpaint_height=inpaint_height,
inpaint_fill=inpaint_fill,
**keyword_args
)
@classmethod
def _generator_class(cls):
from .inpaint import Inpaint
return Inpaint
# ------------------------------------
class Embiggen(Txt2Img):
def generate(
self,
embiggen: list=None,
embiggen_tiles: list = None,
strength: float=0.75,
**kwargs)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(embiggen=embiggen,
embiggen_tiles=embiggen_tiles,
strength=strength,
**kwargs)
@classmethod
def _generator_class(cls):
from .embiggen import Embiggen
return Embiggen
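Tying the wrapper classes above together, a hedged sketch (model_info as returned by ModelManager.get_model(); not a verbatim excerpt from this commit):

def render(manager):  # manager: a configured ModelManager
    model_info = manager.get_model('stable-diffusion-1.5')
    params = InvokeAIGeneratorBasicParams(steps=30, scheduler='k_euler_a')
    txt2img = Txt2Img(model_info, params=params)
    for output in txt2img.generate(prompt='banana sushi', iterations=2):
        output.image.save(f'sushi-{output.seed}.png')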
class Generator: class Generator:
@ -44,7 +294,6 @@ class Generator:
self.with_variations = [] self.with_variations = []
self.use_mps_noise = False self.use_mps_noise = False
self.free_gpu_mem = None self.free_gpu_mem = None
self.caution_img = None
# this is going to be overridden in img2img.py, txt2img.py and inpaint.py # this is going to be overridden in img2img.py, txt2img.py and inpaint.py
def get_make_image(self, prompt, **kwargs): def get_make_image(self, prompt, **kwargs):
@ -64,10 +313,10 @@ class Generator:
def generate( def generate(
self, self,
prompt, prompt,
init_image,
width, width,
height, height,
sampler, sampler,
init_image=None,
iterations=1, iterations=1,
seed=None, seed=None,
image_callback=None, image_callback=None,
@ -76,7 +325,7 @@ class Generator:
perlin=0.0, perlin=0.0,
h_symmetry_time_pct=None, h_symmetry_time_pct=None,
v_symmetry_time_pct=None, v_symmetry_time_pct=None,
safety_checker: dict = None, safety_checker: SafetyChecker=None,
free_gpu_mem: bool = False, free_gpu_mem: bool = False,
**kwargs, **kwargs,
): ):
@ -126,12 +375,13 @@ class Generator:
print("** An error occurred while getting initial noise **") print("** An error occurred while getting initial noise **")
print(traceback.format_exc()) print(traceback.format_exc())
image = make_image(x_T) # Pass on the seed in case a layer beneath us needs to generate noise on its own.
image = make_image(x_T, seed)
if self.safety_checker is not None: if self.safety_checker is not None:
image = self.safety_check(image) image = self.safety_checker.check(image)
results.append([image, seed]) results.append([image, seed, attention_maps_images])
if image_callback is not None: if image_callback is not None:
attention_maps_image = ( attention_maps_image = (
@ -248,7 +498,8 @@ class Generator:
matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask) matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask)
return matched_result return matched_result
def sample_to_lowres_estimated_image(self, samples): @staticmethod
def sample_to_lowres_estimated_image(samples):
# originally adapted from code by @erucipe and @keturn here: # originally adapted from code by @erucipe and @keturn here:
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7 # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
@ -289,19 +540,7 @@ class Generator:
if self.variation_amount > 0: if self.variation_amount > 0:
random.seed() # reset RNG to an actually random state, so we can get a random seed for variations random.seed() # reset RNG to an actually random state, so we can get a random seed for variations
seed = random.randrange(0, np.iinfo(np.uint32).max) seed = random.randrange(0, np.iinfo(np.uint32).max)
return (seed, initial_noise) return (seed, initial_noise)
else:
return (seed, None)
# returns a tensor filled with random numbers from a normal distribution
def get_noise(self, width, height):
"""
Returns a tensor filled with random numbers, either form a normal distribution
(txt2img) or from the latent image (img2img, inpaint)
"""
raise NotImplementedError(
"get_noise() must be implemented in a descendent class"
)
def get_perlin_noise(self, width, height): def get_perlin_noise(self, width, height):
fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device
@ -362,53 +601,6 @@ class Generator:
return v2 return v2
def safety_check(self, image: Image.Image):
"""
If the CompViz safety checker flags an NSFW image, we
blur it out.
"""
import diffusers
checker = self.safety_checker["checker"]
extractor = self.safety_checker["extractor"]
features = extractor([image], return_tensors="pt")
features.to(self.model.device)
# unfortunately checker requires the numpy version, so we have to convert back
x_image = np.array(image).astype(np.float32) / 255.0
x_image = x_image[None].transpose(0, 3, 1, 2)
diffusers.logging.set_verbosity_error()
checked_image, has_nsfw_concept = checker(
images=x_image, clip_input=features.pixel_values
)
if has_nsfw_concept[0]:
print(
"** An image with potential non-safe content has been detected. A blurred image will be returned. **"
)
return self.blur(image)
else:
return image
def blur(self, input):
blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
try:
caution = self.get_caution_img()
if caution:
blurry.paste(caution, (0, 0), caution)
except FileNotFoundError:
pass
return blurry
def get_caution_img(self):
path = None
if self.caution_img:
return self.caution_img
path = Path(web_assets.__path__[0]) / CAUTION_IMG
caution = Image.open(path)
self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
return self.caution_img
# this is a handy routine for debugging use. Given a generated sample, # this is a handy routine for debugging use. Given a generated sample,
# convert it into a PNG image and store it at the indicated path # convert it into a PNG image and store it at the indicated path
def save_sample(self, sample, filepath): def save_sample(self, sample, filepath):

View File

@ -1,8 +1,10 @@
""" """
invokeai.backend.generator.img2img descends from .generator invokeai.backend.generator.img2img descends from .generator
""" """
from typing import Optional
import torch import torch
from accelerate.utils import set_seed
from diffusers import logging from diffusers import logging
from ..stable_diffusion import ( from ..stable_diffusion import (
@ -61,10 +63,11 @@ class Img2Img(Generator):
), ),
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T): def make_image(x_T: torch.Tensor, seed: int):
# FIXME: use x_T for initial seeded noise # FIXME: use x_T for initial seeded noise
# We're not at the moment because the pipeline automatically resizes init_image if # We're not at the moment because the pipeline automatically resizes init_image if
# necessary, which the x_T input might not match. # necessary, which the x_T input might not match.
# In the meantime, reset the seed prior to generating pipeline output so we at least get the same result.
logging.set_verbosity_error() # quench safety check warnings logging.set_verbosity_error() # quench safety check warnings
pipeline_output = pipeline.img2img_from_embeddings( pipeline_output = pipeline.img2img_from_embeddings(
init_image, init_image,
@ -73,6 +76,7 @@ class Img2Img(Generator):
conditioning_data, conditioning_data,
noise_func=self.get_noise_like, noise_func=self.get_noise_like,
callback=step_callback, callback=step_callback,
seed=seed,
) )
if ( if (
pipeline_output.attention_map_saver is not None pipeline_output.attention_map_saver is not None

View File

@ -159,6 +159,7 @@ class Inpaint(Img2Img):
seam_size: int, seam_size: int,
seam_blur: int, seam_blur: int,
prompt, prompt,
seed,
sampler, sampler,
steps, steps,
cfg_scale, cfg_scale,
@ -192,7 +193,7 @@ class Inpaint(Img2Img):
seam_noise = self.get_noise(im.width, im.height) seam_noise = self.get_noise(im.width, im.height)
result = make_image(seam_noise) result = make_image(seam_noise, seed)
return result return result
@ -310,7 +311,7 @@ class Inpaint(Img2Img):
uc, c, cfg_scale uc, c, cfg_scale
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T): def make_image(x_T: torch.Tensor, seed: int):
pipeline_output = pipeline.inpaint_from_embeddings( pipeline_output = pipeline.inpaint_from_embeddings(
init_image=init_image, init_image=init_image,
mask=1 - mask, # expects white means "paint here." mask=1 - mask, # expects white means "paint here."
@ -319,6 +320,7 @@ class Inpaint(Img2Img):
conditioning_data=conditioning_data, conditioning_data=conditioning_data,
noise_func=self.get_noise_like, noise_func=self.get_noise_like,
callback=step_callback, callback=step_callback,
seed=seed,
) )
if ( if (
@ -341,6 +343,7 @@ class Inpaint(Img2Img):
seam_size, seam_size,
seam_blur, seam_blur,
prompt, prompt,
seed,
sampler, sampler,
seam_steps, seam_steps,
cfg_scale, cfg_scale,

View File

@ -61,7 +61,7 @@ class Txt2Img(Generator):
), ),
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T) -> PIL.Image.Image: def make_image(x_T: torch.Tensor, _: int) -> PIL.Image.Image:
pipeline_output = pipeline.image_from_embeddings( pipeline_output = pipeline.image_from_embeddings(
latents=torch.zeros_like(x_T, dtype=self.torch_dtype()), latents=torch.zeros_like(x_T, dtype=self.torch_dtype()),
noise=x_T, noise=x_T,

View File

@ -64,7 +64,7 @@ class Txt2Img2Img(Generator):
), ),
).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta) ).add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta)
def make_image(x_T): def make_image(x_T: torch.Tensor, _: int):
first_pass_latent_output, _ = pipeline.latents_from_embeddings( first_pass_latent_output, _ = pipeline.latents_from_embeddings(
latents=torch.zeros_like(x_T), latents=torch.zeros_like(x_T),
num_inference_steps=steps, num_inference_steps=steps,
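The same (x_T, seed) signature change recurs across img2img, inpaint, txt2img and txt2img2img above (the txt2img variants simply ignore the seed). A toy stand-in illustrating the new callback contract, not the pipeline code itself:

import torch
from PIL import Image

def make_image(x_T: torch.Tensor, seed: int) -> Image.Image:
    torch.manual_seed(seed)  # layers beneath may re-seed their own RNG
    data = (x_T.clamp(0, 1) * 255).byte().squeeze(0)
    return Image.fromarray(data.permute(1, 2, 0).numpy())

img = make_image(torch.rand(1, 3, 64, 64), seed=42)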

View File

@ -1075,9 +1075,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
dlogging.set_verbosity_error() dlogging.set_verbosity_error()
checkpoint = ( checkpoint = (
load_file(checkpoint_path) torch.load(checkpoint_path)
if Path(checkpoint_path).suffix == ".safetensors" if Path(checkpoint_path).suffix == ".ckpt"
else torch.load(checkpoint_path) else load_file(checkpoint_path)
) )
cache_dir = global_cache_dir("hub") cache_dir = global_cache_dir("hub")
pipeline_class = ( pipeline_class = (
@ -1274,7 +1275,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
tokenizer=tokenizer, tokenizer=tokenizer,
unet=unet.to(precision), unet=unet.to(precision),
scheduler=scheduler, scheduler=scheduler,
safety_checker=safety_checker.to(precision), safety_checker=None if return_generator_pipeline else safety_checker.to(precision),
feature_extractor=feature_extractor, feature_extractor=feature_extractor,
) )
else: else:
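The inverted suffix test above (safetensors becomes the default read path, with .ckpt as the exception) as a standalone sketch:

from pathlib import Path

import torch
from safetensors.torch import load_file

def load_state_dict(checkpoint_path: str) -> dict:
    # .ckpt files go through torch.load; anything else is assumed
    # to be in the safetensors format.
    p = Path(checkpoint_path)
    if p.suffix == ".ckpt":
        return torch.load(p, map_location="cpu")
    return load_file(p)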

View File

@ -34,8 +34,7 @@ from picklescan.scanner import scan_file_path
from invokeai.backend.globals import Globals, global_cache_dir from invokeai.backend.globals import Globals, global_cache_dir
from ..stable_diffusion import StableDiffusionGeneratorPipeline from ..stable_diffusion import StableDiffusionGeneratorPipeline
from ..util import CPU_DEVICE, ask_user, download_with_resume from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume
class SDLegacyType(Enum): class SDLegacyType(Enum):
V1 = 1 V1 = 1
@ -51,23 +50,29 @@ VAE_TO_REPO_ID = { # hack, see note in convert_and_import()
} }
class ModelManager(object): class ModelManager(object):
'''
Model manager handles loading, caching, importing, deleting, converting, and editing models.
'''
def __init__( def __init__(
self, self,
config: OmegaConf, config: OmegaConf|Path,
device_type: torch.device = CPU_DEVICE, device_type: torch.device = CUDA_DEVICE,
precision: str = "float16", precision: str = "float16",
max_loaded_models=DEFAULT_MAX_MODELS, max_loaded_models=DEFAULT_MAX_MODELS,
sequential_offload=False, sequential_offload=False,
embedding_path: Path=None,
): ):
""" """
Initialize with the path to the models.yaml config file, Initialize with the path to the models.yaml config file or
the torch device type, and precision. The optional an initialized OmegaConf dictionary. Optional parameters
min_avail_mem argument specifies how much unused system are the torch device type, precision, max_loaded_models,
(CPU) memory to preserve. The cache of models in RAM will and sequential_offload boolean. Note that the default device
grow until this value is approached. Default is 2G. type and precision are set up for a CUDA system running at half precision.
""" """
# prevent nasty-looking CLIP log message # prevent nasty-looking CLIP log message
transformers.logging.set_verbosity_error() transformers.logging.set_verbosity_error()
if not isinstance(config, DictConfig):
config = OmegaConf.load(config)
self.config = config self.config = config
self.precision = precision self.precision = precision
self.device = torch.device(device_type) self.device = torch.device(device_type)
@ -76,6 +81,7 @@ class ModelManager(object):
self.stack = [] # this is an LRU FIFO self.stack = [] # this is an LRU FIFO
self.current_model = None self.current_model = None
self.sequential_offload = sequential_offload self.sequential_offload = sequential_offload
self.embedding_path = embedding_path
def valid_model(self, model_name: str) -> bool: def valid_model(self, model_name: str) -> bool:
""" """
@ -84,12 +90,15 @@ class ModelManager(object):
""" """
return model_name in self.config return model_name in self.config
def get_model(self, model_name: str): def get_model(self, model_name: str=None)->dict:
""" """
Given a model named identified in models.yaml, return Given a model named identified in models.yaml, return
the model object. If in RAM will load into GPU VRAM. the model object. If in RAM will load into GPU VRAM.
If on disk, will load from there. If on disk, will load from there.
""" """
if not model_name:
return self.get_model(self.current_model) if self.current_model else self.get_model(self.default_model())
if not self.valid_model(model_name): if not self.valid_model(model_name):
print( print(
f'** "{model_name}" is not a known model name. Please check your models.yaml file' f'** "{model_name}" is not a known model name. Please check your models.yaml file'
@ -104,7 +113,7 @@ class ModelManager(object):
if model_name in self.models: if model_name in self.models:
requested_model = self.models[model_name]["model"] requested_model = self.models[model_name]["model"]
print(f">> Retrieving model {model_name} from system RAM cache") print(f">> Retrieving model {model_name} from system RAM cache")
self.models[model_name]["model"] = self._model_from_cpu(requested_model) requested_model.ready()
width = self.models[model_name]["width"] width = self.models[model_name]["width"]
height = self.models[model_name]["height"] height = self.models[model_name]["height"]
hash = self.models[model_name]["hash"] hash = self.models[model_name]["hash"]
@ -112,6 +121,7 @@ class ModelManager(object):
else: # we're about to load a new model, so potentially offload the least recently used one else: # we're about to load a new model, so potentially offload the least recently used one
requested_model, width, height, hash = self._load_model(model_name) requested_model, width, height, hash = self._load_model(model_name)
self.models[model_name] = { self.models[model_name] = {
"model_name": model_name,
"model": requested_model, "model": requested_model,
"width": width, "width": width,
"height": height, "height": height,
@ -121,6 +131,7 @@ class ModelManager(object):
self.current_model = model_name self.current_model = model_name
self._push_newest_model(model_name) self._push_newest_model(model_name)
return { return {
"model_name": model_name,
"model": requested_model, "model": requested_model,
"width": width, "width": width,
"height": height, "height": height,
@ -351,6 +362,7 @@ class ModelManager(object):
raise NotImplementedError( raise NotImplementedError(
f"Unknown model format {model_name}: {model_format}" f"Unknown model format {model_name}: {model_format}"
) )
self._add_embeddings_to_model(model)
# usage statistics # usage statistics
toc = time.time() toc = time.time()
@ -499,7 +511,7 @@ class ModelManager(object):
print(f">> Offloading {model_name} to CPU") print(f">> Offloading {model_name} to CPU")
model = self.models[model_name]["model"] model = self.models[model_name]["model"]
self.models[model_name]["model"] = self._model_to_cpu(model) model.offload_all()
gc.collect() gc.collect()
if self._has_cuda(): if self._has_cuda():
@ -557,7 +569,7 @@ class ModelManager(object):
""" """
model_name = model_name or Path(repo_or_path).stem model_name = model_name or Path(repo_or_path).stem
model_description = ( model_description = (
model_description or f"Imported diffusers model {model_name}" description or f"Imported diffusers model {model_name}"
) )
new_config = dict( new_config = dict(
description=model_description, description=model_description,
@ -720,9 +732,9 @@ class ModelManager(object):
# another round of heuristics to guess the correct config file. # another round of heuristics to guess the correct config file.
checkpoint = ( checkpoint = (
safetensors.torch.load_file(model_path) torch.load(model_path)
if model_path.suffix == ".safetensors" if model_path.suffix == ".ckpt"
else torch.load(model_path) else safetensors.torch.load_file(model_path)
) )
# additional probing needed if no config file provided # additional probing needed if no config file provided
@ -1044,43 +1056,6 @@ class ModelManager(object):
self.stack.remove(model_name) self.stack.remove(model_name)
self.models.pop(model_name, None) self.models.pop(model_name, None)
def _model_to_cpu(self, model):
if self.device == CPU_DEVICE:
return model
if isinstance(model, StableDiffusionGeneratorPipeline):
model.offload_all()
return model
model.cond_stage_model.device = CPU_DEVICE
model.to(CPU_DEVICE)
for submodel in ("first_stage_model", "cond_stage_model", "model"):
try:
getattr(model, submodel).to(CPU_DEVICE)
except AttributeError:
pass
return model
def _model_from_cpu(self, model):
if self.device == CPU_DEVICE:
return model
if isinstance(model, StableDiffusionGeneratorPipeline):
model.ready()
return model
model.to(self.device)
model.cond_stage_model.device = self.device
for submodel in ("first_stage_model", "cond_stage_model", "model"):
try:
getattr(model, submodel).to(self.device)
except AttributeError:
pass
return model
def _pop_oldest_model(self): def _pop_oldest_model(self):
""" """
Remove the first element of the FIFO, which ought Remove the first element of the FIFO, which ought
@ -1098,6 +1073,19 @@ class ModelManager(object):
self.stack.remove(model_name) self.stack.remove(model_name)
self.stack.append(model_name) self.stack.append(model_name)
def _add_embeddings_to_model(self, model: StableDiffusionGeneratorPipeline):
if self.embedding_path is not None:
print(f">> Loading embeddings from {self.embedding_path}")
for root, _, files in os.walk(self.embedding_path):
for name in files:
ti_path = os.path.join(root, name)
model.textual_inversion_manager.load_textual_inversion(
ti_path, defer_injecting_tokens=True
)
print(
f'>> Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}'
)
def _has_cuda(self) -> bool: def _has_cuda(self) -> bool:
return self.device.type == "cuda" return self.device.type == "cuda"
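A hedged sketch of the loosened interface: the constructor now accepts a plain path, and get_model() with no argument falls back to the current or default model.

from pathlib import Path
from invokeai.backend import ModelManager

manager = ModelManager(
    "configs/models.yaml",              # an OmegaConf object or a config path
    precision="float16",
    embedding_path=Path("embeddings"),  # TI files attached at model load time
)
info = manager.get_model()              # no argument: current/default model
pipeline, name = info["model"], info["model_name"]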

View File

@ -3,7 +3,6 @@ Initialization file for invokeai.backend.prompting
""" """
from .conditioning import ( from .conditioning import (
get_prompt_structure, get_prompt_structure,
get_tokenizer,
get_tokens_for_prompt_object, get_tokens_for_prompt_object,
get_uc_and_c_and_ec, get_uc_and_c_and_ec,
split_weighted_subprompts, split_weighted_subprompts,

View File

@ -7,7 +7,7 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an
""" """
import re import re
from typing import Any, Optional, Union from typing import Optional, Union
from compel import Compel from compel import Compel
from compel.prompt_parser import ( from compel.prompt_parser import (
@ -17,7 +17,6 @@ from compel.prompt_parser import (
Fragment, Fragment,
PromptParser, PromptParser,
) )
from transformers import CLIPTextModel, CLIPTokenizer
from invokeai.backend.globals import Globals from invokeai.backend.globals import Globals
@ -25,36 +24,6 @@ from ..stable_diffusion import InvokeAIDiffuserComponent
from ..util import torch_dtype from ..util import torch_dtype
def get_tokenizer(model) -> CLIPTokenizer:
# TODO remove legacy ckpt fallback handling
return (
getattr(model, "tokenizer", None) # diffusers
or model.cond_stage_model.tokenizer
) # ldm
def get_text_encoder(model) -> Any:
# TODO remove legacy ckpt fallback handling
return getattr(
model, "text_encoder", None
) or UnsqueezingLDMTransformer( # diffusers
model.cond_stage_model.transformer
) # ldm
class UnsqueezingLDMTransformer:
def __init__(self, ldm_transformer):
self.ldm_transformer = ldm_transformer
@property
def device(self):
return self.ldm_transformer.device
def __call__(self, *args, **kwargs):
insufficiently_unsqueezed_tensor = self.ldm_transformer(*args, **kwargs)
return insufficiently_unsqueezed_tensor.unsqueeze(0)
def get_uc_and_c_and_ec( def get_uc_and_c_and_ec(
prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False
): ):
@ -64,13 +33,13 @@ def get_uc_and_c_and_ec(
prompt_string prompt_string
) )
tokenizer = get_tokenizer(model) tokenizer = model.tokenizer
text_encoder = get_text_encoder(model)
compel = Compel( compel = Compel(
tokenizer=tokenizer, tokenizer=tokenizer,
text_encoder=text_encoder, text_encoder=model.text_encoder,
textual_inversion_manager=model.textual_inversion_manager, textual_inversion_manager=model.textual_inversion_manager,
dtype_for_device_getter=torch_dtype, dtype_for_device_getter=torch_dtype,
truncate_long_prompts=False
) )
# get rid of any newline characters # get rid of any newline characters
@ -82,12 +51,12 @@ def get_uc_and_c_and_ec(
legacy_blend = try_parse_legacy_blend( legacy_blend = try_parse_legacy_blend(
positive_prompt_string, skip_normalize_legacy_blend positive_prompt_string, skip_normalize_legacy_blend
) )
positive_prompt: FlattenedPrompt | Blend positive_prompt: Union[FlattenedPrompt, Blend]
if legacy_blend is not None: if legacy_blend is not None:
positive_prompt = legacy_blend positive_prompt = legacy_blend
else: else:
positive_prompt = Compel.parse_prompt_string(positive_prompt_string) positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
negative_prompt: FlattenedPrompt | Blend = Compel.parse_prompt_string( negative_prompt: Union[FlattenedPrompt, Blend] = Compel.parse_prompt_string(
negative_prompt_string negative_prompt_string
) )
@ -96,6 +65,7 @@ def get_uc_and_c_and_ec(
c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt) c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt) uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
[c, uc] = compel.pad_conditioning_tensors_to_same_length([c, uc])
tokens_count = get_max_token_count(tokenizer, positive_prompt) tokens_count = get_max_token_count(tokenizer, positive_prompt)
@ -116,12 +86,12 @@ def get_prompt_structure(
legacy_blend = try_parse_legacy_blend( legacy_blend = try_parse_legacy_blend(
positive_prompt_string, skip_normalize_legacy_blend positive_prompt_string, skip_normalize_legacy_blend
) )
positive_prompt: FlattenedPrompt | Blend positive_prompt: Union[FlattenedPrompt, Blend]
if legacy_blend is not None: if legacy_blend is not None:
positive_prompt = legacy_blend positive_prompt = legacy_blend
else: else:
positive_prompt = Compel.parse_prompt_string(positive_prompt_string) positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
negative_prompt: FlattenedPrompt | Blend = Compel.parse_prompt_string( negative_prompt: Union[FlattenedPrompt, Blend] = Compel.parse_prompt_string(
negative_prompt_string negative_prompt_string
) )
@ -129,7 +99,7 @@ def get_prompt_structure(
def get_max_token_count( def get_max_token_count(
tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=True tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=False
) -> int: ) -> int:
if type(prompt) is Blend: if type(prompt) is Blend:
blend: Blend = prompt blend: Blend = prompt
@ -245,7 +215,7 @@ def log_tokenization_for_prompt_object(
) )
def log_tokenization_for_text(text, tokenizer, display_label=None): def log_tokenization_for_text(text, tokenizer, display_label=None, truncate_if_too_long=False):
"""shows how the prompt is tokenized """shows how the prompt is tokenized
# usually tokens have '</w>' to indicate end-of-word, # usually tokens have '</w>' to indicate end-of-word,
# but for readability it has been replaced with ' ' # but for readability it has been replaced with ' '
@ -260,11 +230,11 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
token = tokens[i].replace("</w>", " ") token = tokens[i].replace("</w>", " ")
# alternate color # alternate color
s = (usedTokens % 6) + 1 s = (usedTokens % 6) + 1
if i < tokenizer.model_max_length: if truncate_if_too_long and i >= tokenizer.model_max_length:
discarded = discarded + f"\x1b[0;3{s};40m{token}"
else:
tokenized = tokenized + f"\x1b[0;3{s};40m{token}" tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
usedTokens += 1 usedTokens += 1
else: # over max token length
discarded = discarded + f"\x1b[0;3{s};40m{token}"
if usedTokens > 0: if usedTokens > 0:
print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):') print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')

View File

@ -0,0 +1,82 @@
'''
SafetyChecker class - checks images against the StabilityAI NSFW filter
and blurs images that contain potential NSFW content.
'''
import diffusers
import numpy as np
import torch
import traceback
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from pathlib import Path
from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor
import invokeai.assets.web as web_assets
from .globals import global_cache_dir
from .util import CPU_DEVICE
class SafetyChecker(object):
CAUTION_IMG = "caution.png"
def __init__(self, device: torch.device):
path = Path(web_assets.__path__[0]) / self.CAUTION_IMG
caution = Image.open(path)
self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
self.device = device
try:
safety_model_id = "CompVis/stable-diffusion-safety-checker"
safety_model_path = global_cache_dir("hub")
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
safety_model_id,
local_files_only=True,
cache_dir=safety_model_path,
)
except Exception:
print(
"** An error was encountered while installing the safety checker:"
)
print(traceback.format_exc())
def check(self, image: Image.Image):
"""
Check the provided image against the StabilityAI safety checker and
return either the original image or a blurred copy if NSFW content
is detected.
"""
self.safety_checker.to(self.device)
features = self.safety_feature_extractor([image], return_tensors="pt")
features.to(self.device)
# unfortunately checker requires the numpy version, so we have to convert back
x_image = np.array(image).astype(np.float32) / 255.0
x_image = x_image[None].transpose(0, 3, 1, 2)
diffusers.logging.set_verbosity_error()
checked_image, has_nsfw_concept = self.safety_checker(
images=x_image, clip_input=features.pixel_values
)
self.safety_checker.to(CPU_DEVICE) # offload
if has_nsfw_concept[0]:
print(
"** An image with potential non-safe content has been detected. A blurred image will be returned. **"
)
return self.blur(image)
else:
return image
def blur(self, input):
blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
try:
if caution := self.caution_img:
blurry.paste(caution, (0, 0), caution)
except FileNotFoundError:
pass
return blurry
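Usage sketch for the extracted checker (import path assumed; the checker moves its model to the given device for the check, then offloads it back to CPU):

import torch
from PIL import Image

from invokeai.backend.safety_checker import SafetyChecker  # path assumed

checker = SafetyChecker(torch.device("cuda"))
image = checker.check(Image.open("out.png"))  # blurred copy if flagged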

View File

@ -6,7 +6,6 @@ The interface is through the Concepts() object.
""" """
import os import os
import re import re
import traceback
from typing import Callable from typing import Callable
from urllib import error as ul_error from urllib import error as ul_error
from urllib import request from urllib import request
@ -15,7 +14,6 @@ from huggingface_hub import (
HfApi, HfApi,
HfFolder, HfFolder,
ModelFilter, ModelFilter,
ModelSearchArguments,
hf_hub_url, hf_hub_url,
) )
@ -84,7 +82,7 @@ class HuggingFaceConceptsLibrary(object):
""" """
if not concept_name in self.list_concepts(): if not concept_name in self.list_concepts():
print( print(
f"This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept." f"{concept_name} is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
) )
return None return None
return self.get_concept_file(concept_name.lower(), "learned_embeds.bin") return self.get_concept_file(concept_name.lower(), "learned_embeds.bin")
@ -236,7 +234,7 @@ class HuggingFaceConceptsLibrary(object):
except ul_error.HTTPError as e: except ul_error.HTTPError as e:
if e.code == 404: if e.code == 404:
print( print(
f"This concept is not known to the Hugging Face library. Generation will continue without the concept." f"Concept {concept_name} is not known to the Hugging Face library. Generation will continue without the concept."
) )
else: else:
print( print(
@ -246,7 +244,7 @@ class HuggingFaceConceptsLibrary(object):
return False return False
except ul_error.URLError as e: except ul_error.URLError as e:
print( print(
f"ERROR: {str(e)}. This may reflect a network issue. Generation will continue without the concept." f"ERROR while downloading {concept_name}: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
) )
os.rmdir(dest) os.rmdir(dest)
return False return False

View File

@ -9,6 +9,7 @@ from typing import Any, Callable, Generic, List, Optional, Type, TypeVar, Union
import einops import einops
import PIL.Image import PIL.Image
from accelerate.utils import set_seed
import psutil import psutil
import torch import torch
import torchvision.transforms as T import torchvision.transforms as T
@ -54,16 +55,6 @@ class PipelineIntermediateState:
attention_map_saver: Optional[AttentionMapSaver] = None attention_map_saver: Optional[AttentionMapSaver] = None
# copied from configs/stable-diffusion/v1-inference.yaml
_default_personalization_config_params = dict(
placeholder_strings=["*"],
initializer_wods=["sculpture"],
per_image_tokens=False,
num_vectors_per_token=1,
progressive_words=False,
)
@dataclass @dataclass
class AddsMaskLatents: class AddsMaskLatents:
"""Add the channels required for inpainting model input. """Add the channels required for inpainting model input.
@ -175,7 +166,7 @@ def image_resized_to_grid_as_tensor(
:param normalize: scale the range to [-1, 1] instead of [0, 1] :param normalize: scale the range to [-1, 1] instead of [0, 1]
:param multiple_of: resize the input so both dimensions are a multiple of this :param multiple_of: resize the input so both dimensions are a multiple of this
""" """
w, h = trim_to_multiple_of(*image.size) w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of)
transformation = T.Compose( transformation = T.Compose(
[ [
T.Resize((h, w), T.InterpolationMode.LANCZOS), T.Resize((h, w), T.InterpolationMode.LANCZOS),
@ -290,10 +281,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]): scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]): safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offsensive or harmful. Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
feature_extractor ([`CLIPFeatureExtractor`]): feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`. Model that extracts features from generated images to be used as inputs for the `safety_checker`.
@ -436,11 +427,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
""" """
Ready this pipeline's models. Ready this pipeline's models.
i.e. pre-load them to the GPU if appropriate. i.e. preload them to the GPU if appropriate.
""" """
self._model_group.ready() self._model_group.ready()
def to(self, torch_device: Optional[Union[str, torch.device]] = None): def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False):
# overridden method; types match the superclass. # overridden method; types match the superclass.
if torch_device is None: if torch_device is None:
return self return self
@ -690,6 +681,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
callback: Callable[[PipelineIntermediateState], None] = None, callback: Callable[[PipelineIntermediateState], None] = None,
run_id=None, run_id=None,
noise_func=None, noise_func=None,
seed=None,
) -> InvokeAIStableDiffusionPipelineOutput: ) -> InvokeAIStableDiffusionPipelineOutput:
if isinstance(init_image, PIL.Image.Image): if isinstance(init_image, PIL.Image.Image):
init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB")) init_image = image_resized_to_grid_as_tensor(init_image.convert("RGB"))
@ -703,6 +695,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
device=self._model_group.device_for(self.unet), device=self._model_group.device_for(self.unet),
dtype=self.unet.dtype, dtype=self.unet.dtype,
) )
if seed is not None:
set_seed(seed)
noise = noise_func(initial_latents) noise = noise_func(initial_latents)
return self.img2img_from_latents_and_embeddings( return self.img2img_from_latents_and_embeddings(
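The new `seed` parameter makes the img2img noise reproducible: `set_seed` (imported from `accelerate.utils` earlier in this file's diff) seeds Python's, NumPy's and torch's RNGs immediately before `noise_func` draws the noise tensor. A small sketch of the guarantee this provides, using `torch.randn_like` as a stand-in for the pipeline's `noise_func`:

```python
import torch
from accelerate.utils import set_seed

def make_noise(latents: torch.Tensor, seed: int) -> torch.Tensor:
    set_seed(seed)                    # seeds the python, numpy and torch RNGs
    return torch.randn_like(latents)  # stand-in for the pipeline's noise_func

latents = torch.zeros(1, 4, 64, 64)
# Same seed, same noise -> the same img2img result for a given image and prompt.
assert torch.equal(make_noise(latents, 42), make_noise(latents, 42))
```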
@ -731,9 +725,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
device=self._model_group.device_for(self.unet), device=self._model_group.device_for(self.unet),
) )
result_latents, result_attention_maps = self.latents_from_embeddings( result_latents, result_attention_maps = self.latents_from_embeddings(
initial_latents, latents=initial_latents if strength < 1.0 else torch.zeros_like(
num_inference_steps, initial_latents, device=initial_latents.device, dtype=initial_latents.dtype
conditioning_data, ),
num_inference_steps=num_inference_steps,
conditioning_data=conditioning_data,
timesteps=timesteps, timesteps=timesteps,
noise=noise, noise=noise,
run_id=run_id, run_id=run_id,
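This hunk also special-cases `strength == 1.0`: at full strength the init image should contribute nothing, so the run starts from zeroed latents (which the subsequent noising step effectively replaces with pure noise) rather than leaking the encoded image. The selection logic, factored out as a sketch:

```python
import torch

def starting_latents(initial_latents: torch.Tensor, strength: float) -> torch.Tensor:
    # strength < 1.0: keep the encoded init image as the starting point.
    if strength < 1.0:
        return initial_latents
    # strength == 1.0: start from zeros so no trace of the init image survives.
    return torch.zeros_like(
        initial_latents, device=initial_latents.device, dtype=initial_latents.dtype
    )
```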
@ -779,6 +775,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
callback: Callable[[PipelineIntermediateState], None] = None, callback: Callable[[PipelineIntermediateState], None] = None,
run_id=None, run_id=None,
noise_func=None, noise_func=None,
seed=None,
) -> InvokeAIStableDiffusionPipelineOutput: ) -> InvokeAIStableDiffusionPipelineOutput:
device = self._model_group.device_for(self.unet) device = self._model_group.device_for(self.unet)
latents_dtype = self.unet.dtype latents_dtype = self.unet.dtype
@ -802,6 +799,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
init_image_latents = self.non_noised_latents_from_image( init_image_latents = self.non_noised_latents_from_image(
init_image, device=device, dtype=latents_dtype init_image, device=device, dtype=latents_dtype
) )
if seed is not None:
set_seed(seed)
noise = noise_func(init_image_latents) noise = noise_func(init_image_latents)
if mask.dim() == 3: if mask.dim() == 3:
@ -831,9 +830,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
try: try:
result_latents, result_attention_maps = self.latents_from_embeddings( result_latents, result_attention_maps = self.latents_from_embeddings(
init_image_latents, latents=init_image_latents if strength < 1.0 else torch.zeros_like(
num_inference_steps, init_image_latents, device=init_image_latents.device, dtype=init_image_latents.dtype
conditioning_data, ),
num_inference_steps=num_inference_steps,
conditioning_data=conditioning_data,
noise=noise, noise=noise,
timesteps=timesteps, timesteps=timesteps,
additional_guidance=guidance, additional_guidance=guidance,
@ -911,20 +912,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
device=self._model_group.device_for(self.unet), device=self._model_group.device_for(self.unet),
) )
@property
def cond_stage_model(self):
return self.embeddings_provider
@torch.inference_mode()
def _tokenize(self, prompt: Union[str, List[str]]):
return self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
@property @property
def channels(self) -> int: def channels(self) -> int:
"""Compatible with DiffusionWrapper""" """Compatible with DiffusionWrapper"""
@ -936,11 +923,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
return super().decode_latents(latents) return super().decode_latents(latents)
def debug_latents(self, latents, msg): def debug_latents(self, latents, msg):
from invokeai.backend.image_util import debug_image
with torch.inference_mode(): with torch.inference_mode():
from ldm.util import debug_image
decoded = self.numpy_to_pil(self.decode_latents(latents)) decoded = self.numpy_to_pil(self.decode_latents(latents))
for i, img in enumerate(decoded): for i, img in enumerate(decoded):
debug_image( debug_image(
img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
) )

View File

@ -3,6 +3,9 @@ import math
import multiprocessing as mp import multiprocessing as mp
import os import os
import re import re
import io
import base64
from collections import abc from collections import abc
from inspect import isfunction from inspect import isfunction
from pathlib import Path from pathlib import Path
@ -364,3 +367,16 @@ def url_attachment_name(url: str) -> dict:
def download_with_progress_bar(url: str, dest: Path) -> bool: def download_with_progress_bar(url: str, dest: Path) -> bool:
result = download_with_resume(url, dest, access_token=None) result = download_with_resume(url, dest, access_token=None)
return result is not None return result is not None
def image_to_dataURL(image: Image.Image, image_format: str = "PNG") -> str:
"""
Converts an image into a base64 image dataURL.
"""
buffered = io.BytesIO()
image.save(buffered, format=image_format)
mime_type = Image.MIME.get(image_format.upper(), "image/" + image_format.lower())
image_base64 = f"data:{mime_type};base64," + base64.b64encode(
buffered.getvalue()
).decode("UTF-8")
return image_base64
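A quick usage sketch for the helper added above (the gray test image is illustrative only):

```python
from PIL import Image

img = Image.new("RGB", (64, 64), color="gray")
data_url = image_to_dataURL(img, image_format="PNG")
print(data_url[:30])  # "data:image/png;base64,iVBORw0K"
```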

View File

@ -29,7 +29,6 @@ from ..image_util import PngWriter, retrieve_metadata
from ...frontend.merge.merge_diffusers import merge_diffusion_models from ...frontend.merge.merge_diffusers import merge_diffusion_models
from ..prompting import ( from ..prompting import (
get_prompt_structure, get_prompt_structure,
get_tokenizer,
get_tokens_for_prompt_object, get_tokens_for_prompt_object,
) )
from ..stable_diffusion import PipelineIntermediateState from ..stable_diffusion import PipelineIntermediateState
@ -1274,7 +1273,7 @@ class InvokeAIWebServer:
None None
if type(parsed_prompt) is Blend if type(parsed_prompt) is Blend
else get_tokens_for_prompt_object( else get_tokens_for_prompt_object(
get_tokenizer(self.generate.model), parsed_prompt self.generate.model.tokenizer, parsed_prompt
) )
) )
attention_maps_image_base64_url = ( attention_maps_image_base64_url = (

View File

@ -35,6 +35,7 @@ module.exports = {
{ varsIgnorePattern: '^_', argsIgnorePattern: '^_' }, { varsIgnorePattern: '^_', argsIgnorePattern: '^_' },
], ],
'prettier/prettier': ['error', { endOfLine: 'auto' }], 'prettier/prettier': ['error', { endOfLine: 'auto' }],
'@typescript-eslint/ban-ts-comment': 'warn',
}, },
settings: { settings: {
react: { react: {

View File

@ -1 +0,0 @@
.ltr-image-gallery-css-transition-enter{transform:translate(150%)}.ltr-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-image-gallery-css-transition-exit{transform:translate(0)}.ltr-image-gallery-css-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}.rtl-image-gallery-css-transition-enter{transform:translate(-150%)}.rtl-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-image-gallery-css-transition-exit{transform:translate(0)}.rtl-image-gallery-css-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}.ltr-parameters-panel-transition-enter{transform:translate(-150%)}.ltr-parameters-panel-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-parameters-panel-transition-exit{transform:translate(0)}.ltr-parameters-panel-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}.rtl-parameters-panel-transition-enter{transform:translate(150%)}.rtl-parameters-panel-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-parameters-panel-transition-exit{transform:translate(0)}.rtl-parameters-panel-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}

View File

@ -0,0 +1 @@
.ltr-image-gallery-css-transition-enter{transform:translate(150%)}.ltr-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-image-gallery-css-transition-exit{transform:translate(0)}.ltr-image-gallery-css-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}.rtl-image-gallery-css-transition-enter{transform:translate(-150%)}.rtl-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-image-gallery-css-transition-exit{transform:translate(0)}.rtl-image-gallery-css-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -12,7 +12,7 @@
margin: 0; margin: 0;
} }
</style> </style>
<script type="module" crossorigin src="./assets/index-b928084d.js"></script> <script type="module" crossorigin src="./assets/index-f7f41e1f.js"></script>
<link rel="stylesheet" href="./assets/index-5483945c.css"> <link rel="stylesheet" href="./assets/index-5483945c.css">
</head> </head>

View File

@ -1,4 +1,26 @@
{ {
"accessibility": {
"modelSelect": "Model Select",
"invokeProgressBar": "Invoke progress bar",
"reset": "Reset",
"uploadImage": "Upload Image",
"previousImage": "Previous Image",
"nextImage": "Next Image",
"useThisParameter": "Use this parameter",
"copyMetadataJson": "Copy metadata JSON",
"exitViewer": "ExitViewer",
"zoomIn": "Zoom In",
"zoomOut": "Zoom Out",
"rotateCounterClockwise": "Rotate Counter-Clockwise",
"rotateClockwise": "Rotate Clockwise",
"flipHorizontally": "Flip Horizontally",
"flipVertically": "Flip Vertically",
"modifyConfig": "Modify Config",
"toggleAutoscroll": "Toggle autoscroll",
"toggleLogViewer": "Toggle Log Viewer",
"showGallery": "Show Gallery",
"showOptionsPanel": "Show Options Panel"
},
"common": { "common": {
"hotkeysLabel": "Hotkeys", "hotkeysLabel": "Hotkeys",
"themeLabel": "Theme", "themeLabel": "Theme",
@ -27,10 +49,11 @@
"langSimplifiedChinese": "简体中文", "langSimplifiedChinese": "简体中文",
"langUkranian": "Украї́нська", "langUkranian": "Украї́нська",
"langSpanish": "Español", "langSpanish": "Español",
"text2img": "Text To Image", "txt2img": "Text To Image",
"img2img": "Image To Image", "img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas", "unifiedCanvas": "Unified Canvas",
"nodes": "Nodes", "nodes": "Nodes",
"postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.", "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing", "postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.", "postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
@ -41,6 +64,8 @@
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.", "trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
"upload": "Upload", "upload": "Upload",
"close": "Close", "close": "Close",
"cancel": "Cancel",
"accept": "Accept",
"load": "Load", "load": "Load",
"back": "Back", "back": "Back",
"statusConnected": "Connected", "statusConnected": "Connected",
@ -310,6 +335,7 @@
"addNewModel": "Add New Model", "addNewModel": "Add New Model",
"addCheckpointModel": "Add Checkpoint / Safetensor Model", "addCheckpointModel": "Add Checkpoint / Safetensor Model",
"addDiffuserModel": "Add Diffusers", "addDiffuserModel": "Add Diffusers",
"scanForModels": "Scan For Models",
"addManually": "Add Manually", "addManually": "Add Manually",
"manual": "Manual", "manual": "Manual",
"name": "Name", "name": "Name",
@ -574,7 +600,7 @@
"autoSaveToGallery": "Auto Save to Gallery", "autoSaveToGallery": "Auto Save to Gallery",
"saveBoxRegionOnly": "Save Box Region Only", "saveBoxRegionOnly": "Save Box Region Only",
"limitStrokesToBox": "Limit Strokes to Box", "limitStrokesToBox": "Limit Strokes to Box",
"showCanvasDebugInfo": "Show Canvas Debug Info", "showCanvasDebugInfo": "Show Additional Canvas Info",
"clearCanvasHistory": "Clear Canvas History", "clearCanvasHistory": "Clear Canvas History",
"clearHistory": "Clear History", "clearHistory": "Clear History",
"clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.", "clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",

View File

@ -63,7 +63,14 @@
"back": "Atrás", "back": "Atrás",
"statusConvertingModel": "Convertir el modelo", "statusConvertingModel": "Convertir el modelo",
"statusModelConverted": "Modelo adaptado", "statusModelConverted": "Modelo adaptado",
"statusMergingModels": "Fusionar modelos" "statusMergingModels": "Fusionar modelos",
"oceanTheme": "Océano",
"langPortuguese": "Portugués",
"langKorean": "Coreano",
"langHebrew": "Hebreo",
"pinOptionsPanel": "Pin del panel de opciones",
"loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA"
}, },
"gallery": { "gallery": {
"generations": "Generaciones", "generations": "Generaciones",
@ -363,7 +370,6 @@
"convertToDiffusersHelpText6": "¿Desea transformar este modelo?", "convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
"convertToDiffusersSaveLocation": "Guardar ubicación", "convertToDiffusersSaveLocation": "Guardar ubicación",
"v1": "v1", "v1": "v1",
"v2": "v2",
"statusConverting": "Adaptar", "statusConverting": "Adaptar",
"modelConverted": "Modelo adaptado", "modelConverted": "Modelo adaptado",
"sameFolder": "La misma carpeta", "sameFolder": "La misma carpeta",
@ -386,14 +392,19 @@
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.", "modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.", "modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados", "ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.", "modelMergeHeaderHelp1": "Puede unir hasta tres modelos diferentes para crear una combinación que se adapte a sus necesidades.",
"inverseSigmoid": "Sigmoideo inverso", "inverseSigmoid": "Sigmoideo inverso",
"weightedSum": "Modelo de suma ponderada", "weightedSum": "Modelo de suma ponderada",
"sigmoid": "Función sigmoide", "sigmoid": "Función sigmoide",
"allModels": "Todos los modelos", "allModels": "Todos los modelos",
"repo_id": "Identificador del repositorio", "repo_id": "Identificador del repositorio",
"pathToCustomConfig": "Ruta a la configuración personalizada", "pathToCustomConfig": "Ruta a la configuración personalizada",
"customConfig": "Configuración personalizada" "customConfig": "Configuración personalizada",
"v2_base": "v2 (512px)",
"none": "ninguno",
"pickModelType": "Elige el tipo de modelo",
"v2_768": "v2 (768px)",
"addDifference": "Añadir una diferencia"
}, },
"parameters": { "parameters": {
"images": "Imágenes", "images": "Imágenes",
@ -589,5 +600,27 @@
"betaDarkenOutside": "Oscurecer fuera", "betaDarkenOutside": "Oscurecer fuera",
"betaLimitToBox": "Limitar a caja", "betaLimitToBox": "Limitar a caja",
"betaPreserveMasked": "Preservar área enmascarada" "betaPreserveMasked": "Preservar área enmascarada"
},
"accessibility": {
"invokeProgressBar": "Activar la barra de progreso",
"modelSelect": "Seleccionar modelo",
"reset": "Reiniciar",
"uploadImage": "Cargar imagen",
"previousImage": "Imagen anterior",
"nextImage": "Siguiente imagen",
"useThisParameter": "Utiliza este parámetro",
"copyMetadataJson": "Copiar los metadatos JSON",
"exitViewer": "Salir del visor",
"zoomIn": "Acercar",
"zoomOut": "Alejar",
"rotateCounterClockwise": "Girar en sentido antihorario",
"rotateClockwise": "Girar en sentido horario",
"flipHorizontally": "Voltear horizontalmente",
"flipVertically": "Voltear verticalmente",
"modifyConfig": "Modificar la configuración",
"toggleAutoscroll": "Activar el autodesplazamiento",
"toggleLogViewer": "Alternar el visor de registros",
"showGallery": "Mostrar galería",
"showOptionsPanel": "Mostrar el panel de opciones"
} }
} }

View File

@ -45,7 +45,9 @@
"statusUpscaling": "Mise à échelle", "statusUpscaling": "Mise à échelle",
"statusUpscalingESRGAN": "Mise à échelle (ESRGAN)", "statusUpscalingESRGAN": "Mise à échelle (ESRGAN)",
"statusLoadingModel": "Chargement du modèle", "statusLoadingModel": "Chargement du modèle",
"statusModelChanged": "Modèle changé" "statusModelChanged": "Modèle changé",
"discordLabel": "Discord",
"githubLabel": "Github"
}, },
"gallery": { "gallery": {
"generations": "Générations", "generations": "Générations",

View File

@ -92,7 +92,6 @@
"modelThree": "מודל 3", "modelThree": "מודל 3",
"mergedModelName": "שם מודל ממוזג", "mergedModelName": "שם מודל ממוזג",
"v1": "v1", "v1": "v1",
"v2": "v2",
"invokeRoot": "תיקיית InvokeAI", "invokeRoot": "תיקיית InvokeAI",
"customConfig": "תצורה מותאמת אישית", "customConfig": "תצורה מותאמת אישית",
"pathToCustomConfig": "נתיב לתצורה מותאמת אישית", "pathToCustomConfig": "נתיב לתצורה מותאמת אישית",

View File

@ -63,7 +63,14 @@
"langSimplifiedChinese": "Cinese semplificato", "langSimplifiedChinese": "Cinese semplificato",
"langDutch": "Olandese", "langDutch": "Olandese",
"statusModelConverted": "Modello Convertito", "statusModelConverted": "Modello Convertito",
"statusConvertingModel": "Conversione Modello" "statusConvertingModel": "Conversione Modello",
"langKorean": "Coreano",
"langPortuguese": "Portoghese",
"pinOptionsPanel": "Blocca il pannello Opzioni",
"loading": "Caricamento in corso",
"oceanTheme": "Oceano",
"langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI"
}, },
"gallery": { "gallery": {
"generations": "Generazioni", "generations": "Generazioni",
@ -361,7 +368,6 @@
"convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 4 GB e 7 GB di dimensioni.", "convertToDiffusersHelpText5": "Assicurati di avere spazio su disco sufficiente. I modelli generalmente variano tra 4 GB e 7 GB di dimensioni.",
"convertToDiffusersHelpText6": "Vuoi convertire questo modello?", "convertToDiffusersHelpText6": "Vuoi convertire questo modello?",
"convertToDiffusersSaveLocation": "Ubicazione salvataggio", "convertToDiffusersSaveLocation": "Ubicazione salvataggio",
"v2": "v2",
"inpainting": "v1 Inpainting", "inpainting": "v1 Inpainting",
"customConfig": "Configurazione personalizzata", "customConfig": "Configurazione personalizzata",
"statusConverting": "Conversione in corso", "statusConverting": "Conversione in corso",
@ -393,7 +399,12 @@
"customSaveLocation": "Ubicazione salvataggio personalizzata", "customSaveLocation": "Ubicazione salvataggio personalizzata",
"weightedSum": "Somma pesata", "weightedSum": "Somma pesata",
"sigmoid": "Sigmoide", "sigmoid": "Sigmoide",
"inverseSigmoid": "Sigmoide inverso" "inverseSigmoid": "Sigmoide inverso",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"none": "niente",
"addDifference": "Aggiungi differenza",
"pickModelType": "Scegli il tipo di modello"
}, },
"parameters": { "parameters": {
"images": "Immagini", "images": "Immagini",
@ -589,5 +600,27 @@
"betaDarkenOutside": "Oscura all'esterno", "betaDarkenOutside": "Oscura all'esterno",
"betaLimitToBox": "Limita al rettangolo", "betaLimitToBox": "Limita al rettangolo",
"betaPreserveMasked": "Conserva quanto mascherato" "betaPreserveMasked": "Conserva quanto mascherato"
},
"accessibility": {
"modelSelect": "Seleziona modello",
"invokeProgressBar": "Barra di avanzamento generazione",
"uploadImage": "Carica immagine",
"previousImage": "Immagine precedente",
"nextImage": "Immagine successiva",
"useThisParameter": "Usa questo parametro",
"reset": "Reimposta",
"copyMetadataJson": "Copia i metadati JSON",
"exitViewer": "Esci dal visualizzatore",
"zoomIn": "Zoom avanti",
"zoomOut": "Zoom Indietro",
"rotateCounterClockwise": "Ruotare in senso antiorario",
"rotateClockwise": "Ruotare in senso orario",
"flipHorizontally": "Capovolgi orizzontalmente",
"toggleLogViewer": "Attiva/disattiva visualizzatore registro",
"showGallery": "Mostra la galleria immagini",
"showOptionsPanel": "Mostra il pannello opzioni",
"flipVertically": "Capovolgi verticalmente",
"toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico",
"modifyConfig": "Modifica configurazione"
} }
} }

View File

@ -302,7 +302,7 @@
"name": "Naam", "name": "Naam",
"nameValidationMsg": "Geef een naam voor je model", "nameValidationMsg": "Geef een naam voor je model",
"description": "Beschrijving", "description": "Beschrijving",
"descriptionValidationMsg": "Voeg een beschrijving toe voor je model", "descriptionValidationMsg": "Voeg een beschrijving toe voor je model.",
"config": "Configuratie", "config": "Configuratie",
"configValidationMsg": "Pad naar het configuratiebestand van je model.", "configValidationMsg": "Pad naar het configuratiebestand van je model.",
"modelLocation": "Locatie model", "modelLocation": "Locatie model",
@ -364,7 +364,6 @@
"convertToDiffusersHelpText5": "Zorg ervoor dat je genoeg schijfruimte hebt. Modellen nemen gewoonlijk ongeveer 4 - 7 GB ruimte in beslag.", "convertToDiffusersHelpText5": "Zorg ervoor dat je genoeg schijfruimte hebt. Modellen nemen gewoonlijk ongeveer 4 - 7 GB ruimte in beslag.",
"convertToDiffusersSaveLocation": "Bewaarlocatie", "convertToDiffusersSaveLocation": "Bewaarlocatie",
"v1": "v1", "v1": "v1",
"v2": "v2",
"inpainting": "v1-inpainting", "inpainting": "v1-inpainting",
"customConfig": "Eigen configuratie", "customConfig": "Eigen configuratie",
"pathToCustomConfig": "Pad naar eigen configuratie", "pathToCustomConfig": "Pad naar eigen configuratie",

View File

@ -63,6 +63,560 @@
"statusGeneratingOutpainting": "Geração de Ampliação", "statusGeneratingOutpainting": "Geração de Ampliação",
"statusGenerationComplete": "Geração Completa", "statusGenerationComplete": "Geração Completa",
"statusMergingModels": "Mesclando Modelos", "statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados" "statusMergedModels": "Modelos Mesclados",
"oceanTheme": "Oceano",
"pinOptionsPanel": "Fixar painel de opções",
"loading": "A carregar",
"loadingInvokeAI": "A carregar Invoke AI",
"langPortuguese": "Português"
},
"gallery": {
"galleryImageResetSize": "Resetar Imagem",
"gallerySettings": "Configurações de Galeria",
"maintainAspectRatio": "Mater Proporções",
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente",
"pinGallery": "Fixar Galeria",
"singleColumnLayout": "Disposição em Coluna Única",
"allImagesLoaded": "Todas as Imagens Carregadas",
"loadMore": "Carregar Mais",
"noImagesInGallery": "Sem Imagens na Galeria",
"generations": "Gerações",
"showGenerations": "Mostrar Gerações",
"uploads": "Enviados",
"showUploads": "Mostrar Enviados",
"galleryImageSize": "Tamanho da Imagem"
},
"hotkeys": {
"generalHotkeys": "Atalhos Gerais",
"galleryHotkeys": "Atalhos da Galeria",
"toggleViewer": {
"title": "Ativar Visualizador",
"desc": "Abrir e fechar o Visualizador de Imagens"
},
"maximizeWorkSpace": {
"desc": "Fechar painéis e maximixar área de trabalho",
"title": "Maximizar a Área de Trabalho"
},
"changeTabs": {
"title": "Mudar Guias",
"desc": "Trocar para outra área de trabalho"
},
"consoleToggle": {
"desc": "Abrir e fechar console",
"title": "Ativar Console"
},
"setPrompt": {
"title": "Definir Prompt",
"desc": "Usar o prompt da imagem atual"
},
"sendToImageToImage": {
"desc": "Manda a imagem atual para Imagem Para Imagem",
"title": "Mandar para Imagem Para Imagem"
},
"previousImage": {
"desc": "Mostra a imagem anterior na galeria",
"title": "Imagem Anterior"
},
"nextImage": {
"title": "Próxima Imagem",
"desc": "Mostra a próxima imagem na galeria"
},
"decreaseGalleryThumbSize": {
"desc": "Diminui o tamanho das thumbs na galeria",
"title": "Diminuir Tamanho da Galeria de Imagem"
},
"selectBrush": {
"title": "Selecionar Pincel",
"desc": "Seleciona o pincel"
},
"selectEraser": {
"title": "Selecionar Apagador",
"desc": "Seleciona o apagador"
},
"decreaseBrushSize": {
"title": "Diminuir Tamanho do Pincel",
"desc": "Diminui o tamanho do pincel/apagador"
},
"increaseBrushOpacity": {
"desc": "Aumenta a opacidade do pincel",
"title": "Aumentar Opacidade do Pincel"
},
"moveTool": {
"title": "Ferramenta Mover",
"desc": "Permite navegar pela tela"
},
"decreaseBrushOpacity": {
"desc": "Diminui a opacidade do pincel",
"title": "Diminuir Opacidade do Pincel"
},
"toggleSnap": {
"title": "Ativar Encaixe",
"desc": "Ativa Encaixar na Grade"
},
"quickToggleMove": {
"title": "Ativar Mover Rapidamente",
"desc": "Temporariamente ativa o modo Mover"
},
"toggleLayer": {
"title": "Ativar Camada",
"desc": "Ativa a seleção de camada de máscara/base"
},
"clearMask": {
"title": "Limpar Máscara",
"desc": "Limpa toda a máscara"
},
"hideMask": {
"title": "Esconder Máscara",
"desc": "Esconde e Revela a máscara"
},
"mergeVisible": {
"title": "Fundir Visível",
"desc": "Fundir todas as camadas visíveis das telas"
},
"downloadImage": {
"desc": "Descarregar a tela atual",
"title": "Descarregar Imagem"
},
"undoStroke": {
"title": "Desfazer Traço",
"desc": "Desfaz um traço de pincel"
},
"redoStroke": {
"title": "Refazer Traço",
"desc": "Refaz o traço de pincel"
},
"keyboardShortcuts": "Atalhos de Teclado",
"appHotkeys": "Atalhos do app",
"invoke": {
"title": "Invocar",
"desc": "Gerar uma imagem"
},
"cancel": {
"title": "Cancelar",
"desc": "Cancelar geração de imagem"
},
"focusPrompt": {
"title": "Foco do Prompt",
"desc": "Foco da área de texto do prompt"
},
"toggleOptions": {
"title": "Ativar Opções",
"desc": "Abrir e fechar o painel de opções"
},
"pinOptions": {
"title": "Fixar Opções",
"desc": "Fixar o painel de opções"
},
"closePanels": {
"title": "Fechar Painéis",
"desc": "Fecha os painéis abertos"
},
"unifiedCanvasHotkeys": "Atalhos da Tela Unificada",
"toggleGallery": {
"title": "Ativar Galeria",
"desc": "Abrir e fechar a gaveta da galeria"
},
"setSeed": {
"title": "Definir Seed",
"desc": "Usar seed da imagem atual"
},
"setParameters": {
"title": "Definir Parâmetros",
"desc": "Usar todos os parâmetros da imagem atual"
},
"restoreFaces": {
"title": "Restaurar Rostos",
"desc": "Restaurar a imagem atual"
},
"upscale": {
"title": "Redimensionar",
"desc": "Redimensionar a imagem atual"
},
"showInfo": {
"title": "Mostrar Informações",
"desc": "Mostrar metadados de informações da imagem atual"
},
"deleteImage": {
"title": "Apagar Imagem",
"desc": "Apaga a imagem atual"
},
"toggleGalleryPin": {
"title": "Ativar Fixar Galeria",
"desc": "Fixa e desafixa a galeria na interface"
},
"increaseGalleryThumbSize": {
"title": "Aumentar Tamanho da Galeria de Imagem",
"desc": "Aumenta o tamanho das thumbs na galeria"
},
"increaseBrushSize": {
"title": "Aumentar Tamanho do Pincel",
"desc": "Aumenta o tamanho do pincel/apagador"
},
"fillBoundingBox": {
"title": "Preencher Caixa Delimitadora",
"desc": "Preenche a caixa delimitadora com a cor do pincel"
},
"eraseBoundingBox": {
"title": "Apagar Caixa Delimitadora",
"desc": "Apaga a área da caixa delimitadora"
},
"colorPicker": {
"title": "Selecionar Seletor de Cor",
"desc": "Seleciona o seletor de cores"
},
"showHideBoundingBox": {
"title": "Mostrar/Esconder Caixa Delimitadora",
"desc": "Ativa a visibilidade da caixa delimitadora"
},
"saveToGallery": {
"title": "Gravara Na Galeria",
"desc": "Grava a tela atual na galeria"
},
"copyToClipboard": {
"title": "Copiar para a Área de Transferência",
"desc": "Copia a tela atual para a área de transferência"
},
"resetView": {
"title": "Resetar Visualização",
"desc": "Reseta Visualização da Tela"
},
"previousStagingImage": {
"title": "Imagem de Preparação Anterior",
"desc": "Área de Imagem de Preparação Anterior"
},
"nextStagingImage": {
"title": "Próxima Imagem de Preparação Anterior",
"desc": "Próxima Área de Imagem de Preparação Anterior"
},
"acceptStagingImage": {
"title": "Aceitar Imagem de Preparação Anterior",
"desc": "Aceitar Área de Imagem de Preparação Anterior"
}
},
"modelManager": {
"modelAdded": "Modelo Adicionado",
"modelUpdated": "Modelo Atualizado",
"modelEntryDeleted": "Entrada de modelo excluída",
"description": "Descrição",
"modelLocationValidationMsg": "Caminho para onde o seu modelo está localizado.",
"repo_id": "Repo ID",
"vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
"width": "Largura",
"widthValidationMsg": "Largura padrão do seu modelo.",
"height": "Altura",
"heightValidationMsg": "Altura padrão do seu modelo.",
"findModels": "Encontrar Modelos",
"scanAgain": "Digitalize Novamente",
"deselectAll": "Deselecionar Tudo",
"showExisting": "Mostrar Existente",
"deleteConfig": "Apagar Config",
"convertToDiffusersHelpText6": "Deseja converter este modelo?",
"mergedModelName": "Nome do modelo mesclado",
"alpha": "Alpha",
"interpolationType": "Tipo de Interpolação",
"modelMergeHeaderHelp1": "Pode mesclar até três modelos diferentes para criar uma mistura que atenda às suas necessidades.",
"modelMergeHeaderHelp2": "Apenas Diffusers estão disponíveis para mesclagem. Se deseja mesclar um modelo de checkpoint, por favor, converta-o para Diffusers primeiro.",
"modelMergeInterpAddDifferenceHelp": "Neste modo, o Modelo 3 é primeiro subtraído do Modelo 2. A versão resultante é mesclada com o Modelo 1 com a taxa alpha definida acima.",
"nameValidationMsg": "Insira um nome para o seu modelo",
"descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
"config": "Configuração",
"modelExists": "Modelo Existe",
"selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
"noModelsFound": "Nenhum Modelo Encontrado",
"v2_768": "v2 (768px)",
"inpainting": "v1 Inpainting",
"customConfig": "Configuração personalizada",
"pathToCustomConfig": "Caminho para configuração personalizada",
"statusConverting": "A converter",
"modelConverted": "Modelo Convertido",
"ignoreMismatch": "Ignorar Divergências entre Modelos Selecionados",
"addDifference": "Adicionar diferença",
"pickModelType": "Escolha o tipo de modelo",
"safetensorModels": "SafeTensors",
"cannotUseSpaces": "Não pode usar espaços",
"addNew": "Adicionar Novo",
"addManually": "Adicionar Manualmente",
"manual": "Manual",
"name": "Nome",
"configValidationMsg": "Caminho para o ficheiro de configuração do seu modelo.",
"modelLocation": "Localização do modelo",
"repoIDValidationMsg": "Repositório Online do seu Modelo",
"updateModel": "Atualizar Modelo",
"availableModels": "Modelos Disponíveis",
"load": "Carregar",
"active": "Ativado",
"notLoaded": "Não carregado",
"deleteModel": "Apagar modelo",
"deleteMsg1": "Tem certeza de que deseja apagar esta entrada do modelo de InvokeAI?",
"deleteMsg2": "Isso não vai apagar o ficheiro de modelo checkpoint do seu disco. Pode lê-los, se desejar.",
"convertToDiffusers": "Converter para Diffusers",
"convertToDiffusersHelpText1": "Este modelo será convertido ao formato 🧨 Diffusers.",
"convertToDiffusersHelpText2": "Este processo irá substituir a sua entrada de Gestor de Modelos por uma versão Diffusers do mesmo modelo.",
"convertToDiffusersHelpText3": "O seu ficheiro de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Pode adicionar o seu ponto de verificação ao Gestor de modelos novamente, se desejar.",
"convertToDiffusersSaveLocation": "Local para Gravar",
"v2_base": "v2 (512px)",
"mergeModels": "Mesclar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"mergedModelSaveLocation": "Local de Salvamento",
"merge": "Mesclar",
"modelsMerged": "Modelos mesclados",
"mergedModelCustomSaveLocation": "Caminho Personalizado",
"invokeAIFolder": "Pasta Invoke AI",
"inverseSigmoid": "Sigmóide Inversa",
"none": "nenhum",
"modelManager": "Gerente de Modelo",
"model": "Modelo",
"allModels": "Todos os Modelos",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"addNewModel": "Adicionar Novo modelo",
"addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
"addDiffuserModel": "Adicionar Diffusers",
"vaeLocation": "Localização VAE",
"vaeLocationValidationMsg": "Caminho para onde o seu VAE está localizado.",
"vaeRepoID": "VAE Repo ID",
"addModel": "Adicionar Modelo",
"search": "Procurar",
"cached": "Em cache",
"checkpointFolder": "Pasta de Checkpoint",
"clearCheckpointFolder": "Apagar Pasta de Checkpoint",
"modelsFound": "Modelos Encontrados",
"selectFolder": "Selecione a Pasta",
"selected": "Selecionada",
"selectAll": "Selecionar Tudo",
"addSelected": "Adicione Selecionado",
"delete": "Apagar",
"formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
"formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
"formMessageDiffusersVAELocation": "Localização do VAE",
"formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo ficheiro VAE dentro do local do modelo.",
"convert": "Converter",
"convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, a depender das especificações do seu computador.",
"convertToDiffusersHelpText5": "Por favor, certifique-se de que tenha espaço suficiente no disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
"v1": "v1",
"sameFolder": "Mesma pasta",
"invokeRoot": "Pasta do InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Local de salvamento personalizado",
"modelMergeAlphaHelp": "Alpha controla a força da mistura dos modelos. Valores de alpha mais baixos resultam numa influência menor do segundo modelo.",
"sigmoid": "Sigmóide",
"weightedSum": "Soma Ponderada"
},
"parameters": {
"width": "Largura",
"seed": "Seed",
"hiresStrength": "Força da Alta Resolução",
"negativePrompts": "Indicações negativas",
"general": "Geral",
"randomizeSeed": "Seed Aleatório",
"shuffle": "Embaralhar",
"noiseThreshold": "Limite de Ruído",
"perlinNoise": "Ruído de Perlin",
"variations": "Variatções",
"seedWeights": "Pesos da Seed",
"restoreFaces": "Restaurar Rostos",
"faceRestoration": "Restauração de Rosto",
"type": "Tipo",
"denoisingStrength": "A força de remoção de ruído",
"scale": "Escala",
"otherOptions": "Outras Opções",
"seamlessTiling": "Ladrilho Sem Fronteira",
"hiresOptim": "Otimização de Alta Res",
"imageFit": "Caber Imagem Inicial No Tamanho de Saída",
"codeformerFidelity": "Fidelidade",
"seamSize": "Tamanho da Fronteira",
"seamBlur": "Desfoque da Fronteira",
"seamStrength": "Força da Fronteira",
"seamSteps": "Passos da Fronteira",
"tileSize": "Tamanho do Ladrilho",
"boundingBoxHeader": "Caixa Delimitadora",
"seamCorrectionHeader": "Correção de Fronteira",
"infillScalingHeader": "Preencimento e Escala",
"img2imgStrength": "Força de Imagem Para Imagem",
"toggleLoopback": "Ativar Loopback",
"symmetry": "Simetria",
"promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)",
"sendTo": "Mandar para",
"openInViewer": "Abrir No Visualizador",
"closeViewer": "Fechar Visualizador",
"usePrompt": "Usar Prompt",
"deleteImage": "Apagar Imagem",
"initialImage": "Imagem inicial",
"showOptionsPanel": "Mostrar Painel de Opções",
"strength": "Força",
"upscaling": "Redimensionando",
"upscale": "Redimensionar",
"upscaleImage": "Redimensionar Imagem",
"scaleBeforeProcessing": "Escala Antes do Processamento",
"invoke": "Invocar",
"images": "Imagems",
"steps": "Passos",
"cfgScale": "Escala CFG",
"height": "Altura",
"sampler": "Amostrador",
"imageToImage": "Imagem para Imagem",
"variationAmount": "Quntidade de Variatções",
"scaledWidth": "L Escalada",
"scaledHeight": "A Escalada",
"infillMethod": "Método de Preenchimento",
"hSymmetryStep": "H Passo de Simetria",
"vSymmetryStep": "V Passo de Simetria",
"cancel": {
"immediate": "Cancelar imediatamente",
"schedule": "Cancelar após a iteração atual",
"isScheduled": "A cancelar",
"setType": "Definir tipo de cancelamento"
},
"sendToImg2Img": "Mandar para Imagem Para Imagem",
"sendToUnifiedCanvas": "Mandar para Tela Unificada",
"copyImage": "Copiar imagem",
"copyImageToLink": "Copiar Imagem Para a Ligação",
"downloadImage": "Descarregar Imagem",
"useSeed": "Usar Seed",
"useAll": "Usar Todos",
"useInitImg": "Usar Imagem Inicial",
"info": "Informações"
},
"settings": {
"confirmOnDelete": "Confirmar Antes de Apagar",
"displayHelpIcons": "Mostrar Ícones de Ajuda",
"useCanvasBeta": "Usar Layout de Telas Beta",
"enableImageDebugging": "Ativar Depuração de Imagem",
"useSlidersForAll": "Usar deslizadores para todas as opções",
"resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.",
"models": "Modelos",
"displayInProgress": "Mostrar Progresso de Imagens Em Andamento",
"saveSteps": "Gravar imagens a cada n passos",
"resetWebUI": "Reiniciar Interface",
"resetWebUIDesc2": "Se as imagens não estão a aparecer na galeria ou algo mais não está a funcionar, favor tentar reiniciar antes de postar um problema no GitHub.",
"resetComplete": "A interface foi reiniciada. Atualize a página para carregar."
},
"toast": {
"uploadFailed": "Envio Falhou",
"uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitido uma imagem de cada vez",
"uploadFailedUnableToLoadDesc": "Não foj possível carregar o ficheiro",
"downloadImageStarted": "Download de Imagem Começou",
"imageNotLoadedDesc": "Nenhuma imagem encontrada a enviar para o módulo de imagem para imagem",
"imageLinkCopied": "Ligação de Imagem Copiada",
"imageNotLoaded": "Nenhuma Imagem Carregada",
"parametersFailed": "Problema ao carregar parâmetros",
"parametersFailedDesc": "Não foi possível carregar imagem incial.",
"seedSet": "Seed Definida",
"upscalingFailed": "Redimensionamento Falhou",
"promptNotSet": "Prompt Não Definido",
"tempFoldersEmptied": "Pasta de Ficheiros Temporários Esvaziada",
"imageCopied": "Imagem Copiada",
"imageSavedToGallery": "Imagem Salva na Galeria",
"canvasMerged": "Tela Fundida",
"sentToImageToImage": "Mandar Para Imagem Para Imagem",
"sentToUnifiedCanvas": "Enviada para a Tela Unificada",
"parametersSet": "Parâmetros Definidos",
"parametersNotSet": "Parâmetros Não Definidos",
"parametersNotSetDesc": "Nenhum metadado foi encontrado para essa imagem.",
"seedNotSet": "Seed Não Definida",
"seedNotSetDesc": "Não foi possível achar a seed para a imagem.",
"promptSet": "Prompt Definido",
"promptNotSetDesc": "Não foi possível achar prompt para essa imagem.",
"faceRestoreFailed": "Restauração de Rosto Falhou",
"metadataLoadFailed": "Falha ao tentar carregar metadados",
"initialImageSet": "Imagem Inicial Definida",
"initialImageNotSet": "Imagem Inicial Não Definida",
"initialImageNotSetDesc": "Não foi possível carregar imagem incial"
},
"tooltip": {
"feature": {
"prompt": "Este é o campo de prompt. O prompt inclui objetos de geração e termos estilísticos. Também pode adicionar peso (importância do token) no prompt, mas comandos e parâmetros de CLI não funcionarão.",
"other": "Essas opções ativam modos alternativos de processamento para o Invoke. 'Seamless tiling' criará padrões repetidos na saída. 'High resolution' é uma geração em duas etapas com img2img: use essa configuração quando desejar uma imagem maior e mais coerente sem artefatos. Levará mais tempo do que o txt2img usual.",
"seed": "O valor da semente afeta o ruído inicial a partir do qual a imagem é formada. Pode usar as sementes já existentes de imagens anteriores. 'Limiar de ruído' é usado para mitigar artefatos em valores CFG altos (experimente a faixa de 0-10) e o Perlin para adicionar ruído Perlin durante a geração: ambos servem para adicionar variação às suas saídas.",
"imageToImage": "Image to Image carrega qualquer imagem como inicial, que é então usada para gerar uma nova junto com o prompt. Quanto maior o valor, mais a imagem resultante mudará. Valores de 0.0 a 1.0 são possíveis, a faixa recomendada é de 0.25 a 0.75",
"faceCorrection": "Correção de rosto com GFPGAN ou Codeformer: o algoritmo detecta rostos na imagem e corrige quaisquer defeitos. Um valor alto mudará mais a imagem, a resultar em rostos mais atraentes. Codeformer com uma fidelidade maior preserva a imagem original às custas de uma correção de rosto mais forte.",
"seamCorrection": "Controla o tratamento das emendas visíveis que ocorrem entre as imagens geradas no canvas.",
"gallery": "A galeria exibe as gerações da pasta de saída conforme elas são criadas. As configurações são armazenadas em ficheiros e acessadas pelo menu de contexto.",
"variations": "Experimente uma variação com um valor entre 0,1 e 1,0 para mudar o resultado para uma determinada semente. Variações interessantes da semente estão entre 0,1 e 0,3.",
"upscale": "Use o ESRGAN para ampliar a imagem imediatamente após a geração.",
"boundingBox": "A caixa delimitadora é a mesma que as configurações de largura e altura para Texto para Imagem ou Imagem para Imagem. Apenas a área na caixa será processada.",
"infillAndScaling": "Gira os métodos de preenchimento (usados em áreas mascaradas ou apagadas do canvas) e a escala (útil para tamanhos de caixa delimitadora pequenos)."
}
},
"unifiedCanvas": {
"emptyTempImagesFolderMessage": "Esvaziar a pasta de ficheiros de imagem temporários também reseta completamente a Tela Unificada. Isso inclui todo o histórico de desfazer/refazer, imagens na área de preparação e a camada base da tela.",
"scaledBoundingBox": "Caixa Delimitadora Escalada",
"boundingBoxPosition": "Posição da Caixa Delimitadora",
"next": "Próximo",
"accept": "Aceitar",
"showHide": "Mostrar/Esconder",
"discardAll": "Descartar Todos",
"betaClear": "Limpar",
"betaDarkenOutside": "Escurecer Externamente",
"base": "Base",
"brush": "Pincel",
"showIntermediates": "Mostrar Intermediários",
"showGrid": "Mostrar Grade",
"clearCanvasHistoryConfirm": "Tem certeza que quer limpar o histórico de tela?",
"boundingBox": "Caixa Delimitadora",
"canvasDimensions": "Dimensões da Tela",
"canvasPosition": "Posição da Tela",
"cursorPosition": "Posição do cursor",
"previous": "Anterior",
"betaLimitToBox": "Limitar á Caixa",
"layer": "Camada",
"mask": "Máscara",
"maskingOptions": "Opções de Mascaramento",
"enableMask": "Ativar Máscara",
"preserveMaskedArea": "Preservar Área da Máscara",
"clearMask": "Limpar Máscara",
"eraser": "Apagador",
"fillBoundingBox": "Preencher Caixa Delimitadora",
"eraseBoundingBox": "Apagar Caixa Delimitadora",
"colorPicker": "Seletor de Cor",
"brushOptions": "Opções de Pincel",
"brushSize": "Tamanho",
"move": "Mover",
"resetView": "Resetar Visualização",
"mergeVisible": "Fundir Visível",
"saveToGallery": "Gravar na Galeria",
"copyToClipboard": "Copiar para a Área de Transferência",
"downloadAsImage": "Descarregar Como Imagem",
"undo": "Desfazer",
"redo": "Refazer",
"clearCanvas": "Limpar Tela",
"canvasSettings": "Configurações de Tela",
"snapToGrid": "Encaixar na Grade",
"darkenOutsideSelection": "Escurecer Seleção Externa",
"autoSaveToGallery": "Gravar Automaticamente na Galeria",
"saveBoxRegionOnly": "Gravar Apenas a Região da Caixa",
"limitStrokesToBox": "Limitar Traços à Caixa",
"showCanvasDebugInfo": "Mostrar Informações de Depuração daTela",
"clearCanvasHistory": "Limpar o Histórico da Tela",
"clearHistory": "Limpar Históprico",
"clearCanvasHistoryMessage": "Limpar o histórico de tela deixa a sua tela atual intacta, mas limpa de forma irreversível o histórico de desfazer e refazer.",
"emptyTempImageFolder": "Esvaziar a Pasta de Ficheiros de Imagem Temporários",
"emptyFolder": "Esvaziar Pasta",
"emptyTempImagesFolderConfirm": "Tem certeza que quer esvaziar a pasta de ficheiros de imagem temporários?",
"activeLayer": "Camada Ativa",
"canvasScale": "Escala da Tela",
"betaPreserveMasked": "Preservar Máscarado"
},
"accessibility": {
"invokeProgressBar": "Invocar barra de progresso",
"reset": "Repôr",
"nextImage": "Próxima imagem",
"useThisParameter": "Usar este parâmetro",
"copyMetadataJson": "Copiar metadados JSON",
"zoomIn": "Ampliar",
"zoomOut": "Reduzir",
"rotateCounterClockwise": "Girar no sentido anti-horário",
"rotateClockwise": "Girar no sentido horário",
"flipVertically": "Espelhar verticalmente",
"modifyConfig": "Modificar config",
"toggleAutoscroll": "Alternar rolagem automática",
"showGallery": "Mostrar galeria",
"showOptionsPanel": "Mostrar painel de opções",
"uploadImage": "Enviar imagem",
"previousImage": "Imagem anterior",
"flipHorizontally": "Espelhar horizontalmente",
"toggleLogViewer": "Alternar visualizador de registo"
} }
} }

View File

@ -63,7 +63,10 @@
"statusMergingModels": "Mesclando Modelos", "statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados", "statusMergedModels": "Modelos Mesclados",
"langRussian": "Russo", "langRussian": "Russo",
"langSpanish": "Espanhol" "langSpanish": "Espanhol",
"pinOptionsPanel": "Fixar painel de opções",
"loadingInvokeAI": "Carregando Invoke AI",
"loading": "Carregando"
}, },
"gallery": { "gallery": {
"generations": "Gerações", "generations": "Gerações",
@ -358,7 +361,6 @@
"convertToDiffusersHelpText6": "Você deseja converter este modelo?", "convertToDiffusersHelpText6": "Você deseja converter este modelo?",
"convertToDiffusersSaveLocation": "Local para Salvar", "convertToDiffusersSaveLocation": "Local para Salvar",
"v1": "v1", "v1": "v1",
"v2": "v2",
"inpainting": "v1 Inpainting", "inpainting": "v1 Inpainting",
"customConfig": "Configuração personalizada", "customConfig": "Configuração personalizada",
"pathToCustomConfig": "Caminho para configuração personalizada", "pathToCustomConfig": "Caminho para configuração personalizada",
@ -381,7 +383,19 @@
"allModels": "Todos os Modelos", "allModels": "Todos os Modelos",
"repoIDValidationMsg": "Repositório Online do seu Modelo", "repoIDValidationMsg": "Repositório Online do seu Modelo",
"convert": "Converter", "convert": "Converter",
"convertToDiffusersHelpText2": "Este processo irá substituir sua entrada de Gerenciador de Modelos por uma versão Diffusers do mesmo modelo." "convertToDiffusersHelpText2": "Este processo irá substituir sua entrada de Gerenciador de Modelos por uma versão Diffusers do mesmo modelo.",
"mergedModelCustomSaveLocation": "Caminho Personalizado",
"mergedModelSaveLocation": "Local de Salvamento",
"interpolationType": "Tipo de Interpolação",
"ignoreMismatch": "Ignorar Divergências entre Modelos Selecionados",
"invokeAIFolder": "Pasta Invoke AI",
"weightedSum": "Soma Ponderada",
"sigmoid": "Sigmóide",
"inverseSigmoid": "Sigmóide Inversa",
"modelMergeHeaderHelp1": "Você pode mesclar até três modelos diferentes para criar uma mistura que atenda às suas necessidades.",
"modelMergeInterpAddDifferenceHelp": "Neste modo, o Modelo 3 é primeiro subtraído do Modelo 2. A versão resultante é mesclada com o Modelo 1 com a taxa alpha definida acima.",
"modelMergeAlphaHelp": "Alpha controla a força da mistura dos modelos. Valores de alpha mais baixos resultam em uma influência menor do segundo modelo.",
"modelMergeHeaderHelp2": "Apenas Diffusers estão disponíveis para mesclagem. Se você deseja mesclar um modelo de checkpoint, por favor, converta-o para Diffusers primeiro."
}, },
"parameters": { "parameters": {
"images": "Imagems", "images": "Imagems",
@ -441,7 +455,22 @@
"info": "Informações", "info": "Informações",
"deleteImage": "Apagar Imagem", "deleteImage": "Apagar Imagem",
"initialImage": "Imagem inicial", "initialImage": "Imagem inicial",
"showOptionsPanel": "Mostrar Painel de Opções" "showOptionsPanel": "Mostrar Painel de Opções",
"vSymmetryStep": "V Passo de Simetria",
"hSymmetryStep": "H Passo de Simetria",
"symmetry": "Simetria",
"copyImage": "Copiar imagem",
"negativePrompts": "Indicações negativas",
"hiresStrength": "Força da Alta Resolução",
"denoisingStrength": "A força de remoção de ruído",
"imageToImage": "Imagem para Imagem",
"cancel": {
"setType": "Definir tipo de cancelamento",
"isScheduled": "Cancelando",
"schedule": "Cancelar após a iteração atual",
"immediate": "Cancelar imediatamente"
},
"general": "Geral"
}, },
"settings": { "settings": {
"models": "Modelos", "models": "Modelos",
@ -454,7 +483,8 @@
"resetWebUI": "Reiniciar Interface", "resetWebUI": "Reiniciar Interface",
"resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.", "resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.",
"resetWebUIDesc2": "Se as imagens não estão aparecendo na galeria ou algo mais não está funcionando, favor tentar reiniciar antes de postar um problema no GitHub.", "resetWebUIDesc2": "Se as imagens não estão aparecendo na galeria ou algo mais não está funcionando, favor tentar reiniciar antes de postar um problema no GitHub.",
"resetComplete": "A interface foi reiniciada. Atualize a página para carregar." "resetComplete": "A interface foi reiniciada. Atualize a página para carregar.",
"useSlidersForAll": "Usar deslizadores para todas as opções"
}, },
"toast": { "toast": {
"tempFoldersEmptied": "Pasta de Arquivos Temporários Esvaziada", "tempFoldersEmptied": "Pasta de Arquivos Temporários Esvaziada",
@ -546,5 +576,20 @@
"betaDarkenOutside": "Escurecer Externamente", "betaDarkenOutside": "Escurecer Externamente",
"betaLimitToBox": "Limitar Para a Caixa", "betaLimitToBox": "Limitar Para a Caixa",
"betaPreserveMasked": "Preservar Máscarado" "betaPreserveMasked": "Preservar Máscarado"
},
"tooltip": {
"feature": {
"seed": "O valor da semente afeta o ruído inicial a partir do qual a imagem é formada. Você pode usar as sementes já existentes de imagens anteriores. 'Limiar de ruído' é usado para mitigar artefatos em valores CFG altos (experimente a faixa de 0-10), e o Perlin para adicionar ruído Perlin durante a geração: ambos servem para adicionar variação às suas saídas.",
"gallery": "A galeria exibe as gerações da pasta de saída conforme elas são criadas. As configurações são armazenadas em arquivos e acessadas pelo menu de contexto.",
"other": "Essas opções ativam modos alternativos de processamento para o Invoke. 'Seamless tiling' criará padrões repetidos na saída. 'High resolution' é uma geração em duas etapas com img2img: use essa configuração quando desejar uma imagem maior e mais coerente sem artefatos. Levará mais tempo do que o txt2img usual.",
"boundingBox": "A caixa delimitadora é a mesma que as configurações de largura e altura para Texto para Imagem ou Imagem para Imagem. Apenas a área na caixa será processada.",
"upscale": "Use o ESRGAN para ampliar a imagem imediatamente após a geração.",
"seamCorrection": "Controla o tratamento das emendas visíveis que ocorrem entre as imagens geradas no canvas.",
"faceCorrection": "Correção de rosto com GFPGAN ou Codeformer: o algoritmo detecta rostos na imagem e corrige quaisquer defeitos. Um valor alto mudará mais a imagem, resultando em rostos mais atraentes. Codeformer com uma fidelidade maior preserva a imagem original às custas de uma correção de rosto mais forte.",
"prompt": "Este é o campo de prompt. O prompt inclui objetos de geração e termos estilísticos. Você também pode adicionar peso (importância do token) no prompt, mas comandos e parâmetros de CLI não funcionarão.",
"infillAndScaling": "Gerencie os métodos de preenchimento (usados em áreas mascaradas ou apagadas do canvas) e a escala (útil para tamanhos de caixa delimitadora pequenos).",
"imageToImage": "Image to Image carrega qualquer imagem como inicial, que é então usada para gerar uma nova junto com o prompt. Quanto maior o valor, mais a imagem resultante mudará. Valores de 0.0 a 1.0 são possíveis, a faixa recomendada é de 0.25 a 0.75",
"variations": "Experimente uma variação com um valor entre 0,1 e 1,0 para mudar o resultado para uma determinada semente. Variações interessantes da semente estão entre 0,1 e 0,3."
}
} }
} }

View File

@ -46,7 +46,15 @@
"statusLoadingModel": "Загрузка модели", "statusLoadingModel": "Загрузка модели",
"statusModelChanged": "Модель изменена", "statusModelChanged": "Модель изменена",
"githubLabel": "Github", "githubLabel": "Github",
"discordLabel": "Discord" "discordLabel": "Discord",
"statusMergingModels": "Слияние моделей",
"statusModelConverted": "Модель сконвертирована",
"statusMergedModels": "Модели объединены",
"pinOptionsPanel": "Закрепить панель настроек",
"loading": "Загрузка",
"loadingInvokeAI": "Загрузка Invoke AI",
"back": "Назад",
"statusConvertingModel": "Конвертация модели"
}, },
"gallery": { "gallery": {
"generations": "Генерации", "generations": "Генерации",
@ -323,7 +331,30 @@
"deleteConfig": "Удалить конфигурацию", "deleteConfig": "Удалить конфигурацию",
"deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?", "deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?",
"deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.", "deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.",
"repoIDValidationMsg": "Онлайн-репозиторий модели" "repoIDValidationMsg": "Онлайн-репозиторий модели",
"convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. Модели обычно занимают 4 7 Гб.",
"invokeAIFolder": "Каталог InvokeAI",
"ignoreMismatch": "Игнорировать несоответствия между выбранными моделями",
"addCheckpointModel": "Добавить модель Checkpoint/Safetensor",
"formMessageDiffusersModelLocationDesc": "Укажите хотя бы одно.",
"convertToDiffusersHelpText3": "Файл модели на диске НЕ будет удалён или изменён. Вы сможете заново добавить его в Model Manager при необходимости.",
"vaeRepoID": "ID репозитория VAE",
"mergedModelName": "Название объединенной модели",
"checkpointModels": "Checkpoints",
"allModels": "Все модели",
"addDiffuserModel": "Добавить Diffusers",
"repo_id": "ID репозитория",
"formMessageDiffusersVAELocationDesc": "Если не указано, InvokeAI будет искать файл VAE рядом с моделью.",
"convert": "Преобразовать",
"convertToDiffusers": "Преобразовать в Diffusers",
"convertToDiffusersHelpText1": "Модель будет преобразована в формат 🧨 Diffusers.",
"convertToDiffusersHelpText4": "Это единоразовое действие. Оно может занять 30—60 секунд в зависимости от характеристик вашего компьютера.",
"convertToDiffusersHelpText6": "Вы хотите преобразовать эту модель?",
"statusConverting": "Преобразование",
"modelConverted": "Модель преобразована",
"invokeRoot": "Каталог InvokeAI",
"modelsMerged": "Модели объединены",
"mergeModels": "Объединить модели"
}, },
"parameters": { "parameters": {
"images": "Изображения", "images": "Изображения",
@ -503,5 +534,8 @@
"betaDarkenOutside": "Затемнить снаружи", "betaDarkenOutside": "Затемнить снаружи",
"betaLimitToBox": "Ограничить выделением", "betaLimitToBox": "Ограничить выделением",
"betaPreserveMasked": "Сохранять маскируемую область" "betaPreserveMasked": "Сохранять маскируемую область"
},
"accessibility": {
"modelSelect": "Выбор модели"
} }
} }

View File

@ -1 +1,39 @@
{} {
"common": {
"nodes": "節點",
"img2img": "圖片轉圖片",
"langSimplifiedChinese": "簡體中文",
"statusError": "錯誤",
"statusDisconnected": "已中斷連線",
"statusConnected": "已連線",
"back": "返回",
"load": "載入",
"close": "關閉",
"langEnglish": "英語",
"settingsLabel": "設定",
"upload": "上傳",
"langArabic": "阿拉伯語",
"greenTheme": "綠色",
"lightTheme": "淺色",
"darkTheme": "深色",
"discordLabel": "Discord",
"nodesDesc": "使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。",
"reportBugLabel": "回報錯誤",
"githubLabel": "GitHub",
"langKorean": "韓語",
"langPortuguese": "葡萄牙語",
"hotkeysLabel": "快捷鍵",
"languagePickerLabel": "切換語言",
"langDutch": "荷蘭語",
"langFrench": "法語",
"langGerman": "德語",
"langItalian": "義大利語",
"langJapanese": "日語",
"langPolish": "波蘭語",
"langBrPortuguese": "巴西葡萄牙語",
"langRussian": "俄語",
"langSpanish": "西班牙語",
"text2img": "文字到圖像",
"unifiedCanvas": "統一畫布"
}
}

View File

@ -1,3 +1,7 @@
import React, { PropsWithChildren } from 'react';
import { IAIPopoverProps } from '../web/src/common/components/IAIPopover';
import { IAIIconButtonProps } from '../web/src/common/components/IAIIconButton';
export {}; export {};
declare module 'redux-socket.io-middleware'; declare module 'redux-socket.io-middleware';
@ -39,3 +43,36 @@ declare global {
} }
/* eslint-enable @typescript-eslint/no-explicit-any */ /* eslint-enable @typescript-eslint/no-explicit-any */
} }
declare module '@invoke-ai/invoke-ai-ui' {
declare class ThemeChanger extends React.Component<ThemeChangerProps> {
public constructor(props: ThemeChangerProps);
}
declare class InvokeAiLogoComponent extends React.Component<InvokeAILogoComponentProps> {
public constructor(props: InvokeAILogoComponentProps);
}
declare class IAIPopover extends React.Component<IAIPopoverProps> {
public constructor(props: IAIPopoverProps);
}
declare class IAIIconButton extends React.Component<IAIIconButtonProps> {
public constructor(props: IAIIconButtonProps);
}
declare class SettingsModal extends React.Component<SettingsModalProps> {
public constructor(props: SettingsModalProps);
}
}
declare function Invoke(props: PropsWithChildren): JSX.Element;
export {
ThemeChanger,
InvokeAiLogoComponent,
IAIPopover,
IAIIconButton,
SettingsModal,
};
export = Invoke;
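
Taken together, these ambient declarations describe how a host application would consume the packaged UI. A hypothetical consumer is sketched below; the package name comes from the declaration above, while the props-free rendering and the `esModuleInterop` default import are assumptions, not part of this diff.

```tsx
import Invoke, {
  InvokeAiLogoComponent,
  ThemeChanger,
} from '@invoke-ai/invoke-ai-ui';

// Hypothetical host app. Rendering the components without props assumes
// their Props types mark every field optional, which this diff does not show.
const Host = () => (
  <Invoke>
    <InvokeAiLogoComponent />
    <ThemeChanger />
  </Invoke>
);

export default Host;
```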

View File

@ -8,7 +8,7 @@
"build": "yarn run lint && vite build", "build": "yarn run lint && vite build",
"preview": "vite preview", "preview": "vite preview",
"lint:madge": "madge --circular src/main.tsx", "lint:madge": "madge --circular src/main.tsx",
"lint:eslint": "eslint --max-warnings=0", "lint:eslint": "eslint --max-warnings=0 .",
"lint:prettier": "prettier --check .", "lint:prettier": "prettier --check .",
"lint:tsc": "tsc --noEmit", "lint:tsc": "tsc --noEmit",
"lint": "yarn run lint:eslint && yarn run lint:prettier && yarn run lint:tsc && yarn run lint:madge", "lint": "yarn run lint:eslint && yarn run lint:prettier && yarn run lint:tsc && yarn run lint:madge",
@ -36,6 +36,7 @@
}, },
"dependencies": { "dependencies": {
"@chakra-ui/anatomy": "^2.1.1", "@chakra-ui/anatomy": "^2.1.1",
"@chakra-ui/cli": "^2.3.0",
"@chakra-ui/icons": "^2.0.17", "@chakra-ui/icons": "^2.0.17",
"@chakra-ui/react": "^2.5.1", "@chakra-ui/react": "^2.5.1",
"@chakra-ui/styled-system": "^2.6.1", "@chakra-ui/styled-system": "^2.6.1",
@ -52,6 +53,7 @@
"i18next-http-backend": "^2.1.1", "i18next-http-backend": "^2.1.1",
"konva": "^8.4.2", "konva": "^8.4.2",
"lodash": "^4.17.21", "lodash": "^4.17.21",
"patch-package": "^6.5.1",
"re-resizable": "^6.9.9", "re-resizable": "^6.9.9",
"react": "^18.2.0", "react": "^18.2.0",
"react-colorful": "^5.6.1", "react-colorful": "^5.6.1",
@ -72,7 +74,6 @@
"uuid": "^9.0.0" "uuid": "^9.0.0"
}, },
"devDependencies": { "devDependencies": {
"@chakra-ui/cli": "^2.3.0",
"@fontsource/inter": "^4.5.15", "@fontsource/inter": "^4.5.15",
"@types/dateformat": "^5.0.0", "@types/dateformat": "^5.0.0",
"@types/react": "^18.0.28", "@types/react": "^18.0.28",
@ -92,7 +93,6 @@
"husky": "^8.0.3", "husky": "^8.0.3",
"lint-staged": "^13.1.2", "lint-staged": "^13.1.2",
"madge": "^6.0.0", "madge": "^6.0.0",
"patch-package": "^6.5.1",
"postinstall-postinstall": "^2.1.0", "postinstall-postinstall": "^2.1.0",
"prettier": "^2.8.4", "prettier": "^2.8.4",
"rollup-plugin-visualizer": "^5.9.0", "rollup-plugin-visualizer": "^5.9.0",

View File

@ -1,4 +1,26 @@
{ {
"accessibility": {
"modelSelect": "Model Select",
"invokeProgressBar": "Invoke progress bar",
"reset": "Reset",
"uploadImage": "Upload Image",
"previousImage": "Previous Image",
"nextImage": "Next Image",
"useThisParameter": "Use this parameter",
"copyMetadataJson": "Copy metadata JSON",
"exitViewer": "ExitViewer",
"zoomIn": "Zoom In",
"zoomOut": "Zoom Out",
"rotateCounterClockwise": "Rotate Counter-Clockwise",
"rotateClockwise": "Rotate Clockwise",
"flipHorizontally": "Flip Horizontally",
"flipVertically": "Flip Vertically",
"modifyConfig": "Modify Config",
"toggleAutoscroll": "Toggle autoscroll",
"toggleLogViewer": "Toggle Log Viewer",
"showGallery": "Show Gallery",
"showOptionsPanel": "Show Options Panel"
},
"common": { "common": {
"hotkeysLabel": "Hotkeys", "hotkeysLabel": "Hotkeys",
"themeLabel": "Theme", "themeLabel": "Theme",
@ -27,10 +49,11 @@
"langSimplifiedChinese": "简体中文", "langSimplifiedChinese": "简体中文",
"langUkranian": "Украї́нська", "langUkranian": "Украї́нська",
"langSpanish": "Español", "langSpanish": "Español",
"text2img": "Text To Image", "txt2img": "Text To Image",
"img2img": "Image To Image", "img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas", "unifiedCanvas": "Unified Canvas",
"nodes": "Nodes", "nodes": "Nodes",
"postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.", "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing", "postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.", "postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
@ -41,6 +64,8 @@
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.", "trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
"upload": "Upload", "upload": "Upload",
"close": "Close", "close": "Close",
"cancel": "Cancel",
"accept": "Accept",
"load": "Load", "load": "Load",
"back": "Back", "back": "Back",
"statusConnected": "Connected", "statusConnected": "Connected",
@ -310,6 +335,7 @@
"addNewModel": "Add New Model", "addNewModel": "Add New Model",
"addCheckpointModel": "Add Checkpoint / Safetensor Model", "addCheckpointModel": "Add Checkpoint / Safetensor Model",
"addDiffuserModel": "Add Diffusers", "addDiffuserModel": "Add Diffusers",
"scanForModels": "Scan For Models",
"addManually": "Add Manually", "addManually": "Add Manually",
"manual": "Manual", "manual": "Manual",
"name": "Name", "name": "Name",
@ -574,7 +600,7 @@
"autoSaveToGallery": "Auto Save to Gallery", "autoSaveToGallery": "Auto Save to Gallery",
"saveBoxRegionOnly": "Save Box Region Only", "saveBoxRegionOnly": "Save Box Region Only",
"limitStrokesToBox": "Limit Strokes to Box", "limitStrokesToBox": "Limit Strokes to Box",
"showCanvasDebugInfo": "Show Canvas Debug Info", "showCanvasDebugInfo": "Show Additional Canvas Info",
"clearCanvasHistory": "Clear Canvas History", "clearCanvasHistory": "Clear Canvas History",
"clearHistory": "Clear History", "clearHistory": "Clear History",
"clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.", "clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.",

View File

@ -63,7 +63,14 @@
"back": "Atrás", "back": "Atrás",
"statusConvertingModel": "Convertir el modelo", "statusConvertingModel": "Convertir el modelo",
"statusModelConverted": "Modelo adaptado", "statusModelConverted": "Modelo adaptado",
"statusMergingModels": "Fusionar modelos" "statusMergingModels": "Fusionar modelos",
"oceanTheme": "Océano",
"langPortuguese": "Portugués",
"langKorean": "Coreano",
"langHebrew": "Hebreo",
"pinOptionsPanel": "Pin del panel de opciones",
"loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA"
}, },
"gallery": { "gallery": {
"generations": "Generaciones", "generations": "Generaciones",
@ -385,14 +392,19 @@
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.", "modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.", "modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados", "ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.", "modelMergeHeaderHelp1": "Puede unir hasta tres modelos diferentes para crear una combinación que se adapte a sus necesidades.",
"inverseSigmoid": "Sigmoideo inverso", "inverseSigmoid": "Sigmoideo inverso",
"weightedSum": "Modelo de suma ponderada", "weightedSum": "Modelo de suma ponderada",
"sigmoid": "Función sigmoide", "sigmoid": "Función sigmoide",
"allModels": "Todos los modelos", "allModels": "Todos los modelos",
"repo_id": "Identificador del repositorio", "repo_id": "Identificador del repositorio",
"pathToCustomConfig": "Ruta a la configuración personalizada", "pathToCustomConfig": "Ruta a la configuración personalizada",
"customConfig": "Configuración personalizada" "customConfig": "Configuración personalizada",
"v2_base": "v2 (512px)",
"none": "ninguno",
"pickModelType": "Elige el tipo de modelo",
"v2_768": "v2 (768px)",
"addDifference": "Añadir una diferencia"
}, },
"parameters": { "parameters": {
"images": "Imágenes", "images": "Imágenes",
@ -588,5 +600,27 @@
"betaDarkenOutside": "Oscurecer fuera", "betaDarkenOutside": "Oscurecer fuera",
"betaLimitToBox": "Limitar a caja", "betaLimitToBox": "Limitar a caja",
"betaPreserveMasked": "Preservar área enmascarada" "betaPreserveMasked": "Preservar área enmascarada"
},
"accessibility": {
"invokeProgressBar": "Activar la barra de progreso",
"modelSelect": "Seleccionar modelo",
"reset": "Reiniciar",
"uploadImage": "Cargar imagen",
"previousImage": "Imagen anterior",
"nextImage": "Siguiente imagen",
"useThisParameter": "Utiliza este parámetro",
"copyMetadataJson": "Copiar los metadatos JSON",
"exitViewer": "Salir del visor",
"zoomIn": "Acercar",
"zoomOut": "Alejar",
"rotateCounterClockwise": "Girar en sentido antihorario",
"rotateClockwise": "Girar en sentido horario",
"flipHorizontally": "Voltear horizontalmente",
"flipVertically": "Voltear verticalmente",
"modifyConfig": "Modificar la configuración",
"toggleAutoscroll": "Activar el autodesplazamiento",
"toggleLogViewer": "Alternar el visor de registros",
"showGallery": "Mostrar galería",
"showOptionsPanel": "Mostrar el panel de opciones"
} }
} }

View File

@ -63,7 +63,14 @@
"langSimplifiedChinese": "Cinese semplificato", "langSimplifiedChinese": "Cinese semplificato",
"langDutch": "Olandese", "langDutch": "Olandese",
"statusModelConverted": "Modello Convertito", "statusModelConverted": "Modello Convertito",
"statusConvertingModel": "Conversione Modello" "statusConvertingModel": "Conversione Modello",
"langKorean": "Coreano",
"langPortuguese": "Portoghese",
"pinOptionsPanel": "Blocca il pannello Opzioni",
"loading": "Caricamento in corso",
"oceanTheme": "Oceano",
"langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI"
}, },
"gallery": { "gallery": {
"generations": "Generazioni", "generations": "Generazioni",
@ -392,7 +399,12 @@
"customSaveLocation": "Ubicazione salvataggio personalizzata", "customSaveLocation": "Ubicazione salvataggio personalizzata",
"weightedSum": "Somma pesata", "weightedSum": "Somma pesata",
"sigmoid": "Sigmoide", "sigmoid": "Sigmoide",
"inverseSigmoid": "Sigmoide inverso" "inverseSigmoid": "Sigmoide inverso",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"none": "niente",
"addDifference": "Aggiungi differenza",
"pickModelType": "Scegli il tipo di modello"
}, },
"parameters": { "parameters": {
"images": "Immagini", "images": "Immagini",
@ -588,5 +600,27 @@
"betaDarkenOutside": "Oscura all'esterno", "betaDarkenOutside": "Oscura all'esterno",
"betaLimitToBox": "Limita al rettangolo", "betaLimitToBox": "Limita al rettangolo",
"betaPreserveMasked": "Conserva quanto mascherato" "betaPreserveMasked": "Conserva quanto mascherato"
},
"accessibility": {
"modelSelect": "Seleziona modello",
"invokeProgressBar": "Barra di avanzamento generazione",
"uploadImage": "Carica immagine",
"previousImage": "Immagine precedente",
"nextImage": "Immagine successiva",
"useThisParameter": "Usa questo parametro",
"reset": "Reimposta",
"copyMetadataJson": "Copia i metadati JSON",
"exitViewer": "Esci dal visualizzatore",
"zoomIn": "Zoom avanti",
"zoomOut": "Zoom Indietro",
"rotateCounterClockwise": "Ruotare in senso antiorario",
"rotateClockwise": "Ruotare in senso orario",
"flipHorizontally": "Capovolgi orizzontalmente",
"toggleLogViewer": "Attiva/disattiva visualizzatore registro",
"showGallery": "Mostra la galleria immagini",
"showOptionsPanel": "Mostra il pannello opzioni",
"flipVertically": "Capovolgi verticalmente",
"toggleAutoscroll": "Attiva/disattiva lo scorrimento automatico",
"modifyConfig": "Modifica configurazione"
} }
} }

View File

@ -63,6 +63,560 @@
"statusGeneratingOutpainting": "Geração de Ampliação", "statusGeneratingOutpainting": "Geração de Ampliação",
"statusGenerationComplete": "Geração Completa", "statusGenerationComplete": "Geração Completa",
"statusMergingModels": "Mesclando Modelos", "statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados" "statusMergedModels": "Modelos Mesclados",
"oceanTheme": "Oceano",
"pinOptionsPanel": "Fixar painel de opções",
"loading": "A carregar",
"loadingInvokeAI": "A carregar Invoke AI",
"langPortuguese": "Português"
},
"gallery": {
"galleryImageResetSize": "Resetar Imagem",
"gallerySettings": "Configurações de Galeria",
"maintainAspectRatio": "Mater Proporções",
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente",
"pinGallery": "Fixar Galeria",
"singleColumnLayout": "Disposição em Coluna Única",
"allImagesLoaded": "Todas as Imagens Carregadas",
"loadMore": "Carregar Mais",
"noImagesInGallery": "Sem Imagens na Galeria",
"generations": "Gerações",
"showGenerations": "Mostrar Gerações",
"uploads": "Enviados",
"showUploads": "Mostrar Enviados",
"galleryImageSize": "Tamanho da Imagem"
},
"hotkeys": {
"generalHotkeys": "Atalhos Gerais",
"galleryHotkeys": "Atalhos da Galeria",
"toggleViewer": {
"title": "Ativar Visualizador",
"desc": "Abrir e fechar o Visualizador de Imagens"
},
"maximizeWorkSpace": {
"desc": "Fechar painéis e maximixar área de trabalho",
"title": "Maximizar a Área de Trabalho"
},
"changeTabs": {
"title": "Mudar Guias",
"desc": "Trocar para outra área de trabalho"
},
"consoleToggle": {
"desc": "Abrir e fechar console",
"title": "Ativar Console"
},
"setPrompt": {
"title": "Definir Prompt",
"desc": "Usar o prompt da imagem atual"
},
"sendToImageToImage": {
"desc": "Manda a imagem atual para Imagem Para Imagem",
"title": "Mandar para Imagem Para Imagem"
},
"previousImage": {
"desc": "Mostra a imagem anterior na galeria",
"title": "Imagem Anterior"
},
"nextImage": {
"title": "Próxima Imagem",
"desc": "Mostra a próxima imagem na galeria"
},
"decreaseGalleryThumbSize": {
"desc": "Diminui o tamanho das thumbs na galeria",
"title": "Diminuir Tamanho da Galeria de Imagem"
},
"selectBrush": {
"title": "Selecionar Pincel",
"desc": "Seleciona o pincel"
},
"selectEraser": {
"title": "Selecionar Apagador",
"desc": "Seleciona o apagador"
},
"decreaseBrushSize": {
"title": "Diminuir Tamanho do Pincel",
"desc": "Diminui o tamanho do pincel/apagador"
},
"increaseBrushOpacity": {
"desc": "Aumenta a opacidade do pincel",
"title": "Aumentar Opacidade do Pincel"
},
"moveTool": {
"title": "Ferramenta Mover",
"desc": "Permite navegar pela tela"
},
"decreaseBrushOpacity": {
"desc": "Diminui a opacidade do pincel",
"title": "Diminuir Opacidade do Pincel"
},
"toggleSnap": {
"title": "Ativar Encaixe",
"desc": "Ativa Encaixar na Grade"
},
"quickToggleMove": {
"title": "Ativar Mover Rapidamente",
"desc": "Temporariamente ativa o modo Mover"
},
"toggleLayer": {
"title": "Ativar Camada",
"desc": "Ativa a seleção de camada de máscara/base"
},
"clearMask": {
"title": "Limpar Máscara",
"desc": "Limpa toda a máscara"
},
"hideMask": {
"title": "Esconder Máscara",
"desc": "Esconde e Revela a máscara"
},
"mergeVisible": {
"title": "Fundir Visível",
"desc": "Fundir todas as camadas visíveis das telas"
},
"downloadImage": {
"desc": "Descarregar a tela atual",
"title": "Descarregar Imagem"
},
"undoStroke": {
"title": "Desfazer Traço",
"desc": "Desfaz um traço de pincel"
},
"redoStroke": {
"title": "Refazer Traço",
"desc": "Refaz o traço de pincel"
},
"keyboardShortcuts": "Atalhos de Teclado",
"appHotkeys": "Atalhos do app",
"invoke": {
"title": "Invocar",
"desc": "Gerar uma imagem"
},
"cancel": {
"title": "Cancelar",
"desc": "Cancelar geração de imagem"
},
"focusPrompt": {
"title": "Foco do Prompt",
"desc": "Foco da área de texto do prompt"
},
"toggleOptions": {
"title": "Ativar Opções",
"desc": "Abrir e fechar o painel de opções"
},
"pinOptions": {
"title": "Fixar Opções",
"desc": "Fixar o painel de opções"
},
"closePanels": {
"title": "Fechar Painéis",
"desc": "Fecha os painéis abertos"
},
"unifiedCanvasHotkeys": "Atalhos da Tela Unificada",
"toggleGallery": {
"title": "Ativar Galeria",
"desc": "Abrir e fechar a gaveta da galeria"
},
"setSeed": {
"title": "Definir Seed",
"desc": "Usar seed da imagem atual"
},
"setParameters": {
"title": "Definir Parâmetros",
"desc": "Usar todos os parâmetros da imagem atual"
},
"restoreFaces": {
"title": "Restaurar Rostos",
"desc": "Restaurar a imagem atual"
},
"upscale": {
"title": "Redimensionar",
"desc": "Redimensionar a imagem atual"
},
"showInfo": {
"title": "Mostrar Informações",
"desc": "Mostrar metadados de informações da imagem atual"
},
"deleteImage": {
"title": "Apagar Imagem",
"desc": "Apaga a imagem atual"
},
"toggleGalleryPin": {
"title": "Ativar Fixar Galeria",
"desc": "Fixa e desafixa a galeria na interface"
},
"increaseGalleryThumbSize": {
"title": "Aumentar Tamanho da Galeria de Imagem",
"desc": "Aumenta o tamanho das thumbs na galeria"
},
"increaseBrushSize": {
"title": "Aumentar Tamanho do Pincel",
"desc": "Aumenta o tamanho do pincel/apagador"
},
"fillBoundingBox": {
"title": "Preencher Caixa Delimitadora",
"desc": "Preenche a caixa delimitadora com a cor do pincel"
},
"eraseBoundingBox": {
"title": "Apagar Caixa Delimitadora",
"desc": "Apaga a área da caixa delimitadora"
},
"colorPicker": {
"title": "Selecionar Seletor de Cor",
"desc": "Seleciona o seletor de cores"
},
"showHideBoundingBox": {
"title": "Mostrar/Esconder Caixa Delimitadora",
"desc": "Ativa a visibilidade da caixa delimitadora"
},
"saveToGallery": {
"title": "Gravara Na Galeria",
"desc": "Grava a tela atual na galeria"
},
"copyToClipboard": {
"title": "Copiar para a Área de Transferência",
"desc": "Copia a tela atual para a área de transferência"
},
"resetView": {
"title": "Resetar Visualização",
"desc": "Reseta Visualização da Tela"
},
"previousStagingImage": {
"title": "Imagem de Preparação Anterior",
"desc": "Área de Imagem de Preparação Anterior"
},
"nextStagingImage": {
"title": "Próxima Imagem de Preparação Anterior",
"desc": "Próxima Área de Imagem de Preparação Anterior"
},
"acceptStagingImage": {
"title": "Aceitar Imagem de Preparação Anterior",
"desc": "Aceitar Área de Imagem de Preparação Anterior"
}
},
"modelManager": {
"modelAdded": "Modelo Adicionado",
"modelUpdated": "Modelo Atualizado",
"modelEntryDeleted": "Entrada de modelo excluída",
"description": "Descrição",
"modelLocationValidationMsg": "Caminho para onde o seu modelo está localizado.",
"repo_id": "Repo ID",
"vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
"width": "Largura",
"widthValidationMsg": "Largura padrão do seu modelo.",
"height": "Altura",
"heightValidationMsg": "Altura padrão do seu modelo.",
"findModels": "Encontrar Modelos",
"scanAgain": "Digitalize Novamente",
"deselectAll": "Deselecionar Tudo",
"showExisting": "Mostrar Existente",
"deleteConfig": "Apagar Config",
"convertToDiffusersHelpText6": "Deseja converter este modelo?",
"mergedModelName": "Nome do modelo mesclado",
"alpha": "Alpha",
"interpolationType": "Tipo de Interpolação",
"modelMergeHeaderHelp1": "Pode mesclar até três modelos diferentes para criar uma mistura que atenda às suas necessidades.",
"modelMergeHeaderHelp2": "Apenas Diffusers estão disponíveis para mesclagem. Se deseja mesclar um modelo de checkpoint, por favor, converta-o para Diffusers primeiro.",
"modelMergeInterpAddDifferenceHelp": "Neste modo, o Modelo 3 é primeiro subtraído do Modelo 2. A versão resultante é mesclada com o Modelo 1 com a taxa alpha definida acima.",
"nameValidationMsg": "Insira um nome para o seu modelo",
"descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
"config": "Configuração",
"modelExists": "Modelo Existe",
"selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
"noModelsFound": "Nenhum Modelo Encontrado",
"v2_768": "v2 (768px)",
"inpainting": "v1 Inpainting",
"customConfig": "Configuração personalizada",
"pathToCustomConfig": "Caminho para configuração personalizada",
"statusConverting": "A converter",
"modelConverted": "Modelo Convertido",
"ignoreMismatch": "Ignorar Divergências entre Modelos Selecionados",
"addDifference": "Adicionar diferença",
"pickModelType": "Escolha o tipo de modelo",
"safetensorModels": "SafeTensors",
"cannotUseSpaces": "Não pode usar espaços",
"addNew": "Adicionar Novo",
"addManually": "Adicionar Manualmente",
"manual": "Manual",
"name": "Nome",
"configValidationMsg": "Caminho para o ficheiro de configuração do seu modelo.",
"modelLocation": "Localização do modelo",
"repoIDValidationMsg": "Repositório Online do seu Modelo",
"updateModel": "Atualizar Modelo",
"availableModels": "Modelos Disponíveis",
"load": "Carregar",
"active": "Ativado",
"notLoaded": "Não carregado",
"deleteModel": "Apagar modelo",
"deleteMsg1": "Tem certeza de que deseja apagar esta entrada do modelo de InvokeAI?",
"deleteMsg2": "Isso não vai apagar o ficheiro de modelo checkpoint do seu disco. Pode lê-los, se desejar.",
"convertToDiffusers": "Converter para Diffusers",
"convertToDiffusersHelpText1": "Este modelo será convertido ao formato 🧨 Diffusers.",
"convertToDiffusersHelpText2": "Este processo irá substituir a sua entrada de Gestor de Modelos por uma versão Diffusers do mesmo modelo.",
"convertToDiffusersHelpText3": "O seu ficheiro de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Pode adicionar o seu ponto de verificação ao Gestor de modelos novamente, se desejar.",
"convertToDiffusersSaveLocation": "Local para Gravar",
"v2_base": "v2 (512px)",
"mergeModels": "Mesclar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"mergedModelSaveLocation": "Local de Salvamento",
"merge": "Mesclar",
"modelsMerged": "Modelos mesclados",
"mergedModelCustomSaveLocation": "Caminho Personalizado",
"invokeAIFolder": "Pasta Invoke AI",
"inverseSigmoid": "Sigmóide Inversa",
"none": "nenhum",
"modelManager": "Gerente de Modelo",
"model": "Modelo",
"allModels": "Todos os Modelos",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"addNewModel": "Adicionar Novo modelo",
"addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
"addDiffuserModel": "Adicionar Diffusers",
"vaeLocation": "Localização VAE",
"vaeLocationValidationMsg": "Caminho para onde o seu VAE está localizado.",
"vaeRepoID": "VAE Repo ID",
"addModel": "Adicionar Modelo",
"search": "Procurar",
"cached": "Em cache",
"checkpointFolder": "Pasta de Checkpoint",
"clearCheckpointFolder": "Apagar Pasta de Checkpoint",
"modelsFound": "Modelos Encontrados",
"selectFolder": "Selecione a Pasta",
"selected": "Selecionada",
"selectAll": "Selecionar Tudo",
"addSelected": "Adicione Selecionado",
"delete": "Apagar",
"formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
"formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
"formMessageDiffusersVAELocation": "Localização do VAE",
"formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo ficheiro VAE dentro do local do modelo.",
"convert": "Converter",
"convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, a depender das especificações do seu computador.",
"convertToDiffusersHelpText5": "Por favor, certifique-se de que tenha espaço suficiente no disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
"v1": "v1",
"sameFolder": "Mesma pasta",
"invokeRoot": "Pasta do InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Local de salvamento personalizado",
"modelMergeAlphaHelp": "Alpha controla a força da mistura dos modelos. Valores de alpha mais baixos resultam numa influência menor do segundo modelo.",
"sigmoid": "Sigmóide",
"weightedSum": "Soma Ponderada"
},
"parameters": {
"width": "Largura",
"seed": "Seed",
"hiresStrength": "Força da Alta Resolução",
"negativePrompts": "Indicações negativas",
"general": "Geral",
"randomizeSeed": "Seed Aleatório",
"shuffle": "Embaralhar",
"noiseThreshold": "Limite de Ruído",
"perlinNoise": "Ruído de Perlin",
"variations": "Variatções",
"seedWeights": "Pesos da Seed",
"restoreFaces": "Restaurar Rostos",
"faceRestoration": "Restauração de Rosto",
"type": "Tipo",
"denoisingStrength": "A força de remoção de ruído",
"scale": "Escala",
"otherOptions": "Outras Opções",
"seamlessTiling": "Ladrilho Sem Fronteira",
"hiresOptim": "Otimização de Alta Res",
"imageFit": "Caber Imagem Inicial No Tamanho de Saída",
"codeformerFidelity": "Fidelidade",
"seamSize": "Tamanho da Fronteira",
"seamBlur": "Desfoque da Fronteira",
"seamStrength": "Força da Fronteira",
"seamSteps": "Passos da Fronteira",
"tileSize": "Tamanho do Ladrilho",
"boundingBoxHeader": "Caixa Delimitadora",
"seamCorrectionHeader": "Correção de Fronteira",
"infillScalingHeader": "Preencimento e Escala",
"img2imgStrength": "Força de Imagem Para Imagem",
"toggleLoopback": "Ativar Loopback",
"symmetry": "Simetria",
"promptPlaceholder": "Digite o prompt aqui. [tokens negativos], (upweight)++, (downweight)--, trocar e misturar estão disponíveis (veja docs)",
"sendTo": "Mandar para",
"openInViewer": "Abrir No Visualizador",
"closeViewer": "Fechar Visualizador",
"usePrompt": "Usar Prompt",
"deleteImage": "Apagar Imagem",
"initialImage": "Imagem inicial",
"showOptionsPanel": "Mostrar Painel de Opções",
"strength": "Força",
"upscaling": "Redimensionando",
"upscale": "Redimensionar",
"upscaleImage": "Redimensionar Imagem",
"scaleBeforeProcessing": "Escala Antes do Processamento",
"invoke": "Invocar",
"images": "Imagems",
"steps": "Passos",
"cfgScale": "Escala CFG",
"height": "Altura",
"sampler": "Amostrador",
"imageToImage": "Imagem para Imagem",
"variationAmount": "Quntidade de Variatções",
"scaledWidth": "L Escalada",
"scaledHeight": "A Escalada",
"infillMethod": "Método de Preenchimento",
"hSymmetryStep": "H Passo de Simetria",
"vSymmetryStep": "V Passo de Simetria",
"cancel": {
"immediate": "Cancelar imediatamente",
"schedule": "Cancelar após a iteração atual",
"isScheduled": "A cancelar",
"setType": "Definir tipo de cancelamento"
},
"sendToImg2Img": "Mandar para Imagem Para Imagem",
"sendToUnifiedCanvas": "Mandar para Tela Unificada",
"copyImage": "Copiar imagem",
"copyImageToLink": "Copiar Imagem Para a Ligação",
"downloadImage": "Descarregar Imagem",
"useSeed": "Usar Seed",
"useAll": "Usar Todos",
"useInitImg": "Usar Imagem Inicial",
"info": "Informações"
},
"settings": {
"confirmOnDelete": "Confirmar Antes de Apagar",
"displayHelpIcons": "Mostrar Ícones de Ajuda",
"useCanvasBeta": "Usar Layout de Telas Beta",
"enableImageDebugging": "Ativar Depuração de Imagem",
"useSlidersForAll": "Usar deslizadores para todas as opções",
"resetWebUIDesc1": "Reiniciar a interface apenas reinicia o cache local do broswer para imagens e configurações lembradas. Não apaga nenhuma imagem do disco.",
"models": "Modelos",
"displayInProgress": "Mostrar Progresso de Imagens Em Andamento",
"saveSteps": "Gravar imagens a cada n passos",
"resetWebUI": "Reiniciar Interface",
"resetWebUIDesc2": "Se as imagens não estão a aparecer na galeria ou algo mais não está a funcionar, favor tentar reiniciar antes de postar um problema no GitHub.",
"resetComplete": "A interface foi reiniciada. Atualize a página para carregar."
},
"toast": {
"uploadFailed": "Envio Falhou",
"uploadFailedMultipleImagesDesc": "Várias imagens copiadas, só é permitido uma imagem de cada vez",
"uploadFailedUnableToLoadDesc": "Não foj possível carregar o ficheiro",
"downloadImageStarted": "Download de Imagem Começou",
"imageNotLoadedDesc": "Nenhuma imagem encontrada a enviar para o módulo de imagem para imagem",
"imageLinkCopied": "Ligação de Imagem Copiada",
"imageNotLoaded": "Nenhuma Imagem Carregada",
"parametersFailed": "Problema ao carregar parâmetros",
"parametersFailedDesc": "Não foi possível carregar imagem incial.",
"seedSet": "Seed Definida",
"upscalingFailed": "Redimensionamento Falhou",
"promptNotSet": "Prompt Não Definido",
"tempFoldersEmptied": "Pasta de Ficheiros Temporários Esvaziada",
"imageCopied": "Imagem Copiada",
"imageSavedToGallery": "Imagem Salva na Galeria",
"canvasMerged": "Tela Fundida",
"sentToImageToImage": "Mandar Para Imagem Para Imagem",
"sentToUnifiedCanvas": "Enviada para a Tela Unificada",
"parametersSet": "Parâmetros Definidos",
"parametersNotSet": "Parâmetros Não Definidos",
"parametersNotSetDesc": "Nenhum metadado foi encontrado para essa imagem.",
"seedNotSet": "Seed Não Definida",
"seedNotSetDesc": "Não foi possível achar a seed para a imagem.",
"promptSet": "Prompt Definido",
"promptNotSetDesc": "Não foi possível achar prompt para essa imagem.",
"faceRestoreFailed": "Restauração de Rosto Falhou",
"metadataLoadFailed": "Falha ao tentar carregar metadados",
"initialImageSet": "Imagem Inicial Definida",
"initialImageNotSet": "Imagem Inicial Não Definida",
"initialImageNotSetDesc": "Não foi possível carregar imagem incial"
},
"tooltip": {
"feature": {
"prompt": "Este é o campo de prompt. O prompt inclui objetos de geração e termos estilísticos. Também pode adicionar peso (importância do token) no prompt, mas comandos e parâmetros de CLI não funcionarão.",
"other": "Essas opções ativam modos alternativos de processamento para o Invoke. 'Seamless tiling' criará padrões repetidos na saída. 'High resolution' é uma geração em duas etapas com img2img: use essa configuração quando desejar uma imagem maior e mais coerente sem artefatos. Levará mais tempo do que o txt2img usual.",
"seed": "O valor da semente afeta o ruído inicial a partir do qual a imagem é formada. Pode usar as sementes já existentes de imagens anteriores. 'Limiar de ruído' é usado para mitigar artefatos em valores CFG altos (experimente a faixa de 0-10) e o Perlin para adicionar ruído Perlin durante a geração: ambos servem para adicionar variação às suas saídas.",
"imageToImage": "Image to Image carrega qualquer imagem como inicial, que é então usada para gerar uma nova junto com o prompt. Quanto maior o valor, mais a imagem resultante mudará. Valores de 0.0 a 1.0 são possíveis, a faixa recomendada é de 0.25 a 0.75",
"faceCorrection": "Correção de rosto com GFPGAN ou Codeformer: o algoritmo detecta rostos na imagem e corrige quaisquer defeitos. Um valor alto mudará mais a imagem, a resultar em rostos mais atraentes. Codeformer com uma fidelidade maior preserva a imagem original às custas de uma correção de rosto mais forte.",
"seamCorrection": "Controla o tratamento das emendas visíveis que ocorrem entre as imagens geradas no canvas.",
"gallery": "A galeria exibe as gerações da pasta de saída conforme elas são criadas. As configurações são armazenadas em ficheiros e acessadas pelo menu de contexto.",
"variations": "Experimente uma variação com um valor entre 0,1 e 1,0 para mudar o resultado para uma determinada semente. Variações interessantes da semente estão entre 0,1 e 0,3.",
"upscale": "Use o ESRGAN para ampliar a imagem imediatamente após a geração.",
"boundingBox": "A caixa delimitadora é a mesma que as configurações de largura e altura para Texto para Imagem ou Imagem para Imagem. Apenas a área na caixa será processada.",
"infillAndScaling": "Gira os métodos de preenchimento (usados em áreas mascaradas ou apagadas do canvas) e a escala (útil para tamanhos de caixa delimitadora pequenos)."
}
},
"unifiedCanvas": {
"emptyTempImagesFolderMessage": "Esvaziar a pasta de ficheiros de imagem temporários também reseta completamente a Tela Unificada. Isso inclui todo o histórico de desfazer/refazer, imagens na área de preparação e a camada base da tela.",
"scaledBoundingBox": "Caixa Delimitadora Escalada",
"boundingBoxPosition": "Posição da Caixa Delimitadora",
"next": "Próximo",
"accept": "Aceitar",
"showHide": "Mostrar/Esconder",
"discardAll": "Descartar Todos",
"betaClear": "Limpar",
"betaDarkenOutside": "Escurecer Externamente",
"base": "Base",
"brush": "Pincel",
"showIntermediates": "Mostrar Intermediários",
"showGrid": "Mostrar Grade",
"clearCanvasHistoryConfirm": "Tem certeza que quer limpar o histórico de tela?",
"boundingBox": "Caixa Delimitadora",
"canvasDimensions": "Dimensões da Tela",
"canvasPosition": "Posição da Tela",
"cursorPosition": "Posição do cursor",
"previous": "Anterior",
"betaLimitToBox": "Limitar á Caixa",
"layer": "Camada",
"mask": "Máscara",
"maskingOptions": "Opções de Mascaramento",
"enableMask": "Ativar Máscara",
"preserveMaskedArea": "Preservar Área da Máscara",
"clearMask": "Limpar Máscara",
"eraser": "Apagador",
"fillBoundingBox": "Preencher Caixa Delimitadora",
"eraseBoundingBox": "Apagar Caixa Delimitadora",
"colorPicker": "Seletor de Cor",
"brushOptions": "Opções de Pincel",
"brushSize": "Tamanho",
"move": "Mover",
"resetView": "Resetar Visualização",
"mergeVisible": "Fundir Visível",
"saveToGallery": "Gravar na Galeria",
"copyToClipboard": "Copiar para a Área de Transferência",
"downloadAsImage": "Descarregar Como Imagem",
"undo": "Desfazer",
"redo": "Refazer",
"clearCanvas": "Limpar Tela",
"canvasSettings": "Configurações de Tela",
"snapToGrid": "Encaixar na Grade",
"darkenOutsideSelection": "Escurecer Seleção Externa",
"autoSaveToGallery": "Gravar Automaticamente na Galeria",
"saveBoxRegionOnly": "Gravar Apenas a Região da Caixa",
"limitStrokesToBox": "Limitar Traços à Caixa",
"showCanvasDebugInfo": "Mostrar Informações de Depuração daTela",
"clearCanvasHistory": "Limpar o Histórico da Tela",
"clearHistory": "Limpar Históprico",
"clearCanvasHistoryMessage": "Limpar o histórico de tela deixa a sua tela atual intacta, mas limpa de forma irreversível o histórico de desfazer e refazer.",
"emptyTempImageFolder": "Esvaziar a Pasta de Ficheiros de Imagem Temporários",
"emptyFolder": "Esvaziar Pasta",
"emptyTempImagesFolderConfirm": "Tem certeza que quer esvaziar a pasta de ficheiros de imagem temporários?",
"activeLayer": "Camada Ativa",
"canvasScale": "Escala da Tela",
"betaPreserveMasked": "Preservar Máscarado"
},
"accessibility": {
"invokeProgressBar": "Invocar barra de progresso",
"reset": "Repôr",
"nextImage": "Próxima imagem",
"useThisParameter": "Usar este parâmetro",
"copyMetadataJson": "Copiar metadados JSON",
"zoomIn": "Ampliar",
"zoomOut": "Reduzir",
"rotateCounterClockwise": "Girar no sentido anti-horário",
"rotateClockwise": "Girar no sentido horário",
"flipVertically": "Espelhar verticalmente",
"modifyConfig": "Modificar config",
"toggleAutoscroll": "Alternar rolagem automática",
"showGallery": "Mostrar galeria",
"showOptionsPanel": "Mostrar painel de opções",
"uploadImage": "Enviar imagem",
"previousImage": "Imagem anterior",
"flipHorizontally": "Espelhar horizontalmente",
"toggleLogViewer": "Alternar visualizador de registo"
} }
} }

View File

@ -63,7 +63,10 @@
"statusMergingModels": "Mesclando Modelos", "statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados", "statusMergedModels": "Modelos Mesclados",
"langRussian": "Russo", "langRussian": "Russo",
"langSpanish": "Espanhol" "langSpanish": "Espanhol",
"pinOptionsPanel": "Fixar painel de opções",
"loadingInvokeAI": "Carregando Invoke AI",
"loading": "Carregando"
}, },
"gallery": { "gallery": {
"generations": "Gerações", "generations": "Gerações",

View File

@ -46,7 +46,15 @@
"statusLoadingModel": "Загрузка модели", "statusLoadingModel": "Загрузка модели",
"statusModelChanged": "Модель изменена", "statusModelChanged": "Модель изменена",
"githubLabel": "Github", "githubLabel": "Github",
"discordLabel": "Discord" "discordLabel": "Discord",
"statusMergingModels": "Слияние моделей",
"statusModelConverted": "Модель сконвертирована",
"statusMergedModels": "Модели объединены",
"pinOptionsPanel": "Закрепить панель настроек",
"loading": "Загрузка",
"loadingInvokeAI": "Загрузка Invoke AI",
"back": "Назад",
"statusConvertingModel": "Конвертация модели"
}, },
"gallery": { "gallery": {
"generations": "Генерации", "generations": "Генерации",
@ -323,7 +331,30 @@
"deleteConfig": "Удалить конфигурацию", "deleteConfig": "Удалить конфигурацию",
"deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?", "deleteMsg1": "Вы точно хотите удалить модель из InvokeAI?",
"deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.", "deleteMsg2": "Это не удалит файл модели с диска. Позже вы можете добавить его снова.",
"repoIDValidationMsg": "Онлайн-репозиторий модели" "repoIDValidationMsg": "Онлайн-репозиторий модели",
"convertToDiffusersHelpText5": "Пожалуйста, убедитесь, что у вас достаточно места на диске. Модели обычно занимают 4 7 Гб.",
"invokeAIFolder": "Каталог InvokeAI",
"ignoreMismatch": "Игнорировать несоответствия между выбранными моделями",
"addCheckpointModel": "Добавить модель Checkpoint/Safetensor",
"formMessageDiffusersModelLocationDesc": "Укажите хотя бы одно.",
"convertToDiffusersHelpText3": "Файл модели на диске НЕ будет удалён или изменён. Вы сможете заново добавить его в Model Manager при необходимости.",
"vaeRepoID": "ID репозитория VAE",
"mergedModelName": "Название объединенной модели",
"checkpointModels": "Checkpoints",
"allModels": "Все модели",
"addDiffuserModel": "Добавить Diffusers",
"repo_id": "ID репозитория",
"formMessageDiffusersVAELocationDesc": "Если не указано, InvokeAI будет искать файл VAE рядом с моделью.",
"convert": "Преобразовать",
"convertToDiffusers": "Преобразовать в Diffusers",
"convertToDiffusersHelpText1": "Модель будет преобразована в формат 🧨 Diffusers.",
"convertToDiffusersHelpText4": "Это единоразовое действие. Оно может занять 30—60 секунд в зависимости от характеристик вашего компьютера.",
"convertToDiffusersHelpText6": "Вы хотите преобразовать эту модель?",
"statusConverting": "Преобразование",
"modelConverted": "Модель преобразована",
"invokeRoot": "Каталог InvokeAI",
"modelsMerged": "Модели объединены",
"mergeModels": "Объединить модели"
}, },
"parameters": { "parameters": {
"images": "Изображения", "images": "Изображения",
@ -503,5 +534,8 @@
"betaDarkenOutside": "Затемнить снаружи", "betaDarkenOutside": "Затемнить снаружи",
"betaLimitToBox": "Ограничить выделением", "betaLimitToBox": "Ограничить выделением",
"betaPreserveMasked": "Сохранять маскируемую область" "betaPreserveMasked": "Сохранять маскируемую область"
},
"accessibility": {
"modelSelect": "Выбор модели"
} }
} }

View File

@ -19,6 +19,21 @@
"discordLabel": "Discord", "discordLabel": "Discord",
"nodesDesc": "使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。", "nodesDesc": "使用Node生成圖像的系統正在開發中。敬請期待有關於這項功能的更新。",
"reportBugLabel": "回報錯誤", "reportBugLabel": "回報錯誤",
"githubLabel": "GitHub" "githubLabel": "GitHub",
"langKorean": "韓語",
"langPortuguese": "葡萄牙語",
"hotkeysLabel": "快捷鍵",
"languagePickerLabel": "切換語言",
"langDutch": "荷蘭語",
"langFrench": "法語",
"langGerman": "德語",
"langItalian": "義大利語",
"langJapanese": "日語",
"langPolish": "波蘭語",
"langBrPortuguese": "巴西葡萄牙語",
"langRussian": "俄語",
"langSpanish": "西班牙語",
"text2img": "文字到圖像",
"unifiedCanvas": "統一畫布"
} }
} }

View File

@ -9,34 +9,53 @@ import useToastWatcher from 'features/system/hooks/useToastWatcher';
import FloatingGalleryButton from 'features/ui/components/FloatingGalleryButton'; import FloatingGalleryButton from 'features/ui/components/FloatingGalleryButton';
import FloatingParametersPanelButtons from 'features/ui/components/FloatingParametersPanelButtons'; import FloatingParametersPanelButtons from 'features/ui/components/FloatingParametersPanelButtons';
import { Box, Grid } from '@chakra-ui/react'; import { Box, Flex, Grid, Portal, useColorMode } from '@chakra-ui/react';
import { APP_HEIGHT, APP_PADDING, APP_WIDTH } from 'theme/util/constants'; import { APP_HEIGHT, APP_WIDTH } from 'theme/util/constants';
import ImageGalleryPanel from 'features/gallery/components/ImageGalleryPanel';
import Lightbox from 'features/lightbox/components/Lightbox';
import { useAppSelector } from './storeHooks';
import { PropsWithChildren, useEffect } from 'react';
keepGUIAlive(); keepGUIAlive();
const App = () => { const App = (props: PropsWithChildren) => {
useToastWatcher(); useToastWatcher();
const currentTheme = useAppSelector((state) => state.ui.currentTheme);
const { setColorMode } = useColorMode();
useEffect(() => {
setColorMode(['light'].includes(currentTheme) ? 'light' : 'dark');
}, [setColorMode, currentTheme]);
return ( return (
<Grid w="100vw" h="100vh"> <Grid w="100vw" h="100vh">
<Lightbox />
<ImageUploader> <ImageUploader>
<ProgressBar /> <ProgressBar />
<Grid <Grid
gap={4} gap={4}
p={APP_PADDING} p={4}
gridAutoRows="min-content auto" gridAutoRows="min-content auto"
w={APP_WIDTH} w={APP_WIDTH}
h={APP_HEIGHT} h={APP_HEIGHT}
> >
<SiteHeader /> {props.children || <SiteHeader />}
<InvokeTabs /> <Flex gap={4} w="full" h="full">
<InvokeTabs />
<ImageGalleryPanel />
</Flex>
</Grid> </Grid>
<Box> <Box>
<Console /> <Console />
</Box> </Box>
</ImageUploader> </ImageUploader>
<FloatingParametersPanelButtons /> <Portal>
<FloatingGalleryButton /> <FloatingParametersPanelButtons />
</Portal>
<Portal>
<FloatingGalleryButton />
</Portal>
</Grid> </Grid>
); );
}; };
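
The `useColorMode` sync added above keeps Chakra's light/dark mode in step with the app's theme slice. A minimal standalone sketch of the same pattern, with the union of theme names assumed from the theme color imports elsewhere in this diff:

```ts
import { useColorMode } from '@chakra-ui/react';
import { useEffect } from 'react';

// Assumed theme names; only 'light' maps to Chakra's light mode.
type AppTheme = 'dark' | 'light' | 'green' | 'ocean';

export function useSyncColorMode(currentTheme: AppTheme) {
  const { setColorMode } = useColorMode();

  useEffect(() => {
    // Every theme except 'light' uses a dark-background palette.
    setColorMode(currentTheme === 'light' ? 'light' : 'dark');
  }, [setColorMode, currentTheme]);
}
```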

View File

@ -9,6 +9,15 @@ import { greenTeaThemeColors } from 'theme/colors/greenTea';
import { invokeAIThemeColors } from 'theme/colors/invokeAI'; import { invokeAIThemeColors } from 'theme/colors/invokeAI';
import { lightThemeColors } from 'theme/colors/lightTheme'; import { lightThemeColors } from 'theme/colors/lightTheme';
import { oceanBlueColors } from 'theme/colors/oceanBlue'; import { oceanBlueColors } from 'theme/colors/oceanBlue';
import '@fontsource/inter/100.css';
import '@fontsource/inter/200.css';
import '@fontsource/inter/300.css';
import '@fontsource/inter/400.css';
import '@fontsource/inter/500.css';
import '@fontsource/inter/600.css';
import '@fontsource/inter/700.css';
import '@fontsource/inter/800.css';
import '@fontsource/inter/900.css';
type ThemeLocaleProviderProps = { type ThemeLocaleProviderProps = {
children: ReactNode; children: ReactNode;

View File

@ -31,18 +31,14 @@ export const DIFFUSERS_SAMPLERS: Array<string> = [
]; ];
// Valid image widths // Valid image widths
export const WIDTHS: Array<number> = [ export const WIDTHS: Array<number> = Array.from(Array(65)).map(
64, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, (_x, i) => i * 64
1024, 1088, 1152, 1216, 1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, );
1856, 1920, 1984, 2048,
];
// Valid image heights // Valid image heights
export const HEIGHTS: Array<number> = [ export const HEIGHTS: Array<number> = Array.from(Array(65)).map(
64, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960, (_x, i) => i * 64
1024, 1088, 1152, 1216, 1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, );
1856, 1920, 1984, 2048,
];
// Valid upscaling levels // Valid upscaling levels
export const UPSCALING_LEVELS: Array<{ key: string; value: number }> = [ export const UPSCALING_LEVELS: Array<{ key: string; value: number }> = [
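
One detail of the `Array.from` refactor above: with a length of 65 and `i * 64`, the generated lists run from 0 to 4096, while the literal lists they replace ran from 64 to 2048. If the original bounds were the intent, a variant preserving them would look like this (a sketch of an alternative, not what the diff ships):

```ts
// 32 values from 64 to 2048 in steps of 64, matching the removed literals
// (no leading 0 and no tail beyond 2048).
export const WIDTHS: Array<number> = Array.from(
  { length: 32 },
  (_, i) => (i + 1) * 64
);
export const HEIGHTS: Array<number> = [...WIDTHS];
```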

View File

@ -57,10 +57,13 @@ const galleryBlacklist = [
'currentImage', 'currentImage',
'currentImageUuid', 'currentImageUuid',
'shouldAutoSwitchToNewImages', 'shouldAutoSwitchToNewImages',
'shouldHoldGalleryOpen',
'intermediateImage', 'intermediateImage',
].map((blacklistItem) => `gallery.${blacklistItem}`); ].map((blacklistItem) => `gallery.${blacklistItem}`);
const lightboxBlacklist = ['isLightboxOpen'].map(
(blacklistItem) => `lightbox.${blacklistItem}`
);
const rootReducer = combineReducers({ const rootReducer = combineReducers({
generation: generationReducer, generation: generationReducer,
postprocessing: postprocessingReducer, postprocessing: postprocessingReducer,
@ -75,7 +78,12 @@ const rootPersistConfig = getPersistConfig({
key: 'root', key: 'root',
storage, storage,
rootReducer, rootReducer,
blacklist: [...canvasBlacklist, ...systemBlacklist, ...galleryBlacklist], blacklist: [
...canvasBlacklist,
...systemBlacklist,
...galleryBlacklist,
...lightboxBlacklist,
],
debounce: 300, debounce: 300,
}); });
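
The blacklist entries are dot-notation paths of the form `slice.key`, which `getPersistConfig` (presumably from redux-deep-persist) expands into per-slice persist rules so transient UI state is skipped on rehydration. A small sketch of how such lists compose, with illustrative key names:

```ts
// Builds namespaced blacklist keys the same way the slices above do.
const makeBlacklist = (slice: string, keys: string[]) =>
  keys.map((key) => `${slice}.${key}`);

const blacklist = [
  ...makeBlacklist('gallery', ['currentImage', 'intermediateImage']),
  ...makeBlacklist('lightbox', ['isLightboxOpen']),
];
// -> ['gallery.currentImage', 'gallery.intermediateImage', 'lightbox.isLightboxOpen']
```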

View File

@ -1,5 +1,6 @@
import { Box, forwardRef, Icon } from '@chakra-ui/react'; import { Box, forwardRef, Icon } from '@chakra-ui/react';
import { Feature } from 'app/features'; import { Feature } from 'app/features';
import { memo } from 'react';
import { IconType } from 'react-icons'; import { IconType } from 'react-icons';
import { MdHelp } from 'react-icons/md'; import { MdHelp } from 'react-icons/md';
import GuidePopover from './GuidePopover'; import GuidePopover from './GuidePopover';
@ -19,4 +20,4 @@ const GuideIcon = forwardRef(
) )
); );
export default GuideIcon; export default memo(GuideIcon);

View File

@ -11,7 +11,7 @@ import { Feature, useFeatureHelpInfo } from 'app/features';
import { useAppSelector } from 'app/storeHooks'; import { useAppSelector } from 'app/storeHooks';
import { systemSelector } from 'features/system/store/systemSelectors'; import { systemSelector } from 'features/system/store/systemSelectors';
import { SystemState } from 'features/system/store/systemSlice'; import { SystemState } from 'features/system/store/systemSlice';
import { ReactElement } from 'react'; import { memo, ReactElement } from 'react';
type GuideProps = { type GuideProps = {
children: ReactElement; children: ReactElement;
@ -30,7 +30,7 @@ const GuidePopover = ({ children, feature }: GuideProps) => {
if (!shouldDisplayGuides) return null; if (!shouldDisplayGuides) return null;
return ( return (
<Popover trigger="hover"> <Popover trigger="hover" isLazy>
<PopoverTrigger> <PopoverTrigger>
<Box>{children}</Box> <Box>{children}</Box>
</PopoverTrigger> </PopoverTrigger>
@ -46,4 +46,4 @@ const GuidePopover = ({ children, feature }: GuideProps) => {
); );
}; };
export default GuidePopover; export default memo(GuidePopover);

View File

@ -8,7 +8,8 @@ import {
forwardRef, forwardRef,
useDisclosure, useDisclosure,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { cloneElement, ReactElement, ReactNode, useRef } from 'react'; import { cloneElement, memo, ReactElement, ReactNode, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import IAIButton from './IAIButton'; import IAIButton from './IAIButton';
type Props = { type Props = {
@ -22,10 +23,12 @@ type Props = {
}; };
const IAIAlertDialog = forwardRef((props: Props, ref) => { const IAIAlertDialog = forwardRef((props: Props, ref) => {
const { t } = useTranslation();
const { const {
acceptButtonText = 'Accept', acceptButtonText = t('common.accept'),
acceptCallback, acceptCallback,
cancelButtonText = 'Cancel', cancelButtonText = t('common.cancel'),
cancelCallback, cancelCallback,
children, children,
title, title,
@ -56,6 +59,7 @@ const IAIAlertDialog = forwardRef((props: Props, ref) => {
isOpen={isOpen} isOpen={isOpen}
leastDestructiveRef={cancelRef} leastDestructiveRef={cancelRef}
onClose={onClose} onClose={onClose}
isCentered
> >
<AlertDialogOverlay> <AlertDialogOverlay>
<AlertDialogContent> <AlertDialogContent>
@ -79,4 +83,4 @@ const IAIAlertDialog = forwardRef((props: Props, ref) => {
</> </>
); );
}); });
export default IAIAlertDialog; export default memo(IAIAlertDialog);
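
The default button labels now resolve through `t('common.accept')` and `t('common.cancel')`, the keys added to en.json earlier in this diff, so uncustomized dialogs follow the active locale. A reduced sketch of that defaulting pattern (the hook name is illustrative):

```ts
import { useTranslation } from 'react-i18next';

type LabelProps = {
  acceptButtonText?: string;
  cancelButtonText?: string;
};

// Defaults are computed on each render, so they re-resolve when the
// locale changes instead of being frozen at module load time.
export function useDialogLabels({ acceptButtonText, cancelButtonText }: LabelProps) {
  const { t } = useTranslation();
  return {
    accept: acceptButtonText ?? t('common.accept'),
    cancel: cancelButtonText ?? t('common.cancel'),
  };
}
```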

View File

@ -5,7 +5,7 @@ import {
Tooltip, Tooltip,
TooltipProps, TooltipProps,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { ReactNode } from 'react'; import { memo, ReactNode } from 'react';
export interface IAIButtonProps extends ButtonProps { export interface IAIButtonProps extends ButtonProps {
tooltip?: string; tooltip?: string;
@ -25,4 +25,4 @@ const IAIButton = forwardRef((props: IAIButtonProps, forwardedRef) => {
); );
}); });
export default IAIButton; export default memo(IAIButton);

View File

@ -1,5 +1,5 @@
import { Checkbox, CheckboxProps } from '@chakra-ui/react'; import { Checkbox, CheckboxProps } from '@chakra-ui/react';
import type { ReactNode } from 'react'; import { memo, ReactNode } from 'react';
type IAICheckboxProps = CheckboxProps & { type IAICheckboxProps = CheckboxProps & {
label: string | ReactNode; label: string | ReactNode;
@ -14,4 +14,4 @@ const IAICheckbox = (props: IAICheckboxProps) => {
); );
}; };
export default IAICheckbox; export default memo(IAICheckbox);

View File

@ -1,4 +1,5 @@
import { chakra, ChakraProps } from '@chakra-ui/react'; import { chakra, ChakraProps } from '@chakra-ui/react';
import { memo } from 'react';
import { RgbaColorPicker } from 'react-colorful'; import { RgbaColorPicker } from 'react-colorful';
import { ColorPickerBaseProps, RgbaColor } from 'react-colorful/dist/types'; import { ColorPickerBaseProps, RgbaColor } from 'react-colorful/dist/types';
@ -35,4 +36,4 @@ const IAIColorPicker = (props: IAIColorPickerProps) => {
); );
}; };
export default IAIColorPicker; export default memo(IAIColorPicker);

View File

@ -0,0 +1,8 @@
import { chakra } from '@chakra-ui/react';
/**
* Chakra-enabled <form />
*/
const IAIForm = chakra.form;
export default IAIForm;
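
`chakra.form` applies the Chakra factory to a plain `<form>` element, so the wrapper accepts style props with no extra code. A hypothetical usage (the layout values are illustrative):

```tsx
import IAIForm from './IAIForm';

// Style props such as sx work here only because of the chakra factory.
const Example = () => (
  <IAIForm
    sx={{ display: 'flex', flexDirection: 'column', rowGap: 4 }}
    onSubmit={(e) => e.preventDefault()}
  >
    {/* form fields go here */}
  </IAIForm>
);

export default Example;
```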

View File

@ -0,0 +1,15 @@
import { FormErrorMessage, FormErrorMessageProps } from '@chakra-ui/react';
import { ReactNode } from 'react';
type IAIFormErrorMessageProps = FormErrorMessageProps & {
children: ReactNode | string;
};
export default function IAIFormErrorMessage(props: IAIFormErrorMessageProps) {
const { children, ...rest } = props;
return (
<FormErrorMessage color="error.400" {...rest}>
{children}
</FormErrorMessage>
);
}

View File

@ -0,0 +1,15 @@
import { FormHelperText, FormHelperTextProps } from '@chakra-ui/react';
import { ReactNode } from 'react';
type IAIFormHelperTextProps = FormHelperTextProps & {
children: ReactNode | string;
};
export default function IAIFormHelperText(props: IAIFormHelperTextProps) {
const { children, ...rest } = props;
return (
<FormHelperText margin={0} color="base.400" {...rest}>
{children}
</FormHelperText>
);
}

View File

@ -0,0 +1,23 @@
import { Flex } from '@chakra-ui/react';
import { ReactElement } from 'react';
export function IAIFormItemWrapper({
children,
}: {
children: ReactElement | ReactElement[];
}) {
return (
<Flex
sx={{
flexDirection: 'column',
padding: 4,
rowGap: 4,
borderRadius: 'base',
width: 'full',
bg: 'base.900',
}}
>
{children}
</Flex>
);
}

View File

@ -5,15 +5,17 @@ import {
Tooltip, Tooltip,
TooltipProps, TooltipProps,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { memo } from 'react';
export type IAIIconButtonProps = IconButtonProps & { export type IAIIconButtonProps = IconButtonProps & {
role?: string;
tooltip?: string; tooltip?: string;
tooltipProps?: Omit<TooltipProps, 'children'>; tooltipProps?: Omit<TooltipProps, 'children'>;
isChecked?: boolean; isChecked?: boolean;
}; };
const IAIIconButton = forwardRef((props: IAIIconButtonProps, forwardedRef) => { const IAIIconButton = forwardRef((props: IAIIconButtonProps, forwardedRef) => {
const { tooltip = '', tooltipProps, isChecked, ...rest } = props; const { role, tooltip = '', tooltipProps, isChecked, ...rest } = props;
return ( return (
<Tooltip <Tooltip
@ -26,6 +28,7 @@ const IAIIconButton = forwardRef((props: IAIIconButtonProps, forwardedRef) => {
> >
<IconButton <IconButton
ref={forwardedRef} ref={forwardedRef}
role={role}
aria-checked={isChecked !== undefined ? isChecked : undefined} aria-checked={isChecked !== undefined ? isChecked : undefined}
{...rest} {...rest}
/> />
@ -33,4 +36,5 @@ const IAIIconButton = forwardRef((props: IAIIconButtonProps, forwardedRef) => {
); );
}); });
export default IAIIconButton; IAIIconButton.displayName = 'IAIIconButton';
export default memo(IAIIconButton);
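
Setting `displayName` before wrapping in `memo` is what keeps the component identifiable: `forwardRef` and `memo` both produce wrapper objects that React DevTools would otherwise label as anonymous. A reduced sketch of the pattern:

```tsx
import { IconButton, IconButtonProps } from '@chakra-ui/react';
import { forwardRef, memo } from 'react';

const Base = forwardRef<HTMLButtonElement, IconButtonProps>((props, ref) => (
  <IconButton ref={ref} {...props} />
));
// Without this, DevTools shows the memoized wrapper as "Anonymous".
Base.displayName = 'IAIIconButton';

export default memo(Base);
```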

View File

@ -5,7 +5,7 @@ import {
Input, Input,
InputProps, InputProps,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { ChangeEvent } from 'react'; import { ChangeEvent, memo } from 'react';
interface IAIInputProps extends InputProps { interface IAIInputProps extends InputProps {
label?: string; label?: string;
@ -15,7 +15,7 @@ interface IAIInputProps extends InputProps {
formControlProps?: Omit<FormControlProps, 'isInvalid' | 'isDisabled'>; formControlProps?: Omit<FormControlProps, 'isInvalid' | 'isDisabled'>;
} }
export default function IAIInput(props: IAIInputProps) { const IAIInput = (props: IAIInputProps) => {
const { const {
label = '', label = '',
isDisabled = false, isDisabled = false,
@ -34,4 +34,6 @@ export default function IAIInput(props: IAIInputProps) {
<Input {...rest} /> <Input {...rest} />
</FormControl> </FormControl>
); );
} };
export default memo(IAIInput);

View File

@@ -16,7 +16,7 @@ import {
 } from '@chakra-ui/react';
 import { clamp } from 'lodash';
-import { FocusEvent, useEffect, useState } from 'react';
+import { FocusEvent, memo, useEffect, useState } from 'react';
 
 const numberStringRegex = /^-?(0\.)?\.?$/;
 
@@ -139,4 +139,4 @@ const IAINumberInput = (props: Props) => {
   );
 };
 
-export default IAINumberInput;
+export default memo(IAINumberInput);

View File

@@ -0,0 +1,18 @@
+import { useToken } from '@chakra-ui/react';
+import { ReactNode } from 'react';
+
+type IAIOptionProps = {
+  children: ReactNode | string | number;
+  value: string | number;
+};
+
+export default function IAIOption(props: IAIOptionProps) {
+  const { children, value } = props;
+  const [base800, base200] = useToken('colors', ['base.800', 'base.200']);
+
+  return (
+    <option value={value} style={{ background: base800, color: base200 }}>
+      {children}
+    </option>
+  );
+}
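
useToken resolves theme tokens to their raw CSS values, which is what makes the native <option> element themeable here: plain HTML options cannot take Chakra style props, so the resolved strings go through the style attribute instead. The hook in isolation, assuming a theme that defines a base color scale as this app's themes do:

import { Box, useToken } from '@chakra-ui/react';

const TokenDemo = () => {
  // Resolves to the raw values behind the tokens, e.g. hex/rgba strings.
  const [base800, base200] = useToken('colors', ['base.800', 'base.200']);

  return (
    // Useful wherever Chakra style props can't reach: inline styles,
    // canvas drawing, third-party libraries, native elements.
    <Box style={{ background: base800, color: base200 }}>
      themed via raw token values
    </Box>
  );
};

export default TokenDemo;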

View File

@@ -6,9 +6,9 @@ import {
   PopoverProps,
   PopoverTrigger,
 } from '@chakra-ui/react';
-import { ReactNode } from 'react';
+import { memo, ReactNode } from 'react';
 
-type IAIPopoverProps = PopoverProps & {
+export type IAIPopoverProps = PopoverProps & {
   triggerComponent: ReactNode;
   triggerContainerProps?: BoxProps;
   children: ReactNode;
@@ -35,4 +35,4 @@ const IAIPopover = (props: IAIPopoverProps) => {
   );
 };
 
-export default IAIPopover;
+export default memo(IAIPopover);

View File

@@ -6,7 +6,8 @@ import {
   Tooltip,
   TooltipProps,
 } from '@chakra-ui/react';
-import { MouseEvent } from 'react';
+import { memo, MouseEvent } from 'react';
+import IAIOption from './IAIOption';
 
 type IAISelectProps = SelectProps & {
   label?: string;
@@ -37,13 +38,13 @@ const IAISelect = (props: IAISelectProps) => {
       <Select {...rest}>
         {validValues.map((opt) => {
           return typeof opt === 'string' || typeof opt === 'number' ? (
-            <option key={opt} value={opt}>
+            <IAIOption key={opt} value={opt}>
               {opt}
-            </option>
+            </IAIOption>
           ) : (
-            <option key={opt.value} value={opt.value}>
+            <IAIOption key={opt.value} value={opt.value}>
               {opt.key}
-            </option>
+            </IAIOption>
           );
         })}
       </Select>
@@ -52,4 +53,4 @@ const IAISelect = (props: IAISelectProps) => {
   );
 };
 
-export default IAISelect;
+export default memo(IAISelect);
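
With IAIOption swapped in for the bare <option>, every dropdown in the app picks up themed option rows. A usage sketch — the map above shows validValues accepts bare strings/numbers or { key, value } pairs; the sampler names and exact prop typings here are illustrative, since the full interface isn't shown in this hunk:

import { ChangeEvent } from 'react';
import IAISelect from './IAISelect';

const ExampleSamplerSelect = () => {
  const handleChange = (e: ChangeEvent<HTMLSelectElement>) => {
    console.log('selected:', e.target.value);
  };

  return (
    <IAISelect
      label="Sampler"
      // Mixed form: bare strings and key/value pairs are both accepted.
      validValues={['euler', 'ddim', { key: 'DPM++ 2M', value: 'dpmpp_2m' }]}
      onChange={handleChange}
    />
  );
};

export default ExampleSamplerSelect;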

View File

@@ -11,7 +11,7 @@ import {
   IconButtonProps,
   ButtonProps,
 } from '@chakra-ui/react';
-import { MouseEventHandler, ReactNode } from 'react';
+import { memo, MouseEventHandler, ReactNode } from 'react';
 import { MdArrowDropDown, MdArrowDropUp } from 'react-icons/md';
 
 interface IAIMenuItem {
@@ -31,7 +31,7 @@ interface IAIMenuProps {
   menuItemProps?: MenuItemProps;
 }
 
-export default function IAISimpleMenu(props: IAIMenuProps) {
+const IAISimpleMenu = (props: IAIMenuProps) => {
   const {
     menuType = 'icon',
     iconTooltip,
@@ -68,6 +68,7 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
         <MenuButton
           as={menuType === 'icon' ? IconButton : Button}
           tooltip={iconTooltip}
+          aria-label={iconTooltip}
           icon={isOpen ? <MdArrowDropUp /> : <MdArrowDropDown />}
           paddingX={0}
           paddingY={menuType === 'regular' ? 2 : 0}
@@ -82,4 +83,6 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
       )}
     </Menu>
   );
-}
+};
+
+export default memo(IAISimpleMenu);

View File

@@ -25,7 +25,8 @@ import {
 } from '@chakra-ui/react';
 import { clamp } from 'lodash';
-import { FocusEvent, useEffect, useMemo, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+import { FocusEvent, memo, useEffect, useMemo, useState } from 'react';
 import { BiReset } from 'react-icons/bi';
 import IAIIconButton, { IAIIconButtonProps } from './IAIIconButton';
 
@@ -61,7 +62,7 @@ export type IAIFullSliderProps = {
   sliderIAIIconButtonProps?: IAIIconButtonProps;
 };
 
-export default function IAISlider(props: IAIFullSliderProps) {
+const IAISlider = (props: IAIFullSliderProps) => {
   const [showTooltip, setShowTooltip] = useState(false);
   const {
     label,
@@ -96,6 +97,8 @@ export default function IAISlider(props: IAIFullSliderProps) {
     ...rest
   } = props;
 
+  const { t } = useTranslation();
+
   const [localInputValue, setLocalInputValue] = useState<
     string | number | undefined
   >(String(value));
@@ -171,16 +174,22 @@ export default function IAISlider(props: IAIFullSliderProps) {
         <>
           <SliderMark
             value={min}
-            insetInlineStart={0}
-            sx={{ insetInlineStart: 'unset !important' }}
+            // insetInlineStart={0}
+            sx={{
+              insetInlineStart: '0 !important',
+              insetInlineEnd: 'unset !important',
+            }}
             {...sliderMarkProps}
           >
             {min}
           </SliderMark>
           <SliderMark
             value={max}
-            insetInlineEnd={0}
-            sx={{ insetInlineStart: 'unset !important' }}
+            // insetInlineEnd={0}
+            sx={{
+              insetInlineStart: 'unset !important',
+              insetInlineEnd: '0 !important',
+            }}
             {...sliderMarkProps}
           >
             {max}
@@ -234,7 +243,7 @@ export default function IAISlider(props: IAIFullSliderProps) {
         {withReset && (
           <IAIIconButton
             size="sm"
-            aria-label="Reset"
+            aria-label={t('accessibility.reset')}
             tooltip="Reset"
             icon={<BiReset />}
             onClick={handleResetDisable}
@@ -245,4 +254,6 @@ export default function IAISlider(props: IAIFullSliderProps) {
       </HStack>
     </FormControl>
   );
-}
+};
+
+export default memo(IAISlider);
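
The hard-coded "Reset" aria-label becomes a translation lookup. A sketch of the pieces this assumes: an i18next resource bundle exposing an accessibility.* key, and a component reading it through useTranslation — the English strings and init options below are illustrative:

import i18n from 'i18next';
import { initReactI18next, useTranslation } from 'react-i18next';

// Minimal resource bundle with the key shape the diff assumes.
i18n.use(initReactI18next).init({
  lng: 'en',
  resources: {
    en: {
      translation: {
        accessibility: {
          reset: 'Reset',
          uploadImage: 'Upload Image',
        },
      },
    },
  },
});

const ResetLabel = () => {
  const { t } = useTranslation();
  // Screen readers get the localized string; sighted users see the icon.
  return <button aria-label={t('accessibility.reset')}>reset</button>;
};

export default ResetLabel;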

View File

@@ -6,6 +6,7 @@ import {
   Switch,
   SwitchProps,
 } from '@chakra-ui/react';
+import { memo } from 'react';
 
 interface Props extends SwitchProps {
   label?: string;
@@ -44,4 +45,4 @@ const IAISwitch = (props: Props) => {
   );
 };
 
-export default IAISwitch;
+export default memo(IAISwitch);

View File

@@ -3,10 +3,11 @@ import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
 import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import useImageUploader from 'common/hooks/useImageUploader';
 import { uploadImage } from 'features/gallery/store/thunks/uploadImage';
-import { tabDict } from 'features/ui/components/InvokeTabs';
 import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
+import { ResourceKey } from 'i18next';
 import {
   KeyboardEvent,
+  memo,
   ReactNode,
   useCallback,
   useEffect,
@@ -134,7 +135,7 @@ const ImageUploader = (props: ImageUploaderProps) => {
   const overlaySecondaryText = ['img2img', 'unifiedCanvas'].includes(
     activeTabName
   )
-    ? ` to ${tabDict[activeTabName as keyof typeof tabDict].tooltip}`
+    ? ` to ${String(t(`common.${activeTabName}` as ResourceKey))}`
     : ``;
 
   return (
@@ -161,4 +162,4 @@ const ImageUploader = (props: ImageUploaderProps) => {
   );
 };
 
-export default ImageUploader;
+export default memo(ImageUploader);
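
The overlay text is now derived from a runtime-built translation key instead of the tabDict lookup. The `as ResourceKey` cast tells TypeScript that the computed string is a valid key, and String(...) normalizes t's return type for interpolation. The same technique reduced to a sketch — the tab names match the diff, but the surrounding markup is illustrative:

import { ResourceKey } from 'i18next';
import { useTranslation } from 'react-i18next';

const OverlayText = ({ activeTabName }: { activeTabName: string }) => {
  const { t } = useTranslation();

  // Only these tabs accept dropped images in the component above.
  const overlaySecondaryText = ['img2img', 'unifiedCanvas'].includes(
    activeTabName
  )
    ? // The cast is needed because TS can't verify a runtime-built key.
      ` to ${String(t(`common.${activeTabName}` as ResourceKey))}`
    : ``;

  return <span>Drop an image{overlaySecondaryText}</span>;
};

export default OverlayText;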

View File

@@ -1,14 +1,16 @@
 import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
 import { useContext } from 'react';
+import { useTranslation } from 'react-i18next';
 import { FaUpload } from 'react-icons/fa';
 import IAIIconButton from './IAIIconButton';
 
 const ImageUploaderIconButton = () => {
+  const { t } = useTranslation();
   const openImageUploader = useContext(ImageUploaderTriggerContext);
 
   return (
     <IAIIconButton
-      aria-label="Upload Image"
+      aria-label={t('accessibility.uploadImage')}
       tooltip="Upload Image"
       icon={<FaUpload />}
       onClick={openImageUploader || undefined}

View File

@@ -0,0 +1,38 @@
+import React, { lazy, PropsWithChildren } from 'react';
+import { Provider } from 'react-redux';
+import { PersistGate } from 'redux-persist/integration/react';
+import { store } from './app/store';
+import { persistor } from './persistor';
+
+import '@fontsource/inter/100.css';
+import '@fontsource/inter/200.css';
+import '@fontsource/inter/300.css';
+import '@fontsource/inter/400.css';
+import '@fontsource/inter/500.css';
+import '@fontsource/inter/600.css';
+import '@fontsource/inter/700.css';
+import '@fontsource/inter/800.css';
+import '@fontsource/inter/900.css';
+
+import Loading from './Loading';
+// Localization
+import './i18n';
+
+const App = lazy(() => import('./app/App'));
+const ThemeLocaleProvider = lazy(() => import('./app/ThemeLocaleProvider'));
+
+export default function Component(props: PropsWithChildren) {
+  return (
+    <React.StrictMode>
+      <Provider store={store}>
+        <PersistGate loading={<Loading />} persistor={persistor}>
+          <React.Suspense fallback={<Loading showText />}>
+            <ThemeLocaleProvider>
+              <App>{props.children}</App>
+            </ThemeLocaleProvider>
+          </React.Suspense>
+        </PersistGate>
+      </Provider>
+    </React.StrictMode>
+  );
+}
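
Both the app and the theme provider are lazy-loaded, so the initial chunk only needs Loading, while PersistGate holds rendering until the persisted Redux state has rehydrated. The code-splitting half of that pattern in isolation — the module path is hypothetical:

import React, { lazy, Suspense } from 'react';

// The import() call becomes a separate chunk at build time;
// the chunk is only fetched the first time the component renders.
const HeavyPanel = lazy(() => import('./HeavyPanel'));

const Shell = () => (
  <Suspense fallback={<div>Loading…</div>}>
    <HeavyPanel />
  </Suspense>
);

export default Shell;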

View File

@@ -0,0 +1,16 @@
+import Component from './component';
+import InvokeAiLogoComponent from './features/system/components/InvokeAILogoComponent';
+import ThemeChanger from './features/system/components/ThemeChanger';
+import IAIPopover from './common/components/IAIPopover';
+import IAIIconButton from './common/components/IAIIconButton';
+import SettingsModal from './features/system/components/SettingsModal/SettingsModal';
+
+export default Component;
+
+export {
+  InvokeAiLogoComponent,
+  ThemeChanger,
+  IAIPopover,
+  IAIIconButton,
+  SettingsModal,
+};
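
This index turns the web frontend into an embeddable library: the full UI as the default export plus a few building blocks as named exports. A sketch of what a host application might do with it — the package name is hypothetical, and rendering children inside the app follows from the Component definition above:

// Hypothetical package name; the index above defines the export shape.
import InvokeAIUI, { ThemeChanger } from 'invoke-ai-web';
import { createRoot } from 'react-dom/client';

const root = createRoot(document.getElementById('root') as HTMLElement);

// The default export renders the full app; named exports can be
// mounted standalone inside the host application's own chrome.
root.render(
  <InvokeAIUI>
    <ThemeChanger />
  </InvokeAIUI>
);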

View File

@@ -1,5 +1,6 @@
 // Grid drawing adapted from https://longviewcoder.com/2021/12/08/konva-a-better-grid/
+import { useToken } from '@chakra-ui/react';
 import { createSelector } from '@reduxjs/toolkit';
 import { RootState } from 'app/store';
 import { useAppSelector } from 'app/storeHooks';
@@ -22,13 +23,6 @@ const selector = createSelector(
   }
 );
 
-const gridLinesColor = {
-  dark: 'rgba(255, 255, 255, 0.2)',
-  green: 'rgba(255, 255, 255, 0.2)',
-  light: 'rgba(0, 0, 0, 0.2)',
-  ocean: 'rgba(136, 148, 184, 0.2)',
-};
-
 const IAICanvasGrid = () => {
   const currentTheme = useAppSelector(
     (state: RootState) => state.ui.currentTheme
@@ -37,6 +31,8 @@ const IAICanvasGrid = () => {
     useAppSelector(selector);
   const [gridLines, setGridLines] = useState<ReactNode[]>([]);
+  const [gridLineColor] = useToken('colors', ['gridLineColor']);
+
   const unscale = useCallback(
     (value: number) => {
       return value / stageScale;
@@ -45,9 +41,6 @@ const IAICanvasGrid = () => {
   );
 
   useLayoutEffect(() => {
-    const gridLineColor =
-      gridLinesColor[currentTheme as keyof typeof gridLinesColor];
-
     const { width, height } = stageDimensions;
     const { x, y } = stageCoordinates;
 
@@ -112,7 +105,14 @@ const IAICanvasGrid = () => {
     ));
 
     setGridLines(xLines.concat(yLines));
-  }, [stageScale, stageCoordinates, stageDimensions, currentTheme, unscale]);
+  }, [
+    stageScale,
+    stageCoordinates,
+    stageDimensions,
+    currentTheme,
+    unscale,
+    gridLineColor,
+  ]);
 
   return <Group>{gridLines}</Group>;
 };
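
The per-theme color map moves out of the component and into the theme itself: each theme now defines a gridLineColor token and useToken resolves whichever theme is active, so adding a theme no longer means touching the canvas code. A sketch of how one theme could declare the token — the value is borrowed from the deleted map, but the actual theme files aren't shown in this diff:

import { extendTheme } from '@chakra-ui/react';

// One theme variant; the other themes would define their own value
// for the same token name (e.g. the deleted map's light/ocean colors).
const darkTheme = extendTheme({
  colors: {
    gridLineColor: 'rgba(255, 255, 255, 0.2)',
  },
});

export default darkTheme;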

View File

@@ -104,7 +104,7 @@ const IAICanvasStatusText = () => {
         margin: 1,
         borderRadius: 'base',
         pointerEvents: 'none',
-        bg: 'blackAlpha.500',
+        bg: 'base.800',
       }}
     >
       <Box

View File

@@ -104,7 +104,6 @@ const IAICanvasMaskOptions = () => {
 
   return (
     <IAIPopover
-      trigger="hover"
       triggerComponent={
         <ButtonGroup>
           <IAIIconButton

View File

@@ -88,7 +88,7 @@ const IAICanvasSettingsButtonPopover = () => {
 
   return (
     <IAIPopover
-      trigger="hover"
+      isLazy={false}
       triggerComponent={
         <IAIIconButton
           tooltip={t('unifiedCanvas.canvasSettings')}

View File

@@ -219,7 +219,6 @@ const IAICanvasToolChooserOptions = () => {
         onClick={handleSelectColorPickerTool}
       />
       <IAIPopover
-        trigger="hover"
         triggerComponent={
           <IAIIconButton
             aria-label={t('unifiedCanvas.brushOptions')}

Some files were not shown because too many files have changed in this diff.