Merge branch 'main' into tests

mastercaster 2023-03-07 09:03:18 +00:00, committed by GitHub
commit 00f30ea457
343 changed files with 6850 additions and 6823 deletions

View File

@ -23,7 +23,7 @@ jobs:
node-version: '18'
- uses: actions/checkout@v3
- run: 'yarn install --frozen-lockfile'
- run: 'yarn tsc'
- run: 'yarn run madge'
- run: 'yarn run lint --max-warnings=0'
- run: 'yarn run prettier --check'
- run: 'yarn run lint:tsc'
- run: 'yarn run lint:madge'
- run: 'yarn run lint:eslint'
- run: 'yarn run lint:prettier'

View File

View File

@ -0,0 +1,202 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
from abc import ABC, abstractmethod
import argparse
from typing import Any, Callable, Iterable, Literal, Union, get_args, get_origin, get_type_hints
from pydantic import BaseModel, Field
from ..invocations.image import ImageField
from ..services.graph import GraphExecutionState
from ..services.invoker import Invoker
def add_parsers(
subparsers,
commands: list[type],
command_field: str = "type",
exclude_fields: list[str] = ["id", "type"],
add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
):
"""Adds parsers for each command to the subparsers"""
# Create subparsers for each command
for command in commands:
hints = get_type_hints(command)
cmd_name = get_args(hints[command_field])[0]
command_parser = subparsers.add_parser(cmd_name, help=command.__doc__)
if add_arguments is not None:
add_arguments(command_parser)
# Convert all fields to arguments
fields = command.__fields__ # type: ignore
for name, field in fields.items():
if name in exclude_fields:
continue
if get_origin(field.type_) == Literal:
allowed_values = get_args(field.type_)
allowed_types = set()
for val in allowed_values:
allowed_types.add(type(val))
allowed_types_list = list(allowed_types)
field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[tuple(allowed_types_list)] # type: ignore
command_parser.add_argument(
f"--{name}",
dest=name,
type=field_type,
default=field.default,
choices=allowed_values,
help=field.field_info.description,
)
else:
command_parser.add_argument(
f"--{name}",
dest=name,
type=field.type_,
default=field.default,
help=field.field_info.description,
)
class CliContext:
invoker: Invoker
session: GraphExecutionState
parser: argparse.ArgumentParser
defaults: dict[str, Any]
def __init__(self, invoker: Invoker, session: GraphExecutionState, parser: argparse.ArgumentParser):
self.invoker = invoker
self.session = session
self.parser = parser
self.defaults = dict()
def get_session(self):
self.session = self.invoker.services.graph_execution_manager.get(self.session.id)
return self.session
class ExitCli(Exception):
"""Exception to exit the CLI"""
pass
class BaseCommand(ABC, BaseModel):
"""A CLI command"""
# All commands must include a type name like this:
# type: Literal['your_command_name'] = 'your_command_name'
@classmethod
def get_all_subclasses(cls):
subclasses = []
toprocess = [cls]
while len(toprocess) > 0:
next = toprocess.pop(0)
next_subclasses = next.__subclasses__()
subclasses.extend(next_subclasses)
toprocess.extend(next_subclasses)
return subclasses
@classmethod
def get_commands(cls):
return tuple(BaseCommand.get_all_subclasses())
@classmethod
def get_commands_map(cls):
# Get the type strings out of the literals and into a dictionary
return dict(map(lambda t: (get_args(get_type_hints(t)['type'])[0], t), BaseCommand.get_all_subclasses()))
@abstractmethod
def run(self, context: CliContext) -> None:
"""Run the command. Raise ExitCli to exit."""
pass
class ExitCommand(BaseCommand):
"""Exits the CLI"""
type: Literal['exit'] = 'exit'
def run(self, context: CliContext) -> None:
raise ExitCli()
class HelpCommand(BaseCommand):
"""Shows help"""
type: Literal['help'] = 'help'
def run(self, context: CliContext) -> None:
context.parser.print_help()
def get_graph_execution_history(
graph_execution_state: GraphExecutionState,
) -> Iterable[str]:
"""Gets the history of fully-executed invocations for a graph execution"""
return (
n
for n in reversed(graph_execution_state.executed_history)
if n in graph_execution_state.graph.nodes
)
def get_invocation_command(invocation) -> str:
fields = invocation.__fields__.items()
type_hints = get_type_hints(type(invocation))
command = [invocation.type]
for name, field in fields:
if name in ["id", "type"]:
continue
# TODO: add links
# Skip image fields when serializing command
type_hint = type_hints.get(name) or None
if type_hint is ImageField or ImageField in get_args(type_hint):
continue
field_value = getattr(invocation, name)
field_default = field.default
if field_value != field_default:
if type_hint is str or str in get_args(type_hint):
command.append(f'--{name} "{field_value}"')
else:
command.append(f"--{name} {field_value}")
return " ".join(command)
class HistoryCommand(BaseCommand):
"""Shows the invocation history"""
type: Literal['history'] = 'history'
# Inputs
# fmt: off
count: int = Field(default=5, gt=0, description="The number of history entries to show")
# fmt: on
def run(self, context: CliContext) -> None:
history = list(get_graph_execution_history(context.get_session()))
for i in range(min(self.count, len(history))):
entry_id = history[-1 - i]
entry = context.get_session().graph.get_node(entry_id)
print(f"{entry_id}: {get_invocation_command(entry)}")
class SetDefaultCommand(BaseCommand):
"""Sets a default value for a field"""
type: Literal['default'] = 'default'
# Inputs
# fmt: off
field: str = Field(description="The field to set the default for")
value: str = Field(description="The value to set the default to, or None to clear the default")
# fmt: on
def run(self, context: CliContext) -> None:
if self.value is None:
if self.field in context.defaults:
del context.defaults[self.field]
else:
context.defaults[self.field] = self.value
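Since get_commands() walks BaseCommand.__subclasses__(), defining a subclass with a `type` Literal is all that is needed for add_parsers to expose it. A minimal sketch with a hypothetical `shout` command:

from typing import Literal
from pydantic import Field

class ShoutCommand(BaseCommand):
    """Prints a message in upper case"""
    type: Literal["shout"] = "shout"
    # fmt: off
    message: str = Field(default="hello", description="The message to print")
    # fmt: on
    def run(self, context: CliContext) -> None:
        print(self.message.upper())

# After add_parsers(subparsers, BaseCommand.get_all_subclasses()) runs,
# this is available on the CLI as: shout --message hi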

View File

@ -5,13 +5,7 @@ import os
import shlex
import time
from typing import (
Any,
Dict,
Iterable,
Literal,
Union,
get_args,
get_origin,
get_type_hints,
)
@ -19,9 +13,9 @@ from pydantic import BaseModel
from pydantic.fields import Field
from ..backend import Args
from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_graph_execution_history
from .invocations import *
from .invocations.baseinvocation import BaseInvocation
from .invocations.image import ImageField
from .services.events import EventServiceBase
from .services.generate_initializer import get_generate
from .services.graph import EdgeConnection, GraphExecutionState
@ -33,15 +27,33 @@ from .services.processor import DefaultInvocationProcessor
from .services.sqlite import SqliteItemStorage
class InvocationCommand(BaseModel):
invocation: Union[BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore
class CliCommand(BaseModel):
command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore
class InvalidArgs(Exception):
pass
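CliCommand dispatches through pydantic's discriminated unions: the `type` Literal on each union member tells pydantic which model to construct from the parsed args dict. A self-contained sketch (pydantic v1 style, hypothetical commands):

from typing import Literal, Union
from pydantic import BaseModel, Field

class HelpCmd(BaseModel):  # hypothetical
    type: Literal["help"] = "help"

class ExitCmd(BaseModel):  # hypothetical
    type: Literal["exit"] = "exit"

class Cli(BaseModel):
    command: Union[HelpCmd, ExitCmd] = Field(discriminator="type")

cmd = Cli(command={"type": "exit"})  # pydantic selects ExitCmd via "type"
assert isinstance(cmd.command, ExitCmd)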
def get_invocation_parser() -> argparse.ArgumentParser:
def add_invocation_args(command_parser):
# Add linking capability
command_parser.add_argument(
"--link",
"-l",
action="append",
nargs=3,
help="A link in the format 'dest_field source_node source_field'. source_node can be relative to history (e.g. -1)",
)
command_parser.add_argument(
"--link_node",
"-ln",
action="append",
help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)",
)
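As a sketch of the link syntax described in the help text above (the command name and node ids are hypothetical), each --link appends one [dest_field, source_node, source_field] triple:

import argparse
import shlex

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="type")
show = subparsers.add_parser("show_image")  # hypothetical invocation
add_invocation_args(show)

args = parser.parse_args(shlex.split("show_image --link image -1 image"))
print(args.link)  # [['image', '-1', 'image']]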
def get_command_parser() -> argparse.ArgumentParser:
# Create invocation parser
parser = argparse.ArgumentParser()
@ -49,129 +61,19 @@ def get_invocation_parser() -> argparse.ArgumentParser:
raise InvalidArgs
parser.exit = exit
subparsers = parser.add_subparsers(dest="type")
invocation_parsers = dict()
# Add history parser
history_parser = subparsers.add_parser(
"history", help="Shows the invocation history"
)
history_parser.add_argument(
"count",
nargs="?",
default=5,
type=int,
help="The number of history entries to show",
)
# Add default parser
default_parser = subparsers.add_parser(
"default", help="Define a default value for all inputs with a specified name"
)
default_parser.add_argument("input", type=str, help="The input field")
default_parser.add_argument("value", help="The default value")
default_parser = subparsers.add_parser(
"reset_default", help="Resets a default value"
)
default_parser.add_argument("input", type=str, help="The input field")
# Create subparsers for each invocation
invocations = BaseInvocation.get_all_subclasses()
for invocation in invocations:
hints = get_type_hints(invocation)
cmd_name = get_args(hints["type"])[0]
command_parser = subparsers.add_parser(cmd_name, help=invocation.__doc__)
invocation_parsers[cmd_name] = command_parser
add_parsers(subparsers, invocations, add_arguments=add_invocation_args)
# Add linking capability
command_parser.add_argument(
"--link",
"-l",
action="append",
nargs=3,
help="A link in the format 'dest_field source_node source_field'. source_node can be relative to history (e.g. -1)",
)
command_parser.add_argument(
"--link_node",
"-ln",
action="append",
help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)",
)
# Convert all fields to arguments
fields = invocation.__fields__
for name, field in fields.items():
if name in ["id", "type"]:
continue
if get_origin(field.type_) == Literal:
allowed_values = get_args(field.type_)
allowed_types = set()
for val in allowed_values:
allowed_types.add(type(val))
allowed_types_list = list(allowed_types)
field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list] # type: ignore
command_parser.add_argument(
f"--{name}",
dest=name,
type=field_type,
default=field.default,
choices=allowed_values,
help=field.field_info.description,
)
else:
command_parser.add_argument(
f"--{name}",
dest=name,
type=field.type_,
default=field.default,
help=field.field_info.description,
)
# Create subparsers for each command
commands = BaseCommand.get_all_subclasses()
add_parsers(subparsers, commands, exclude_fields=["type"])
return parser
def get_invocation_command(invocation) -> str:
fields = invocation.__fields__.items()
type_hints = get_type_hints(type(invocation))
command = [invocation.type]
for name, field in fields:
if name in ["id", "type"]:
continue
# TODO: add links
# Skip image fields when serializing command
type_hint = type_hints.get(name) or None
if type_hint is ImageField or ImageField in get_args(type_hint):
continue
field_value = getattr(invocation, name)
field_default = field.default
if field_value != field_default:
if type_hint is str or str in get_args(type_hint):
command.append(f'--{name} "{field_value}"')
else:
command.append(f"--{name} {field_value}")
return " ".join(command)
def get_graph_execution_history(
graph_execution_state: GraphExecutionState,
) -> Iterable[str]:
"""Gets the history of fully-executed invocations for a graph execution"""
return (
n
for n in reversed(graph_execution_state.executed_history)
if n in graph_execution_state.graph.nodes
)
def generate_matching_edges(
a: BaseInvocation, b: BaseInvocation
) -> list[tuple[EdgeConnection, EdgeConnection]]:
@ -233,13 +135,12 @@ def invoke_cli():
invoker = Invoker(services)
session: GraphExecutionState = invoker.create_execution_state()
parser = get_invocation_parser()
parser = get_command_parser()
# Uncomment to print out previous sessions at startup
# print(services.session_manager.list())
# Defaults storage
defaults: Dict[str, Any] = dict()
context = CliContext(invoker, session, parser)
while True:
try:
@ -248,13 +149,6 @@ def invoke_cli():
# Ctrl-c exits
break
if cmd_input in ["exit", "q"]:
break
if cmd_input in ["--help", "help", "h", "?"]:
parser.print_help()
continue
try:
# Refresh the state of the session
session = invoker.services.graph_execution_manager.get(session.id)
@ -272,35 +166,23 @@ def invoke_cli():
# Parse args to create invocation
args = vars(parser.parse_args(shlex.split(cmd.strip())))
# Check for special commands
# TODO: These might be better as Pydantic models, similar to the invocations
if args["type"] == "history":
history_count = args["count"] or 5
for i in range(min(history_count, len(history))):
entry_id = history[-1 - i]
entry = session.graph.get_node(entry_id)
print(f"{entry_id}: {get_invocation_command(entry.invocation)}")
continue
if args["type"] == "reset_default":
if args["input"] in defaults:
del defaults[args["input"]]
continue
if args["type"] == "default":
field = args["input"]
field_value = args["value"]
defaults[field] = field_value
continue
# Override defaults
for field_name, field_default in defaults.items():
for field_name, field_default in context.defaults.items():
if field_name in args:
args[field_name] = field_default
# Parse invocation
args["id"] = current_id
command = InvocationCommand(invocation=args)
command = CliCommand(command=args)
# Run any CLI commands immediately
# TODO: this won't behave as expected if piping and using e.g. history,
# since invocations are gathered and then run together at the end.
# This is more efficient if the CLI is running against a distributed
# backend, so it's preferable not to change that behavior.
if isinstance(command.command, BaseCommand):
command.command.run(context)
continue
# Pipe previous command output (if there was a previous command)
edges = []
@ -314,7 +196,7 @@ def invoke_cli():
else session.graph.get_node(from_id)
)
matching_edges = generate_matching_edges(
from_node, command.invocation
from_node, command.command
)
edges.extend(matching_edges)
@ -323,22 +205,25 @@ def invoke_cli():
for link in args["link_node"]:
link_node = session.graph.get_node(link)
matching_edges = generate_matching_edges(
link_node, command.invocation
link_node, command.command
)
matching_destinations = [e[1] for e in matching_edges]
edges = [e for e in edges if e[1] not in matching_destinations]
edges.extend(matching_edges)
if "link" in args and args["link"]:
for link in args["link"]:
edges = [e for e in edges if e[1].node_id != command.command.id and e[1].field != link[2]]
edges.append(
(
EdgeConnection(node_id=link[1], field=link[0]),
EdgeConnection(
node_id=command.invocation.id, field=link[2]
node_id=command.command.id, field=link[2]
),
)
)
new_invocations.append((command.invocation, edges))
new_invocations.append((command.command, edges))
current_id = current_id + 1
@ -347,13 +232,14 @@ def invoke_cli():
for invocation in new_invocations:
session.add_node(invocation[0])
for edge in invocation[1]:
print(edge)
session.add_edge(edge)
# Execute all available invocations
invoker.invoke(session, invoke_all=True)
while not session.is_complete():
# Wait some time
session = invoker.services.graph_execution_manager.get(session.id)
session = context.get_session()
time.sleep(0.1)
# Print any errors
@ -366,11 +252,15 @@ def invoke_cli():
# Start a new session
print("Creating a new session")
session = invoker.create_execution_state()
context.session = session
except InvalidArgs:
print('Invalid command, use "help" to list commands')
continue
except ExitCli:
break
except SystemExit:
continue

View File

@ -1101,14 +1101,17 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
if original_config_file is None:
model_type = ModelManager.probe_model_type(checkpoint)
if model_type == SDLegacyType.V2:
if model_type == SDLegacyType.V2_v:
original_config_file = (
global_config_dir() / "stable-diffusion" / "v2-inference-v.yaml"
)
if global_step == 110000:
# v2.1 needs to upcast attention
upcast_attention = True
elif model_type == SDLegacyType.V2_e:
original_config_file = (
global_config_dir() / "stable-diffusion" / "v2-inference.yaml"
)
elif model_type == SDLegacyType.V1_INPAINT:
original_config_file = (
global_config_dir()

View File

@ -40,7 +40,9 @@ from ..util import CPU_DEVICE, ask_user, download_with_resume
class SDLegacyType(Enum):
V1 = 1
V1_INPAINT = 2
V2 = 3
V2_e = 4
V2_v = 5
UNKNOWN = 99
DEFAULT_MAX_MODELS = 2
@ -580,15 +582,24 @@ class ModelManager(object):
format. Valid return values include:
SDLegacyType.V1
SDLegacyType.V1_INPAINT
SDLegacyType.V2
SDLegacyType.V2 (V2 prediction type unknown)
SDLegacyType.V2_e (V2 using 'epsilon' prediction type)
SDLegacyType.V2_v (V2 using 'v_prediction' prediction type)
SDLegacyType.UNKNOWN
"""
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
return SDLegacyType.V2
global_step = checkpoint.get('global_step')
state_dict = checkpoint.get("state_dict") or checkpoint
try:
state_dict = checkpoint.get("state_dict") or checkpoint
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
if global_step == 220000:
return SDLegacyType.V2_e
elif global_step == 110000:
return SDLegacyType.V2_v
else:
return SDLegacyType.V2
# otherwise we assume a V1 file
in_channels = state_dict[
"model.diffusion_model.input_blocks.0.0.weight"
].shape[1]
@ -602,12 +613,13 @@ class ModelManager(object):
return SDLegacyType.UNKNOWN
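A minimal sketch of the probe in use (the checkpoint path is hypothetical); the V2 sub-type is inferred from the checkpoint's recorded global_step, falling back to plain V2 when that heuristic cannot decide:

import torch

checkpoint = torch.load("/path/to/legacy-model.ckpt", map_location="cpu")
model_type = ModelManager.probe_model_type(checkpoint)
if model_type == SDLegacyType.V2_v:
    config = "v2-inference-v.yaml"  # v_prediction (768px) models
elif model_type == SDLegacyType.V2_e:
    config = "v2-inference.yaml"    # epsilon-prediction (512px) models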
def heuristic_import(
self,
path_url_or_repo: str,
convert: bool = True,
model_name: str = None,
description: str = None,
commit_to_conf: Path = None,
self,
path_url_or_repo: str,
convert: bool = True,
model_name: str = None,
description: str = None,
model_config_file: Path = None,
commit_to_conf: Path = None,
) -> str:
"""
Accept a string which could be:
@ -704,7 +716,7 @@ class ModelManager(object):
if model_path.stem in self.config: # already imported
print(" | Already imported. Skipping")
return
return model_path.stem
# another round of heuristics to guess the correct config file.
checkpoint = (
@ -712,32 +724,46 @@ class ModelManager(object):
if model_path.suffix == ".safetensors"
else torch.load(model_path)
)
model_type = self.probe_model_type(checkpoint)
model_config_file = None
if model_type == SDLegacyType.V1:
print(" | SD-v1 model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
)
elif model_type == SDLegacyType.V1_INPAINT:
print(" | SD-v1 inpainting model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
)
elif model_type == SDLegacyType.V2:
print(
" | SD-v2 model detected; model will be converted to diffusers format"
)
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
)
convert = True
else:
print(
f"** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. Skipping import"
)
return
# additional probing needed if no config file provided
if model_config_file is None:
model_type = self.probe_model_type(checkpoint)
if model_type == SDLegacyType.V1:
print(" | SD-v1 model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
)
elif model_type == SDLegacyType.V1_INPAINT:
print(" | SD-v1 inpainting model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
)
elif model_type == SDLegacyType.V2_v:
print(
" | SD-v2-v model detected; model will be converted to diffusers format"
)
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
)
convert = True
elif model_type == SDLegacyType.V2_e:
print(
" | SD-v2-e model detected; model will be converted to diffusers format"
)
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
)
convert = True
elif model_type == SDLegacyType.V2:
print(
f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
)
return
else:
print(
f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
)
return
diffuser_path = Path(
Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem

View File

@ -26,7 +26,7 @@ from ..args import APP_ID, APP_VERSION, Args, calculate_init_img_hash
from ..generator import infill_methods
from ..globals import Globals, global_converted_ckpts_dir, global_models_dir
from ..image_util import PngWriter, retrieve_metadata
from ..model_management import merge_diffusion_models
from ...frontend.merge.merge_diffusers import merge_diffusion_models
from ..prompting import (
get_prompt_structure,
get_tokenizer,

View File

@ -20,7 +20,7 @@ stable-diffusion-2.1:
recommended: True
sd-inpainting-2.0:
description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
repo_id: stabilityai/stable-diffusion-2-1
repo_id: stabilityai/stable-diffusion-2-inpainting
format: diffusers
recommended: False
analog-diffusion-1.0:

View File

@ -1,6 +1,6 @@
model:
base_learning_rate: 1.0e-4
target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
target: invokeai.backend.stable_diffusion.diffusion.ddpm.LatentDiffusion
params:
parameterization: "v"
linear_start: 0.00085

View File

@ -0,0 +1,67 @@
model:
base_learning_rate: 1.0e-4
target: invokeai.backend.stable_diffusion.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: "jpg"
cond_stage_key: "txt"
image_size: 64
channels: 4
cond_stage_trainable: false
conditioning_key: crossattn
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: False # we set this to false because this is an inference only config
unet_config:
target: invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel
params:
use_checkpoint: True
use_fp16: True
image_size: 32 # unused
in_channels: 4
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_head_channels: 64 # need to fix for flash-attn
use_spatial_transformer: True
use_linear_in_transformer: True
transformer_depth: 1
context_dim: 1024
legacy: False
first_stage_config:
target: invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
#attn_type: "vanilla-xformers"
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: invokeai.backend.stable_diffusion.encoders.modules.FrozenOpenCLIPEmbedder
params:
freeze: True
layer: "penultimate"

View File

@ -19,7 +19,7 @@ import invokeai.version as invokeai
from ...backend import Generate, ModelManager
from ...backend.args import Args, dream_cmd_from_png, metadata_dumps, metadata_from_png
from ...backend.globals import Globals
from ...backend.globals import Globals, global_config_dir
from ...backend.image_util import (
PngWriter,
make_grid,
@ -66,6 +66,9 @@ def main():
Globals.sequential_guidance = args.sequential_guidance
Globals.ckpt_convert = True # always true now
# run any post-install patches needed
run_patches()
print(f">> Internet connectivity is {Globals.internet_available}")
if not args.conf:
@ -658,7 +661,16 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
)
if not imported_name:
print("** Import failed or was skipped")
if config_file := _pick_configuration_file(completer):
imported_name = gen.model_manager.heuristic_import(
model_path,
model_name=model_name,
description=model_desc,
convert=convert,
model_config_file=config_file,
)
if not imported_name:
print("** Aborting import.")
return
if not _verify_load(imported_name, gen):
@ -672,6 +684,46 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
completer.update_models(gen.model_manager.list_models())
print(f">> {imported_name} successfully installed")
def _pick_configuration_file(completer) -> Path:
print(
"""
Please select the type of this model:
[1] A Stable Diffusion v1.x ckpt/safetensors model
[2] A Stable Diffusion v1.x inpainting ckpt/safetensors model
[3] A Stable Diffusion v2.x base model (512 pixels)
[4] A Stable Diffusion v2.x v-predictive model (768 pixels)
[5] Other (you will be prompted to enter the config file path)
[Q] I have no idea! Skip the import.
""")
choices = [
global_config_dir() / 'stable-diffusion' / x
for x in [
'v1-inference.yaml',
'v1-inpainting-inference.yaml',
'v2-inference.yaml',
'v2-inference-v.yaml',
]
]
ok = False
while not ok:
try:
choice = input('select 1-5, Q > ').strip()
if choice.startswith(('q','Q')):
return
if choice == '5':
completer.complete_extensions(('.yaml',))
choice = Path(input('Select config file for this model> ').strip()).absolute()
completer.complete_extensions(None)
ok = choice.exists()
else:
choice = choices[int(choice)-1]
ok = True
except (ValueError, IndexError):
print(f'{choice} is not a valid choice')
except EOFError:
return
return choice
def _verify_load(model_name: str, gen) -> bool:
print(">> Verifying that new model loads...")
@ -1236,6 +1288,21 @@ def check_internet() -> bool:
except:
return False
# This routine performs any patch-ups needed after installation
def run_patches():
# install ckpt configuration files that may have been added to the
# distro after original root directory configuration
import invokeai.configs as conf
from shutil import copyfile
root_configs = Path(global_config_dir(), 'stable-diffusion')
repo_configs = Path(conf.__path__[0], 'stable-diffusion')
if not root_configs.exists():
os.makedirs(root_configs, exist_ok=True)
for src in repo_configs.iterdir():
dest = root_configs / src.name
if not dest.exists():
copyfile(src, dest)
if __name__ == "__main__":
main()

View File

@ -3,3 +3,6 @@ dist/
node_modules/
patches/
stats.html
index.html
.yarn/
*.scss

View File

@ -30,7 +30,10 @@ module.exports = {
radix: 'error',
'space-before-blocks': 'error',
'import/prefer-default-export': 'off',
'@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
'@typescript-eslint/no-unused-vars': [
'warn',
{ varsIgnorePattern: '^_', argsIgnorePattern: '^_' },
],
'prettier/prettier': ['error', { endOfLine: 'auto' }],
},
settings: {

View File

@ -1,4 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
cd invokeai/frontend/web && npm run lint-staged
cd invokeai/frontend/web/ && npm run lint-staged

View File

@ -3,3 +3,4 @@ dist/
node_modules/
patches/
stats.html
.yarn/

View File

@ -3,6 +3,7 @@ module.exports = {
tabWidth: 2,
semi: true,
singleQuote: true,
endOfLine: 'auto',
overrides: [
{
files: ['public/locales/*.json'],

View File

@ -0,0 +1 @@
.ltr-image-gallery-css-transition-enter{transform:translate(150%)}.ltr-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-image-gallery-css-transition-exit{transform:translate(0)}.ltr-image-gallery-css-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}.rtl-image-gallery-css-transition-enter{transform:translate(-150%)}.rtl-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-image-gallery-css-transition-exit{transform:translate(0)}.rtl-image-gallery-css-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}.ltr-parameters-panel-transition-enter{transform:translate(-150%)}.ltr-parameters-panel-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-parameters-panel-transition-exit{transform:translate(0)}.ltr-parameters-panel-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}.rtl-parameters-panel-transition-enter{transform:translate(150%)}.rtl-parameters-panel-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-parameters-panel-transition-exit{transform:translate(0)}.rtl-parameters-panel-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}

Seven file diffs suppressed because one or more lines are too long

View File

@ -5,11 +5,18 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-0e39fbc4.js"></script>
<link rel="stylesheet" href="./assets/index-14cb2922.css">
<style>
html,
body {
padding: 0;
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-b928084d.js"></script>
<link rel="stylesheet" href="./assets/index-5483945c.css">
</head>
<body>
<body dir="ltr">
<div id="root"></div>
</body>

View File

@ -10,14 +10,18 @@
"darkTheme": "Dark",
"lightTheme": "Light",
"greenTheme": "Green",
"oceanTheme": "Ocean",
"langArabic": "العربية",
"langEnglish": "English",
"langDutch": "Nederlands",
"langFrench": "Français",
"langGerman": "Deutsch",
"langHebrew": "עברית",
"langItalian": "Italiano",
"langJapanese": "日本語",
"langKorean": "한국어",
"langPolish": "Polski",
"langPortuguese": "Português",
"langBrPortuguese": "Português do Brasil",
"langRussian": "Русский",
"langSimplifiedChinese": "简体中文",
@ -63,7 +67,10 @@
"statusConvertingModel": "Converting Model",
"statusModelConverted": "Model Converted",
"statusMergingModels": "Merging Models",
"statusMergedModels": "Models Merged"
"statusMergedModels": "Models Merged",
"pinOptionsPanel": "Pin Options Panel",
"loading": "Loading",
"loadingInvokeAI": "Loading Invoke AI"
},
"gallery": {
"generations": "Generations",
@ -82,7 +89,7 @@
"noImagesInGallery": "No Images In Gallery"
},
"hotkeys": {
"keyboardShortcuts": "Keyboard Shorcuts",
"keyboardShortcuts": "Keyboard Shortcuts",
"appHotkeys": "App Hotkeys",
"generalHotkeys": "General Hotkeys",
"galleryHotkeys": "Gallery Hotkeys",
@ -364,7 +371,8 @@
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
"convertToDiffusersSaveLocation": "Save Location",
"v1": "v1",
"v2": "v2",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"inpainting": "v1 Inpainting",
"customConfig": "Custom Config",
"pathToCustomConfig": "Path To Custom Config",
@ -387,13 +395,16 @@
"mergedModelCustomSaveLocation": "Custom Path",
"invokeAIFolder": "Invoke AI Folder",
"ignoreMismatch": "Ignore Mismatches Between Selected Models",
"modelMergeHeaderHelp1": "You can merge upto three different models to create a blend that suits your needs.",
"modelMergeHeaderHelp1": "You can merge up to three different models to create a blend that suits your needs.",
"modelMergeHeaderHelp2": "Only Diffusers are available for merging. If you want to merge a checkpoint model, please convert it to Diffusers first.",
"modelMergeAlphaHelp": "Alpha controls blend strength for the models. Lower alpha values lead to lower influence of the second model.",
"modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
"inverseSigmoid": "Inverse Sigmoid",
"sigmoid": "Sigmoid",
"weightedSum": "Weighted Sum"
"weightedSum": "Weighted Sum",
"none": "none",
"addDifference": "Add Difference",
"pickModelType": "Pick Model Type"
},
"parameters": {
"general": "General",

View File

@ -15,7 +15,7 @@
"langSpanish": "Español",
"nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
"postProcessing": "Post-procesamiento",
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador",
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador.",
"postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
"postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
"training": "Entrenamiento",
@ -44,7 +44,26 @@
"statusUpscaling": "Aumentando Tamaño",
"statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
"statusLoadingModel": "Cargando Modelo",
"statusModelChanged": "Modelo cambiado"
"statusModelChanged": "Modelo cambiado",
"statusMergedModels": "Modelos combinados",
"githubLabel": "Github",
"discordLabel": "Discord",
"langEnglish": "Inglés",
"langDutch": "Holandés",
"langFrench": "Francés",
"langGerman": "Alemán",
"langItalian": "Italiano",
"langArabic": "Árabe",
"langJapanese": "Japones",
"langPolish": "Polaco",
"langBrPortuguese": "Portugués brasileño",
"langRussian": "Ruso",
"langSimplifiedChinese": "Chino simplificado",
"langUkranian": "Ucraniano",
"back": "Atrás",
"statusConvertingModel": "Convertir el modelo",
"statusModelConverted": "Modelo adaptado",
"statusMergingModels": "Fusionar modelos"
},
"gallery": {
"generations": "Generaciones",
@ -284,16 +303,16 @@
"nameValidationMsg": "Introduce un nombre para tu modelo",
"description": "Descripción",
"descriptionValidationMsg": "Introduce una descripción para tu modelo",
"config": "Config",
"configValidationMsg": "Ruta del archivo de configuración del modelo",
"config": "Configurar",
"configValidationMsg": "Ruta del archivo de configuración del modelo.",
"modelLocation": "Ubicación del Modelo",
"modelLocationValidationMsg": "Ruta del archivo de modelo",
"modelLocationValidationMsg": "Ruta del archivo de modelo.",
"vaeLocation": "Ubicación VAE",
"vaeLocationValidationMsg": "Ruta del archivo VAE",
"vaeLocationValidationMsg": "Ruta del archivo VAE.",
"width": "Ancho",
"widthValidationMsg": "Ancho predeterminado de tu modelo",
"widthValidationMsg": "Ancho predeterminado de tu modelo.",
"height": "Alto",
"heightValidationMsg": "Alto predeterminado de tu modelo",
"heightValidationMsg": "Alto predeterminado de tu modelo.",
"addModel": "Añadir Modelo",
"updateModel": "Actualizar Modelo",
"availableModels": "Modelos disponibles",
@ -320,7 +339,61 @@
"deleteModel": "Eliminar Modelo",
"deleteConfig": "Eliminar Configuración",
"deleteMsg1": "¿Estás seguro de querer eliminar esta entrada de modelo de InvokeAI?",
"deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas."
"deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas.",
"safetensorModels": "SafeTensors",
"addDiffuserModel": "Añadir difusores",
"inpainting": "v1 Repintado",
"repoIDValidationMsg": "Repositorio en línea de tu modelo",
"checkpointModels": "Puntos de control",
"convertToDiffusersHelpText4": "Este proceso se realiza una sola vez. Puede tardar entre 30 y 60 segundos dependiendo de las especificaciones de tu ordenador.",
"diffusersModels": "Difusores",
"addCheckpointModel": "Agregar modelo de punto de control/Modelo Safetensor",
"vaeRepoID": "Identificador del repositorio de VAE",
"vaeRepoIDValidationMsg": "Repositorio en línea de tú VAE",
"formMessageDiffusersModelLocation": "Difusores Modelo Ubicación",
"formMessageDiffusersModelLocationDesc": "Por favor, introduzca al menos uno.",
"formMessageDiffusersVAELocation": "Ubicación VAE",
"formMessageDiffusersVAELocationDesc": "Si no se proporciona, InvokeAI buscará el archivo VAE dentro de la ubicación del modelo indicada anteriormente.",
"convert": "Convertir",
"convertToDiffusers": "Convertir en difusores",
"convertToDiffusersHelpText1": "Este modelo se convertirá al formato 🧨 Difusores.",
"convertToDiffusersHelpText2": "Este proceso sustituirá su entrada del Gestor de Modelos por la versión de Difusores del mismo modelo.",
"convertToDiffusersHelpText3": "Su archivo de puntos de control en el disco NO será borrado ni modificado de ninguna manera. Puede volver a añadir su punto de control al Gestor de Modelos si lo desea.",
"convertToDiffusersHelpText5": "Asegúrese de que dispone de suficiente espacio en disco. Los modelos suelen variar entre 4 GB y 7 GB de tamaño.",
"convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
"convertToDiffusersSaveLocation": "Guardar ubicación",
"v1": "v1",
"v2": "v2",
"statusConverting": "Adaptar",
"modelConverted": "Modelo adaptado",
"sameFolder": "La misma carpeta",
"invokeRoot": "Carpeta InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Ubicación personalizada para guardar",
"merge": "Fusión",
"modelsMerged": "Modelos fusionados",
"mergeModels": "Combinar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"mergedModelName": "Nombre del modelo combinado",
"alpha": "Alfa",
"interpolationType": "Tipo de interpolación",
"mergedModelSaveLocation": "Guardar ubicación",
"mergedModelCustomSaveLocation": "Ruta personalizada",
"invokeAIFolder": "Invocar carpeta de la inteligencia artificial",
"modelMergeHeaderHelp2": "Sólo se pueden fusionar difusores. Si desea fusionar un modelo de punto de control, conviértalo primero en difusores.",
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.",
"inverseSigmoid": "Sigmoideo inverso",
"weightedSum": "Modelo de suma ponderada",
"sigmoid": "Función sigmoide",
"allModels": "Todos los modelos",
"repo_id": "Identificador del repositorio",
"pathToCustomConfig": "Ruta a la configuración personalizada",
"customConfig": "Configuración personalizada"
},
"parameters": {
"images": "Imágenes",
@ -380,7 +453,22 @@
"info": "Información",
"deleteImage": "Eliminar Imagen",
"initialImage": "Imagen Inicial",
"showOptionsPanel": "Mostrar panel de opciones"
"showOptionsPanel": "Mostrar panel de opciones",
"symmetry": "Simetría",
"vSymmetryStep": "Paso de simetría V",
"hSymmetryStep": "Paso de simetría H",
"cancel": {
"immediate": "Cancelar inmediatamente",
"schedule": "Cancelar tras la iteración actual",
"isScheduled": "Cancelando",
"setType": "Tipo de cancelación"
},
"copyImage": "Copiar la imagen",
"general": "General",
"negativePrompts": "Preguntas negativas",
"imageToImage": "Imagen a imagen",
"denoisingStrength": "Intensidad de la eliminación del ruido",
"hiresStrength": "Alta resistencia"
},
"settings": {
"models": "Modelos",
@ -393,7 +481,8 @@
"resetWebUI": "Restablecer interfaz web",
"resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.",
"resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.",
"resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla."
"resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla.",
"useSlidersForAll": "Utilice controles deslizantes para todas las opciones"
},
"toast": {
"tempFoldersEmptied": "Directorio temporal vaciado",
@ -431,12 +520,12 @@
"feature": {
"prompt": "Este campo tomará todo el texto de entrada, incluidos tanto los términos de contenido como los estilísticos. Si bien se pueden incluir pesos en la solicitud, los comandos/parámetros estándar de línea de comandos no funcionarán.",
"gallery": "Conforme se generan nuevas invocaciones, los archivos del directorio de salida se mostrarán aquí. Las generaciones tienen opciones adicionales para configurar nuevas generaciones.",
"other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. El modo sin costuras funciona para generar patrones repetitivos en la salida. La optimización de alta resolución realiza un ciclo de generación de dos pasos y debe usarse en resoluciones más altas cuando desee una imagen/composición más coherente.",
"other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. 'Seamless mosaico' creará patrones repetitivos en la salida. 'Alta resolución' es la generación en dos pasos con img2img: use esta configuración cuando desee una imagen más grande y más coherente sin artefactos. tomar más tiempo de lo habitual txt2img.",
"seed": "Los valores de semilla proporcionan un conjunto inicial de ruido que guían el proceso de eliminación de ruido y se pueden aleatorizar o rellenar con una semilla de una invocación anterior. La función Umbral se puede usar para mitigar resultados indeseables a valores CFG más altos (intente entre 0-10), y Perlin se puede usar para agregar ruido Perlin al proceso de eliminación de ruido. Ambos sirven para agregar variación a sus salidas.",
"variations": "Pruebe una variación con una cantidad entre 0 y 1 para cambiar la imagen de salida para la semilla establecida. Se encuentran variaciones interesantes en la semilla entre 0.1 y 0.3.",
"upscale": "Usando ESRGAN, puede aumentar la resolución de salida sin requerir un ancho/alto más alto en la generación inicial.",
"faceCorrection": "Usando GFPGAN o Codeformer, la corrección de rostros intentará identificar rostros en las salidas y corregir cualquier defecto/anormalidad. Los valores de fuerza más altos aplicarán una presión correctiva más fuerte en las salidas, lo que resultará en rostros más atractivos. Con Codeformer, una mayor fidelidad intentará preservar la imagen original, a expensas de la fuerza de corrección de rostros.",
"imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75.",
"imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75",
"boundingBox": "La caja delimitadora es análoga a las configuraciones de Ancho y Alto para Texto a Imagen o Imagen a Imagen. Solo se procesará el área en la caja.",
"seamCorrection": "Controla el manejo de parches visibles que pueden ocurrir cuando se pega una imagen generada de nuevo en el lienzo.",
"infillAndScaling": "Administra los métodos de relleno (utilizados en áreas enmascaradas o borradas del lienzo) y la escala (útil para tamaños de caja delimitadora pequeños)."

View File

@ -44,7 +44,26 @@
"statusUpscaling": "Redimensinando",
"statusUpscalingESRGAN": "Redimensinando (ESRGAN)",
"statusLoadingModel": "Carregando Modelo",
"statusModelChanged": "Modelo Alterado"
"statusModelChanged": "Modelo Alterado",
"githubLabel": "Github",
"discordLabel": "Discord",
"langArabic": "Árabe",
"langEnglish": "Inglês",
"langDutch": "Holandês",
"langFrench": "Francês",
"langGerman": "Alemão",
"langItalian": "Italiano",
"langJapanese": "Japonês",
"langPolish": "Polonês",
"langSimplifiedChinese": "Chinês",
"langUkranian": "Ucraniano",
"back": "Voltar",
"statusConvertingModel": "Convertendo Modelo",
"statusModelConverted": "Modelo Convertido",
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados",
"langRussian": "Russo",
"langSpanish": "Espanhol"
},
"gallery": {
"generations": "Gerações",
@ -237,7 +256,7 @@
"desc": "Salva a tela atual na galeria"
},
"copyToClipboard": {
"title": "Copiar Para a Área de Transferência ",
"title": "Copiar para a Área de Transferência",
"desc": "Copia a tela atual para a área de transferência"
},
"downloadImage": {
@ -284,7 +303,7 @@
"nameValidationMsg": "Insira um nome para o seu modelo",
"description": "Descrição",
"descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
"config": "Config",
"config": "Configuração",
"configValidationMsg": "Caminho para o arquivo de configuração do seu modelo.",
"modelLocation": "Localização do modelo",
"modelLocationValidationMsg": "Caminho para onde seu modelo está localizado.",
@ -317,7 +336,52 @@
"deleteModel": "Excluir modelo",
"deleteConfig": "Excluir Config",
"deleteMsg1": "Tem certeza de que deseja excluir esta entrada do modelo de InvokeAI?",
"deleteMsg2": "Isso não vai excluir o arquivo de modelo checkpoint do seu disco. Você pode lê-los, se desejar."
"deleteMsg2": "Isso não vai excluir o arquivo de modelo checkpoint do seu disco. Você pode lê-los, se desejar.",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"safetensorModels": "SafeTensors",
"addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
"addDiffuserModel": "Adicionar Diffusers",
"repo_id": "Repo ID",
"vaeRepoID": "VAE Repo ID",
"vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
"scanAgain": "Digitalize Novamente",
"selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
"noModelsFound": "Nenhum Modelo Encontrado",
"formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
"formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
"formMessageDiffusersVAELocation": "Localização do VAE",
"formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo arquivo VAE dentro do local do modelo.",
"convertToDiffusers": "Converter para Diffusers",
"convertToDiffusersHelpText1": "Este modelo será convertido para o formato 🧨 Diffusers.",
"convertToDiffusersHelpText5": "Por favor, certifique-se de que você tenha espaço suficiente em disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
"convertToDiffusersHelpText6": "Você deseja converter este modelo?",
"convertToDiffusersSaveLocation": "Local para Salvar",
"v1": "v1",
"v2": "v2",
"inpainting": "v1 Inpainting",
"customConfig": "Configuração personalizada",
"pathToCustomConfig": "Caminho para configuração personalizada",
"convertToDiffusersHelpText3": "Seu arquivo de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Você pode adicionar seu ponto de verificação ao Gerenciador de modelos novamente, se desejar.",
"convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, dependendo das especificações do seu computador.",
"merge": "Mesclar",
"modelsMerged": "Modelos mesclados",
"mergeModels": "Mesclar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"statusConverting": "Convertendo",
"modelConverted": "Modelo Convertido",
"sameFolder": "Mesma pasta",
"invokeRoot": "Pasta do InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Local de salvamento personalizado",
"mergedModelName": "Nome do modelo mesclado",
"alpha": "Alpha",
"allModels": "Todos os Modelos",
"repoIDValidationMsg": "Repositório Online do seu Modelo",
"convert": "Converter",
"convertToDiffusersHelpText2": "Este processo irá substituir sua entrada de Gerenciador de Modelos por uma versão Diffusers do mesmo modelo."
},
"parameters": {
"images": "Imagems",
@ -442,14 +506,14 @@
"move": "Mover",
"resetView": "Resetar Visualização",
"mergeVisible": "Fundir Visível",
"saveToGallery": "Save To Gallery",
"saveToGallery": "Salvar na Galeria",
"copyToClipboard": "Copiar para a Área de Transferência",
"downloadAsImage": "Baixar Como Imagem",
"undo": "Desfazer",
"redo": "Refazer",
"clearCanvas": "Limpar Tela",
"canvasSettings": "Configurações de Tela",
"showIntermediates": "Show Intermediates",
"showIntermediates": "Mostrar Intermediários",
"showGrid": "Mostrar Grade",
"snapToGrid": "Encaixar na Grade",
"darkenOutsideSelection": "Escurecer Seleção Externa",

View File

@ -0,0 +1 @@
{}

View File

@ -5,9 +5,16 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="favicon.ico" />
<style>
html,
body {
padding: 0;
margin: 0;
}
</style>
</head>
<body>
<body dir="ltr">
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>

View File

@ -5,28 +5,45 @@
"scripts": {
"prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
"dev": "vite dev",
"build": "tsc && vite build",
"build": "npm run lint && vite build",
"preview": "vite preview",
"madge": "madge --circular src/main.tsx",
"lint": "eslint --fix .",
"lint:madge": "madge --circular src/main.tsx",
"lint:eslint": "eslint --max-warnings=0",
"lint:prettier": "prettier --check .",
"lint:tsc": "tsc --noEmit",
"lint": "npm run lint:eslint && npm run lint:prettier && npm run lint:tsc && npm run lint:madge",
"fix": "eslint --fix . && prettier --loglevel warn --write . && tsc --noEmit",
"lint-staged": "lint-staged",
"prettier": "prettier *.{json,js,ts,html} public/locales/*.json src/**/*.{ts,tsx,scss} --write --loglevel warn .",
"fmt": "npm run prettier -- --write",
"postinstall": "patch-package"
"postinstall": "patch-package && yarn run theme",
"theme": "chakra-cli tokens src/theme/theme.ts",
"theme:watch": "chakra-cli tokens src/theme/theme.ts --watch"
},
"madge": {
"detectiveOptions": {
"ts": {
"skipTypeImports": true
},
"tsx": {
"skipTypeImports": true
}
}
},
"lint-staged": {
"**/*.{js,jsx,ts,tsx,cjs,json,html,scss}": [
"prettier --write",
"eslint --fix"
]
},
"dependencies": {
"@chakra-ui/anatomy": "^2.1.1",
"@chakra-ui/icons": "^2.0.17",
"@chakra-ui/react": "^2.5.1",
"@emotion/cache": "^11.10.5",
"@chakra-ui/styled-system": "^2.6.1",
"@chakra-ui/theme-tools": "^2.0.16",
"@emotion/react": "^11.10.6",
"@emotion/styled": "^11.10.6",
"@radix-ui/react-context-menu": "^2.1.1",
"@radix-ui/react-slider": "^1.1.0",
"@radix-ui/react-tooltip": "^1.0.3",
"@reduxjs/toolkit": "^1.9.2",
"@types/uuid": "^9.0.0",
"@vitejs/plugin-react-swc": "^3.2.0",
"add": "^2.0.6",
"chakra-ui-contextmenu": "^1.0.5",
"dateformat": "^5.0.3",
"formik": "^2.2.9",
"framer-motion": "^9.0.4",
@ -50,19 +67,21 @@
"react-zoom-pan-pinch": "^2.6.1",
"redux-deep-persist": "^1.0.7",
"redux-persist": "^6.0.0",
"socket.io": "^4.6.0",
"socket.io-client": "^4.6.0",
"use-image": "^1.1.0",
"uuid": "^9.0.0",
"yarn": "^1.22.19"
"uuid": "^9.0.0"
},
"devDependencies": {
"@chakra-ui/cli": "^2.3.0",
"@fontsource/inter": "^4.5.15",
"@types/dateformat": "^5.0.0",
"@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11",
"@types/react-transition-group": "^4.4.5",
"@types/uuid": "^9.0.0",
"@typescript-eslint/eslint-plugin": "^5.52.0",
"@typescript-eslint/parser": "^5.52.0",
"@vitejs/plugin-react-swc": "^3.2.0",
"babel-plugin-transform-imports": "^2.0.0",
"eslint": "^8.34.0",
"eslint-config-prettier": "^8.6.0",
@ -76,26 +95,10 @@
"postinstall-postinstall": "^2.1.0",
"prettier": "^2.8.4",
"rollup-plugin-visualizer": "^5.9.0",
"sass": "^1.58.3",
"terser": "^5.16.4",
"vite": "^4.1.2",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.0.5"
},
"madge": {
"detectiveOptions": {
"ts": {
"skipTypeImports": true
},
"tsx": {
"skipTypeImports": true
}
}
},
"lint-staged": {
"**/*.{js,jsx,ts,tsx,cjs,json,html,scss}": [
"npm run prettier",
"npm run lint"
]
"vite-tsconfig-paths": "^4.0.5",
"yarn": "^1.22.19"
}
}

View File

@ -0,0 +1,14 @@
diff --git a/node_modules/@chakra-ui/cli/dist/scripts/read-theme-file.worker.js b/node_modules/@chakra-ui/cli/dist/scripts/read-theme-file.worker.js
index 937cf0d..7dcc0c0 100644
--- a/node_modules/@chakra-ui/cli/dist/scripts/read-theme-file.worker.js
+++ b/node_modules/@chakra-ui/cli/dist/scripts/read-theme-file.worker.js
@@ -50,7 +50,8 @@ async function readTheme(themeFilePath) {
project: tsConfig.configFileAbsolutePath,
compilerOptions: {
module: "CommonJS",
- esModuleInterop: true
+ esModuleInterop: true,
+ jsx: 'react'
},
transpileOnly: true,
swc: true

View File

@ -10,14 +10,18 @@
"darkTheme": "Dark",
"lightTheme": "Light",
"greenTheme": "Green",
"oceanTheme": "Ocean",
"langArabic": "العربية",
"langEnglish": "English",
"langDutch": "Nederlands",
"langFrench": "Français",
"langGerman": "Deutsch",
"langHebrew": "עברית",
"langItalian": "Italiano",
"langJapanese": "日本語",
"langKorean": "한국어",
"langPolish": "Polski",
"langPortuguese": "Português",
"langBrPortuguese": "Português do Brasil",
"langRussian": "Русский",
"langSimplifiedChinese": "简体中文",
@ -63,7 +67,10 @@
"statusConvertingModel": "Converting Model",
"statusModelConverted": "Model Converted",
"statusMergingModels": "Merging Models",
"statusMergedModels": "Models Merged"
"statusMergedModels": "Models Merged",
"pinOptionsPanel": "Pin Options Panel",
"loading": "Loading",
"loadingInvokeAI": "Loading Invoke AI"
},
"gallery": {
"generations": "Generations",
@ -82,7 +89,7 @@
"noImagesInGallery": "No Images In Gallery"
},
"hotkeys": {
"keyboardShortcuts": "Keyboard Shorcuts",
"keyboardShortcuts": "Keyboard Shortcuts",
"appHotkeys": "App Hotkeys",
"generalHotkeys": "General Hotkeys",
"galleryHotkeys": "Gallery Hotkeys",
@ -364,7 +371,8 @@
"convertToDiffusersHelpText6": "Do you wish to convert this model?",
"convertToDiffusersSaveLocation": "Save Location",
"v1": "v1",
"v2": "v2",
"v2_base": "v2 (512px)",
"v2_768": "v2 (768px)",
"inpainting": "v1 Inpainting",
"customConfig": "Custom Config",
"pathToCustomConfig": "Path To Custom Config",
@ -387,13 +395,16 @@
"mergedModelCustomSaveLocation": "Custom Path",
"invokeAIFolder": "Invoke AI Folder",
"ignoreMismatch": "Ignore Mismatches Between Selected Models",
"modelMergeHeaderHelp1": "You can merge upto three different models to create a blend that suits your needs.",
"modelMergeHeaderHelp1": "You can merge up to three different models to create a blend that suits your needs.",
"modelMergeHeaderHelp2": "Only Diffusers are available for merging. If you want to merge a checkpoint model, please convert it to Diffusers first.",
"modelMergeAlphaHelp": "Alpha controls blend strength for the models. Lower alpha values lead to lower influence of the second model.",
"modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
"inverseSigmoid": "Inverse Sigmoid",
"sigmoid": "Sigmoid",
"weightedSum": "Weighted Sum"
"weightedSum": "Weighted Sum",
"none": "none",
"addDifference": "Add Difference",
"pickModelType": "Pick Model Type"
},
"parameters": {
"general": "General",

View File

@ -363,7 +363,6 @@
"convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
"convertToDiffusersSaveLocation": "Guardar ubicación",
"v1": "v1",
"v2": "v2",
"statusConverting": "Adaptar",
"modelConverted": "Modelo adaptado",
"sameFolder": "La misma carpeta",

View File

@ -45,7 +45,9 @@
"statusUpscaling": "Mise à échelle",
"statusUpscalingESRGAN": "Mise à échelle (ESRGAN)",
"statusLoadingModel": "Chargement du modèle",
"statusModelChanged": "Modèle changé"
"statusModelChanged": "Modèle changé",
"discordLabel": "Discord",
"githubLabel": "Github"
},
"gallery": {
"generations": "Générations",

Some files were not shown because too many files have changed in this diff.