Merge branch 'main' into lstein/bugfix/compel

Eugene Brodsky 2023-05-12 08:22:18 -04:00 committed by GitHub
commit af060188bd
78 changed files with 674 additions and 559 deletions

View File

@ -52,7 +52,7 @@ class TextToImageInvocation(BaseInvocation, SDImageInvocation):
width: int = Field(default=512, multiple_of=8, gt=0, description="The width of the resulting image", )
height: int = Field(default=512, multiple_of=8, gt=0, description="The height of the resulting image", )
cfg_scale: float = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
scheduler: SAMPLER_NAME_VALUES = Field(default="lms", description="The scheduler to use" )
model: str = Field(default="", description="The model to use (currently ignored)")
# fmt: on

View File

@ -33,8 +33,8 @@ class ImageOutput(BaseInvocationOutput):
# fmt: off
type: Literal["image"] = "image"
image: ImageField = Field(default=None, description="The output image")
width: Optional[int] = Field(default=None, description="The width of the image in pixels")
height: Optional[int] = Field(default=None, description="The height of the image in pixels")
width: int = Field(description="The width of the image in pixels")
height: int = Field(description="The height of the image in pixels")
# fmt: on
class Config:

View File

@ -17,6 +17,7 @@ from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import Post
from ...backend.image_util.seamless import configure_model_padding
from ...backend.prompting.conditioning import get_uc_and_c_and_ec
from ...backend.stable_diffusion.diffusers_pipeline import ConditioningData, StableDiffusionGeneratorPipeline, image_resized_to_grid_as_tensor
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
import numpy as np
from ..services.image_storage import ImageType
@ -40,41 +41,55 @@ class LatentsField(BaseModel):
class LatentsOutput(BaseInvocationOutput):
"""Base class for invocations that output latents"""
#fmt: off
type: Literal["latent_output"] = "latent_output"
latents: LatentsField = Field(default=None, description="The output latents")
type: Literal["latents_output"] = "latents_output"
# Inputs
latents: LatentsField = Field(default=None, description="The output latents")
width: int = Field(description="The width of the latents in pixels")
height: int = Field(description="The height of the latents in pixels")
#fmt: on
def build_latents_output(latents_name: str, latents: torch.Tensor):
return LatentsOutput(
latents=LatentsField(latents_name=latents_name),
width=latents.size()[3] * 8,
height=latents.size()[2] * 8,
)
class NoiseOutput(BaseInvocationOutput):
"""Invocation noise output"""
#fmt: off
type: Literal["noise_output"] = "noise_output"
type: Literal["noise_output"] = "noise_output"
# Inputs
noise: LatentsField = Field(default=None, description="The output noise")
width: int = Field(description="The width of the noise in pixels")
height: int = Field(description="The height of the noise in pixels")
#fmt: on
# TODO: this seems like a hack
scheduler_map = dict(
ddim=diffusers.DDIMScheduler,
dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_dpm_2=diffusers.KDPM2DiscreteScheduler,
k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_euler=diffusers.EulerDiscreteScheduler,
k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
k_heun=diffusers.HeunDiscreteScheduler,
k_lms=diffusers.LMSDiscreteScheduler,
plms=diffusers.PNDMScheduler,
)
def build_noise_output(latents_name: str, latents: torch.Tensor):
return NoiseOutput(
noise=LatentsField(latents_name=latents_name),
width=latents.size()[3] * 8,
height=latents.size()[2] * 8,
)
SAMPLER_NAME_VALUES = Literal[
tuple(list(scheduler_map.keys()))
tuple(list(SCHEDULER_MAP.keys()))
]
def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
scheduler_class = scheduler_map.get(scheduler_name,'ddim')
scheduler = scheduler_class.from_config(model.scheduler.config)
scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])
scheduler_config = model.scheduler.config
if "_backup" in scheduler_config:
scheduler_config = scheduler_config["_backup"]
scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
scheduler = scheduler_class.from_config(scheduler_config)
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False
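The rewritten get_scheduler above stashes the pipeline's original scheduler config under a "_backup" key, so that repeated scheduler swaps always start from the pristine config instead of layering one scheduler's extras (such as use_karras_sigmas) on top of another's. A minimal standalone sketch of that pattern, using a hypothetical swap_scheduler helper and plain dicts rather than the pipeline object:

def swap_scheduler(scheduler_name, current_config, scheduler_map):
    scheduler_class, extra_config = scheduler_map.get(scheduler_name, scheduler_map["ddim"])
    base_config = dict(current_config)
    # If a previous swap already stashed the original config, start from that instead.
    if "_backup" in base_config:
        base_config = dict(base_config["_backup"])
    # Apply this scheduler's extras and keep the pristine config around for the next swap.
    merged = {**base_config, **extra_config, "_backup": base_config}
    return scheduler_class.from_config(merged)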
@ -130,9 +145,7 @@ class NoiseInvocation(BaseInvocation):
name = f'{context.graph_execution_state_id}__{self.id}'
context.services.latents.set(name, noise)
return NoiseOutput(
noise=LatentsField(latents_name=name)
)
return build_noise_output(latents_name=name, latents=noise)
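Both build_latents_output and build_noise_output above report pixel dimensions by scaling the latent tensor's last two axes by 8: Stable Diffusion's VAE downsamples images by that factor (the same downsampling = 8 constant appears later in this commit), so a latent shaped (batch, channels, H/8, W/8) corresponds to an H-by-W image. A short illustrative check, not part of the commit:

import torch

latents = torch.zeros(1, 4, 64, 64)   # latents backing a 512x512 image
height_px = latents.size()[2] * 8     # 512
width_px = latents.size()[3] * 8      # 512
print(width_px, height_px)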
# Text to image
@ -148,7 +161,7 @@ class TextToLatentsInvocation(BaseInvocation):
noise: Optional[LatentsField] = Field(description="The noise to use")
steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
scheduler: SAMPLER_NAME_VALUES = Field(default="lms", description="The scheduler to use" )
model: str = Field(default="", description="The model to use (currently ignored)")
seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
@ -216,7 +229,7 @@ class TextToLatentsInvocation(BaseInvocation):
h_symmetry_time_pct=None,#h_symmetry_time_pct,
v_symmetry_time_pct=None#v_symmetry_time_pct,
),
).add_scheduler_args_if_applicable(model.scheduler, eta=None)#ddim_eta)
).add_scheduler_args_if_applicable(model.scheduler, eta=0.0)#ddim_eta)
return conditioning_data
@ -248,9 +261,7 @@ class TextToLatentsInvocation(BaseInvocation):
name = f'{context.graph_execution_state_id}__{self.id}'
context.services.latents.set(name, result_latents)
return LatentsOutput(
latents=LatentsField(latents_name=name)
)
return build_latents_output(latents_name=name, latents=result_latents)
class LatentsToLatentsInvocation(TextToLatentsInvocation):
@ -285,7 +296,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
self.dispatch_progress(context, source_node_id, state)
model = self.get_model(context.services.model_manager)
conditioning_data = self.get_conditioning_data(model)
conditioning_data = self.get_conditioning_data(context, model)
# TODO: Verify the noise is the right size
@ -293,11 +304,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
latent, device=model.device, dtype=latent.dtype
)
timesteps, _ = model.get_img2img_timesteps(
self.steps,
self.strength,
device=model.device,
)
timesteps, _ = model.get_img2img_timesteps(self.steps, self.strength)
result_latents, result_attention_map_saver = model.latents_from_embeddings(
latents=initial_latents,
@ -313,9 +320,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
name = f'{context.graph_execution_state_id}__{self.id}'
context.services.latents.set(name, result_latents)
return LatentsOutput(
latents=LatentsField(latents_name=name)
)
return build_latents_output(latents_name=name, latents=result_latents)
# Latent to image
@ -379,11 +384,11 @@ class ResizeLatentsInvocation(BaseInvocation):
type: Literal["lresize"] = "lresize"
# Inputs
latents: Optional[LatentsField] = Field(description="The latents to resize")
width: int = Field(ge=64, multiple_of=8, description="The width to resize to (px)")
height: int = Field(ge=64, multiple_of=8, description="The height to resize to (px)")
mode: Optional[LATENTS_INTERPOLATION_MODE] = Field(default="bilinear", description="The interpolation mode")
antialias: Optional[bool] = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
latents: Optional[LatentsField] = Field(description="The latents to resize")
width: int = Field(ge=64, multiple_of=8, description="The width to resize to (px)")
height: int = Field(ge=64, multiple_of=8, description="The height to resize to (px)")
mode: LATENTS_INTERPOLATION_MODE = Field(default="bilinear", description="The interpolation mode")
antialias: bool = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
@ -400,7 +405,7 @@ class ResizeLatentsInvocation(BaseInvocation):
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.set(name, resized_latents)
return LatentsOutput(latents=LatentsField(latents_name=name))
return build_latents_output(latents_name=name, latents=resized_latents)
class ScaleLatentsInvocation(BaseInvocation):
@ -409,10 +414,10 @@ class ScaleLatentsInvocation(BaseInvocation):
type: Literal["lscale"] = "lscale"
# Inputs
latents: Optional[LatentsField] = Field(description="The latents to scale")
scale_factor: float = Field(gt=0, description="The factor by which to scale the latents")
mode: Optional[LATENTS_INTERPOLATION_MODE] = Field(default="bilinear", description="The interpolation mode")
antialias: Optional[bool] = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
latents: Optional[LatentsField] = Field(description="The latents to scale")
scale_factor: float = Field(gt=0, description="The factor by which to scale the latents")
mode: LATENTS_INTERPOLATION_MODE = Field(default="bilinear", description="The interpolation mode")
antialias: bool = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
@ -430,7 +435,7 @@ class ScaleLatentsInvocation(BaseInvocation):
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.set(name, resized_latents)
return LatentsOutput(latents=LatentsField(latents_name=name))
return build_latents_output(latents_name=name, latents=resized_latents)
class ImageToLatentsInvocation(BaseInvocation):
@ -474,4 +479,4 @@ class ImageToLatentsInvocation(BaseInvocation):
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.set(name, latents)
return LatentsOutput(latents=LatentsField(latents_name=name))
return build_latents_output(latents_name=name, latents=latents)
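The resize and scale invocations above only show their updated field declarations and return statements; the actual resampling call sits in the elided lines. Purely as an illustration of latent-space resizing, under the assumption that it behaves like torch.nn.functional.interpolate with the requested pixel size divided by the 8x latent factor (consistent with the *8 scaling in build_latents_output), a sketch:

import torch

latents = torch.randn(1, 4, 64, 64)        # latents of a 512x512 image
target_w, target_h = 768, 512              # requested pixel size
resized = torch.nn.functional.interpolate(
    latents,
    size=(target_h // 8, target_w // 8),   # convert back to latent-space dimensions
    mode="bilinear",
    antialias=False,
)
print(resized.shape)                       # torch.Size([1, 4, 64, 96])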

View File

@ -3,6 +3,7 @@
from typing import Literal
from pydantic import BaseModel, Field
import numpy as np
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
@ -73,3 +74,12 @@ class DivideInvocation(BaseInvocation, MathInvocationConfig):
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=int(self.a / self.b))
class RandomIntInvocation(BaseInvocation):
"""Outputs a single random integer."""
#fmt: off
type: Literal["rand_int"] = "rand_int"
#fmt: on
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=np.random.randint(0, np.iinfo(np.int32).max))

View File

@ -108,17 +108,20 @@ APP_VERSION = invokeai.version.__version__
SAMPLER_CHOICES = [
"ddim",
"k_dpm_2_a",
"k_dpm_2",
"k_dpmpp_2_a",
"k_dpmpp_2",
"k_euler_a",
"k_euler",
"k_heun",
"k_lms",
"plms",
# diffusers:
"ddpm",
"deis",
"lms",
"pndm",
"heun",
"euler",
"euler_k",
"euler_a",
"kdpm_2",
"kdpm_2_a",
"dpmpp_2s",
"dpmpp_2m",
"dpmpp_2m_k",
"unipc",
]
PRECISION_CHOICES = [
@ -631,7 +634,7 @@ class Args(object):
choices=SAMPLER_CHOICES,
metavar="SAMPLER_NAME",
help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
default="k_lms",
default="lms",
)
render_group.add_argument(
"--log_tokenization",

View File

@ -37,6 +37,7 @@ from .safety_checker import SafetyChecker
from .prompting import get_uc_and_c_and_ec
from .prompting.conditioning import log_tokenization
from .stable_diffusion import HuggingFaceConceptsLibrary
from .stable_diffusion.schedulers import SCHEDULER_MAP
from .util import choose_precision, choose_torch_device
def fix_func(orig):
@ -141,7 +142,7 @@ class Generate:
model=None,
conf="configs/models.yaml",
embedding_path=None,
sampler_name="k_lms",
sampler_name="lms",
ddim_eta=0.0, # deterministic
full_precision=False,
precision="auto",
@ -1047,29 +1048,12 @@ class Generate:
def _set_scheduler(self):
default = self.model.scheduler
# See https://github.com/huggingface/diffusers/issues/277#issuecomment-1371428672
scheduler_map = dict(
ddim=diffusers.DDIMScheduler,
dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_dpm_2=diffusers.KDPM2DiscreteScheduler,
k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
# DPMSolverMultistepScheduler is technically not `k_` anything, as it is neither
# the k-diffusers implementation nor included in EDM (Karras 2022), but we can
# provide an alias for compatibility.
k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_euler=diffusers.EulerDiscreteScheduler,
k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
k_heun=diffusers.HeunDiscreteScheduler,
k_lms=diffusers.LMSDiscreteScheduler,
plms=diffusers.PNDMScheduler,
)
if self.sampler_name in scheduler_map:
sampler_class = scheduler_map[self.sampler_name]
if self.sampler_name in SCHEDULER_MAP:
sampler_class, sampler_extra_config = SCHEDULER_MAP[self.sampler_name]
msg = (
f"Setting Sampler to {self.sampler_name} ({sampler_class.__name__})"
)
self.sampler = sampler_class.from_config(self.model.scheduler.config)
self.sampler = sampler_class.from_config({**self.model.scheduler.config, **sampler_extra_config})
else:
msg = (
f" Unsupported Sampler: {self.sampler_name} "+

View File

@ -31,6 +31,7 @@ from ..util.util import rand_perlin_2d
from ..safety_checker import SafetyChecker
from ..prompting.conditioning import get_uc_and_c_and_ec
from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
from ..stable_diffusion.schedulers import SCHEDULER_MAP
downsampling = 8
@ -71,19 +72,6 @@ class InvokeAIGeneratorOutput:
# we are interposing a wrapper around the original Generator classes so that
# old code that calls Generate will continue to work.
class InvokeAIGenerator(metaclass=ABCMeta):
scheduler_map = dict(
ddim=diffusers.DDIMScheduler,
dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_dpm_2=diffusers.KDPM2DiscreteScheduler,
k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
k_euler=diffusers.EulerDiscreteScheduler,
k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
k_heun=diffusers.HeunDiscreteScheduler,
k_lms=diffusers.LMSDiscreteScheduler,
plms=diffusers.PNDMScheduler,
)
def __init__(self,
model_info: dict,
params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
@ -175,14 +163,20 @@ class InvokeAIGenerator(metaclass=ABCMeta):
'''
Return list of all the schedulers that we currently handle.
'''
return list(self.scheduler_map.keys())
return list(SCHEDULER_MAP.keys())
def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]):
return generator_class(model, self.params.precision)
def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
scheduler_class = self.scheduler_map.get(scheduler_name,'ddim')
scheduler = scheduler_class.from_config(model.scheduler.config)
scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])
scheduler_config = model.scheduler.config
if "_backup" in scheduler_config:
scheduler_config = scheduler_config["_backup"]
scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
scheduler = scheduler_class.from_config(scheduler_config)
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False

View File

@ -47,6 +47,7 @@ from diffusers import (
LDMTextToImagePipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UniPCMultistepScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
@ -1209,6 +1210,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
elif scheduler_type == "dpm":
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
elif scheduler_type == 'unipc':
scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
elif scheduler_type == "ddim":
scheduler = scheduler
else:

View File

@ -30,7 +30,7 @@ from diffusers import (
UNet2DConditionModel,
SchedulerMixin,
logging as dlogging,
)
)
from huggingface_hub import scan_cache_dir
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
@ -68,7 +68,7 @@ class SDModelComponent(Enum):
scheduler="scheduler"
safety_checker="safety_checker"
feature_extractor="feature_extractor"
DEFAULT_MAX_MODELS = 2
class ModelManager(object):
@ -182,7 +182,7 @@ class ModelManager(object):
vae from the model currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.vae)
def get_model_tokenizer(self, model_name: str=None)->CLIPTokenizer:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned CLIPTokenizer. If no
@ -190,12 +190,12 @@ class ModelManager(object):
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.tokenizer)
def get_model_unet(self, model_name: str=None)->UNet2DConditionModel:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned UNet2DConditionModel. If no model
name is provided, return the UNet from the model
currently in the GPU.
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.unet)
@ -222,7 +222,7 @@ class ModelManager(object):
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.scheduler)
def _get_sub_model(
self,
model_name: str=None,
@ -1228,7 +1228,7 @@ class ModelManager(object):
sha.update(chunk)
hash = sha.hexdigest()
toc = time.time()
self.logger.debug(f"sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
self.logger.debug(f"sha256 = {hash} ({count} files hashed in {toc - tic:4.2f}s)")
with open(hashpath, "w") as f:
f.write(hash)
return hash
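The logging change above fixes a call that mixed an f-string with a stray %-formatted second argument, which the logger cannot merge back into one message; folding the elapsed time into a single f-string with a width/precision spec does the whole job. A tiny illustration with made-up values:

hash, count = "deadbeef", 7   # hypothetical values for illustration
tic, toc = 10.0, 12.345
print(f"sha256 = {hash} ({count} files hashed in {toc - tic:4.2f}s)")
# -> sha256 = deadbeef (7 files hashed in 2.35s)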

View File

@ -509,10 +509,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
run_id=None,
callback: Callable[[PipelineIntermediateState], None] = None,
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
if self.scheduler.config.get("cpu_only", False):
scheduler_device = torch.device('cpu')
else:
scheduler_device = self._model_group.device_for(self.unet)
if timesteps is None:
self.scheduler.set_timesteps(
num_inference_steps, device=self._model_group.device_for(self.unet)
)
self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
timesteps = self.scheduler.timesteps
infer_latents_from_embeddings = GeneratorToCallbackinator(
self.generate_latents_from_embeddings, PipelineIntermediateState
@ -726,12 +729,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
noise: torch.Tensor,
run_id=None,
callback=None,
) -> InvokeAIStableDiffusionPipelineOutput:
timesteps, _ = self.get_img2img_timesteps(
num_inference_steps,
strength,
device=self._model_group.device_for(self.unet),
)
) -> InvokeAIStableDiffusionPipelineOutput:
timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
result_latents, result_attention_maps = self.latents_from_embeddings(
latents=initial_latents if strength < 1.0 else torch.zeros_like(
initial_latents, device=initial_latents.device, dtype=initial_latents.dtype
@ -757,13 +756,19 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
return self.check_for_safety(output, dtype=conditioning_data.dtype)
def get_img2img_timesteps(
self, num_inference_steps: int, strength: float, device
self, num_inference_steps: int, strength: float, device=None
) -> (torch.Tensor, int):
img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components)
assert img2img_pipeline.scheduler is self.scheduler
img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=device)
if self.scheduler.config.get("cpu_only", False):
scheduler_device = torch.device('cpu')
else:
scheduler_device = self._model_group.device_for(self.unet)
img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
timesteps, adjusted_steps = img2img_pipeline.get_timesteps(
num_inference_steps, strength, device=device
num_inference_steps, strength, device=scheduler_device
)
# Workaround for low strength resulting in zero timesteps.
# TODO: submit upstream fix for zero-step img2img
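Both timestep paths above now consult a cpu_only flag on the scheduler config: schedulers registered with cpu_only=True in SCHEDULER_MAP (currently only unipc) compute their timestep schedule on the CPU, while every other scheduler keeps using the UNet's device. A standalone sketch of that rule with a hypothetical helper, not the pipeline's actual method:

import torch

def scheduler_device(scheduler_config: dict, unet_device: torch.device) -> torch.device:
    # cpu_only schedulers build their timesteps on the CPU regardless of where the UNet lives.
    return torch.device("cpu") if scheduler_config.get("cpu_only", False) else unet_device

print(scheduler_device({"cpu_only": True}, torch.device("cuda")))  # cpu
print(scheduler_device({}, torch.device("cuda")))                  # cuda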
@ -797,9 +802,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
if init_image.dim() == 3:
init_image = init_image.unsqueeze(0)
timesteps, _ = self.get_img2img_timesteps(
num_inference_steps, strength, device=device
)
timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
# 6. Prepare latent variables
# can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents

View File

@ -0,0 +1 @@
from .schedulers import SCHEDULER_MAP

View File

@ -0,0 +1,22 @@
from diffusers import DDIMScheduler, DPMSolverMultistepScheduler, KDPM2DiscreteScheduler, \
KDPM2AncestralDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, \
HeunDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UniPCMultistepScheduler, \
DPMSolverSinglestepScheduler, DEISMultistepScheduler, DDPMScheduler
SCHEDULER_MAP = dict(
ddim=(DDIMScheduler, dict()),
ddpm=(DDPMScheduler, dict()),
deis=(DEISMultistepScheduler, dict()),
lms=(LMSDiscreteScheduler, dict()),
pndm=(PNDMScheduler, dict()),
heun=(HeunDiscreteScheduler, dict()),
euler=(EulerDiscreteScheduler, dict(use_karras_sigmas=False)),
euler_k=(EulerDiscreteScheduler, dict(use_karras_sigmas=True)),
euler_a=(EulerAncestralDiscreteScheduler, dict()),
kdpm_2=(KDPM2DiscreteScheduler, dict()),
kdpm_2_a=(KDPM2AncestralDiscreteScheduler, dict()),
dpmpp_2s=(DPMSolverSinglestepScheduler, dict()),
dpmpp_2m=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False)),
dpmpp_2m_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)),
unipc=(UniPCMultistepScheduler, dict(cpu_only=True))
)
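Each SCHEDULER_MAP entry pairs a diffusers scheduler class with the extra config that distinguishes it (for example use_karras_sigmas separates euler from euler_k and dpmpp_2m from dpmpp_2m_k), and callers unpack the tuple before calling from_config. A minimal usage sketch with SCHEDULER_MAP defined as above; the base config here comes from a default-constructed scheduler purely for illustration, standing in for a pipeline's existing scheduler config:

from diffusers import DPMSolverMultistepScheduler

scheduler_class, extra_config = SCHEDULER_MAP["dpmpp_2m_k"]
base_config = DPMSolverMultistepScheduler().config   # stand-in for pipeline.scheduler.config
scheduler = scheduler_class.from_config({**base_config, **extra_config})
print(type(scheduler).__name__, scheduler.config.use_karras_sigmas)
# -> DPMSolverMultistepScheduler True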

View File

@ -4,17 +4,20 @@ from .parse_seed_weights import parse_seed_weights
SAMPLER_CHOICES = [
"ddim",
"k_dpm_2_a",
"k_dpm_2",
"k_dpmpp_2_a",
"k_dpmpp_2",
"k_euler_a",
"k_euler",
"k_heun",
"k_lms",
"plms",
# diffusers:
"ddpm",
"deis",
"lms",
"pndm",
"heun",
"euler",
"euler_k",
"euler_a",
"kdpm_2",
"kdpm_2_a",
"dpmpp_2s",
"dpmpp_2m",
"dpmpp_2m_k",
"unipc",
]

View File

@ -5,6 +5,7 @@ import { PluginOption, UserConfig } from 'vite';
import dts from 'vite-plugin-dts';
import eslint from 'vite-plugin-eslint';
import tsconfigPaths from 'vite-tsconfig-paths';
import cssInjectedByJsPlugin from 'vite-plugin-css-injected-by-js';
export const packageConfig: UserConfig = {
base: './',
@ -16,9 +17,10 @@ export const packageConfig: UserConfig = {
dts({
insertTypesEntry: true,
}),
cssInjectedByJsPlugin(),
],
build: {
chunkSizeWarningLimit: 1500,
cssCodeSplit: true,
lib: {
entry: path.resolve(__dirname, '../src/index.ts'),
name: 'InvokeAIUI',
@ -30,6 +32,7 @@ export const packageConfig: UserConfig = {
globals: {
react: 'React',
'react-dom': 'ReactDOM',
'@emotion/react': 'EmotionReact',
},
},
},

View File

@ -37,7 +37,7 @@ From `invokeai/frontend/web/` run `yarn install` to get everything set up.
Start everything in dev mode:
1. Start the dev server: `yarn dev`
2. Start the InvokeAI UI per usual: `invokeai --web`
2. Start the InvokeAI Nodes backend: `python scripts/invokeai-new.py --web # run from the repo root`
3. Point your browser to the dev server address e.g. <http://localhost:5173/>
### Production builds

View File

@ -145,6 +145,7 @@
"terser": "^5.17.1",
"ts-toolbelt": "^9.6.0",
"vite": "^4.3.3",
"vite-plugin-css-injected-by-js": "^3.1.1",
"vite-plugin-dts": "^2.3.0",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.2.0",

View File

@ -25,7 +25,7 @@
"common": {
"hotkeysLabel": "Hotkeys",
"themeLabel": "Theme",
"languagePickerLabel": "Language Picker",
"languagePickerLabel": "Language",
"reportBugLabel": "Report Bug",
"githubLabel": "Github",
"discordLabel": "Discord",

View File

@ -7,18 +7,12 @@ import useToastWatcher from 'features/system/hooks/useToastWatcher';
import FloatingGalleryButton from 'features/ui/components/FloatingGalleryButton';
import FloatingParametersPanelButtons from 'features/ui/components/FloatingParametersPanelButtons';
import { Box, Flex, Grid, Portal, useColorMode } from '@chakra-ui/react';
import { Box, Flex, Grid, Portal } from '@chakra-ui/react';
import { APP_HEIGHT, APP_WIDTH } from 'theme/util/constants';
import GalleryDrawer from 'features/gallery/components/ImageGalleryPanel';
import Lightbox from 'features/lightbox/components/Lightbox';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
memo,
PropsWithChildren,
useCallback,
useEffect,
useState,
} from 'react';
import { memo, ReactNode, useCallback, useEffect, useState } from 'react';
import { motion, AnimatePresence } from 'framer-motion';
import Loading from 'common/components/Loading/Loading';
import { useIsApplicationReady } from 'features/system/hooks/useIsApplicationReady';
@ -27,21 +21,24 @@ import { useGlobalHotkeys } from 'common/hooks/useGlobalHotkeys';
import { configChanged } from 'features/system/store/configSlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { useLogger } from 'app/logging/useLogger';
import ProgressImagePreview from 'features/parameters/components/_ProgressImagePreview';
import ParametersDrawer from 'features/ui/components/ParametersDrawer';
import { languageSelector } from 'features/system/store/systemSelectors';
import i18n from 'i18n';
const DEFAULT_CONFIG = {};
interface Props extends PropsWithChildren {
interface Props {
config?: PartialAppConfig;
headerComponent?: ReactNode;
}
const App = ({ config = DEFAULT_CONFIG, children }: Props) => {
const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
useToastWatcher();
useGlobalHotkeys();
const log = useLogger();
const currentTheme = useAppSelector((state) => state.ui.currentTheme);
const language = useAppSelector(languageSelector);
const log = useLogger();
const isLightboxEnabled = useFeatureStatus('lightbox').isFeatureEnabled;
@ -49,18 +46,17 @@ const App = ({ config = DEFAULT_CONFIG, children }: Props) => {
const [loadingOverridden, setLoadingOverridden] = useState(false);
const { setColorMode } = useColorMode();
const dispatch = useAppDispatch();
useEffect(() => {
i18n.changeLanguage(language);
}, [language]);
useEffect(() => {
log.info({ namespace: 'App', data: config }, 'Received config');
dispatch(configChanged(config));
}, [dispatch, config, log]);
useEffect(() => {
setColorMode(['light'].includes(currentTheme) ? 'light' : 'dark');
}, [setColorMode, currentTheme]);
const handleOverrideClicked = useCallback(() => {
setLoadingOverridden(true);
}, []);
@ -77,7 +73,7 @@ const App = ({ config = DEFAULT_CONFIG, children }: Props) => {
w={APP_WIDTH}
h={APP_HEIGHT}
>
{children || <SiteHeader />}
{headerComponent || <SiteHeader />}
<Flex
gap={4}
w={{ base: '100vw', xl: 'full' }}

View File

@ -1,16 +1,13 @@
import React, { lazy, memo, PropsWithChildren, useEffect } from 'react';
import React, {
lazy,
memo,
PropsWithChildren,
ReactNode,
useEffect,
} from 'react';
import { Provider } from 'react-redux';
import { store } from 'app/store/store';
import { OpenAPI } from 'services/api';
import '@fontsource/inter/100.css';
import '@fontsource/inter/200.css';
import '@fontsource/inter/300.css';
import '@fontsource/inter/400.css';
import '@fontsource/inter/500.css';
import '@fontsource/inter/600.css';
import '@fontsource/inter/700.css';
import '@fontsource/inter/800.css';
import '@fontsource/inter/900.css';
import Loading from '../../common/components/Loading/Loading';
import { addMiddleware, resetMiddlewares } from 'redux-dynamic-middlewares';
@ -26,9 +23,10 @@ interface Props extends PropsWithChildren {
apiUrl?: string;
token?: string;
config?: PartialAppConfig;
headerComponent?: ReactNode;
}
const InvokeAIUI = ({ apiUrl, token, config, children }: Props) => {
const InvokeAIUI = ({ apiUrl, token, config, headerComponent }: Props) => {
useEffect(() => {
// configure API client token
if (token) {
@ -57,7 +55,7 @@ const InvokeAIUI = ({ apiUrl, token, config, children }: Props) => {
<Provider store={store}>
<React.Suspense fallback={<Loading />}>
<ThemeLocaleProvider>
<App config={config}>{children}</App>
<App config={config} headerComponent={headerComponent} />
</ThemeLocaleProvider>
</React.Suspense>
</Provider>

View File

@ -1,4 +1,8 @@
import { ChakraProvider, extendTheme } from '@chakra-ui/react';
import {
ChakraProvider,
createLocalStorageManager,
extendTheme,
} from '@chakra-ui/react';
import { ReactNode, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { theme as invokeAITheme } from 'theme/theme';
@ -9,15 +13,8 @@ import { greenTeaThemeColors } from 'theme/colors/greenTea';
import { invokeAIThemeColors } from 'theme/colors/invokeAI';
import { lightThemeColors } from 'theme/colors/lightTheme';
import { oceanBlueColors } from 'theme/colors/oceanBlue';
import '@fontsource/inter/100.css';
import '@fontsource/inter/200.css';
import '@fontsource/inter/300.css';
import '@fontsource/inter/400.css';
import '@fontsource/inter/500.css';
import '@fontsource/inter/600.css';
import '@fontsource/inter/700.css';
import '@fontsource/inter/800.css';
import '@fontsource/inter/900.css';
import '@fontsource/inter/variable.css';
import 'overlayscrollbars/overlayscrollbars.css';
import 'theme/css/overlayscrollbars.css';
@ -32,6 +29,8 @@ const THEMES = {
ocean: oceanBlueColors,
};
const manager = createLocalStorageManager('@@invokeai-color-mode');
function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
const { i18n } = useTranslation();
@ -51,7 +50,11 @@ function ThemeLocaleProvider({ children }: ThemeLocaleProviderProps) {
document.body.dir = direction;
}, [direction]);
return <ChakraProvider theme={theme}>{children}</ChakraProvider>;
return (
<ChakraProvider theme={theme} colorModeManager={manager}>
{children}
</ChakraProvider>
);
}
export default ThemeLocaleProvider;

View File

@ -2,17 +2,28 @@
export const DIFFUSERS_SCHEDULERS: Array<string> = [
'ddim',
'plms',
'k_lms',
'dpmpp_2',
'k_dpm_2',
'k_dpm_2_a',
'k_dpmpp_2',
'k_euler',
'k_euler_a',
'k_heun',
'ddpm',
'deis',
'lms',
'pndm',
'heun',
'euler',
'euler_k',
'euler_a',
'kdpm_2',
'kdpm_2_a',
'dpmpp_2s',
'dpmpp_2m',
'dpmpp_2m_k',
'unipc',
];
export const IMG2IMG_DIFFUSERS_SCHEDULERS = DIFFUSERS_SCHEDULERS.filter(
(scheduler) => {
return scheduler !== 'dpmpp_2s';
}
);
// Valid image widths
export const WIDTHS: Array<number> = Array.from(Array(64)).map(
(_x, i) => (i + 1) * 64

View File

@ -6,9 +6,12 @@ import { imageUploaded } from 'services/thunks/image';
export const addImageUploadedListener = () => {
startAppListening({
actionCreator: imageUploaded.fulfilled,
predicate: (action): action is ReturnType<typeof imageUploaded.fulfilled> =>
imageUploaded.fulfilled.match(action) &&
action.payload.response.image_type !== 'intermediates',
effect: (action, { dispatch, getState }) => {
const { response } = action.payload;
const state = getState();
const image = deserializeImageResponse(response);

View File

@ -47,15 +47,20 @@ export type CommonGeneratedImageMetadata = {
postprocessing: null | Array<ESRGANMetadata | FacetoolMetadata>;
sampler:
| 'ddim'
| 'k_dpm_2_a'
| 'k_dpm_2'
| 'k_dpmpp_2_a'
| 'k_dpmpp_2'
| 'k_euler_a'
| 'k_euler'
| 'k_heun'
| 'k_lms'
| 'plms';
| 'ddpm'
| 'deis'
| 'lms'
| 'pndm'
| 'heun'
| 'euler'
| 'euler_k'
| 'euler_a'
| 'kdpm_2'
| 'kdpm_2_a'
| 'dpmpp_2s'
| 'dpmpp_2m'
| 'dpmpp_2m_k'
| 'unipc';
prompt: Prompt;
seed: number;
variations: SeedWeights;
@ -321,11 +326,11 @@ export type AppFeature =
/**
* A disable-able Stable Diffusion feature
*/
export type StableDiffusionFeature =
| 'noiseConfig'
| 'variations'
export type SDFeature =
| 'noise'
| 'variation'
| 'symmetry'
| 'tiling'
| 'seamless'
| 'hires';
/**
@ -343,6 +348,7 @@ export type AppConfig = {
shouldFetchImages: boolean;
disabledTabs: InvokeTabName[];
disabledFeatures: AppFeature[];
disabledSDFeatures: SDFeature[];
canRestoreDeletedImagesFromBin: boolean;
sd: {
iterations: {

View File

@ -0,0 +1,54 @@
import { Badge, Flex } from '@chakra-ui/react';
import { Image } from 'app/types/invokeai';
import { isNumber, isString } from 'lodash-es';
import { useMemo } from 'react';
type ImageMetadataOverlayProps = {
image: Image;
};
const ImageMetadataOverlay = ({ image }: ImageMetadataOverlayProps) => {
const dimensions = useMemo(() => {
if (!isNumber(image.metadata?.width) || isNumber(!image.metadata?.height)) {
return;
}
return `${image.metadata?.width} × ${image.metadata?.height}`;
}, [image.metadata]);
const model = useMemo(() => {
if (!isString(image.metadata?.invokeai?.node?.model)) {
return;
}
return image.metadata?.invokeai?.node?.model;
}, [image.metadata]);
return (
<Flex
sx={{
pointerEvents: 'none',
flexDirection: 'column',
position: 'absolute',
top: 0,
right: 0,
p: 2,
alignItems: 'flex-end',
gap: 2,
}}
>
{dimensions && (
<Badge variant="solid" colorScheme="base">
{dimensions}
</Badge>
)}
{model && (
<Badge variant="solid" colorScheme="base">
{model}
</Badge>
)}
</Flex>
);
};
export default ImageMetadataOverlay;

View File

@ -1,37 +0,0 @@
import { Badge, Box, Flex } from '@chakra-ui/react';
import { Image } from 'app/types/invokeai';
type ImageToImageOverlayProps = {
image: Image;
};
const ImageToImageOverlay = ({ image }: ImageToImageOverlayProps) => {
return (
<Box
sx={{
top: 0,
left: 0,
w: 'full',
h: 'full',
position: 'absolute',
pointerEvents: 'none',
}}
>
<Flex
sx={{
position: 'absolute',
top: 0,
right: 0,
p: 2,
alignItems: 'flex-start',
}}
>
<Badge variant="solid" colorScheme="base">
{image.metadata?.width} × {image.metadata?.height}
</Badge>
</Flex>
</Box>
);
};
export default ImageToImageOverlay;

View File

@ -152,6 +152,7 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
} = useAppSelector(currentImageButtonsSelector);
const isLightboxEnabled = useFeatureStatus('lightbox').isFeatureEnabled;
const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled;
const isUpscalingEnabled = useFeatureStatus('upscaling').isFeatureEnabled;
const isFaceRestoreEnabled = useFeatureStatus('faceRestore').isFeatureEnabled;
@ -429,13 +430,15 @@ const CurrentImageButtons = (props: CurrentImageButtonsProps) => {
>
{t('parameters.sendToImg2Img')}
</IAIButton>
<IAIButton
size="sm"
onClick={handleSendToCanvas}
leftIcon={<FaShare />}
>
{t('parameters.sendToUnifiedCanvas')}
</IAIButton>
{isCanvasEnabled && (
<IAIButton
size="sm"
onClick={handleSendToCanvas}
leftIcon={<FaShare />}
>
{t('parameters.sendToUnifiedCanvas')}
</IAIButton>
)}
{/* <IAIButton
size="sm"

View File

@ -1,4 +1,4 @@
import { Box, Flex, Image, Skeleton, useBoolean } from '@chakra-ui/react';
import { Box, Flex, Image } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { useGetUrl } from 'common/util/getUrl';
@ -11,7 +11,8 @@ import NextPrevImageButtons from './NextPrevImageButtons';
import CurrentImageHidden from './CurrentImageHidden';
import { DragEvent, memo, useCallback } from 'react';
import { systemSelector } from 'features/system/store/systemSelectors';
import CurrentImageFallback from './CurrentImageFallback';
import ImageFallbackSpinner from './ImageFallbackSpinner';
import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay';
export const imagesSelector = createSelector(
[uiSelector, gallerySelector, systemSelector],
@ -50,8 +51,6 @@ const CurrentImagePreview = () => {
} = useAppSelector(imagesSelector);
const { getUrl } = useGetUrl();
const [isLoaded, { on, off }] = useBoolean();
const handleDragStart = useCallback(
(e: DragEvent<HTMLDivElement>) => {
if (!image) {
@ -67,11 +66,11 @@ const CurrentImagePreview = () => {
return (
<Flex
sx={{
position: 'relative',
justifyContent: 'center',
alignItems: 'center',
width: '100%',
height: '100%',
position: 'relative',
alignItems: 'center',
justifyContent: 'center',
}}
>
{progressImage && shouldShowProgressInViewer ? (
@ -91,28 +90,23 @@ const CurrentImagePreview = () => {
/>
) : (
image && (
<Image
onDragStart={handleDragStart}
fallbackStrategy="beforeLoadOrError"
src={shouldHidePreview ? undefined : getUrl(image.url)}
width={image.metadata.width || 'auto'}
height={image.metadata.height || 'auto'}
fallback={
shouldHidePreview ? (
<CurrentImageHidden />
) : (
<CurrentImageFallback />
)
}
sx={{
objectFit: 'contain',
maxWidth: '100%',
maxHeight: '100%',
height: 'auto',
position: 'absolute',
borderRadius: 'base',
}}
/>
<>
<Image
src={getUrl(image.url)}
fallbackStrategy="beforeLoadOrError"
fallback={<ImageFallbackSpinner />}
onDragStart={handleDragStart}
sx={{
objectFit: 'contain',
maxWidth: '100%',
maxHeight: '100%',
height: 'auto',
position: 'absolute',
borderRadius: 'base',
}}
/>
<ImageMetadataOverlay image={image} />
</>
)
)}
{shouldShowImageDetails && image && 'metadata' in image && (

View File

@ -1,4 +1,4 @@
import { Box, Flex, Image } from '@chakra-ui/react';
import { Flex, Image, Spinner } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
@ -42,6 +42,7 @@ const GalleryProgressImage = () => {
alignItems: 'center',
justifyContent: 'center',
aspectRatio: '1/1',
position: 'relative',
}}
>
<Image
@ -61,6 +62,7 @@ const GalleryProgressImage = () => {
imageRendering: shouldAntialiasProgressImage ? 'auto' : 'pixelated',
}}
/>
<Spinner sx={{ position: 'absolute', top: 1, right: 1, opacity: 0.7 }} />
</Flex>
);
};

View File

@ -104,7 +104,10 @@ const HoverableImage = memo((props: HoverableImageProps) => {
const toast = useToast();
const { t } = useTranslation();
const { isFeatureEnabled: isLightboxEnabled } = useFeatureStatus('lightbox');
const isLightboxEnabled = useFeatureStatus('lightbox').isFeatureEnabled;
const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled;
const { recallSeed, recallPrompt, recallInitialImage, recallAllParameters } =
useParameters();
@ -250,9 +253,11 @@ const HoverableImage = memo((props: HoverableImageProps) => {
>
{t('parameters.sendToImg2Img')}
</MenuItem>
<MenuItem icon={<FaShare />} onClickCapture={handleSendToCanvas}>
{t('parameters.sendToUnifiedCanvas')}
</MenuItem>
{isCanvasEnabled && (
<MenuItem icon={<FaShare />} onClickCapture={handleSendToCanvas}>
{t('parameters.sendToUnifiedCanvas')}
</MenuItem>
)}
<MenuItem icon={<FaTrash />} onClickCapture={onDeleteDialogOpen}>
{t('gallery.deleteImage')}
</MenuItem>
@ -278,6 +283,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
h: 'full',
transition: 'transform 0.2s ease-out',
aspectRatio: '1/1',
cursor: 'pointer',
}}
>
<Image

View File

@ -1,8 +1,8 @@
import { Flex, Spinner, SpinnerProps } from '@chakra-ui/react';
type CurrentImageFallbackProps = SpinnerProps;
type ImageFallbackSpinnerProps = SpinnerProps;
const CurrentImageFallback = (props: CurrentImageFallbackProps) => {
const ImageFallbackSpinner = (props: ImageFallbackSpinnerProps) => {
const { size = 'xl', ...rest } = props;
return (
@ -21,4 +21,4 @@ const CurrentImageFallback = (props: CurrentImageFallbackProps) => {
);
};
export default CurrentImageFallback;
export default ImageFallbackSpinner;

View File

@ -54,11 +54,7 @@ import { uploadsAdapter } from '../store/uploadsSlice';
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store/store';
import { Virtuoso, VirtuosoGrid } from 'react-virtuoso';
import ProgressImagePreview from 'features/parameters/components/_ProgressImagePreview';
import ProgressImage from 'features/parameters/components/ProgressImage';
import { systemSelector } from 'features/system/store/systemSelectors';
import { Image as ImageType } from 'app/types/invokeai';
import { ProgressImage as ProgressImageType } from 'services/events/types';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import GalleryProgressImage from './GalleryProgressImage';
@ -71,13 +67,13 @@ const selector = createSelector(
const { results, uploads, system, gallery } = state;
const { currentCategory } = gallery;
const tempImages: (ImageType | typeof PROGRESS_IMAGE_PLACEHOLDER)[] = [];
if (system.progressImage) {
tempImages.push(PROGRESS_IMAGE_PLACEHOLDER);
}
if (currentCategory === 'results') {
const tempImages: (ImageType | typeof PROGRESS_IMAGE_PLACEHOLDER)[] = [];
if (system.progressImage) {
tempImages.push(PROGRESS_IMAGE_PLACEHOLDER);
}
return {
images: tempImages.concat(
resultsAdapter.getSelectors().selectAll(results)
@ -88,9 +84,7 @@ const selector = createSelector(
}
return {
images: tempImages.concat(
uploadsAdapter.getSelectors().selectAll(uploads)
),
images: uploadsAdapter.getSelectors().selectAll(uploads),
isLoading: uploads.isLoading,
areMoreImagesAvailable: uploads.page < uploads.pages - 1,
};

View File

@ -1,6 +1,7 @@
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import { Image } from 'app/types/invokeai';
import { imageReceived, thumbnailReceived } from 'services/thunks/image';
type GalleryImageObjectFitType = 'contain' | 'cover';
@ -63,6 +64,29 @@ export const gallerySlice = createSlice({
state.shouldUseSingleGalleryColumn = action.payload;
},
},
extraReducers(builder) {
builder.addCase(imageReceived.fulfilled, (state, action) => {
// When we get an updated URL for an image, we need to update the selectedImage in gallery,
// which is currently its own object (instead of a reference to an image in results/uploads)
const { imagePath } = action.payload;
const { imageName } = action.meta.arg;
if (state.selectedImage?.name === imageName) {
state.selectedImage.url = imagePath;
}
});
builder.addCase(thumbnailReceived.fulfilled, (state, action) => {
// When we get an updated URL for an image, we need to update the selectedImage in gallery,
// which is currently its own object (instead of a reference to an image in results/uploads)
const { thumbnailPath } = action.payload;
const { thumbnailName } = action.meta.arg;
if (state.selectedImage?.name === thumbnailName) {
state.selectedImage.thumbnail = thumbnailPath;
}
});
},
});
export const {

View File

@ -20,7 +20,7 @@ export const iterationGraph = {
model: '',
progress_images: false,
prompt: 'dog',
sampler_name: 'k_lms',
sampler_name: 'lms',
seamless: false,
steps: 11,
type: 'txt2img',

View File

@ -1,8 +1,12 @@
import { DIFFUSERS_SCHEDULERS } from 'app/constants';
import {
DIFFUSERS_SCHEDULERS,
IMG2IMG_DIFFUSERS_SCHEDULERS,
} from 'app/constants';
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAISelect from 'common/components/IAISelect';
import { setSampler } from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { ChangeEvent, memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@ -10,6 +14,9 @@ const ParamSampler = () => {
const sampler = useAppSelector(
(state: RootState) => state.generation.sampler
);
const activeTabName = useAppSelector(activeTabNameSelector);
const dispatch = useAppDispatch();
const { t } = useTranslation();
@ -23,7 +30,11 @@ const ParamSampler = () => {
label={t('parameters.sampler')}
value={sampler}
onChange={handleChange}
validValues={DIFFUSERS_SCHEDULERS}
validValues={
activeTabName === 'img2img' || activeTabName == 'unifiedCanvas'
? IMG2IMG_DIFFUSERS_SCHEDULERS
: DIFFUSERS_SCHEDULERS
}
minWidth={36}
/>
);

View File

@ -6,6 +6,7 @@ import IAICollapse from 'common/components/IAICollapse';
import { memo } from 'react';
import { ParamHiresStrength } from './ParamHiresStrength';
import { setHiresFix } from 'features/parameters/store/postprocessingSlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
const ParamHiresCollapse = () => {
const { t } = useTranslation();
@ -13,10 +14,16 @@ const ParamHiresCollapse = () => {
(state: RootState) => state.postprocessing.hiresFix
);
const isHiresEnabled = useFeatureStatus('hires').isFeatureEnabled;
const dispatch = useAppDispatch();
const handleToggle = () => dispatch(setHiresFix(!hiresFix));
if (!isHiresEnabled) {
return null;
}
return (
<IAICollapse
label={t('parameters.hiresOptim')}

View File

@ -47,7 +47,7 @@ const ImageToImageStrength = () => {
return (
<IAISlider
label={`${t('parameters.strength')}`}
label={`${t('parameters.denoisingStrength')}`}
step={step}
min={min}
max={sliderMax}

View File

@ -1,17 +1,18 @@
import { Flex, Image, Spinner } from '@chakra-ui/react';
import { Flex, Image } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import SelectImagePlaceholder from 'common/components/SelectImagePlaceholder';
import { useGetUrl } from 'common/util/getUrl';
import { clearInitialImage } from 'features/parameters/store/generationSlice';
import { addToast } from 'features/system/store/systemSlice';
import { DragEvent, useCallback, useState } from 'react';
import { DragEvent, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { ImageType } from 'services/api';
import ImageToImageOverlay from 'common/components/ImageToImageOverlay';
import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay';
import { generationSelector } from 'features/parameters/store/generationSelectors';
import { initialImageSelected } from 'features/parameters/store/actions';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import ImageFallbackSpinner from 'features/gallery/components/ImageFallbackSpinner';
const selector = createSelector(
[generationSelector],
@ -30,8 +31,6 @@ const InitialImagePreview = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const [isLoaded, setIsLoaded] = useState(false);
const onError = () => {
dispatch(
addToast({
@ -42,13 +41,10 @@ const InitialImagePreview = () => {
})
);
dispatch(clearInitialImage());
setIsLoaded(false);
};
const handleDrop = useCallback(
(e: DragEvent<HTMLDivElement>) => {
setIsLoaded(false);
const name = e.dataTransfer.getData('invokeai/imageName');
const type = e.dataTransfer.getData('invokeai/imageType') as ImageType;
@ -62,48 +58,32 @@ const InitialImagePreview = () => {
sx={{
width: 'full',
height: 'full',
position: 'relative',
alignItems: 'center',
justifyContent: 'center',
position: 'relative',
}}
onDrop={handleDrop}
>
<Flex
sx={{
height: 'full',
width: 'full',
blur: '5px',
position: 'relative',
alignItems: 'center',
justifyContent: 'center',
}}
>
{initialImage?.url && (
<>
<Image
sx={{
objectFit: 'contain',
borderRadius: 'base',
maxHeight: 'full',
}}
src={getUrl(initialImage?.url)}
onError={onError}
onLoad={() => {
setIsLoaded(true);
}}
fallback={
<Flex
sx={{ h: 36, alignItems: 'center', justifyContent: 'center' }}
>
<Spinner color="grey" w="5rem" h="5rem" />
</Flex>
}
/>
{isLoaded && <ImageToImageOverlay image={initialImage} />}
</>
)}
{!initialImage?.url && <SelectImagePlaceholder />}
</Flex>
{initialImage?.url && (
<>
<Image
src={getUrl(initialImage?.url)}
fallbackStrategy="beforeLoadOrError"
fallback={<ImageFallbackSpinner />}
onError={onError}
sx={{
objectFit: 'contain',
maxWidth: '100%',
maxHeight: '100%',
height: 'auto',
position: 'absolute',
borderRadius: 'base',
}}
/>
<ImageMetadataOverlay image={initialImage} />
</>
)}
{!initialImage?.url && <SelectImagePlaceholder />}
</Flex>
);
};

View File

@ -7,9 +7,13 @@ import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { setShouldUseNoiseSettings } from 'features/parameters/store/generationSlice';
import { memo } from 'react';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
const ParamNoiseCollapse = () => {
const { t } = useTranslation();
const isNoiseEnabled = useFeatureStatus('noise').isFeatureEnabled;
const shouldUseNoiseSettings = useAppSelector(
(state: RootState) => state.generation.shouldUseNoiseSettings
);
@ -19,6 +23,10 @@ const ParamNoiseCollapse = () => {
const handleToggle = () =>
dispatch(setShouldUseNoiseSettings(!shouldUseNoiseSettings));
if (!isNoiseEnabled) {
return null;
}
return (
<IAICollapse
label={t('parameters.noiseSettings')}

View File

@ -9,6 +9,7 @@ import { generationSelector } from 'features/parameters/store/generationSelector
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import ParamSeamlessXAxis from './ParamSeamlessXAxis';
import ParamSeamlessYAxis from './ParamSeamlessYAxis';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
const selector = createSelector(
generationSelector,
@ -24,10 +25,16 @@ const ParamSeamlessCollapse = () => {
const { t } = useTranslation();
const { shouldUseSeamless } = useAppSelector(selector);
const isSeamlessEnabled = useFeatureStatus('seamless').isFeatureEnabled;
const dispatch = useAppDispatch();
const handleToggle = () => dispatch(setSeamless(!shouldUseSeamless));
if (!isSeamlessEnabled) {
return null;
}
return (
<IAICollapse
label={t('parameters.seamlessTiling')}

View File

@ -8,6 +8,7 @@ import IAICollapse from 'common/components/IAICollapse';
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { setShouldUseSymmetry } from 'features/parameters/store/generationSlice';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
const ParamSymmetryCollapse = () => {
const { t } = useTranslation();
@ -15,10 +16,16 @@ const ParamSymmetryCollapse = () => {
(state: RootState) => state.generation.shouldUseSymmetry
);
const isSymmetryEnabled = useFeatureStatus('symmetry').isFeatureEnabled;
const dispatch = useAppDispatch();
const handleToggle = () => dispatch(setShouldUseSymmetry(!shouldUseSymmetry));
if (!isSymmetryEnabled) {
return null;
}
return (
<IAICollapse
label={t('parameters.symmetry')}

View File

@ -7,6 +7,7 @@ import { setShouldGenerateVariations } from 'features/parameters/store/generatio
import { Flex } from '@chakra-ui/react';
import IAICollapse from 'common/components/IAICollapse';
import { memo } from 'react';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
const ParamVariationCollapse = () => {
const { t } = useTranslation();
@ -14,11 +15,17 @@ const ParamVariationCollapse = () => {
(state: RootState) => state.generation.shouldGenerateVariations
);
const isVariationEnabled = useFeatureStatus('variation').isFeatureEnabled;
const dispatch = useAppDispatch();
const handleToggle = () =>
dispatch(setShouldGenerateVariations(!shouldGenerateVariations));
if (!isVariationEnabled) {
return null;
}
return (
<IAICollapse
label={t('parameters.variations')}

View File

@ -51,7 +51,7 @@ export const initialGenerationState: GenerationState = {
perlin: 0,
prompt: '',
negativePrompt: '',
sampler: 'k_lms',
sampler: 'lms',
seamBlur: 16,
seamSize: 96,
seamSteps: 30,

View File

@ -1,73 +1,69 @@
import type { ReactNode } from 'react';
import { VStack } from '@chakra-ui/react';
import IAIButton from 'common/components/IAIButton';
import IAIIconButton from 'common/components/IAIIconButton';
import IAIPopover from 'common/components/IAIPopover';
import {
IconButton,
Menu,
MenuButton,
MenuItemOption,
MenuList,
MenuOptionGroup,
Tooltip,
} from '@chakra-ui/react';
import { useTranslation } from 'react-i18next';
import { FaCheck, FaLanguage } from 'react-icons/fa';
import i18n from 'i18n';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { languageSelector } from '../store/systemSelectors';
import { languageChanged } from '../store/systemSlice';
import { map } from 'lodash-es';
import { IoLanguage } from 'react-icons/io5';
export const LANGUAGES = {
ar: i18n.t('common.langArabic', { lng: 'ar' }),
nl: i18n.t('common.langDutch', { lng: 'nl' }),
en: i18n.t('common.langEnglish', { lng: 'en' }),
fr: i18n.t('common.langFrench', { lng: 'fr' }),
de: i18n.t('common.langGerman', { lng: 'de' }),
he: i18n.t('common.langHebrew', { lng: 'he' }),
it: i18n.t('common.langItalian', { lng: 'it' }),
ja: i18n.t('common.langJapanese', { lng: 'ja' }),
ko: i18n.t('common.langKorean', { lng: 'ko' }),
pl: i18n.t('common.langPolish', { lng: 'pl' }),
pt_BR: i18n.t('common.langBrPortuguese', { lng: 'pt_BR' }),
pt: i18n.t('common.langPortuguese', { lng: 'pt' }),
ru: i18n.t('common.langRussian', { lng: 'ru' }),
zh_CN: i18n.t('common.langSimplifiedChinese', { lng: 'zh_CN' }),
es: i18n.t('common.langSpanish', { lng: 'es' }),
uk: i18n.t('common.langUkranian', { lng: 'ua' }),
};
export default function LanguagePicker() {
const { t, i18n } = useTranslation();
const LANGUAGES = {
ar: t('common.langArabic', { lng: 'ar' }),
nl: t('common.langDutch', { lng: 'nl' }),
en: t('common.langEnglish', { lng: 'en' }),
fr: t('common.langFrench', { lng: 'fr' }),
de: t('common.langGerman', { lng: 'de' }),
he: t('common.langHebrew', { lng: 'he' }),
it: t('common.langItalian', { lng: 'it' }),
ja: t('common.langJapanese', { lng: 'ja' }),
ko: t('common.langKorean', { lng: 'ko' }),
pl: t('common.langPolish', { lng: 'pl' }),
pt_BR: t('common.langBrPortuguese', { lng: 'pt_BR' }),
pt: t('common.langPortuguese', { lng: 'pt' }),
ru: t('common.langRussian', { lng: 'ru' }),
zh_CN: t('common.langSimplifiedChinese', { lng: 'zh_CN' }),
es: t('common.langSpanish', { lng: 'es' }),
uk: t('common.langUkranian', { lng: 'ua' }),
};
const renderLanguagePicker = () => {
const languagesToRender: ReactNode[] = [];
Object.keys(LANGUAGES).forEach((lang) => {
languagesToRender.push(
<IAIButton
key={lang}
isChecked={localStorage.getItem('i18nextLng') === lang}
leftIcon={
localStorage.getItem('i18nextLng') === lang ? (
<FaCheck />
) : undefined
}
onClick={() => i18n.changeLanguage(lang)}
aria-label={LANGUAGES[lang as keyof typeof LANGUAGES]}
size="sm"
minWidth="200px"
>
{LANGUAGES[lang as keyof typeof LANGUAGES]}
</IAIButton>
);
});
return languagesToRender;
};
const { t } = useTranslation();
const dispatch = useAppDispatch();
const language = useAppSelector(languageSelector);
return (
<IAIPopover
triggerComponent={
<IAIIconButton
aria-label={t('common.languagePickerLabel')}
tooltip={t('common.languagePickerLabel')}
icon={<FaLanguage />}
size="sm"
<Menu closeOnSelect={false}>
<Tooltip label={t('common.languagePickerLabel')} hasArrow>
<MenuButton
as={IconButton}
icon={<IoLanguage />}
variant="link"
data-variant="link"
fontSize={26}
aria-label={t('common.languagePickerLabel')}
fontSize={22}
minWidth={8}
/>
}
>
<VStack>{renderLanguagePicker()}</VStack>
</IAIPopover>
</Tooltip>
<MenuList>
<MenuOptionGroup value={language}>
{map(LANGUAGES, (languageName, l: keyof typeof LANGUAGES) => (
<MenuItemOption
key={l}
value={l}
onClick={() => dispatch(languageChanged(l))}
>
{languageName}
</MenuItemOption>
))}
</MenuOptionGroup>
</MenuList>
</Menu>
);
}

View File

@ -36,7 +36,6 @@ const ProgressBar = () => {
aria-label={t('accessibility.invokeProgressBar')}
isIndeterminate={isProcessing && !currentStatusHasSteps}
height={PROGRESS_BAR_THICKNESS}
zIndex={99}
/>
);
};

View File

@ -1,13 +1,26 @@
import { VStack } from '@chakra-ui/react';
import {
IconButton,
Menu,
MenuButton,
MenuItemOption,
MenuList,
MenuOptionGroup,
Tooltip,
} from '@chakra-ui/react';
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIButton from 'common/components/IAIButton';
import IAIIconButton from 'common/components/IAIIconButton';
import IAIPopover from 'common/components/IAIPopover';
import { setCurrentTheme } from 'features/ui/store/uiSlice';
import type { ReactNode } from 'react';
import i18n from 'i18n';
import { map } from 'lodash-es';
import { useTranslation } from 'react-i18next';
import { FaCheck, FaPalette } from 'react-icons/fa';
import { FaPalette } from 'react-icons/fa';
export const THEMES = {
dark: i18n.t('common.darkTheme'),
light: i18n.t('common.lightTheme'),
green: i18n.t('common.greenTheme'),
ocean: i18n.t('common.oceanTheme'),
};
export default function ThemeChanger() {
const { t } = useTranslation();
@ -17,51 +30,31 @@ export default function ThemeChanger() {
(state: RootState) => state.ui.currentTheme
);
const THEMES = {
dark: t('common.darkTheme'),
light: t('common.lightTheme'),
green: t('common.greenTheme'),
ocean: t('common.oceanTheme'),
};
const handleChangeTheme = (theme: string) => {
dispatch(setCurrentTheme(theme));
};
const renderThemeOptions = () => {
const themesToRender: ReactNode[] = [];
Object.keys(THEMES).forEach((theme) => {
themesToRender.push(
<IAIButton
isChecked={currentTheme === theme}
leftIcon={currentTheme === theme ? <FaCheck /> : undefined}
size="sm"
onClick={() => handleChangeTheme(theme)}
key={theme}
>
{THEMES[theme as keyof typeof THEMES]}
</IAIButton>
);
});
return themesToRender;
};
return (
<IAIPopover
triggerComponent={
<IAIIconButton
aria-label={t('common.themeLabel')}
size="sm"
variant="link"
data-variant="link"
fontSize={20}
<Menu closeOnSelect={false}>
<Tooltip label={t('common.themeLabel')} hasArrow>
<MenuButton
as={IconButton}
icon={<FaPalette />}
variant="link"
aria-label={t('common.themeLabel')}
fontSize={20}
minWidth={8}
/>
}
>
<VStack align="stretch">{renderThemeOptions()}</VStack>
</IAIPopover>
</Tooltip>
<MenuList>
<MenuOptionGroup value={currentTheme}>
{map(THEMES, (themeName, themeKey: keyof typeof THEMES) => (
<MenuItemOption
key={themeKey}
value={themeKey}
onClick={() => dispatch(setCurrentTheme(themeKey))}
>
{themeName}
</MenuItemOption>
))}
</MenuOptionGroup>
</MenuList>
</Menu>
);
}

View File

@ -1,21 +1,40 @@
import { AppFeature } from 'app/types/invokeai';
import { AppFeature, SDFeature } from 'app/types/invokeai';
import { RootState } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';
import { useMemo } from 'react';
import { InvokeTabName } from 'features/ui/store/tabMap';
export const useFeatureStatus = (
feature: AppFeature | SDFeature | InvokeTabName
) => {
const disabledTabs = useAppSelector(
(state: RootState) => state.config.disabledTabs
);
export const useFeatureStatus = (feature: AppFeature) => {
const disabledFeatures = useAppSelector(
(state: RootState) => state.config.disabledFeatures
);
const disabledSDFeatures = useAppSelector(
(state: RootState) => state.config.disabledSDFeatures
);
const isFeatureDisabled = useMemo(
() => disabledFeatures.includes(feature),
[disabledFeatures, feature]
() =>
disabledFeatures.includes(feature as AppFeature) ||
disabledSDFeatures.includes(feature as SDFeature) ||
disabledTabs.includes(feature as InvokeTabName),
[disabledFeatures, disabledSDFeatures, disabledTabs, feature]
);
const isFeatureEnabled = useMemo(
() => !disabledFeatures.includes(feature),
[disabledFeatures, feature]
() =>
!(
disabledFeatures.includes(feature as AppFeature) ||
disabledSDFeatures.includes(feature as SDFeature) ||
disabledTabs.includes(feature as InvokeTabName)
),
[disabledFeatures, disabledSDFeatures, disabledTabs, feature]
);
return { isFeatureDisabled, isFeatureEnabled };
};
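The hook now accepts any of the three identifier types (AppFeature, SDFeature, InvokeTabName) and reports a feature as disabled if it appears in any of the corresponding disabled lists. A hypothetical usage sketch — the keys passed in are illustrative, not taken from this diff:

const { isFeatureEnabled: isImg2ImgTabEnabled } = useFeatureStatus('img2img');    // an InvokeTabName
const { isFeatureDisabled: isUpscalingDisabled } = useFeatureStatus('upscaling'); // assumed feature key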

View File

@ -8,6 +8,7 @@ export const initialConfigState: AppConfig = {
shouldFetchImages: false,
disabledTabs: [],
disabledFeatures: [],
disabledSDFeatures: [],
canRestoreDeletedImagesFromBin: true,
sd: {
iterations: {

View File

@ -1,6 +1,7 @@
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store/store';
import { isEqual, reduce, pickBy } from 'lodash-es';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import { reduce, pickBy } from 'lodash-es';
export const systemSelector = (state: RootState) => state.system;
@ -22,11 +23,7 @@ export const activeModelSelector = createSelector(
);
return { ...model_list[activeModel], name: activeModel };
},
{
memoizeOptions: {
resultEqualityCheck: isEqual,
},
}
defaultSelectorOptions
);
export const diffusersModelsSelector = createSelector(
@ -42,9 +39,11 @@ export const diffusersModelsSelector = createSelector(
return diffusersModels;
},
{
memoizeOptions: {
resultEqualityCheck: isEqual,
},
}
defaultSelectorOptions
);
export const languageSelector = createSelector(
systemSelector,
(system) => system.language,
defaultSelectorOptions
);

View File

@ -24,6 +24,7 @@ import { InvokeLogLevel } from 'app/logging/useLogger';
import { TFuncKey } from 'i18next';
import { t } from 'i18next';
import { userInvoked } from 'app/store/actions';
import { LANGUAGES } from '../components/LanguagePicker';
export type CancelStrategy = 'immediate' | 'scheduled';
@ -91,6 +92,7 @@ export interface SystemState {
infillMethods: InfillMethod[];
isPersisted: boolean;
shouldAntialiasProgressImage: boolean;
language: keyof typeof LANGUAGES;
}
export const initialSystemState: SystemState = {
@ -125,6 +127,7 @@ export const initialSystemState: SystemState = {
canceledSession: '',
infillMethods: ['tile', 'patchmatch'],
isPersisted: false,
language: 'en',
};
export const systemSlice = createSlice({
@ -272,6 +275,9 @@ export const systemSlice = createSlice({
isPersistedChanged: (state, action: PayloadAction<boolean>) => {
state.isPersisted = action.payload;
},
languageChanged: (state, action: PayloadAction<keyof typeof LANGUAGES>) => {
state.language = action.payload;
},
},
extraReducers(builder) {
/**
@ -418,6 +424,7 @@ export const systemSlice = createSlice({
state.currentStep = 0;
state.totalSteps = 0;
state.statusTranslationKey = 'common.statusConnected';
state.progressImage = null;
state.toastQueue.push(
makeToast({ title: t('toast.canceled'), status: 'warning' })
@ -480,6 +487,7 @@ export const {
shouldLogToConsoleChanged,
isPersistedChanged,
shouldAntialiasProgressImageChanged,
languageChanged,
} = systemSlice.actions;
export default systemSlice.reducer;

View File

@ -44,7 +44,6 @@ const FloatingGalleryButton = () => {
pos: 'absolute',
top: '50%',
transform: 'translate(0, -50%)',
zIndex: 31,
p: 0,
insetInlineEnd: 0,
px: 3,

View File

@ -73,7 +73,6 @@ const FloatingParametersPanelButtons = () => {
<Flex
pos="absolute"
transform="translate(0, -50%)"
zIndex={20}
minW={8}
top="50%"
insetInlineStart="4.5rem"

View File

@ -45,12 +45,12 @@ export interface InvokeTabInfo {
const tabs: InvokeTabInfo[] = [
{
id: 'txt2img',
icon: <Icon as={GoTextSize} sx={{ boxSize: 5 }} />,
icon: <Icon as={GoTextSize} sx={{ boxSize: 6 }} />,
content: <TextToImageTab />,
},
{
id: 'img2img',
icon: <Icon as={FaImage} sx={{ boxSize: 5 }} />,
icon: <Icon as={FaImage} sx={{ boxSize: 6 }} />,
content: <ImageTab />,
},
{

View File

@ -142,7 +142,7 @@ const ResizableDrawer = ({
direction={slideDirection}
in={isOpen}
motionProps={{ initial: false }}
style={{ zIndex: 99, width: 'full' }}
style={{ width: 'full' }}
>
<Box
ref={outsideClickRef}

View File

@ -64,8 +64,6 @@ const ImageToImageTabCoreParameters = () => {
<ParamSteps />
<ParamCFGScale />
</Flex>
<ParamWidth isDisabled={!shouldFitToWidthHeight} />
<ParamHeight isDisabled={!shouldFitToWidthHeight} />
<Flex gap={3} w="full">
<Box flexGrow={2}>
<ParamSampler />
@ -74,6 +72,8 @@ const ImageToImageTabCoreParameters = () => {
<ModelSelect />
</Box>
</Flex>
<ParamWidth isDisabled={!shouldFitToWidthHeight} />
<ParamHeight isDisabled={!shouldFitToWidthHeight} />
<ImageToImageStrength />
<ImageToImageFit />
</Flex>

View File

@ -62,8 +62,6 @@ const UnifiedCanvasCoreParameters = () => {
<ParamSteps />
<ParamCFGScale />
</Flex>
<ParamWidth />
<ParamHeight />
<Flex gap={3} w="full">
<Box flexGrow={2}>
<ParamSampler />
@ -72,8 +70,9 @@ const UnifiedCanvasCoreParameters = () => {
<ModelSelect />
</Box>
</Flex>
<ParamWidth />
<ParamHeight />
<ImageToImageStrength />
<ImageToImageFit />
</Flex>
)}
</Flex>

View File

@ -3,7 +3,7 @@ import LanguageDetector from 'i18next-browser-languagedetector';
import Backend from 'i18next-http-backend';
import { initReactI18next } from 'react-i18next';
import translationEN from '../dist/locales/en.json';
import translationEN from '../public/locales/en.json';
import { LOCALSTORAGE_PREFIX } from 'app/store/constants';
if (import.meta.env.MODE === 'package') {
@ -21,11 +21,11 @@ if (import.meta.env.MODE === 'package') {
} else {
i18n
.use(Backend)
.use(
new LanguageDetector(null, {
lookupLocalStorage: `${LOCALSTORAGE_PREFIX}lng`,
})
)
// .use(
// new LanguageDetector(null, {
// lookupLocalStorage: `${LOCALSTORAGE_PREFIX}lng`,
// })
// )
.use(initReactI18next)
.init({
fallbackLng: 'en',

View File

@ -19,7 +19,6 @@ export type { ConditioningField } from './models/ConditioningField';
export type { CreateModelRequest } from './models/CreateModelRequest';
export type { CropImageInvocation } from './models/CropImageInvocation';
export type { CvInpaintInvocation } from './models/CvInpaintInvocation';
export type { DataURLToImageInvocation } from './models/DataURLToImageInvocation';
export type { DiffusersModelInfo } from './models/DiffusersModelInfo';
export type { DivideInvocation } from './models/DivideInvocation';
export type { Edge } from './models/Edge';
@ -92,7 +91,6 @@ export { $ConditioningField } from './schemas/$ConditioningField';
export { $CreateModelRequest } from './schemas/$CreateModelRequest';
export { $CropImageInvocation } from './schemas/$CropImageInvocation';
export { $CvInpaintInvocation } from './schemas/$CvInpaintInvocation';
export { $DataURLToImageInvocation } from './schemas/$DataURLToImageInvocation';
export { $DiffusersModelInfo } from './schemas/$DiffusersModelInfo';
export { $DivideInvocation } from './schemas/$DivideInvocation';
export { $Edge } from './schemas/$Edge';

View File

@ -1,19 +0,0 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Outputs an image from a data URL.
*/
export type DataURLToImageInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
type?: 'dataURL_image';
/**
* The b64 data URL
*/
dataURL: string;
};

View File

@ -8,7 +8,6 @@ import type { CollectInvocation } from './CollectInvocation';
import type { CompelInvocation } from './CompelInvocation';
import type { CropImageInvocation } from './CropImageInvocation';
import type { CvInpaintInvocation } from './CvInpaintInvocation';
import type { DataURLToImageInvocation } from './DataURLToImageInvocation';
import type { DivideInvocation } from './DivideInvocation';
import type { Edge } from './Edge';
import type { GraphInvocation } from './GraphInvocation';
@ -48,7 +47,7 @@ export type Graph = {
/**
* The nodes in this graph
*/
nodes?: Record<string, (LoadImageInvocation | ShowImageInvocation | DataURLToImageInvocation | CropImageInvocation | PasteImageInvocation | MaskFromAlphaInvocation | BlurInvocation | LerpInvocation | InverseLerpInvocation | CompelInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | ParamIntInvocation | CvInpaintInvocation | RangeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation)>;
nodes?: Record<string, (LoadImageInvocation | ShowImageInvocation | CropImageInvocation | PasteImageInvocation | MaskFromAlphaInvocation | BlurInvocation | LerpInvocation | InverseLerpInvocation | CompelInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | ParamIntInvocation | CvInpaintInvocation | RangeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation)>;
/**
* The connections between nodes and their fields in this graph
*/

View File

@ -40,7 +40,7 @@ export type ImageToImageInvocation = {
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'dpmpp_2' | 'k_dpm_2' | 'k_dpm_2_a' | 'k_dpmpp_2' | 'k_euler' | 'k_euler_a' | 'k_heun' | 'k_lms' | 'plms';
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
*/
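The same scheduler rename (legacy k-prefixed sampler names replaced by diffusers-style names) recurs in the Inpaint, LatentsToLatents, TextToImage and TextToLatents types below. A hedged sketch of a migration helper for previously stored metadata — only renames that are unambiguous from this diff and the updated test fixtures are listed; the ambiguous ones (e.g. 'plms', 'dpmpp_2', 'k_dpmpp_2') are deliberately left out:

// Maps legacy scheduler/sampler names to their new equivalents where the
// correspondence is clear (the fixtures, for example, change "k_lms" to "lms").
const LEGACY_SCHEDULER_MAP: Record<string, string> = {
  ddim: 'ddim',
  k_lms: 'lms',
  k_euler: 'euler',
  k_euler_a: 'euler_a',
  k_heun: 'heun',
  k_dpm_2: 'kdpm_2',
  k_dpm_2_a: 'kdpm_2_a',
};

// Falls back to the new default ("lms") when no mapping is known.
const migrateSchedulerName = (name: string): string =>
  LEGACY_SCHEDULER_MAP[name] ?? 'lms';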

View File

@ -6,7 +6,7 @@ import type { ColorField } from './ColorField';
import type { ImageField } from './ImageField';
/**
* Infills transparent areas of an image with a color
* Infills transparent areas of an image with a solid color
*/
export type InfillColorInvocation = {
/**

View File

@ -5,7 +5,7 @@
import type { ImageField } from './ImageField';
/**
* Infills transparent areas of an image with tiles of the image
* Infills transparent areas of an image using the PatchMatch algorithm
*/
export type InfillPatchMatchInvocation = {
/**

View File

@ -41,7 +41,7 @@ export type InpaintInvocation = {
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'dpmpp_2' | 'k_dpm_2' | 'k_dpm_2_a' | 'k_dpmpp_2' | 'k_euler' | 'k_euler_a' | 'k_heun' | 'k_lms' | 'plms';
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
*/

View File

@ -37,11 +37,19 @@ export type LatentsToLatentsInvocation = {
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'dpmpp_2' | 'k_dpm_2' | 'k_dpm_2_a' | 'k_dpmpp_2' | 'k_euler' | 'k_euler_a' | 'k_heun' | 'k_lms' | 'plms';
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
*/
model?: string;
/**
* Whether or not to generate an image that can tile without seams
*/
seamless?: boolean;
/**
* The axes to tile the image on, 'x' and/or 'y'
*/
seamless_axes?: string;
/**
* The latents to use as a base image
*/

View File

@ -38,7 +38,7 @@ export type TextToImageInvocation = {
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'dpmpp_2' | 'k_dpm_2' | 'k_dpm_2_a' | 'k_dpmpp_2' | 'k_euler' | 'k_euler_a' | 'k_heun' | 'k_lms' | 'plms';
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
*/

View File

@ -37,10 +37,18 @@ export type TextToLatentsInvocation = {
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'dpmpp_2' | 'k_dpm_2' | 'k_dpm_2_a' | 'k_dpmpp_2' | 'k_euler' | 'k_euler_a' | 'k_heun' | 'k_lms' | 'plms';
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
*/
model?: string;
/**
* Whether or not to generate an image that can tile without seams
*/
seamless?: boolean;
/**
* The axes to tile the image on, 'x' and/or 'y'
*/
seamless_axes?: string;
};
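With the new `seamless` / `seamless_axes` fields on the latents invocations, a client can request tileable output per node. A hypothetical partial payload — the field values are illustrative only:

// Partial<> sidesteps the required fields (id, prompt wiring, etc.) that are
// not relevant to this example.
const tileableNode: Partial<TextToLatentsInvocation> = {
  scheduler: 'euler',
  seamless: true,      // generate an image that can tile without seams
  seamless_axes: 'xy', // tile on both the x and y axes (format assumed)
};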

View File

@ -1,21 +0,0 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export const $DataURLToImageInvocation = {
description: `Outputs an image from a data URL.`,
properties: {
id: {
type: 'string',
description: `The id of this node. Must be unique among all nodes.`,
isRequired: true,
},
type: {
type: 'Enum',
},
dataURL: {
type: 'string',
description: `The b64 data URL`,
isRequired: true,
},
},
} as const;

View File

@ -15,8 +15,6 @@ export const $Graph = {
type: 'LoadImageInvocation',
}, {
type: 'ShowImageInvocation',
}, {
type: 'DataURLToImageInvocation',
}, {
type: 'CropImageInvocation',
}, {

View File

@ -2,7 +2,7 @@
/* tslint:disable */
/* eslint-disable */
export const $InfillColorInvocation = {
description: `Infills transparent areas of an image with a color`,
description: `Infills transparent areas of an image with a solid color`,
properties: {
id: {
type: 'string',

View File

@ -2,7 +2,7 @@
/* tslint:disable */
/* eslint-disable */
export const $InfillPatchMatchInvocation = {
description: `Infills transparent areas of an image with tiles of the image`,
description: `Infills transparent areas of an image using the PatchMatch algorithm`,
properties: {
id: {
type: 'string',

View File

@ -48,6 +48,14 @@ export const $LatentsToLatentsInvocation = {
type: 'string',
description: `The model to use (currently ignored)`,
},
seamless: {
type: 'boolean',
description: `Whether or not to generate an image that can tile without seams`,
},
seamless_axes: {
type: 'string',
description: `The axes to tile the image on, 'x' and/or 'y'`,
},
latents: {
type: 'all-of',
description: `The latents to use as a base image`,

View File

@ -48,5 +48,13 @@ export const $TextToLatentsInvocation = {
type: 'string',
description: `The model to use (currently ignored)`,
},
seamless: {
type: 'boolean',
description: `Whether or not to generate an image that can tile without seams`,
},
seamless_axes: {
type: 'string',
description: `The axes to tile the image on, 'x' and/or 'y'`,
},
},
} as const;

View File

@ -7,7 +7,6 @@ import type { CollectInvocation } from '../models/CollectInvocation';
import type { CompelInvocation } from '../models/CompelInvocation';
import type { CropImageInvocation } from '../models/CropImageInvocation';
import type { CvInpaintInvocation } from '../models/CvInpaintInvocation';
import type { DataURLToImageInvocation } from '../models/DataURLToImageInvocation';
import type { DivideInvocation } from '../models/DivideInvocation';
import type { Edge } from '../models/Edge';
import type { Graph } from '../models/Graph';
@ -150,7 +149,7 @@ export class SessionsService {
* The id of the session
*/
sessionId: string,
requestBody: (LoadImageInvocation | ShowImageInvocation | DataURLToImageInvocation | CropImageInvocation | PasteImageInvocation | MaskFromAlphaInvocation | BlurInvocation | LerpInvocation | InverseLerpInvocation | CompelInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | ParamIntInvocation | CvInpaintInvocation | RangeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation),
requestBody: (LoadImageInvocation | ShowImageInvocation | CropImageInvocation | PasteImageInvocation | MaskFromAlphaInvocation | BlurInvocation | LerpInvocation | InverseLerpInvocation | CompelInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | ParamIntInvocation | CvInpaintInvocation | RangeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation),
}): CancelablePromise<string> {
return __request(OpenAPI, {
method: 'POST',
@ -187,7 +186,7 @@ export class SessionsService {
* The path to the node in the graph
*/
nodePath: string,
requestBody: (LoadImageInvocation | ShowImageInvocation | DataURLToImageInvocation | CropImageInvocation | PasteImageInvocation | MaskFromAlphaInvocation | BlurInvocation | LerpInvocation | InverseLerpInvocation | CompelInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | ParamIntInvocation | CvInpaintInvocation | RangeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation),
requestBody: (LoadImageInvocation | ShowImageInvocation | CropImageInvocation | PasteImageInvocation | MaskFromAlphaInvocation | BlurInvocation | LerpInvocation | InverseLerpInvocation | CompelInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | ParamIntInvocation | CvInpaintInvocation | RangeInvocation | RandomRangeInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation),
}): CancelablePromise<GraphExecutionState> {
return __request(OpenAPI, {
method: 'PUT',

View File

@ -23,6 +23,8 @@ import { textareaTheme } from './components/textarea';
export const theme: ThemeOverride = {
config: {
cssVarPrefix: 'invokeai',
initialColorMode: 'dark',
useSystemColorMode: false,
},
styles: {
global: (_props: StyleFunctionProps) => ({
@ -39,7 +41,7 @@ export const theme: ThemeOverride = {
},
direction: 'ltr',
fonts: {
body: `'Inter', sans-serif`,
body: `'InterVariable', sans-serif`,
},
breakpoints: {
base: '0em', // 0px and onwards

View File

@ -20,13 +20,7 @@
"*": ["./src/*"]
}
},
"include": [
"src/**/*.ts",
"src/**/*.tsx",
"*.d.ts",
"src/app/store/middleware/listenerMiddleware",
"src/features/nodes/util/edgeBuilders"
],
"include": ["src/**/*.ts", "src/**/*.tsx", "*.d.ts"],
"exclude": ["src/services/fixtures/*", "node_modules", "dist"],
"references": [{ "path": "./tsconfig.node.json" }]
}

View File

@ -6673,6 +6673,11 @@ validator@^13.7.0:
resolved "https://registry.yarnpkg.com/validator/-/validator-13.9.0.tgz#33e7b85b604f3bbce9bb1a05d5c3e22e1c2ff855"
integrity sha512-B+dGG8U3fdtM0/aNK4/X8CXq/EcxU2WPrPEkJGslb47qyHsxmbggTWK0yEA4qnYVNF+nxNlN88o14hIcPmSIEA==
vite-plugin-css-injected-by-js@^3.1.1:
version "3.1.1"
resolved "https://registry.yarnpkg.com/vite-plugin-css-injected-by-js/-/vite-plugin-css-injected-by-js-3.1.1.tgz#8324412636cf6fdada1a86f595aa2e78458e5ddb"
integrity sha512-mwrFvEEy0TuH8Ul0cb2HgjmNboQ/JnEFy+kHCWqAJph3ikMOiIuyYVdx0JO4nEIWJyzSnc4TTdmoTulsikvJEg==
vite-plugin-dts@^2.3.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/vite-plugin-dts/-/vite-plugin-dts-2.3.0.tgz#6ab2edf56f48261bfede03958704bfaee2fca3e4"

View File

@ -23,7 +23,7 @@
],
"threshold": 0,
"postprocessing": null,
"sampler": "k_lms",
"sampler": "lms",
"variations": [],
"type": "txt2img"
}

View File

@ -17,7 +17,7 @@ valid_metadata = {
"width": 512,
"height": 512,
"cfg_scale": 7.5,
"scheduler": "k_lms",
"scheduler": "lms",
"model": "stable-diffusion-1.5",
},
}