diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py
index 9a29502048..bc72bbe2b3 100644
--- a/invokeai/app/invocations/generate.py
+++ b/invokeai/app/invocations/generate.py
@@ -52,7 +52,7 @@ class TextToImageInvocation(BaseInvocation, SDImageInvocation):
width: int = Field(default=512, multiple_of=8, gt=0, description="The width of the resulting image", )
height: int = Field(default=512, multiple_of=8, gt=0, description="The height of the resulting image", )
cfg_scale: float = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
- scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
+ scheduler: SAMPLER_NAME_VALUES = Field(default="lms", description="The scheduler to use" )
model: str = Field(default="", description="The model to use (currently ignored)")
# fmt: on
diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py
index d32f96857d..8b4163c4c6 100644
--- a/invokeai/app/invocations/image.py
+++ b/invokeai/app/invocations/image.py
@@ -33,8 +33,8 @@ class ImageOutput(BaseInvocationOutput):
# fmt: off
type: Literal["image"] = "image"
image: ImageField = Field(default=None, description="The output image")
- width: Optional[int] = Field(default=None, description="The width of the image in pixels")
- height: Optional[int] = Field(default=None, description="The height of the image in pixels")
+ width: int = Field(description="The width of the image in pixels")
+ height: int = Field(description="The height of the image in pixels")
# fmt: on
class Config:
diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 40575c1f64..825847cf79 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -17,6 +17,7 @@ from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import Post
from ...backend.image_util.seamless import configure_model_padding
from ...backend.prompting.conditioning import get_uc_and_c_and_ec
from ...backend.stable_diffusion.diffusers_pipeline import ConditioningData, StableDiffusionGeneratorPipeline, image_resized_to_grid_as_tensor
+from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
import numpy as np
from ..services.image_storage import ImageType
@@ -52,29 +53,20 @@ class NoiseOutput(BaseInvocationOutput):
#fmt: on
-# TODO: this seems like a hack
-scheduler_map = dict(
- ddim=diffusers.DDIMScheduler,
- dpmpp_2=diffusers.DPMSolverMultistepScheduler,
- k_dpm_2=diffusers.KDPM2DiscreteScheduler,
- k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
- k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
- k_euler=diffusers.EulerDiscreteScheduler,
- k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
- k_heun=diffusers.HeunDiscreteScheduler,
- k_lms=diffusers.LMSDiscreteScheduler,
- plms=diffusers.PNDMScheduler,
-)
-
-
SAMPLER_NAME_VALUES = Literal[
- tuple(list(scheduler_map.keys()))
+ tuple(list(SCHEDULER_MAP.keys()))
]
def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
- scheduler_class = scheduler_map.get(scheduler_name,'ddim')
- scheduler = scheduler_class.from_config(model.scheduler.config)
+ scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])
+
+ scheduler_config = model.scheduler.config
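+    # a prior call may have stashed the model's original config under "_backup";
+    # start from that so per-scheduler overrides don't pile up across swaps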
+ if "_backup" in scheduler_config:
+ scheduler_config = scheduler_config["_backup"]
+ scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
+ scheduler = scheduler_class.from_config(scheduler_config)
+
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False
@@ -148,7 +140,7 @@ class TextToLatentsInvocation(BaseInvocation):
noise: Optional[LatentsField] = Field(description="The noise to use")
steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
- scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
+ scheduler: SAMPLER_NAME_VALUES = Field(default="lms", description="The scheduler to use" )
model: str = Field(default="", description="The model to use (currently ignored)")
seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
@@ -216,7 +208,7 @@ class TextToLatentsInvocation(BaseInvocation):
h_symmetry_time_pct=None,#h_symmetry_time_pct,
v_symmetry_time_pct=None#v_symmetry_time_pct,
),
- ).add_scheduler_args_if_applicable(model.scheduler, eta=None)#ddim_eta)
+ ).add_scheduler_args_if_applicable(model.scheduler, eta=0.0)#ddim_eta)
return conditioning_data
@@ -293,11 +285,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
latent, device=model.device, dtype=latent.dtype
)
- timesteps, _ = model.get_img2img_timesteps(
- self.steps,
- self.strength,
- device=model.device,
- )
+ timesteps, _ = model.get_img2img_timesteps(self.steps, self.strength)
result_latents, result_attention_map_saver = model.latents_from_embeddings(
latents=initial_latents,
diff --git a/invokeai/backend/args.py b/invokeai/backend/args.py
index eb8b396ee0..db6fbe08df 100644
--- a/invokeai/backend/args.py
+++ b/invokeai/backend/args.py
@@ -108,17 +108,20 @@ APP_VERSION = invokeai.version.__version__
SAMPLER_CHOICES = [
"ddim",
- "k_dpm_2_a",
- "k_dpm_2",
- "k_dpmpp_2_a",
- "k_dpmpp_2",
- "k_euler_a",
- "k_euler",
- "k_heun",
- "k_lms",
- "plms",
- # diffusers:
+ "ddpm",
+ "deis",
+ "lms",
"pndm",
+ "heun",
+ "euler",
+ "euler_k",
+ "euler_a",
+ "kdpm_2",
+ "kdpm_2_a",
+ "dpmpp_2s",
+ "dpmpp_2m",
+ "dpmpp_2m_k",
+ "unipc",
]
PRECISION_CHOICES = [
@@ -631,7 +634,7 @@ class Args(object):
choices=SAMPLER_CHOICES,
metavar="SAMPLER_NAME",
help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
- default="k_lms",
+ default="lms",
)
render_group.add_argument(
"--log_tokenization",
diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py
index 4f3df60f1c..3db987bca8 100644
--- a/invokeai/backend/generate.py
+++ b/invokeai/backend/generate.py
@@ -37,6 +37,7 @@ from .safety_checker import SafetyChecker
from .prompting import get_uc_and_c_and_ec
from .prompting.conditioning import log_tokenization
from .stable_diffusion import HuggingFaceConceptsLibrary
+from .stable_diffusion.schedulers import SCHEDULER_MAP
from .util import choose_precision, choose_torch_device
def fix_func(orig):
@@ -141,7 +142,7 @@ class Generate:
model=None,
conf="configs/models.yaml",
embedding_path=None,
- sampler_name="k_lms",
+ sampler_name="lms",
ddim_eta=0.0, # deterministic
full_precision=False,
precision="auto",
@@ -1047,29 +1048,12 @@ class Generate:
def _set_scheduler(self):
default = self.model.scheduler
- # See https://github.com/huggingface/diffusers/issues/277#issuecomment-1371428672
- scheduler_map = dict(
- ddim=diffusers.DDIMScheduler,
- dpmpp_2=diffusers.DPMSolverMultistepScheduler,
- k_dpm_2=diffusers.KDPM2DiscreteScheduler,
- k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
- # DPMSolverMultistepScheduler is technically not `k_` anything, as it is neither
- # the k-diffusers implementation nor included in EDM (Karras 2022), but we can
- # provide an alias for compatibility.
- k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
- k_euler=diffusers.EulerDiscreteScheduler,
- k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
- k_heun=diffusers.HeunDiscreteScheduler,
- k_lms=diffusers.LMSDiscreteScheduler,
- plms=diffusers.PNDMScheduler,
- )
-
- if self.sampler_name in scheduler_map:
- sampler_class = scheduler_map[self.sampler_name]
+ if self.sampler_name in SCHEDULER_MAP:
+ sampler_class, sampler_extra_config = SCHEDULER_MAP[self.sampler_name]
msg = (
f"Setting Sampler to {self.sampler_name} ({sampler_class.__name__})"
)
- self.sampler = sampler_class.from_config(self.model.scheduler.config)
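+            # apply the per-scheduler overrides (e.g. use_karras_sigmas) on top of the model's config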
+ self.sampler = sampler_class.from_config({**self.model.scheduler.config, **sampler_extra_config})
else:
msg = (
f" Unsupported Sampler: {self.sampler_name} "+
diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index 9887434e90..8f5b1a8395 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -31,6 +31,7 @@ from ..util.util import rand_perlin_2d
from ..safety_checker import SafetyChecker
from ..prompting.conditioning import get_uc_and_c_and_ec
from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
+from ..stable_diffusion.schedulers import SCHEDULER_MAP
downsampling = 8
@@ -71,19 +72,6 @@ class InvokeAIGeneratorOutput:
# we are interposing a wrapper around the original Generator classes so that
# old code that calls Generate will continue to work.
class InvokeAIGenerator(metaclass=ABCMeta):
- scheduler_map = dict(
- ddim=diffusers.DDIMScheduler,
- dpmpp_2=diffusers.DPMSolverMultistepScheduler,
- k_dpm_2=diffusers.KDPM2DiscreteScheduler,
- k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
- k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
- k_euler=diffusers.EulerDiscreteScheduler,
- k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
- k_heun=diffusers.HeunDiscreteScheduler,
- k_lms=diffusers.LMSDiscreteScheduler,
- plms=diffusers.PNDMScheduler,
- )
-
def __init__(self,
model_info: dict,
params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
@@ -175,14 +163,20 @@ class InvokeAIGenerator(metaclass=ABCMeta):
'''
Return list of all the schedulers that we currently handle.
'''
- return list(self.scheduler_map.keys())
+ return list(SCHEDULER_MAP.keys())
def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]):
return generator_class(model, self.params.precision)
def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
- scheduler_class = self.scheduler_map.get(scheduler_name,'ddim')
- scheduler = scheduler_class.from_config(model.scheduler.config)
+ scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])
+
+ scheduler_config = model.scheduler.config
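+    # restore the original config stashed under "_backup" (if any) so that
+    # per-scheduler overrides don't accumulate across scheduler swaps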
+ if "_backup" in scheduler_config:
+ scheduler_config = scheduler_config["_backup"]
+ scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
+ scheduler = scheduler_class.from_config(scheduler_config)
+
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False
diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index 8aec5a01d9..1ebd044d54 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -47,6 +47,7 @@ from diffusers import (
LDMTextToImagePipeline,
LMSDiscreteScheduler,
PNDMScheduler,
+ UniPCMultistepScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
@@ -1209,6 +1210,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
elif scheduler_type == "dpm":
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
+ elif scheduler_type == 'unipc':
+ scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
elif scheduler_type == "ddim":
scheduler = scheduler
else:
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index a0a899a319..4f94395a86 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -30,7 +30,7 @@ from diffusers import (
UNet2DConditionModel,
SchedulerMixin,
logging as dlogging,
-)
+)
from huggingface_hub import scan_cache_dir
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
@@ -68,7 +68,7 @@ class SDModelComponent(Enum):
scheduler="scheduler"
safety_checker="safety_checker"
feature_extractor="feature_extractor"
-
+
DEFAULT_MAX_MODELS = 2
class ModelManager(object):
@@ -182,7 +182,7 @@ class ModelManager(object):
vae from the model currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.vae)
-
+
def get_model_tokenizer(self, model_name: str=None)->CLIPTokenizer:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned CLIPTokenizer. If no
@@ -190,12 +190,12 @@ class ModelManager(object):
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.tokenizer)
-
+
def get_model_unet(self, model_name: str=None)->UNet2DConditionModel:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned UNet2DConditionModel. If no model
name is provided, return the UNet from the model
- currently in the GPU.
+ currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.unet)
@@ -222,7 +222,7 @@ class ModelManager(object):
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.scheduler)
-
+
def _get_sub_model(
self,
model_name: str=None,
@@ -1228,7 +1228,7 @@ class ModelManager(object):
sha.update(chunk)
hash = sha.hexdigest()
toc = time.time()
- self.logger.debug(f"sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
+ self.logger.debug(f"sha256 = {hash} ({count} files hashed in {toc - tic:4.2f}s)")
with open(hashpath, "w") as f:
f.write(hash)
return hash
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 94ec9da7e8..c8a932b9e9 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -509,10 +509,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
run_id=None,
callback: Callable[[PipelineIntermediateState], None] = None,
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
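+        # schedulers flagged cpu_only in SCHEDULER_MAP (currently only UniPC) must keep
+        # their timestep computation on the CPU; everything else follows the UNet's device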
+ if self.scheduler.config.get("cpu_only", False):
+ scheduler_device = torch.device('cpu')
+ else:
+ scheduler_device = self._model_group.device_for(self.unet)
+
if timesteps is None:
- self.scheduler.set_timesteps(
- num_inference_steps, device=self._model_group.device_for(self.unet)
- )
+ self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
timesteps = self.scheduler.timesteps
infer_latents_from_embeddings = GeneratorToCallbackinator(
self.generate_latents_from_embeddings, PipelineIntermediateState
@@ -725,12 +728,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
noise: torch.Tensor,
run_id=None,
callback=None,
- ) -> InvokeAIStableDiffusionPipelineOutput:
- timesteps, _ = self.get_img2img_timesteps(
- num_inference_steps,
- strength,
- device=self._model_group.device_for(self.unet),
- )
+ ) -> InvokeAIStableDiffusionPipelineOutput:
+ timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
result_latents, result_attention_maps = self.latents_from_embeddings(
latents=initial_latents if strength < 1.0 else torch.zeros_like(
initial_latents, device=initial_latents.device, dtype=initial_latents.dtype
@@ -756,13 +755,19 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
return self.check_for_safety(output, dtype=conditioning_data.dtype)
def get_img2img_timesteps(
- self, num_inference_steps: int, strength: float, device
+ self, num_inference_steps: int, strength: float, device=None
) -> (torch.Tensor, int):
img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components)
assert img2img_pipeline.scheduler is self.scheduler
- img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=device)
+
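+        # `device` is kept for backward compatibility but is ignored: the scheduler's
+        # cpu_only flag now decides where timesteps are computed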
+ if self.scheduler.config.get("cpu_only", False):
+ scheduler_device = torch.device('cpu')
+ else:
+ scheduler_device = self._model_group.device_for(self.unet)
+
+ img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
timesteps, adjusted_steps = img2img_pipeline.get_timesteps(
- num_inference_steps, strength, device=device
+ num_inference_steps, strength, device=scheduler_device
)
# Workaround for low strength resulting in zero timesteps.
# TODO: submit upstream fix for zero-step img2img
@@ -796,9 +801,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
if init_image.dim() == 3:
init_image = init_image.unsqueeze(0)
- timesteps, _ = self.get_img2img_timesteps(
- num_inference_steps, strength, device=device
- )
+ timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
# 6. Prepare latent variables
# can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents
diff --git a/invokeai/backend/stable_diffusion/schedulers/__init__.py b/invokeai/backend/stable_diffusion/schedulers/__init__.py
new file mode 100644
index 0000000000..b2df0df231
--- /dev/null
+++ b/invokeai/backend/stable_diffusion/schedulers/__init__.py
@@ -0,0 +1 @@
+from .schedulers import SCHEDULER_MAP
\ No newline at end of file
diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py
new file mode 100644
index 0000000000..fab28aca8c
--- /dev/null
+++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py
@@ -0,0 +1,22 @@
+from diffusers import (
+    DDIMScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepScheduler,
+    DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler,
+    HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, KDPM2DiscreteScheduler,
+    LMSDiscreteScheduler, PNDMScheduler, UniPCMultistepScheduler,
+)
+
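+# Each entry maps a scheduler name to (scheduler class, extra config overrides
+# merged on top of the model's scheduler config when the scheduler is built).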
+SCHEDULER_MAP = dict(
+ ddim=(DDIMScheduler, dict()),
+ ddpm=(DDPMScheduler, dict()),
+ deis=(DEISMultistepScheduler, dict()),
+ lms=(LMSDiscreteScheduler, dict()),
+ pndm=(PNDMScheduler, dict()),
+ heun=(HeunDiscreteScheduler, dict()),
+ euler=(EulerDiscreteScheduler, dict(use_karras_sigmas=False)),
+ euler_k=(EulerDiscreteScheduler, dict(use_karras_sigmas=True)),
+ euler_a=(EulerAncestralDiscreteScheduler, dict()),
+ kdpm_2=(KDPM2DiscreteScheduler, dict()),
+ kdpm_2_a=(KDPM2AncestralDiscreteScheduler, dict()),
+ dpmpp_2s=(DPMSolverSinglestepScheduler, dict()),
+ dpmpp_2m=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False)),
+ dpmpp_2m_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)),
+ unipc=(UniPCMultistepScheduler, dict(cpu_only=True))
+)
diff --git a/invokeai/backend/web/modules/parameters.py b/invokeai/backend/web/modules/parameters.py
index 3c9c530dd2..72211857a3 100644
--- a/invokeai/backend/web/modules/parameters.py
+++ b/invokeai/backend/web/modules/parameters.py
@@ -4,17 +4,20 @@ from .parse_seed_weights import parse_seed_weights
SAMPLER_CHOICES = [
"ddim",
- "k_dpm_2_a",
- "k_dpm_2",
- "k_dpmpp_2_a",
- "k_dpmpp_2",
- "k_euler_a",
- "k_euler",
- "k_heun",
- "k_lms",
- "plms",
- # diffusers:
+ "ddpm",
+ "deis",
+ "lms",
"pndm",
+ "heun",
+ "euler",
+ "euler_k",
+ "euler_a",
+ "kdpm_2",
+ "kdpm_2_a",
+ "dpmpp_2s",
+ "dpmpp_2m",
+ "dpmpp_2m_k",
+ "unipc",
]
diff --git a/invokeai/frontend/web/docs/README.md b/invokeai/frontend/web/docs/README.md
index 787725cdda..323dcc5bc7 100644
--- a/invokeai/frontend/web/docs/README.md
+++ b/invokeai/frontend/web/docs/README.md
@@ -37,7 +37,7 @@ From `invokeai/frontend/web/` run `yarn install` to get everything set up.
Start everything in dev mode:
1. Start the dev server: `yarn dev`
-2. Start the InvokeAI UI per usual: `invokeai --web`
+2. Start the InvokeAI Nodes backend: `python scripts/invokeai-new.py --web` (run from the repo root)
3. Point your browser to the dev server address e.g. <http://localhost:5173/>
### Production builds
diff --git a/invokeai/frontend/web/src/app/constants.ts b/invokeai/frontend/web/src/app/constants.ts
index 534ca9e29a..189fbc9dd4 100644
--- a/invokeai/frontend/web/src/app/constants.ts
+++ b/invokeai/frontend/web/src/app/constants.ts
@@ -2,17 +2,28 @@
export const DIFFUSERS_SCHEDULERS: Array<string> = [
'ddim',
- 'plms',
- 'k_lms',
- 'dpmpp_2',
- 'k_dpm_2',
- 'k_dpm_2_a',
- 'k_dpmpp_2',
- 'k_euler',
- 'k_euler_a',
- 'k_heun',
+ 'ddpm',
+ 'deis',
+ 'lms',
+ 'pndm',
+ 'heun',
+ 'euler',
+ 'euler_k',
+ 'euler_a',
+ 'kdpm_2',
+ 'kdpm_2_a',
+ 'dpmpp_2s',
+ 'dpmpp_2m',
+ 'dpmpp_2m_k',
+ 'unipc',
];
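+
+// Schedulers that should not be offered on the img2img / unified canvas tabs;
+// currently only dpmpp_2s is excluded there (see ParamSampler).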
+export const IMG2IMG_DIFFUSERS_SCHEDULERS = DIFFUSERS_SCHEDULERS.filter(
+ (scheduler) => {
+ return scheduler !== 'dpmpp_2s';
+ }
+);
+
// Valid image widths
export const WIDTHS: Array<number> = Array.from(Array(64)).map(
(_x, i) => (i + 1) * 64
diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts
index 4023f7665d..b79504d8f2 100644
--- a/invokeai/frontend/web/src/app/types/invokeai.ts
+++ b/invokeai/frontend/web/src/app/types/invokeai.ts
@@ -47,15 +47,20 @@ export type CommonGeneratedImageMetadata = {
postprocessing: null | Array;
sampler:
| 'ddim'
- | 'k_dpm_2_a'
- | 'k_dpm_2'
- | 'k_dpmpp_2_a'
- | 'k_dpmpp_2'
- | 'k_euler_a'
- | 'k_euler'
- | 'k_heun'
- | 'k_lms'
- | 'plms';
+ | 'ddpm'
+ | 'deis'
+ | 'lms'
+ | 'pndm'
+ | 'heun'
+ | 'euler'
+ | 'euler_k'
+ | 'euler_a'
+ | 'kdpm_2'
+ | 'kdpm_2_a'
+ | 'dpmpp_2s'
+ | 'dpmpp_2m'
+ | 'dpmpp_2m_k'
+ | 'unipc';
prompt: Prompt;
seed: number;
variations: SeedWeights;
diff --git a/invokeai/frontend/web/src/common/components/ImageMetadataOverlay.tsx b/invokeai/frontend/web/src/common/components/ImageMetadataOverlay.tsx
new file mode 100644
index 0000000000..64d5e1beef
--- /dev/null
+++ b/invokeai/frontend/web/src/common/components/ImageMetadataOverlay.tsx
@@ -0,0 +1,54 @@
+import { Badge, Flex } from '@chakra-ui/react';
+import { Image } from 'app/types/invokeai';
+import { isNumber, isString } from 'lodash-es';
+import { useMemo } from 'react';
+
+type ImageMetadataOverlayProps = {
+ image: Image;
+};
+
+const ImageMetadataOverlay = ({ image }: ImageMetadataOverlayProps) => {
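+  // show a "W × H" badge only when both dimensions are numeric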
+ const dimensions = useMemo(() => {
+    if (!isNumber(image.metadata?.width) || !isNumber(image.metadata?.height)) {
+ return;
+ }
+
+ return `${image.metadata?.width} × ${image.metadata?.height}`;
+ }, [image.metadata]);
+
+ const model = useMemo(() => {
+ if (!isString(image.metadata?.invokeai?.node?.model)) {
+ return;
+ }
+
+ return image.metadata?.invokeai?.node?.model;
+ }, [image.metadata]);
+
+ return (
+    <Flex
+      sx={{
+        pointerEvents: 'none',
+        flexDirection: 'column',
+        alignItems: 'flex-end',
+        gap: 2,
+        position: 'absolute',
+        top: 0,
+        right: 0,
+        p: 2,
+      }}
+    >
+      {dimensions && (
+        <Badge>{dimensions}</Badge>
+      )}
+      {model && (
+        <Badge>{model}</Badge>
+      )}
+    </Flex>
+  );
+};
+
+export default ImageMetadataOverlay;
diff --git a/invokeai/frontend/web/src/common/components/ImageToImageOverlay.tsx b/invokeai/frontend/web/src/common/components/ImageToImageOverlay.tsx
deleted file mode 100644
index 9d864f5c9c..0000000000
--- a/invokeai/frontend/web/src/common/components/ImageToImageOverlay.tsx
+++ /dev/null
@@ -1,37 +0,0 @@
-import { Badge, Box, Flex } from '@chakra-ui/react';
-import { Image } from 'app/types/invokeai';
-
-type ImageToImageOverlayProps = {
- image: Image;
-};
-
-const ImageToImageOverlay = ({ image }: ImageToImageOverlayProps) => {
-  return (
-    <Flex sx={{ position: 'absolute', top: 0, left: 0, w: 'full', h: 'full', pointerEvents: 'none' }}>
-      <Box sx={{ position: 'absolute', bottom: 2, right: 2 }}>
-        <Badge>
-          {image.metadata?.width} × {image.metadata?.height}
-        </Badge>
-      </Box>
-    </Flex>
-  );
-};
-
-export default ImageToImageOverlay;
diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx
index a0fbd7c5d1..b1dbed5a81 100644
--- a/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/CurrentImagePreview.tsx
@@ -1,4 +1,4 @@
-import { Box, Flex, Image, Skeleton, useBoolean } from '@chakra-ui/react';
+import { Box, Flex, Image } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { useGetUrl } from 'common/util/getUrl';
@@ -11,7 +11,8 @@ import NextPrevImageButtons from './NextPrevImageButtons';
import CurrentImageHidden from './CurrentImageHidden';
import { DragEvent, memo, useCallback } from 'react';
import { systemSelector } from 'features/system/store/systemSelectors';
-import CurrentImageFallback from './CurrentImageFallback';
+import ImageFallbackSpinner from './ImageFallbackSpinner';
+import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay';
export const imagesSelector = createSelector(
[uiSelector, gallerySelector, systemSelector],
@@ -50,8 +51,6 @@ const CurrentImagePreview = () => {
} = useAppSelector(imagesSelector);
const { getUrl } = useGetUrl();
- const [isLoaded, { on, off }] = useBoolean();
-
const handleDragStart = useCallback(
(e: DragEvent) => {
if (!image) {
@@ -67,11 +66,11 @@ const CurrentImagePreview = () => {
return (
{progressImage && shouldShowProgressInViewer ? (
@@ -91,28 +90,23 @@ const CurrentImagePreview = () => {
/>
) : (
image && (
-            <Image
-              src={getUrl(image.url)}
-              onDragStart={handleDragStart}
-              onLoad={on}
-              fallback={
-                !isLoaded ? (
-                  <CurrentImageFallback />
-                ) : (
-                  <Skeleton h="full" w="full" />
-                )
-              }
- sx={{
- objectFit: 'contain',
- maxWidth: '100%',
- maxHeight: '100%',
- height: 'auto',
- position: 'absolute',
- borderRadius: 'base',
- }}
- />
+            <>
+              <Image
+                src={getUrl(image.url)}
+                fallback={<ImageFallbackSpinner />}
+                onDragStart={handleDragStart}
+                sx={{
+                  objectFit: 'contain',
+                  maxWidth: '100%',
+                  maxHeight: '100%',
+                  height: 'auto',
+                  position: 'absolute',
+                  borderRadius: 'base',
+                }}
+              />
+              <ImageMetadataOverlay image={image} />
+            </>
)
)}
{shouldShowImageDetails && image && 'metadata' in image && (
diff --git a/invokeai/frontend/web/src/features/gallery/components/GalleryProgressImage.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryProgressImage.tsx
index b812849c44..a2103eb8e2 100644
--- a/invokeai/frontend/web/src/features/gallery/components/GalleryProgressImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/GalleryProgressImage.tsx
@@ -1,4 +1,4 @@
-import { Box, Flex, Image } from '@chakra-ui/react';
+import { Flex, Image, Spinner } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
@@ -42,6 +42,7 @@ const GalleryProgressImage = () => {
alignItems: 'center',
justifyContent: 'center',
aspectRatio: '1/1',
+ position: 'relative',
}}
>
      <Image
        src={progressImage.dataURL}
        sx={{
imageRendering: shouldAntialiasProgressImage ? 'auto' : 'pixelated',
}}
/>
+      <Spinner sx={{ position: 'absolute' }} />
);
};
diff --git a/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx b/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx
index 2e5f166025..35ddefe181 100644
--- a/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/HoverableImage.tsx
@@ -278,6 +278,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
h: 'full',
transition: 'transform 0.2s ease-out',
aspectRatio: '1/1',
+ cursor: 'pointer',
}}
>
diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImageFallback.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageFallbackSpinner.tsx
--- a/invokeai/frontend/web/src/features/gallery/components/CurrentImageFallback.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageFallbackSpinner.tsx
@@ -1,11 +1,11 @@
-type CurrentImageFallbackProps = SpinnerProps;
+type ImageFallbackSpinnerProps = SpinnerProps;
 
-const CurrentImageFallback = (props: CurrentImageFallbackProps) => {
+const ImageFallbackSpinner = (props: ImageFallbackSpinnerProps) => {
const { size = 'xl', ...rest } = props;
return (
@@ -21,4 +21,4 @@ const CurrentImageFallback = (props: CurrentImageFallbackProps) => {
);
};
-export default CurrentImageFallback;
+export default ImageFallbackSpinner;
diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts
index 2326295451..81705086b3 100644
--- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts
+++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts
@@ -1,6 +1,7 @@
import type { PayloadAction } from '@reduxjs/toolkit';
import { createSlice } from '@reduxjs/toolkit';
import { Image } from 'app/types/invokeai';
+import { imageReceived, thumbnailReceived } from 'services/thunks/image';
type GalleryImageObjectFitType = 'contain' | 'cover';
@@ -63,6 +64,29 @@ export const gallerySlice = createSlice({
state.shouldUseSingleGalleryColumn = action.payload;
},
},
+ extraReducers(builder) {
+ builder.addCase(imageReceived.fulfilled, (state, action) => {
+ // When we get an updated URL for an image, we need to update the selectedImage in gallery,
+ // which is currently its own object (instead of a reference to an image in results/uploads)
+ const { imagePath } = action.payload;
+ const { imageName } = action.meta.arg;
+
+ if (state.selectedImage?.name === imageName) {
+ state.selectedImage.url = imagePath;
+ }
+ });
+
+ builder.addCase(thumbnailReceived.fulfilled, (state, action) => {
+ // When we get an updated URL for an image, we need to update the selectedImage in gallery,
+ // which is currently its own object (instead of a reference to an image in results/uploads)
+ const { thumbnailPath } = action.payload;
+ const { thumbnailName } = action.meta.arg;
+
+ if (state.selectedImage?.name === thumbnailName) {
+ state.selectedImage.thumbnail = thumbnailPath;
+ }
+ });
+ },
});
export const {
diff --git a/invokeai/frontend/web/src/features/nodes/examples/iterationGraph.ts b/invokeai/frontend/web/src/features/nodes/examples/iterationGraph.ts
index 46ee5289b2..3a5b6fb888 100644
--- a/invokeai/frontend/web/src/features/nodes/examples/iterationGraph.ts
+++ b/invokeai/frontend/web/src/features/nodes/examples/iterationGraph.ts
@@ -20,7 +20,7 @@ export const iterationGraph = {
model: '',
progress_images: false,
prompt: 'dog',
- sampler_name: 'k_lms',
+ sampler_name: 'lms',
seamless: false,
steps: 11,
type: 'txt2img',
diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamSampler.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamSampler.tsx
index 5a20f54438..9bd22d9abe 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamSampler.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Core/ParamSampler.tsx
@@ -1,8 +1,12 @@
-import { DIFFUSERS_SCHEDULERS } from 'app/constants';
+import {
+ DIFFUSERS_SCHEDULERS,
+ IMG2IMG_DIFFUSERS_SCHEDULERS,
+} from 'app/constants';
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAISelect from 'common/components/IAISelect';
import { setSampler } from 'features/parameters/store/generationSlice';
+import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { ChangeEvent, memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -10,6 +14,9 @@ const ParamSampler = () => {
const sampler = useAppSelector(
(state: RootState) => state.generation.sampler
);
+
+ const activeTabName = useAppSelector(activeTabNameSelector);
+
const dispatch = useAppDispatch();
const { t } = useTranslation();
@@ -23,7 +30,11 @@ const ParamSampler = () => {
label={t('parameters.sampler')}
value={sampler}
onChange={handleChange}
- validValues={DIFFUSERS_SCHEDULERS}
+ validValues={
+          activeTabName === 'img2img' || activeTabName === 'unifiedCanvas'
+ ? IMG2IMG_DIFFUSERS_SCHEDULERS
+ : DIFFUSERS_SCHEDULERS
+ }
minWidth={36}
/>
);
diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/ImageToImageStrength.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/ImageToImageStrength.tsx
index b467b15091..eea5ec3c27 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/ImageToImageStrength.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/ImageToImageStrength.tsx
@@ -47,7 +47,7 @@ const ImageToImageStrength = () => {
return (
diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx
--- a/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/ImageToImage/InitialImagePreview.tsx
@@ -24,8 +24,6 @@ const InitialImagePreview = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
- const [isLoaded, setIsLoaded] = useState(false);
-
const onError = () => {
dispatch(
addToast({
@@ -42,13 +41,10 @@ const InitialImagePreview = () => {
})
);
dispatch(clearInitialImage());
- setIsLoaded(false);
};
const handleDrop = useCallback(
(e: DragEvent) => {
- setIsLoaded(false);
-
const name = e.dataTransfer.getData('invokeai/imageName');
const type = e.dataTransfer.getData('invokeai/imageType') as ImageType;
@@ -62,48 +58,32 @@ const InitialImagePreview = () => {
sx={{
width: 'full',
height: 'full',
+ position: 'relative',
alignItems: 'center',
justifyContent: 'center',
- position: 'relative',
}}
onDrop={handleDrop}
>
-        <Flex sx={{ alignItems: 'center', justifyContent: 'center' }}>
-          {initialImage?.url && (
-            <>
-              <Image
-                src={getUrl(initialImage?.url)}
-                onLoad={() => {
-                  setIsLoaded(true);
-                }}
-                fallback={
-                  <Flex sx={{ alignItems: 'center', justifyContent: 'center' }}>
-                    <Spinner size="xl" />
-                  </Flex>
-                }
-              />
-              {isLoaded && <ImageToImageOverlay image={initialImage} />}
-            </>
-          )}
-          {!initialImage?.url && <SelectImagePlaceholder />}
-        </Flex>
+      {initialImage?.url && (
+        <>
+          <Image
+            src={getUrl(initialImage?.url)}
+            fallback={<ImageFallbackSpinner />}
+            onError={onError}
+            sx={{
+              objectFit: 'contain',
+              maxWidth: '100%',
+              maxHeight: '100%',
+              height: 'auto',
+              position: 'absolute',
+              borderRadius: 'base',
+            }}
+          />
+          <ImageMetadataOverlay image={initialImage} />
+        </>
+      )}
+      {!initialImage?.url && <SelectImagePlaceholder />}
);
};
diff --git a/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts b/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts
index b0adc578a0..5ad0e4973c 100644
--- a/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts
+++ b/invokeai/frontend/web/src/features/parameters/store/generationSlice.ts
@@ -51,7 +51,7 @@ export const initialGenerationState: GenerationState = {
perlin: 0,
prompt: '',
negativePrompt: '',
- sampler: 'k_lms',
+ sampler: 'lms',
seamBlur: 16,
seamSize: 96,
seamSteps: 30,
diff --git a/invokeai/frontend/web/src/features/system/store/systemSlice.ts b/invokeai/frontend/web/src/features/system/store/systemSlice.ts
index 1aeb2a1939..e9cbd21a15 100644
--- a/invokeai/frontend/web/src/features/system/store/systemSlice.ts
+++ b/invokeai/frontend/web/src/features/system/store/systemSlice.ts
@@ -418,6 +418,7 @@ export const systemSlice = createSlice({
state.currentStep = 0;
state.totalSteps = 0;
state.statusTranslationKey = 'common.statusConnected';
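+      // drop any in-flight progress image so the viewer doesn't show a stale preview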
+ state.progressImage = null;
state.toastQueue.push(
makeToast({ title: t('toast.canceled'), status: 'warning' })
diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx
index 5d85230140..aba85646af 100644
--- a/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/tabs/ImageToImage/ImageToImageTabCoreParameters.tsx
@@ -64,8 +64,6 @@ const ImageToImageTabCoreParameters = () => {
-
-
@@ -74,6 +72,8 @@ const ImageToImageTabCoreParameters = () => {
+
+
diff --git a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasCoreParameters.tsx b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasCoreParameters.tsx
index cc03ef560d..74949a399d 100644
--- a/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasCoreParameters.tsx
+++ b/invokeai/frontend/web/src/features/ui/components/tabs/UnifiedCanvas/UnifiedCanvasCoreParameters.tsx
@@ -62,8 +62,6 @@ const UnifiedCanvasCoreParameters = () => {
-
-
@@ -72,8 +70,9 @@ const UnifiedCanvasCoreParameters = () => {
+
+
-
)}
diff --git a/invokeai/frontend/web/src/i18n.ts b/invokeai/frontend/web/src/i18n.ts
index f28365db20..71d4dfb35f 100644
--- a/invokeai/frontend/web/src/i18n.ts
+++ b/invokeai/frontend/web/src/i18n.ts
@@ -3,7 +3,7 @@ import LanguageDetector from 'i18next-browser-languagedetector';
import Backend from 'i18next-http-backend';
import { initReactI18next } from 'react-i18next';
-import translationEN from '../dist/locales/en.json';
+import translationEN from '../public/locales/en.json';
import { LOCALSTORAGE_PREFIX } from 'app/store/constants';
if (import.meta.env.MODE === 'package') {
diff --git a/tests/inpainting/original.json b/tests/inpainting/original.json
index f057f6d0dc..1a9320da03 100644
--- a/tests/inpainting/original.json
+++ b/tests/inpainting/original.json
@@ -23,7 +23,7 @@
],
"threshold": 0,
"postprocessing": null,
- "sampler": "k_lms",
+ "sampler": "lms",
"variations": [],
"type": "txt2img"
}
diff --git a/tests/nodes/test_png_metadata_service.py b/tests/nodes/test_png_metadata_service.py
index c724074518..975e716fa9 100644
--- a/tests/nodes/test_png_metadata_service.py
+++ b/tests/nodes/test_png_metadata_service.py
@@ -17,7 +17,7 @@ valid_metadata = {
"width": 512,
"height": 512,
"cfg_scale": 7.5,
- "scheduler": "k_lms",
+ "scheduler": "lms",
"model": "stable-diffusion-1.5",
},
}