Merge branch 'main' into feat/ui/expand-config

commit f929359322
blessedcoolant, 2023-05-12 16:06:31 +12:00, committed by GitHub
25 changed files with 217 additions and 163 deletions

View File

@@ -52,7 +52,7 @@ class TextToImageInvocation(BaseInvocation, SDImageInvocation):
width: int = Field(default=512, multiple_of=8, gt=0, description="The width of the resulting image", )
height: int = Field(default=512, multiple_of=8, gt=0, description="The height of the resulting image", )
cfg_scale: float = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
-scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
+scheduler: SAMPLER_NAME_VALUES = Field(default="lms", description="The scheduler to use" )
model: str = Field(default="", description="The model to use (currently ignored)")
# fmt: on

View File

@@ -33,8 +33,8 @@ class ImageOutput(BaseInvocationOutput):
# fmt: off
type: Literal["image"] = "image"
image: ImageField = Field(default=None, description="The output image")
-width: Optional[int] = Field(default=None, description="The width of the image in pixels")
-height: Optional[int] = Field(default=None, description="The height of the image in pixels")
+width: int = Field(description="The width of the image in pixels")
+height: int = Field(description="The height of the image in pixels")
# fmt: on
class Config:

View File

@@ -17,6 +17,7 @@ from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import Post
from ...backend.image_util.seamless import configure_model_padding
from ...backend.prompting.conditioning import get_uc_and_c_and_ec
from ...backend.stable_diffusion.diffusers_pipeline import ConditioningData, StableDiffusionGeneratorPipeline, image_resized_to_grid_as_tensor
+from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
import numpy as np
from ..services.image_storage import ImageType
@@ -40,41 +41,55 @@ class LatentsField(BaseModel):
class LatentsOutput(BaseInvocationOutput):
"""Base class for invocations that output latents"""
#fmt: off
-type: Literal["latent_output"] = "latent_output"
-latents: LatentsField = Field(default=None, description="The output latents")
+type: Literal["latents_output"] = "latents_output"
+# Inputs
+latents: LatentsField = Field(default=None, description="The output latents")
+width: int = Field(description="The width of the latents in pixels")
+height: int = Field(description="The height of the latents in pixels")
#fmt: on
+def build_latents_output(latents_name: str, latents: torch.Tensor):
+return LatentsOutput(
+latents=LatentsField(latents_name=latents_name),
+width=latents.size()[3] * 8,
+height=latents.size()[2] * 8,
+)
class NoiseOutput(BaseInvocationOutput):
"""Invocation noise output"""
#fmt: off
-type: Literal["noise_output"] = "noise_output"
+type: Literal["noise_output"] = "noise_output"
+# Inputs
noise: LatentsField = Field(default=None, description="The output noise")
+width: int = Field(description="The width of the noise in pixels")
+height: int = Field(description="The height of the noise in pixels")
#fmt: on
-# TODO: this seems like a hack
-scheduler_map = dict(
-ddim=diffusers.DDIMScheduler,
-dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-k_dpm_2=diffusers.KDPM2DiscreteScheduler,
-k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
-k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-k_euler=diffusers.EulerDiscreteScheduler,
-k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
-k_heun=diffusers.HeunDiscreteScheduler,
-k_lms=diffusers.LMSDiscreteScheduler,
-plms=diffusers.PNDMScheduler,
-)
+def build_noise_output(latents_name: str, latents: torch.Tensor):
+return NoiseOutput(
+noise=LatentsField(latents_name=latents_name),
+width=latents.size()[3] * 8,
+height=latents.size()[2] * 8,
+)
SAMPLER_NAME_VALUES = Literal[
-tuple(list(scheduler_map.keys()))
+tuple(list(SCHEDULER_MAP.keys()))
]
def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
-scheduler_class = scheduler_map.get(scheduler_name,'ddim')
-scheduler = scheduler_class.from_config(model.scheduler.config)
+scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])
+scheduler_config = model.scheduler.config
+if "_backup" in scheduler_config:
+scheduler_config = scheduler_config["_backup"]
+scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
+scheduler = scheduler_class.from_config(scheduler_config)
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False
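
The `_backup` round-trip above keeps scheduler swaps idempotent: without it, extra options merged in for one scheduler (e.g. `use_karras_sigmas`) would leak into the config handed to the next one. A minimal sketch of the idea, using plain dicts in place of diffusers' config objects (`swap_scheduler_config` is a made-up name for illustration):

```python
def swap_scheduler_config(current: dict, extra: dict) -> dict:
    # Restore the pristine config if a previous swap already decorated it,
    # then merge this scheduler's extras and stash the pristine copy again.
    base = current.get("_backup", current)
    return {**base, **extra, "_backup": base}

config = {"num_train_timesteps": 1000}
after_euler_k = swap_scheduler_config(config, {"use_karras_sigmas": True})
after_ddim = swap_scheduler_config(after_euler_k, {})
assert "use_karras_sigmas" not in after_ddim  # the extra option did not leak
```
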
@@ -130,9 +145,7 @@ class NoiseInvocation(BaseInvocation):
name = f'{context.graph_execution_state_id}__{self.id}'
context.services.latents.set(name, noise)
-return NoiseOutput(
-noise=LatentsField(latents_name=name)
-)
+return build_noise_output(latents_name=name, latents=noise)
# Text to image
@@ -148,7 +161,7 @@ class TextToLatentsInvocation(BaseInvocation):
noise: Optional[LatentsField] = Field(description="The noise to use")
steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
-scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
+scheduler: SAMPLER_NAME_VALUES = Field(default="lms", description="The scheduler to use" )
model: str = Field(default="", description="The model to use (currently ignored)")
seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
@@ -216,7 +229,7 @@ class TextToLatentsInvocation(BaseInvocation):
h_symmetry_time_pct=None,#h_symmetry_time_pct,
v_symmetry_time_pct=None#v_symmetry_time_pct,
),
-).add_scheduler_args_if_applicable(model.scheduler, eta=None)#ddim_eta)
+).add_scheduler_args_if_applicable(model.scheduler, eta=0.0)#ddim_eta)
return conditioning_data
@@ -248,9 +261,7 @@ class TextToLatentsInvocation(BaseInvocation):
name = f'{context.graph_execution_state_id}__{self.id}'
context.services.latents.set(name, result_latents)
-return LatentsOutput(
-latents=LatentsField(latents_name=name)
-)
+return build_latents_output(latents_name=name, latents=result_latents)
class LatentsToLatentsInvocation(TextToLatentsInvocation):
@@ -293,11 +304,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
latent, device=model.device, dtype=latent.dtype
)
-timesteps, _ = model.get_img2img_timesteps(
-self.steps,
-self.strength,
-device=model.device,
-)
+timesteps, _ = model.get_img2img_timesteps(self.steps, self.strength)
result_latents, result_attention_map_saver = model.latents_from_embeddings(
latents=initial_latents,
@@ -313,9 +320,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
name = f'{context.graph_execution_state_id}__{self.id}'
context.services.latents.set(name, result_latents)
-return LatentsOutput(
-latents=LatentsField(latents_name=name)
-)
+return build_latents_output(latents_name=name, latents=result_latents)
# Latent to image
@@ -379,11 +384,11 @@ class ResizeLatentsInvocation(BaseInvocation):
type: Literal["lresize"] = "lresize"
# Inputs
-latents: Optional[LatentsField] = Field(description="The latents to resize")
-width: int = Field(ge=64, multiple_of=8, description="The width to resize to (px)")
-height: int = Field(ge=64, multiple_of=8, description="The height to resize to (px)")
-mode: Optional[LATENTS_INTERPOLATION_MODE] = Field(default="bilinear", description="The interpolation mode")
-antialias: Optional[bool] = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
+latents: Optional[LatentsField] = Field(description="The latents to resize")
+width: int = Field(ge=64, multiple_of=8, description="The width to resize to (px)")
+height: int = Field(ge=64, multiple_of=8, description="The height to resize to (px)")
+mode: LATENTS_INTERPOLATION_MODE = Field(default="bilinear", description="The interpolation mode")
+antialias: bool = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
@@ -400,7 +405,7 @@ class ResizeLatentsInvocation(BaseInvocation):
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.set(name, resized_latents)
-return LatentsOutput(latents=LatentsField(latents_name=name))
+return build_latents_output(latents_name=name, latents=resized_latents)
class ScaleLatentsInvocation(BaseInvocation):
@@ -409,10 +414,10 @@ class ScaleLatentsInvocation(BaseInvocation):
type: Literal["lscale"] = "lscale"
# Inputs
-latents: Optional[LatentsField] = Field(description="The latents to scale")
-scale_factor: float = Field(gt=0, description="The factor by which to scale the latents")
-mode: Optional[LATENTS_INTERPOLATION_MODE] = Field(default="bilinear", description="The interpolation mode")
-antialias: Optional[bool] = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
+latents: Optional[LatentsField] = Field(description="The latents to scale")
+scale_factor: float = Field(gt=0, description="The factor by which to scale the latents")
+mode: LATENTS_INTERPOLATION_MODE = Field(default="bilinear", description="The interpolation mode")
+antialias: bool = Field(default=False, description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
@@ -430,7 +435,7 @@ class ScaleLatentsInvocation(BaseInvocation):
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.set(name, resized_latents)
-return LatentsOutput(latents=LatentsField(latents_name=name))
+return build_latents_output(latents_name=name, latents=resized_latents)
class ImageToLatentsInvocation(BaseInvocation):
@@ -474,4 +479,4 @@ class ImageToLatentsInvocation(BaseInvocation):
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.set(name, latents)
-return LatentsOutput(latents=LatentsField(latents_name=name))
+return build_latents_output(latents_name=name, latents=latents)
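
All of these invocations now funnel through `build_latents_output`, which reads the output size straight off the tensor: SD latents are laid out `[batch, channels, height/8, width/8]`, hence the `* 8`. A quick shape check (sizes are illustrative):

```python
import torch

latents = torch.zeros(1, 4, 64, 96)  # a 512x768 image in latent space
width_px = latents.size()[3] * 8     # 96 * 8 = 768
height_px = latents.size()[2] * 8    # 64 * 8 = 512
assert (width_px, height_px) == (768, 512)
```
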

View File

@@ -3,6 +3,7 @@
from typing import Literal
from pydantic import BaseModel, Field
+import numpy as np
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
@@ -73,3 +74,12 @@ class DivideInvocation(BaseInvocation, MathInvocationConfig):
def invoke(self, context: InvocationContext) -> IntOutput:
return IntOutput(a=int(self.a / self.b))
+class RandomIntInvocation(BaseInvocation):
+"""Outputs a single random integer."""
+#fmt: off
+type: Literal["rand_int"] = "rand_int"
+#fmt: on
+def invoke(self, context: InvocationContext) -> IntOutput:
+return IntOutput(a=np.random.randint(0, np.iinfo(np.int32).max))
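
One detail of the new `RandomIntInvocation`: `np.random.randint` treats its second argument as an exclusive upper bound, so the outputs fall in `[0, 2**31 - 2]`:

```python
import numpy as np

high = np.iinfo(np.int32).max       # 2147483647, never produced itself
sample = np.random.randint(0, high)
assert 0 <= sample < high
```
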

View File

@@ -108,17 +108,20 @@ APP_VERSION = invokeai.version.__version__
SAMPLER_CHOICES = [
"ddim",
-"k_dpm_2_a",
-"k_dpm_2",
-"k_dpmpp_2_a",
-"k_dpmpp_2",
-"k_euler_a",
-"k_euler",
-"k_heun",
-"k_lms",
-"plms",
+# diffusers:
+"ddpm",
+"deis",
+"lms",
+"pndm",
+"heun",
+"euler",
+"euler_k",
+"euler_a",
+"kdpm_2",
+"kdpm_2_a",
+"dpmpp_2s",
+"dpmpp_2m",
+"dpmpp_2m_k",
+"unipc",
]
PRECISION_CHOICES = [
@@ -631,7 +634,7 @@ class Args(object):
choices=SAMPLER_CHOICES,
metavar="SAMPLER_NAME",
help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
-default="k_lms",
+default="lms",
)
render_group.add_argument(
"--log_tokenization",

View File

@@ -37,6 +37,7 @@ from .safety_checker import SafetyChecker
from .prompting import get_uc_and_c_and_ec
from .prompting.conditioning import log_tokenization
from .stable_diffusion import HuggingFaceConceptsLibrary
+from .stable_diffusion.schedulers import SCHEDULER_MAP
from .util import choose_precision, choose_torch_device
def fix_func(orig):
@@ -141,7 +142,7 @@ class Generate:
model=None,
conf="configs/models.yaml",
embedding_path=None,
-sampler_name="k_lms",
+sampler_name="lms",
ddim_eta=0.0, # deterministic
full_precision=False,
precision="auto",
@@ -1047,29 +1048,12 @@ class Generate:
def _set_scheduler(self):
default = self.model.scheduler
# See https://github.com/huggingface/diffusers/issues/277#issuecomment-1371428672
-scheduler_map = dict(
-ddim=diffusers.DDIMScheduler,
-dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-k_dpm_2=diffusers.KDPM2DiscreteScheduler,
-k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
-# DPMSolverMultistepScheduler is technically not `k_` anything, as it is neither
-# the k-diffusers implementation nor included in EDM (Karras 2022), but we can
-# provide an alias for compatibility.
-k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-k_euler=diffusers.EulerDiscreteScheduler,
-k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
-k_heun=diffusers.HeunDiscreteScheduler,
-k_lms=diffusers.LMSDiscreteScheduler,
-plms=diffusers.PNDMScheduler,
-)
-if self.sampler_name in scheduler_map:
-sampler_class = scheduler_map[self.sampler_name]
+if self.sampler_name in SCHEDULER_MAP:
+sampler_class, sampler_extra_config = SCHEDULER_MAP[self.sampler_name]
msg = (
f"Setting Sampler to {self.sampler_name} ({sampler_class.__name__})"
)
-self.sampler = sampler_class.from_config(self.model.scheduler.config)
+self.sampler = sampler_class.from_config({**self.model.scheduler.config, **sampler_extra_config})
else:
msg = (
f" Unsupported Sampler: {self.sampler_name} "+

View File

@@ -31,6 +31,7 @@ from ..util.util import rand_perlin_2d
from ..safety_checker import SafetyChecker
from ..prompting.conditioning import get_uc_and_c_and_ec
from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline
+from ..stable_diffusion.schedulers import SCHEDULER_MAP
downsampling = 8
@@ -71,19 +72,6 @@ class InvokeAIGeneratorOutput:
# we are interposing a wrapper around the original Generator classes so that
# old code that calls Generate will continue to work.
class InvokeAIGenerator(metaclass=ABCMeta):
-scheduler_map = dict(
-ddim=diffusers.DDIMScheduler,
-dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-k_dpm_2=diffusers.KDPM2DiscreteScheduler,
-k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
-k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
-k_euler=diffusers.EulerDiscreteScheduler,
-k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
-k_heun=diffusers.HeunDiscreteScheduler,
-k_lms=diffusers.LMSDiscreteScheduler,
-plms=diffusers.PNDMScheduler,
-)
def __init__(self,
model_info: dict,
params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
@@ -175,14 +163,20 @@ class InvokeAIGenerator(metaclass=ABCMeta):
'''
Return list of all the schedulers that we currently handle.
'''
-return list(self.scheduler_map.keys())
+return list(SCHEDULER_MAP.keys())
def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]):
return generator_class(model, self.params.precision)
def get_scheduler(self, scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
-scheduler_class = self.scheduler_map.get(scheduler_name,'ddim')
-scheduler = scheduler_class.from_config(model.scheduler.config)
+scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP['ddim'])
+scheduler_config = model.scheduler.config
+if "_backup" in scheduler_config:
+scheduler_config = scheduler_config["_backup"]
+scheduler_config = {**scheduler_config, **scheduler_extra_config, "_backup": scheduler_config}
+scheduler = scheduler_class.from_config(scheduler_config)
# hack copied over from generate.py
if not hasattr(scheduler, 'uses_inpainting_model'):
scheduler.uses_inpainting_model = lambda: False

View File

@@ -47,6 +47,7 @@ from diffusers import (
LDMTextToImagePipeline,
LMSDiscreteScheduler,
PNDMScheduler,
+UniPCMultistepScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
@@ -1209,6 +1210,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
elif scheduler_type == "dpm":
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
+elif scheduler_type == 'unipc':
+scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
elif scheduler_type == "ddim":
scheduler = scheduler
else:

View File

@@ -30,7 +30,7 @@ from diffusers import (
UNet2DConditionModel,
SchedulerMixin,
logging as dlogging,
-)
+)
from huggingface_hub import scan_cache_dir
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
@@ -68,7 +68,7 @@ class SDModelComponent(Enum):
scheduler="scheduler"
safety_checker="safety_checker"
feature_extractor="feature_extractor"
DEFAULT_MAX_MODELS = 2
class ModelManager(object):
@@ -182,7 +182,7 @@ class ModelManager(object):
vae from the model currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.vae)
def get_model_tokenizer(self, model_name: str=None)->CLIPTokenizer:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned CLIPTokenizer. If no
@@ -190,12 +190,12 @@ class ModelManager(object):
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.tokenizer)
def get_model_unet(self, model_name: str=None)->UNet2DConditionModel:
"""Given a model name identified in models.yaml, load the model into
GPU if necessary and return its assigned UNet2DConditionModel. If no model
name is provided, return the UNet from the model
-currently in the GPU.
+currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.unet)
@@ -222,7 +222,7 @@ class ModelManager(object):
currently in the GPU.
"""
return self._get_sub_model(model_name, SDModelComponent.scheduler)
def _get_sub_model(
self,
model_name: str=None,
@@ -1228,7 +1228,7 @@ class ModelManager(object):
sha.update(chunk)
hash = sha.hexdigest()
toc = time.time()
-self.logger.debug(f"sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
+self.logger.debug(f"sha256 = {hash} ({count} files hashed in {toc - tic:4.2f}s)")
with open(hashpath, "w") as f:
f.write(hash)
return hash
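
The logging change above fixes a real bug, not just style: the old call passed `"%4.2fs)" % (toc - tic)` as a second positional argument to `logger.debug`, and the logging module treats positional arguments as %-interpolation data for the message. With no `%` placeholder in the message, formatting fails inside the handler and the line is never emitted. A minimal illustration:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("model_manager")
digest, count, tic, toc = "abc123", 7, 1.5, 4.0

# Old form (broken): logger.debug(f"sha256 = ...", "%4.2fs)" % (toc - tic))
# logging would attempt message % ("2.50s)",) and report an internal
# formatting error instead of printing the elapsed time.

# New form: one fully formatted message, nothing left for %-interpolation.
logger.debug(f"sha256 = {digest} ({count} files hashed in {toc - tic:4.2f}s)")
```
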

View File

@@ -509,10 +509,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
run_id=None,
callback: Callable[[PipelineIntermediateState], None] = None,
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
+if self.scheduler.config.get("cpu_only", False):
+scheduler_device = torch.device('cpu')
+else:
+scheduler_device = self._model_group.device_for(self.unet)
if timesteps is None:
-self.scheduler.set_timesteps(
-num_inference_steps, device=self._model_group.device_for(self.unet)
-)
+self.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
timesteps = self.scheduler.timesteps
infer_latents_from_embeddings = GeneratorToCallbackinator(
self.generate_latents_from_embeddings, PipelineIntermediateState
@@ -725,12 +728,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
noise: torch.Tensor,
run_id=None,
callback=None,
-) -> InvokeAIStableDiffusionPipelineOutput:
-timesteps, _ = self.get_img2img_timesteps(
-num_inference_steps,
-strength,
-device=self._model_group.device_for(self.unet),
-)
+) -> InvokeAIStableDiffusionPipelineOutput:
+timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
result_latents, result_attention_maps = self.latents_from_embeddings(
latents=initial_latents if strength < 1.0 else torch.zeros_like(
initial_latents, device=initial_latents.device, dtype=initial_latents.dtype
@@ -756,13 +755,19 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
return self.check_for_safety(output, dtype=conditioning_data.dtype)
def get_img2img_timesteps(
-self, num_inference_steps: int, strength: float, device
+self, num_inference_steps: int, strength: float, device=None
) -> (torch.Tensor, int):
img2img_pipeline = StableDiffusionImg2ImgPipeline(**self.components)
assert img2img_pipeline.scheduler is self.scheduler
-img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=device)
+if self.scheduler.config.get("cpu_only", False):
+scheduler_device = torch.device('cpu')
+else:
+scheduler_device = self._model_group.device_for(self.unet)
+img2img_pipeline.scheduler.set_timesteps(num_inference_steps, device=scheduler_device)
timesteps, adjusted_steps = img2img_pipeline.get_timesteps(
-num_inference_steps, strength, device=device
+num_inference_steps, strength, device=scheduler_device
)
# Workaround for low strength resulting in zero timesteps.
# TODO: submit upstream fix for zero-step img2img
@@ -796,9 +801,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
if init_image.dim() == 3:
init_image = init_image.unsqueeze(0)
-timesteps, _ = self.get_img2img_timesteps(
-num_inference_steps, strength, device=device
-)
+timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength)
# 6. Prepare latent variables
# can't quite use upstream StableDiffusionImg2ImgPipeline.prepare_latents
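
The repeated `cpu_only` branch is the other half of the UniPC support: UniPC is registered with `cpu_only=True` in the new `SCHEDULER_MAP`, so its timestep schedule must be built on CPU even when the UNet lives on GPU. A sketch of the shared decision (`pick_scheduler_device` is a hypothetical helper; the pipeline inlines this logic at each call site):

```python
import torch

def pick_scheduler_device(scheduler_config: dict, unet_device: torch.device) -> torch.device:
    # cpu_only schedulers (currently UniPC) compute their timesteps on CPU;
    # everything else follows the UNet's device.
    if scheduler_config.get("cpu_only", False):
        return torch.device("cpu")
    return unet_device

assert pick_scheduler_device({"cpu_only": True}, torch.device("cuda")) == torch.device("cpu")
assert pick_scheduler_device({}, torch.device("cpu")) == torch.device("cpu")
```
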

View File

@@ -0,0 +1 @@
+from .schedulers import SCHEDULER_MAP

View File

@@ -0,0 +1,22 @@
+from diffusers import DDIMScheduler, DPMSolverMultistepScheduler, KDPM2DiscreteScheduler, \
+KDPM2AncestralDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, \
+HeunDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UniPCMultistepScheduler, \
+DPMSolverSinglestepScheduler, DEISMultistepScheduler, DDPMScheduler
+SCHEDULER_MAP = dict(
+ddim=(DDIMScheduler, dict()),
+ddpm=(DDPMScheduler, dict()),
+deis=(DEISMultistepScheduler, dict()),
+lms=(LMSDiscreteScheduler, dict()),
+pndm=(PNDMScheduler, dict()),
+heun=(HeunDiscreteScheduler, dict()),
+euler=(EulerDiscreteScheduler, dict(use_karras_sigmas=False)),
+euler_k=(EulerDiscreteScheduler, dict(use_karras_sigmas=True)),
+euler_a=(EulerAncestralDiscreteScheduler, dict()),
+kdpm_2=(KDPM2DiscreteScheduler, dict()),
+kdpm_2_a=(KDPM2AncestralDiscreteScheduler, dict()),
+dpmpp_2s=(DPMSolverSinglestepScheduler, dict()),
+dpmpp_2m=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False)),
+dpmpp_2m_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)),
+unipc=(UniPCMultistepScheduler, dict(cpu_only=True))
+)
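
Each entry pairs a scheduler class with the extra config it needs, which is how one diffusers class can back several named choices (`euler` vs `euler_k` differ only in `use_karras_sigmas`). A usage sketch, building a scheduler from its own defaults rather than from an existing pipeline config (assumes a diffusers version that supports `use_karras_sigmas`, which this commit already relies on):

```python
from diffusers import EulerDiscreteScheduler

# SCHEDULER_MAP-style lookup: (class, extra kwargs merged into the config)
scheduler_class, extra_config = (EulerDiscreteScheduler, dict(use_karras_sigmas=True))

base_config = dict(scheduler_class().config)  # default config as a plain dict
scheduler = scheduler_class.from_config({**base_config, **extra_config})
assert scheduler.config.use_karras_sigmas is True
```
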

View File

@@ -4,17 +4,20 @@ from .parse_seed_weights import parse_seed_weights
SAMPLER_CHOICES = [
"ddim",
-"k_dpm_2_a",
-"k_dpm_2",
-"k_dpmpp_2_a",
-"k_dpmpp_2",
-"k_euler_a",
-"k_euler",
-"k_heun",
-"k_lms",
-"plms",
+# diffusers:
+"ddpm",
+"deis",
+"lms",
+"pndm",
+"heun",
+"euler",
+"euler_k",
+"euler_a",
+"kdpm_2",
+"kdpm_2_a",
+"dpmpp_2s",
+"dpmpp_2m",
+"dpmpp_2m_k",
+"unipc",
]

View File

@@ -37,7 +37,7 @@ From `invokeai/frontend/web/` run `yarn install` to get everything set up.
Start everything in dev mode:
1. Start the dev server: `yarn dev`
-2. Start the InvokeAI UI per usual: `invokeai --web`
+2. Start the InvokeAI Nodes backend: `python scripts/invokeai-new.py --web # run from the repo root`
3. Point your browser to the dev server address e.g. <http://localhost:5173/>
### Production builds

View File

@@ -2,17 +2,28 @@
export const DIFFUSERS_SCHEDULERS: Array<string> = [
'ddim',
-'plms',
-'k_lms',
-'dpmpp_2',
-'k_dpm_2',
-'k_dpm_2_a',
-'k_dpmpp_2',
-'k_euler',
-'k_euler_a',
-'k_heun',
+'ddpm',
+'deis',
+'lms',
+'pndm',
+'heun',
+'euler',
+'euler_k',
+'euler_a',
+'kdpm_2',
+'kdpm_2_a',
+'dpmpp_2s',
+'dpmpp_2m',
+'dpmpp_2m_k',
+'unipc',
];
+export const IMG2IMG_DIFFUSERS_SCHEDULERS = DIFFUSERS_SCHEDULERS.filter(
+(scheduler) => {
+return scheduler !== 'dpmpp_2s';
+}
+);
// Valid image widths
export const WIDTHS: Array<number> = Array.from(Array(64)).map(
(_x, i) => (i + 1) * 64

View File

@@ -47,15 +47,20 @@ export type CommonGeneratedImageMetadata = {
postprocessing: null | Array<ESRGANMetadata | FacetoolMetadata>;
sampler:
| 'ddim'
-| 'k_dpm_2_a'
-| 'k_dpm_2'
-| 'k_dpmpp_2_a'
-| 'k_dpmpp_2'
-| 'k_euler_a'
-| 'k_euler'
-| 'k_heun'
-| 'k_lms'
-| 'plms';
+| 'ddpm'
+| 'deis'
+| 'lms'
+| 'pndm'
+| 'heun'
+| 'euler'
+| 'euler_k'
+| 'euler_a'
+| 'kdpm_2'
+| 'kdpm_2_a'
+| 'dpmpp_2s'
+| 'dpmpp_2m'
+| 'dpmpp_2m_k'
+| 'unipc';
prompt: Prompt;
seed: number;
variations: SeedWeights;

View File

@@ -20,7 +20,7 @@ export const iterationGraph = {
model: '',
progress_images: false,
prompt: 'dog',
-sampler_name: 'k_lms',
+sampler_name: 'lms',
seamless: false,
steps: 11,
type: 'txt2img',

View File

@@ -1,8 +1,12 @@
-import { DIFFUSERS_SCHEDULERS } from 'app/constants';
+import {
+DIFFUSERS_SCHEDULERS,
+IMG2IMG_DIFFUSERS_SCHEDULERS,
+} from 'app/constants';
import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAISelect from 'common/components/IAISelect';
import { setSampler } from 'features/parameters/store/generationSlice';
+import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { ChangeEvent, memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
@@ -10,6 +14,9 @@ const ParamSampler = () => {
const sampler = useAppSelector(
(state: RootState) => state.generation.sampler
);
+const activeTabName = useAppSelector(activeTabNameSelector);
const dispatch = useAppDispatch();
const { t } = useTranslation();
@@ -23,7 +30,11 @@ const ParamSampler = () => {
label={t('parameters.sampler')}
value={sampler}
onChange={handleChange}
-validValues={DIFFUSERS_SCHEDULERS}
+validValues={
+activeTabName === 'img2img' || activeTabName == 'unifiedCanvas'
+? IMG2IMG_DIFFUSERS_SCHEDULERS
+: DIFFUSERS_SCHEDULERS
+}
minWidth={36}
/>
);

View File

@@ -47,7 +47,7 @@ const ImageToImageStrength = () => {
return (
<IAISlider
-label={`${t('parameters.strength')}`}
+label={`${t('parameters.denoisingStrength')}`}
step={step}
min={min}
max={sliderMax}

View File

@@ -51,7 +51,7 @@ export const initialGenerationState: GenerationState = {
perlin: 0,
prompt: '',
negativePrompt: '',
-sampler: 'k_lms',
+sampler: 'lms',
seamBlur: 16,
seamSize: 96,
seamSteps: 30,

View File

@@ -64,8 +64,6 @@ const ImageToImageTabCoreParameters = () => {
<ParamSteps />
<ParamCFGScale />
</Flex>
-<ParamWidth isDisabled={!shouldFitToWidthHeight} />
-<ParamHeight isDisabled={!shouldFitToWidthHeight} />
<Flex gap={3} w="full">
<Box flexGrow={2}>
<ParamSampler />
@@ -74,6 +72,8 @@ const ImageToImageTabCoreParameters = () => {
<ModelSelect />
</Box>
</Flex>
+<ParamWidth isDisabled={!shouldFitToWidthHeight} />
+<ParamHeight isDisabled={!shouldFitToWidthHeight} />
<ImageToImageStrength />
<ImageToImageFit />
</Flex>

View File

@@ -62,8 +62,6 @@ const UnifiedCanvasCoreParameters = () => {
<ParamSteps />
<ParamCFGScale />
</Flex>
-<ParamWidth />
-<ParamHeight />
<Flex gap={3} w="full">
<Box flexGrow={2}>
<ParamSampler />
@@ -72,8 +70,9 @@ const UnifiedCanvasCoreParameters = () => {
<ModelSelect />
</Box>
</Flex>
+<ParamWidth />
+<ParamHeight />
<ImageToImageStrength />
<ImageToImageFit />
</Flex>
)}
</Flex>

View File

@@ -3,7 +3,7 @@ import LanguageDetector from 'i18next-browser-languagedetector';
import Backend from 'i18next-http-backend';
import { initReactI18next } from 'react-i18next';
-import translationEN from '../dist/locales/en.json';
+import translationEN from '../public/locales/en.json';
import { LOCALSTORAGE_PREFIX } from 'app/store/constants';
if (import.meta.env.MODE === 'package') {

View File

@@ -23,7 +23,7 @@
],
"threshold": 0,
"postprocessing": null,
-"sampler": "k_lms",
+"sampler": "lms",
"variations": [],
"type": "txt2img"
}

View File

@@ -17,7 +17,7 @@ valid_metadata = {
"width": 512,
"height": 512,
"cfg_scale": 7.5,
-"scheduler": "k_lms",
+"scheduler": "lms",
"model": "stable-diffusion-1.5",
},
}