Merge branch 'main' into refactor/model-manager-2

Lincoln Stein 2023-11-10 17:54:28 -05:00
commit f1c195afb7
29 changed files with 1002 additions and 1389 deletions


@@ -198,6 +198,7 @@ The list of schedulers has been completely revamped and brought up to date:
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
| **unipc** | UniPCMultistepScheduler | CPU only |
| **lcm** | LCMScheduler | |
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
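For orientation, here is a hedged sketch of what one row of the table above means in plain diffusers terms. This is not InvokeAI's own loading path, and the model identifier is a placeholder:

```python
# Sketch only: "dpmpp_2m_k" in the table above corresponds to DPMSolverMultistepScheduler
# configured with the Karras noise schedule. The model ID below is a placeholder.
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("path-or-hub-id-of-a-sd-model")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)
```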


@@ -34,4 +34,4 @@ class SocketIO:
async def _handle_unsub_queue(self, sid, data, *args, **kwargs):
if "queue_id" in data:
await self.__sio.enter_room(sid, data["queue_id"])
await self.__sio.leave_room(sid, data["queue_id"])
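The one-line change in this hunk makes the unsubscribe handler leave the queue room instead of incorrectly entering it again. A minimal sketch of the corrected handler, mirroring the await usage shown above and assuming self.__sio is the python-socketio AsyncServer held by this class:

```python
# Sketch of the corrected handler; self.__sio is assumed to be python-socketio's AsyncServer.
async def _handle_unsub_queue(self, sid, data, *args, **kwargs):
    if "queue_id" in data:
        # Unsubscribing must remove the client from the room, not re-add it.
        await self.__sio.leave_room(sid, data["queue_id"])
```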


@@ -16,6 +16,7 @@ from pydantic.fields import FieldInfo, _Unset
from pydantic_core import PydanticUndefined
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import uuid_string
if TYPE_CHECKING:
@@ -30,74 +31,6 @@ class InvalidFieldError(TypeError):
pass
class FieldDescriptions:
denoising_start = "When to start denoising, expressed as a percentage of total steps"
denoising_end = "When to stop denoising, expressed as a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduces memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
class Input(str, Enum):
"""
The type of input a field accepts.


@@ -7,6 +7,7 @@ from compel import Compel, ReturnedEmbeddingsType
from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
BasicConditioningInfo,
ExtraConditioningInfo,
@@ -19,7 +20,6 @@ from ...backend.util.devices import torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -28,12 +28,12 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator
from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import BaseModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -9,19 +9,11 @@ from PIL import Image, ImageChops, ImageFilter, ImageOps
from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.safety_checker import SafetyChecker
from .baseinvocation import (
BaseInvocation,
FieldDescriptions,
Input,
InputField,
InvocationContext,
WithMetadata,
WithWorkflow,
invocation,
)
from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, WithWorkflow, invocation
@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")


@@ -7,7 +7,6 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -17,6 +16,7 @@ from invokeai.app.invocations.baseinvocation import (
invocation_output,
)
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType, ModelType
from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id


@@ -10,7 +10,7 @@ import torch
import torchvision.transforms as T
from diffusers import AutoencoderKL, AutoencoderTiny
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.adapter import FullAdapterXL, T2IAdapter
from diffusers.models.adapter import T2IAdapter
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
@@ -34,6 +34,7 @@ from invokeai.app.invocations.primitives import (
)
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
@@ -57,7 +58,6 @@ from ...backend.util.devices import choose_precision, choose_torch_device
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -562,10 +562,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
t2i_adapter_model: T2IAdapter
with t2i_adapter_model_info as t2i_adapter_model:
total_downscale_factor = t2i_adapter_model.total_downscale_factor
if isinstance(t2i_adapter_model.adapter, FullAdapterXL):
# HACK(ryand): Work around a bug in FullAdapterXL. This is being addressed upstream in diffusers by
# this PR: https://github.com/huggingface/diffusers/pull/5134.
total_downscale_factor = total_downscale_factor // 2
# Resize the T2I-Adapter input image.
# We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is applied, the


@@ -6,8 +6,9 @@ import numpy as np
from pydantic import ValidationInfo, field_validator
from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import BaseInvocation, FieldDescriptions, InputField, InvocationContext, invocation
from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
@invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0")


@@ -5,7 +5,6 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
InvocationContext,
MetadataField,
@@ -19,6 +18,7 @@ from invokeai.app.invocations.ip_adapter import IPAdapterModelField
from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.shared.fields import FieldDescriptions
from ...version import __version__


@@ -3,11 +3,13 @@ from typing import List, Optional
from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.shared.models import FreeUConfig
from ...backend.model_management import BaseModelType, ModelType, SubModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -17,22 +19,6 @@ from .baseinvocation import (
invocation_output,
)
# TODO: Permanent fix for this
# from invokeai.app.invocations.shared import FreeUConfig
class FreeUConfig(BaseModel):
"""
Configuration for the FreeU hyperparameters.
- https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu
- https://github.com/ChenyangSi/FreeU
"""
s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1)
s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2)
b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1)
b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2)
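The FreeUConfig model removed here now lives in invokeai.app.shared.models; it simply bundles the four FreeU scaling factors. A hedged sketch of how such a config can be applied to a diffusers UNet follows (enable_freeu is a diffusers UNet2DConditionModel method; the values are illustrative, not InvokeAI defaults):

```python
# Illustrative values only; `unet` is assumed to be a loaded diffusers UNet2DConditionModel.
freeu = FreeUConfig(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
unet.enable_freeu(s1=freeu.s1, s2=freeu.s2, b1=freeu.b1, b2=freeu.b2)
```
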
class ModelInfo(BaseModel):
model_name: str = Field(description="Info to load submodel")


@@ -5,13 +5,13 @@ import torch
from pydantic import field_validator
from invokeai.app.invocations.latent import LatentsField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.misc import SEED_MAX, get_random_seed
from ...backend.util.devices import choose_torch_device, torch_dtype
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
InputField,
InvocationContext,
OutputField,


@@ -14,6 +14,7 @@ from tqdm import tqdm
from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend import BaseModelType, ModelType, SubModelType
@@ -23,7 +24,6 @@ from ...backend.util import choose_torch_device
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -5,10 +5,11 @@ from typing import Optional, Tuple
import torch
from pydantic import BaseModel, Field
from invokeai.app.shared.fields import FieldDescriptions
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -1,8 +1,9 @@
from invokeai.app.shared.fields import FieldDescriptions
from ...backend.model_management import ModelType, SubModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,


@@ -5,7 +5,6 @@ from pydantic import BaseModel, ConfigDict, Field
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
FieldDescriptions,
Input,
InputField,
InvocationContext,
@@ -16,6 +15,7 @@ from invokeai.app.invocations.baseinvocation import (
)
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.model_management.models.base import BaseModelType


@@ -0,0 +1,5 @@
"""
This module contains various classes, functions and models which are shared across the app, particularly by invocations.
Lifting these classes, functions and models into this shared module helps to reduce circular imports.
"""


@@ -0,0 +1,66 @@
class FieldDescriptions:
denoising_start = "When to start denoising, expressed as a percentage of total steps"
denoising_end = "When to stop denoising, expressed as a percentage of total steps"
cfg_scale = "Classifier-Free Guidance scale"
scheduler = "Scheduler to use during inference"
positive_cond = "Positive conditioning tensor"
negative_cond = "Negative conditioning tensor"
noise = "Noise tensor"
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
unet = "UNet (scheduler, LoRAs)"
vae = "VAE"
cond = "Conditioning tensor"
controlnet_model = "ControlNet model to load"
vae_model = "VAE model to load"
lora_model = "LoRA model to load"
main_model = "Main model (UNet, VAE, CLIP) to load"
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
lora_weight = "The weight at which the LoRA is applied to each model"
compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor"
raw_prompt = "Raw prompt text (no parsing)"
sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor"
skipped_layers = "Number of layers to skip in text encoder"
seed = "Seed for random number generation"
steps = "Number of steps to run"
width = "Width of output (px)"
height = "Height of output (px)"
control = "ControlNet(s) to apply"
ip_adapter = "IP-Adapter to apply"
t2i_adapter = "T2I-Adapter(s) to apply"
denoised_latents = "Denoised latents tensor"
latents = "Latents tensor"
strength = "Strength of denoising (proportional to steps)"
metadata = "Optional metadata to be saved with the image"
metadata_collection = "Collection of Metadata"
metadata_item_polymorphic = "A single metadata item or collection of metadata items"
metadata_item_label = "Label for this metadata item"
metadata_item_value = "The value for this metadata item (may be any type)"
workflow = "Optional workflow to be saved with the image"
interp_mode = "Interpolation mode"
torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)"
fp32 = "Whether or not to use full float32 precision"
precision = "Precision to use"
tiled = "Processing using overlapping tiles (reduces memory consumption)"
detect_res = "Pixel resolution for detection"
image_res = "Pixel resolution for output image"
safe_mode = "Whether or not to use safe mode"
scribble_mode = "Whether or not to use scribble mode"
scale_factor = "The factor by which to scale"
blend_alpha = (
"Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B."
)
num_1 = "The first number"
num_2 = "The second number"
mask = "The mask to use for the operation"
board = "The board to save the image to"
image = "The image to process"
tile_size = "Tile size"
inclusive_low = "The inclusive low value"
exclusive_high = "The exclusive high value"
decimal_places = "The number of decimal places to round to"
freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.'
freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features."
freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features."
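These constants exist so that fields across the app's invocations share a single description string. A minimal sketch of the intended usage with plain pydantic (the model and field names here are hypothetical; InvokeAI's InputField wrapper is used in the same way):

```python
# Hypothetical model for illustration; descriptions come from the shared constants above.
from pydantic import BaseModel, Field

from invokeai.app.shared.fields import FieldDescriptions

class NoiseSettings(BaseModel):
    seed: int = Field(default=0, description=FieldDescriptions.seed)
    width: int = Field(default=512, description=FieldDescriptions.width)
    height: int = Field(default=512, description=FieldDescriptions.height)
```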


@@ -1,6 +1,6 @@
from pydantic import BaseModel, Field
from invokeai.app.invocations.baseinvocation import FieldDescriptions
from invokeai.app.shared.fields import FieldDescriptions
class FreeUConfig(BaseModel):


@@ -32,7 +32,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
from pydantic.error_wrappers import ValidationError
from pydantic import ValidationError
from tqdm import tqdm
from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer


@@ -38,6 +38,7 @@ SAMPLER_CHOICES = [
"k_heun",
"k_lms",
"plms",
"lcm",
]
PRECISION_CHOICES = [


@@ -12,7 +12,7 @@ from diffusers.models import UNet2DConditionModel
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer
from invokeai.app.invocations.shared import FreeUConfig
from invokeai.app.shared.models import FreeUConfig
from .models.lora import LoRAModel


@@ -10,6 +10,7 @@ from diffusers import (
HeunDiscreteScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
LCMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UniPCMultistepScheduler,
@@ -38,4 +39,5 @@ SCHEDULER_MAP = dict(
dpmpp_sde=(DPMSolverSDEScheduler, dict(use_karras_sigmas=False, noise_sampler_seed=0)),
dpmpp_sde_k=(DPMSolverSDEScheduler, dict(use_karras_sigmas=True, noise_sampler_seed=0)),
unipc=(UniPCMultistepScheduler, dict(cpu_only=True)),
lcm=(LCMScheduler, dict()),
)
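With the entry added above, the short name "lcm" resolves to diffusers' LCMScheduler. A hedged sketch of how the map can be consumed; `pipeline` is assumed to be an already-loaded diffusers pipeline, and this is not InvokeAI's exact loading code:

```python
# Sketch only; `pipeline` is assumed to be a loaded diffusers pipeline.
scheduler_class, extra_kwargs = SCHEDULER_MAP["lcm"]
pipeline.scheduler = scheduler_class.from_config(pipeline.scheduler.config, **extra_kwargs)
```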


@@ -54,42 +54,35 @@
]
},
"dependencies": {
"@chakra-ui/anatomy": "^2.2.1",
"@chakra-ui/anatomy": "^2.2.2",
"@chakra-ui/icons": "^2.1.1",
"@chakra-ui/react": "^2.8.1",
"@chakra-ui/styled-system": "^2.9.1",
"@chakra-ui/theme-tools": "^2.1.1",
"@chakra-ui/react": "^2.8.2",
"@chakra-ui/styled-system": "^2.9.2",
"@chakra-ui/theme-tools": "^2.1.2",
"@dagrejs/graphlib": "^2.1.13",
"@dnd-kit/core": "^6.0.8",
"@dnd-kit/modifiers": "^6.0.1",
"@dnd-kit/utilities": "^3.2.1",
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/utilities": "^3.2.2",
"@emotion/react": "^11.11.1",
"@emotion/styled": "^11.11.0",
"@floating-ui/react-dom": "^2.0.2",
"@fontsource-variable/inter": "^5.0.13",
"@fontsource/inter": "^5.0.13",
"@fontsource-variable/inter": "^5.0.15",
"@mantine/core": "^6.0.19",
"@mantine/form": "^6.0.19",
"@mantine/hooks": "^6.0.19",
"@nanostores/react": "^0.7.1",
"@reduxjs/toolkit": "^1.9.7",
"@roarr/browser-log-writer": "^1.3.0",
"@stevebel/png": "^1.5.1",
"compare-versions": "^6.1.0",
"dateformat": "^5.0.3",
"formik": "^2.4.5",
"framer-motion": "^10.16.4",
"fuse.js": "^6.6.2",
"i18next": "^23.5.1",
"i18next-browser-languagedetector": "^7.0.2",
"i18next-http-backend": "^2.2.2",
"konva": "^9.2.2",
"i18next": "^23.6.0",
"i18next-http-backend": "^2.3.1",
"konva": "^9.2.3",
"lodash-es": "^4.17.21",
"nanostores": "^0.9.2",
"nanostores": "^0.9.4",
"new-github-issue-url": "^1.0.0",
"openapi-fetch": "^0.7.10",
"overlayscrollbars": "^2.3.2",
"overlayscrollbars-react": "^0.5.2",
"openapi-fetch": "^0.8.1",
"overlayscrollbars": "^2.4.4",
"overlayscrollbars-react": "^0.5.3",
"patch-package": "^8.0.0",
"query-string": "^8.1.0",
"react": "^18.2.0",
@@ -98,26 +91,25 @@
"react-dropzone": "^14.2.3",
"react-error-boundary": "^4.0.11",
"react-hotkeys-hook": "4.4.1",
"react-i18next": "^13.3.0",
"react-i18next": "^13.3.1",
"react-icons": "^4.11.0",
"react-konva": "^18.2.10",
"react-redux": "^8.1.3",
"react-resizable-panels": "^0.0.55",
"react-use": "^17.4.0",
"react-virtuoso": "^4.6.1",
"react-zoom-pan-pinch": "^3.2.0",
"reactflow": "^11.9.3",
"react-virtuoso": "^4.6.2",
"reactflow": "^11.9.4",
"redux-dynamic-middlewares": "^2.2.0",
"redux-remember": "^4.0.4",
"roarr": "^7.15.1",
"roarr": "^7.18.3",
"serialize-error": "^11.0.2",
"socket.io-client": "^4.7.2",
"type-fest": "^4.4.0",
"use-debounce": "^9.0.4",
"type-fest": "^4.7.1",
"use-debounce": "^10.0.0",
"use-image": "^1.1.1",
"uuid": "^9.0.1",
"zod": "^3.22.4",
"zod-validation-error": "^1.5.0"
"zod-validation-error": "^2.1.0"
},
"peerDependencies": {
"@chakra-ui/cli": "^2.4.0",
@@ -128,39 +120,33 @@
},
"devDependencies": {
"@chakra-ui/cli": "^2.4.1",
"@types/dateformat": "^5.0.0",
"@types/lodash-es": "^4.17.9",
"@types/node": "^20.8.6",
"@types/react": "^18.2.28",
"@types/react-dom": "^18.2.13",
"@types/react-redux": "^7.1.27",
"@types/react-transition-group": "^4.4.7",
"@types/uuid": "^9.0.5",
"@typescript-eslint/eslint-plugin": "^6.7.5",
"@typescript-eslint/parser": "^6.7.5",
"@vitejs/plugin-react-swc": "^3.4.0",
"axios": "^1.5.1",
"babel-plugin-transform-imports": "^2.0.0",
"concurrently": "^8.2.1",
"eslint": "^8.51.0",
"@types/dateformat": "^5.0.2",
"@types/lodash-es": "^4.17.11",
"@types/node": "^20.9.0",
"@types/react": "^18.2.37",
"@types/react-dom": "^18.2.15",
"@types/react-redux": "^7.1.30",
"@types/uuid": "^9.0.7",
"@typescript-eslint/eslint-plugin": "^6.10.0",
"@typescript-eslint/parser": "^6.10.0",
"@vitejs/plugin-react-swc": "^3.4.1",
"concurrently": "^8.2.2",
"eslint": "^8.53.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-prettier": "^5.0.1",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^4.6.0",
"form-data": "^4.0.0",
"husky": "^8.0.3",
"lint-staged": "^15.0.1",
"lint-staged": "^15.0.2",
"madge": "^6.1.0",
"openapi-types": "^12.1.3",
"openapi-typescript": "^6.7.0",
"postinstall-postinstall": "^2.1.0",
"prettier": "^3.0.3",
"rollup-plugin-visualizer": "^5.9.2",
"ts-toolbelt": "^9.6.0",
"typescript": "^5.2.2",
"vite": "^4.4.11",
"vite": "^4.5.0",
"vite-plugin-css-injected-by-js": "^3.3.0",
"vite-plugin-dts": "^3.6.0",
"vite-plugin-dts": "^3.6.3",
"vite-plugin-eslint": "^1.8.1",
"vite-tsconfig-paths": "^4.2.1",
"yarn": "^1.22.19"


@@ -1231,7 +1231,9 @@
"noLoRAsAvailable": "无可用 LoRA",
"noModelsAvailable": "无可用模型",
"selectModel": "选择一个模型",
"selectLoRA": "选择一个 LoRA"
"selectLoRA": "选择一个 LoRA",
"noRefinerModelsInstalled": "无已安装的 SDXL Refiner 模型",
"noLoRAsInstalled": "无已安装的 LoRA"
},
"boards": {
"autoAddBoard": "自动添加面板",


@@ -132,6 +132,7 @@ export const zScheduler = z.enum([
'lms_k',
'euler_a',
'kdpm_2_a',
'lcm',
]);
/**
* Type alias for scheduler parameter, inferred from its zod schema
@@ -166,6 +167,7 @@ export const SCHEDULER_LABEL_MAP: Record<SchedulerParam, string> = {
lms_k: 'LMS Karras',
euler_a: 'Euler Ancestral',
kdpm_2_a: 'KDPM 2 Ancestral',
lcm: 'LCM',
};
/**

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large


@@ -40,9 +40,7 @@ dependencies = [
"controlnet-aux>=0.0.6",
"timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26
"datasets",
# When bumping diffusers beyond 0.21, make sure to address this:
# https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513
"diffusers[torch]~=0.22.0",
"diffusers[torch]~=0.23.0",
"dnspython~=2.4.0",
"dynamicprompts",
"easing-functions",