Merge branch 'main' into feat/select-vram-in-config

commit 4d5169e16d
Lincoln Stein, 2023-08-08 13:50:02 -04:00 (committed by GitHub)
23 changed files with 669 additions and 137 deletions

View File

@@ -1,22 +1,20 @@
 import io
 from typing import Optional
+from PIL import Image
 from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile
 from fastapi.responses import FileResponse
 from fastapi.routing import APIRouter
-from PIL import Image
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
 from invokeai.app.invocations.metadata import ImageMetadata
 from invokeai.app.models.image import ImageCategory, ResourceOrigin
 from invokeai.app.services.image_record_storage import OffsetPaginatedResults
-from invokeai.app.services.item_storage import PaginatedResults
 from invokeai.app.services.models.image_record import (
     ImageDTO,
     ImageRecordChanges,
     ImageUrlsDTO,
 )
 from ..dependencies import ApiDependencies
 images_router = APIRouter(prefix="/v1/images", tags=["images"])
@@ -152,8 +150,9 @@ async def get_image_metadata(
         raise HTTPException(status_code=404)
-@images_router.get(
+@images_router.api_route(
     "/i/{image_name}/full",
+    methods=["GET", "HEAD"],
     operation_id="get_image_full",
     response_class=Response,
     responses={
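The hunk above switches the full-image endpoint from @images_router.get to @images_router.api_route with methods=["GET", "HEAD"], so clients can probe an image (headers only) without downloading it. Below is a minimal, self-contained FastAPI sketch of that pattern; the handler body and the file path are illustrative stand-ins, not the InvokeAI implementation.

# Sketch: one handler registered for both GET and HEAD via api_route.
# The route path matches the diff; everything inside the function is hypothetical.
from fastapi import FastAPI
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter

app = FastAPI()
images_router = APIRouter(prefix="/v1/images", tags=["images"])


@images_router.api_route("/i/{image_name}/full", methods=["GET", "HEAD"])
async def get_image_full(image_name: str):
    # Registering both verbs on one path lets clients issue HEAD first
    # (e.g. to check existence and content-type) and GET only when needed.
    return FileResponse(f"/tmp/images/{image_name}", media_type="image/png")


app.include_router(images_router)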

View File

@@ -28,7 +28,6 @@ InvokeAI:
     always_use_cpu: false
     free_gpu_mem: false
   Features:
-    restore: true
     esrgan: true
     patchmatch: true
     internet_available: true
@@ -165,7 +164,7 @@ import pydoc
 import os
 import sys
 from argparse import ArgumentParser
-from omegaconf import OmegaConf, DictConfig
+from omegaconf import OmegaConf, DictConfig, ListConfig
 from pathlib import Path
 from pydantic import BaseSettings, Field, parse_obj_as
 from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args
@@ -190,7 +189,12 @@ class InvokeAISettings(BaseSettings):
         opt = parser.parse_args(argv)
         for name in self.__fields__:
             if name not in self._excluded():
-                setattr(self, name, getattr(opt, name))
+                value = getattr(opt, name)
+                if isinstance(value, ListConfig):
+                    value = list(value)
+                elif isinstance(value, DictConfig):
+                    value = dict(value)
+                setattr(self, name, value)
     def to_yaml(self) -> str:
         """
@@ -283,14 +287,10 @@ class InvokeAISettings(BaseSettings):
         return [
             "type",
             "initconf",
-            "gpu_mem_reserved",
-            "max_loaded_models",
             "version",
             "from_file",
             "model",
-            "restore",
             "root",
-            "nsfw_checker",
         ]
     class Config:
@@ -389,15 +389,11 @@ class InvokeAIAppConfig(InvokeAISettings):
     internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features')
     log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features')
     patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", category='Features')
-    restore : bool = Field(default=True, description="Enable/disable face restoration code (DEPRECATED)", category='DEPRECATED')
     always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
     free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
-    max_loaded_models : int = Field(default=3, gt=0, description="(DEPRECATED: use max_cache_size) Maximum number of models to keep in memory for rapid switching", category='DEPRECATED')
     max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
-    max_vram_cache_size : float = Field(default=DEFAULT_MAX_VRAM, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
-    gpu_mem_reserved : float = Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
-    nsfw_checker : bool = Field(default=True, description="DEPRECATED: use Web settings to enable/disable", category='DEPRECATED')
+    max_vram_cache_size : float = Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
     precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='auto',description='Floating point precision', category='Memory/Performance')
     sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
     xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
@@ -415,9 +411,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
     from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
     use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')
-    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert')
-    model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
+    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', category='Features')
     log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>"', category="Logging")
     # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
@@ -427,6 +421,9 @@ class InvokeAIAppConfig(InvokeAISettings):
     version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
     # fmt: on
+    class Config:
+        validate_assignment = True
+
     def parse_args(self, argv: List[str] = None, conf: DictConfig = None, clobber=False):
         """
         Update settings with contents of init file, environment, and
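The config.py hunks above do three related things: drop the DEPRECATED fields, turn on validate_assignment so every attribute assignment is re-validated, and unwrap OmegaConf ListConfig/DictConfig values into plain Python containers before setattr, since pydantic v1 validators can reject the OmegaConf wrapper types. A small sketch under those assumptions (pydantic v1 API; DemoSettings and its field are illustrative, not the real config class):

# Sketch: unwrapping OmegaConf containers before assignment once
# validate_assignment is enabled (pydantic v1; names are stand-ins).
from typing import List

from omegaconf import OmegaConf, ListConfig, DictConfig
from pydantic import BaseSettings, Field


class DemoSettings(BaseSettings):
    log_handlers: List[str] = Field(default=["console"])

    class Config:
        validate_assignment = True  # re-validate on every attribute assignment


conf = OmegaConf.create({"log_handlers": ["console", "file=/tmp/invokeai.log"]})
value = conf.log_handlers          # an omegaconf ListConfig, not a list
if isinstance(value, ListConfig):
    value = list(value)            # unwrap to a native list
elif isinstance(value, DictConfig):
    value = dict(value)

settings = DemoSettings()
settings.log_handlers = value      # now validates cleanly as List[str]
print(settings.log_handlers)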

View File

@@ -47,6 +47,8 @@ from invokeai.app.services.config import (
 )
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
+
+# TO DO - Move all the frontend code into invokeai.frontend.install
 from invokeai.frontend.install.widgets import (
     SingleSelectColumns,
     CenteredButtonPress,
@@ -65,6 +67,7 @@ from invokeai.backend.install.model_install_backend import (
     ModelInstall,
 )
 from invokeai.backend.model_management.model_probe import ModelType, BaseModelType
+from pydantic.error_wrappers import ValidationError
 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()
@@ -694,10 +697,13 @@ def migrate_init_file(legacy_format: Path):
     old = legacy_parser.parse_args([f"@{str(legacy_format)}"])
     new = InvokeAIAppConfig.get_config()
-    fields = list(get_type_hints(InvokeAIAppConfig).keys())
+    fields = [x for x, y in InvokeAIAppConfig.__fields__.items() if y.field_info.extra.get("category") != "DEPRECATED"]
     for attr in fields:
         if hasattr(old, attr):
-            setattr(new, attr, getattr(old, attr))
+            try:
+                setattr(new, attr, getattr(old, attr))
+            except ValidationError as e:
+                print(f"* Ignoring incompatible value for field {attr}:\n {str(e)}")
     # a few places where the field names have changed and we have to
     # manually add in the new names/values
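The migrate_init_file hunk now builds the migratable field list from InvokeAIAppConfig.__fields__, skipping anything whose category extra is "DEPRECATED", and tolerates incompatible legacy values by catching ValidationError on assignment. The sketch below shows that filtering pattern on a stand-in pydantic v1 model (NewConfig and legacy_values are hypothetical, not the real config or legacy file):

# Sketch: filter fields by the "category" Field extra and ignore legacy
# values that no longer validate (pydantic v1; names are stand-ins).
from pydantic import BaseModel, Field, ValidationError


class NewConfig(BaseModel):
    max_cache_size: float = Field(default=6.0, gt=0, category="Memory/Performance")
    nsfw_checker: bool = Field(default=True, category="DEPRECATED")

    class Config:
        validate_assignment = True


legacy_values = {"max_cache_size": -1.0, "nsfw_checker": False}

new = NewConfig()
# keep only fields whose category extra is not DEPRECATED
fields = [
    name
    for name, field in NewConfig.__fields__.items()
    if field.field_info.extra.get("category") != "DEPRECATED"
]
for attr in fields:
    if attr in legacy_values:
        try:
            setattr(new, attr, legacy_values[attr])
        except ValidationError as e:
            print(f"* Ignoring incompatible value for field {attr}:\n  {e}")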

View File

@@ -228,19 +228,19 @@ the root is the InvokeAI ROOTDIR.
 """
 from __future__ import annotations
-import os
 import hashlib
+import os
 import textwrap
-import yaml
+import types
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Literal, Optional, List, Tuple, Union, Dict, Set, Callable, types
 from shutil import rmtree, move
+from typing import Optional, List, Literal, Tuple, Union, Dict, Set, Callable
 import torch
+import yaml
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from pydantic import BaseModel, Field
 import invokeai.backend.util.logging as logger
@@ -259,6 +259,7 @@ from .models import (
     ModelNotFoundException,
     InvalidModelException,
     DuplicateModelException,
+    ModelBase,
 )
 # We are only starting to number the config file with release 3.
@@ -361,7 +362,7 @@ class ModelManager(object):
             if model_key.startswith("_"):
                 continue
             model_name, base_model, model_type = self.parse_key(model_key)
-            model_class = MODEL_CLASSES[base_model][model_type]
+            model_class = self._get_implementation(base_model, model_type)
             # alias for config file
             model_config["model_format"] = model_config.pop("format")
             self.models[model_key] = model_class.create_config(**model_config)
@@ -381,18 +382,24 @@
         # causing otherwise unreferenced models to be removed from memory
         self._read_models()
-    def model_exists(
-        self,
-        model_name: str,
-        base_model: BaseModelType,
-        model_type: ModelType,
-    ) -> bool:
+    def model_exists(self, model_name: str, base_model: BaseModelType, model_type: ModelType, *, rescan=False) -> bool:
         """
-        Given a model name, returns True if it is a valid
-        identifier.
+        Given a model name, returns True if it is a valid identifier.
+
+        :param model_name: symbolic name of the model in models.yaml
+        :param model_type: ModelType enum indicating the type of model to return
+        :param base_model: BaseModelType enum indicating the base model used by this model
+        :param rescan: if True, scan_models_directory
         """
         model_key = self.create_key(model_name, base_model, model_type)
-        return model_key in self.models
+        exists = model_key in self.models
+        # if model not found try to find it (maybe file just pasted)
+        if rescan and not exists:
+            self.scan_models_directory(base_model=base_model, model_type=model_type)
+            exists = self.model_exists(model_name, base_model, model_type, rescan=False)
+        return exists
     @classmethod
     def create_key(
@@ -443,39 +450,32 @@ class ModelManager(object):
         :param model_name: symbolic name of the model in models.yaml
         :param model_type: ModelType enum indicating the type of model to return
         :param base_model: BaseModelType enum indicating the base model used by this model
-        :param submode_typel: an ModelType enum indicating the portion of
+        :param submodel_type: an ModelType enum indicating the portion of
                               the model to retrieve (e.g. ModelType.Vae)
         """
-        model_class = MODEL_CLASSES[base_model][model_type]
         model_key = self.create_key(model_name, base_model, model_type)
-        # if model not found try to find it (maybe file just pasted)
-        if model_key not in self.models:
-            self.scan_models_directory(base_model=base_model, model_type=model_type)
-            if model_key not in self.models:
-                raise ModelNotFoundException(f"Model not found - {model_key}")
-        model_config = self.models[model_key]
-        model_path = self.resolve_model_path(model_config.path)
+        if not self.model_exists(model_name, base_model, model_type, rescan=True):
+            raise ModelNotFoundException(f"Model not found - {model_key}")
+        model_config = self._get_model_config(base_model, model_name, model_type)
+        model_path, is_submodel_override = self._get_model_path(model_config, submodel_type)
+        if is_submodel_override:
+            model_type = submodel_type
+            submodel_type = None
+        model_class = self._get_implementation(base_model, model_type)
         if not model_path.exists():
             if model_class.save_to_config:
                 self.models[model_key].error = ModelError.NotFound
-                raise Exception(f'Files for model "{model_key}" not found')
+                raise Exception(f'Files for model "{model_key}" not found at {model_path}')
             else:
                 self.models.pop(model_key, None)
-                raise ModelNotFoundException(f"Model not found - {model_key}")
+                raise ModelNotFoundException(f'Files for model "{model_key}" not found at {model_path}')
-        # vae/movq override
-        # TODO:
-        if submodel_type is not None and hasattr(model_config, submodel_type):
-            override_path = getattr(model_config, submodel_type)
-            if override_path:
-                model_path = self.resolve_path(override_path)
-                model_type = submodel_type
-                submodel_type = None
-                model_class = MODEL_CLASSES[base_model][model_type]
         # TODO: path
         # TODO: is it accurate to use path as id
@@ -513,6 +513,55 @@ class ModelManager(object):
             _cache=self.cache,
         )
+    def _get_model_path(
+        self, model_config: ModelConfigBase, submodel_type: Optional[SubModelType] = None
+    ) -> (Path, bool):
+        """Extract a model's filesystem path from its config.
+
+        :return: The fully qualified Path of the module (or submodule).
+        """
+        model_path = model_config.path
+        is_submodel_override = False
+
+        # Does the config explicitly override the submodel?
+        if submodel_type is not None and hasattr(model_config, submodel_type):
+            submodel_path = getattr(model_config, submodel_type)
+            if submodel_path is not None:
+                model_path = getattr(model_config, submodel_type)
+                is_submodel_override = True
+
+        model_path = self.resolve_model_path(model_path)
+        return model_path, is_submodel_override
+
+    def _get_model_config(self, base_model: BaseModelType, model_name: str, model_type: ModelType) -> ModelConfigBase:
+        """Get a model's config object."""
+        model_key = self.create_key(model_name, base_model, model_type)
+        try:
+            model_config = self.models[model_key]
+        except KeyError:
+            raise ModelNotFoundException(f"Model not found - {model_key}")
+        return model_config
+
+    def _get_implementation(self, base_model: BaseModelType, model_type: ModelType) -> type[ModelBase]:
+        """Get the concrete implementation class for a specific model type."""
+        model_class = MODEL_CLASSES[base_model][model_type]
+        return model_class
+
+    def _instantiate(
+        self,
+        model_name: str,
+        base_model: BaseModelType,
+        model_type: ModelType,
+        submodel_type: Optional[SubModelType] = None,
+    ) -> ModelBase:
+        """Make a new instance of this model, without loading it."""
+        model_config = self._get_model_config(base_model, model_name, model_type)
+        model_path, is_submodel_override = self._get_model_path(model_config, submodel_type)
+        # FIXME: do non-overriden submodels get the right class?
+        constructor = self._get_implementation(base_model, model_type)
+        instance = constructor(model_path, base_model, model_type)
+        return instance
+
     def model_info(
         self,
         model_name: str,
@@ -546,9 +595,10 @@ class ModelManager(object):
         the combined format of the list_models() method.
         """
         models = self.list_models(base_model, model_type, model_name)
-        if len(models) > 1:
+        if len(models) >= 1:
             return models[0]
-        return None
+        else:
+            return None
     def list_models(
         self,
@@ -660,7 +710,7 @@ class ModelManager(object):
         if path := model_attributes.get("path"):
             model_attributes["path"] = str(self.relative_model_path(Path(path)))
-        model_class = MODEL_CLASSES[base_model][model_type]
+        model_class = self._get_implementation(base_model, model_type)
         model_config = model_class.create_config(**model_attributes)
         model_key = self.create_key(model_name, base_model, model_type)
@@ -851,7 +901,7 @@ class ModelManager(object):
         for model_key, model_config in self.models.items():
             model_name, base_model, model_type = self.parse_key(model_key)
-            model_class = MODEL_CLASSES[base_model][model_type]
+            model_class = self._get_implementation(base_model, model_type)
             if model_class.save_to_config:
                 # TODO: or exclude_unset better fits here?
                 data_to_save[model_key] = model_config.dict(exclude_defaults=True, exclude={"error"})
@@ -909,7 +959,7 @@ class ModelManager(object):
             model_path = self.resolve_model_path(model_config.path).absolute()
             if not model_path.exists():
-                model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
+                model_class = self._get_implementation(cur_base_model, cur_model_type)
                 if model_class.save_to_config:
                     model_config.error = ModelError.NotFound
                     self.models.pop(model_key, None)
@@ -925,7 +975,7 @@ class ModelManager(object):
             for cur_model_type in ModelType:
                 if model_type is not None and cur_model_type != model_type:
                     continue
-                model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
+                model_class = self._get_implementation(cur_base_model, cur_model_type)
                 models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value))
                 if not models_dir.exists():
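Taken together, the ModelManager hunks replace direct MODEL_CLASSES[base_model][model_type] lookups and the inline vae/movq override with small helpers (_get_model_config, _get_model_path, _get_implementation) and give model_exists() an opt-in rescan pass for freshly added files. The following is a stripped-down, self-contained sketch of that lookup flow; ToyManager, ToyModelConfig, and the registry contents are stand-ins, not the real InvokeAI classes.

# Toy sketch of the refactored flow: key -> config -> (path, override flag).
# All names below are illustrative only.
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Tuple


class ModelNotFoundException(Exception):
    pass


@dataclass
class ToyModelConfig:
    path: str
    vae: Optional[str] = None  # optional submodel override, as in models.yaml


class ToyManager:
    def __init__(self, models: Dict[str, ToyModelConfig]):
        self.models = models

    def model_exists(self, key: str, *, rescan: bool = False) -> bool:
        exists = key in self.models
        if rescan and not exists:
            self._scan_models_directory()  # may pick up freshly pasted files
            exists = self.model_exists(key, rescan=False)
        return exists

    def _get_model_config(self, key: str) -> ToyModelConfig:
        try:
            return self.models[key]
        except KeyError:
            raise ModelNotFoundException(f"Model not found - {key}")

    def _get_model_path(self, config: ToyModelConfig, submodel: Optional[str]) -> Tuple[Path, bool]:
        # the config may explicitly override where a submodel lives
        if submodel and getattr(config, submodel, None):
            return Path(getattr(config, submodel)), True
        return Path(config.path), False

    def _scan_models_directory(self) -> None:
        pass  # stand-in for the real directory scan


mgr = ToyManager({"sd-1/main/my-model": ToyModelConfig(path="models/sd-1/main/my-model")})
if mgr.model_exists("sd-1/main/my-model", rescan=True):
    cfg = mgr._get_model_config("sd-1/main/my-model")
    print(mgr._get_model_path(cfg, submodel="vae"))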

View File

@@ -1,9 +1,14 @@
 import os
-import torch
-import safetensors
 from enum import Enum
 from pathlib import Path
-from typing import Optional, Union, Literal
+from typing import Optional
+
+import safetensors
+import torch
+from diffusers.utils import is_safetensors_available
+from omegaconf import OmegaConf
+
+from invokeai.app.services.config import InvokeAIAppConfig
 from .base import (
     ModelBase,
     ModelConfigBase,
@@ -18,9 +23,6 @@ from .base import (
     InvalidModelException,
     ModelNotFoundException,
 )
-from invokeai.app.services.config import InvokeAIAppConfig
-from diffusers.utils import is_safetensors_available
-from omegaconf import OmegaConf
 class VaeModelFormat(str, Enum):
@@ -80,7 +82,7 @@ class VaeModel(ModelBase):
     @classmethod
     def detect_format(cls, path: str):
         if not os.path.exists(path):
-            raise ModelNotFoundException()
+            raise ModelNotFoundException(f"Does not exist as local file: {path}")
         if os.path.isdir(path):
             if os.path.exists(os.path.join(path, "config.json")):
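Besides the import reordering, the vae.py hunk makes the not-found error carry the offending path, and detect_format distinguishes a diffusers folder (containing config.json) from a single checkpoint file. A tiny hedged sketch of that shape of check; detect_vae_format and its return values are hypothetical, not the real class method.

# Hypothetical helper mirroring the detect_format() shape: include the path
# in the error and tell a diffusers folder apart from a checkpoint file.
import os


class ModelNotFoundException(Exception):
    pass


def detect_vae_format(path: str) -> str:
    if not os.path.exists(path):
        raise ModelNotFoundException(f"Does not exist as local file: {path}")
    if os.path.isdir(path):
        if os.path.exists(os.path.join(path, "config.json")):
            return "diffusers"
        raise ModelNotFoundException(f"No config.json found in directory: {path}")
    return "checkpoint"


try:
    print(detect_vae_format("models/sdxl/vae"))  # illustrative path
except ModelNotFoundException as err:
    print(err)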

View File

@@ -96,7 +96,8 @@ export type AppFeature =
   | 'consoleLogging'
   | 'dynamicPrompting'
   | 'batches'
-  | 'syncModels';
+  | 'syncModels'
+  | 'multiselect';
 /**
  * A disable-able Stable Diffusion feature

View File

@@ -9,6 +9,7 @@ import { useListImagesQuery } from 'services/api/endpoints/images';
 import { ImageDTO } from 'services/api/types';
 import { selectionChanged } from '../store/gallerySlice';
 import { imagesSelectors } from 'services/api/util';
+import { useFeatureStatus } from '../../system/hooks/useFeatureStatus';
 const selector = createSelector(
   [stateSelector, selectListImagesBaseQueryArgs],
@@ -33,11 +34,18 @@ export const useMultiselect = (imageDTO?: ImageDTO) => {
     }),
   });
+  const isMultiSelectEnabled = useFeatureStatus('multiselect').isFeatureEnabled;
+
   const handleClick = useCallback(
     (e: MouseEvent<HTMLDivElement>) => {
       if (!imageDTO) {
         return;
       }
+      if (!isMultiSelectEnabled) {
+        dispatch(selectionChanged([imageDTO]));
+        return;
+      }
       if (e.shiftKey) {
         const rangeEndImageName = imageDTO.image_name;
         const lastSelectedImage = selection[selection.length - 1]?.image_name;
@@ -71,7 +79,7 @@ export const useMultiselect = (imageDTO?: ImageDTO) => {
         dispatch(selectionChanged([imageDTO]));
       }
     },
-    [dispatch, imageDTO, imageDTOs, selection]
+    [dispatch, imageDTO, imageDTOs, selection, isMultiSelectEnabled]
   );
   const isSelected = useMemo(

View File

@@ -31,7 +31,7 @@ const ParamLoraCollapse = () => {
   }
   return (
-    <IAICollapse label={'LoRA'} activeLabel={activeLabel}>
+    <IAICollapse label="LoRA" activeLabel={activeLabel}>
       <Flex sx={{ flexDir: 'column', gap: 2 }}>
         <ParamLoRASelect />
         <ParamLoraList />

View File

@@ -1,3 +1,4 @@
+import { Divider } from '@chakra-ui/react';
 import { createSelector } from '@reduxjs/toolkit';
 import { stateSelector } from 'app/store/store';
 import { useAppSelector } from 'app/store/storeHooks';
@@ -8,20 +9,21 @@ import ParamLora from './ParamLora';
 const selector = createSelector(
   stateSelector,
   ({ lora }) => {
-    const { loras } = lora;
-    return { loras };
+    return { lorasArray: map(lora.loras) };
   },
   defaultSelectorOptions
 );
 const ParamLoraList = () => {
-  const { loras } = useAppSelector(selector);
+  const { lorasArray } = useAppSelector(selector);
   return (
     <>
-      {map(loras, (lora) => (
-        <ParamLora key={lora.model_name} lora={lora} />
+      {lorasArray.map((lora, i) => (
+        <>
+          {i > 0 && <Divider key={`${lora.model_name}-divider`} pt={1} />}
+          <ParamLora key={lora.model_name} lora={lora} />
+        </>
       ))}
     </>
   );

View File

@@ -9,7 +9,6 @@ import {
   CLIP_SKIP,
   LORA_LOADER,
   MAIN_MODEL_LOADER,
-  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   POSITIVE_CONDITIONING,
@@ -36,15 +35,11 @@ export const addLoRAsToGraph = (
     | undefined;
   if (loraCount > 0) {
-    // Remove MAIN_MODEL_LOADER unet connection to feed it to LoRAs
+    // Remove modelLoaderNodeId unet connection to feed it to LoRAs
     graph.edges = graph.edges.filter(
       (e) =>
         !(
-          e.source.node_id === MAIN_MODEL_LOADER &&
-          ['unet'].includes(e.source.field)
-        ) &&
-        !(
-          e.source.node_id === ONNX_MODEL_LOADER &&
+          e.source.node_id === modelLoaderNodeId &&
           ['unet'].includes(e.source.field)
         )
     );

View File

@ -0,0 +1,212 @@
import { RootState } from 'app/store/store';
import { NonNullableGraph } from 'features/nodes/types/types';
import { forEach, size } from 'lodash-es';
import {
MetadataAccumulatorInvocation,
SDXLLoraLoaderInvocation,
} from 'services/api/types';
import {
LORA_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
SDXL_MODEL_LOADER,
} from './constants';
export const addSDXLLoRAsToGraph = (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string,
modelLoaderNodeId: string = SDXL_MODEL_LOADER
): void => {
/**
* LoRA nodes get the UNet and CLIP models from the main model loader and apply the LoRA to them.
* They then output the UNet and CLIP models references on to either the next LoRA in the chain,
* or to the inference/conditioning nodes.
*
* So we need to inject a LoRA chain into the graph.
*/
const { loras } = state.lora;
const loraCount = size(loras);
const metadataAccumulator = graph.nodes[METADATA_ACCUMULATOR] as
| MetadataAccumulatorInvocation
| undefined;
if (loraCount > 0) {
// Remove modelLoaderNodeId unet/clip/clip2 connections to feed it to LoRAs
graph.edges = graph.edges.filter(
(e) =>
!(
e.source.node_id === modelLoaderNodeId &&
['unet'].includes(e.source.field)
) &&
!(
e.source.node_id === modelLoaderNodeId &&
['clip'].includes(e.source.field)
) &&
!(
e.source.node_id === modelLoaderNodeId &&
['clip2'].includes(e.source.field)
)
);
}
// we need to remember the last lora so we can chain from it
let lastLoraNodeId = '';
let currentLoraIndex = 0;
forEach(loras, (lora) => {
const { model_name, base_model, weight } = lora;
const currentLoraNodeId = `${LORA_LOADER}_${model_name.replace('.', '_')}`;
const loraLoaderNode: SDXLLoraLoaderInvocation = {
type: 'sdxl_lora_loader',
id: currentLoraNodeId,
is_intermediate: true,
lora: { model_name, base_model },
weight,
};
// add the lora to the metadata accumulator
if (metadataAccumulator) {
metadataAccumulator.loras.push({
lora: { model_name, base_model },
weight,
});
}
// add to graph
graph.nodes[currentLoraNodeId] = loraLoaderNode;
if (currentLoraIndex === 0) {
// first lora = start the lora chain, attach directly to model loader
graph.edges.push({
source: {
node_id: modelLoaderNodeId,
field: 'unet',
},
destination: {
node_id: currentLoraNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: modelLoaderNodeId,
field: 'clip',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: modelLoaderNodeId,
field: 'clip2',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip2',
},
});
} else {
// we are in the middle of the lora chain, instead connect to the previous lora
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'unet',
},
destination: {
node_id: currentLoraNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'clip',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: lastLoraNodeId,
field: 'clip2',
},
destination: {
node_id: currentLoraNodeId,
field: 'clip2',
},
});
}
if (currentLoraIndex === loraCount - 1) {
// final lora, end the lora chain - we need to connect up to inference and conditioning nodes
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'unet',
},
destination: {
node_id: baseNodeId,
field: 'unet',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip2',
},
destination: {
node_id: POSITIVE_CONDITIONING,
field: 'clip2',
},
});
graph.edges.push({
source: {
node_id: currentLoraNodeId,
field: 'clip2',
},
destination: {
node_id: NEGATIVE_CONDITIONING,
field: 'clip2',
},
});
}
// increment the lora for the next one in the chain
lastLoraNodeId = currentLoraNodeId;
currentLoraIndex += 1;
});
};

View File

@ -22,6 +22,7 @@ import {
SDXL_LATENTS_TO_LATENTS, SDXL_LATENTS_TO_LATENTS,
SDXL_MODEL_LOADER, SDXL_MODEL_LOADER,
} from './constants'; } from './constants';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
/** /**
* Builds the Image to Image tab graph. * Builds the Image to Image tab graph.
@ -364,6 +365,8 @@ export const buildLinearSDXLImageToImageGraph = (
}, },
}); });
addSDXLLoRAsToGraph(state, graph, SDXL_LATENTS_TO_LATENTS, SDXL_MODEL_LOADER);
// Add Refiner if enabled // Add Refiner if enabled
if (shouldUseSDXLRefiner) { if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, SDXL_LATENTS_TO_LATENTS); addSDXLRefinerToGraph(state, graph, SDXL_LATENTS_TO_LATENTS);

View File

@ -4,6 +4,7 @@ import { NonNullableGraph } from 'features/nodes/types/types';
import { initialGenerationState } from 'features/parameters/store/generationSlice'; import { initialGenerationState } from 'features/parameters/store/generationSlice';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph'; import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph'; import { addNSFWCheckerToGraph } from './addNSFWCheckerToGraph';
import { addSDXLLoRAsToGraph } from './addSDXLLoRAstoGraph';
import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph'; import { addSDXLRefinerToGraph } from './addSDXLRefinerToGraph';
import { addWatermarkerToGraph } from './addWatermarkerToGraph'; import { addWatermarkerToGraph } from './addWatermarkerToGraph';
import { import {
@ -246,6 +247,8 @@ export const buildLinearSDXLTextToImageGraph = (
}, },
}); });
addSDXLLoRAsToGraph(state, graph, SDXL_TEXT_TO_LATENTS, SDXL_MODEL_LOADER);
// Add Refiner if enabled // Add Refiner if enabled
if (shouldUseSDXLRefiner) { if (shouldUseSDXLRefiner) {
addSDXLRefinerToGraph(state, graph, SDXL_TEXT_TO_LATENTS); addSDXLRefinerToGraph(state, graph, SDXL_TEXT_TO_LATENTS);

View File

@ -4,6 +4,7 @@ import ProcessButtons from 'features/parameters/components/ProcessButtons/Proces
import ParamSDXLPromptArea from './ParamSDXLPromptArea'; import ParamSDXLPromptArea from './ParamSDXLPromptArea';
import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse';
import SDXLImageToImageTabCoreParameters from './SDXLImageToImageTabCoreParameters'; import SDXLImageToImageTabCoreParameters from './SDXLImageToImageTabCoreParameters';
import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse';
const SDXLImageToImageTabParameters = () => { const SDXLImageToImageTabParameters = () => {
return ( return (
@ -12,6 +13,7 @@ const SDXLImageToImageTabParameters = () => {
<ProcessButtons /> <ProcessButtons />
<SDXLImageToImageTabCoreParameters /> <SDXLImageToImageTabCoreParameters />
<ParamSDXLRefinerCollapse /> <ParamSDXLRefinerCollapse />
<ParamLoraCollapse />
<ParamDynamicPromptsCollapse /> <ParamDynamicPromptsCollapse />
<ParamNoiseCollapse /> <ParamNoiseCollapse />
</> </>

View File

@ -4,6 +4,7 @@ import ProcessButtons from 'features/parameters/components/ProcessButtons/Proces
import TextToImageTabCoreParameters from 'features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters'; import TextToImageTabCoreParameters from 'features/ui/components/tabs/TextToImage/TextToImageTabCoreParameters';
import ParamSDXLPromptArea from './ParamSDXLPromptArea'; import ParamSDXLPromptArea from './ParamSDXLPromptArea';
import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse'; import ParamSDXLRefinerCollapse from './ParamSDXLRefinerCollapse';
import ParamLoraCollapse from 'features/lora/components/ParamLoraCollapse';
const SDXLTextToImageTabParameters = () => { const SDXLTextToImageTabParameters = () => {
return ( return (
@ -12,6 +13,7 @@ const SDXLTextToImageTabParameters = () => {
<ProcessButtons /> <ProcessButtons />
<TextToImageTabCoreParameters /> <TextToImageTabCoreParameters />
<ParamSDXLRefinerCollapse /> <ParamSDXLRefinerCollapse />
<ParamLoraCollapse />
<ParamDynamicPromptsCollapse /> <ParamDynamicPromptsCollapse />
<ParamNoiseCollapse /> <ParamNoiseCollapse />
</> </>

View File

@ -4,6 +4,7 @@ import {
ASSETS_CATEGORIES, ASSETS_CATEGORIES,
BoardId, BoardId,
IMAGE_CATEGORIES, IMAGE_CATEGORIES,
IMAGE_LIMIT,
} from 'features/gallery/store/types'; } from 'features/gallery/store/types';
import { keyBy } from 'lodash'; import { keyBy } from 'lodash';
import { ApiFullTagDescription, LIST_TAG, api } from '..'; import { ApiFullTagDescription, LIST_TAG, api } from '..';
@ -167,7 +168,14 @@ export const imagesApi = api.injectEndpoints({
}, },
}; };
}, },
invalidatesTags: (result, error, imageDTOs) => [], invalidatesTags: (result, error, { imageDTOs }) => {
// for now, assume bulk delete is all on one board
const boardId = imageDTOs[0]?.board_id;
return [
{ type: 'BoardImagesTotal', id: boardId ?? 'none' },
{ type: 'BoardAssetsTotal', id: boardId ?? 'none' },
];
},
async onQueryStarted({ imageDTOs }, { dispatch, queryFulfilled }) { async onQueryStarted({ imageDTOs }, { dispatch, queryFulfilled }) {
/** /**
* Cache changes for `deleteImages`: * Cache changes for `deleteImages`:
@ -889,18 +897,25 @@ export const imagesApi = api.injectEndpoints({
board_id, board_id,
}, },
}), }),
invalidatesTags: (result, error, { board_id }) => [ invalidatesTags: (result, error, { imageDTOs, board_id }) => {
// update the destination board //assume all images are being moved from one board for now
{ type: 'Board', id: board_id ?? 'none' }, const oldBoardId = imageDTOs[0]?.board_id;
// update old board totals return [
{ type: 'BoardImagesTotal', id: board_id ?? 'none' }, // update the destination board
{ type: 'BoardAssetsTotal', id: board_id ?? 'none' }, { type: 'Board', id: board_id ?? 'none' },
// update the no_board totals // update new board totals
{ type: 'BoardImagesTotal', id: 'none' }, { type: 'BoardImagesTotal', id: board_id ?? 'none' },
{ type: 'BoardAssetsTotal', id: 'none' }, { type: 'BoardAssetsTotal', id: board_id ?? 'none' },
], // update old board totals
{ type: 'BoardImagesTotal', id: oldBoardId ?? 'none' },
{ type: 'BoardAssetsTotal', id: oldBoardId ?? 'none' },
// update the no_board totals
{ type: 'BoardImagesTotal', id: 'none' },
{ type: 'BoardAssetsTotal', id: 'none' },
];
},
async onQueryStarted( async onQueryStarted(
{ board_id, imageDTOs }, { board_id: new_board_id, imageDTOs },
{ dispatch, queryFulfilled, getState } { dispatch, queryFulfilled, getState }
) { ) {
try { try {
@ -920,7 +935,7 @@ export const imagesApi = api.injectEndpoints({
'getImageDTO', 'getImageDTO',
image_name, image_name,
(draft) => { (draft) => {
draft.board_id = board_id; draft.board_id = new_board_id;
} }
) )
); );
@ -946,7 +961,7 @@ export const imagesApi = api.injectEndpoints({
); );
const queryArgs = { const queryArgs = {
board_id, board_id: new_board_id,
categories, categories,
}; };
@ -954,23 +969,24 @@ export const imagesApi = api.injectEndpoints({
queryArgs queryArgs
)(getState()); )(getState());
const { data: total } = IMAGE_CATEGORIES.includes( const { data: previousTotal } = IMAGE_CATEGORIES.includes(
imageDTO.image_category imageDTO.image_category
) )
? boardsApi.endpoints.getBoardImagesTotal.select( ? boardsApi.endpoints.getBoardImagesTotal.select(
imageDTO.board_id ?? 'none' new_board_id ?? 'none'
)(getState()) )(getState())
: boardsApi.endpoints.getBoardAssetsTotal.select( : boardsApi.endpoints.getBoardAssetsTotal.select(
imageDTO.board_id ?? 'none' new_board_id ?? 'none'
)(getState()); )(getState());
const isCacheFullyPopulated = const isCacheFullyPopulated =
currentCache.data && currentCache.data.ids.length >= (total ?? 0); currentCache.data &&
currentCache.data.ids.length >= (previousTotal ?? 0);
const isInDateRange = getIsImageInDateRange( const isInDateRange =
currentCache.data, (previousTotal || 0) >= IMAGE_LIMIT
imageDTO ? getIsImageInDateRange(currentCache.data, imageDTO)
); : true;
if (isCacheFullyPopulated || isInDateRange) { if (isCacheFullyPopulated || isInDateRange) {
// *upsert* to $cache // *upsert* to $cache
@ -981,7 +997,7 @@ export const imagesApi = api.injectEndpoints({
(draft) => { (draft) => {
imagesAdapter.upsertOne(draft, { imagesAdapter.upsertOne(draft, {
...imageDTO, ...imageDTO,
board_id, board_id: new_board_id,
}); });
} }
) )
@ -1097,10 +1113,10 @@ export const imagesApi = api.injectEndpoints({
const isCacheFullyPopulated = const isCacheFullyPopulated =
currentCache.data && currentCache.data.ids.length >= (total ?? 0); currentCache.data && currentCache.data.ids.length >= (total ?? 0);
const isInDateRange = getIsImageInDateRange( const isInDateRange =
currentCache.data, (total || 0) >= IMAGE_LIMIT
imageDTO ? getIsImageInDateRange(currentCache.data, imageDTO)
); : true;
if (isCacheFullyPopulated || isInDateRange) { if (isCacheFullyPopulated || isInDateRange) {
// *upsert* to $cache // *upsert* to $cache
@ -1111,7 +1127,7 @@ export const imagesApi = api.injectEndpoints({
(draft) => { (draft) => {
imagesAdapter.upsertOne(draft, { imagesAdapter.upsertOne(draft, {
...imageDTO, ...imageDTO,
board_id: undefined, board_id: 'none',
}); });
} }
) )

View File

@ -1443,7 +1443,7 @@ export type components = {
* @description The nodes in this graph * @description The nodes in this graph
*/ */
nodes?: { nodes?: {
[key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | 
components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined; [key: string]: (components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined;
}; };
/** /**
* Edges * Edges
@ -1486,7 +1486,7 @@ export type components = {
* @description The results of node executions * @description The results of node executions
*/ */
results: { results: {
[key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined; [key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["SDXLLoraLoaderOutput"] | components["schemas"]["VaeLoaderOutput"] | components["schemas"]["MetadataAccumulatorOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["ClipSkipInvocationOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["ONNXModelLoaderOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined;
}; };
/** /**
* Errors * Errors
@ -1904,6 +1904,40 @@ export type components = {
*/ */
image_name: string; image_name: string;
}; };
/**
* ImageHueAdjustmentInvocation
* @description Adjusts the Hue of an image.
*/
ImageHueAdjustmentInvocation: {
/**
* Id
* @description The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Is Intermediate
* @description Whether or not this node is an intermediate node.
* @default false
*/
is_intermediate?: boolean;
/**
* Type
* @default img_hue_adjust
* @enum {string}
*/
type?: "img_hue_adjust";
/**
* Image
* @description The image to adjust
*/
image?: components["schemas"]["ImageField"];
/**
* Hue
* @description The degrees by which to rotate the hue, 0-360
* @default 0
*/
hue?: number;
};
/** /**
* ImageInverseLerpInvocation * ImageInverseLerpInvocation
* @description Inverse linear interpolation of all pixels of an image * @description Inverse linear interpolation of all pixels of an image
@ -1984,6 +2018,40 @@ export type components = {
*/ */
max?: number; max?: number;
}; };
/**
* ImageLuminosityAdjustmentInvocation
* @description Adjusts the Luminosity (Value) of an image.
*/
ImageLuminosityAdjustmentInvocation: {
/**
* Id
* @description The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Is Intermediate
* @description Whether or not this node is an intermediate node.
* @default false
*/
is_intermediate?: boolean;
/**
* Type
* @default img_luminosity_adjust
* @enum {string}
*/
type?: "img_luminosity_adjust";
/**
* Image
* @description The image to adjust
*/
image?: components["schemas"]["ImageField"];
/**
* Luminosity
* @description The factor by which to adjust the luminosity (value)
* @default 1
*/
luminosity?: number;
};
/** /**
* ImageMetadata * ImageMetadata
* @description An image's generation metadata * @description An image's generation metadata
@ -2239,6 +2307,40 @@ export type components = {
*/ */
resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos"; resample_mode?: "nearest" | "box" | "bilinear" | "hamming" | "bicubic" | "lanczos";
}; };
/**
* ImageSaturationAdjustmentInvocation
* @description Adjusts the Saturation of an image.
*/
ImageSaturationAdjustmentInvocation: {
/**
* Id
* @description The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Is Intermediate
* @description Whether or not this node is an intermediate node.
* @default false
*/
is_intermediate?: boolean;
/**
* Type
* @default img_saturation_adjust
* @enum {string}
*/
type?: "img_saturation_adjust";
/**
* Image
* @description The image to adjust
*/
image?: components["schemas"]["ImageField"];
/**
* Saturation
* @description The factor by which to adjust the saturation
* @default 1
*/
saturation?: number;
};
/** /**
* ImageScaleInvocation * ImageScaleInvocation
* @description Scales an image by a factor * @description Scales an image by a factor
@ -4912,6 +5014,82 @@ export type components = {
*/ */
denoising_end?: number; denoising_end?: number;
}; };
/**
* SDXLLoraLoaderInvocation
* @description Apply selected lora to unet and text_encoder.
*/
SDXLLoraLoaderInvocation: {
/**
* Id
* @description The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Is Intermediate
* @description Whether or not this node is an intermediate node.
* @default false
*/
is_intermediate?: boolean;
/**
* Type
* @default sdxl_lora_loader
* @enum {string}
*/
type?: "sdxl_lora_loader";
/**
* Lora
* @description Lora model name
*/
lora?: components["schemas"]["LoRAModelField"];
/**
* Weight
* @description With what weight to apply lora
* @default 0.75
*/
weight?: number;
/**
* Unet
* @description UNet model for applying lora
*/
unet?: components["schemas"]["UNetField"];
/**
* Clip
* @description Clip model for applying lora
*/
clip?: components["schemas"]["ClipField"];
/**
* Clip2
* @description Clip2 model for applying lora
*/
clip2?: components["schemas"]["ClipField"];
};
/**
* SDXLLoraLoaderOutput
* @description Model loader output
*/
SDXLLoraLoaderOutput: {
/**
* Type
* @default sdxl_lora_loader_output
* @enum {string}
*/
type?: "sdxl_lora_loader_output";
/**
* Unet
* @description UNet submodel
*/
unet?: components["schemas"]["UNetField"];
/**
* Clip
* @description Tokenizer and text_encoder submodels
*/
clip?: components["schemas"]["ClipField"];
/**
* Clip2
* @description Tokenizer2 and text_encoder2 submodels
*/
clip2?: components["schemas"]["ClipField"];
};
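// Usage sketch: an sdxl_lora_loader node per the schemas above. Only scalar fields are set
// here; lora, unet, clip and clip2 are optional in the schema and, in a session graph, the
// model fields would typically be supplied via edges from an SDXL model loader node whose
// output mirrors the unet/clip/clip2 fields of SDXLLoraLoaderOutput. Import path assumed.
import type { components } from "services/api/schema"; // path assumed

const sdxlLora: components["schemas"]["SDXLLoraLoaderInvocation"] = {
  id: "sdxl_lora_1",
  type: "sdxl_lora_loader",
  weight: 0.75,   // matches the schema default; scales how strongly the LoRA is applied
};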
/** /**
* SDXLModelLoaderInvocation * SDXLModelLoaderInvocation
* @description Loads an sdxl base model, outputting its submodels. * @description Loads an sdxl base model, outputting its submodels.
@ -5961,6 +6139,24 @@ export type components = {
*/ */
image?: components["schemas"]["ImageField"]; image?: components["schemas"]["ImageField"];
}; };
/**
* ControlNetModelFormat
* @description An enumeration.
* @enum {string}
*/
ControlNetModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusionXLModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusion1ModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
/** /**
* StableDiffusionOnnxModelFormat * StableDiffusionOnnxModelFormat
* @description An enumeration. * @description An enumeration.
@ -5973,24 +6169,6 @@ export type components = {
* @enum {string} * @enum {string}
*/ */
StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; StableDiffusion2ModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusion1ModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusion1ModelFormat: "checkpoint" | "diffusers";
/**
* StableDiffusionXLModelFormat
* @description An enumeration.
* @enum {string}
*/
StableDiffusionXLModelFormat: "checkpoint" | "diffusers";
/**
* ControlNetModelFormat
* @description An enumeration.
* @enum {string}
*/
ControlNetModelFormat: "checkpoint" | "diffusers";
}; };
responses: never; responses: never;
parameters: never; parameters: never;
@ -6101,7 +6279,7 @@ export type operations = {
}; };
requestBody: { requestBody: {
content: { content: {
"application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | 
components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
}; };
}; };
responses: { responses: {
@ -6138,7 +6316,7 @@ export type operations = {
}; };
requestBody: { requestBody: {
content: { content: {
"application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | 
components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; "application/json": components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["SDXLLoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["SDXLRawPromptInvocation"] | components["schemas"]["SDXLRefinerRawPromptInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["ImageLuminosityAdjustmentInvocation"] | components["schemas"]["ImageSaturationAdjustmentInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["SDXLTextToLatentsInvocation"] | components["schemas"]["SDXLLatentsToLatentsInvocation"] | components["schemas"]["ONNXPromptInvocation"] | components["schemas"]["ONNXTextToLatentsInvocation"] | components["schemas"]["ONNXLatentsToImageInvocation"] | components["schemas"]["ONNXSD1ModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["ParamStringInvocation"] | components["schemas"]["ParamPromptInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ESRGANInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
}; };
}; };
responses: { responses: {

View File

@ -166,6 +166,9 @@ export type OnnxModelLoaderInvocation = TypeReq<
export type LoraLoaderInvocation = TypeReq< export type LoraLoaderInvocation = TypeReq<
components['schemas']['LoraLoaderInvocation'] components['schemas']['LoraLoaderInvocation']
>; >;
export type SDXLLoraLoaderInvocation = TypeReq<
components['schemas']['SDXLLoraLoaderInvocation']
>;
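// Usage sketch: like the LoraLoaderInvocation alias above, the new SDXLLoraLoaderInvocation
// export wraps the schema type in TypeReq, which is assumed (as the name suggests) to make
// the otherwise-optional `type` discriminator required when the frontend builds graph nodes.
const sdxlLoraNode: SDXLLoraLoaderInvocation = {
  type: 'sdxl_lora_loader',   // required on the TypeReq-wrapped type (assumption)
  id: 'sdxl_lora_1',
  weight: 0.75,
};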
export type MetadataAccumulatorInvocation = TypeReq< export type MetadataAccumulatorInvocation = TypeReq<
components['schemas']['MetadataAccumulatorInvocation'] components['schemas']['MetadataAccumulatorInvocation']
>; >;

View File

@ -100,7 +100,7 @@ dependencies = [
"dev" = [ "dev" = [
"pudb", "pudb",
] ]
"test" = ["pytest>6.0.0", "pytest-cov", "black"] "test" = ["pytest>6.0.0", "pytest-cov", "pytest-datadir", "black"]
"xformers" = [ "xformers" = [
"xformers~=0.0.19; sys_platform!='darwin'", "xformers~=0.0.19; sys_platform!='darwin'",
"triton; sys_platform=='linux'", "triton; sys_platform=='linux'",

View File

@ -0,0 +1,38 @@
from pathlib import Path

import pytest

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend import ModelManager, BaseModelType, ModelType, SubModelType

BASIC_MODEL_NAME = ("SDXL base", BaseModelType.StableDiffusionXL, ModelType.Main)
VAE_OVERRIDE_MODEL_NAME = ("SDXL with VAE", BaseModelType.StableDiffusionXL, ModelType.Main)


@pytest.fixture
def model_manager(datadir) -> ModelManager:
    # `datadir` is supplied by pytest-datadir (added to the "test" extras above) and points
    # at a per-test copy of this module's data directory.
    InvokeAIAppConfig.get_config(root=datadir)
    return ModelManager(datadir / "configs" / "relative_sub.models.yaml")


def test_get_model_names(model_manager: ModelManager):
    names = model_manager.model_names()
    assert names[:2] == [BASIC_MODEL_NAME, VAE_OVERRIDE_MODEL_NAME]


def test_get_model_path_for_diffusers(model_manager: ModelManager, datadir: Path):
    model_config = model_manager._get_model_config(BASIC_MODEL_NAME[1], BASIC_MODEL_NAME[0], BASIC_MODEL_NAME[2])
    top_model_path, is_override = model_manager._get_model_path(model_config)
    expected_model_path = datadir / "models" / "sdxl" / "main" / "SDXL base 1_0"
    assert top_model_path == expected_model_path
    assert not is_override


def test_get_model_path_for_overridden_vae(model_manager: ModelManager, datadir: Path):
    model_config = model_manager._get_model_config(
        VAE_OVERRIDE_MODEL_NAME[1], VAE_OVERRIDE_MODEL_NAME[0], VAE_OVERRIDE_MODEL_NAME[2]
    )
    vae_model_path, is_override = model_manager._get_model_path(model_config, SubModelType.Vae)
    expected_vae_path = datadir / "models" / "sdxl" / "vae" / "sdxl-vae-fp16-fix"
    assert vae_model_path == expected_vae_path
    assert is_override

View File

@ -0,0 +1,15 @@
__metadata__:
  version: 3.0.0

sdxl/main/SDXL base:
  path: sdxl/main/SDXL base 1_0
  description: SDXL base v1.0
  variant: normal
  format: diffusers

sdxl/main/SDXL with VAE:
  path: sdxl/main/SDXL base 1_0
  description: SDXL with customized VAE
  vae: sdxl/vae/sdxl-vae-fp16-fix/
  variant: normal
  format: diffusers