Compare commits

...

50 Commits

SHA1 Message Date
e8299d0abb Comment out erroneously removed del statement, comment out opt tests 2023-07-18 23:23:34 -04:00
a28ab654ef Setup dist folder 2023-07-18 23:18:46 -04:00
8699fd7050 Fix invoke UI graphs for onnx 2023-07-18 23:16:51 -04:00
9e65470ada Setup dist 2023-07-18 23:07:31 -04:00
f4e52fafac Fix as part of merging main in 2023-07-18 23:05:33 -04:00
ee7b36cea5 Merge branch 'main' into onnx-testing 2023-07-18 22:56:41 -04:00
487455ef2e Add model_type to the model state object 2023-07-18 22:40:27 -04:00
e201ad2f51 Switch to io_binding for run, testing different session options 2023-07-18 21:54:54 -04:00
869f418b03 Setup onnx on linear text2image 2023-07-18 14:27:54 -04:00
35d5ef9118 Emit step completions 2023-07-18 12:35:07 -04:00
bcce70fca6 Testing different session opts, added timings for testing 2023-07-17 16:27:33 -04:00
932112b640 testing being super wasteful with data 2023-07-16 00:17:33 -04:00
91112167b1 Fix syntax err 2023-07-15 23:56:48 -04:00
bd7b59910d Testing onnx in new ui updates 2023-07-14 14:24:15 -04:00
524888bf3b Merge branch 'main' into feat/onnx 2023-07-13 14:23:57 -04:00
0327eae509 chore: Regen API 2023-06-23 05:21:06 +12:00
bb85608890 Merge branch 'main' into feat/onnx 2023-06-23 05:18:41 +12:00
6c7668aaca Update onnx model structure, change code accordingly 2023-06-22 20:03:17 +03:00
7759b3f75a Small refactor 2023-06-21 04:24:25 +03:00
4d337f6abc ONNX Model/runtime first implementation 2023-06-21 02:12:21 +03:00
92c86fd0b8 Set model type to const value in openapi schema, add model format enums to model schema (as they are not referenced in the case of a Literal definition) 2023-06-20 03:44:58 +03:00
46dc751139 Update model format field to use enums 2023-06-20 03:30:09 +03:00
4cefe37723 Rename format to model_format (still named format when working with the config) 2023-06-20 03:25:08 +03:00
82b73c50a0 Remove default model logic 2023-06-20 03:13:10 +03:00
7df7a95299 Merge branch 'main' into model-manager-ui-30 2023-06-19 23:26:11 +12:00
85b4b359c2 tweak: UI colors 2023-06-19 23:16:14 +12:00
cfe81b5e00 fix: Adjust the Scheduler select width
So the long names do not get cut off.
2023-06-19 23:05:32 +12:00
b0c4451324 Merge branch 'main' into model-manager-ui-30 2023-06-19 23:02:59 +12:00
d4931522d4 Merge branch 'main' into model-manager-ui-30 2023-06-19 22:53:13 +12:00
17e2a35228 fix: merge conflicts 2023-06-18 22:25:48 +12:00
91016d8b29 Merge branch 'main' into model-manager-ui-30 2023-06-18 22:23:18 +12:00
9fda21cf40 Revert "feat: Port Schedulers to Mantine"
This reverts commit e0c105f413.
2023-06-18 22:22:56 +12:00
809ec7163e fix: Remove type from Model type name 2023-06-18 19:41:30 +12:00
7c9a939b47 fix: Deserialization key issue 2023-06-18 19:38:15 +12:00
9634c96020 revert: getModels to receivedModels 2023-06-18 19:35:46 +12:00
e0c105f413 feat: Port Schedulers to Mantine 2023-06-18 19:31:53 +12:00
f0bf32c476 Merge branch 'main' into model-manager-ui-30 2023-06-18 17:37:34 +12:00
28373dbb98 cleanup: Updated model slice names to be more descriptive
Basically updated all slices to be more descriptive in their names, to make sure there's a good naming scheme available for secondary models.
2023-06-18 17:36:23 +12:00
4133d77772 wip: Move Model Selector to own file 2023-06-18 09:19:13 +12:00
61c426f502 feat: Enable 2.x Model Generation in Linear UI 2023-06-18 08:27:13 +12:00
bf0577c882 fix: 2.1 models breaking generation
Co-Authored-By: StAlKeR7779 <7768370+StAlKeR7779@users.noreply.github.com>
2023-06-18 08:26:25 +12:00
24673fd859 chore: Rebuild API - base_model and type added 2023-06-18 07:50:28 +12:00
dc669d1447 Add name, base_model, type fields to model info 2023-06-17 22:48:44 +03:00
ce4110b9f4 wip: Add 2.x Models to the Model List 2023-06-18 07:01:44 +12:00
0f3b7d2b3d chore: Rebuild API with new Model API names 2023-06-18 03:00:16 +12:00
16dc78f6c6 Generate config names for openapi 2023-06-17 17:15:36 +03:00
7a66856785 wip: Update Linear UI Txt2Img and Img2Img Graphs
Update the text-to-image and image-to-image graphs to work with the new model loader. Currently only supports 1.x models; will update this soon to work with all models.
2023-06-18 01:38:01 +12:00
c8dfa49d86 fix: Update missing name types to new names 2023-06-17 22:04:28 +12:00
76dd749b1e chore: Rebuild API 2023-06-17 21:29:32 +12:00
67d05d2066 chore: Update model config type names 2023-06-17 21:28:43 +12:00
178 changed files with 6773 additions and 403 deletions

View File

@@ -1,6 +1,14 @@
from typing import Literal, Optional, Union, List, Annotated
from pydantic import BaseModel, Field
import re
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
from .model import ClipField
from ...backend.util.devices import torch_dtype
from ...backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent
from ...backend.model_management import BaseModelType, ModelType, SubModelType, ModelPatcher
import torch
from compel import Compel, ReturnedEmbeddingsType
from compel.prompt_parser import (Blend, Conjunction,

View File

@@ -22,7 +22,8 @@ from ...backend.stable_diffusion.diffusers_pipeline import (
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import \
PostprocessingSettings
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import torch_dtype
from ...backend.util.devices import choose_torch_device, torch_dtype
from ...backend.model_management import ModelPatcher
from ..models.image import ImageCategory, ImageField, ResourceOrigin
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
InvocationConfig, InvocationContext)

View File

@@ -54,6 +54,7 @@ class MainModelField(BaseModel):
model_name: str = Field(description="Name of the model")
base_model: BaseModelType = Field(description="Base model")
model_type: ModelType = Field(description="Model Type")
class LoRAModelField(BaseModel):
@@ -221,6 +222,9 @@ class LoraLoaderInvocation(BaseInvocation):
base_model = self.lora.base_model
lora_name = self.lora.model_name
# TODO: ui rewrite
base_model = BaseModelType.StableDiffusion1
if not context.services.model_manager.model_exists(
base_model=base_model,
model_name=lora_name,

View File

@@ -0,0 +1,591 @@
# Copyright (c) 2023 Borisov Sergey (https://github.com/StAlKeR7779)
from contextlib import ExitStack
from typing import List, Literal, Optional, Union
import re
import inspect
from pydantic import BaseModel, Field, validator
import torch
import numpy as np
from diffusers import ControlNetModel, DPMSolverMultistepScheduler
from diffusers.image_processor import VaeImageProcessor
from diffusers.schedulers import SchedulerMixin as Scheduler
from ..models.image import ImageCategory, ImageField, ResourceOrigin
from ...backend.model_management import ONNXModelPatcher
from ...backend.util import choose_torch_device
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
InvocationConfig, InvocationContext)
from .compel import ConditioningField
from .controlnet_image_processors import ControlField
from .image import ImageOutput
from .model import ModelInfo, UNetField, VaeField
from invokeai.app.invocations.metadata import CoreMetadata
from invokeai.backend import BaseModelType, ModelType, SubModelType
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from ...backend.stable_diffusion import PipelineIntermediateState
from tqdm import tqdm
from .model import ClipField
from .latent import LatentsField, LatentsOutput, build_latents_output, get_scheduler, SAMPLER_NAME_VALUES
from .compel import CompelOutput
ORT_TO_NP_TYPE = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.int8,
"tensor(uint8)": np.uint8,
"tensor(int16)": np.int16,
"tensor(uint16)": np.uint16,
"tensor(int32)": np.int32,
"tensor(uint32)": np.uint32,
"tensor(int64)": np.int64,
"tensor(uint64)": np.uint64,
"tensor(float16)": np.float16,
"tensor(float)": np.float32,
"tensor(double)": np.float64,
}
class ONNXPromptInvocation(BaseInvocation):
type: Literal["prompt_onnx"] = "prompt_onnx"
prompt: str = Field(default="", description="Prompt")
clip: ClipField = Field(None, description="Clip to use")
def invoke(self, context: InvocationContext) -> CompelOutput:
tokenizer_info = context.services.model_manager.get_model(
**self.clip.tokenizer.dict(),
)
text_encoder_info = context.services.model_manager.get_model(
**self.clip.text_encoder.dict(),
)
with tokenizer_info as orig_tokenizer,\
text_encoder_info as text_encoder,\
ExitStack() as stack:
#loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.clip.loras]
loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]
ti_list = []
for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", self.prompt):
name = trigger[1:-1]
try:
ti_list.append(
#stack.enter_context(
# context.services.model_manager.get_model(
# model_name=name,
# base_model=self.clip.text_encoder.base_model,
# model_type=ModelType.TextualInversion,
# )
#)
context.services.model_manager.get_model(
model_name=name,
base_model=self.clip.text_encoder.base_model,
model_type=ModelType.TextualInversion,
).context.model
)
except Exception:
#print(e)
#import traceback
#print(traceback.format_exc())
print(f"Warn: trigger: \"{trigger}\" not found")
with ONNXModelPatcher.apply_lora_text_encoder(text_encoder, loras),\
ONNXModelPatcher.apply_ti(orig_tokenizer, text_encoder, ti_list) as (tokenizer, ti_manager):
text_encoder.create_session()
# copy from
# https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L153
text_inputs = tokenizer(
self.prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="np",
)
text_input_ids = text_inputs.input_ids
"""
untruncated_ids = tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
if not np.array_equal(text_input_ids, untruncated_ids):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
"""
prompt_embeds = text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
text_encoder.release_session()
conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning"
# TODO: hacky but works ;D maybe rename latents somehow?
context.services.latents.save(conditioning_name, (prompt_embeds, None))
return CompelOutput(
conditioning=ConditioningField(
conditioning_name=conditioning_name,
),
)
# Text to image
class ONNXTextToLatentsInvocation(BaseInvocation):
"""Generates latents from conditionings."""
type: Literal["t2l_onnx"] = "t2l_onnx"
# Inputs
# fmt: off
positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation")
negative_conditioning: Optional[ConditioningField] = Field(description="Negative conditioning for generation")
noise: Optional[LatentsField] = Field(description="The noise to use")
steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
cfg_scale: Union[float, List[float]] = Field(default=7.5, ge=1, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use" )
unet: UNetField = Field(default=None, description="UNet submodel")
#control: Union[ControlField, list[ControlField]] = Field(default=None, description="The control to use")
#seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
#seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
# fmt: on
@validator("cfg_scale")
def ge_one(cls, v):
"""validate that all cfg_scale values are >= 1"""
if isinstance(v, list):
for i in v:
if i < 1:
raise ValueError('cfg_scale must be greater than or equal to 1')
else:
if v < 1:
raise ValueError('cfg_scale must be greater than or equal to 1')
return v
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents"],
"type_hints": {
"model": "model",
# "cfg_scale": "float",
"cfg_scale": "number"
}
},
}
# based on
# https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375
def invoke(self, context: InvocationContext) -> LatentsOutput:
c, _ = context.services.latents.get(self.positive_conditioning.conditioning_name)
uc, _ = context.services.latents.get(self.negative_conditioning.conditioning_name)
graph_execution_state = context.services.graph_execution_manager.get(
context.graph_execution_state_id)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
if isinstance(c, torch.Tensor):
c = c.cpu().numpy()
if isinstance(uc, torch.Tensor):
uc = uc.cpu().numpy()
device = torch.device(choose_torch_device())
prompt_embeds = np.concatenate([uc, c])
latents = context.services.latents.get(self.noise.latents_name)
if isinstance(latents, torch.Tensor):
latents = latents.cpu().numpy()
# TODO: better execution device handling
latents = latents.astype(np.float16)
# get the initial random noise unless the user supplied it
do_classifier_free_guidance = True
#latents_dtype = prompt_embeds.dtype
#latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8)
#if latents.shape != latents_shape:
# raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
scheduler = get_scheduler(
context=context,
scheduler_info=self.unet.scheduler,
scheduler_name=self.scheduler,
)
def torch2numpy(latent: torch.Tensor):
return latent.cpu().numpy()
def numpy2torch(latent, device):
return torch.from_numpy(latent).to(device)
def dispatch_progress(
self, context: InvocationContext, source_node_id: str,
intermediate_state: PipelineIntermediateState) -> None:
stable_diffusion_step_callback(
context=context,
intermediate_state=intermediate_state,
node=self.dict(),
source_node_id=source_node_id,
)
scheduler.set_timesteps(self.steps)
latents = latents * np.float64(scheduler.init_noise_sigma)
extra_step_kwargs = dict()
if "eta" in set(inspect.signature(scheduler.step).parameters.keys()):
extra_step_kwargs.update(
eta=0.0,
)
unet_info = context.services.model_manager.get_model(**self.unet.unet.dict())
with unet_info as unet,\
ExitStack() as stack:
#loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras]
loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.unet.loras]
with ONNXModelPatcher.apply_lora_unet(unet, loras):
# TODO:
unet.create_session()
timestep_dtype = next(
(input.type for input in unet.session.get_inputs() if input.name == "timestep"), "tensor(float16)"
)
timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
import time
times = []
for i in tqdm(range(len(scheduler.timesteps))):
t = scheduler.timesteps[i]
# expand the latents if we are doing classifier free guidance
latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = scheduler.scale_model_input(numpy2torch(latent_model_input, device), t)
latent_model_input = latent_model_input.cpu().numpy()
# predict the noise residual
timestep = np.array([t], dtype=timestep_dtype)
start_time = time.time()
noise_pred = unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)
times.append(time.time() - start_time)
noise_pred = noise_pred[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
noise_pred = noise_pred_uncond + self.cfg_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
scheduler_output = scheduler.step(
numpy2torch(noise_pred, device), t, numpy2torch(latents, device), **extra_step_kwargs
)
latents = torch2numpy(scheduler_output.prev_sample)
state = PipelineIntermediateState(
run_id="test",
step=i,
timestep=timestep,
latents=scheduler_output.prev_sample
)
dispatch_progress(
self,
context=context,
source_node_id=source_node_id,
intermediate_state=state
)
# call the callback, if provided
#if callback is not None and i % callback_steps == 0:
# callback(i, t, latents)
print(times)
unet.release_session()
torch.cuda.empty_cache()
name = f'{context.graph_execution_state_id}__{self.id}'
context.services.latents.save(name, latents)
return build_latents_output(latents_name=name, latents=torch.from_numpy(latents))
# Latent to image
class ONNXLatentsToImageInvocation(BaseInvocation):
"""Generates an image from latents."""
type: Literal["l2i_onnx"] = "l2i_onnx"
# Inputs
latents: Optional[LatentsField] = Field(description="The latents to generate an image from")
vae: VaeField = Field(default=None, description="Vae submodel")
metadata: Optional[CoreMetadata] = Field(default=None, description="Optional core metadata to be written to the image")
#tiled: bool = Field(default=False, description="Decode latents by overlapping tiles (less memory consumption)")
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["latents", "image"],
},
}
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.services.latents.get(self.latents.latents_name)
if self.vae.vae.submodel != SubModelType.VaeDecoder:
raise Exception(f"Expected vae_decoder, found: {self.vae.vae.model_type}")
vae_info = context.services.model_manager.get_model(
**self.vae.vae.dict(),
)
# clear memory as vae decode can request a lot
torch.cuda.empty_cache()
with vae_info as vae:
vae.create_session()
# copied from
# https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L427
latents = 1 / 0.18215 * latents
# image = self.vae_decoder(latent_sample=latents)[0]
# it seems like there is a strange result when using a half-precision vae decoder with batch size > 1
image = np.concatenate(
[vae(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
)
image = np.clip(image / 2 + 0.5, 0, 1)
image = image.transpose((0, 2, 3, 1))
image = VaeImageProcessor.numpy_to_pil(image)[0]
vae.release_session()
torch.cuda.empty_cache()
image_dto = context.services.images.create(
image=image,
image_origin=ResourceOrigin.INTERNAL,
image_category=ImageCategory.GENERAL,
node_id=self.id,
session_id=context.graph_execution_state_id,
is_intermediate=self.is_intermediate,
metadata=self.metadata.dict() if self.metadata else None,
)
return ImageOutput(
image=ImageField(image_name=image_dto.image_name),
width=image_dto.width,
height=image_dto.height,
)
class ONNXModelLoaderOutput(BaseInvocationOutput):
"""Model loader output"""
#fmt: off
type: Literal["model_loader_output_onnx"] = "model_loader_output_onnx"
unet: UNetField = Field(default=None, description="UNet submodel")
clip: ClipField = Field(default=None, description="Tokenizer and text_encoder submodels")
vae_decoder: VaeField = Field(default=None, description="Vae submodel")
vae_encoder: VaeField = Field(default=None, description="Vae submodel")
#fmt: on
class ONNXSD1ModelLoaderInvocation(BaseInvocation):
"""Loading submodels of selected model."""
type: Literal["sd1_model_loader_onnx"] = "sd1_model_loader_onnx"
model_name: str = Field(default="", description="Model to load")
# TODO: precision?
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"tags": ["model", "loader"],
"type_hints": {
"model_name": "model" # TODO: rename to model_name?
}
},
}
def invoke(self, context: InvocationContext) -> ONNXModelLoaderOutput:
model_name = "stable-diffusion-v1-5"
base_model = BaseModelType.StableDiffusion1
# TODO: not found exceptions
if not context.services.model_manager.model_exists(
model_name=model_name,
base_model=BaseModelType.StableDiffusion1,
model_type=ModelType.ONNX,
):
raise Exception(f"Unkown model name: {model_name}!")
return ONNXModelLoaderOutput(
unet=UNetField(
unet=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=ModelType.ONNX,
submodel=SubModelType.UNet,
),
scheduler=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=ModelType.ONNX,
submodel=SubModelType.Scheduler,
),
loras=[],
),
clip=ClipField(
tokenizer=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=ModelType.ONNX,
submodel=SubModelType.Tokenizer,
),
text_encoder=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=ModelType.ONNX,
submodel=SubModelType.TextEncoder,
),
loras=[],
),
vae_decoder=VaeField(
vae=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=ModelType.ONNX,
submodel=SubModelType.VaeDecoder,
),
),
vae_encoder=VaeField(
vae=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=ModelType.ONNX,
submodel=SubModelType.VaeEncoder,
),
)
)
class OnnxModelField(BaseModel):
"""Onnx model field"""
model_name: str = Field(description="Name of the model")
base_model: BaseModelType = Field(description="Base model")
model_type: ModelType = Field(description="Model Type")
class OnnxModelLoaderInvocation(BaseInvocation):
"""Loads a main model, outputting its submodels."""
type: Literal["onnx_model_loader"] = "onnx_model_loader"
model: OnnxModelField = Field(description="The model to load")
# TODO: precision?
# Schema customisation
class Config(InvocationConfig):
schema_extra = {
"ui": {
"title": "Onnx Model Loader",
"tags": ["model", "loader"],
"type_hints": {"model": "model"},
},
}
def invoke(self, context: InvocationContext) -> ONNXModelLoaderOutput:
base_model = self.model.base_model
model_name = self.model.model_name
model_type = ModelType.ONNX
# TODO: not found exceptions
if not context.services.model_manager.model_exists(
model_name=model_name,
base_model=base_model,
model_type=model_type,
):
raise Exception(f"Unknown {base_model} {model_type} model: {model_name}")
"""
if not context.services.model_manager.model_exists(
model_name=self.model_name,
model_type=SDModelType.Diffusers,
submodel=SDModelType.Tokenizer,
):
raise Exception(
f"Failed to find tokenizer submodel in {self.model_name}! Check if model corrupted"
)
if not context.services.model_manager.model_exists(
model_name=self.model_name,
model_type=SDModelType.Diffusers,
submodel=SDModelType.TextEncoder,
):
raise Exception(
f"Failed to find text_encoder submodel in {self.model_name}! Check if model corrupted"
)
if not context.services.model_manager.model_exists(
model_name=self.model_name,
model_type=SDModelType.Diffusers,
submodel=SDModelType.UNet,
):
raise Exception(
f"Failed to find unet submodel from {self.model_name}! Check if model corrupted"
)
"""
return ONNXModelLoaderOutput(
unet=UNetField(
unet=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=SubModelType.UNet,
),
scheduler=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=SubModelType.Scheduler,
),
loras=[],
),
clip=ClipField(
tokenizer=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=SubModelType.Tokenizer,
),
text_encoder=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=SubModelType.TextEncoder,
),
loras=[],
skipped_layers=0,
),
vae_decoder=VaeField(
vae=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=SubModelType.VaeDecoder,
),
),
vae_encoder=VaeField(
vae=ModelInfo(
model_name=model_name,
base_model=base_model,
model_type=model_type,
submodel=SubModelType.VaeEncoder,
),
)
)
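The denoising loop above batches the unconditioned and conditioned embeddings, runs the UNet once per step, then splits the prediction to apply classifier-free guidance. A minimal sketch of just that guidance arithmetic, in numpy as the invocation uses it (shapes and values here are illustrative assumptions):
import numpy as np
cfg_scale = 7.5
# Assumed shape: batch of 2 (uncond + cond), 4 latent channels, 64x64 latents.
noise_pred = np.random.randn(2, 4, 64, 64).astype(np.float16)
# Split the batched prediction and blend, mirroring ONNXTextToLatentsInvocation.
noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
guided = noise_pred_uncond + cfg_scale * (noise_pred_text - noise_pred_uncond)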

View File

@@ -466,7 +466,6 @@ class Generator:
dtype=samples.dtype,
device=samples.device,
)
latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors
latents_ubyte = (
((latent_image + 1) / 2)

View File

@@ -3,6 +3,7 @@ Initialization file for invokeai.backend.model_management
"""
from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType
from .model_cache import ModelCache
from .lora import ModelPatcher, ONNXModelPatcher
from .models import BaseModelType, ModelType, SubModelType, ModelVariantType, ModelNotFoundException
from .model_merge import ModelMerger, MergeInterpolationMethod

View File

@@ -6,11 +6,22 @@ from typing import Optional, Dict, Tuple, Any, Union, List
from pathlib import Path
import torch
from safetensors.torch import load_file
from torch.utils.hooks import RemovableHandle
from diffusers.models import UNet2DConditionModel
from transformers import CLIPTextModel
from onnx import numpy_helper
from onnxruntime import OrtValue
import numpy as np
from compel.embeddings_provider import BaseTextualInversionManager
from diffusers.models import UNet2DConditionModel
from safetensors.torch import load_file
from transformers import CLIPTextModel, CLIPTokenizer
# TODO: rename and split this file
class LoRALayerBase:
#rank: Optional[int]
#alpha: Optional[float]
@@ -708,3 +719,185 @@ class TextualInversionManager(BaseTextualInversionManager):
return new_token_ids
class ONNXModelPatcher:
@classmethod
@contextmanager
def apply_lora_unet(
cls,
unet: OnnxRuntimeModel,
loras: List[Tuple[LoRAModel, float]],
):
with cls.apply_lora(unet, loras, "lora_unet_"):
yield
@classmethod
@contextmanager
def apply_lora_text_encoder(
cls,
text_encoder: OnnxRuntimeModel,
loras: List[Tuple[LoRAModel, float]],
):
with cls.apply_lora(text_encoder, loras, "lora_te_"):
yield
# based on
# https://github.com/ssube/onnx-web/blob/ca2e436f0623e18b4cfe8a0363fcfcf10508acf7/api/onnx_web/convert/diffusion/lora.py#L323
@classmethod
@contextmanager
def apply_lora(
cls,
model: IAIOnnxRuntimeModel,
loras: List[Tuple[LoRAModel, float]],
prefix: str,
):
from .models.base import IAIOnnxRuntimeModel
if not isinstance(model, IAIOnnxRuntimeModel):
raise Exception("Only IAIOnnxRuntimeModel models supported")
orig_weights = dict()
try:
blended_loras = dict()
for lora, lora_weight in loras:
for layer_key, layer in lora.layers.items():
if not layer_key.startswith(prefix):
continue
layer_key = layer_key.replace(prefix, "")
layer_weight = layer.get_weight().detach().cpu().numpy() * lora_weight
if layer_key in blended_loras:
blended_loras[layer_key] += layer_weight
else:
blended_loras[layer_key] = layer_weight
node_names = dict()
for node in model.nodes.values():
node_names[node.name.replace("/", "_").replace(".", "_").lstrip("_")] = node.name
for layer_key, lora_weight in blended_loras.items():
conv_key = layer_key + "_Conv"
gemm_key = layer_key + "_Gemm"
matmul_key = layer_key + "_MatMul"
if conv_key in node_names or gemm_key in node_names:
if conv_key in node_names:
conv_node = model.nodes[node_names[conv_key]]
else:
conv_node = model.nodes[node_names[gemm_key]]
weight_name = [n for n in conv_node.input if ".weight" in n][0]
orig_weight = model.tensors[weight_name]
if orig_weight.shape[-2:] == (1, 1):
if lora_weight.shape[-2:] == (1, 1):
new_weight = orig_weight.squeeze((3, 2)) + lora_weight.squeeze((3, 2))
else:
new_weight = orig_weight.squeeze((3, 2)) + lora_weight
new_weight = np.expand_dims(new_weight, (2, 3))
else:
if orig_weight.shape != lora_weight.shape:
new_weight = orig_weight + lora_weight.reshape(orig_weight.shape)
else:
new_weight = orig_weight + lora_weight
orig_weights[weight_name] = orig_weight
model.tensors[weight_name] = new_weight.astype(orig_weight.dtype)
elif matmul_key in node_names:
weight_node = model.nodes[node_names[matmul_key]]
matmul_name = [n for n in weight_node.input if "MatMul" in n][0]
orig_weight = model.tensors[matmul_name]
new_weight = orig_weight + lora_weight.transpose()
orig_weights[matmul_name] = orig_weight
model.tensors[matmul_name] = new_weight.astype(orig_weight.dtype)
else:
# warn? err?
pass
yield
finally:
# restore original weights
for name, orig_weight in orig_weights.items():
model.tensors[name] = orig_weight
@classmethod
@contextmanager
def apply_ti(
cls,
tokenizer: CLIPTokenizer,
text_encoder: IAIOnnxRuntimeModel,
ti_list: List[Any],
) -> Tuple[CLIPTokenizer, TextualInversionManager]:
from .models.base import IAIOnnxRuntimeModel
if not isinstance(text_encoder, IAIOnnxRuntimeModel):
raise Exception("Only IAIOnnxRuntimeModel models supported")
orig_embeddings = None
try:
ti_tokenizer = copy.deepcopy(tokenizer)
ti_manager = TextualInversionManager(ti_tokenizer)
def _get_trigger(ti, index):
trigger = ti.name
if index > 0:
trigger += f"-!pad-{i}"
return f"<{trigger}>"
# modify tokenizer
new_tokens_added = 0
for ti in ti_list:
for i in range(ti.embedding.shape[0]):
new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti, i))
# modify text_encoder
orig_embeddings = text_encoder.tensors["text_model.embeddings.token_embedding.weight"]
embeddings = np.concatenate(
(
np.copy(orig_embeddings),
np.zeros((new_tokens_added, orig_embeddings.shape[1]))
),
axis=0,
)
for ti in ti_list:
ti_tokens = []
for i in range(ti.embedding.shape[0]):
embedding = ti.embedding[i].detach().numpy()
trigger = _get_trigger(ti, i)
token_id = ti_tokenizer.convert_tokens_to_ids(trigger)
if token_id == ti_tokenizer.unk_token_id:
raise RuntimeError(f"Unable to find token id for token '{trigger}'")
if embeddings[token_id].shape != embedding.shape:
raise ValueError(
f"Cannot load embedding for {trigger}. It was trained on a model with token dimension {embedding.shape[0]}, but the current model has token dimension {embeddings[token_id].shape[0]}."
)
embeddings[token_id] = embedding
ti_tokens.append(token_id)
if len(ti_tokens) > 1:
ti_manager.pad_tokens[ti_tokens[0]] = ti_tokens[1:]
text_encoder.tensors["text_model.embeddings.token_embedding.weight"] = embeddings.astype(orig_embeddings.dtype)
yield ti_tokenizer, ti_manager
finally:
# restore
if orig_embeddings is not None:
text_encoder.tensors["text_model.embeddings.token_embedding.weight"] = orig_embeddings
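Both patchers are context managers that overwrite ONNX initializer tensors in place and restore the originals on exit, so the session must be created inside the patched scope. A hypothetical usage sketch (unet and lora_model are placeholder objects, not names from this diff):
# Assumed: unet is an IAIOnnxRuntimeModel, lora_model a loaded LoRA at weight 0.75.
with ONNXModelPatcher.apply_lora_unet(unet, [(lora_model, 0.75)]):
    unet.create_session()  # the session is built from the already-patched proto
    # ... run the denoising loop ...
    unet.release_session()
# on exit the original initializer tensors have been restored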

View File

@@ -23,7 +23,7 @@ class ModelProbeInfo(object):
variant_type: ModelVariantType
prediction_type: SchedulerPredictionType
upcast_attention: bool
format: Literal['diffusers','checkpoint', 'lycoris']
format: Literal['diffusers','checkpoint', 'lycoris', 'olive']
image_size: int
class ProbeBase(object):

View File

@@ -10,8 +10,11 @@ from .lora import LoRAModel
from .controlnet import ControlNetModel # TODO:
from .textual_inversion import TextualInversionModel
from .stable_diffusion_onnx import ONNXStableDiffusion1Model, ONNXStableDiffusion2Model
MODEL_CLASSES = {
BaseModelType.StableDiffusion1: {
ModelType.ONNX: ONNXStableDiffusion1Model,
ModelType.Main: StableDiffusion1Model,
ModelType.Vae: VaeModel,
ModelType.Lora: LoRAModel,
@@ -19,6 +22,7 @@ MODEL_CLASSES = {
ModelType.TextualInversion: TextualInversionModel,
},
BaseModelType.StableDiffusion2: {
ModelType.ONNX: ONNXStableDiffusion2Model,
ModelType.Main: StableDiffusion2Model,
ModelType.Vae: VaeModel,
ModelType.Lora: LoRAModel,
@@ -32,6 +36,7 @@ MODEL_CLASSES = {
ModelType.Lora: LoRAModel,
ModelType.ControlNet: ControlNetModel,
ModelType.TextualInversion: TextualInversionModel,
ModelType.ONNX: ONNXStableDiffusion2Model,
},
BaseModelType.StableDiffusionXLRefiner: {
ModelType.Main: StableDiffusionXLModel,
@@ -40,6 +45,7 @@ MODEL_CLASSES = {
ModelType.Lora: LoRAModel,
ModelType.ControlNet: ControlNetModel,
ModelType.TextualInversion: TextualInversionModel,
ModelType.ONNX: ONNXStableDiffusion2Model,
},
#BaseModelType.Kandinsky2_1: {
# ModelType.Main: Kandinsky2_1Model,
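With these entries registered, the model manager can resolve an ONNX loader class from a (base model, model type) pair. A small sketch of the lookup, assuming the names imported above:
# Resolve the loader class for an ONNX Stable Diffusion 1.x model.
model_class = MODEL_CLASSES[BaseModelType.StableDiffusion1][ModelType.ONNX]
assert model_class is ONNXStableDiffusion1Model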

View File

@@ -8,13 +8,19 @@ from abc import ABCMeta, abstractmethod
from pathlib import Path
from picklescan.scanner import scan_file_path
import torch
import numpy as np
import safetensors.torch
from diffusers import DiffusionPipeline, ConfigMixin
from pathlib import Path
from diffusers import DiffusionPipeline, ConfigMixin, OnnxRuntimeModel
from contextlib import suppress
from pydantic import BaseModel, Field
from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
import onnx
from onnx import numpy_helper
from onnx.external_data_helper import set_external_data
from onnxruntime import InferenceSession, OrtValue, SessionOptions, ExecutionMode, GraphOptimizationLevel
class InvalidModelException(Exception):
pass
@@ -29,6 +35,7 @@ class BaseModelType(str, Enum):
#Kandinsky2_1 = "kandinsky-2.1"
class ModelType(str, Enum):
ONNX = "onnx"
Main = "main"
Vae = "vae"
Lora = "lora"
@@ -42,6 +49,8 @@ class SubModelType(str, Enum):
Tokenizer = "tokenizer"
Tokenizer2 = "tokenizer_2"
Vae = "vae"
VaeDecoder = "vae_decoder"
VaeEncoder = "vae_encoder"
Scheduler = "scheduler"
SafetyChecker = "safety_checker"
#MoVQ = "movq"
@@ -254,16 +263,18 @@ class DiffusersModel(ModelBase):
try:
# TODO: set cache_dir to /dev/null to be sure that the cache is not used?
model = self.child_types[child_type].from_pretrained(
self.model_path,
subfolder=child_type.value,
os.path.join(self.model_path, child_type.value),
#subfolder=child_type.value,
torch_dtype=torch_dtype,
variant=variant,
local_files_only=True,
)
break
except Exception as e:
#print("====ERR LOAD====")
#print(f"{variant}: {e}")
print("====ERR LOAD====")
print(f"{variant}: {e}")
import traceback
traceback.print_exc()
pass
else:
raise Exception(f"Failed to load {self.base_model}:{self.model_type}:{child_type} model")
@@ -430,3 +441,188 @@ class SilenceWarnings(object):
transformers_logging.set_verbosity(self.transformers_verbosity)
diffusers_logging.set_verbosity(self.diffusers_verbosity)
warnings.simplefilter('default')
ONNX_WEIGHTS_NAME = "model.onnx"
class IAIOnnxRuntimeModel:
class _tensor_access:
def __init__(self, model):
self.model = model
self.indexes = dict()
for idx, obj in enumerate(self.model.proto.graph.initializer):
self.indexes[obj.name] = idx
def __getitem__(self, key: str):
return self.model.data[key].numpy()
def __setitem__(self, key: str, value: np.ndarray):
new_node = numpy_helper.from_array(value)
# set_external_data(new_node, location="in-memory-location")
new_node.name = key
# new_node.ClearField("raw_data")
del self.model.proto.graph.initializer[self.indexes[key]]
self.model.proto.graph.initializer.insert(self.indexes[key], new_node)
self.model.data[key] = OrtValue.ortvalue_from_numpy(value)
# __delitem__
def __contains__(self, key: str):
return key in self.model.data
def items(self):
raise NotImplementedError("tensor.items")
#return [(obj.name, obj) for obj in self.raw_proto]
def keys(self):
return self.model.data.keys()
def values(self):
raise NotImplementedError("tensor.values")
#return [obj for obj in self.raw_proto]
class _access_helper:
def __init__(self, raw_proto):
self.indexes = dict()
self.raw_proto = raw_proto
for idx, obj in enumerate(raw_proto):
self.indexes[obj.name] = idx
def __getitem__(self, key: str):
return self.raw_proto[self.indexes[key]]
def __setitem__(self, key: str, value):
index = self.indexes[key]
del self.raw_proto[index]
self.raw_proto.insert(index, value)
# __delitem__
def __contains__(self, key: str):
return key in self.indexes
def items(self):
return [(obj.name, obj) for obj in self.raw_proto]
def keys(self):
return self.indexes.keys()
def values(self):
return [obj for obj in self.raw_proto]
def __init__(self, model_path: str, provider: Optional[str]):
self.path = model_path
self.session = None
self.provider = provider or "CPUExecutionProvider"
"""
self.data_path = self.path + "_data"
if not os.path.exists(self.data_path):
print(f"Moving model tensors to separate file: {self.data_path}")
tmp_proto = onnx.load(model_path, load_external_data=True)
onnx.save_model(tmp_proto, self.path, save_as_external_data=True, all_tensors_to_one_file=True, location=os.path.basename(self.data_path), size_threshold=1024, convert_attribute=False)
del tmp_proto
gc.collect()
self.proto = onnx.load(model_path, load_external_data=False)
"""
self.proto = onnx.load(model_path, load_external_data=True)
self.data = dict()
for tensor in self.proto.graph.initializer:
name = tensor.name
if tensor.HasField("raw_data"):
npt = numpy_helper.to_array(tensor)
orv = OrtValue.ortvalue_from_numpy(npt)
self.data[name] = orv
# set_external_data(tensor, location="in-memory-location")
tensor.name = name
# tensor.ClearField("raw_data")
self.nodes = self._access_helper(self.proto.graph.node)
self.initializers = self._access_helper(self.proto.graph.initializer)
# print(self.proto.graph.input)
# print(self.proto.graph.initializer)
self.tensors = self._tensor_access(self)
# TODO: integrate with model manager/cache
def create_session(self):
if self.session is None:
#onnx.save(self.proto, "tmp.onnx")
#onnx.save_model(self.proto, "tmp.onnx", save_as_external_data=True, all_tensors_to_one_file=True, location="tmp.onnx_data", size_threshold=1024, convert_attribute=False)
# TODO: a way to get weights once they have already been moved outside of the model proto
#(trimmed_model, external_data) = buffer_external_data_tensors(self.proto)
sess = SessionOptions()
#self._external_data.update(**external_data)
# sess.add_external_initializers(list(self.data.keys()), list(self.data.values()))
# sess.enable_profiling = True
# sess.intra_op_num_threads = 1
# sess.inter_op_num_threads = 1
# sess.execution_mode = ExecutionMode.ORT_SEQUENTIAL
# sess.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
# sess.enable_cpu_mem_arena = True
# sess.enable_mem_pattern = True
# sess.add_session_config_entry("session.intra_op.use_xnnpack_threadpool", "1") ########### It's the key code
sess.add_free_dimension_override_by_name("unet_sample_batch", 2)
sess.add_free_dimension_override_by_name("unet_sample_channels", 4)
sess.add_free_dimension_override_by_name("unet_hidden_batch", 2)
sess.add_free_dimension_override_by_name("unet_hidden_sequence", 77)
sess.add_free_dimension_override_by_name("unet_sample_height", 64)
sess.add_free_dimension_override_by_name("unet_sample_width", 64)
sess.add_free_dimension_override_by_name("unet_time_batch", 1)
self.session = InferenceSession(self.proto.SerializeToString(), providers=['CUDAExecutionProvider', 'CPUExecutionProvider'], sess_options=sess)
#self.session = InferenceSession("tmp.onnx", providers=[self.provider], sess_options=self.sess_options)
self.io_binding = self.session.io_binding()
def release_session(self):
self.session = None
import gc
gc.collect()
def __call__(self, **kwargs):
if self.session is None:
raise Exception("You should call create_session before running model")
inputs = {k: np.array(v) for k, v in kwargs.items()}
output_names = self.session.get_outputs()
for k in inputs:
self.io_binding.bind_cpu_input(k, inputs[k])
for name in output_names:
self.io_binding.bind_output(name.name)
self.session.run_with_iobinding(self.io_binding, None)
return self.io_binding.copy_outputs_to_cpu()
# compatibility with diffusers load code
@classmethod
def from_pretrained(
cls,
model_id: Union[str, Path],
subfolder: Union[str, Path] = None,
file_name: Optional[str] = None,
provider: Optional[str] = None,
sess_options: Optional["SessionOptions"] = None,
**kwargs,
):
file_name = file_name or ONNX_WEIGHTS_NAME
if os.path.isdir(model_id):
model_path = model_id
if subfolder is not None:
model_path = os.path.join(model_path, subfolder)
model_path = os.path.join(model_path, file_name)
else:
model_path = model_id
# load model from local directory
if not os.path.isfile(model_path):
raise Exception(f"Model not found: {model_path}")
# TODO: session options
return cls(model_path, provider=provider)
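IAIOnnxRuntimeModel keeps the whole ONNX proto and its initializer tensors in memory so that patchers can rewrite weights before any session exists. A hypothetical end-to-end sketch (the path is an assumption; the input shapes and dtypes follow the free-dimension overrides set in create_session):
import numpy as np
# Assumed layout: <model_dir>/unet/model.onnx, as from_pretrained expects.
unet = IAIOnnxRuntimeModel.from_pretrained(
    "/models/sd15-onnx", subfolder="unet", provider="CPUExecutionProvider"
)
unet.create_session()
noise_pred = unet(
    sample=np.zeros((2, 4, 64, 64), dtype=np.float16),
    timestep=np.array([999], dtype=np.float16),
    encoder_hidden_states=np.zeros((2, 77, 768), dtype=np.float16),
)
unet.release_session()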

View File

@@ -0,0 +1,156 @@
import os
import json
from enum import Enum
from pydantic import Field
from pathlib import Path
from typing import Literal, Optional, Union
from .base import (
ModelBase,
ModelConfigBase,
BaseModelType,
ModelType,
SubModelType,
ModelVariantType,
DiffusersModel,
SchedulerPredictionType,
SilenceWarnings,
read_checkpoint_meta,
classproperty,
OnnxRuntimeModel,
IAIOnnxRuntimeModel,
)
from invokeai.app.services.config import InvokeAIAppConfig
class ONNXStableDiffusion1Model(DiffusersModel):
class Config(ModelConfigBase):
model_format: None
variant: ModelVariantType
def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
assert base_model == BaseModelType.StableDiffusion1
assert model_type == ModelType.ONNX
super().__init__(
model_path=model_path,
base_model=BaseModelType.StableDiffusion1,
model_type=ModelType.ONNX,
)
for child_name, child_type in self.child_types.items():
if child_type is OnnxRuntimeModel:
self.child_types[child_name] = IAIOnnxRuntimeModel
# TODO: check that no optimum models provided
@classmethod
def probe_config(cls, path: str, **kwargs):
model_format = cls.detect_format(path)
in_channels = 4 # TODO:
if in_channels == 9:
variant = ModelVariantType.Inpaint
elif in_channels == 4:
variant = ModelVariantType.Normal
else:
raise Exception("Unkown stable diffusion 1.* model format")
return cls.create_config(
path=path,
model_format=model_format,
variant=variant,
)
@classproperty
def save_to_config(cls) -> bool:
return True
@classmethod
def detect_format(cls, model_path: str):
return None
@classmethod
def convert_if_required(
cls,
model_path: str,
output_path: str,
config: ModelConfigBase,
base_model: BaseModelType,
) -> str:
return model_path
class ONNXStableDiffusion2Model(DiffusersModel):
# TODO: check that configs are overwritten properly
class Config(ModelConfigBase):
model_format: None
variant: ModelVariantType
prediction_type: SchedulerPredictionType
upcast_attention: bool
def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
assert base_model == BaseModelType.StableDiffusion2
assert model_type == ModelType.ONNX
super().__init__(
model_path=model_path,
base_model=BaseModelType.StableDiffusion2,
model_type=ModelType.ONNX,
)
for child_name, child_type in self.child_types.items():
if child_type is OnnxRuntimeModel:
self.child_types[child_name] = IAIOnnxRuntimeModel
# TODO: check that no optimum models provided
@classmethod
def probe_config(cls, path: str, **kwargs):
model_format = cls.detect_format(path)
in_channels = 4 # TODO:
if in_channels == 9:
variant = ModelVariantType.Inpaint
elif in_channels == 5:
variant = ModelVariantType.Depth
elif in_channels == 4:
variant = ModelVariantType.Normal
else:
raise Exception("Unkown stable diffusion 2.* model format")
if variant == ModelVariantType.Normal:
prediction_type = SchedulerPredictionType.VPrediction
upcast_attention = True
else:
prediction_type = SchedulerPredictionType.Epsilon
upcast_attention = False
return cls.create_config(
path=path,
model_format=model_format,
variant=variant,
prediction_type=prediction_type,
upcast_attention=upcast_attention,
)
@classproperty
def save_to_config(cls) -> bool:
return True
@classmethod
def detect_format(cls, model_path: str):
return None
@classmethod
def convert_if_required(
cls,
model_path: str,
output_path: str,
config: ModelConfigBase,
base_model: BaseModelType,
) -> str:
return model_path
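Note that probe_config in both classes hard-codes in_channels = 4, so every probed model currently reports the Normal variant; the dead branches sketch the intended mapping once channel detection is implemented. For SD 2.x, that intended mapping is (an assumption inferred from the branches above):
# Intended UNet in_channels -> variant mapping for SD 2.x; only 4 is probed today.
SD2_VARIANTS = {
    9: ModelVariantType.Inpaint,  # mask + masked-image channels concatenated
    5: ModelVariantType.Depth,    # extra depth-conditioning channel
    4: ModelVariantType.Normal,
}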

6 file diffs suppressed because one or more lines are too long

View File

@@ -12,7 +12,7 @@
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-adc79457.js"></script>
<script type="module" crossorigin src="./assets/index-ba194473.js"></script>
</head>
<body dir="ltr">

View File

@@ -12,7 +12,10 @@ import { modelIdToMainModelParam } from 'features/parameters/util/modelIdToMainM
import { forEach } from 'lodash-es';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useGetMainModelsQuery } from 'services/api/endpoints/models';
import {
useGetMainModelsQuery,
useGetOnnxModelsQuery,
} from 'services/api/endpoints/models';
import { FieldComponentProps } from './types';
const ModelInputFieldComponent = (
@@ -23,6 +26,7 @@ const ModelInputFieldComponent = (
const dispatch = useAppDispatch();
const { t } = useTranslation();
const { data: onnxModels } = useGetOnnxModelsQuery();
const { data: mainModels, isLoading } = useGetMainModelsQuery();
const data = useMemo(() => {
@@ -44,17 +48,39 @@
});
});
if (onnxModels) {
forEach(onnxModels.entities, (model, id) => {
if (!model) {
return;
}
data.push({
value: id,
label: model.model_name,
group: BASE_MODEL_NAME_MAP[model.base_model],
});
});
}
return data;
}, [mainModels]);
}, [mainModels, onnxModels]);
// grab the full model entity from the RTK Query cache
// TODO: maybe we should just store the full model entity in state?
const selectedModel = useMemo(
() =>
mainModels?.entities[
(mainModels?.entities[
`${field.value?.base_model}/main/${field.value?.model_name}`
] ?? null,
[field.value?.base_model, field.value?.model_name, mainModels?.entities]
] ||
onnxModels?.entities[
`${field.value?.base_model}/onnx/${field.value?.model_name}`
]) ??
null,
[
field.value?.base_model,
field.value?.model_name,
mainModels?.entities,
onnxModels?.entities,
]
);
const handleChangeModel = useCallback(

View File

@@ -9,6 +9,7 @@ import {
CLIP_SKIP,
LORA_LOADER,
MAIN_MODEL_LOADER,
ONNX_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
@@ -17,7 +18,8 @@
export const addLoRAsToGraph = (
state: RootState,
graph: NonNullableGraph,
baseNodeId: string
baseNodeId: string,
modelLoader: string = MAIN_MODEL_LOADER
): void => {
/**
* LoRA nodes get the UNet and CLIP models from the main model loader and apply the LoRA to them.
@@ -40,6 +42,10 @@
!(
e.source.node_id === MAIN_MODEL_LOADER &&
['unet'].includes(e.source.field)
) &&
!(
e.source.node_id === ONNX_MODEL_LOADER &&
['unet'].includes(e.source.field)
)
);
// Remove CLIP_SKIP connections to conditionings to feed it through LoRAs
@@ -74,12 +80,11 @@
// add to graph
graph.nodes[currentLoraNodeId] = loraLoaderNode;
if (currentLoraIndex === 0) {
// first lora = start the lora chain, attach directly to model loader
graph.edges.push({
source: {
node_id: MAIN_MODEL_LOADER,
node_id: modelLoader,
field: 'unet',
},
destination: {

View File

@@ -9,13 +9,15 @@ import {
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
METADATA_ACCUMULATOR,
ONNX_MODEL_LOADER,
TEXT_TO_IMAGE_GRAPH,
VAE_LOADER,
} from './constants';
export const addVAEToGraph = (
state: RootState,
graph: NonNullableGraph
graph: NonNullableGraph,
modelLoader: string = MAIN_MODEL_LOADER
): void => {
const { vae } = state.generation;
@@ -31,12 +33,12 @@ export const addVAEToGraph = (
vae_model: vae,
};
}
const isOnnxModel = modelLoader == ONNX_MODEL_LOADER;
if (graph.id === TEXT_TO_IMAGE_GRAPH || graph.id === IMAGE_TO_IMAGE_GRAPH) {
graph.edges.push({
source: {
node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
field: 'vae',
node_id: isAutoVae ? modelLoader : VAE_LOADER,
field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
},
destination: {
node_id: LATENTS_TO_IMAGE,
@@ -48,8 +50,8 @@
if (graph.id === IMAGE_TO_IMAGE_GRAPH) {
graph.edges.push({
source: {
node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
field: 'vae',
node_id: isAutoVae ? modelLoader : VAE_LOADER,
field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
},
destination: {
node_id: IMAGE_TO_LATENTS,
@@ -61,8 +63,8 @@
if (graph.id === INPAINT_GRAPH) {
graph.edges.push({
source: {
node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
field: 'vae',
node_id: isAutoVae ? modelLoader : VAE_LOADER,
field: isAutoVae && isOnnxModel ? 'vae_decoder' : 'vae',
},
destination: {
node_id: INPAINT,

View File

@@ -18,6 +18,7 @@ import {
LATENTS_TO_IMAGE,
LATENTS_TO_LATENTS,
MAIN_MODEL_LOADER,
ONNX_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
@@ -59,6 +60,9 @@ export const buildCanvasImageToImageGraph = (
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
const onnx_model_type = model.model_type.includes('onnx');
const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
@@ -69,16 +73,17 @@
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
// TODO: Actually create the graph correctly for ONNX
const graph: NonNullableGraph = {
id: IMAGE_TO_IMAGE_GRAPH,
nodes: {
[POSITIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
@@ -87,9 +92,9 @@ export const buildCanvasImageToImageGraph = (
id: NOISE,
use_cpu,
},
[MAIN_MODEL_LOADER]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
[model_loader]: {
type: model_loader,
id: model_loader,
model,
},
[CLIP_SKIP]: {
@@ -98,11 +103,11 @@
skipped_layers: clipSkip,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
type: onnx_model_type ? 'l2i_onnx' : 'l2i',
id: LATENTS_TO_IMAGE,
},
[LATENTS_TO_LATENTS]: {
type: 'l2l',
type: onnx_model_type ? 'l2l_onnx' : 'l2l',
id: LATENTS_TO_LATENTS,
cfg_scale,
scheduler,
@@ -110,7 +115,7 @@
strength,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
type: onnx_model_type ? 'i2l_onnx' : 'i2l',
id: IMAGE_TO_LATENTS,
// must be set manually later, bc `fit` parameter may require a resize node inserted
// image: {
@@ -121,7 +126,7 @@
edges: [
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'clip',
},
destination: {
@@ -181,7 +186,7 @@
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'unet',
},
destination: {
@@ -313,10 +318,10 @@
});
// add LoRA support
addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);
addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS, model_loader);
// optionally add custom VAE
addVAEToGraph(state, graph);
addVAEToGraph(state, graph, model_loader);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);

View File

@@ -15,6 +15,7 @@ import {
INPAINT_GRAPH,
ITERATE,
MAIN_MODEL_LOADER,
ONNX_MODEL_LOADER,
NEGATIVE_CONDITIONING,
POSITIVE_CONDITIONING,
RANDOM_INT,
@@ -63,6 +64,11 @@ export const buildCanvasInpaintGraph = (
// We may need to set the inpaint width and height to scale the image
const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;
const model_loader = model.model_type.includes('onnx')
? ONNX_MODEL_LOADER
: MAIN_MODEL_LOADER;
// TODO: Actually create the graph correctly for ONNX
const graph: NonNullableGraph = {
id: INPAINT_GRAPH,
nodes: {
@@ -107,9 +113,9 @@
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
[MAIN_MODEL_LOADER]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
[model_loader]: {
type: model_loader,
id: model_loader,
model,
},
[CLIP_SKIP]: {
@@ -133,7 +139,7 @@
edges: [
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'unet',
},
destination: {
@@ -143,7 +149,7 @@
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'clip',
},
destination: {

View File

@@ -10,6 +10,7 @@ import {
CLIP_SKIP,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
ONNX_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
@@ -49,7 +50,8 @@ export const buildCanvasTextToImageGraph = (
const use_cpu = shouldUseNoiseSettings
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
const onnx_model_type = model.model_type.includes('onnx');
const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
@@ -60,16 +62,17 @@
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
// TODO: Actually create the graph correctly for ONNX
const graph: NonNullableGraph = {
id: TEXT_TO_IMAGE_GRAPH,
nodes: {
[POSITIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
@@ -81,15 +84,15 @@
use_cpu,
},
[TEXT_TO_LATENTS]: {
type: 't2l',
type: onnx_model_type ? 't2l_onnx' : 't2l',
id: TEXT_TO_LATENTS,
cfg_scale,
scheduler,
steps,
},
[MAIN_MODEL_LOADER]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
[model_loader]: {
type: model_loader,
id: model_loader,
model,
},
[CLIP_SKIP]: {
@@ -98,7 +101,7 @@
skipped_layers: clipSkip,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
type: onnx_model_type ? 'l2i_onnx' : 'l2i',
id: LATENTS_TO_IMAGE,
},
},
@@ -125,7 +128,7 @@
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'clip',
},
destination: {
@@ -155,7 +158,7 @@
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'unet',
},
destination: {
@@ -219,10 +222,10 @@
});
// add LoRA support
addLoRAsToGraph(state, graph, TEXT_TO_LATENTS);
addLoRAsToGraph(state, graph, TEXT_TO_LATENTS, model_loader);
// optionally add custom VAE
addVAEToGraph(state, graph);
addVAEToGraph(state, graph, model_loader);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);
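Each linear-UI graph builder applies the same substitution: when the selected model's type contains 'onnx', every standard node type is swapped for its ONNX counterpart and the model loader node is renamed. That mapping, summarized as a Python sketch for reference (the node-type names come from the diffs; the helper itself is hypothetical):
# Node-type swap performed by the TypeScript graph builders above.
ONNX_NODE_TYPES = {
    "compel": "prompt_onnx",
    "t2l": "t2l_onnx",
    "l2l": "l2l_onnx",
    "i2l": "i2l_onnx",
    "l2i": "l2i_onnx",
    "main_model_loader": "onnx_model_loader",
}
def node_type(base_type: str, is_onnx: bool) -> str:
    return ONNX_NODE_TYPES.get(base_type, base_type) if is_onnx else base_type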

View File

@@ -17,6 +17,7 @@ import {
LATENTS_TO_IMAGE,
LATENTS_TO_LATENTS,
MAIN_MODEL_LOADER,
ONNX_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
@@ -82,13 +83,17 @@ export const buildLinearImageToImageGraph = (
? shouldUseCpuNoise
: initialGenerationState.shouldUseCpuNoise;
const onnx_model_type = model.model_type.includes('onnx');
const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
// copy-pasted graph from node editor, filled in with state values & friendly node ids
// TODO: Actually create the graph correctly for ONNX
const graph: NonNullableGraph = {
id: IMAGE_TO_IMAGE_GRAPH,
nodes: {
[MAIN_MODEL_LOADER]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
[model_loader]: {
type: model_loader,
id: model_loader,
model,
},
[CLIP_SKIP]: {
@@ -97,12 +102,12 @@
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
@@ -112,11 +117,11 @@
use_cpu,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
type: onnx_model_type ? 'l2i_onnx' : 'l2i',
id: LATENTS_TO_IMAGE,
},
[LATENTS_TO_LATENTS]: {
type: 'l2l',
type: onnx_model_type ? 'l2l_onnx' : 'l2l',
id: LATENTS_TO_LATENTS,
cfg_scale,
scheduler,
@@ -124,7 +129,7 @@
strength,
},
[IMAGE_TO_LATENTS]: {
type: 'i2l',
type: onnx_model_type ? 'i2l_onnx' : 'i2l',
id: IMAGE_TO_LATENTS,
// must be set manually later, bc `fit` parameter may require a resize node inserted
// image: {
@@ -135,7 +140,7 @@
edges: [
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'unet',
},
destination: {
@@ -145,7 +150,7 @@
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'clip',
},
destination: {
@@ -366,10 +371,10 @@
});
// add LoRA support
addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);
addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS, model_loader);
// optionally add custom VAE
addVAEToGraph(state, graph);
addVAEToGraph(state, graph, model_loader);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);

View File

@@ -6,10 +6,13 @@ import { addControlNetToLinearGraph } from './addControlNetToLinearGraph';
import { addDynamicPromptsToGraph } from './addDynamicPromptsToGraph';
import { addLoRAsToGraph } from './addLoRAsToGraph';
import { addVAEToGraph } from './addVAEToGraph';
import { BaseModelType, OnnxModelField } from 'services/api/types';
import {
CLIP_SKIP,
LATENTS_TO_IMAGE,
MAIN_MODEL_LOADER,
ONNX_MODEL_LOADER,
METADATA_ACCUMULATOR,
NEGATIVE_CONDITIONING,
NOISE,
@ -46,6 +49,8 @@ export const buildLinearTextToImageGraph = (
throw new Error('No model found in state');
}
const onnx_model_type = model.model_type.includes('onnx');
const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
/**
* The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
* full graph here as a template. Then use the parameters from app state and set friendlier node
@ -56,12 +61,14 @@ export const buildLinearTextToImageGraph = (
*/
// copy-pasted graph from node editor, filled in with state values & friendly node ids
// TODO: Actually create the graph correctly for ONNX
const graph: NonNullableGraph = {
id: TEXT_TO_IMAGE_GRAPH,
nodes: {
[MAIN_MODEL_LOADER]: {
type: 'main_model_loader',
id: MAIN_MODEL_LOADER,
[model_loader]: {
type: model_loader,
id: model_loader,
model,
},
[CLIP_SKIP]: {
@ -70,12 +77,12 @@ export const buildLinearTextToImageGraph = (
skipped_layers: clipSkip,
},
[POSITIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: POSITIVE_CONDITIONING,
prompt: positivePrompt,
},
[NEGATIVE_CONDITIONING]: {
type: 'compel',
type: onnx_model_type ? 'prompt_onnx' : 'compel',
id: NEGATIVE_CONDITIONING,
prompt: negativePrompt,
},
@ -87,21 +94,21 @@ export const buildLinearTextToImageGraph = (
use_cpu,
},
[TEXT_TO_LATENTS]: {
type: 't2l',
type: onnx_model_type ? 't2l_onnx' : 't2l',
id: TEXT_TO_LATENTS,
cfg_scale,
scheduler,
steps,
},
[LATENTS_TO_IMAGE]: {
type: 'l2i',
type: onnx_model_type ? 'l2i_onnx' : 'l2i',
id: LATENTS_TO_IMAGE,
},
},
edges: [
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'clip',
},
destination: {
@ -111,7 +118,7 @@ export const buildLinearTextToImageGraph = (
},
{
source: {
node_id: MAIN_MODEL_LOADER,
node_id: model_loader,
field: 'unet',
},
destination: {
@ -215,10 +222,10 @@ export const buildLinearTextToImageGraph = (
});
// add LoRA support
addLoRAsToGraph(state, graph, TEXT_TO_LATENTS);
addLoRAsToGraph(state, graph, TEXT_TO_LATENTS, model_loader);
// optionally add custom VAE
addVAEToGraph(state, graph);
addVAEToGraph(state, graph, model_loader);
// add dynamic prompts - also sets up core iteration and seed
addDynamicPromptsToGraph(state, graph);

View File

@ -8,6 +8,7 @@ export const RANDOM_INT = 'rand_int';
export const RANGE_OF_SIZE = 'range_of_size';
export const ITERATE = 'iterate';
export const MAIN_MODEL_LOADER = 'main_model_loader';
export const ONNX_MODEL_LOADER = 'onnx_model_loader';
export const VAE_LOADER = 'vae_loader';
export const LORA_LOADER = 'lora_loader';
export const CLIP_SKIP = 'clip_skip';

View File

@ -0,0 +1,17 @@
import { BaseModelType, MainModelField, ModelType } from 'services/api/types';
/**
* Crudely converts a model id to a main model field
* TODO: Make better
*/
export const modelIdToMainModelField = (modelId: string): MainModelField => {
const [base_model, model_type, model_name] = modelId.split('/');
const field: MainModelField = {
base_model: base_model as BaseModelType,
model_type: model_type as ModelType,
model_name,
};
return field;
};

View File

@ -0,0 +1,17 @@
import { BaseModelType, OnnxModelField, ModelType } from 'services/api/types';
/**
* Crudely converts a model id to an ONNX model field
* TODO: Make better
*/
export const modelIdToOnnxModelField = (modelId: string): OnnxModelField => {
const [base_model, model_type, model_name] = modelId.split('/');
const field: OnnxModelField = {
base_model: base_model as BaseModelType,
model_name,
model_type: model_type as ModelType,
};
return field;
};

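Both helpers rely on the `base_model/model_type/model_name` id convention used by the RTK Query cache keys. A usage sketch with hypothetical ids (the model names below are illustrative, not real installed models):

import { modelIdToMainModelField } from 'features/nodes/util/modelIdToMainModelField';
import { modelIdToOnnxModelField } from 'features/nodes/util/modelIdToOnnxModelField';

// Hypothetical ids, shown only to illustrate the three-way split.
const mainField = modelIdToMainModelField('sd-1/main/some-model');
// -> { base_model: 'sd-1', model_type: 'main', model_name: 'some-model' }

const onnxField = modelIdToOnnxModelField('sd-1/onnx/some-model');
// -> { base_model: 'sd-1', model_name: 'some-model', model_type: 'onnx' }

Note that a model_name containing a '/' would be truncated by this naive destructuring, which is presumably why both helpers carry the "TODO: Make better" comment.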
View File

@ -12,7 +12,11 @@ import { modelSelected } from 'features/parameters/store/actions';
import { MODEL_TYPE_MAP } from 'features/parameters/types/constants';
import { modelIdToMainModelParam } from 'features/parameters/util/modelIdToMainModelParam';
import { forEach } from 'lodash-es';
import { useGetMainModelsQuery } from 'services/api/endpoints/models';
import {
useGetMainModelsQuery,
useGetOnnxModelsQuery,
} from 'services/api/endpoints/models';
import { modelIdToOnnxModelField } from 'features/nodes/util/modelIdToOnnxModelField';
const selector = createSelector(
stateSelector,
@ -27,6 +31,7 @@ const ParamMainModelSelect = () => {
const { model } = useAppSelector(selector);
const { data: mainModels, isLoading } = useGetMainModelsQuery();
const { data: onnxModels, isLoading: onnxLoading } = useGetOnnxModelsQuery();
const data = useMemo(() => {
if (!mainModels) {
@ -46,17 +51,31 @@ const ParamMainModelSelect = () => {
group: MODEL_TYPE_MAP[model.base_model],
});
});
forEach(onnxModels?.entities, (model, id) => {
if (!model) {
return;
}
data.push({
value: id,
label: model.model_name,
group: MODEL_TYPE_MAP[model.base_model],
});
});
return data;
}, [mainModels]);
}, [mainModels, onnxModels]);
// grab the full model entity from the RTK Query cache
// TODO: maybe we should just store the full model entity in state?
const selectedModel = useMemo(
() =>
mainModels?.entities[`${model?.base_model}/main/${model?.model_name}`] ??
(mainModels?.entities[`${model?.base_model}/main/${model?.model_name}`] ||
onnxModels?.entities[
`${model?.base_model}/onnx/${model?.model_name}`
]) ??
null,
[mainModels?.entities, model]
[mainModels?.entities, model, onnxModels?.entities]
);
const handleChangeModel = useCallback(
@ -65,7 +84,11 @@ const ParamMainModelSelect = () => {
return;
}
const newModel = modelIdToMainModelParam(v);
let newModel = modelIdToMainModelParam(v);
if (v.includes('onnx')) {
newModel = modelIdToOnnxModelField(v);
}
if (!newModel) {
return;
@ -76,7 +99,7 @@ const ParamMainModelSelect = () => {
[dispatch]
);
return isLoading ? (
return isLoading || onnxLoading ? (
<IAIMantineSearchableSelect
label={t('modelManager.model')}
placeholder="Loading..."

View File

@ -1,10 +1,10 @@
import { createAction } from '@reduxjs/toolkit';
import { ImageDTO, MainModelField } from 'services/api/types';
import { ImageDTO, MainModelField, OnnxModelField } from 'services/api/types';
export const initialImageSelected = createAction<ImageDTO | string | undefined>(
'generation/initialImageSelected'
);
export const modelSelected = createAction<MainModelField>(
export const modelSelected = createAction<MainModelField | OnnxModelField>(
'generation/modelSelected'
);

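With the widened payload type, either field shape can be dispatched through the same action. A sketch, assuming `OnnxModelField` carries the same three keys the id helpers populate; the component and model name are illustrative:

import { useAppDispatch } from 'app/store/storeHooks';
import { modelSelected } from 'features/parameters/store/actions';

const SelectOnnxModelButton = () => {
  const dispatch = useAppDispatch();
  // Dispatch the widened action with an ONNX field literal.
  const onClick = () =>
    dispatch(
      modelSelected({
        base_model: 'sd-1',
        model_type: 'onnx',
        model_name: 'some-onnx-model',
      })
    );
  return <button onClick={onClick}>Use ONNX model</button>;
};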
View File

@ -8,7 +8,7 @@ import {
setShouldShowAdvancedOptions,
} from 'features/ui/store/uiSlice';
import { clamp } from 'lodash-es';
import { ImageDTO, MainModelField } from 'services/api/types';
import { ImageDTO, MainModelField, OnnxModelField } from 'services/api/types';
import { clipSkipMap } from '../components/Parameters/Advanced/ParamClipSkip';
import {
CfgScaleParam,
@ -54,7 +54,7 @@ export interface GenerationState {
shouldUseSymmetry: boolean;
horizontalSymmetrySteps: number;
verticalSymmetrySteps: number;
model: MainModelField | null;
model: MainModelField | OnnxModelField | null;
vae: VaeModelParam | null;
seamlessXAxis: boolean;
seamlessYAxis: boolean;
@ -227,7 +227,10 @@ export const generationSlice = createSlice({
const { image_name, width, height } = action.payload;
state.initialImage = { imageName: image_name, width, height };
},
modelChanged: (state, action: PayloadAction<MainModelParam | null>) => {
modelChanged: (
state,
action: PayloadAction<MainModelParam | OnnxModelField | null>
) => {
state.model = action.payload;
if (state.model === null) {
@ -259,6 +262,7 @@ export const generationSlice = createSlice({
const result = zMainModel.safeParse({
model_name,
base_model,
model_type,
});
if (result.success) {

View File

@ -126,6 +126,14 @@ export type HeightParam = z.infer<typeof zHeight>;
export const isValidHeight = (val: unknown): val is HeightParam =>
zHeight.safeParse(val).success;
const zModelType = z.enum([
'vae',
'lora',
'onnx',
'main',
'controlnet',
'embedding',
]);
const zBaseModel = z.enum(['sd-1', 'sd-2', 'sdxl', 'sdxl-refiner']);
export type BaseModelParam = z.infer<typeof zBaseModel>;
@ -137,6 +145,7 @@ export type BaseModelParam = z.infer<typeof zBaseModel>;
export const zMainModel = z.object({
model_name: z.string().min(1),
base_model: zBaseModel,
model_type: zModelType,
});
/**

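The new `model_type` key means existing parse sites must now supply it, as the generationSlice change above does. A minimal sketch, in the same module as `zMainModel`, of how the extended schema validates:

// Sketch: the extended schema now requires model_type.
const ok = zMainModel.safeParse({
  model_name: 'some-model',
  base_model: 'sd-1',
  model_type: 'onnx',
});
// ok.success === true

const missing = zMainModel.safeParse({
  model_name: 'some-model',
  base_model: 'sd-1',
});
// missing.success === false: model_type is now required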
View File

@ -14,6 +14,7 @@ export const modelIdToMainModelParam = (
const result = zMainModel.safeParse({
base_model,
model_name,
model_type,
});
if (!result.success) {

View File

@ -0,0 +1,119 @@
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIMantineSelect from 'common/components/IAIMantineSelect';
import { SelectItem } from '@mantine/core';
import { createSelector } from '@reduxjs/toolkit';
import { stateSelector } from 'app/store/store';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import { modelIdToMainModelField } from 'features/nodes/util/modelIdToMainModelField';
import { modelSelected } from 'features/parameters/store/actions';
import { forEach } from 'lodash-es';
import {
useGetMainModelsQuery,
useGetOnnxModelsQuery,
} from 'services/api/endpoints/models';
import { modelIdToOnnxModelField } from 'features/nodes/util/modelIdToOnnxModelField';
export const MODEL_TYPE_MAP = {
'sd-1': 'Stable Diffusion 1.x',
'sd-2': 'Stable Diffusion 2.x',
};
const selector = createSelector(
stateSelector,
(state) => ({ currentModel: state.generation.model }),
defaultSelectorOptions
);
const ModelSelect = () => {
const dispatch = useAppDispatch();
const { t } = useTranslation();
const { currentModel } = useAppSelector(selector);
const { data: mainModels, isLoading } = useGetMainModelsQuery();
const { data: onnxModels, isLoading: onnxLoading } = useGetOnnxModelsQuery();
const data = useMemo(() => {
if (!mainModels) {
return [];
}
const data: SelectItem[] = [];
forEach(mainModels.entities, (model, id) => {
if (!model) {
return;
}
data.push({
value: id,
label: model.model_name,
group: MODEL_TYPE_MAP[model.base_model],
});
});
forEach(onnxModels?.entities, (model, id) => {
if (!model) {
return;
}
data.push({
value: id,
label: model.model_name,
group: MODEL_TYPE_MAP[model.base_model],
});
});
return data;
}, [mainModels, onnxModels]);
const selectedModel = useMemo(
() =>
mainModels?.entities[
`${currentModel?.base_model}/main/${currentModel?.model_name}`
] ||
onnxModels?.entities[
`${currentModel?.base_model}/onnx/${currentModel?.model_name}`
],
[mainModels?.entities, onnxModels?.entities, currentModel]
);
const handleChangeModel = useCallback(
(v: string | null) => {
if (!v) {
return;
}
let modelField = modelIdToMainModelField(v);
if (v.includes('onnx')) {
modelField = modelIdToOnnxModelField(v);
}
dispatch(modelSelected(modelField));
},
[dispatch]
);
return isLoading || onnxLoading ? (
<IAIMantineSelect
label={t('modelManager.model')}
placeholder="Loading..."
disabled={true}
data={[]}
/>
) : (
<IAIMantineSelect
tooltip={selectedModel?.description}
label={t('modelManager.model')}
value={selectedModel?.id}
placeholder={data.length > 0 ? 'Select a model' : 'No models available'}
data={data}
error={data.length === 0}
disabled={data.length === 0}
onChange={handleChangeModel}
/>
);
};
export default memo(ModelSelect);

View File

@ -0,0 +1,56 @@
import { createEntityAdapter, createSlice } from '@reduxjs/toolkit';
import { RootState } from 'app/store/store';
import {
StableDiffusion1ModelCheckpointConfig,
StableDiffusion1ModelDiffusersConfig,
} from 'services/api';
import { receivedModels } from 'services/thunks/model';
export type SD1PipelineModel = (
| StableDiffusion1ModelCheckpointConfig
| StableDiffusion1ModelDiffusersConfig
) & {
name: string;
};
export const sd1PipelineModelsAdapter = createEntityAdapter<SD1PipelineModel>({
selectId: (model) => model.name,
sortComparer: (a, b) => a.name.localeCompare(b.name),
});
export const sd1InitialPipelineModelsState =
sd1PipelineModelsAdapter.getInitialState();
export type SD1PipelineModelState = typeof sd1InitialPipelineModelsState;
export const sd1PipelineModelsSlice = createSlice({
name: 'sd1PipelineModels',
initialState: sd1InitialPipelineModelsState,
reducers: {
modelAdded: sd1PipelineModelsAdapter.upsertOne,
},
extraReducers(builder) {
/**
* Received Models - FULFILLED
*/
builder.addCase(receivedModels.fulfilled, (state, action) => {
if (action.meta.arg.baseModel !== 'sd-1') return;
sd1PipelineModelsAdapter.setAll(state, action.payload);
});
},
});
export const {
selectAll: selectAllSD1PipelineModels,
selectById: selectByIdSD1PipelineModels,
selectEntities: selectEntitiesSD1PipelineModels,
selectIds: selectIdsSD1PipelineModels,
selectTotal: selectTotalSD1PipelineModels,
} = sd1PipelineModelsAdapter.getSelectors<RootState>(
(state) => state.sd1pipelinemodels
);
export const { modelAdded } = sd1PipelineModelsSlice.actions;
export default sd1PipelineModelsSlice.reducer;

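The generated selectors can be consumed anywhere `RootState` is available; the SD-2 slice below is the same pattern with the base model swapped. A usage sketch, assuming this slice module is importable and the reducer is mounted at `state.sd1pipelinemodels`, matching the `getSelectors` call above:

import { useAppSelector } from 'app/store/storeHooks';

const SD1ModelCount = () => {
  // selectTotalSD1PipelineModels / selectAllSD1PipelineModels come from this slice module.
  const total = useAppSelector(selectTotalSD1PipelineModels);
  const models = useAppSelector(selectAllSD1PipelineModels);
  return <span>{`${total} SD-1 models: ${models.map((m) => m.name).join(', ')}`}</span>;
};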
View File

@ -0,0 +1,56 @@
import { createEntityAdapter, createSlice } from '@reduxjs/toolkit';
import { RootState } from 'app/store/store';
import {
StableDiffusion2ModelCheckpointConfig,
StableDiffusion2ModelDiffusersConfig,
} from 'services/api';
import { receivedModels } from 'services/thunks/model';
export type SD2PipelineModel = (
| StableDiffusion2ModelCheckpointConfig
| StableDiffusion2ModelDiffusersConfig
) & {
name: string;
};
export const sd2PipelineModelsAdapter = createEntityAdapter<SD2PipelineModel>({
selectId: (model) => model.name,
sortComparer: (a, b) => a.name.localeCompare(b.name),
});
export const sd2InitialPipelineModelsState =
sd2PipelineModelsAdapter.getInitialState();
export type SD2PipelineModelState = typeof sd2InitialPipelineModelsState;
export const sd2PipelineModelsSlice = createSlice({
name: 'sd2PipelineModels',
initialState: sd2InitialPipelineModelsState,
reducers: {
modelAdded: sd2PipelineModelsAdapter.upsertOne,
},
extraReducers(builder) {
/**
* Received Models - FULFILLED
*/
builder.addCase(receivedModels.fulfilled, (state, action) => {
if (action.meta.arg.baseModel !== 'sd-2') return;
sd2PipelineModelsAdapter.setAll(state, action.payload);
});
},
});
export const {
selectAll: selectAllSD2PipelineModels,
selectById: selectByIdSD2PipelineModels,
selectEntities: selectEntitiesSD2PipelineModels,
selectIds: selectIdsSD2PipelineModels,
selectTotal: selectTotalSD2PipelineModels,
} = sd2PipelineModelsAdapter.getSelectors<RootState>(
(state) => state.sd2pipelinemodels
);
export const { modelAdded } = sd2PipelineModelsSlice.actions;
export default sd2PipelineModelsSlice.reducer;

View File

@ -17,7 +17,7 @@ type ModelListProps = {
setSelectedModelId: (name: string | undefined) => void;
};
type ModelFormat = 'all' | 'checkpoint' | 'diffusers';
type ModelFormat = 'all' | 'checkpoint' | 'diffusers' | 'olive';
const ModelList = (props: ModelListProps) => {
const { selectedModelId, setSelectedModelId } = props;

View File

@ -10,6 +10,7 @@ import {
ImportModelConfig,
LoRAModelConfig,
MainModelConfig,
OnnxModelConfig,
MergeModelConfig,
TextualInversionModelConfig,
VaeModelConfig,
@ -27,6 +28,8 @@ export type MainModelConfigEntity =
| DiffusersModelConfigEntity
| CheckpointModelConfigEntity;
export type OnnxModelConfigEntity = OnnxModelConfig & { id: string };
export type LoRAModelConfigEntity = LoRAModelConfig & { id: string };
export type ControlNetModelConfigEntity = ControlNetModelConfig & {
@ -41,6 +44,7 @@ export type VaeModelConfigEntity = VaeModelConfig & { id: string };
type AnyModelConfigEntity =
| MainModelConfigEntity
| OnnxModelConfigEntity
| LoRAModelConfigEntity
| ControlNetModelConfigEntity
| TextualInversionModelConfigEntity
@ -104,6 +108,10 @@ type SearchFolderArg = operations['search_for_models']['parameters']['query'];
const mainModelsAdapter = createEntityAdapter<MainModelConfigEntity>({
sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
});
const onnxModelsAdapter = createEntityAdapter<OnnxModelConfigEntity>({
sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
});
const loraModelsAdapter = createEntityAdapter<LoRAModelConfigEntity>({
sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
});
@ -141,6 +149,38 @@ const createModelEntities = <T extends AnyModelConfigEntity>(
export const modelsApi = api.injectEndpoints({
endpoints: (build) => ({
getOnnxModels: build.query<EntityState<OnnxModelConfigEntity>, void>({
query: () => ({ url: 'models/', params: { model_type: 'onnx' } }),
providesTags: (result, error, arg) => {
const tags: ApiFullTagDescription[] = [
{ id: 'OnnxModel', type: LIST_TAG },
];
if (result) {
tags.push(
...result.ids.map((id) => ({
type: 'OnnxModel' as const,
id,
}))
);
}
return tags;
},
transformResponse: (
response: { models: OnnxModelConfig[] },
meta,
arg
) => {
const entities = createModelEntities<OnnxModelConfigEntity>(
response.models
);
return onnxModelsAdapter.setAll(
onnxModelsAdapter.getInitialState(),
entities
);
},
}),
getMainModels: build.query<EntityState<MainModelConfigEntity>, void>({
query: () => ({ url: 'models/', params: { model_type: 'main' } }),
providesTags: (result, error, arg) => {
@ -413,6 +453,7 @@ export const modelsApi = api.injectEndpoints({
export const {
useGetMainModelsQuery,
useGetOnnxModelsQuery,
useGetControlNetModelsQuery,
useGetLoRAModelsQuery,
useGetTextualInversionModelsQuery,

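A consumption sketch for the new endpoint, mirroring how ParamMainModelSelect combines the two queries; the hook name `useAllPipelineModelIds` is illustrative:

import {
  useGetMainModelsQuery,
  useGetOnnxModelsQuery,
} from 'services/api/endpoints/models';

const useAllPipelineModelIds = () => {
  // Both queries run in parallel; loading is combined so the UI waits for both.
  const { data: mainModels, isLoading } = useGetMainModelsQuery();
  const { data: onnxModels, isLoading: onnxLoading } = useGetOnnxModelsQuery();
  return {
    ids: [...(mainModels?.ids ?? []), ...(onnxModels?.ids ?? [])],
    isLoading: isLoading || onnxLoading,
  };
};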
View File

@ -8,6 +8,132 @@ import {
} from '@reduxjs/toolkit/query/react';
import { $authToken, $baseUrl } from 'services/api/client';
export type { AddInvocation } from './models/AddInvocation';
export type { BoardChanges } from './models/BoardChanges';
export type { BoardDTO } from './models/BoardDTO';
export type { Body_create_board_image } from './models/Body_create_board_image';
export type { Body_remove_board_image } from './models/Body_remove_board_image';
export type { Body_upload_image } from './models/Body_upload_image';
export type { CannyImageProcessorInvocation } from './models/CannyImageProcessorInvocation';
export type { CkptModelInfo } from './models/CkptModelInfo';
export type { ClipField } from './models/ClipField';
export type { CollectInvocation } from './models/CollectInvocation';
export type { CollectInvocationOutput } from './models/CollectInvocationOutput';
export type { ColorField } from './models/ColorField';
export type { CompelInvocation } from './models/CompelInvocation';
export type { CompelOutput } from './models/CompelOutput';
export type { ConditioningField } from './models/ConditioningField';
export type { ContentShuffleImageProcessorInvocation } from './models/ContentShuffleImageProcessorInvocation';
export type { ControlField } from './models/ControlField';
export type { ControlNetInvocation } from './models/ControlNetInvocation';
export type { ControlNetModelConfig } from './models/ControlNetModelConfig';
export type { ControlOutput } from './models/ControlOutput';
export type { CreateModelRequest } from './models/CreateModelRequest';
export type { CvInpaintInvocation } from './models/CvInpaintInvocation';
export type { DiffusersModelInfo } from './models/DiffusersModelInfo';
export type { DivideInvocation } from './models/DivideInvocation';
export type { DynamicPromptInvocation } from './models/DynamicPromptInvocation';
export type { Edge } from './models/Edge';
export type { EdgeConnection } from './models/EdgeConnection';
export type { FloatCollectionOutput } from './models/FloatCollectionOutput';
export type { FloatLinearRangeInvocation } from './models/FloatLinearRangeInvocation';
export type { FloatOutput } from './models/FloatOutput';
export type { Graph } from './models/Graph';
export type { GraphExecutionState } from './models/GraphExecutionState';
export type { GraphInvocation } from './models/GraphInvocation';
export type { GraphInvocationOutput } from './models/GraphInvocationOutput';
export type { HTTPValidationError } from './models/HTTPValidationError';
export type { ImageBlurInvocation } from './models/ImageBlurInvocation';
export type { ImageChannelInvocation } from './models/ImageChannelInvocation';
export type { ImageConvertInvocation } from './models/ImageConvertInvocation';
export type { ImageCropInvocation } from './models/ImageCropInvocation';
export type { ImageDTO } from './models/ImageDTO';
export type { ImageField } from './models/ImageField';
export type { ImageInverseLerpInvocation } from './models/ImageInverseLerpInvocation';
export type { ImageLerpInvocation } from './models/ImageLerpInvocation';
export type { ImageMetadata } from './models/ImageMetadata';
export type { ImageMultiplyInvocation } from './models/ImageMultiplyInvocation';
export type { ImageOutput } from './models/ImageOutput';
export type { ImagePasteInvocation } from './models/ImagePasteInvocation';
export type { ImageProcessorInvocation } from './models/ImageProcessorInvocation';
export type { ImageRecordChanges } from './models/ImageRecordChanges';
export type { ImageResizeInvocation } from './models/ImageResizeInvocation';
export type { ImageScaleInvocation } from './models/ImageScaleInvocation';
export type { ImageToLatentsInvocation } from './models/ImageToLatentsInvocation';
export type { ImageUrlsDTO } from './models/ImageUrlsDTO';
export type { InfillColorInvocation } from './models/InfillColorInvocation';
export type { InfillPatchMatchInvocation } from './models/InfillPatchMatchInvocation';
export type { InfillTileInvocation } from './models/InfillTileInvocation';
export type { InpaintInvocation } from './models/InpaintInvocation';
export type { IntCollectionOutput } from './models/IntCollectionOutput';
export type { IntOutput } from './models/IntOutput';
export type { IterateInvocation } from './models/IterateInvocation';
export type { IterateInvocationOutput } from './models/IterateInvocationOutput';
export type { LatentsField } from './models/LatentsField';
export type { LatentsOutput } from './models/LatentsOutput';
export type { LatentsToImageInvocation } from './models/LatentsToImageInvocation';
export type { LatentsToLatentsInvocation } from './models/LatentsToLatentsInvocation';
export type { LineartAnimeImageProcessorInvocation } from './models/LineartAnimeImageProcessorInvocation';
export type { LineartImageProcessorInvocation } from './models/LineartImageProcessorInvocation';
export type { LoadImageInvocation } from './models/LoadImageInvocation';
export type { LoraInfo } from './models/LoraInfo';
export type { LoraLoaderInvocation } from './models/LoraLoaderInvocation';
export type { LoraLoaderOutput } from './models/LoraLoaderOutput';
export type { MaskFromAlphaInvocation } from './models/MaskFromAlphaInvocation';
export type { MaskOutput } from './models/MaskOutput';
export type { MediapipeFaceProcessorInvocation } from './models/MediapipeFaceProcessorInvocation';
export type { MidasDepthImageProcessorInvocation } from './models/MidasDepthImageProcessorInvocation';
export type { MlsdImageProcessorInvocation } from './models/MlsdImageProcessorInvocation';
export type { ModelInfo } from './models/ModelInfo';
export type { ModelLoaderOutput } from './models/ModelLoaderOutput';
export type { ModelsList } from './models/ModelsList';
export type { ModelType } from './models/ModelType';
export type { MultiplyInvocation } from './models/MultiplyInvocation';
export type { NoiseInvocation } from './models/NoiseInvocation';
export type { NoiseOutput } from './models/NoiseOutput';
export type { NormalbaeImageProcessorInvocation } from './models/NormalbaeImageProcessorInvocation';
export type { OffsetPaginatedResults_BoardDTO_ } from './models/OffsetPaginatedResults_BoardDTO_';
export type { OffsetPaginatedResults_ImageDTO_ } from './models/OffsetPaginatedResults_ImageDTO_';
export type { ONNXLatentsToImageInvocation } from './models/ONNXLatentsToImageInvocation';
export type { ONNXModelLoaderOutput } from './models/ONNXModelLoaderOutput';
export type { ONNXPromptInvocation } from './models/ONNXPromptInvocation';
export type { ONNXSD1ModelLoaderInvocation } from './models/ONNXSD1ModelLoaderInvocation';
export type { ONNXStableDiffusion1ModelConfig } from './models/ONNXStableDiffusion1ModelConfig';
export type { ONNXStableDiffusion2ModelConfig } from './models/ONNXStableDiffusion2ModelConfig';
export type { ONNXTextToLatentsInvocation } from './models/ONNXTextToLatentsInvocation';
export type { OpenposeImageProcessorInvocation } from './models/OpenposeImageProcessorInvocation';
export type { PaginatedResults_GraphExecutionState_ } from './models/PaginatedResults_GraphExecutionState_';
export type { ParamFloatInvocation } from './models/ParamFloatInvocation';
export type { ParamIntInvocation } from './models/ParamIntInvocation';
export type { PidiImageProcessorInvocation } from './models/PidiImageProcessorInvocation';
export type { PipelineModelField } from './models/PipelineModelField';
export type { PipelineModelLoaderInvocation } from './models/PipelineModelLoaderInvocation';
export type { PromptCollectionOutput } from './models/PromptCollectionOutput';
export type { PromptOutput } from './models/PromptOutput';
export type { RandomIntInvocation } from './models/RandomIntInvocation';
export type { RandomRangeInvocation } from './models/RandomRangeInvocation';
export type { RangeInvocation } from './models/RangeInvocation';
export type { RangeOfSizeInvocation } from './models/RangeOfSizeInvocation';
export type { ResizeLatentsInvocation } from './models/ResizeLatentsInvocation';
export type { RestoreFaceInvocation } from './models/RestoreFaceInvocation';
export type { ScaleLatentsInvocation } from './models/ScaleLatentsInvocation';
export type { ShowImageInvocation } from './models/ShowImageInvocation';
export type { StableDiffusion1ModelCheckpointConfig } from './models/StableDiffusion1ModelCheckpointConfig';
export type { StableDiffusion1ModelDiffusersConfig } from './models/StableDiffusion1ModelDiffusersConfig';
export type { StableDiffusion2ModelCheckpointConfig } from './models/StableDiffusion2ModelCheckpointConfig';
export type { StableDiffusion2ModelDiffusersConfig } from './models/StableDiffusion2ModelDiffusersConfig';
export type { StepParamEasingInvocation } from './models/StepParamEasingInvocation';
export type { SubModelType } from './models/SubModelType';
export type { SubtractInvocation } from './models/SubtractInvocation';
export type { TextToLatentsInvocation } from './models/TextToLatentsInvocation';
export type { TextualInversionModelConfig } from './models/TextualInversionModelConfig';
export type { UNetField } from './models/UNetField';
export type { UpscaleInvocation } from './models/UpscaleInvocation';
export type { VaeField } from './models/VaeField';
export type { VaeModelConfig } from './models/VaeModelConfig';
export type { VaeRepo } from './models/VaeRepo';
export type { ValidationError } from './models/ValidationError';
export type { ZoeDepthImageProcessorInvocation } from './models/ZoeDepthImageProcessorInvocation';
export const tagTypes = ['Board', 'Image', 'ImageMetadata', 'Model'];
export type ApiFullTagDescription = FullTagDescription<
(typeof tagTypes)[number]

View File

@ -0,0 +1,26 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Adds two numbers
*/
export type AddInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'add';
/**
* The first number
*/
'a'?: number;
/**
* The second number
*/
'b'?: number;
};

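From here down, the diff adds the regenerated OpenAPI client models. As a quick orientation, a node literal satisfying this first type (the id is arbitrary):

import type { AddInvocation } from 'services/api';

// Sketch: a literal satisfying AddInvocation.
const addNode: AddInvocation = { id: 'add_1', type: 'add', a: 2, b: 3 };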
View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type BoardChanges = {
/**
* The board's new name.
*/
board_name?: string;
/**
* The name of the board's new cover image.
*/
cover_image_name?: string;
};

View File

@ -0,0 +1,37 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Deserialized board record with cover image URL and image count.
*/
export type BoardDTO = {
/**
* The unique ID of the board.
*/
board_id: string;
/**
* The name of the board.
*/
board_name: string;
/**
* The created timestamp of the board.
*/
created_at: string;
/**
* The updated timestamp of the board.
*/
updated_at: string;
/**
* The deleted timestamp of the board.
*/
deleted_at?: string;
/**
* The name of the board's cover image.
*/
cover_image_name?: string;
/**
* The number of images in the board.
*/
image_count: number;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type Body_create_board_image = {
/**
* The id of the board to add to
*/
board_id: string;
/**
* The name of the image to add
*/
image_name: string;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type Body_remove_board_image = {
/**
* The id of the board
*/
board_id: string;
/**
* The name of the image to remove
*/
image_name: string;
};

View File

@ -0,0 +1,7 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type Body_upload_image = {
file: Blob;
};

View File

@ -0,0 +1,32 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Canny edge detection for ControlNet
*/
export type CannyImageProcessorInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'canny_image_processor';
/**
* The image to process
*/
image?: ImageField;
/**
* The low threshold of the Canny pixel gradient (0-255)
*/
low_threshold?: number;
/**
* The high threshold of the Canny pixel gradient (0-255)
*/
high_threshold?: number;
};

View File

@ -0,0 +1,39 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type CkptModelInfo = {
/**
* A description of the model
*/
description?: string;
/**
* The name of the model
*/
model_name: string;
/**
* The type of the model
*/
model_type: string;
format?: 'ckpt';
/**
* The path to the model config
*/
config: string;
/**
* The path to the model weights
*/
weights: string;
/**
* The path to the model VAE
*/
vae: string;
/**
* The width of the model
*/
width?: number;
/**
* The height of the model
*/
height?: number;
};

View File

@ -0,0 +1,21 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { LoraInfo } from './LoraInfo';
import type { ModelInfo } from './ModelInfo';
export type ClipField = {
/**
* Info to load tokenizer submodel
*/
tokenizer: ModelInfo;
/**
* Info to load text_encoder submodel
*/
text_encoder: ModelInfo;
/**
* Loras to apply on model loading
*/
loras: Array<LoraInfo>;
};

View File

@ -0,0 +1,26 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Collects values into a collection
*/
export type CollectInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'collect';
/**
* The item to collect (all inputs must be of the same type)
*/
item?: any;
/**
* The collection, will be provided on execution
*/
collection?: Array<any>;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Base class for all invocation outputs
*/
export type CollectInvocationOutput = {
type: 'collect_output';
/**
* The collection of input items
*/
collection: Array<any>;
};

View File

@ -0,0 +1,22 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type ColorField = {
/**
* The red component
*/
'r': number;
/**
* The green component
*/
'g': number;
/**
* The blue component
*/
'b': number;
/**
* The alpha component
*/
'a': number;
};

View File

@ -0,0 +1,28 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ClipField } from './ClipField';
/**
* Parses a prompt into conditioning using the compel package.
*/
export type CompelInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'compel';
/**
* Prompt
*/
prompt?: string;
/**
* Clip to use
*/
clip?: ClipField;
};

View File

@ -0,0 +1,16 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ConditioningField } from './ConditioningField';
/**
* Compel parser output
*/
export type CompelOutput = {
type?: 'compel_output';
/**
* Conditioning
*/
conditioning?: ConditioningField;
};

View File

@ -0,0 +1,10 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type ConditioningField = {
/**
* The name of conditioning data
*/
conditioning_name: string;
};

View File

@ -0,0 +1,44 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Applies content shuffle processing to image
*/
export type ContentShuffleImageProcessorInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'content_shuffle_image_processor';
/**
* The image to process
*/
image?: ImageField;
/**
* The pixel resolution for detection
*/
detect_resolution?: number;
/**
* The pixel resolution for the output image
*/
image_resolution?: number;
/**
* Content shuffle `h` parameter
*/
'h'?: number;
/**
* Content shuffle `w` parameter
*/
'w'?: number;
/**
* Content shuffle `f` parameter
*/
'f'?: number;
};

View File

@ -0,0 +1,28 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
export type ControlField = {
/**
* The control image
*/
image: ImageField;
/**
* The ControlNet model to use
*/
control_model: string;
/**
* The weight given to the ControlNet
*/
control_weight: (number | Array<number>);
/**
* When the ControlNet is first applied (% of total steps)
*/
begin_step_percent: number;
/**
* When the ControlNet is last applied (% of total steps)
*/
end_step_percent: number;
};

View File

@ -0,0 +1,40 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Collects ControlNet info to pass to other nodes
*/
export type ControlNetInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'controlnet';
/**
* The control image
*/
image?: ImageField;
/**
* The control model to use
*/
control_model?: 'lllyasviel/sd-controlnet-canny' | 'lllyasviel/sd-controlnet-depth' | 'lllyasviel/sd-controlnet-hed' | 'lllyasviel/sd-controlnet-seg' | 'lllyasviel/sd-controlnet-openpose' | 'lllyasviel/sd-controlnet-scribble' | 'lllyasviel/sd-controlnet-normal' | 'lllyasviel/sd-controlnet-mlsd' | 'lllyasviel/control_v11p_sd15_canny' | 'lllyasviel/control_v11p_sd15_openpose' | 'lllyasviel/control_v11p_sd15_seg' | 'lllyasviel/control_v11f1p_sd15_depth' | 'lllyasviel/control_v11p_sd15_normalbae' | 'lllyasviel/control_v11p_sd15_scribble' | 'lllyasviel/control_v11p_sd15_mlsd' | 'lllyasviel/control_v11p_sd15_softedge' | 'lllyasviel/control_v11p_sd15s2_lineart_anime' | 'lllyasviel/control_v11p_sd15_lineart' | 'lllyasviel/control_v11p_sd15_inpaint' | 'lllyasviel/control_v11e_sd15_shuffle' | 'lllyasviel/control_v11e_sd15_ip2p' | 'lllyasviel/control_v11f1e_sd15_tile' | 'thibaud/controlnet-sd21-openpose-diffusers' | 'thibaud/controlnet-sd21-canny-diffusers' | 'thibaud/controlnet-sd21-depth-diffusers' | 'thibaud/controlnet-sd21-scribble-diffusers' | 'thibaud/controlnet-sd21-hed-diffusers' | 'thibaud/controlnet-sd21-zoedepth-diffusers' | 'thibaud/controlnet-sd21-color-diffusers' | 'thibaud/controlnet-sd21-openposev2-diffusers' | 'thibaud/controlnet-sd21-lineart-diffusers' | 'thibaud/controlnet-sd21-normalbae-diffusers' | 'thibaud/controlnet-sd21-ade20k-diffusers' | 'CrucibleAI/ControlNetMediaPipeFace,diffusion_sd15' | 'CrucibleAI/ControlNetMediaPipeFace';
/**
* The weight given to the ControlNet
*/
control_weight?: (number | Array<number>);
/**
* When the ControlNet is first applied (% of total steps)
*/
begin_step_percent?: number;
/**
* When the ControlNet is last applied (% of total steps)
*/
end_step_percent?: number;
};

View File

@ -0,0 +1,17 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { BaseModelType } from './BaseModelType';
import type { ControlNetModelFormat } from './ControlNetModelFormat';
import type { ModelError } from './ModelError';
export type ControlNetModelConfig = {
name: string;
base_model: BaseModelType;
type: 'controlnet';
path: string;
description?: string;
model_format: ControlNetModelFormat;
error?: ModelError;
};

View File

@ -0,0 +1,16 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ControlField } from './ControlField';
/**
* Node output for ControlNet info
*/
export type ControlOutput = {
type?: 'control_output';
/**
* The control info
*/
control?: ControlField;
};

View File

@ -0,0 +1,17 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { CkptModelInfo } from './CkptModelInfo';
import type { DiffusersModelInfo } from './DiffusersModelInfo';
export type CreateModelRequest = {
/**
* The name of the model
*/
name: string;
/**
* The model info
*/
info: (CkptModelInfo | DiffusersModelInfo);
};

View File

@ -0,0 +1,28 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Simple inpaint using opencv.
*/
export type CvInpaintInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'cv_inpaint';
/**
* The image to inpaint
*/
image?: ImageField;
/**
* The mask to use when inpainting
*/
mask?: ImageField;
};

View File

@ -0,0 +1,33 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { VaeRepo } from './VaeRepo';
export type DiffusersModelInfo = {
/**
* A description of the model
*/
description?: string;
/**
* The name of the model
*/
model_name: string;
/**
* The type of the model
*/
model_type: string;
format?: 'folder';
/**
* The VAE repo to use for this model
*/
vae?: VaeRepo;
/**
* The repo ID to use for this model
*/
repo_id?: string;
/**
* The path to the model
*/
path?: string;
};

View File

@ -0,0 +1,26 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Divides two numbers
*/
export type DivideInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'div';
/**
* The first number
*/
'a'?: number;
/**
* The second number
*/
'b'?: number;
};

View File

@ -0,0 +1,30 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Parses a prompt using adieyal/dynamicprompts' random or combinatorial generator
*/
export type DynamicPromptInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'dynamic_prompt';
/**
* The prompt to parse with dynamicprompts
*/
prompt: string;
/**
* The number of prompts to generate
*/
max_prompts?: number;
/**
* Whether to use the combinatorial generator
*/
combinatorial?: boolean;
};

View File

@ -0,0 +1,16 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { EdgeConnection } from './EdgeConnection';
export type Edge = {
/**
* The connection for the edge's from node and field
*/
source: EdgeConnection;
/**
* The connection for the edge's to node and field
*/
destination: EdgeConnection;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
export type EdgeConnection = {
/**
* The id of the node for this edge connection
*/
node_id: string;
/**
* The field for this connection
*/
field: string;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* A collection of floats
*/
export type FloatCollectionOutput = {
type?: 'float_collection';
/**
* The float collection
*/
collection?: Array<number>;
};

View File

@ -0,0 +1,30 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Creates a range
*/
export type FloatLinearRangeInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'float_range';
/**
* The first value of the range
*/
start?: number;
/**
* The last value of the range
*/
stop?: number;
/**
* Number of values to interpolate over (including start and stop)
*/
steps?: number;
};

View File

@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* A float output
*/
export type FloatOutput = {
type?: 'float_output';
/**
* The output float
*/
param?: number;
};

View File

@ -0,0 +1,84 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { AddInvocation } from './AddInvocation';
import type { CannyImageProcessorInvocation } from './CannyImageProcessorInvocation';
import type { CollectInvocation } from './CollectInvocation';
import type { CompelInvocation } from './CompelInvocation';
import type { ContentShuffleImageProcessorInvocation } from './ContentShuffleImageProcessorInvocation';
import type { ControlNetInvocation } from './ControlNetInvocation';
import type { CvInpaintInvocation } from './CvInpaintInvocation';
import type { DivideInvocation } from './DivideInvocation';
import type { DynamicPromptInvocation } from './DynamicPromptInvocation';
import type { Edge } from './Edge';
import type { FloatLinearRangeInvocation } from './FloatLinearRangeInvocation';
import type { GraphInvocation } from './GraphInvocation';
import type { HedImageProcessorInvocation } from './HedImageProcessorInvocation';
import type { ImageBlurInvocation } from './ImageBlurInvocation';
import type { ImageChannelInvocation } from './ImageChannelInvocation';
import type { ImageConvertInvocation } from './ImageConvertInvocation';
import type { ImageCropInvocation } from './ImageCropInvocation';
import type { ImageInverseLerpInvocation } from './ImageInverseLerpInvocation';
import type { ImageLerpInvocation } from './ImageLerpInvocation';
import type { ImageMultiplyInvocation } from './ImageMultiplyInvocation';
import type { ImagePasteInvocation } from './ImagePasteInvocation';
import type { ImageProcessorInvocation } from './ImageProcessorInvocation';
import type { ImageResizeInvocation } from './ImageResizeInvocation';
import type { ImageScaleInvocation } from './ImageScaleInvocation';
import type { ImageToLatentsInvocation } from './ImageToLatentsInvocation';
import type { InfillColorInvocation } from './InfillColorInvocation';
import type { InfillPatchMatchInvocation } from './InfillPatchMatchInvocation';
import type { InfillTileInvocation } from './InfillTileInvocation';
import type { InpaintInvocation } from './InpaintInvocation';
import type { IterateInvocation } from './IterateInvocation';
import type { LatentsToImageInvocation } from './LatentsToImageInvocation';
import type { LatentsToLatentsInvocation } from './LatentsToLatentsInvocation';
import type { LineartAnimeImageProcessorInvocation } from './LineartAnimeImageProcessorInvocation';
import type { LineartImageProcessorInvocation } from './LineartImageProcessorInvocation';
import type { LoadImageInvocation } from './LoadImageInvocation';
import type { LoraLoaderInvocation } from './LoraLoaderInvocation';
import type { MaskFromAlphaInvocation } from './MaskFromAlphaInvocation';
import type { MediapipeFaceProcessorInvocation } from './MediapipeFaceProcessorInvocation';
import type { MidasDepthImageProcessorInvocation } from './MidasDepthImageProcessorInvocation';
import type { MlsdImageProcessorInvocation } from './MlsdImageProcessorInvocation';
import type { MultiplyInvocation } from './MultiplyInvocation';
import type { NoiseInvocation } from './NoiseInvocation';
import type { NormalbaeImageProcessorInvocation } from './NormalbaeImageProcessorInvocation';
import type { ONNXLatentsToImageInvocation } from './ONNXLatentsToImageInvocation';
import type { ONNXPromptInvocation } from './ONNXPromptInvocation';
import type { ONNXSD1ModelLoaderInvocation } from './ONNXSD1ModelLoaderInvocation';
import type { ONNXTextToLatentsInvocation } from './ONNXTextToLatentsInvocation';
import type { OpenposeImageProcessorInvocation } from './OpenposeImageProcessorInvocation';
import type { ParamFloatInvocation } from './ParamFloatInvocation';
import type { ParamIntInvocation } from './ParamIntInvocation';
import type { PidiImageProcessorInvocation } from './PidiImageProcessorInvocation';
import type { PipelineModelLoaderInvocation } from './PipelineModelLoaderInvocation';
import type { RandomIntInvocation } from './RandomIntInvocation';
import type { RandomRangeInvocation } from './RandomRangeInvocation';
import type { RangeInvocation } from './RangeInvocation';
import type { RangeOfSizeInvocation } from './RangeOfSizeInvocation';
import type { ResizeLatentsInvocation } from './ResizeLatentsInvocation';
import type { RestoreFaceInvocation } from './RestoreFaceInvocation';
import type { ScaleLatentsInvocation } from './ScaleLatentsInvocation';
import type { ShowImageInvocation } from './ShowImageInvocation';
import type { StepParamEasingInvocation } from './StepParamEasingInvocation';
import type { SubtractInvocation } from './SubtractInvocation';
import type { TextToLatentsInvocation } from './TextToLatentsInvocation';
import type { UpscaleInvocation } from './UpscaleInvocation';
import type { ZoeDepthImageProcessorInvocation } from './ZoeDepthImageProcessorInvocation';
export type Graph = {
/**
* The id of this graph
*/
id?: string;
/**
* The nodes in this graph
*/
nodes?: Record<string, (RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | PipelineModelLoaderInvocation | LoraLoaderInvocation | CompelInvocation | LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CvInpaintInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | InpaintInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ONNXPromptInvocation | ONNXTextToLatentsInvocation | ONNXLatentsToImageInvocation | ONNXSD1ModelLoaderInvocation | ParamIntInvocation | ParamFloatInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | DynamicPromptInvocation | RestoreFaceInvocation | UpscaleInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation)>;
/**
* The connections between nodes and their fields in this graph
*/
edges?: Array<Edge>;
};

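Tying the generated models together: a minimal sketch of a `Graph` wiring two add nodes with an `Edge`. Node ids are arbitrary, and the output field name 'a' on the source add node is an assumption here (its output type is not shown in this diff):

import type { Graph } from 'services/api';

// Sketch: a two-node graph in the generated types.
const graph: Graph = {
  id: 'example_graph',
  nodes: {
    add_1: { id: 'add_1', type: 'add', a: 1, b: 2 },
    add_2: { id: 'add_2', type: 'add', b: 10 },
  },
  edges: [
    {
      // Feed add_1's result into add_2's 'a' input.
      source: { node_id: 'add_1', field: 'a' },
      destination: { node_id: 'add_2', field: 'a' },
    },
  ],
};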
View File

@ -0,0 +1,65 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { CollectInvocationOutput } from './CollectInvocationOutput';
import type { CompelOutput } from './CompelOutput';
import type { ControlOutput } from './ControlOutput';
import type { FloatCollectionOutput } from './FloatCollectionOutput';
import type { FloatOutput } from './FloatOutput';
import type { Graph } from './Graph';
import type { GraphInvocationOutput } from './GraphInvocationOutput';
import type { ImageOutput } from './ImageOutput';
import type { IntCollectionOutput } from './IntCollectionOutput';
import type { IntOutput } from './IntOutput';
import type { IterateInvocationOutput } from './IterateInvocationOutput';
import type { LatentsOutput } from './LatentsOutput';
import type { LoraLoaderOutput } from './LoraLoaderOutput';
import type { MaskOutput } from './MaskOutput';
import type { ModelLoaderOutput } from './ModelLoaderOutput';
import type { NoiseOutput } from './NoiseOutput';
import type { ONNXModelLoaderOutput } from './ONNXModelLoaderOutput';
import type { PromptCollectionOutput } from './PromptCollectionOutput';
import type { PromptOutput } from './PromptOutput';
/**
* Tracks the state of a graph execution
*/
export type GraphExecutionState = {
/**
* The id of the execution state
*/
id: string;
/**
* The graph being executed
*/
graph: Graph;
/**
* The expanded graph of activated and executed nodes
*/
execution_graph: Graph;
/**
* The set of node ids that have been executed
*/
executed: Array<string>;
/**
* The list of node ids that have been executed, in order of execution
*/
executed_history: Array<string>;
/**
* The results of node executions
*/
results: Record<string, (IntCollectionOutput | FloatCollectionOutput | ModelLoaderOutput | LoraLoaderOutput | CompelOutput | ImageOutput | MaskOutput | ControlOutput | LatentsOutput | NoiseOutput | IntOutput | FloatOutput | ONNXModelLoaderOutput | PromptOutput | PromptCollectionOutput | GraphInvocationOutput | IterateInvocationOutput | CollectInvocationOutput)>;
/**
* Errors raised when executing nodes
*/
errors: Record<string, string>;
/**
* The map of prepared nodes to original graph nodes
*/
prepared_source_mapping: Record<string, string>;
/**
* The map of original graph nodes to prepared nodes
*/
source_prepared_mapping: Record<string, Array<string>>;
};

View File

@ -0,0 +1,24 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { Graph } from './Graph';
/**
* Execute a graph
*/
export type GraphInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'graph';
/**
* The graph to run
*/
graph?: Graph;
};

View File

@ -0,0 +1,10 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Base class for all invocation outputs
*/
export type GraphInvocationOutput = {
type: 'graph_output';
};

View File

@ -0,0 +1,9 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ValidationError } from './ValidationError';
export type HTTPValidationError = {
detail?: Array<ValidationError>;
};

View File

@ -0,0 +1,32 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Blurs an image
*/
export type ImageBlurInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_blur';
/**
* The image to blur
*/
image?: ImageField;
/**
* The blur radius
*/
radius?: number;
/**
* The type of blur
*/
blur_type?: 'gaussian' | 'box';
};

View File

@ -0,0 +1,28 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Gets a channel from an image.
*/
export type ImageChannelInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_chan';
/**
* The image to get the channel from
*/
image?: ImageField;
/**
* The channel to get
*/
channel?: 'A' | 'R' | 'G' | 'B';
};

View File

@ -0,0 +1,28 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Converts an image to a different mode.
*/
export type ImageConvertInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_conv';
/**
* The image to convert
*/
image?: ImageField;
/**
* The mode to convert to
*/
mode?: 'L' | 'RGB' | 'RGBA' | 'CMYK' | 'YCbCr' | 'LAB' | 'HSV' | 'I' | 'F';
};

View File

@ -0,0 +1,40 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Crops an image to a specified box. The box can be outside of the image.
*/
export type ImageCropInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_crop';
/**
* The image to crop
*/
image?: ImageField;
/**
* The left x coordinate of the crop rectangle
*/
'x'?: number;
/**
* The top y coordinate of the crop rectangle
*/
'y'?: number;
/**
* The width of the crop rectangle
*/
width?: number;
/**
* The height of the crop rectangle
*/
height?: number;
};

View File

@ -0,0 +1,73 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageCategory } from './ImageCategory';
import type { ImageMetadata } from './ImageMetadata';
import type { ResourceOrigin } from './ResourceOrigin';
/**
* Deserialized image record, enriched for the frontend.
*/
export type ImageDTO = {
/**
* The unique name of the image.
*/
image_name: string;
/**
* The URL of the image.
*/
image_url: string;
/**
* The URL of the image's thumbnail.
*/
thumbnail_url: string;
/**
* The type of the image.
*/
image_origin: ResourceOrigin;
/**
* The category of the image.
*/
image_category: ImageCategory;
/**
* The width of the image in px.
*/
width: number;
/**
* The height of the image in px.
*/
height: number;
/**
* The created timestamp of the image.
*/
created_at: string;
/**
* The updated timestamp of the image.
*/
updated_at: string;
/**
* The deleted timestamp of the image.
*/
deleted_at?: string;
/**
* Whether this is an intermediate image.
*/
is_intermediate: boolean;
/**
* The session ID that generated this image, if it is a generated image.
*/
session_id?: string;
/**
* The node ID that generated this image, if it is a generated image.
*/
node_id?: string;
/**
* A limited subset of the image's generation metadata. Retrieve the image's session for full metadata.
*/
metadata?: ImageMetadata;
/**
* The id of the board the image belongs to, if one exists.
*/
board_id?: string;
};

View File

@ -0,0 +1,13 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* An image field used for passing image objects between invocations
*/
export type ImageField = {
/**
* The name of the image
*/
image_name: string;
};

View File

@ -0,0 +1,32 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Inverse linear interpolation of all pixels of an image
*/
export type ImageInverseLerpInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_ilerp';
/**
* The image to lerp
*/
image?: ImageField;
/**
* The minimum input value
*/
min?: number;
/**
* The maximum input value
*/
max?: number;
};

View File

@ -0,0 +1,32 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Linear interpolation of all pixels of an image
*/
export type ImageLerpInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_lerp';
/**
* The image to lerp
*/
image?: ImageField;
/**
* The minimum output value
*/
min?: number;
/**
* The maximum output value
*/
max?: number;
};

View File

@ -0,0 +1,80 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* Core generation metadata for an image/tensor generated in InvokeAI.
*
* Also includes any metadata from the image's PNG tEXt chunks.
*
* Generated by traversing the execution graph, collecting the parameters of the nearest ancestors
* of a given node.
*
* Full metadata may be accessed by querying for the session in the `graph_executions` table.
*/
export type ImageMetadata = {
/**
* The type of the ancestor node of the image output node.
*/
type?: string;
/**
* The positive conditioning.
*/
positive_conditioning?: string;
/**
* The negative conditioning.
*/
negative_conditioning?: string;
/**
* Width of the image/latents in pixels.
*/
width?: number;
/**
* Height of the image/latents in pixels.
*/
height?: number;
/**
* The seed used for noise generation.
*/
seed?: number;
/**
* The classifier-free guidance scale.
*/
cfg_scale?: (number | Array<number>);
/**
* The number of steps used for inference.
*/
steps?: number;
/**
* The scheduler used for inference.
*/
scheduler?: string;
/**
* The model used for inference.
*/
model?: string;
/**
* The strength used for image-to-image/latents-to-latents.
*/
strength?: number;
/**
* The ID of the initial latents.
*/
latents?: string;
/**
* The VAE used for decoding.
*/
vae?: string;
/**
 * The UNet used for inference.
*/
unet?: string;
/**
* The CLIP Encoder used for conditioning.
*/
clip?: string;
/**
* Uploaded image metadata, extracted from the PNG tEXt chunk.
*/
extra?: string;
};
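
Because every field of ImageMetadata is optional, consumers must guard each access; cfg_scale in particular may be a single number or a per-step array. An illustrative formatter (not part of the generated client):

import type { ImageMetadata } from './ImageMetadata';

// Build a compact one-line summary of whichever fields are present.
const summarize = (m: ImageMetadata): string => {
  const cfg = Array.isArray(m.cfg_scale) ? m.cfg_scale.join('/') : m.cfg_scale;
  return [
    m.model && `model=${m.model}`,
    m.scheduler && `scheduler=${m.scheduler}`,
    m.steps !== undefined && `steps=${m.steps}`,
    cfg !== undefined && `cfg=${cfg}`,
    m.seed !== undefined && `seed=${m.seed}`,
  ]
    .filter(Boolean)
    .join(' ');
};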

@@ -0,0 +1,28 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Multiplies two images together using `PIL.ImageChops.multiply()`.
*/
export type ImageMultiplyInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_mul';
/**
* The first image to multiply
*/
image1?: ImageField;
/**
* The second image to multiply
*/
image2?: ImageField;
};
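
An illustrative instance (id and image names are hypothetical; in practice the inputs usually arrive via graph edges), e.g. multiplying a mask over a base image:

import type { ImageMultiplyInvocation } from './ImageMultiplyInvocation';

const mul: ImageMultiplyInvocation = {
  id: 'img_mul_1',
  type: 'img_mul',
  image1: { image_name: 'base_image' },  // hypothetical image name
  image2: { image_name: 'mask_image' },  // hypothetical image name
};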

@@ -0,0 +1,24 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Base class for invocations that output an image
*/
export type ImageOutput = {
type: 'image_output';
/**
* The output image
*/
image: ImageField;
/**
* The width of the image in pixels
*/
width: number;
/**
* The height of the image in pixels
*/
height: number;
};
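
The required literal type: 'image_output' makes ImageOutput usable as a discriminated union member. A sketch of a type guard built on it:

import type { ImageOutput } from './ImageOutput';

// Narrow an arbitrary invocation result to an ImageOutput.
const isImageOutput = (o: { type?: string }): o is ImageOutput =>
  o.type === 'image_output';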

@@ -0,0 +1,40 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Pastes an image into another image.
*/
export type ImagePasteInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_paste';
/**
* The base image
*/
base_image?: ImageField;
/**
* The image to paste
*/
image?: ImageField;
/**
* The mask to use when pasting
*/
mask?: ImageField;
/**
* The left x coordinate at which to paste the image
*/
'x'?: number;
/**
* The top y coordinate at which to paste the image
*/
'y'?: number;
};
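
An illustrative paste node (id, image names, and offsets are hypothetical); the optional mask limits which pasted pixels take effect:

import type { ImagePasteInvocation } from './ImagePasteInvocation';

const paste: ImagePasteInvocation = {
  id: 'img_paste_1',
  type: 'img_paste',
  base_image: { image_name: 'canvas_image' },  // hypothetical image name
  image: { image_name: 'patch_image' },        // hypothetical image name
  x: 128,  // paste 128 px from the left edge
  y: 64,   // paste 64 px from the top edge
};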

@@ -0,0 +1,24 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Base class for invocations that preprocess images for ControlNet
*/
export type ImageProcessorInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'image_processor';
/**
* The image to process
*/
image?: ImageField;
};
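
Concrete ControlNet preprocessors (canny, depth, and so on) extend this base shape with their own parameters; the base node itself can be instantiated as a passthrough. A sketch with an illustrative id and image name:

import type { ImageProcessorInvocation } from './ImageProcessorInvocation';

const passthrough: ImageProcessorInvocation = {
  id: 'image_processor_1',
  type: 'image_processor',
  image: { image_name: 'control_image' },  // hypothetical image name
};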

@@ -0,0 +1,28 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageCategory } from './ImageCategory';
/**
* A set of changes to apply to an image record.
*
* Only limited changes are valid:
* - `image_category`: change the category of an image
* - `session_id`: change the session associated with an image
* - `is_intermediate`: change the image's `is_intermediate` flag
*/
export type ImageRecordChanges = {
/**
* The image's new category.
*/
image_category?: ImageCategory;
/**
* The image's new session ID.
*/
session_id?: string;
/**
* The image's new `is_intermediate` flag.
*/
is_intermediate?: boolean;
};
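
A sketch of a typical changes payload: only the fields being modified are present. Here an intermediate image is promoted into the gallery ('general' is assumed to be a valid ImageCategory value):

import type { ImageRecordChanges } from './ImageRecordChanges';

const changes: ImageRecordChanges = {
  is_intermediate: false,     // surface the image in the gallery
  image_category: 'general',  // assumed ImageCategory member
};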

@@ -0,0 +1,36 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Resizes an image to specific dimensions
*/
export type ImageResizeInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_resize';
/**
* The image to resize
*/
image?: ImageField;
/**
* The width to resize to (px)
*/
width: number;
/**
* The height to resize to (px)
*/
height: number;
/**
* The resampling mode
*/
resample_mode?: 'nearest' | 'box' | 'bilinear' | 'hamming' | 'bicubic' | 'lanczos';
};
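
Unlike most inputs on these nodes, width and height are required here. An illustrative instance (values are arbitrary; omitting resample_mode presumably falls back to a server-side default):

import type { ImageResizeInvocation } from './ImageResizeInvocation';

const resize: ImageResizeInvocation = {
  id: 'img_resize_1',
  type: 'img_resize',
  width: 768,               // target width in px
  height: 512,              // target height in px
  resample_mode: 'bicubic',
};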

@@ -0,0 +1,32 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Scales an image by a factor
*/
export type ImageScaleInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'img_scale';
/**
* The image to scale
*/
image?: ImageField;
/**
* The factor by which to scale the image
*/
scale_factor: number;
/**
* The resampling mode
*/
resample_mode?: 'nearest' | 'box' | 'bilinear' | 'hamming' | 'bicubic' | 'lanczos';
};
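
Where ImageResizeInvocation takes absolute dimensions, this node derives the output size from a required scale_factor. An illustrative 2x upscale:

import type { ImageScaleInvocation } from './ImageScaleInvocation';

const upscale: ImageScaleInvocation = {
  id: 'img_scale_1',
  type: 'img_scale',
  scale_factor: 2,           // double both dimensions
  resample_mode: 'lanczos',  // high-quality resampling for upscales
};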

@@ -0,0 +1,33 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
import type { VaeField } from './VaeField';
/**
* Encodes an image into latents.
*/
export type ImageToLatentsInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'i2l';
/**
* The image to encode
*/
image?: ImageField;
/**
 * VAE submodel
*/
vae?: VaeField;
/**
 * Encode latents by overlapping tiles (less memory consumption)
*/
tiled?: boolean;
};
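
A sketch of the encoding half of an image-to-image pipeline (id is illustrative; the image and vae inputs would normally be wired in via graph edges):

import type { ImageToLatentsInvocation } from './ImageToLatentsInvocation';

const i2l: ImageToLatentsInvocation = {
  id: 'i2l_1',
  type: 'i2l',
  tiled: true,  // encode by overlapping tiles to reduce memory use
};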

@@ -0,0 +1,21 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* The URLs for an image and its thumbnail.
*/
export type ImageUrlsDTO = {
/**
* The unique name of the image.
*/
image_name: string;
/**
* The URL of the image.
*/
image_url: string;
/**
* The URL of the image's thumbnail.
*/
thumbnail_url: string;
};
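
ImageUrlsDTO is the URL-only projection of ImageDTO, so refreshed URLs can be merged into a cached record without refetching it. An illustrative helper (not part of the generated client):

import type { ImageDTO } from './ImageDTO';
import type { ImageUrlsDTO } from './ImageUrlsDTO';

// Apply new URLs to a cached record when the names match.
const mergeUrls = (dto: ImageDTO, urls: ImageUrlsDTO): ImageDTO =>
  dto.image_name === urls.image_name
    ? { ...dto, image_url: urls.image_url, thumbnail_url: urls.thumbnail_url }
    : dto;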

@@ -0,0 +1,29 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ColorField } from './ColorField';
import type { ImageField } from './ImageField';
/**
* Infills transparent areas of an image with a solid color
*/
export type InfillColorInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'infill_rgba';
/**
* The image to infill
*/
image?: ImageField;
/**
* The color to use to infill
*/
color?: ColorField;
};

@@ -0,0 +1,24 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Infills transparent areas of an image using the PatchMatch algorithm
*/
export type InfillPatchMatchInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'infill_patchmatch';
/**
* The image to infill
*/
image?: ImageField;
};

@@ -0,0 +1,32 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ImageField } from './ImageField';
/**
* Infills transparent areas of an image with tiles of the image
*/
export type InfillTileInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'infill_tile';
/**
* The image to infill
*/
image?: ImageField;
/**
* The tile size (px)
*/
tile_size?: number;
/**
* The seed to use for tile generation (omit for random)
*/
seed?: number;
};
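
A sketch covering the three infill nodes above (solid color, PatchMatch, and tiling); ids and parameter values are illustrative, the image input is assumed to arrive via an edge, and the r/g/b/a shape of ColorField is an assumption:

import type { InfillColorInvocation } from './InfillColorInvocation';
import type { InfillPatchMatchInvocation } from './InfillPatchMatchInvocation';
import type { InfillTileInvocation } from './InfillTileInvocation';

const solid: InfillColorInvocation = {
  id: 'infill_rgba_1',
  type: 'infill_rgba',
  color: { r: 127, g: 127, b: 127, a: 255 },  // assumes ColorField is r/g/b/a
};

const patchmatch: InfillPatchMatchInvocation = {
  id: 'infill_patchmatch_1',
  type: 'infill_patchmatch',  // no parameters beyond the image input
};

const tiled: InfillTileInvocation = {
  id: 'infill_tile_1',
  type: 'infill_tile',
  tile_size: 32,  // 32 px tiles
  seed: 42,       // omit for a random seed
};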

@@ -0,0 +1,120 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
import type { ColorField } from './ColorField';
import type { ConditioningField } from './ConditioningField';
import type { ImageField } from './ImageField';
import type { UNetField } from './UNetField';
import type { VaeField } from './VaeField';
/**
* Generates an image using inpaint.
*/
export type InpaintInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'inpaint';
/**
* Positive conditioning for generation
*/
positive_conditioning?: ConditioningField;
/**
* Negative conditioning for generation
*/
negative_conditioning?: ConditioningField;
/**
* The seed to use (omit for random)
*/
seed?: number;
/**
* The number of steps to use to generate the image
*/
steps?: number;
/**
* The width of the resulting image
*/
width?: number;
/**
* The height of the resulting image
*/
height?: number;
/**
 * The classifier-free guidance scale; higher values may give results closer to the prompt
*/
cfg_scale?: number;
/**
* The scheduler to use
*/
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'lms_k' | 'pndm' | 'heun' | 'heun_k' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2s_k' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'dpmpp_2m_sde' | 'dpmpp_2m_sde_k' | 'dpmpp_sde' | 'dpmpp_sde_k' | 'unipc';
/**
* UNet model
*/
unet?: UNetField;
/**
 * VAE model
*/
vae?: VaeField;
/**
* The input image
*/
image?: ImageField;
/**
* The strength of the original image
*/
strength?: number;
/**
* Whether or not the result should be fit to the aspect ratio of the input image
*/
fit?: boolean;
/**
* The mask
*/
mask?: ImageField;
/**
* The seam inpaint size (px)
*/
seam_size?: number;
/**
* The seam inpaint blur radius (px)
*/
seam_blur?: number;
/**
* The seam inpaint strength
*/
seam_strength?: number;
/**
* The number of steps to use for seam inpaint
*/
seam_steps?: number;
/**
 * The tile size (px) used by the tile infill method
*/
tile_size?: number;
/**
 * The method used to infill empty regions
*/
infill_method?: 'patchmatch' | 'tile' | 'solid';
/**
* The width of the inpaint region (px)
*/
inpaint_width?: number;
/**
* The height of the inpaint region (px)
*/
inpaint_height?: number;
/**
 * The color used by the solid infill method
*/
inpaint_fill?: ColorField;
/**
* The amount by which to replace masked areas with latent noise
*/
inpaint_replace?: number;
};
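
A minimal sketch of an inpaint node; all values are illustrative, and the conditioning, unet, vae, image, and mask inputs would normally be wired in from prompt and model-loader nodes via graph edges:

import type { InpaintInvocation } from './InpaintInvocation';

const inpaint: InpaintInvocation = {
  id: 'inpaint_1',
  type: 'inpaint',
  steps: 30,
  cfg_scale: 7.5,
  scheduler: 'euler_a',
  strength: 0.75,               // how strongly to depart from the input image
  fit: true,
  infill_method: 'patchmatch',  // fill masked regions before denoising
};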

@@ -0,0 +1,14 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */
/**
* A collection of integers
*/
export type IntCollectionOutput = {
type?: 'int_collection';
/**
* The int collection
*/
collection?: Array<number>;
};
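
Both fields here are optional, so a consumer should default before use. An illustrative accessor:

import type { IntCollectionOutput } from './IntCollectionOutput';

// Read the collection, treating a missing field as empty.
const ints = (out: IntCollectionOutput): Array<number> => out.collection ?? [];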

Some files were not shown because too many files have changed in this diff.