invoke-ai/InvokeAI (mirror of https://github.com/invoke-ai/InvokeAI)

Commit bd7b59910d (parent 524888bf3b): Testing onnx in new ui updates
@@ -429,4 +429,119 @@ class ONNXSD1ModelLoaderInvocation(BaseInvocation):
                     submodel=SubModelType.VaeEncoder,
                 ),
             )
         )
+
+
+class OnnxModelField(BaseModel):
+    """Onnx model field"""
+
+    model_name: str = Field(description="Name of the model")
+    base_model: BaseModelType = Field(description="Base model")
+
+
+class OnnxModelLoaderInvocation(BaseInvocation):
+    """Loads a main model, outputting its submodels."""
+
+    type: Literal["onnx_model_loader"] = "onnx_model_loader"
+
+    model: OnnxModelField = Field(description="The model to load")
+    # TODO: precision?
+
+    # Schema customisation
+    class Config(InvocationConfig):
+        schema_extra = {
+            "ui": {
+                "title": "Onnx Model Loader",
+                "tags": ["model", "loader"],
+                "type_hints": {"model": "model"},
+            },
+        }
+
+    def invoke(self, context: InvocationContext) -> ONNXModelLoaderOutput:
+        base_model = self.model.base_model
+        model_name = self.model.model_name
+        model_type = ModelType.ONNX
+
+        # TODO: not found exceptions
+        if not context.services.model_manager.model_exists(
+            model_name=model_name,
+            base_model=base_model,
+            model_type=model_type,
+        ):
+            raise Exception(f"Unknown {base_model} {model_type} model: {model_name}")
+
+        """
+        if not context.services.model_manager.model_exists(
+            model_name=self.model_name,
+            model_type=SDModelType.Diffusers,
+            submodel=SDModelType.Tokenizer,
+        ):
+            raise Exception(
+                f"Failed to find tokenizer submodel in {self.model_name}! Check if model corrupted"
+            )
+
+        if not context.services.model_manager.model_exists(
+            model_name=self.model_name,
+            model_type=SDModelType.Diffusers,
+            submodel=SDModelType.TextEncoder,
+        ):
+            raise Exception(
+                f"Failed to find text_encoder submodel in {self.model_name}! Check if model corrupted"
+            )
+
+        if not context.services.model_manager.model_exists(
+            model_name=self.model_name,
+            model_type=SDModelType.Diffusers,
+            submodel=SDModelType.UNet,
+        ):
+            raise Exception(
+                f"Failed to find unet submodel from {self.model_name}! Check if model corrupted"
+            )
+        """
+
+        return ONNXModelLoaderOutput(
+            unet=UNetField(
+                unet=ModelInfo(
+                    model_name=model_name,
+                    base_model=base_model,
+                    model_type=model_type,
+                    submodel=SubModelType.UNet,
+                ),
+                scheduler=ModelInfo(
+                    model_name=model_name,
+                    base_model=base_model,
+                    model_type=model_type,
+                    submodel=SubModelType.Scheduler,
+                ),
+                loras=[],
+            ),
+            clip=ClipField(
+                tokenizer=ModelInfo(
+                    model_name=model_name,
+                    base_model=base_model,
+                    model_type=model_type,
+                    submodel=SubModelType.Tokenizer,
+                ),
+                text_encoder=ModelInfo(
+                    model_name=model_name,
+                    base_model=base_model,
+                    model_type=model_type,
+                    submodel=SubModelType.TextEncoder,
+                ),
+                loras=[],
+            ),
+            vae_decoder=VaeField(
+                vae=ModelInfo(
+                    model_name=model_name,
+                    base_model=base_model,
+                    model_type=model_type,
+                    submodel=SubModelType.VaeDecoder,
+                ),
+            ),
+            vae_encoder=VaeField(
+                vae=ModelInfo(
+                    model_name=model_name,
+                    base_model=base_model,
+                    model_type=model_type,
+                    submodel=SubModelType.VaeEncoder,
+                ),
+            ),
+        )
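For orientation, here is a minimal sketch of how the frontend changes further down address this new invocation from a graph. The node id and field names come from the constants and graph builders in this commit; the concrete model name and the destination node are illustrative assumptions, not part of the commit.

// Illustrative sketch only (not from the commit): an onnx_model_loader node and
// one edge, shaped like the graph builders below construct them.
import { ONNX_MODEL_LOADER, POSITIVE_CONDITIONING } from './constants';

const nodes = {
  [ONNX_MODEL_LOADER]: {
    type: 'onnx_model_loader' as const,
    id: ONNX_MODEL_LOADER,
    // placeholder model values
    model: { model_name: 'some-onnx-model', base_model: 'sd-1' },
  },
};

const edges = [
  {
    // the invocation exposes unet/clip/vae_decoder/vae_encoder outputs;
    // here its clip output feeds a conditioning node, as in the builders below
    source: { node_id: ONNX_MODEL_LOADER, field: 'clip' },
    destination: { node_id: POSITIVE_CONDITIONING, field: 'clip' },
  },
];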
@@ -22,7 +22,7 @@ class ModelProbeInfo(object):
     variant_type: ModelVariantType
     prediction_type: SchedulerPredictionType
     upcast_attention: bool
-    format: Literal['diffusers','checkpoint', 'lycoris']
+    format: Literal['diffusers','checkpoint', 'lycoris', 'olive']
     image_size: int

 class ProbeBase(object):
invokeai/frontend/web/dist/assets/App-a44d46fe.js (vendored, 199 lines): file diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/assets/App-e42b1b26.js (vendored, new file, 199 lines): file diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
import{v as S,ga as Ze,q as k,M as Et,gb as Dt,ae as bt,ag as c,gc as v,gd as jt,ge as a,gf as Rt,gg as p,gh as vt,gi as Ht,gj as Wt,aX as Vt,gk as Lt,Z as Ot,gl as qt,gm as Nt,gn as Gt,go as Ut,aV as Xt}from"./index-078526aa.js";import{M as Yt}from"./MantineProvider-8988d217.js";var ut=String.raw,ft=ut`
import{v as S,gn as Ze,q as k,M as Et,go as Dt,ae as bt,ag as c,gp as v,gq as jt,gr as a,gs as Rt,gt as p,gu as vt,gv as Ht,gw as Wt,aX as Vt,gx as Lt,Z as Ot,gy as qt,gz as Nt,gA as Gt,gB as Ut,aV as Xt}from"./index-63d3e2f4.js";import{M as Yt}from"./MantineProvider-ffc3d7ba.js";var ut=String.raw,ft=ut`
 :root,
 :host {
   --chakra-vh: 100vh;
invokeai/frontend/web/dist/assets/index-078526aa.js (vendored, 125 lines): file diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/assets/index-63d3e2f4.js (vendored, new file, 125 lines): file diff suppressed because one or more lines are too long
invokeai/frontend/web/dist/index.html (vendored, 2 lines)
@@ -12,7 +12,7 @@
       margin: 0;
     }
   </style>
-    <script type="module" crossorigin src="./assets/index-078526aa.js"></script>
+    <script type="module" crossorigin src="./assets/index-63d3e2f4.js"></script>
   </head>

   <body dir="ltr">
invokeai/frontend/web/dist/locales/en.json (vendored, 17 lines)
@@ -102,7 +102,8 @@
     "openInNewTab": "Open in New Tab",
     "dontAskMeAgain": "Don't ask me again",
     "areYouSure": "Are you sure?",
-    "imagePrompt": "Image Prompt"
+    "imagePrompt": "Image Prompt",
+    "clearNodes": "Are you sure you want to clear all nodes?"
   },
   "gallery": {
     "generations": "Generations",
@@ -528,7 +529,7 @@
     "hidePreview": "Hide Preview",
     "showPreview": "Show Preview",
     "controlNetControlMode": "Control Mode",
-    "clipSkip": "Clip Skip",
+    "clipSkip": "CLIP Skip",
     "aspectRatio": "Ratio"
   },
   "settings": {
@@ -593,7 +594,11 @@
     "metadataLoadFailed": "Failed to load metadata",
     "initialImageSet": "Initial Image Set",
     "initialImageNotSet": "Initial Image Not Set",
-    "initialImageNotSetDesc": "Could not load initial image"
+    "initialImageNotSetDesc": "Could not load initial image",
+    "nodesSaved": "Nodes Saved",
+    "nodesLoaded": "Nodes Loaded",
+    "nodesLoadedFailed": "Failed To Load Nodes",
+    "nodesCleared": "Nodes Cleared"
   },
   "tooltip": {
     "feature": {
@@ -674,5 +679,11 @@
     "showProgressImages": "Show Progress Images",
     "hideProgressImages": "Hide Progress Images",
     "swapSizes": "Swap Sizes"
   },
+  "nodes": {
+    "reloadSchema": "Reload Schema",
+    "saveNodes": "Save Nodes",
+    "loadNodes": "Load Nodes",
+    "clearNodes": "Clear Nodes"
+  }
 }
@@ -11,7 +11,10 @@ import { MODEL_TYPE_MAP as BASE_MODEL_NAME_MAP } from 'features/system/component
 import { forEach, isString } from 'lodash-es';
 import { memo, useCallback, useEffect, useMemo } from 'react';
 import { useTranslation } from 'react-i18next';
-import { useGetMainModelsQuery } from 'services/api/endpoints/models';
+import {
+  useGetMainModelsQuery,
+  useGetOnnxModelsQuery,
+} from 'services/api/endpoints/models';
 import { FieldComponentProps } from './types';

 const ModelInputFieldComponent = (
@@ -23,6 +26,7 @@ const ModelInputFieldComponent = (
   const { t } = useTranslation();

   const { data: mainModels } = useGetMainModelsQuery();
+  const { data: onnxModels } = useGetOnnxModelsQuery();

   const data = useMemo(() => {
     if (!mainModels) {
@@ -43,12 +47,33 @@ const ModelInputFieldComponent = (
       });
     });

+    if (onnxModels) {
+      forEach(onnxModels.entities, (model, id) => {
+        if (!model) {
+          return;
+        }
+
+        data.push({
+          value: id,
+          label: model.model_name,
+          group: BASE_MODEL_NAME_MAP[model.base_model],
+        });
+      });
+    }
     return data;
-  }, [mainModels]);
+  }, [mainModels, onnxModels]);

   const selectedModel = useMemo(
-    () => mainModels?.entities[field.value ?? mainModels.ids[0]],
-    [mainModels?.entities, mainModels?.ids, field.value]
+    () =>
+      mainModels?.entities[field.value ?? mainModels.ids[0]] ||
+      onnxModels?.entities[field.value ?? onnxModels.ids[0]],
+    [
+      mainModels?.entities,
+      mainModels?.ids,
+      onnxModels?.entities,
+      onnxModels?.ids,
+      field.value,
+    ]
   );

   const handleValueChanged = useCallback(
@@ -69,18 +94,22 @@ const ModelInputFieldComponent = (
   );

   useEffect(() => {
-    if (field.value && mainModels?.ids.includes(field.value)) {
+    if (
+      field.value &&
+      (mainModels?.ids.includes(field.value) ||
+        onnxModels?.ids.includes(field.value))
+    ) {
       return;
     }

-    const firstModel = mainModels?.ids[0];
+    const firstModel = mainModels?.ids[0] || onnxModels?.ids[0];

     if (!isString(firstModel)) {
       return;
     }

     handleValueChanged(firstModel);
-  }, [field.value, handleValueChanged, mainModels?.ids]);
+  }, [field.value, handleValueChanged, mainModels?.ids, onnxModels?.ids]);

   return (
     <IAIMantineSelect
@@ -10,6 +10,7 @@ import {
   CLIP_SKIP,
   LORA_LOADER,
   MAIN_MODEL_LOADER,
+  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   POSITIVE_CONDITIONING,
@@ -41,6 +42,10 @@ export const addLoRAsToGraph = (
       !(
         e.source.node_id === MAIN_MODEL_LOADER &&
         ['unet'].includes(e.source.field)
-      )
+      ) &&
+      !(
+        e.source.node_id === ONNX_MODEL_LOADER &&
+        ['unet'].includes(e.source.field)
+      )
   );
   // Remove CLIP_SKIP connections to conditionings to feed it through LoRAs
@@ -77,11 +82,14 @@ export const addLoRAsToGraph = (
     // add to graph
     graph.nodes[currentLoraNodeId] = loraLoaderNode;

+    const model_loader = id.includes('onnx')
+      ? ONNX_MODEL_LOADER
+      : MAIN_MODEL_LOADER;
    if (currentLoraIndex === 0) {
       // first lora = start the lora chain, attach directly to model loader
       graph.edges.push({
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'unet',
         },
         destination: {
@@ -10,6 +10,7 @@ import {
   LATENTS_TO_IMAGE,
   MAIN_MODEL_LOADER,
   METADATA_ACCUMULATOR,
+  ONNX_MODEL_LOADER,
   TEXT_TO_IMAGE_GRAPH,
   VAE_LOADER,
 } from './constants';
@@ -33,11 +34,13 @@ export const addVAEToGraph = (
       vae_model,
     };
   }

+  const model_loader = vae?.id.includes('onnx')
+    ? ONNX_MODEL_LOADER
+    : MAIN_MODEL_LOADER;
   if (graph.id === TEXT_TO_IMAGE_GRAPH || graph.id === IMAGE_TO_IMAGE_GRAPH) {
     graph.edges.push({
       source: {
-        node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
+        node_id: isAutoVae ? model_loader : VAE_LOADER,
         field: 'vae',
       },
       destination: {
@@ -50,7 +53,7 @@ export const addVAEToGraph = (
   if (graph.id === IMAGE_TO_IMAGE_GRAPH) {
     graph.edges.push({
       source: {
-        node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
+        node_id: isAutoVae ? model_loader : VAE_LOADER,
         field: 'vae',
       },
       destination: {
@@ -63,7 +66,7 @@ export const addVAEToGraph = (
   if (graph.id === INPAINT_GRAPH) {
     graph.edges.push({
       source: {
-        node_id: isAutoVae ? MAIN_MODEL_LOADER : VAE_LOADER,
+        node_id: isAutoVae ? model_loader : VAE_LOADER,
         field: 'vae',
       },
       destination: {
@@ -18,6 +18,7 @@ import {
   LATENTS_TO_IMAGE,
   LATENTS_TO_LATENTS,
   MAIN_MODEL_LOADER,
+  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -59,6 +60,11 @@ export const buildCanvasImageToImageGraph = (
     ? shouldUseCpuNoise
     : initialGenerationState.shouldUseCpuNoise;

+  console.log(model);
+  const model_loader = model.model_name.includes('onnx')
+    ? ONNX_MODEL_LOADER
+    : MAIN_MODEL_LOADER;
+
   /**
    * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
    * full graph here as a template. Then use the parameters from app state and set friendlier node
@@ -69,6 +75,7 @@ export const buildCanvasImageToImageGraph = (
    */

   // copy-pasted graph from node editor, filled in with state values & friendly node ids
+  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: IMAGE_TO_IMAGE_GRAPH,
     nodes: {
@@ -87,9 +94,9 @@ export const buildCanvasImageToImageGraph = (
         id: NOISE,
         use_cpu,
       },
-      [MAIN_MODEL_LOADER]: {
-        type: 'main_model_loader',
-        id: MAIN_MODEL_LOADER,
+      [model_loader]: {
+        type: model_loader,
+        id: model_loader,
         model,
       },
       [CLIP_SKIP]: {
@@ -121,7 +128,7 @@ export const buildCanvasImageToImageGraph = (
     edges: [
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'clip',
         },
         destination: {
@@ -181,7 +188,7 @@ export const buildCanvasImageToImageGraph = (
       },
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'unet',
         },
         destination: {
@@ -15,6 +15,7 @@ import {
   INPAINT_GRAPH,
   ITERATE,
   MAIN_MODEL_LOADER,
+  ONNX_MODEL_LOADER,
   NEGATIVE_CONDITIONING,
   POSITIVE_CONDITIONING,
   RANDOM_INT,
@@ -63,6 +64,12 @@ export const buildCanvasInpaintGraph = (
   // We may need to set the inpaint width and height to scale the image
   const { scaledBoundingBoxDimensions, boundingBoxScaleMethod } = state.canvas;

+  console.log(model);
+  const model_loader = model.model_name.includes('onnx')
+    ? ONNX_MODEL_LOADER
+    : MAIN_MODEL_LOADER;
+
+  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: INPAINT_GRAPH,
     nodes: {
@@ -107,9 +114,9 @@ export const buildCanvasInpaintGraph = (
         id: NEGATIVE_CONDITIONING,
         prompt: negativePrompt,
       },
-      [MAIN_MODEL_LOADER]: {
-        type: 'main_model_loader',
-        id: MAIN_MODEL_LOADER,
+      [model_loader]: {
+        type: model_loader,
+        id: model_loader,
         model,
       },
       [CLIP_SKIP]: {
@@ -133,7 +140,7 @@ export const buildCanvasInpaintGraph = (
     edges: [
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'unet',
         },
         destination: {
@@ -143,7 +150,7 @@ export const buildCanvasInpaintGraph = (
       },
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'clip',
         },
         destination: {
@@ -10,6 +10,7 @@ import {
   CLIP_SKIP,
   LATENTS_TO_IMAGE,
   MAIN_MODEL_LOADER,
+  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -49,7 +50,10 @@ export const buildCanvasTextToImageGraph = (
   const use_cpu = shouldUseNoiseSettings
     ? shouldUseCpuNoise
     : initialGenerationState.shouldUseCpuNoise;

+  console.log(model);
+  const model_loader = model.model_name.includes('onnx')
+    ? ONNX_MODEL_LOADER
+    : MAIN_MODEL_LOADER;
   /**
    * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
    * full graph here as a template. Then use the parameters from app state and set friendlier node
@@ -60,6 +64,7 @@ export const buildCanvasTextToImageGraph = (
    */

   // copy-pasted graph from node editor, filled in with state values & friendly node ids
+  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: TEXT_TO_IMAGE_GRAPH,
     nodes: {
@@ -87,9 +92,9 @@ export const buildCanvasTextToImageGraph = (
         scheduler,
         steps,
       },
-      [MAIN_MODEL_LOADER]: {
-        type: 'main_model_loader',
-        id: MAIN_MODEL_LOADER,
+      [model_loader]: {
+        type: model_loader,
+        id: model_loader,
         model,
       },
       [CLIP_SKIP]: {
@@ -125,7 +130,7 @@ export const buildCanvasTextToImageGraph = (
       },
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'clip',
         },
         destination: {
@@ -155,7 +160,7 @@ export const buildCanvasTextToImageGraph = (
       },
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'unet',
         },
         destination: {
@@ -17,6 +17,7 @@ import {
   LATENTS_TO_IMAGE,
   LATENTS_TO_LATENTS,
   MAIN_MODEL_LOADER,
+  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -82,13 +83,19 @@ export const buildLinearImageToImageGraph = (
     ? shouldUseCpuNoise
     : initialGenerationState.shouldUseCpuNoise;

+  console.log(model);
+  const model_loader = model.model_name.includes('onnx')
+    ? ONNX_MODEL_LOADER
+    : MAIN_MODEL_LOADER;
+
   // copy-pasted graph from node editor, filled in with state values & friendly node ids
+  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: IMAGE_TO_IMAGE_GRAPH,
     nodes: {
-      [MAIN_MODEL_LOADER]: {
-        type: 'main_model_loader',
-        id: MAIN_MODEL_LOADER,
+      [model_loader]: {
+        type: model_loader,
+        id: model_loader,
         model,
       },
       [CLIP_SKIP]: {
@@ -135,7 +142,7 @@ export const buildLinearImageToImageGraph = (
     edges: [
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'unet',
         },
         destination: {
@@ -145,7 +152,7 @@ export const buildLinearImageToImageGraph = (
       },
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'clip',
         },
         destination: {
@@ -10,6 +10,7 @@ import {
   CLIP_SKIP,
   LATENTS_TO_IMAGE,
   MAIN_MODEL_LOADER,
+  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -46,6 +47,10 @@ export const buildLinearTextToImageGraph = (
     throw new Error('No model found in state');
   }

+  console.log(model);
+  const model_loader = model.model_name.includes('onnx')
+    ? ONNX_MODEL_LOADER
+    : MAIN_MODEL_LOADER;
   /**
    * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
    * full graph here as a template. Then use the parameters from app state and set friendlier node
@@ -56,12 +61,14 @@ export const buildLinearTextToImageGraph = (
    */

   // copy-pasted graph from node editor, filled in with state values & friendly node ids
+
+  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: TEXT_TO_IMAGE_GRAPH,
     nodes: {
-      [MAIN_MODEL_LOADER]: {
-        type: 'main_model_loader',
-        id: MAIN_MODEL_LOADER,
+      [model_loader]: {
+        type: model_loader,
+        id: model_loader,
         model,
       },
       [CLIP_SKIP]: {
@@ -101,7 +108,7 @@ export const buildLinearTextToImageGraph = (
     edges: [
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'clip',
         },
         destination: {
@@ -111,7 +118,7 @@ export const buildLinearTextToImageGraph = (
       },
       {
         source: {
-          node_id: MAIN_MODEL_LOADER,
+          node_id: model_loader,
           field: 'unet',
         },
         destination: {
@@ -8,6 +8,7 @@ export const RANDOM_INT = 'rand_int';
 export const RANGE_OF_SIZE = 'range_of_size';
 export const ITERATE = 'iterate';
 export const MAIN_MODEL_LOADER = 'main_model_loader';
+export const ONNX_MODEL_LOADER = 'onnx_model_loader';
 export const VAE_LOADER = 'vae_loader';
 export const LORA_LOADER = 'lora_loader';
 export const CLIP_SKIP = 'clip_skip';
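The graph builders above all apply the same pattern with these constants: the loader node id doubles as the node type ('main_model_loader' or 'onnx_model_loader'), so the id is chosen once and then used to key both the node entry and every edge that reads from the loader. A condensed, hypothetical sketch of that pattern (the helper name, graph shape, and destination id are assumptions for illustration):

// Hypothetical consolidation of the loader-selection pattern used in the builders above.
import { MAIN_MODEL_LOADER, ONNX_MODEL_LOADER } from './constants';

type ModelField = { model_name: string; base_model: string };

const addModelLoader = (
  graph: { nodes: Record<string, unknown>; edges: unknown[] },
  model: ModelField
) => {
  // Pick the loader id up front based on the selected model's name.
  const model_loader = model.model_name.includes('onnx')
    ? ONNX_MODEL_LOADER
    : MAIN_MODEL_LOADER;

  // The same value serves as node key, node type, and edge source id.
  graph.nodes[model_loader] = { type: model_loader, id: model_loader, model };
  graph.edges.push({
    source: { node_id: model_loader, field: 'unet' },
    // placeholder destination node id
    destination: { node_id: 'latents_node', field: 'unet' },
  });
};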
@@ -0,0 +1,16 @@
+import { BaseModelType, OnnxModelField } from 'services/api/types';
+
+/**
+ * Crudely converts a model id to a main model field
+ * TODO: Make better
+ */
+export const modelIdToOnnxModelField = (modelId: string): OnnxModelField => {
+  const [base_model, model_type, model_name] = modelId.split('/');
+
+  const field: OnnxModelField = {
+    base_model: base_model as BaseModelType,
+    model_name,
+  };
+
+  return field;
+};
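For reference, a small usage sketch of this helper. The id format is assumed from the `${base_model}/onnx/${model_name}` lookups that ModelSelect performs further down; the concrete id is illustrative only.

// Illustrative usage, not part of the commit.
import { modelIdToOnnxModelField } from 'features/nodes/util/modelIdToOnnxModelField';

const field = modelIdToOnnxModelField('sd-1/onnx/my-onnx-model');
// field => { base_model: 'sd-1', model_name: 'my-onnx-model' }
// (the middle 'onnx' segment is parsed as model_type and then discarded by the helper)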
@@ -1,10 +1,10 @@
 import { createAction } from '@reduxjs/toolkit';
-import { ImageDTO, MainModelField } from 'services/api/types';
+import { ImageDTO, MainModelField, OnnxModelField } from 'services/api/types';

 export const initialImageSelected = createAction<ImageDTO | string | undefined>(
   'generation/initialImageSelected'
 );

-export const modelSelected = createAction<MainModelField>(
+export const modelSelected = createAction<MainModelField | OnnxModelField>(
   'generation/modelSelected'
 );
@@ -8,7 +8,7 @@ import {
   setShouldShowAdvancedOptions,
 } from 'features/ui/store/uiSlice';
 import { clamp } from 'lodash-es';
-import { ImageDTO, MainModelField } from 'services/api/types';
+import { ImageDTO, MainModelField, OnnxModelField } from 'services/api/types';
 import { clipSkipMap } from '../components/Parameters/Advanced/ParamClipSkip';
 import {
   CfgScaleParam,
@@ -53,7 +53,7 @@ export interface GenerationState {
   shouldUseSymmetry: boolean;
   horizontalSymmetrySteps: number;
   verticalSymmetrySteps: number;
-  model: MainModelField | null;
+  model: MainModelField | OnnxModelField | null;
   vae: VaeModelParam | null;
   seamlessXAxis: boolean;
   seamlessYAxis: boolean;
@@ -226,7 +226,10 @@ export const generationSlice = createSlice({
       const { image_name, width, height } = action.payload;
       state.initialImage = { imageName: image_name, width, height };
     },
-    modelChanged: (state, action: PayloadAction<MainModelField | null>) => {
+    modelChanged: (
+      state,
+      action: PayloadAction<MainModelField | OnnxModelField | null>
+    ) => {
       if (!action.payload) {
         state.model = null;
       }
@@ -11,7 +11,11 @@ import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
 import { modelIdToMainModelField } from 'features/nodes/util/modelIdToMainModelField';
 import { modelSelected } from 'features/parameters/store/actions';
 import { forEach } from 'lodash-es';
-import { useGetMainModelsQuery } from 'services/api/endpoints/models';
+import {
+  useGetMainModelsQuery,
+  useGetOnnxModelsQuery,
+} from 'services/api/endpoints/models';
+import { modelIdToOnnxModelField } from 'features/nodes/util/modelIdToOnnxModelField';

 export const MODEL_TYPE_MAP = {
   'sd-1': 'Stable Diffusion 1.x',
@@ -31,6 +35,7 @@ const ModelSelect = () => {
   const { currentModel } = useAppSelector(selector);

   const { data: mainModels, isLoading } = useGetMainModelsQuery();
+  const { data: onnxModels, isLoading: onnxLoading } = useGetOnnxModelsQuery();

   const data = useMemo(() => {
     if (!mainModels) {
@@ -50,16 +55,30 @@ const ModelSelect = () => {
         group: MODEL_TYPE_MAP[model.base_model],
       });
     });
+    forEach(onnxModels?.entities, (model, id) => {
+      if (!model) {
+        return;
+      }
+
+      data.push({
+        value: id,
+        label: model.model_name,
+        group: MODEL_TYPE_MAP[model.base_model],
+      });
+    });

     return data;
-  }, [mainModels]);
+  }, [mainModels, onnxModels]);

   const selectedModel = useMemo(
     () =>
       mainModels?.entities[
         `${currentModel?.base_model}/main/${currentModel?.model_name}`
-      ],
-    [mainModels?.entities, currentModel]
+      ] ||
+      onnxModels?.entities[
+        `${currentModel?.base_model}/onnx/${currentModel?.model_name}`
+      ],
+    [mainModels?.entities, onnxModels?.entities, currentModel]
   );

   const handleChangeModel = useCallback(
@@ -67,14 +86,16 @@ const ModelSelect = () => {
       if (!v) {
         return;
       }

-      const modelField = modelIdToMainModelField(v);
+      let modelField = modelIdToMainModelField(v);
+      if (v.includes('onnx')) {
+        modelField = modelIdToOnnxModelField(v);
+      }
       dispatch(modelSelected(modelField));
     },
     [dispatch]
   );

-  return isLoading ? (
+  return isLoading || onnxLoading ? (
     <IAIMantineSelect
       label={t('modelManager.model')}
       placeholder="Loading..."
@@ -50,7 +50,7 @@ const ModelList = () => {

   const [searchText, setSearchText] = useState<string>('');
   const [isSelectedFilter, setIsSelectedFilter] = useState<
-    'all' | 'ckpt' | 'diffusers'
+    'all' | 'ckpt' | 'diffusers' | 'olive'
   >('all');
   const [_, startTransition] = useTransition();

@@ -5,6 +5,7 @@ import {
   ControlNetModelConfig,
   LoRAModelConfig,
   MainModelConfig,
+  OnnxModelConfig,
   TextualInversionModelConfig,
   VaeModelConfig,
 } from 'services/api/types';
@@ -13,6 +14,8 @@ import { ApiFullTagDescription, LIST_TAG, api } from '..';

 export type MainModelConfigEntity = MainModelConfig & { id: string };

+export type OnnxModelConfigEntity = OnnxModelConfig & { id: string };
+
 export type LoRAModelConfigEntity = LoRAModelConfig & { id: string };

 export type ControlNetModelConfigEntity = ControlNetModelConfig & {
@@ -27,6 +30,7 @@ export type VaeModelConfigEntity = VaeModelConfig & { id: string };

 type AnyModelConfigEntity =
   | MainModelConfigEntity
+  | OnnxModelConfigEntity
   | LoRAModelConfigEntity
   | ControlNetModelConfigEntity
   | TextualInversionModelConfigEntity
@@ -35,6 +39,10 @@ type AnyModelConfigEntity =
 const mainModelsAdapter = createEntityAdapter<MainModelConfigEntity>({
   sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
 });

+const onnxModelsAdapter = createEntityAdapter<OnnxModelConfigEntity>({
+  sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
+});
 const loraModelsAdapter = createEntityAdapter<LoRAModelConfigEntity>({
   sortComparer: (a, b) => a.model_name.localeCompare(b.model_name),
 });
@@ -72,6 +80,38 @@ const createModelEntities = <T extends AnyModelConfigEntity>(

 export const modelsApi = api.injectEndpoints({
   endpoints: (build) => ({
+    getOnnxModels: build.query<EntityState<OnnxModelConfigEntity>, void>({
+      query: () => ({ url: 'models/', params: { model_type: 'onnx' } }),
+      providesTags: (result, error, arg) => {
+        const tags: ApiFullTagDescription[] = [
+          { id: 'OnnxModel', type: LIST_TAG },
+        ];
+
+        if (result) {
+          tags.push(
+            ...result.ids.map((id) => ({
+              type: 'OnnxModel' as const,
+              id,
+            }))
+          );
+        }
+
+        return tags;
+      },
+      transformResponse: (
+        response: { models: OnnxModelConfig[] },
+        meta,
+        arg
+      ) => {
+        const entities = createModelEntities<OnnxModelConfigEntity>(
+          response.models
+        );
+        return onnxModelsAdapter.setAll(
+          onnxModelsAdapter.getInitialState(),
+          entities
+        );
+      },
+    }),
     getMainModels: build.query<EntityState<MainModelConfigEntity>, void>({
       query: () => ({ url: 'models/', params: { model_type: 'main' } }),
       providesTags: (result, error, arg) => {
@@ -243,6 +283,7 @@ export const modelsApi = api.injectEndpoints({

 export const {
   useGetMainModelsQuery,
+  useGetOnnxModelsQuery,
   useGetControlNetModelsQuery,
   useGetLoRAModelsQuery,
   useGetTextualInversionModelsQuery,
@@ -12,7 +12,6 @@ export type ONNXStableDiffusion1ModelConfig = {
   type: 'onnx';
   path: string;
   description?: string;
-  model_format: null;
   error?: ModelError;
   variant: ModelVariantType;
 };
@@ -1174,7 +1174,7 @@ export type components = {
      * @description The nodes in this graph
      */
     nodes?: {
[key: string]: (components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined;
[key: string]: (components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined;
     };
     /**
      * Edges
@@ -2904,6 +2904,34 @@ export type components = {
       */
      model: components["schemas"]["MainModelField"];
    };
+    /**
+     * OnnxModelLoaderInvocation
+     * @description Loads an onnx model, outputting its submodels.
+     */
+    OnnxModelLoaderInvocation: {
+      /**
+       * Id
+       * @description The id of this node. Must be unique among all nodes.
+       */
+      id: string;
+      /**
+       * Is Intermediate
+       * @description Whether or not this node is an intermediate node.
+       * @default false
+       */
+      is_intermediate?: boolean;
+      /**
+       * Type
+       * @default onnx_model_loader
+       * @enum {string}
+       */
+      type?: "onnx_model_loader";
+      /**
+       * Model
+       * @description The model to load
+       */
+      model: components["schemas"]["OnnxModelField"];
+    };
     /**
      * MaskFromAlphaInvocation
      * @description Extracts the alpha channel of an image as a mask.
@@ -4727,7 +4755,7 @@ export type operations = {
     };
     requestBody: {
       content: {
"application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
"application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
       };
     };
     responses: {
@@ -4764,7 +4792,7 @@ export type operations = {
     };
     requestBody: {
       content: {
"application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
"application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["OnnxModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["VaeLoaderInvocation"] | components["schemas"]["MetadataAccumulatorInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ClipSkipInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"];
       };
     };
     responses: {
@@ -30,6 +30,7 @@ export type OffsetPaginatedResults_ImageDTO_ =
 export type ModelType = components['schemas']['ModelType'];
 export type BaseModelType = components['schemas']['BaseModelType'];
 export type MainModelField = components['schemas']['MainModelField'];
+export type OnnxModelField = components['schemas']['OnnxModelField'];
 export type VAEModelField = components['schemas']['VAEModelField'];
 export type LoRAModelField = components['schemas']['LoRAModelField'];
 export type ModelsList = components['schemas']['ModelsList'];
@@ -47,6 +48,7 @@ export type MainModelConfig =
   | components['schemas']['StableDiffusion1ModelDiffusersConfig']
   | components['schemas']['StableDiffusion2ModelCheckpointConfig']
   | components['schemas']['StableDiffusion2ModelDiffusersConfig'];
+export type OnnxModelConfig = components['schemas']['ONNXStableDiffusion1ModelConfig']
 export type AnyModelConfig =
   | LoRAModelConfig
   | VaeModelConfig
@@ -107,6 +109,9 @@ export type ImageCollectionInvocation = TypeReq<
 export type MainModelLoaderInvocation = TypeReq<
   components['schemas']['MainModelLoaderInvocation']
 >;
+export type OnnxModelLoaderInvocation = TypeReq<
+  components['schemas']['OnnxModelLoaderInvocation']
+>;
 export type LoraLoaderInvocation = TypeReq<
   components['schemas']['LoraLoaderInvocation']
 >;
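Putting the type additions together, a hedged end-to-end sketch of what an ONNX loader node looks like when typed against the regenerated schema. All field values are placeholders chosen for illustration.

// Illustrative only: a node literal built with the newly exported type.
import { OnnxModelLoaderInvocation } from 'services/api/types';

const onnxLoaderNode: OnnxModelLoaderInvocation = {
  id: 'onnx_model_loader',
  type: 'onnx_model_loader',
  is_intermediate: true,
  model: {
    // assumed model name and base model
    model_name: 'my-onnx-model',
    base_model: 'sd-1',
  },
};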