mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

chore: Rebuild API

parent 67d05d2066
commit 76dd749b1e
@@ -7,9 +7,11 @@ export { OpenAPI } from './core/OpenAPI';
export type { OpenAPIConfig } from './core/OpenAPI';

export type { AddInvocation } from './models/AddInvocation';
export type { BaseModelType } from './models/BaseModelType';
export type { Body_upload_image } from './models/Body_upload_image';
export type { CannyImageProcessorInvocation } from './models/CannyImageProcessorInvocation';
export type { CkptModelInfo } from './models/CkptModelInfo';
export type { ClipField } from './models/ClipField';
export type { CollectInvocation } from './models/CollectInvocation';
export type { CollectInvocationOutput } from './models/CollectInvocationOutput';
export type { ColorField } from './models/ColorField';
@@ -19,6 +21,7 @@ export type { ConditioningField } from './models/ConditioningField';
export type { ContentShuffleImageProcessorInvocation } from './models/ContentShuffleImageProcessorInvocation';
export type { ControlField } from './models/ControlField';
export type { ControlNetInvocation } from './models/ControlNetInvocation';
export type { ControlNetModelConfig } from './models/ControlNetModelConfig';
export type { ControlOutput } from './models/ControlOutput';
export type { CreateModelRequest } from './models/CreateModelRequest';
export type { CvInpaintInvocation } from './models/CvInpaintInvocation';
@@ -71,12 +74,21 @@ export type { LatentsToLatentsInvocation } from './models/LatentsToLatentsInvoca
export type { LineartAnimeImageProcessorInvocation } from './models/LineartAnimeImageProcessorInvocation';
export type { LineartImageProcessorInvocation } from './models/LineartImageProcessorInvocation';
export type { LoadImageInvocation } from './models/LoadImageInvocation';
export type { LoraInfo } from './models/LoraInfo';
export type { LoraLoaderInvocation } from './models/LoraLoaderInvocation';
export type { LoraLoaderOutput } from './models/LoraLoaderOutput';
export type { LoraModelConfig } from './models/LoraModelConfig';
export type { MaskFromAlphaInvocation } from './models/MaskFromAlphaInvocation';
export type { MaskOutput } from './models/MaskOutput';
export type { MediapipeFaceProcessorInvocation } from './models/MediapipeFaceProcessorInvocation';
export type { MidasDepthImageProcessorInvocation } from './models/MidasDepthImageProcessorInvocation';
export type { MlsdImageProcessorInvocation } from './models/MlsdImageProcessorInvocation';
export type { ModelError } from './models/ModelError';
export type { ModelInfo } from './models/ModelInfo';
export type { ModelLoaderOutput } from './models/ModelLoaderOutput';
export type { ModelsList } from './models/ModelsList';
export type { ModelType } from './models/ModelType';
export type { ModelVariantType } from './models/ModelVariantType';
export type { MultiplyInvocation } from './models/MultiplyInvocation';
export type { NoiseInvocation } from './models/NoiseInvocation';
export type { NoiseOutput } from './models/NoiseOutput';
@@ -97,12 +109,24 @@ export type { ResizeLatentsInvocation } from './models/ResizeLatentsInvocation';
export type { ResourceOrigin } from './models/ResourceOrigin';
export type { RestoreFaceInvocation } from './models/RestoreFaceInvocation';
export type { ScaleLatentsInvocation } from './models/ScaleLatentsInvocation';
export type { SchedulerPredictionType } from './models/SchedulerPredictionType';
export type { SD1ModelLoaderInvocation } from './models/SD1ModelLoaderInvocation';
export type { SD2ModelLoaderInvocation } from './models/SD2ModelLoaderInvocation';
export type { ShowImageInvocation } from './models/ShowImageInvocation';
export type { StableDiffusion1CheckpointModelConfig } from './models/StableDiffusion1CheckpointModelConfig';
export type { StableDiffusion1DiffusersModelConfig } from './models/StableDiffusion1DiffusersModelConfig';
export type { StableDiffusion2CheckpointModelConfig } from './models/StableDiffusion2CheckpointModelConfig';
export type { StableDiffusion2DiffusersModelConfig } from './models/StableDiffusion2DiffusersModelConfig';
export type { StepParamEasingInvocation } from './models/StepParamEasingInvocation';
export type { SubModelType } from './models/SubModelType';
export type { SubtractInvocation } from './models/SubtractInvocation';
export type { TextToImageInvocation } from './models/TextToImageInvocation';
export type { TextToLatentsInvocation } from './models/TextToLatentsInvocation';
export type { TextualInversionModelConfig } from './models/TextualInversionModelConfig';
export type { UNetField } from './models/UNetField';
export type { UpscaleInvocation } from './models/UpscaleInvocation';
export type { VaeField } from './models/VaeField';
export type { VAEModelConfig } from './models/VAEModelConfig';
export type { VaeRepo } from './models/VaeRepo';
export type { ValidationError } from './models/ValidationError';
export type { ZoeDepthImageProcessorInvocation } from './models/ZoeDepthImageProcessorInvocation';
@@ -24,4 +24,3 @@ export type AddInvocation = {
*/
'b'?: number;
};

@@ -0,0 +1,8 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

/**
* An enumeration.
*/
export type BaseModelType = 'sd-1' | 'sd-2';

@@ -5,4 +5,3 @@
export type Body_upload_image = {
file: Blob;
};

@@ -30,4 +30,3 @@ export type CannyImageProcessorInvocation = {
*/
high_threshold?: number;
};
@@ -7,6 +7,14 @@ export type CkptModelInfo = {
* A description of the model
*/
description?: string;
/**
* The name of the model
*/
model_name: string;
/**
* The type of the model
*/
model_type: string;
format?: 'ckpt';
/**
* The path to the model config
@@ -29,4 +37,3 @@ export type CkptModelInfo = {
*/
height?: number;
};
invokeai/frontend/web/src/services/api/models/ClipField.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { LoraInfo } from './LoraInfo';
import type { ModelInfo } from './ModelInfo';

export type ClipField = {
/**
* Info to load tokenizer submodel
*/
tokenizer: ModelInfo;
/**
* Info to load text_encoder submodel
*/
text_encoder: ModelInfo;
/**
* Loras to apply on model loading
*/
loras: Array<LoraInfo>;
};
@@ -24,4 +24,3 @@ export type CollectInvocation = {
*/
collection?: Array<any>;
};

@@ -12,4 +12,3 @@ export type CollectInvocationOutput = {
*/
collection: Array<any>;
};

@@ -20,4 +20,3 @@ export type ColorField = {
*/
'a': number;
};
@@ -2,6 +2,8 @@
/* tslint:disable */
/* eslint-disable */

import type { ClipField } from './ClipField';

/**
* Parse prompt using compel package to conditioning.
*/
@@ -20,8 +22,7 @@ export type CompelInvocation = {
*/
prompt?: string;
/**
* Model to use
* Clip to use
*/
model?: string;
clip?: ClipField;
};
@@ -14,4 +14,3 @@ export type CompelOutput = {
*/
conditioning?: ConditioningField;
};

@@ -8,4 +8,3 @@ export type ConditioningField = {
*/
conditioning_name: string;
};

@@ -42,4 +42,3 @@ export type ContentShuffleImageProcessorInvocation = {
*/
'f'?: number;
};

@@ -26,4 +26,3 @@ export type ControlField = {
*/
end_step_percent: number;
};

@@ -38,4 +38,3 @@ export type ControlNetInvocation = {
*/
end_step_percent?: number;
};
@@ -0,0 +1,13 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { ModelError } from './ModelError';

export type ControlNetModelConfig = {
path: string;
description?: string;
format: ('checkpoint' | 'diffusers');
default?: boolean;
error?: ModelError;
};
@@ -14,4 +14,3 @@ export type ControlOutput = {
*/
control?: ControlField;
};

@@ -15,4 +15,3 @@ export type CreateModelRequest = {
*/
info: (CkptModelInfo | DiffusersModelInfo);
};

@@ -26,4 +26,3 @@ export type CvInpaintInvocation = {
*/
mask?: ImageField;
};
@@ -9,7 +9,15 @@ export type DiffusersModelInfo = {
* A description of the model
*/
description?: string;
format?: 'diffusers';
/**
* The name of the model
*/
model_name: string;
/**
* The type of the model
*/
model_type: string;
format?: 'folder';
/**
* The VAE repo to use for this model
*/
@@ -23,4 +31,3 @@ export type DiffusersModelInfo = {
*/
path?: string;
};
@@ -24,4 +24,3 @@ export type DivideInvocation = {
*/
'b'?: number;
};

@@ -28,4 +28,3 @@ export type DynamicPromptInvocation = {
*/
combinatorial?: boolean;
};

@@ -14,4 +14,3 @@ export type Edge = {
*/
destination: EdgeConnection;
};

@@ -12,4 +12,3 @@ export type EdgeConnection = {
*/
field: string;
};

@@ -12,4 +12,3 @@ export type FloatCollectionOutput = {
*/
collection?: Array<number>;
};

@@ -28,4 +28,3 @@ export type FloatLinearRangeInvocation = {
*/
steps?: number;
};

@@ -12,4 +12,3 @@ export type FloatOutput = {
*/
param?: number;
};
@@ -38,6 +38,7 @@ import type { LatentsToLatentsInvocation } from './LatentsToLatentsInvocation';
import type { LineartAnimeImageProcessorInvocation } from './LineartAnimeImageProcessorInvocation';
import type { LineartImageProcessorInvocation } from './LineartImageProcessorInvocation';
import type { LoadImageInvocation } from './LoadImageInvocation';
import type { LoraLoaderInvocation } from './LoraLoaderInvocation';
import type { MaskFromAlphaInvocation } from './MaskFromAlphaInvocation';
import type { MediapipeFaceProcessorInvocation } from './MediapipeFaceProcessorInvocation';
import type { MidasDepthImageProcessorInvocation } from './MidasDepthImageProcessorInvocation';
@@ -56,6 +57,8 @@ import type { RangeOfSizeInvocation } from './RangeOfSizeInvocation';
import type { ResizeLatentsInvocation } from './ResizeLatentsInvocation';
import type { RestoreFaceInvocation } from './RestoreFaceInvocation';
import type { ScaleLatentsInvocation } from './ScaleLatentsInvocation';
import type { SD1ModelLoaderInvocation } from './SD1ModelLoaderInvocation';
import type { SD2ModelLoaderInvocation } from './SD2ModelLoaderInvocation';
import type { ShowImageInvocation } from './ShowImageInvocation';
import type { StepParamEasingInvocation } from './StepParamEasingInvocation';
import type { SubtractInvocation } from './SubtractInvocation';
@@ -72,10 +75,9 @@ export type Graph = {
/**
* The nodes in this graph
*/
nodes?: Record<string, (LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | DynamicPromptInvocation | CompelInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | CvInpaintInvocation | RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | UpscaleInvocation | RestoreFaceInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | LatentsToLatentsInvocation | ImageToImageInvocation | InpaintInvocation)>;
nodes?: Record<string, (RangeInvocation | RangeOfSizeInvocation | RandomRangeInvocation | SD1ModelLoaderInvocation | SD2ModelLoaderInvocation | LoraLoaderInvocation | CompelInvocation | LoadImageInvocation | ShowImageInvocation | ImageCropInvocation | ImagePasteInvocation | MaskFromAlphaInvocation | ImageMultiplyInvocation | ImageChannelInvocation | ImageConvertInvocation | ImageBlurInvocation | ImageResizeInvocation | ImageScaleInvocation | ImageLerpInvocation | ImageInverseLerpInvocation | ControlNetInvocation | ImageProcessorInvocation | CvInpaintInvocation | TextToImageInvocation | InfillColorInvocation | InfillTileInvocation | InfillPatchMatchInvocation | NoiseInvocation | TextToLatentsInvocation | LatentsToImageInvocation | ResizeLatentsInvocation | ScaleLatentsInvocation | ImageToLatentsInvocation | AddInvocation | SubtractInvocation | MultiplyInvocation | DivideInvocation | RandomIntInvocation | ParamIntInvocation | ParamFloatInvocation | FloatLinearRangeInvocation | StepParamEasingInvocation | DynamicPromptInvocation | RestoreFaceInvocation | UpscaleInvocation | GraphInvocation | IterateInvocation | CollectInvocation | CannyImageProcessorInvocation | HedImageProcessorInvocation | LineartImageProcessorInvocation | LineartAnimeImageProcessorInvocation | OpenposeImageProcessorInvocation | MidasDepthImageProcessorInvocation | NormalbaeImageProcessorInvocation | MlsdImageProcessorInvocation | PidiImageProcessorInvocation | ContentShuffleImageProcessorInvocation | ZoeDepthImageProcessorInvocation | MediapipeFaceProcessorInvocation | ImageToImageInvocation | LatentsToLatentsInvocation | InpaintInvocation)>;
/**
* The connections between nodes and their fields in this graph
*/
edges?: Array<Edge>;
};
@@ -14,7 +14,9 @@ import type { IntCollectionOutput } from './IntCollectionOutput';
import type { IntOutput } from './IntOutput';
import type { IterateInvocationOutput } from './IterateInvocationOutput';
import type { LatentsOutput } from './LatentsOutput';
import type { LoraLoaderOutput } from './LoraLoaderOutput';
import type { MaskOutput } from './MaskOutput';
import type { ModelLoaderOutput } from './ModelLoaderOutput';
import type { NoiseOutput } from './NoiseOutput';
import type { PromptCollectionOutput } from './PromptCollectionOutput';
import type { PromptOutput } from './PromptOutput';
@@ -46,7 +48,7 @@ export type GraphExecutionState = {
/**
* The results of node executions
*/
results: Record<string, (ImageOutput | MaskOutput | ControlOutput | PromptOutput | PromptCollectionOutput | CompelOutput | IntOutput | FloatOutput | LatentsOutput | NoiseOutput | IntCollectionOutput | FloatCollectionOutput | GraphInvocationOutput | IterateInvocationOutput | CollectInvocationOutput)>;
results: Record<string, (IntCollectionOutput | FloatCollectionOutput | ModelLoaderOutput | LoraLoaderOutput | CompelOutput | ImageOutput | MaskOutput | ControlOutput | LatentsOutput | NoiseOutput | IntOutput | FloatOutput | PromptOutput | PromptCollectionOutput | GraphInvocationOutput | IterateInvocationOutput | CollectInvocationOutput)>;
/**
* Errors raised when executing nodes
*/
@@ -60,4 +62,3 @@ export type GraphExecutionState = {
*/
source_prepared_mapping: Record<string, Array<string>>;
};
@@ -22,4 +22,3 @@ export type GraphInvocation = {
*/
graph?: Graph;
};

@@ -8,4 +8,3 @@
export type GraphInvocationOutput = {
type: 'graph_output';
};

@@ -7,4 +7,3 @@ import type { ValidationError } from './ValidationError';
export type HTTPValidationError = {
detail?: Array<ValidationError>;
};

@@ -34,4 +34,3 @@ export type HedImageProcessorInvocation = {
*/
scribble?: boolean;
};
@@ -30,4 +30,3 @@ export type ImageBlurInvocation = {
*/
blur_type?: 'gaussian' | 'box';
};

@@ -26,4 +26,3 @@ export type ImageChannelInvocation = {
*/
channel?: 'A' | 'R' | 'G' | 'B';
};

@@ -26,4 +26,3 @@ export type ImageConvertInvocation = {
*/
mode?: 'L' | 'RGB' | 'RGBA' | 'CMYK' | 'YCbCr' | 'LAB' | 'HSV' | 'I' | 'F';
};

@@ -38,4 +38,3 @@ export type ImageCropInvocation = {
*/
height?: number;
};

@@ -67,4 +67,3 @@ export type ImageDTO = {
*/
metadata?: ImageMetadata;
};
@@ -11,4 +11,3 @@ export type ImageField = {
*/
image_name: string;
};

@@ -30,4 +30,3 @@ export type ImageInverseLerpInvocation = {
*/
max?: number;
};

@@ -30,4 +30,3 @@ export type ImageLerpInvocation = {
*/
max?: number;
};

@@ -78,4 +78,3 @@ export type ImageMetadata = {
*/
extra?: string;
};

@@ -26,4 +26,3 @@ export type ImageMultiplyInvocation = {
*/
image2?: ImageField;
};

@@ -22,4 +22,3 @@ export type ImageOutput = {
*/
height: number;
};
@@ -38,4 +38,3 @@ export type ImagePasteInvocation = {
*/
'y'?: number;
};

@@ -22,4 +22,3 @@ export type ImageProcessorInvocation = {
*/
image?: ImageField;
};

@@ -26,4 +26,3 @@ export type ImageRecordChanges = {
*/
is_intermediate?: boolean;
};

@@ -34,4 +34,3 @@ export type ImageResizeInvocation = {
*/
resample_mode?: 'nearest' | 'box' | 'bilinear' | 'hamming' | 'bicubic' | 'lanczos';
};

@@ -30,4 +30,3 @@ export type ImageScaleInvocation = {
*/
resample_mode?: 'nearest' | 'box' | 'bilinear' | 'hamming' | 'bicubic' | 'lanczos';
};

@@ -74,4 +74,3 @@ export type ImageToImageInvocation = {
*/
fit?: boolean;
};
@@ -3,6 +3,7 @@
/* eslint-disable */

import type { ImageField } from './ImageField';
import type { VaeField } from './VaeField';

/**
* Encodes an image into latents.
@@ -22,8 +23,11 @@ export type ImageToLatentsInvocation = {
*/
image?: ImageField;
/**
* The model to use
* Vae submodel
*/
model?: string;
vae?: VaeField;
/**
* Encode latents by overlaping tiles(less memory consumption)
*/
tiled?: boolean;
};
@@ -19,4 +19,3 @@ export type ImageUrlsDTO = {
*/
thumbnail_url: string;
};

@@ -27,4 +27,3 @@ export type InfillColorInvocation = {
*/
color?: ColorField;
};

@@ -22,4 +22,3 @@ export type InfillPatchMatchInvocation = {
*/
image?: ImageField;
};

@@ -30,4 +30,3 @@ export type InfillTileInvocation = {
*/
seed?: number;
};

@@ -119,4 +119,3 @@ export type InpaintInvocation = {
*/
inpaint_replace?: number;
};
@@ -12,4 +12,3 @@ export type IntCollectionOutput = {
*/
collection?: Array<number>;
};

@@ -12,4 +12,3 @@ export type IntOutput = {
*/
'a'?: number;
};

@@ -24,4 +24,3 @@ export type IterateInvocation = {
*/
index?: number;
};

@@ -12,4 +12,3 @@ export type IterateInvocationOutput = {
*/
item: any;
};

@@ -11,4 +11,3 @@ export type LatentsField = {
*/
latents_name: string;
};

@@ -22,4 +22,3 @@ export type LatentsOutput = {
*/
height: number;
};
@@ -3,6 +3,7 @@
/* eslint-disable */

import type { LatentsField } from './LatentsField';
import type { VaeField } from './VaeField';

/**
* Generates an image from latents.
@@ -22,8 +23,11 @@ export type LatentsToImageInvocation = {
*/
latents?: LatentsField;
/**
* The model to use
* Vae submodel
*/
model?: string;
vae?: VaeField;
/**
* Decode latents by overlaping tiles(less memory consumption)
*/
tiled?: boolean;
};
@@ -5,6 +5,7 @@
import type { ConditioningField } from './ConditioningField';
import type { ControlField } from './ControlField';
import type { LatentsField } from './LatentsField';
import type { UNetField } from './UNetField';

/**
* Generates latents using latents as base image.
@@ -44,9 +45,9 @@ export type LatentsToLatentsInvocation = {
*/
scheduler?: 'ddim' | 'ddpm' | 'deis' | 'lms' | 'pndm' | 'heun' | 'heun_k' | 'euler' | 'euler_k' | 'euler_a' | 'kdpm_2' | 'kdpm_2_a' | 'dpmpp_2s' | 'dpmpp_2m' | 'dpmpp_2m_k' | 'unipc';
/**
* The model to use (currently ignored)
* UNet submodel
*/
model?: string;
unet?: UNetField;
/**
* The control to use
*/
@@ -60,4 +61,3 @@ export type LatentsToLatentsInvocation = {
*/
strength?: number;
};
@@ -30,4 +30,3 @@ export type LineartAnimeImageProcessorInvocation = {
*/
image_resolution?: number;
};

@@ -34,4 +34,3 @@ export type LineartImageProcessorInvocation = {
*/
coarse?: boolean;
};

@@ -22,4 +22,3 @@ export type LoadImageInvocation = {
*/
image?: ImageField;
};
invokeai/frontend/web/src/services/api/models/LoraInfo.ts (new file, 30 lines)
@@ -0,0 +1,30 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { BaseModelType } from './BaseModelType';
import type { ModelType } from './ModelType';
import type { SubModelType } from './SubModelType';

export type LoraInfo = {
/**
* Info to load submodel
*/
model_name: string;
/**
* Base model
*/
base_model: BaseModelType;
/**
* Info to load submodel
*/
model_type: ModelType;
/**
* Info to load submodel
*/
submodel?: SubModelType;
/**
* Lora's weight which to use when apply to model
*/
weight: number;
};
@@ -0,0 +1,37 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { ClipField } from './ClipField';
import type { UNetField } from './UNetField';

/**
* Apply selected lora to unet and text_encoder.
*/
export type LoraLoaderInvocation = {
/**
* The id of this node. Must be unique among all nodes.
*/
id: string;
/**
* Whether or not this node is an intermediate node.
*/
is_intermediate?: boolean;
type?: 'lora_loader';
/**
* Lora model name
*/
lora_name: string;
/**
* With what weight to apply lora
*/
weight?: number;
/**
* UNet model for applying lora
*/
unet?: UNetField;
/**
* Clip model for applying lora
*/
clip?: ClipField;
};
@@ -0,0 +1,21 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { ClipField } from './ClipField';
import type { UNetField } from './UNetField';

/**
* Model loader output
*/
export type LoraLoaderOutput = {
type?: 'lora_loader_output';
/**
* UNet submodel
*/
unet?: UNetField;
/**
* Tokenizer and text_encoder submodels
*/
clip?: ClipField;
};

@@ -0,0 +1,13 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { ModelError } from './ModelError';

export type LoraModelConfig = {
path: string;
description?: string;
format: ('lycoris' | 'diffusers');
default?: boolean;
error?: ModelError;
};
@@ -26,4 +26,3 @@ export type MaskFromAlphaInvocation = {
*/
invert?: boolean;
};

@@ -22,4 +22,3 @@ export type MaskOutput = {
*/
height?: number;
};

@@ -30,4 +30,3 @@ export type MediapipeFaceProcessorInvocation = {
*/
min_confidence?: number;
};

@@ -30,4 +30,3 @@ export type MidasDepthImageProcessorInvocation = {
*/
bg_th?: number;
};

@@ -38,4 +38,3 @@ export type MlsdImageProcessorInvocation = {
*/
thr_d?: number;
};
@@ -0,0 +1,8 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

/**
* An enumeration.
*/
export type ModelError = 'not_found';
invokeai/frontend/web/src/services/api/models/ModelInfo.ts (new file, 26 lines)
@@ -0,0 +1,26 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { BaseModelType } from './BaseModelType';
import type { ModelType } from './ModelType';
import type { SubModelType } from './SubModelType';

export type ModelInfo = {
/**
* Info to load submodel
*/
model_name: string;
/**
* Base model
*/
base_model: BaseModelType;
/**
* Info to load submodel
*/
model_type: ModelType;
/**
* Info to load submodel
*/
submodel?: SubModelType;
};
@@ -0,0 +1,26 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

import type { ClipField } from './ClipField';
import type { UNetField } from './UNetField';
import type { VaeField } from './VaeField';

/**
* Model loader output
*/
export type ModelLoaderOutput = {
type?: 'model_loader_output';
/**
* UNet submodel
*/
unet?: UNetField;
/**
* Tokenizer and text_encoder submodels
*/
clip?: ClipField;
/**
* Vae submodel
*/
vae?: VaeField;
};
@@ -0,0 +1,8 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

/**
* An enumeration.
*/
export type ModelType = 'pipeline' | 'vae' | 'lora' | 'controlnet' | 'embedding';

@@ -0,0 +1,8 @@
/* istanbul ignore file */
/* tslint:disable */
/* eslint-disable */

/**
* An enumeration.
*/
export type ModelVariantType = 'normal' | 'inpaint' | 'depth';
@@ -2,10 +2,15 @@
/* tslint:disable */
/* eslint-disable */

import type { CkptModelInfo } from './CkptModelInfo';
import type { DiffusersModelInfo } from './DiffusersModelInfo';
import type { ControlNetModelConfig } from './ControlNetModelConfig';
import type { LoraModelConfig } from './LoraModelConfig';
import type { StableDiffusion1CheckpointModelConfig } from './StableDiffusion1CheckpointModelConfig';
import type { StableDiffusion1DiffusersModelConfig } from './StableDiffusion1DiffusersModelConfig';
import type { StableDiffusion2CheckpointModelConfig } from './StableDiffusion2CheckpointModelConfig';
import type { StableDiffusion2DiffusersModelConfig } from './StableDiffusion2DiffusersModelConfig';
import type { TextualInversionModelConfig } from './TextualInversionModelConfig';
import type { VAEModelConfig } from './VAEModelConfig';

export type ModelsList = {
models: Record<string, (CkptModelInfo | DiffusersModelInfo)>;
models: Record<string, Record<string, Record<string, (StableDiffusion2DiffusersModelConfig | ControlNetModelConfig | LoraModelConfig | StableDiffusion1CheckpointModelConfig | TextualInversionModelConfig | StableDiffusion1DiffusersModelConfig | StableDiffusion2CheckpointModelConfig | VAEModelConfig)>>>;
};
@@ -24,4 +24,3 @@ export type MultiplyInvocation = {
*/
'b'?: number;
};

@@ -28,4 +28,3 @@ export type NoiseInvocation = {
*/
height?: number;
};

@@ -22,4 +22,3 @@ export type NoiseOutput = {
*/
height: number;
};

@@ -30,4 +30,3 @@ export type NormalbaeImageProcessorInvocation = {
*/
image_resolution?: number;
};

@@ -25,4 +25,3 @@ export type OffsetPaginatedResults_ImageDTO_ = {
*/
total: number;
};
@@ -34,4 +34,3 @@ export type OpenposeImageProcessorInvocation = {
*/
image_resolution?: number;
};

@@ -29,4 +29,3 @@ export type PaginatedResults_GraphExecutionState_ = {
*/
total: number;
};

@@ -20,4 +20,3 @@ export type ParamFloatInvocation = {
*/
param?: number;
};

@@ -20,4 +20,3 @@ export type ParamIntInvocation = {
*/
'a'?: number;
};

@@ -38,4 +38,3 @@ export type PidiImageProcessorInvocation = {
*/
scribble?: boolean;
};
@@ -16,4 +16,3 @@ export type PromptCollectionOutput = {
*/
count: number;
};

@@ -12,4 +12,3 @@ export type PromptOutput = {
*/
prompt: string;
};

@@ -24,4 +24,3 @@ export type RandomIntInvocation = {
*/
high?: number;
};

@@ -32,4 +32,3 @@ export type RandomRangeInvocation = {
*/
seed?: number;
};

@@ -28,4 +28,3 @@ export type RangeInvocation = {
*/
step?: number;
};

@@ -28,4 +28,3 @@ export type RangeOfSizeInvocation = {
*/
step?: number;
};
@@ -38,4 +38,3 @@ export type ResizeLatentsInvocation = {
*/
antialias?: boolean;
};
Some files were not shown because too many files have changed in this diff.
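
For reference, a minimal sketch of how the regenerated types compose. The model names and weight below are hypothetical values for illustration only; only the field shapes come from the diff above.

import type { ModelInfo } from './models/ModelInfo';
import type { LoraInfo } from './models/LoraInfo';
import type { ClipField } from './models/ClipField';

// Hypothetical ModelInfo; submodel is omitted because the SubModelType
// values are not shown in this diff.
const textEncoder: ModelInfo = {
  model_name: 'example-sd1-model', // illustrative name
  base_model: 'sd-1',              // BaseModelType
  model_type: 'pipeline',          // ModelType
};

// Hypothetical LoraInfo, as consumed by ClipField.loras.
const exampleLora: LoraInfo = {
  model_name: 'example-lora',      // illustrative name
  base_model: 'sd-1',
  model_type: 'lora',
  weight: 0.75,
};

// ClipField now carries tokenizer/text_encoder submodel info plus loras,
// replacing the old `model?: string` field on CompelInvocation.
const clip: ClipField = {
  tokenizer: textEncoder,
  text_encoder: textEncoder,
  loras: [exampleLora],
};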