refactor(ui): rewrite all types as zod schemas

This change prepares for safe metadata recall.
psychedelicious 2024-05-07 15:14:47 +10:00 committed by Kent Keirsey
parent a7aa529b99
commit 8342f32f2e
4 changed files with 361 additions and 160 deletions
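The switch to zod schemas is what makes "safe metadata recall" possible: recalled data can be validated at runtime instead of being cast. A minimal sketch of the idea, not part of this commit; the import path for the layer types module is an assumption, since the diff below does not show file names:

import { z } from 'zod';
// Path is assumed for illustration; the diff does not show this file's location.
import { type Layer, zLayer } from 'features/controlLayers/store/types';

// Hypothetical recall helper: parse unknown metadata against the zLayer
// discriminated union (defined in the first file below) and drop the payload
// if it does not conform, instead of trusting a type assertion.
export const recallLayers = (metadata: unknown): Layer[] => {
  const result = z.array(zLayer).safeParse(metadata);
  return result.success ? result.data : [];
};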

View File

@@ -1,88 +1,121 @@
-import type {
-  ControlNetConfigV2,
-  ImageWithDims,
-  IPAdapterConfigV2,
-  T2IAdapterConfigV2,
+import {
+  zControlNetConfigV2,
+  zImageWithDims,
+  zIPAdapterConfigV2,
+  zT2IAdapterConfigV2,
} from 'features/controlLayers/util/controlAdapters';
import type { AspectRatioState } from 'features/parameters/components/ImageSize/types';
-import type {
-  ParameterAutoNegative,
-  ParameterHeight,
-  ParameterNegativePrompt,
-  ParameterNegativeStylePromptSDXL,
-  ParameterPositivePrompt,
-  ParameterPositiveStylePromptSDXL,
-  ParameterWidth,
+import {
+  type ParameterHeight,
+  type ParameterNegativePrompt,
+  type ParameterNegativeStylePromptSDXL,
+  type ParameterPositivePrompt,
+  type ParameterPositiveStylePromptSDXL,
+  type ParameterWidth,
+  zAutoNegative,
+  zParameterNegativePrompt,
+  zParameterPositivePrompt,
+  zParameterStrength,
} from 'features/parameters/types/parameterSchemas';
-import type { IRect } from 'konva/lib/types';
-import type { RgbColor } from 'react-colorful';
+import { z } from 'zod';

-export type DrawingTool = 'brush' | 'eraser';
-
-export type Tool = DrawingTool | 'move' | 'rect';
+export const zTool = z.enum(['brush', 'eraser', 'move', 'rect']);
+export type Tool = z.infer<typeof zTool>;
+export const zDrawingTool = zTool.extract(['brush', 'eraser']);
+export type DrawingTool = z.infer<typeof zDrawingTool>;
+
+const zPoints = z.array(z.number()).refine((points) => points.length % 2 === 0, {
+  message: 'Must have an even number of points',
+});

-export type VectorMaskLine = {
-  id: string;
-  type: 'vector_mask_line';
-  tool: DrawingTool;
-  strokeWidth: number;
-  points: number[];
-};
+export const zVectorMaskLine = z.object({
+  id: z.string(),
+  type: z.literal('vector_mask_line'),
+  tool: zDrawingTool,
+  strokeWidth: z.number().min(1),
+  points: zPoints,
+});
+export type VectorMaskLine = z.infer<typeof zVectorMaskLine>;

-export type VectorMaskRect = {
-  id: string;
-  type: 'vector_mask_rect';
-  x: number;
-  y: number;
-  width: number;
-  height: number;
-};
+export const zVectorMaskRect = z.object({
+  id: z.string(),
+  type: z.literal('vector_mask_rect'),
+  x: z.number(),
+  y: z.number(),
+  width: z.number().min(1),
+  height: z.number().min(1),
+});
+export type VectorMaskRect = z.infer<typeof zVectorMaskRect>;

-type LayerBase = {
-  id: string;
-  isEnabled: boolean;
-};
+const zLayerBase = z.object({
+  id: z.string(),
+  isEnabled: z.boolean(),
+});

-type RenderableLayerBase = LayerBase & {
-  x: number;
-  y: number;
-  bbox: IRect | null;
-  bboxNeedsUpdate: boolean;
-  isSelected: boolean;
-};
+const zRect = z.object({
+  x: z.number(),
+  y: z.number(),
+  width: z.number().min(1),
+  height: z.number().min(1),
+});
+const zRenderableLayerBase = zLayerBase.extend({
+  x: z.number(),
+  y: z.number(),
+  bbox: zRect.nullable(),
+  bboxNeedsUpdate: z.boolean(),
+  isSelected: z.boolean(),
+});

-export type ControlAdapterLayer = RenderableLayerBase & {
-  type: 'control_adapter_layer'; // technically, also t2i adapter layer
-  opacity: number;
-  isFilterEnabled: boolean;
-  controlAdapter: ControlNetConfigV2 | T2IAdapterConfigV2;
-};
+const zControlAdapterLayer = zRenderableLayerBase.extend({
+  type: z.literal('control_adapter_layer'),
+  opacity: z.number().gte(0).lte(1),
+  isFilterEnabled: z.boolean(),
+  controlAdapter: z.discriminatedUnion('type', [zControlNetConfigV2, zT2IAdapterConfigV2]),
+});
+export type ControlAdapterLayer = z.infer<typeof zControlAdapterLayer>;

-export type IPAdapterLayer = LayerBase & {
-  type: 'ip_adapter_layer';
-  ipAdapter: IPAdapterConfigV2;
-};
+const zIPAdapterLayer = zLayerBase.extend({
+  type: z.literal('ip_adapter_layer'),
+  ipAdapter: zIPAdapterConfigV2,
+});
+export type IPAdapterLayer = z.infer<typeof zIPAdapterLayer>;

-export type RegionalGuidanceLayer = RenderableLayerBase & {
-  type: 'regional_guidance_layer';
-  maskObjects: (VectorMaskLine | VectorMaskRect)[];
-  positivePrompt: ParameterPositivePrompt | null;
-  negativePrompt: ParameterNegativePrompt | null; // Up to one text prompt per mask
-  ipAdapters: IPAdapterConfigV2[]; // Any number of image prompts
-  previewColor: RgbColor;
-  autoNegative: ParameterAutoNegative;
-  needsPixelBbox: boolean; // Needs the slower pixel-based bbox calculation - set to true when an there is an eraser object
-  uploadedMaskImage: ImageWithDims | null;
-};
+const zRgbColor = z.object({
+  r: z.number().int().min(0).max(255),
+  g: z.number().int().min(0).max(255),
+  b: z.number().int().min(0).max(255),
+});
+const zRegionalGuidanceLayer = zRenderableLayerBase.extend({
+  type: z.literal('regional_guidance_layer'),
+  maskObjects: z.array(z.discriminatedUnion('type', [zVectorMaskLine, zVectorMaskRect])),
+  positivePrompt: zParameterPositivePrompt.nullable(),
+  negativePrompt: zParameterNegativePrompt.nullable(),
+  ipAdapters: z.array(zIPAdapterConfigV2),
+  previewColor: zRgbColor,
+  autoNegative: zAutoNegative,
+  needsPixelBbox: z
+    .boolean()
+    .describe(
+      'Whether the layer needs the slower pixel-based bbox calculation. Set to true when an there is an eraser object.'
+    ),
+  uploadedMaskImage: zImageWithDims.nullable(),
+});
+export type RegionalGuidanceLayer = z.infer<typeof zRegionalGuidanceLayer>;

-export type InitialImageLayer = RenderableLayerBase & {
-  type: 'initial_image_layer';
-  opacity: number;
-  image: ImageWithDims | null;
-  denoisingStrength: number;
-};
-
-export type Layer = RegionalGuidanceLayer | ControlAdapterLayer | IPAdapterLayer | InitialImageLayer;
+const zInitialImageLayer = zRenderableLayerBase.extend({
+  type: z.literal('initial_image_layer'),
+  opacity: z.number().gte(0).lte(1),
+  image: zImageWithDims.nullable(),
+  denoisingStrength: zParameterStrength,
+});
+export type InitialImageLayer = z.infer<typeof zInitialImageLayer>;
+
+export const zLayer = z.discriminatedUnion('type', [
+  zRegionalGuidanceLayer,
+  zControlAdapterLayer,
+  zIPAdapterLayer,
+  zInitialImageLayer,
+]);
+export type Layer = z.infer<typeof zLayer>;

export type ControlLayersState = {
  _version: 2;

View File

@@ -4,20 +4,74 @@ import { assert } from 'tsafe';
import { describe, test } from 'vitest';

import type {
+  _CannyProcessorConfig,
+  _ColorMapProcessorConfig,
+  _ContentShuffleProcessorConfig,
+  _DepthAnythingProcessorConfig,
+  _DWOpenposeProcessorConfig,
+  _HedProcessorConfig,
+  _LineartAnimeProcessorConfig,
+  _LineartProcessorConfig,
+  _MediapipeFaceProcessorConfig,
+  _MidasDepthProcessorConfig,
+  _MlsdProcessorConfig,
+  _NormalbaeProcessorConfig,
+  _PidiProcessorConfig,
+  _ZoeDepthProcessorConfig,
+  CannyProcessorConfig,
  CLIPVisionModelV2,
+  ColorMapProcessorConfig,
+  ContentShuffleProcessorConfig,
  ControlModeV2,
  DepthAnythingModelSize,
+  DepthAnythingProcessorConfig,
+  DWOpenposeProcessorConfig,
+  HedProcessorConfig,
  IPMethodV2,
+  LineartAnimeProcessorConfig,
+  LineartProcessorConfig,
+  MediapipeFaceProcessorConfig,
+  MidasDepthProcessorConfig,
+  MlsdProcessorConfig,
+  NormalbaeProcessorConfig,
+  PidiProcessorConfig,
  ProcessorConfig,
  ProcessorTypeV2,
+  ZoeDepthProcessorConfig,
} from './controlAdapters';

describe('Control Adapter Types', () => {
-  test('ProcessorType', () => assert<Equals<ProcessorConfig['type'], ProcessorTypeV2>>());
-  test('IP Adapter Method', () => assert<Equals<NonNullable<S['IPAdapterInvocation']['method']>, IPMethodV2>>());
-  test('CLIP Vision Model', () =>
-    assert<Equals<NonNullable<S['IPAdapterInvocation']['clip_vision_model']>, CLIPVisionModelV2>>());
-  test('Control Mode', () => assert<Equals<NonNullable<S['ControlNetInvocation']['control_mode']>, ControlModeV2>>());
-  test('DepthAnything Model Size', () =>
-    assert<Equals<NonNullable<S['DepthAnythingImageProcessorInvocation']['model_size']>, DepthAnythingModelSize>>());
+  test('ProcessorType', () => {
+    assert<Equals<ProcessorConfig['type'], ProcessorTypeV2>>();
+  });
+  test('IP Adapter Method', () => {
+    assert<Equals<NonNullable<S['IPAdapterInvocation']['method']>, IPMethodV2>>();
+  });
+  test('CLIP Vision Model', () => {
+    assert<Equals<NonNullable<S['IPAdapterInvocation']['clip_vision_model']>, CLIPVisionModelV2>>();
+  });
+  test('Control Mode', () => {
+    assert<Equals<NonNullable<S['ControlNetInvocation']['control_mode']>, ControlModeV2>>();
+  });
+  test('DepthAnything Model Size', () => {
+    assert<Equals<NonNullable<S['DepthAnythingImageProcessorInvocation']['model_size']>, DepthAnythingModelSize>>();
+  });
+  test('Processor Configs', () => {
+    // The processor configs are manually modeled zod schemas. This test ensures that the inferred types are correct.
+    // The types prefixed with `_` are types generated from OpenAPI, while the types without the prefix are manually modeled.
+    assert<Equals<_CannyProcessorConfig, CannyProcessorConfig>>();
+    assert<Equals<_ColorMapProcessorConfig, ColorMapProcessorConfig>>();
+    assert<Equals<_ContentShuffleProcessorConfig, ContentShuffleProcessorConfig>>();
+    assert<Equals<_DepthAnythingProcessorConfig, DepthAnythingProcessorConfig>>();
+    assert<Equals<_HedProcessorConfig, HedProcessorConfig>>();
+    assert<Equals<_LineartAnimeProcessorConfig, LineartAnimeProcessorConfig>>();
+    assert<Equals<_LineartProcessorConfig, LineartProcessorConfig>>();
+    assert<Equals<_MediapipeFaceProcessorConfig, MediapipeFaceProcessorConfig>>();
+    assert<Equals<_MidasDepthProcessorConfig, MidasDepthProcessorConfig>>();
+    assert<Equals<_MlsdProcessorConfig, MlsdProcessorConfig>>();
+    assert<Equals<_NormalbaeProcessorConfig, NormalbaeProcessorConfig>>();
+    assert<Equals<_DWOpenposeProcessorConfig, DWOpenposeProcessorConfig>>();
+    assert<Equals<_PidiProcessorConfig, PidiProcessorConfig>>();
+    assert<Equals<_ZoeDepthProcessorConfig, ZoeDepthProcessorConfig>>();
+  });
});
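These checks lean on tsafe's compile-time Equals: assert<Equals<A, B>>() only type-checks when the two types are identical, so any drift between a hand-written schema and its OpenAPI-derived projection breaks the build. A small illustration with made-up types (not from this commit):

import { assert } from 'tsafe';
import type { Equals } from 'tsafe';

// Made-up types: a projection of a generated invocation type vs. the type
// inferred from a hand-written schema that forgot a field.
type FromOpenAPI = { id: string; low_threshold: number; high_threshold: number };
type FromSchema = { id: string; low_threshold: number };

assert<Equals<FromOpenAPI, FromOpenAPI>>(); // compiles: identical types
// @ts-expect-error Equals<...> resolves to false, so this fails to type-check
assert<Equals<FromOpenAPI, FromSchema>>();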

View File

@@ -1,9 +1,5 @@
import { deepClone } from 'common/util/deepClone';
-import type {
-  ParameterControlNetModel,
-  ParameterIPAdapterModel,
-  ParameterT2IAdapterModel,
-} from 'features/parameters/types/parameterSchemas';
+import { zModelIdentifierField } from 'features/nodes/types/common';
import { merge, omit } from 'lodash-es';
import type {
  BaseModelType,
@@ -28,90 +24,207 @@ import type {
} from 'services/api/types';
import { z } from 'zod';

+const zId = z.string().min(1);
+
+const zCannyProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('canny_image_processor'),
+  low_threshold: z.number().int().gte(0).lte(255),
+  high_threshold: z.number().int().gte(0).lte(255),
+});
+export type _CannyProcessorConfig = Required<
+  Pick<CannyImageProcessorInvocation, 'id' | 'type' | 'low_threshold' | 'high_threshold'>
+>;
+export type CannyProcessorConfig = z.infer<typeof zCannyProcessorConfig>;
+
+const zColorMapProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('color_map_image_processor'),
+  color_map_tile_size: z.number().int().gte(1),
+});
+export type _ColorMapProcessorConfig = Required<
+  Pick<ColorMapImageProcessorInvocation, 'id' | 'type' | 'color_map_tile_size'>
+>;
+export type ColorMapProcessorConfig = z.infer<typeof zColorMapProcessorConfig>;
+
+const zContentShuffleProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('content_shuffle_image_processor'),
+  w: z.number().int().gte(0),
+  h: z.number().int().gte(0),
+  f: z.number().int().gte(0),
+});
+export type _ContentShuffleProcessorConfig = Required<
+  Pick<ContentShuffleImageProcessorInvocation, 'id' | 'type' | 'w' | 'h' | 'f'>
+>;
+export type ContentShuffleProcessorConfig = z.infer<typeof zContentShuffleProcessorConfig>;
+
const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']);
export type DepthAnythingModelSize = z.infer<typeof zDepthAnythingModelSize>;
export const isDepthAnythingModelSize = (v: unknown): v is DepthAnythingModelSize =>
  zDepthAnythingModelSize.safeParse(v).success;

-export type CannyProcessorConfig = Required<
-  Pick<CannyImageProcessorInvocation, 'id' | 'type' | 'low_threshold' | 'high_threshold'>
->;
-export type ColorMapProcessorConfig = Required<
-  Pick<ColorMapImageProcessorInvocation, 'id' | 'type' | 'color_map_tile_size'>
->;
-export type ContentShuffleProcessorConfig = Required<
-  Pick<ContentShuffleImageProcessorInvocation, 'id' | 'type' | 'w' | 'h' | 'f'>
->;
-export type DepthAnythingProcessorConfig = Required<
+const zDepthAnythingProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('depth_anything_image_processor'),
+  model_size: zDepthAnythingModelSize,
+});
+export type _DepthAnythingProcessorConfig = Required<
  Pick<DepthAnythingImageProcessorInvocation, 'id' | 'type' | 'model_size'>
>;
-export type HedProcessorConfig = Required<Pick<HedImageProcessorInvocation, 'id' | 'type' | 'scribble'>>;
-type LineartAnimeProcessorConfig = Required<Pick<LineartAnimeImageProcessorInvocation, 'id' | 'type'>>;
-export type LineartProcessorConfig = Required<Pick<LineartImageProcessorInvocation, 'id' | 'type' | 'coarse'>>;
-export type MediapipeFaceProcessorConfig = Required<
+export type DepthAnythingProcessorConfig = z.infer<typeof zDepthAnythingProcessorConfig>;
+
+const zHedProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('hed_image_processor'),
+  scribble: z.boolean(),
+});
+export type _HedProcessorConfig = Required<Pick<HedImageProcessorInvocation, 'id' | 'type' | 'scribble'>>;
+export type HedProcessorConfig = z.infer<typeof zHedProcessorConfig>;
+
+const zLineartAnimeProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('lineart_anime_image_processor'),
+});
+export type _LineartAnimeProcessorConfig = Required<Pick<LineartAnimeImageProcessorInvocation, 'id' | 'type'>>;
+export type LineartAnimeProcessorConfig = z.infer<typeof zLineartAnimeProcessorConfig>;
+
+const zLineartProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('lineart_image_processor'),
+  coarse: z.boolean(),
+});
+export type _LineartProcessorConfig = Required<Pick<LineartImageProcessorInvocation, 'id' | 'type' | 'coarse'>>;
+export type LineartProcessorConfig = z.infer<typeof zLineartProcessorConfig>;
+
+const zMediapipeFaceProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('mediapipe_face_processor'),
+  max_faces: z.number().int().gte(1),
+  min_confidence: z.number().gte(0).lte(1),
+});
+export type _MediapipeFaceProcessorConfig = Required<
  Pick<MediapipeFaceProcessorInvocation, 'id' | 'type' | 'max_faces' | 'min_confidence'>
>;
-export type MidasDepthProcessorConfig = Required<
+export type MediapipeFaceProcessorConfig = z.infer<typeof zMediapipeFaceProcessorConfig>;
+
+const zMidasDepthProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('midas_depth_image_processor'),
+  a_mult: z.number().gte(0),
+  bg_th: z.number().gte(0),
+});
+export type _MidasDepthProcessorConfig = Required<
  Pick<MidasDepthImageProcessorInvocation, 'id' | 'type' | 'a_mult' | 'bg_th'>
>;
-export type MlsdProcessorConfig = Required<Pick<MlsdImageProcessorInvocation, 'id' | 'type' | 'thr_v' | 'thr_d'>>;
-type NormalbaeProcessorConfig = Required<Pick<NormalbaeImageProcessorInvocation, 'id' | 'type'>>;
-export type DWOpenposeProcessorConfig = Required<
+export type MidasDepthProcessorConfig = z.infer<typeof zMidasDepthProcessorConfig>;
+
+const zMlsdProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('mlsd_image_processor'),
+  thr_v: z.number().gte(0),
+  thr_d: z.number().gte(0),
+});
+export type _MlsdProcessorConfig = Required<Pick<MlsdImageProcessorInvocation, 'id' | 'type' | 'thr_v' | 'thr_d'>>;
+export type MlsdProcessorConfig = z.infer<typeof zMlsdProcessorConfig>;
+
+const zNormalbaeProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('normalbae_image_processor'),
+});
+export type _NormalbaeProcessorConfig = Required<Pick<NormalbaeImageProcessorInvocation, 'id' | 'type'>>;
+export type NormalbaeProcessorConfig = z.infer<typeof zNormalbaeProcessorConfig>;
+
+const zDWOpenposeProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('dw_openpose_image_processor'),
+  draw_body: z.boolean(),
+  draw_face: z.boolean(),
+  draw_hands: z.boolean(),
+});
+export type _DWOpenposeProcessorConfig = Required<
  Pick<DWOpenposeImageProcessorInvocation, 'id' | 'type' | 'draw_body' | 'draw_face' | 'draw_hands'>
>;
-export type PidiProcessorConfig = Required<Pick<PidiImageProcessorInvocation, 'id' | 'type' | 'safe' | 'scribble'>>;
-type ZoeDepthProcessorConfig = Required<Pick<ZoeDepthImageProcessorInvocation, 'id' | 'type'>>;
-
-export type ProcessorConfig =
-  | CannyProcessorConfig
-  | ColorMapProcessorConfig
-  | ContentShuffleProcessorConfig
-  | DepthAnythingProcessorConfig
-  | HedProcessorConfig
-  | LineartAnimeProcessorConfig
-  | LineartProcessorConfig
-  | MediapipeFaceProcessorConfig
-  | MidasDepthProcessorConfig
-  | MlsdProcessorConfig
-  | NormalbaeProcessorConfig
-  | DWOpenposeProcessorConfig
-  | PidiProcessorConfig
-  | ZoeDepthProcessorConfig;
-
-export type ImageWithDims = {
-  name: string;
-  width: number;
-  height: number;
-};
-
-type ControlAdapterBase = {
-  id: string;
-  weight: number;
-  image: ImageWithDims | null;
-  processedImage: ImageWithDims | null;
-  isProcessingImage: boolean;
-  processorConfig: ProcessorConfig | null;
-  beginEndStepPct: [number, number];
-};
+export type DWOpenposeProcessorConfig = z.infer<typeof zDWOpenposeProcessorConfig>;
+
+const zPidiProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('pidi_image_processor'),
+  safe: z.boolean(),
+  scribble: z.boolean(),
+});
+export type _PidiProcessorConfig = Required<Pick<PidiImageProcessorInvocation, 'id' | 'type' | 'safe' | 'scribble'>>;
+export type PidiProcessorConfig = z.infer<typeof zPidiProcessorConfig>;
+
+const zZoeDepthProcessorConfig = z.object({
+  id: zId,
+  type: z.literal('zoe_depth_image_processor'),
+});
+export type _ZoeDepthProcessorConfig = Required<Pick<ZoeDepthImageProcessorInvocation, 'id' | 'type'>>;
+export type ZoeDepthProcessorConfig = z.infer<typeof zZoeDepthProcessorConfig>;
+
+export const zProcessorConfig = z.discriminatedUnion('type', [
+  zCannyProcessorConfig,
+  zColorMapProcessorConfig,
+  zContentShuffleProcessorConfig,
+  zDepthAnythingProcessorConfig,
+  zHedProcessorConfig,
+  zLineartAnimeProcessorConfig,
+  zLineartProcessorConfig,
+  zMediapipeFaceProcessorConfig,
+  zMidasDepthProcessorConfig,
+  zMlsdProcessorConfig,
+  zNormalbaeProcessorConfig,
+  zDWOpenposeProcessorConfig,
+  zPidiProcessorConfig,
+  zZoeDepthProcessorConfig,
+]);
+export type ProcessorConfig = z.infer<typeof zProcessorConfig>;
+
+export const zImageWithDims = z.object({
+  name: z.string(),
+  width: z.number().int().positive(),
+  height: z.number().int().positive(),
+});
+export type ImageWithDims = z.infer<typeof zImageWithDims>;
+
+const zBeginEndStepPct = z
+  .tuple([z.number().gte(0).lte(1), z.number().gte(0).lte(1)])
+  .refine(([begin, end]) => begin < end, {
+    message: 'Begin must be less than end',
+  });
+
+const zControlAdapterBase = z.object({
+  id: zId,
+  weight: z.number().gte(0).lte(0),
+  image: zImageWithDims.nullable(),
+  processedImage: zImageWithDims.nullable(),
+  isProcessingImage: z.boolean(),
+  processorConfig: zProcessorConfig.nullable(),
+  beginEndStepPct: zBeginEndStepPct,
+});

const zControlModeV2 = z.enum(['balanced', 'more_prompt', 'more_control', 'unbalanced']);
export type ControlModeV2 = z.infer<typeof zControlModeV2>;
export const isControlModeV2 = (v: unknown): v is ControlModeV2 => zControlModeV2.safeParse(v).success;

-export type ControlNetConfigV2 = ControlAdapterBase & {
-  type: 'controlnet';
-  model: ParameterControlNetModel | null;
-  controlMode: ControlModeV2;
-};
-export const isControlNetConfigV2 = (ca: ControlNetConfigV2 | T2IAdapterConfigV2): ca is ControlNetConfigV2 =>
-  ca.type === 'controlnet';
-
-export type T2IAdapterConfigV2 = ControlAdapterBase & {
-  type: 't2i_adapter';
-  model: ParameterT2IAdapterModel | null;
-};
+export const zControlNetConfigV2 = zControlAdapterBase.extend({
+  type: z.literal('controlnet'),
+  model: zModelIdentifierField.nullable(),
+  controlMode: zControlModeV2,
+});
+export type ControlNetConfigV2 = z.infer<typeof zControlNetConfigV2>;
+export const isControlNetConfigV2 = (ca: ControlNetConfigV2 | T2IAdapterConfigV2): ca is ControlNetConfigV2 =>
+  zControlNetConfigV2.safeParse(ca).success;
+
+export const zT2IAdapterConfigV2 = zControlAdapterBase.extend({
+  type: z.literal('t2i_adapter'),
+  model: zModelIdentifierField.nullable(),
+});
+export type T2IAdapterConfigV2 = z.infer<typeof zT2IAdapterConfigV2>;
export const isT2IAdapterConfigV2 = (ca: ControlNetConfigV2 | T2IAdapterConfigV2): ca is T2IAdapterConfigV2 =>
-  ca.type === 't2i_adapter';
+  zT2IAdapterConfigV2.safeParse(ca).success;

const zCLIPVisionModelV2 = z.enum(['ViT-H', 'ViT-G']);
export type CLIPVisionModelV2 = z.infer<typeof zCLIPVisionModelV2>;
@@ -121,16 +234,17 @@ const zIPMethodV2 = z.enum(['full', 'style', 'composition']);
export type IPMethodV2 = z.infer<typeof zIPMethodV2>;
export const isIPMethodV2 = (v: unknown): v is IPMethodV2 => zIPMethodV2.safeParse(v).success;

-export type IPAdapterConfigV2 = {
-  id: string;
-  type: 'ip_adapter';
-  weight: number;
-  method: IPMethodV2;
-  image: ImageWithDims | null;
-  model: ParameterIPAdapterModel | null;
-  clipVisionModel: CLIPVisionModelV2;
-  beginEndStepPct: [number, number];
-};
+export const zIPAdapterConfigV2 = z.object({
+  id: zId,
+  type: z.literal('ip_adapter'),
+  weight: z.number().gte(0).lte(0),
+  method: zIPMethodV2,
+  image: zImageWithDims.nullable(),
+  model: zModelIdentifierField.nullable(),
+  clipVisionModel: zCLIPVisionModelV2,
+  beginEndStepPct: zBeginEndStepPct,
+});
+export type IPAdapterConfigV2 = z.infer<typeof zIPAdapterConfigV2>;

const zProcessorTypeV2 = z.enum([
  'canny_image_processor',
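One behavioral note on the guards above: isControlNetConfigV2 and isT2IAdapterConfigV2 now call safeParse, so they validate the whole object at runtime rather than only checking the type discriminator. A usage sketch; the candidate value is hypothetical, while the import path is the one already used by the first file in this diff:

import {
  type ControlNetConfigV2,
  isControlNetConfigV2,
  type T2IAdapterConfigV2,
} from 'features/controlLayers/util/controlAdapters';

// Hypothetical value, e.g. read back from persisted UI state.
declare const candidate: ControlNetConfigV2 | T2IAdapterConfigV2;

if (isControlNetConfigV2(candidate)) {
  // Narrowed to ControlNetConfigV2; safeParse checked every field,
  // not just `type === 'controlnet'`.
  console.log(candidate.controlMode);
}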

View File

@@ -16,14 +16,14 @@ import { z } from 'zod';
 */

// #region Positive prompt
-const zParameterPositivePrompt = z.string();
+export const zParameterPositivePrompt = z.string();
export type ParameterPositivePrompt = z.infer<typeof zParameterPositivePrompt>;
export const isParameterPositivePrompt = (val: unknown): val is ParameterPositivePrompt =>
  zParameterPositivePrompt.safeParse(val).success;
// #endregion

// #region Negative prompt
-const zParameterNegativePrompt = z.string();
+export const zParameterNegativePrompt = z.string();
export type ParameterNegativePrompt = z.infer<typeof zParameterNegativePrompt>;
export const isParameterNegativePrompt = (val: unknown): val is ParameterNegativePrompt =>
  zParameterNegativePrompt.safeParse(val).success;
@@ -127,7 +127,7 @@ export type ParameterT2IAdapterModel = z.infer<typeof zParameterT2IAdapterModel>
// #endregion

// #region Strength (l2l strength)
-const zParameterStrength = z.number().min(0).max(1);
+export const zParameterStrength = z.number().min(0).max(1);
export type ParameterStrength = z.infer<typeof zParameterStrength>;
export const isParameterStrength = (val: unknown): val is ParameterStrength =>
  zParameterStrength.safeParse(val).success;
@@ -198,6 +198,6 @@ export const isParameterLoRAWeight = (val: unknown): val is ParameterLoRAWeight
// #endregion

// #region Regional Prompts AutoNegative
-const zAutoNegative = z.enum(['off', 'invert']);
+export const zAutoNegative = z.enum(['off', 'invert']);
export type ParameterAutoNegative = z.infer<typeof zAutoNegative>;
// #endregion