diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py index 417aa8e1d1..26aec288a2 100644 --- a/invokeai/app/invocations/fields.py +++ b/invokeai/app/invocations/fields.py @@ -64,6 +64,7 @@ class UIType(str, Enum, metaclass=MetaEnum): Imagen3Model = "Imagen3ModelField" Imagen4Model = "Imagen4ModelField" ChatGPT4oModel = "ChatGPT4oModelField" + FluxKontextModel = "FluxKontextModelField" # endregion # region Misc Field Types diff --git a/invokeai/backend/model_manager/taxonomy.py b/invokeai/backend/model_manager/taxonomy.py index 5285d53c25..a353a44e76 100644 --- a/invokeai/backend/model_manager/taxonomy.py +++ b/invokeai/backend/model_manager/taxonomy.py @@ -29,6 +29,7 @@ class BaseModelType(str, Enum): Imagen3 = "imagen3" Imagen4 = "imagen4" ChatGPT4o = "chatgpt-4o" + FluxKontext = "flux-kontext" class ModelType(str, Enum): diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 5f34bbb2e7..ac12910bec 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1147,6 +1147,7 @@ "modelIncompatibleScaledBboxWidth": "Scaled bbox width is {{width}} but {{model}} requires multiple of {{multiple}}", "modelIncompatibleScaledBboxHeight": "Scaled bbox height is {{height}} but {{model}} requires multiple of {{multiple}}", "fluxModelMultipleControlLoRAs": "Can only use 1 Control LoRA at a time", + "fluxKontextMultipleReferenceImages": "Can only use 1 Reference Image at a time with Flux Kontext", "canvasIsFiltering": "Canvas is busy (filtering)", "canvasIsTransforming": "Canvas is busy (transforming)", "canvasIsRasterizing": "Canvas is busy (rasterizing)", @@ -1337,6 +1338,7 @@ "fluxFillIncompatibleWithT2IAndI2I": "FLUX Fill is not compatible with Text to Image or Image to Image. Use other FLUX models for these tasks.", "imagenIncompatibleGenerationMode": "Google {{model}} supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.", "chatGPT4oIncompatibleGenerationMode": "ChatGPT 4o supports Text to Image and Image to Image only. Use other models Inpainting and Outpainting tasks.", + "fluxKontextIncompatibleGenerationMode": "Flux Kontext supports Text to Image only. Use other models for Image to Image, Inpainting and Outpainting tasks.", "problemUnpublishingWorkflow": "Problem Unpublishing Workflow", "problemUnpublishingWorkflowDescription": "There was a problem unpublishing the workflow. 
Please try again.", "workflowUnpublished": "Workflow Unpublished" diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts index 294b1c2f15..afd9a2fff2 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts @@ -10,6 +10,7 @@ import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatch import { buildChatGPT4oGraph } from 'features/nodes/util/graph/generation/buildChatGPT4oGraph'; import { buildCogView4Graph } from 'features/nodes/util/graph/generation/buildCogView4Graph'; import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph'; +import { buildFluxKontextGraph } from 'features/nodes/util/graph/generation/buildFluxKontextGraph'; import { buildImagen3Graph } from 'features/nodes/util/graph/generation/buildImagen3Graph'; import { buildImagen4Graph } from 'features/nodes/util/graph/generation/buildImagen4Graph'; import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph'; @@ -59,6 +60,8 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) return await buildImagen4Graph(state, manager); case 'chatgpt-4o': return await buildChatGPT4oGraph(state, manager); + case 'flux-kontext': + return await buildFluxKontextGraph(state, manager); default: assert(false, `No graph builders for base ${base}`); } diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts index f9a85d7bc9..7415678b71 100644 --- a/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/addLayerHooks.ts @@ -29,6 +29,7 @@ import type { import { initialChatGPT4oReferenceImage, initialControlNet, + initialFluxKontextReferenceImage, initialIPAdapter, initialT2IAdapter, } from 'features/controlLayers/store/util'; @@ -87,6 +88,12 @@ export const selectDefaultRefImageConfig = createSelector( return referenceImage; } + if (selectedMainModel?.base === 'flux-kontext') { + const referenceImage = deepClone(initialFluxKontextReferenceImage); + referenceImage.model = zModelIdentifierField.parse(selectedMainModel); + return referenceImage; + } + const { data } = query; let model: IPAdapterModelConfig | null = null; if (data) { diff --git a/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts b/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts index 3c76d9c96b..6d9619df66 100644 --- a/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts +++ b/invokeai/frontend/web/src/features/controlLayers/hooks/useIsEntityTypeEnabled.ts @@ -2,10 +2,12 @@ import { useAppSelector } from 'app/store/storeHooks'; import { selectIsChatGTP4o, selectIsCogView4, + selectIsFluxKontext, selectIsImagen3, selectIsImagen4, selectIsSD3, } from 'features/controlLayers/store/paramsSlice'; +import { selectActiveReferenceImageEntities } from 'features/controlLayers/store/selectors'; import type { CanvasEntityType } from 'features/controlLayers/store/types'; import { useMemo } from 'react'; import type { Equals } from 'tsafe'; @@ -17,23 +19,28 @@ export const useIsEntityTypeEnabled = (entityType: 
CanvasEntityType) => { const isImagen3 = useAppSelector(selectIsImagen3); const isImagen4 = useAppSelector(selectIsImagen4); const isChatGPT4o = useAppSelector(selectIsChatGTP4o); + const isFluxKontext = useAppSelector(selectIsFluxKontext); + const activeReferenceImageEntities = useAppSelector(selectActiveReferenceImageEntities); const isEntityTypeEnabled = useMemo(() => { switch (entityType) { case 'reference_image': + if (isFluxKontext) { + return activeReferenceImageEntities.length === 0; + } return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4; case 'regional_guidance': - return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isChatGPT4o; + return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o; case 'control_layer': - return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isChatGPT4o; + return !isSD3 && !isCogView4 && !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o; case 'inpaint_mask': - return !isImagen3 && !isImagen4 && !isChatGPT4o; + return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o; case 'raster_layer': - return !isImagen3 && !isImagen4 && !isChatGPT4o; + return !isImagen3 && !isImagen4 && !isFluxKontext && !isChatGPT4o; default: assert>(false); } - }, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isChatGPT4o]); + }, [entityType, isSD3, isCogView4, isImagen3, isImagen4, isFluxKontext, isChatGPT4o, activeReferenceImageEntities]); return isEntityTypeEnabled; }; diff --git a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts index a10b688ac1..661adcff3b 100644 --- a/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/controlLayers/store/canvasSlice.ts @@ -69,7 +69,13 @@ import type { IPMethodV2, T2IAdapterConfig, } from './types'; -import { getEntityIdentifier, isChatGPT4oAspectRatioID, isImagenAspectRatioID, isRenderableEntity } from './types'; +import { + getEntityIdentifier, + isChatGPT4oAspectRatioID, + isFluxKontextAspectRatioID, + isImagenAspectRatioID, + isRenderableEntity, +} from './types'; import { converters, getControlLayerState, @@ -81,6 +87,7 @@ import { initialChatGPT4oReferenceImage, initialControlLoRA, initialControlNet, + initialFluxKontextReferenceImage, initialFLUXRedux, initialIPAdapter, initialT2IAdapter, @@ -686,6 +693,16 @@ export const canvasSlice = createSlice({ return; } + if (entity.ipAdapter.model.base === 'flux-kontext') { + // Switching to flux-kontext + entity.ipAdapter = { + ...initialFluxKontextReferenceImage, + image: entity.ipAdapter.image, + model: entity.ipAdapter.model, + }; + return; + } + if (entity.ipAdapter.model.type === 'flux_redux') { // Switching to flux_redux entity.ipAdapter = { @@ -1322,6 +1339,31 @@ export const canvasSlice = createSlice({ } state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height; state.bbox.aspectRatio.isLocked = true; + } else if (state.bbox.modelBase === 'flux-kontext' && isFluxKontextAspectRatioID(id)) { + if (id === '3:4') { + state.bbox.rect.width = 880; + state.bbox.rect.height = 1184; + } else if (id === '4:3') { + state.bbox.rect.width = 1184; + state.bbox.rect.height = 880; + } else if (id === '9:16') { + state.bbox.rect.width = 752; + state.bbox.rect.height = 1392; + } else if (id === '16:9') { + state.bbox.rect.width = 1392; + state.bbox.rect.height = 752; + } else if (id === '21:9') { + state.bbox.rect.width = 1568; + state.bbox.rect.height = 672; 
+      } else if (id === '9:21') {
+        state.bbox.rect.width = 672;
+        state.bbox.rect.height = 1568;
+      } else if (id === '1:1') {
+        state.bbox.rect.width = 880;
+        state.bbox.rect.height = 880;
+      }
+      state.bbox.aspectRatio.value = state.bbox.rect.width / state.bbox.rect.height;
+      state.bbox.aspectRatio.isLocked = true;
     } else {
       state.bbox.aspectRatio.isLocked = true;
       state.bbox.aspectRatio.value = ASPECT_RATIO_MAP[id].ratio;
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
index 573752cc3a..069e8fc366 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/paramsSlice.ts
@@ -383,6 +383,7 @@ export const selectIsCogView4 = createParamsSelector((params) => params.model?.b
 export const selectIsImagen3 = createParamsSelector((params) => params.model?.base === 'imagen3');
 export const selectIsImagen4 = createParamsSelector((params) => params.model?.base === 'imagen4');
 export const selectIsChatGTP4o = createParamsSelector((params) => params.model?.base === 'chatgpt-4o');
+export const selectIsFluxKontext = createParamsSelector((params) => params.model?.base === 'flux-kontext');
 
 export const selectModel = createParamsSelector((params) => params.model);
 export const selectModelKey = createParamsSelector((params) => params.model?.key);
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/types.ts b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
index e681ca73ea..9d0f79b8a8 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/types.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/types.ts
@@ -258,6 +258,13 @@ const zChatGPT4oReferenceImageConfig = z.object({
 });
 export type ChatGPT4oReferenceImageConfig = z.infer<typeof zChatGPT4oReferenceImageConfig>;
 
+const zFluxKontextReferenceImageConfig = z.object({
+  type: z.literal('flux_kontext_reference_image'),
+  image: zImageWithDims.nullable(),
+  model: zServerValidatedModelIdentifierField.nullable(),
+});
+export type FluxKontextReferenceImageConfig = z.infer<typeof zFluxKontextReferenceImageConfig>;
+
 const zCanvasEntityBase = z.object({
   id: zId,
   name: zName,
@@ -268,7 +275,12 @@ const zCanvasEntityBase = z.object({
 const zCanvasReferenceImageState = zCanvasEntityBase.extend({
   type: z.literal('reference_image'),
   // This should be named `referenceImage` but we need to keep it as `ipAdapter` for backwards compatibility
-  ipAdapter: z.discriminatedUnion('type', [zIPAdapterConfig, zFLUXReduxConfig, zChatGPT4oReferenceImageConfig]),
+  ipAdapter: z.discriminatedUnion('type', [
+    zIPAdapterConfig,
+    zFLUXReduxConfig,
+    zChatGPT4oReferenceImageConfig,
+    zFluxKontextReferenceImageConfig,
+  ]),
 });
 export type CanvasReferenceImageState = z.infer<typeof zCanvasReferenceImageState>;
 
@@ -280,6 +292,9 @@ export const isFLUXReduxConfig = (config: CanvasReferenceImageState['ipAdapter']
 export const isChatGPT4oReferenceImageConfig = (
   config: CanvasReferenceImageState['ipAdapter']
 ): config is ChatGPT4oReferenceImageConfig => config.type === 'chatgpt_4o_reference_image';
+export const isFluxKontextReferenceImageConfig = (
+  config: CanvasReferenceImageState['ipAdapter']
+): config is FluxKontextReferenceImageConfig => config.type === 'flux_kontext_reference_image';
 
 const zFillStyle = z.enum(['solid', 'grid', 'crosshatch', 'diagonal', 'horizontal', 'vertical']);
 export type FillStyle = z.infer<typeof zFillStyle>;
@@ -406,7 +421,7 @@ export type StagingAreaImage = {
   offsetY: number;
 };
 
-export const zAspectRatioID = z.enum(['Free', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
+export const zAspectRatioID = z.enum(['Free', '21:9', '9:21', '16:9', '3:2', '4:3', '1:1', '3:4', '2:3', '9:16']);
 
 export const zImagen3AspectRatioID = z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']);
 export const isImagenAspectRatioID = (v: unknown): v is z.infer<typeof zImagen3AspectRatioID> =>
@@ -416,6 +431,10 @@ export const zChatGPT4oAspectRatioID = z.enum(['3:2', '1:1', '2:3']);
 export const isChatGPT4oAspectRatioID = (v: unknown): v is z.infer<typeof zChatGPT4oAspectRatioID> =>
   zChatGPT4oAspectRatioID.safeParse(v).success;
 
+export const zFluxKontextAspectRatioID = z.enum(['21:9', '4:3', '1:1', '3:4', '9:21', '16:9', '9:16']);
+export const isFluxKontextAspectRatioID = (v: unknown): v is z.infer<typeof zFluxKontextAspectRatioID> =>
+  zFluxKontextAspectRatioID.safeParse(v).success;
+
 export type AspectRatioID = z.infer<typeof zAspectRatioID>;
 export const isAspectRatioID = (v: unknown): v is AspectRatioID => zAspectRatioID.safeParse(v).success;
diff --git a/invokeai/frontend/web/src/features/controlLayers/store/util.ts b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
index 4f809d720b..5fb3cc27dc 100644
--- a/invokeai/frontend/web/src/features/controlLayers/store/util.ts
+++ b/invokeai/frontend/web/src/features/controlLayers/store/util.ts
@@ -10,6 +10,7 @@ import type {
   ChatGPT4oReferenceImageConfig,
   ControlLoRAConfig,
   ControlNetConfig,
+  FluxKontextReferenceImageConfig,
   FLUXReduxConfig,
   ImageWithDims,
   IPAdapterConfig,
@@ -83,6 +84,11 @@ export const initialChatGPT4oReferenceImage: ChatGPT4oReferenceImageConfig = {
   image: null,
   model: null,
 };
+export const initialFluxKontextReferenceImage: FluxKontextReferenceImageConfig = {
+  type: 'flux_kontext_reference_image',
+  image: null,
+  model: null,
+};
 export const initialT2IAdapter: T2IAdapterConfig = {
   type: 't2i_adapter',
   model: null,
diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelBaseBadge.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelBaseBadge.tsx
index 6069444cc4..59b7f022e2 100644
--- a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelBaseBadge.tsx
+++ b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/ModelManagerPanel/ModelBaseBadge.tsx
@@ -19,6 +19,7 @@ export const BASE_COLOR_MAP: Record = {
   imagen3: 'pink',
   imagen4: 'pink',
   'chatgpt-4o': 'pink',
+  'flux-kontext': 'pink',
 };
 
 const ModelBaseBadge = ({ base }: Props) => {
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx
index bf1da12eb0..73f27a34cf 100644
--- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/InputFieldRenderer.tsx
@@ -4,6 +4,7 @@ import { FloatFieldSlider } from 'features/nodes/components/flow/nodes/Invocatio
 import ChatGPT4oModelFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ChatGPT4oModelFieldInputComponent';
 import { FloatFieldCollectionInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/FloatFieldCollectionInputComponent';
 import { FloatGeneratorFieldInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/FloatGeneratorFieldComponent';
+import FluxKontextModelFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/FluxKontextModelFieldInputComponent';
 import { ImageFieldCollectionInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ImageFieldCollectionInputComponent';
 import { ImageGeneratorFieldInputComponent } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ImageGeneratorFieldComponent';
 import Imagen3ModelFieldInputComponent from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/Imagen3ModelFieldInputComponent';
@@ -50,6 +51,8 @@ import {
   isFloatFieldInputTemplate,
   isFloatGeneratorFieldInputInstance,
   isFloatGeneratorFieldInputTemplate,
+  isFluxKontextModelFieldInputInstance,
+  isFluxKontextModelFieldInputTemplate,
   isFluxMainModelFieldInputInstance,
   isFluxMainModelFieldInputTemplate,
   isFluxReduxModelFieldInputInstance,
@@ -417,6 +420,13 @@ export const InputFieldRenderer = memo(({ nodeId, fieldName, settings }: Props)
     return ;
   }
 
+  if (isFluxKontextModelFieldInputTemplate(template)) {
+    if (!isFluxKontextModelFieldInputInstance(field)) {
+      return null;
+    }
+    return <FluxKontextModelFieldInputComponent nodeId={nodeId} field={field} fieldTemplate={template} />;
+  }
+
   if (isChatGPT4oModelFieldInputTemplate(template)) {
     if (!isChatGPT4oModelFieldInputInstance(field)) {
       return null;
diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/FluxKontextModelFieldInputComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/FluxKontextModelFieldInputComponent.tsx
new file mode 100644
index 0000000000..4a66288056
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/FluxKontextModelFieldInputComponent.tsx
@@ -0,0 +1,49 @@
+import { useAppDispatch } from 'app/store/storeHooks';
+import { ModelFieldCombobox } from 'features/nodes/components/flow/nodes/Invocation/fields/inputs/ModelFieldCombobox';
+import { fieldFluxKontextModelValueChanged } from 'features/nodes/store/nodesSlice';
+import type {
+  FluxKontextModelFieldInputInstance,
+  FluxKontextModelFieldInputTemplate,
+} from 'features/nodes/types/field';
+import { memo, useCallback } from 'react';
+import { useFluxKontextModels } from 'services/api/hooks/modelsByType';
+import type { ApiModelConfig } from 'services/api/types';
+
+import type { FieldComponentProps } from './types';
+
+const FluxKontextModelFieldInputComponent = (
+  props: FieldComponentProps<FluxKontextModelFieldInputInstance, FluxKontextModelFieldInputTemplate>
+) => {
+  const { nodeId, field } = props;
+  const dispatch = useAppDispatch();
+
+  const [modelConfigs, { isLoading }] = useFluxKontextModels();
+
+  const onChange = useCallback(
+    (value: ApiModelConfig | null) => {
+      if (!value) {
+        return;
+      }
+      dispatch(
+        fieldFluxKontextModelValueChanged({
+          nodeId,
+          fieldName: field.name,
+          value,
+        })
+      );
+    },
+    [dispatch, field.name, nodeId]
+  );
+
+  return (
+    <ModelFieldCombobox
+      value={field.value}
+      modelConfigs={modelConfigs}
+      isLoadingConfigs={isLoading}
+      onChange={onChange}
+      required={props.fieldTemplate.required}
+    />
+  );
+};
+
+export default memo(FluxKontextModelFieldInputComponent);
diff --git a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts
index dadb12a72b..9b304a68bc 100644
--- a/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts
+++ b/invokeai/frontend/web/src/features/nodes/components/sidePanel/workflow/publish.ts
@@ -127,6 +127,8 @@ const NODE_TYPE_PUBLISH_DENYLIST = [
   'google_imagen4_generate_image',
   'chatgpt_4o_generate_image',
   'chatgpt_4o_edit_image',
+  'flux_kontext_generate_image',
+  'flux_kontext_edit_image',
 ];
 
 export const selectHasUnpublishableNodes = createSelector(selectNodes, (nodes) => {
diff --git a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts
index adb10f0fa8..c17aa6d068 100644
--- a/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts
+++ b/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts
@@ -34,6 +34,7 @@ import type {
   FieldValue,
   FloatFieldValue,
   FloatGeneratorFieldValue,
+  FluxKontextModelFieldValue,
   FluxReduxModelFieldValue,
   FluxVAEModelFieldValue,
   ImageFieldCollectionValue,
@@ -75,6 +76,7 @@ import {
   zFloatFieldCollectionValue,
   zFloatFieldValue,
   zFloatGeneratorFieldValue,
+  zFluxKontextModelFieldValue,
   zFluxReduxModelFieldValue,
   zFluxVAEModelFieldValue,
   zImageFieldCollectionValue,
@@ -527,6 +529,9 @@ export const nodesSlice = createSlice({
     fieldChatGPT4oModelValueChanged: (state, action: FieldValueAction<ChatGPT4oModelFieldValue>) => {
       fieldValueReducer(state, action, zChatGPT4oModelFieldValue);
     },
+    fieldFluxKontextModelValueChanged: (state, action: FieldValueAction<FluxKontextModelFieldValue>) => {
+      fieldValueReducer(state, action, zFluxKontextModelFieldValue);
+    },
     fieldEnumModelValueChanged: (state, action: FieldValueAction<EnumFieldValue>) => {
       fieldValueReducer(state, action, zEnumFieldValue);
     },
@@ -697,6 +702,7 @@ export const {
   fieldImagen3ModelValueChanged,
   fieldImagen4ModelValueChanged,
   fieldChatGPT4oModelValueChanged,
+  fieldFluxKontextModelValueChanged,
   fieldFloatGeneratorValueChanged,
   fieldIntegerGeneratorValueChanged,
   fieldStringGeneratorValueChanged,
diff --git a/invokeai/frontend/web/src/features/nodes/types/common.ts b/invokeai/frontend/web/src/features/nodes/types/common.ts
index c0f54fb935..c57954b2ec 100644
--- a/invokeai/frontend/web/src/features/nodes/types/common.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/common.ts
@@ -78,6 +78,7 @@ const zBaseModel = z.enum([
   'imagen3',
   'imagen4',
   'chatgpt-4o',
+  'flux-kontext',
 ]);
 export type BaseModelType = z.infer<typeof zBaseModel>;
 export const zMainModelBase = z.enum([
@@ -90,6 +91,7 @@ export const zMainModelBase = z.enum([
   'imagen3',
   'imagen4',
   'chatgpt-4o',
+  'flux-kontext',
 ]);
 export type MainModelBase = z.infer<typeof zMainModelBase>;
 export const isMainModelBase = (base: unknown): base is MainModelBase => zMainModelBase.safeParse(base).success;
diff --git a/invokeai/frontend/web/src/features/nodes/types/field.ts b/invokeai/frontend/web/src/features/nodes/types/field.ts
index f834564407..9dd31ce7f0 100644
--- a/invokeai/frontend/web/src/features/nodes/types/field.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/field.ts
@@ -260,6 +260,10 @@ const zChatGPT4oModelFieldType = zFieldTypeBase.extend({
   name: z.literal('ChatGPT4oModelField'),
   originalType: zStatelessFieldType.optional(),
 });
+const zFluxKontextModelFieldType = zFieldTypeBase.extend({
+  name: z.literal('FluxKontextModelField'),
+  originalType: zStatelessFieldType.optional(),
+});
 const zSchedulerFieldType = zFieldTypeBase.extend({
   name: z.literal('SchedulerField'),
   originalType: zStatelessFieldType.optional(),
@@ -313,6 +317,7 @@ const zStatefulFieldType = z.union([
   zImagen3ModelFieldType,
   zImagen4ModelFieldType,
   zChatGPT4oModelFieldType,
+  zFluxKontextModelFieldType,
   zColorFieldType,
   zSchedulerFieldType,
   zFloatGeneratorFieldType,
@@ -354,6 +359,7 @@ const modelFieldTypeNames = [
   zImagen3ModelFieldType.shape.name.value,
   zImagen4ModelFieldType.shape.name.value,
   zChatGPT4oModelFieldType.shape.name.value,
+  zFluxKontextModelFieldType.shape.name.value,
   // Stateless model fields
   'UNetField',
   'VAEField',
@@ -1231,6 +1237,24 @@ export const isImagen4ModelFieldInputTemplate =
   buildTemplateTypeGuard<Imagen4ModelFieldInputTemplate>('Imagen4ModelField');
 // #endregion
+
+// #region FluxKontextModelField
+export const zFluxKontextModelFieldValue = zModelIdentifierField.optional();
+const zFluxKontextModelFieldInputInstance = zFieldInputInstanceBase.extend({
+  value: zFluxKontextModelFieldValue,
+});
+const zFluxKontextModelFieldInputTemplate = zFieldInputTemplateBase.extend({
+  type: zFluxKontextModelFieldType,
+  originalType: zFieldType.optional(),
+  default: zFluxKontextModelFieldValue,
+});
+export type FluxKontextModelFieldValue = z.infer<typeof zFluxKontextModelFieldValue>;
+export type FluxKontextModelFieldInputInstance = z.infer<typeof zFluxKontextModelFieldInputInstance>;
+export type FluxKontextModelFieldInputTemplate = z.infer<typeof zFluxKontextModelFieldInputTemplate>;
+export const isFluxKontextModelFieldInputInstance = buildInstanceTypeGuard(zFluxKontextModelFieldInputInstance);
+export const isFluxKontextModelFieldInputTemplate =
+  buildTemplateTypeGuard<FluxKontextModelFieldInputTemplate>('FluxKontextModelField');
+// #endregion
+
 // #region ChatGPT4oModelField
 export const zChatGPT4oModelFieldValue = zModelIdentifierField.optional();
 const zChatGPT4oModelFieldInputInstance = zFieldInputInstanceBase.extend({
@@ -1882,6 +1906,7 @@ export const zStatefulFieldValue = z.union([
   zFluxReduxModelFieldValue,
   zImagen3ModelFieldValue,
   zImagen4ModelFieldValue,
+  zFluxKontextModelFieldValue,
   zChatGPT4oModelFieldValue,
   zColorFieldValue,
   zSchedulerFieldValue,
@@ -1976,6 +2001,7 @@ const zStatefulFieldInputTemplate = z.union([
   zImagen3ModelFieldInputTemplate,
   zImagen4ModelFieldInputTemplate,
   zChatGPT4oModelFieldInputTemplate,
+  zFluxKontextModelFieldInputTemplate,
   zColorFieldInputTemplate,
   zSchedulerFieldInputTemplate,
   zStatelessFieldInputTemplate,
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFluxKontextGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFluxKontextGraph.ts
new file mode 100644
index 0000000000..a1da6df12b
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFluxKontextGraph.ts
@@ -0,0 +1,92 @@
+import { logger } from 'app/logging/logger';
+import type { RootState } from 'app/store/store';
+import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
+import { getPrefixedId } from 'features/controlLayers/konva/util';
+import { selectCanvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
+import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
+import { isFluxKontextReferenceImageConfig } from 'features/controlLayers/store/types';
+import { getGlobalReferenceImageWarnings } from 'features/controlLayers/store/validators';
+import type { ImageField } from 'features/nodes/types/common';
+import { zModelIdentifierField } from 'features/nodes/types/common';
+import { Graph } from 'features/nodes/util/graph/generation/Graph';
+import {
+  CANVAS_OUTPUT_PREFIX,
+  getBoardField,
+  selectPresetModifiedPrompts,
+} from 'features/nodes/util/graph/graphBuilderUtils';
+import { type GraphBuilderReturn, UnsupportedGenerationModeError } from 'features/nodes/util/graph/types';
+import { t } from 'i18next';
+import { selectMainModelConfig } from 'services/api/endpoints/models';
+import type { Equals } from 'tsafe';
+import { assert } from 'tsafe';
+
+const log = logger('system');
+
+export const buildFluxKontextGraph = async (state: RootState, manager: CanvasManager): Promise<GraphBuilderReturn> => {
+  const generationMode = await manager.compositor.getGenerationMode();
+
+  if (generationMode !== 'txt2img') {
+    throw new UnsupportedGenerationModeError(t('toast.fluxKontextIncompatibleGenerationMode'));
+  }
+
+  log.debug({ generationMode }, 'Building Flux Kontext graph');
+
+  const model = selectMainModelConfig(state);
+
+  const canvas = selectCanvasSlice(state);
+  const canvasSettings = selectCanvasSettingsSlice(state);
+
+  const { bbox } = canvas;
+  const { positivePrompt } = selectPresetModifiedPrompts(state);
+
+  assert(model, 'No model found in state');
+  assert(model.base === 'flux-kontext', 'Model is not a Flux Kontext model');
+
+  const is_intermediate = canvasSettings.sendToCanvas;
+  const board = canvasSettings.sendToCanvas ? undefined : getBoardField(state);
+
+  const validRefImages = canvas.referenceImages.entities
+    .filter((entity) => entity.isEnabled)
+    .filter((entity) => isFluxKontextReferenceImageConfig(entity.ipAdapter))
+    .filter((entity) => getGlobalReferenceImageWarnings(entity, model).length === 0);
+
+  let input_image: ImageField | undefined = undefined;
+
+  if (validRefImages[0]) {
+    assert(validRefImages.length === 1, 'Flux Kontext can have at most one reference image');
+
+    assert(validRefImages[0].ipAdapter.image, 'Image is required for reference image');
+    input_image = {
+      image_name: validRefImages[0].ipAdapter.image.image_name,
+    };
+  }
+
+  if (generationMode === 'txt2img') {
+    const g = new Graph(getPrefixedId('flux_kontext_txt2img_graph'));
+    const fluxKontextImage = g.addNode({
+      // @ts-expect-error: These nodes are not available in the OSS application
+      type: input_image ? 'flux_kontext_edit_image' : 'flux_kontext_generate_image',
+      id: getPrefixedId(CANVAS_OUTPUT_PREFIX),
+      model: zModelIdentifierField.parse(model),
+      positive_prompt: positivePrompt,
+      aspect_ratio: bbox.aspectRatio.id,
+      use_cache: false,
+      is_intermediate,
+      board,
+      input_image,
+      prompt_upsampling: true,
+    });
+    g.upsertMetadata({
+      positive_prompt: positivePrompt,
+      model: Graph.getModelMetadataField(model),
+      width: bbox.rect.width,
+      height: bbox.rect.height,
+    });
+    return {
+      g,
+      positivePromptFieldIdentifier: { nodeId: fluxKontextImage.id, fieldName: 'positive_prompt' },
+    };
+  }
+
+  assert<Equals<typeof generationMode, 'txt2img'>>(false, 'Invalid generation mode for Flux Kontext');
+};
diff --git a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts
index 741b19e09e..736b486a1b 100644
--- a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputInstance.ts
@@ -36,6 +36,7 @@ const FIELD_VALUE_FALLBACK_MAP: Record =
   Imagen3ModelField: undefined,
   Imagen4ModelField: undefined,
   ChatGPT4oModelField: undefined,
+  FluxKontextModelField: undefined,
   FloatGeneratorField: undefined,
   IntegerGeneratorField: undefined,
   StringGeneratorField: undefined,
diff --git a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts
index 4cb6ffe5aa..1c61610858 100644
--- a/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts
@@ -16,6 +16,7 @@ import type {
   FloatFieldCollectionInputTemplate,
   FloatFieldInputTemplate,
   FloatGeneratorFieldInputTemplate,
+  FluxKontextModelFieldInputTemplate,
   FluxMainModelFieldInputTemplate,
   FluxReduxModelFieldInputTemplate,
   FluxVAEModelFieldInputTemplate,
@@ -613,6 +614,20 @@ const buildImagen4ModelFieldInputTemplate: FieldInputTemplateBuilder = ({
   return template;
 };
 
+const buildFluxKontextModelFieldInputTemplate: FieldInputTemplateBuilder<FluxKontextModelFieldInputTemplate> = ({
+  schemaObject,
+  baseField,
+  fieldType,
+}) => {
+  const template: FluxKontextModelFieldInputTemplate = {
+    ...baseField,
+    type: fieldType,
+    default: schemaObject.default ??
undefined, + }; + return template; +}; + const buildChatGPT4oModelFieldInputTemplate: FieldInputTemplateBuilder = ({ schemaObject, baseField, @@ -835,6 +850,7 @@ export const TEMPLATE_BUILDER_MAP: Record { const isImagen3 = useAppSelector(selectIsImagen3); const isChatGPT4o = useAppSelector(selectIsChatGTP4o); const isImagen4 = useAppSelector(selectIsImagen4); + const isFluxKontext = useAppSelector(selectIsFluxKontext); const options = useMemo(() => { // Imagen3 and ChatGPT4o have different aspect ratio options, and do not support freeform sizes if (isImagen3 || isImagen4) { @@ -32,9 +39,12 @@ export const BboxAspectRatioSelect = memo(() => { if (isChatGPT4o) { return zChatGPT4oAspectRatioID.options; } + if (isFluxKontext) { + return zFluxKontextAspectRatioID.options; + } // All other models return zAspectRatioID.options; - }, [isImagen3, isChatGPT4o, isImagen4]); + }, [isImagen3, isChatGPT4o, isImagen4, isFluxKontext]); const onChange = useCallback>( (e) => { diff --git a/invokeai/frontend/web/src/features/parameters/components/Bbox/constants.ts b/invokeai/frontend/web/src/features/parameters/components/Bbox/constants.ts index 83828cd222..b614eaec86 100644 --- a/invokeai/frontend/web/src/features/parameters/components/Bbox/constants.ts +++ b/invokeai/frontend/web/src/features/parameters/components/Bbox/constants.ts @@ -1,6 +1,7 @@ import type { AspectRatioID } from 'features/controlLayers/store/types'; export const ASPECT_RATIO_MAP: Record, { ratio: number; inverseID: AspectRatioID }> = { + '21:9': { ratio: 21 / 9, inverseID: '9:21' }, '16:9': { ratio: 16 / 9, inverseID: '9:16' }, '3:2': { ratio: 3 / 2, inverseID: '2:3' }, '4:3': { ratio: 4 / 3, inverseID: '4:3' }, @@ -8,4 +9,5 @@ export const ASPECT_RATIO_MAP: Record, { ratio: n '3:4': { ratio: 3 / 4, inverseID: '4:3' }, '2:3': { ratio: 2 / 3, inverseID: '3:2' }, '9:16': { ratio: 9 / 16, inverseID: '16:9' }, + '9:21': { ratio: 9 / 21, inverseID: '21:9' }, }; diff --git a/invokeai/frontend/web/src/features/parameters/hooks/useIsApiModel.ts b/invokeai/frontend/web/src/features/parameters/hooks/useIsApiModel.ts index 73c61cde30..90bcb6b391 100644 --- a/invokeai/frontend/web/src/features/parameters/hooks/useIsApiModel.ts +++ b/invokeai/frontend/web/src/features/parameters/hooks/useIsApiModel.ts @@ -1,10 +1,16 @@ import { useAppSelector } from 'app/store/storeHooks'; -import { selectIsChatGTP4o, selectIsImagen3, selectIsImagen4 } from 'features/controlLayers/store/paramsSlice'; +import { + selectIsChatGTP4o, + selectIsFluxKontext, + selectIsImagen3, + selectIsImagen4, +} from 'features/controlLayers/store/paramsSlice'; export const useIsApiModel = () => { const isImagen3 = useAppSelector(selectIsImagen3); const isImagen4 = useAppSelector(selectIsImagen4); const isChatGPT4o = useAppSelector(selectIsChatGTP4o); + const isFluxKontext = useAppSelector(selectIsFluxKontext); - return isImagen3 || isImagen4 || isChatGPT4o; + return isImagen3 || isImagen4 || isChatGPT4o || isFluxKontext; }; diff --git a/invokeai/frontend/web/src/features/parameters/types/constants.ts b/invokeai/frontend/web/src/features/parameters/types/constants.ts index 2795794acb..d00ff1b1fa 100644 --- a/invokeai/frontend/web/src/features/parameters/types/constants.ts +++ b/invokeai/frontend/web/src/features/parameters/types/constants.ts @@ -16,6 +16,7 @@ export const MODEL_TYPE_MAP: Record = { imagen3: 'Imagen3', imagen4: 'Imagen4', 'chatgpt-4o': 'ChatGPT 4o', + 'flux-kontext': 'Flux Kontext', }; /** @@ -33,6 +34,7 @@ export const MODEL_TYPE_SHORT_MAP: Record = { imagen3: 
'Imagen3', imagen4: 'Imagen4', 'chatgpt-4o': 'ChatGPT 4o', + 'flux-kontext': 'Flux Kontext', }; /** @@ -83,6 +85,10 @@ export const CLIP_SKIP_MAP: Record { case 'imagen3': case 'imagen4': case 'chatgpt-4o': + case 'flux-kontext': default: return 1024; } @@ -81,6 +82,7 @@ export const getGridSize = (base?: BaseModelType | null): number => { case 'sdxl': case 'imagen3': case 'chatgpt-4o': + case 'flux-kontext': default: return 8; } diff --git a/invokeai/frontend/web/src/features/queue/store/readiness.ts b/invokeai/frontend/web/src/features/queue/store/readiness.ts index a546f15ecf..a91ccbfb19 100644 --- a/invokeai/frontend/web/src/features/queue/store/readiness.ts +++ b/invokeai/frontend/web/src/features/queue/store/readiness.ts @@ -516,6 +516,17 @@ const getReasonsWhyCannotEnqueueCanvasTab = (arg: { } }); + const enabledGlobalReferenceLayers = canvas.referenceImages.entities.filter( + (referenceImage) => referenceImage.isEnabled + ); + + // Flux Kontext only supports 1x Reference Image at a time. + const referenceImageCount = enabledGlobalReferenceLayers.length; + + if (model?.base === 'flux-kontext' && referenceImageCount > 1) { + reasons.push({ content: i18n.t('parameters.invoke.fluxKontextMultipleReferenceImages') }); + } + canvas.referenceImages.entities .filter((entity) => entity.isEnabled) .forEach((entity, i) => { diff --git a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts index af5f2c0f6e..fc2c782003 100644 --- a/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts +++ b/invokeai/frontend/web/src/services/api/hooks/modelsByType.ts @@ -16,6 +16,7 @@ import { isControlLayerModelConfig, isControlLoRAModelConfig, isControlNetModelConfig, + isFluxKontextModelConfig, isFluxMainModelModelConfig, isFluxReduxModelConfig, isFluxVAEModelConfig, @@ -85,7 +86,11 @@ export const useCLIPVisionModels = buildModelsHook(isCLIPVisionModelConfig); export const useSigLipModels = buildModelsHook(isSigLipModelConfig); export const useFluxReduxModels = buildModelsHook(isFluxReduxModelConfig); export const useGlobalReferenceImageModels = buildModelsHook( - (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) || isChatGPT4oModelConfig(config) + (config) => + isIPAdapterModelConfig(config) || + isFluxReduxModelConfig(config) || + isChatGPT4oModelConfig(config) || + isFluxKontextModelConfig(config) ); export const useRegionalReferenceImageModels = buildModelsHook( (config) => isIPAdapterModelConfig(config) || isFluxReduxModelConfig(config) @@ -94,6 +99,7 @@ export const useLLaVAModels = buildModelsHook(isLLaVAModelConfig); export const useImagen3Models = buildModelsHook(isImagen3ModelConfig); export const useImagen4Models = buildModelsHook(isImagen4ModelConfig); export const useChatGPT4oModels = buildModelsHook(isChatGPT4oModelConfig); +export const useFluxKontextModels = buildModelsHook(isFluxKontextModelConfig); // const buildModelsSelector = // (typeGuard: (config: AnyModelConfig) => config is T): Selector => diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 2b62f83a19..13d8912b46 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2075,7 +2075,7 @@ export type components = { * @description Base model type. 
* @enum {string} */ - BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "imagen3" | "imagen4" | "chatgpt-4o"; + BaseModelType: "any" | "sd-1" | "sd-2" | "sd-3" | "sdxl" | "sdxl-refiner" | "flux" | "cogview4" | "imagen3" | "imagen4" | "chatgpt-4o" | "flux-kontext"; /** Batch */ Batch: { /** @@ -21199,7 +21199,7 @@ export type components = { * used, and the type will be ignored. They are included here for backwards compatibility. * @enum {string} */ - UIType: "MainModelField" | "CogView4MainModelField" | "FluxMainModelField" | "SD3MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VAEModelField" | "FluxVAEModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "T2IAdapterModelField" | "T5EncoderModelField" | "CLIPEmbedModelField" | "CLIPLEmbedModelField" | "CLIPGEmbedModelField" | "SpandrelImageToImageModelField" | "ControlLoRAModelField" | "SigLipModelField" | "FluxReduxModelField" | "LLaVAModelField" | "Imagen3ModelField" | "Imagen4ModelField" | "ChatGPT4oModelField" | "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" | "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict"; + UIType: "MainModelField" | "CogView4MainModelField" | "FluxMainModelField" | "SD3MainModelField" | "SDXLMainModelField" | "SDXLRefinerModelField" | "ONNXModelField" | "VAEModelField" | "FluxVAEModelField" | "LoRAModelField" | "ControlNetModelField" | "IPAdapterModelField" | "T2IAdapterModelField" | "T5EncoderModelField" | "CLIPEmbedModelField" | "CLIPLEmbedModelField" | "CLIPGEmbedModelField" | "SpandrelImageToImageModelField" | "ControlLoRAModelField" | "SigLipModelField" | "FluxReduxModelField" | "LLaVAModelField" | "Imagen3ModelField" | "Imagen4ModelField" | "ChatGPT4oModelField" | "FluxKontextModelField" | "SchedulerField" | "AnyField" | "CollectionField" | "CollectionItemField" | "DEPRECATED_Boolean" | "DEPRECATED_Color" | "DEPRECATED_Conditioning" | "DEPRECATED_Control" | "DEPRECATED_Float" | "DEPRECATED_Image" | "DEPRECATED_Integer" | "DEPRECATED_Latents" | "DEPRECATED_String" | "DEPRECATED_BooleanCollection" | "DEPRECATED_ColorCollection" | "DEPRECATED_ConditioningCollection" | "DEPRECATED_ControlCollection" | "DEPRECATED_FloatCollection" | "DEPRECATED_ImageCollection" | "DEPRECATED_IntegerCollection" | "DEPRECATED_LatentsCollection" | "DEPRECATED_StringCollection" | "DEPRECATED_BooleanPolymorphic" 
| "DEPRECATED_ColorPolymorphic" | "DEPRECATED_ConditioningPolymorphic" | "DEPRECATED_ControlPolymorphic" | "DEPRECATED_FloatPolymorphic" | "DEPRECATED_ImagePolymorphic" | "DEPRECATED_IntegerPolymorphic" | "DEPRECATED_LatentsPolymorphic" | "DEPRECATED_StringPolymorphic" | "DEPRECATED_UNet" | "DEPRECATED_Vae" | "DEPRECATED_CLIP" | "DEPRECATED_Collection" | "DEPRECATED_CollectionItem" | "DEPRECATED_Enum" | "DEPRECATED_WorkflowField" | "DEPRECATED_IsIntermediate" | "DEPRECATED_BoardField" | "DEPRECATED_MetadataItem" | "DEPRECATED_MetadataItemCollection" | "DEPRECATED_MetadataItemPolymorphic" | "DEPRECATED_MetadataDict"; /** UNetField */ UNetField: { /** @description Info to load unet submodel */ diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index a445a35c2c..086ff3296f 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -240,6 +240,10 @@ export const isImagen4ModelConfig = (config: AnyModelConfig): config is ApiModel return config.type === 'main' && config.base === 'imagen4'; }; +export const isFluxKontextModelConfig = (config: AnyModelConfig): config is ApiModelConfig => { + return config.type === 'main' && config.base === 'flux-kontext'; +}; + export const isNonRefinerMainModelConfig = (config: AnyModelConfig): config is MainModelConfig => { return config.type === 'main' && config.base !== 'sdxl-refiner'; };