diff --git a/invokeai/frontend/web/src/services/api/models/ImageMetadata.ts b/invokeai/frontend/web/src/services/api/models/ImageMetadata.ts
index 023d75fc56..76c0155e97 100644
--- a/invokeai/frontend/web/src/services/api/models/ImageMetadata.ts
+++ b/invokeai/frontend/web/src/services/api/models/ImageMetadata.ts
@@ -7,11 +7,16 @@
  *
  * Also includes any metadata from the image's PNG tEXt chunks.
  *
- * Generated by traversing the execution graph, collecting the parameters of the nearest ancestors of a given node.
+ * Generated by traversing the execution graph, collecting the parameters of the nearest ancestors
+ * of a given node.
  *
  * Full metadata may be accessed by querying for the session in the `graph_executions` table.
  */
 export type ImageMetadata = {
+  /**
+   * The type of the ancestor node of the image output node.
+   */
+  type?: string;
   /**
    * The positive conditioning.
    */
@@ -21,11 +26,11 @@ export type ImageMetadata = {
    */
   negative_conditioning?: string;
   /**
-   * Width of the image/tensor in pixels.
+   * Width of the image/latents in pixels.
    */
   width?: number;
   /**
-   * Height of the image/tensor in pixels.
+   * Height of the image/latents in pixels.
    */
   height?: number;
   /**
@@ -49,19 +54,27 @@ export type ImageMetadata = {
    */
   model?: string;
   /**
-   * The strength used for image-to-image/tensor-to-tensor.
+   * The strength used for image-to-image/latents-to-latents.
    */
   strength?: number;
   /**
-   * The ID of the initial image.
+   * The ID of the initial latents.
    */
-  image?: string;
+  latents?: string;
   /**
-   * The ID of the initial tensor.
+   * The VAE used for decoding.
    */
-  tensor?: string;
+  vae?: string;
   /**
-   * Extra metadata, extracted from the PNG tEXt chunk.
+   * The UNet used for inference.
+   */
+  unet?: string;
+  /**
+   * The CLIP Encoder used for conditioning.
+   */
+  clip?: string;
+  /**
+   * Uploaded image metadata, extracted from the PNG tEXt chunk.
    */
   extra?: string;
 };
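For reference, a minimal sketch of how a consumer of the generated model might read the renamed and newly added fields after this change. The import path and all metadata values below are illustrative assumptions, not code or data from the InvokeAI repository:

```ts
// A minimal sketch, assuming the generated model is importable from this path;
// the metadata values are hypothetical, not taken from a real session.
import type { ImageMetadata } from './services/api/models/ImageMetadata';

const metadata: ImageMetadata = {
  type: 'txt2img',                        // new: ancestor node type
  positive_conditioning: 'a lighthouse at dusk',
  width: 512,
  height: 512,
  latents: 'some-latents-id',             // replaces the old `image`/`tensor` IDs
  vae: 'some-vae-name',                   // new: VAE used for decoding
};

// Every field is optional, so guard before use.
const size =
  metadata.width !== undefined && metadata.height !== undefined
    ? `${metadata.width}x${metadata.height}`
    : 'unknown size';
console.log(`${metadata.type ?? 'unknown node'} (${size})`);
```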