Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
035425ef24
- Address database feedback:
  - Remove all the extraneous tables. Only an `images` table now:
    - `image_type` and `image_category` are unrestricted strings. When creating images, the provided values are checked to ensure they are a valid type and category.
    - Add `updated_at` and `deleted_at` columns. `deleted_at` is currently unused.
      - Use SQLite's built-in timestamp features to populate these. Add a trigger to update `updated_at` when the row is updated. Currently no way to update a row.
    - Rename the `id` column in `images` to `image_name`.
- Rename `ImageCategory.IMAGE` to `ImageCategory.GENERAL`.
- Move all exceptions outside their base classes to make them more portable.
- Add `width` and `height` columns to the database. These store the actual dimensions of the image file, whereas the metadata's `width` and `height` refer to the respective generation parameters and are nullable.
- Make `deserialize_image_record` take a `dict` instead of `sqlite3.Row`.
- Improve comments throughout.
- Tidy up unused code/files and some minor organisation.
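The `updated_at` trigger described above is not part of the file below. As a rough illustration only, here is a minimal sketch of how such a table and trigger could look using SQLite's built-in timestamp features; the column set and trigger name are assumptions based on the commit message, not the actual InvokeAI schema.

import sqlite3

# Sketch of an `images` table with automatic timestamps (assumed schema).
conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE IF NOT EXISTS images (
        image_name     TEXT NOT NULL PRIMARY KEY,
        image_type     TEXT NOT NULL,  -- unrestricted string, validated in app code
        image_category TEXT NOT NULL,  -- unrestricted string, validated in app code
        width          INTEGER,
        height         INTEGER,
        -- SQLite's built-in timestamp functions populate these on insert.
        created_at DATETIME NOT NULL DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
        updated_at DATETIME NOT NULL DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
        deleted_at DATETIME  -- currently unused
    );
    -- Keep `updated_at` current whenever a row changes.
    CREATE TRIGGER IF NOT EXISTS tg_images_updated_at
    AFTER UPDATE ON images FOR EACH ROW
    BEGIN
        UPDATE images
        SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
        WHERE image_name = OLD.image_name;
    END;
    """
)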
92 lines · 3.5 KiB · Python
from typing import Optional

from pydantic import BaseModel, Extra, Field, StrictFloat, StrictInt, StrictStr


class ImageMetadata(BaseModel):
    """
    Core generation metadata for an image/tensor generated in InvokeAI.

    Also includes any metadata from the image's PNG tEXt chunks.

    Generated by traversing the execution graph, collecting the parameters of the nearest ancestors
    of a given node.

    Full metadata may be accessed by querying for the session in the `graph_executions` table.
    """

    class Config:
        extra = Extra.allow
        """
        This lets the ImageMetadata class accept arbitrary additional fields. The CoreMetadataService
        won't add any fields that are not already defined, but a different metadata service
        implementation might.
        """

    type: Optional[StrictStr] = Field(
        default=None,
        description="The type of the ancestor node of the image output node.",
    )
    """The type of the ancestor node of the image output node."""
    positive_conditioning: Optional[StrictStr] = Field(
        default=None, description="The positive conditioning."
    )
    """The positive conditioning"""
    negative_conditioning: Optional[StrictStr] = Field(
        default=None, description="The negative conditioning."
    )
    """The negative conditioning"""
    width: Optional[StrictInt] = Field(
        default=None, description="Width of the image/latents in pixels."
    )
    """Width of the image/latents in pixels"""
    height: Optional[StrictInt] = Field(
        default=None, description="Height of the image/latents in pixels."
    )
    """Height of the image/latents in pixels"""
    seed: Optional[StrictInt] = Field(
        default=None, description="The seed used for noise generation."
    )
    """The seed used for noise generation"""
    cfg_scale: Optional[StrictFloat] = Field(
        default=None, description="The classifier-free guidance scale."
    )
    """The classifier-free guidance scale"""
    steps: Optional[StrictInt] = Field(
        default=None, description="The number of steps used for inference."
    )
    """The number of steps used for inference"""
    scheduler: Optional[StrictStr] = Field(
        default=None, description="The scheduler used for inference."
    )
    """The scheduler used for inference"""
    model: Optional[StrictStr] = Field(
        default=None, description="The model used for inference."
    )
    """The model used for inference"""
    strength: Optional[StrictFloat] = Field(
        default=None,
        description="The strength used for image-to-image/latents-to-latents.",
    )
    """The strength used for image-to-image/latents-to-latents."""
    latents: Optional[StrictStr] = Field(
        default=None, description="The ID of the initial latents."
    )
    """The ID of the initial latents"""
    vae: Optional[StrictStr] = Field(
        default=None, description="The VAE used for decoding."
    )
    """The VAE used for decoding"""
    unet: Optional[StrictStr] = Field(
        default=None, description="The UNet used for inference."
    )
    """The UNet used for inference"""
    clip: Optional[StrictStr] = Field(
        default=None, description="The CLIP Encoder used for conditioning."
    )
    """The CLIP Encoder used for conditioning"""
    extra: Optional[StrictStr] = Field(
        default=None,
        description="Uploaded image metadata, extracted from the PNG tEXt chunk.",
    )
    """Uploaded image metadata, extracted from the PNG tEXt chunk."""
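As a hypothetical usage sketch (not part of the repository): the strict field types reject coerced values, while `Extra.allow` admits arbitrary extra fields, as the Config docstring above describes.

from pydantic import ValidationError

metadata = ImageMetadata(
    type="txt2img",
    seed=123456,
    cfg_scale=7.5,
    steps=30,
    custom_field="anything",  # accepted because Config.extra = Extra.allow
)
print(metadata.dict(exclude_none=True))

try:
    ImageMetadata(seed="123456")  # StrictInt rejects strings, even numeric ones
except ValidationError as err:
    print(err)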