2023-09-17 00:14:58 +00:00
|
|
|
from builtins import float
|
|
|
|
from typing import List, Union
|
2023-09-15 03:06:57 +00:00
|
|
|
|
2024-02-06 03:56:32 +00:00
|
|
|
from pydantic import BaseModel, Field, field_validator, model_validator
|
|
|
|
from typing_extensions import Self
|
2023-09-06 17:36:00 +00:00
|
|
|
|
2024-03-23 20:10:28 +00:00
|
|
|
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
2024-03-08 10:37:00 +00:00
|
|
|
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
|
2024-03-09 08:43:24 +00:00
|
|
|
from invokeai.app.invocations.model import ModelIdentifierField
|
2023-09-06 17:36:00 +00:00
|
|
|
from invokeai.app.invocations.primitives import ImageField
|
2024-01-02 00:13:49 +00:00
|
|
|
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
2024-02-05 06:16:35 +00:00
|
|
|
from invokeai.app.services.shared.invocation_context import InvocationContext
|
2024-03-23 20:10:28 +00:00
|
|
|
from invokeai.backend.model_manager.config import (
|
|
|
|
AnyModelConfig,
|
|
|
|
BaseModelType,
|
|
|
|
IPAdapterCheckpointConfig,
|
|
|
|
IPAdapterDiffusersConfig,
|
|
|
|
ModelType,
|
|
|
|
)
|
feat(api): chore: pydantic & fastapi upgrade
Upgrade pydantic and fastapi to latest.
- pydantic~=2.4.2
- fastapi~=0.103.2
- fastapi-events~=0.9.1
**Big Changes**
There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialize and deserialize models, but there are a few more complex changes.
**Invocations**
The biggest change relates to invocation creation, instantiation and validation.
Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie.
Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`.
With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problem with concurrent access to the invocation classes and is not a free operation.
This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and `BaseInvocation.invoke_internal()` method.
In the end, this implementation is cleaner.
**Invocation Fields**
In pydantic v2, you can no longer directly add or remove fields from a model.
Previously, we did this to add the `type` field to invocations.
**Invocation Decorators**
With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper.
A similar technique is used for `invocation_output()`.
**Minor Changes**
There are a number of minor changes around the pydantic v2 models API.
**Protected `model_` Namespace**
All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_".
Fortunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must set the protected namespaces to an empty tuple.
```py
class IPAdapterModelField(BaseModel):
model_name: str = Field(description="Name of the IP-Adapter model")
base_model: BaseModelType = Field(description="Base model")
model_config = ConfigDict(protected_namespaces=())
```
**Model Serialization**
Pydantic models no longer have `Model.dict()` or `Model.json()`.
Instead, we use `Model.model_dump()` or `Model.model_dump_json()`.
**Model Deserialization**
Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions.
Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model.
```py
adapter_graph = TypeAdapter(Graph)
deserialized_graph_from_json = adapter_graph.validate_json(graph_json)
deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict)
```
**Field Customisation**
Pydantic `Field`s no longer accept arbitrary args.
Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field.
**Schema Customisation**
FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec.
This necessitates two changes:
- Our schema customization logic has been revised
- Schema parsing to build node templates has been revised
The specifics aren't important, but this does present additional surface area for bugs.
**Performance Improvements**
Pydantic v2 is a full rewrite with a rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very very often - a couple times per node.
I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very large graphs - like with massive iterators - are much, much faster.
2023-09-24 08:11:07 +00:00
|
|
|
|
2023-09-14 15:57:53 +00:00
|
|
|
|
2023-09-06 17:36:00 +00:00
|
|
|
class IPAdapterField(BaseModel):
    """Conditioning data for a single IP-Adapter, passed from the IP-Adapter node to consumers."""

    # One image prompt, or a list of image prompts processed by the same adapter.
    image: Union[ImageField, List[ImageField]] = Field(description="The IP-Adapter image prompt(s).")
    ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model to use.")
    image_encoder_model: ModelIdentifierField = Field(description="The name of the CLIP image encoder model.")
    # Fixed: description previously said "ControlNet" (copy-paste error); this field
    # weights the IP-Adapter, matching IPAdapterInvocation.weight.
    weight: Union[float, List[float]] = Field(default=1, description="The weight given to the IP-Adapter")
    begin_step_percent: float = Field(
        default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
    )
    end_step_percent: float = Field(
        default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
    )

    @field_validator("weight")
    @classmethod
    def validate_ip_adapter_weight(cls, v: float) -> float:
        # Delegate to the shared helper; it raises if any weight is out of range.
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def validate_begin_end_step_percent(self) -> Self:
        # Cross-field check: begin_step_percent must not exceed end_step_percent.
        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
        return self
|
|
|
|
|
2023-09-06 17:36:00 +00:00
|
|
|
|
|
|
|
@invocation_output("ip_adapter_output")
class IPAdapterOutput(BaseInvocationOutput):
    """Output of the IP-Adapter node: the assembled IPAdapterField conditioning data."""

    # Outputs
    ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")
|
2023-09-06 17:36:00 +00:00
|
|
|
|
|
|
|
|
2024-03-19 11:08:16 +00:00
|
|
|
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.2.2")
class IPAdapterInvocation(BaseInvocation):
    """Collects IP-Adapter info to pass to other nodes."""

    # Inputs
    image: Union[ImageField, List[ImageField]] = InputField(description="The IP-Adapter image prompt(s).")
    ip_adapter_model: ModelIdentifierField = InputField(
        description="The IP-Adapter model.",
        title="IP-Adapter Model",
        input=Input.Direct,
        ui_order=-1,
        ui_type=UIType.IPAdapterModel,
    )
    weight: Union[float, List[float]] = InputField(
        default=1, description="The weight given to the IP-Adapter", title="Weight"
    )
    begin_step_percent: float = InputField(
        default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
    )
    end_step_percent: float = InputField(
        default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
    )

    @field_validator("weight")
    @classmethod
    def validate_ip_adapter_weight(cls, v: float) -> float:
        # Delegate to the shared helper; it raises if any weight is out of range.
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def validate_begin_end_step_percent(self) -> Self:
        # Cross-field check: begin_step_percent must not exceed end_step_percent.
        validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
        return self

    def invoke(self, context: InvocationContext) -> IPAdapterOutput:
        # Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
        ip_adapter_info = context.models.get_config(self.ip_adapter_model.key)
        assert isinstance(ip_adapter_info, (IPAdapterDiffusersConfig, IPAdapterCheckpointConfig))
        # Diffusers-format configs record their paired encoder repo id; checkpoint-format
        # models fall back to the stock SD image encoder.
        image_encoder_model_id = (
            ip_adapter_info.image_encoder_model_id
            if isinstance(ip_adapter_info, IPAdapterDiffusersConfig)
            else "InvokeAI/ip_adapter_sd_image_encoder"
        )
        # The installed model is registered under the final path segment of the repo id.
        image_encoder_model_name = image_encoder_model_id.split("/")[-1].strip()
        image_encoder_model = self._get_image_encoder(context, image_encoder_model_name)
        return IPAdapterOutput(
            ip_adapter=IPAdapterField(
                image=self.image,
                ip_adapter_model=self.ip_adapter_model,
                image_encoder_model=ModelIdentifierField.from_config(image_encoder_model),
                weight=self.weight,
                begin_step_percent=self.begin_step_percent,
                end_step_percent=self.end_step_percent,
            ),
        )

    def _get_image_encoder(self, context: InvocationContext, image_encoder_model_name: str) -> AnyModelConfig:
        """Return the config of the installed CLIP Vision encoder, installing it on demand.

        Searches the model manager for the named encoder; if absent, attempts a one-shot
        download/install and searches again.

        Raises:
            RuntimeError: if exactly one matching model is not available after the
                install attempt (previously this was an unbounded retry loop that could
                spin forever when installation repeatedly failed to register the model).
        """
        image_encoder_models: List[AnyModelConfig] = []
        for attempt in range(2):  # initial search + at most one install-and-retry
            image_encoder_models = context.models.search_by_attrs(
                name=image_encoder_model_name, base=BaseModelType.Any, type=ModelType.CLIPVision
            )
            if image_encoder_models:
                break
            if attempt == 0:
                context.logger.warning(
                    f"The image encoder required by this IP Adapter ({image_encoder_model_name}) is not installed."
                )
                context.logger.warning("Downloading and installing now. This may take a while.")
                # NOTE(review): reaches into the private `_services` attribute because
                # InvocationContext exposes no public install API — confirm before relying on it.
                installer = context._services.model_manager.install
                job = installer.heuristic_import(f"InvokeAI/{image_encoder_model_name}")
                installer.wait_for_job(job, timeout=600)  # wait up to 10 minutes - then raise a TimeoutException
        if len(image_encoder_models) != 1:
            # Replaces a bare `assert` (stripped under -O) with an actionable error.
            raise RuntimeError(
                f"Expected exactly one CLIP Vision model named '{image_encoder_model_name}', "
                f"found {len(image_encoder_models)}."
            )
        return image_encoder_models[0]
|