# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

import inspect
import math
from contextlib import ExitStack
from functools import singledispatchmethod
from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union

import einops
import numpy as np
import numpy.typing as npt
import torch
import torchvision
import torchvision.transforms as T
from diffusers.configuration_utils import ConfigMixin
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.adapter import T2IAdapter
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
from diffusers.schedulers.scheduling_tcd import TCDScheduler
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
from PIL import Image, ImageFilter
from pydantic import field_validator
from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPVisionModelWithProjection

from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR, SCHEDULER_NAME_VALUES
from invokeai.app.invocations.fields import (
    ConditioningField,
    DenoiseMaskField,
    FieldDescriptions,
    ImageField,
    Input,
    InputField,
    LatentsField,
    OutputField,
    UIType,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.ip_adapter import IPAdapterField
from invokeai.app.invocations.primitives import DenoiseMaskOutput, ImageOutput, LatentsOutput
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.lora import LoRAModelRaw
from invokeai.backend.model_manager import BaseModelType, LoadedModel
from invokeai.backend.model_manager.config import MainConfigBase, ModelVariantType
from invokeai.backend.model_patcher import ModelPatcher
from invokeai.backend.stable_diffusion import PipelineIntermediateState, set_seamless
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
    BasicConditioningInfo,
    IPAdapterConditioningInfo,
    IPAdapterData,
    Range,
    SDXLConditioningInfo,
    TextConditioningData,
    TextConditioningRegions,
)
from invokeai.backend.util.mask import to_standard_float_mask
from invokeai.backend.util.silence_warnings import SilenceWarnings

from ...backend.stable_diffusion.diffusers_pipeline import (
    ControlNetData,
    StableDiffusionGeneratorPipeline,
    T2IAdapterData,
    image_resized_to_grid_as_tensor,
)
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import TorchDevice
from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from .controlnet_image_processors import ControlField
from .model import ModelIdentifierField, UNetField, VAEField

DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
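# NOTE: TorchDevice.choose_torch_dtype() returns a torch.dtype, so the `fp32` field
# defaults below compare against torch.float32. (Comparing the dtype to the string
# "float32" would always evaluate to False.)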


@invocation_output("scheduler_output")
class SchedulerOutput(BaseInvocationOutput):
    scheduler: SCHEDULER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler)


@invocation(
    "scheduler",
    title="Scheduler",
    tags=["scheduler"],
    category="latents",
    version="1.0.0",
)
class SchedulerInvocation(BaseInvocation):
    """Selects a scheduler."""

    scheduler: SCHEDULER_NAME_VALUES = InputField(
        default="euler",
        description=FieldDescriptions.scheduler,
        ui_type=UIType.Scheduler,
    )

    def invoke(self, context: InvocationContext) -> SchedulerOutput:
        return SchedulerOutput(scheduler=self.scheduler)


@invocation(
    "create_denoise_mask",
    title="Create Denoise Mask",
    tags=["mask", "denoise"],
    category="latents",
    version="1.0.2",
)
class CreateDenoiseMaskInvocation(BaseInvocation):
    """Creates a mask for a denoising model run."""

    vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0)
    image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
    mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
    fp32: bool = InputField(
        default=DEFAULT_PRECISION == torch.float32,
        description=FieldDescriptions.fp32,
        ui_order=4,
    )

    def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
        if mask_image.mode != "L":
            mask_image = mask_image.convert("L")
        mask_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
        if mask_tensor.dim() == 3:
            mask_tensor = mask_tensor.unsqueeze(0)
        # if shape is not None:
        #     mask_tensor = tv_resize(mask_tensor, shape, T.InterpolationMode.BILINEAR)
        return mask_tensor
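
    # Shape sketch: a 512x512 "L"-mode PIL mask comes back from image_resized_to_grid_as_tensor
    # as a (1, 512, 512) tensor (both dimensions resized to a multiple of 8 to match the latent
    # grid, values kept in [0, 1] because normalize=False); the unsqueeze above then makes it
    # a (1, 1, 512, 512) NCHW batch.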

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> DenoiseMaskOutput:
        if self.image is not None:
            image = context.images.get_pil(self.image.image_name)
            image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
            if image_tensor.dim() == 3:
                image_tensor = image_tensor.unsqueeze(0)
        else:
            image_tensor = None

        mask = self.prep_mask_tensor(
            context.images.get_pil(self.mask.image_name),
        )

        if image_tensor is not None:
            vae_info = context.models.load(self.vae.vae)

            img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
            masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
            # TODO:
            masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())

            masked_latents_name = context.tensors.save(tensor=masked_latents)
        else:
            masked_latents_name = None

        mask_name = context.tensors.save(tensor=mask)

        return DenoiseMaskOutput.build(
            mask_name=mask_name,
            masked_latents_name=masked_latents_name,
            gradient=False,
        )
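
# NOTE: downstream, the saved mask limits which latent pixels the denoise step may change,
# while masked_latents (the VAE-encoded masked image) is only consumed by inpainting-variant
# UNets, which take it as extra input channels.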


@invocation_output("gradient_mask_output")
class GradientMaskOutput(BaseInvocationOutput):
    """Outputs a denoise mask and an image representing the total gradient of the mask."""

    denoise_mask: DenoiseMaskField = OutputField(description="Mask for denoise model run")
    expanded_mask_area: ImageField = OutputField(
        description="Image representing the total gradient area of the mask. For paste-back purposes."
    )


@invocation(
    "create_gradient_mask",
    title="Create Gradient Mask",
    tags=["mask", "denoise"],
    category="latents",
    version="1.1.0",
)
class CreateGradientMaskInvocation(BaseInvocation):
    """Creates a gradient mask for a denoising model run."""

    mask: ImageField = InputField(default=None, description="Image which will be masked", ui_order=1)
    edge_radius: int = InputField(
        default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2
    )
    coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3)
    minimum_denoise: float = InputField(
        default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
    )
    image: Optional[ImageField] = InputField(
        default=None,
        description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
        title="[OPTIONAL] Image",
        ui_order=6,
    )
    unet: Optional[UNetField] = InputField(
        description="OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE",
        default=None,
        input=Input.Connection,
        title="[OPTIONAL] UNet",
        ui_order=5,
    )
    vae: Optional[VAEField] = InputField(
        default=None,
        description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
        title="[OPTIONAL] VAE",
        input=Input.Connection,
        ui_order=7,
    )
    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
    fp32: bool = InputField(
        default=DEFAULT_PRECISION == torch.float32,
        description=FieldDescriptions.fp32,
        ui_order=9,
    )

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> GradientMaskOutput:
        mask_image = context.images.get_pil(self.mask.image_name, mode="L")
        if self.edge_radius > 0:
            if self.coherence_mode == "Box Blur":
                blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius))
            else:  # Gaussian Blur OR Staged
                # Gaussian Blur uses standard deviation. 1/2 radius is a good approximation
                blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2))

            blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False)

            # redistribute blur so that the original edges are 0 and blur outwards to 1
            blur_tensor = (blur_tensor - 0.5) * 2
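
            # Worked example: a pixel blurred to 0.75 becomes (0.75 - 0.5) * 2 = 0.5, the
            # original mask edge (0.5 after blurring) maps to 0.0, fully-masked interior
            # pixels stay at 1.0, and values that were below 0.5 (outside the original
            # mask) go negative.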

            threshold = 1 - self.minimum_denoise

            if self.coherence_mode == "Staged":
                # wherever the blur_tensor is less than fully masked, convert it to threshold
                blur_tensor = torch.where((blur_tensor < 1) & (blur_tensor > 0), threshold, blur_tensor)
            else:
                # wherever the blur_tensor is above threshold but less than 1, drop it to threshold
                blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor)

        else:
            blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)

        mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1))

        # compute a [0, 1] mask from the blur_tensor
        expanded_mask = torch.where((blur_tensor < 1), 0, 1)
        expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
        expanded_image_dto = context.images.save(expanded_mask_image)

        masked_latents_name = None
        if self.unet is not None and self.vae is not None and self.image is not None:
            # all three fields must be present at the same time
            main_model_config = context.models.get_config(self.unet.unet.key)
            assert isinstance(main_model_config, MainConfigBase)
            if main_model_config.variant is ModelVariantType.Inpaint:
                mask = blur_tensor
                vae_info: LoadedModel = context.models.load(self.vae.vae)
                image = context.images.get_pil(self.image.image_name)
                image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
                if image_tensor.dim() == 3:
                    image_tensor = image_tensor.unsqueeze(0)
                img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
                masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
                masked_latents = ImageToLatentsInvocation.vae_encode(
                    vae_info, self.fp32, self.tiled, masked_image.clone()
                )
                masked_latents_name = context.tensors.save(tensor=masked_latents)

        return GradientMaskOutput(
            denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
            expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
        )
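
# NOTE: unlike CreateDenoiseMaskInvocation (gradient=False), this node marks the
# DenoiseMaskField with gradient=True, so the denoise step can treat the mask as a
# continuous per-pixel denoise strength rather than a hard keep/replace boundary.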


def get_scheduler(
    context: InvocationContext,
    scheduler_info: ModelIdentifierField,
    scheduler_name: str,
    seed: int,
) -> Scheduler:
    scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"])
    orig_scheduler_info = context.models.load(scheduler_info)
    with orig_scheduler_info as orig_scheduler:
        scheduler_config = orig_scheduler.config

    if "_backup" in scheduler_config:
        scheduler_config = scheduler_config["_backup"]
    scheduler_config = {
        **scheduler_config,
        **scheduler_extra_config,  # FIXME
        "_backup": scheduler_config,
    }
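
    # The unmodified config is stashed under "_backup" so that a later get_scheduler() call
    # can strip these scheduler-specific overrides (see the "_backup" check above) instead
    # of layering them on top of one another.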

    # make dpmpp_sde reproducible (the seed can only be passed to the initializer)
    if scheduler_class is DPMSolverSDEScheduler:
        scheduler_config["noise_sampler_seed"] = seed

    scheduler = scheduler_class.from_config(scheduler_config)

    # hack copied over from generate.py
    if not hasattr(scheduler, "uses_inpainting_model"):
        scheduler.uses_inpainting_model = lambda: False
    assert isinstance(scheduler, Scheduler)
    return scheduler
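
# Example call (a sketch; in this module the scheduler identifier typically comes from a
# UNetField, e.g. inside DenoiseLatentsInvocation):
#
#     scheduler = get_scheduler(
#         context=context,
#         scheduler_info=self.unet.scheduler,
#         scheduler_name=self.scheduler,
#         seed=seed,
#     )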


@invocation(
    "denoise_latents",
    title="Denoise Latents",
    tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
    category="latents",
    version="1.5.3",
)
class DenoiseLatentsInvocation(BaseInvocation):
    """Denoises noisy latents to decodable images"""

    positive_conditioning: Union[ConditioningField, list[ConditioningField]] = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection, ui_order=0
    )
    negative_conditioning: Union[ConditioningField, list[ConditioningField]] = InputField(
        description=FieldDescriptions.negative_cond, input=Input.Connection, ui_order=1
    )
    noise: Optional[LatentsField] = InputField(
        default=None,
        description=FieldDescriptions.noise,
        input=Input.Connection,
        ui_order=3,
    )
    steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
    cfg_scale: Union[float, List[float]] = InputField(
        default=7.5, description=FieldDescriptions.cfg_scale, title="CFG Scale"
    )
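    # cfg_scale is a "polymorphic" field: it accepts either a single guidance value or a
    # list of values (one per denoising step).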
    denoising_start: float = InputField(
        default=0.0,
        ge=0,
        le=1,
        description=FieldDescriptions.denoising_start,
    )
    denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
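    # denoising_start/denoising_end select the slice of the noise schedule this node runs,
    # which lets multi-stage graphs (e.g. an SDXL base-to-refiner handoff, or img2img)
    # split a single denoise across several nodes.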
    scheduler: SCHEDULER_NAME_VALUES = InputField(
        default="euler",
        description=FieldDescriptions.scheduler,
        ui_type=UIType.Scheduler,
    )
    unet: UNetField = InputField(
        description=FieldDescriptions.unet,
        input=Input.Connection,
        title="UNet",
        ui_order=2,
    )
|
feat(api): chore: pydantic & fastapi upgrade
Upgrade pydantic and fastapi to latest.
- pydantic~=2.4.2
- fastapi~=103.2
- fastapi-events~=0.9.1
**Big Changes**
There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialized and deserialize models, but there are a few more complex changes.
**Invocations**
The biggest change relates to invocation creation, instantiation and validation.
Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie.
Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`.
With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problem with concurrent access to the invocation classes and is not a free operation.
This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and `BaseInvocation.invoke_internal()` method.
In the end, this implementation is cleaner.
**Invocation Fields**
In pydantic v2, you can no longer directly add or remove fields from a model.
Previously, we did this to add the `type` field to invocations.
**Invocation Decorators**
With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper.
A similar technique is used for `invocation_output()`.
**Minor Changes**
There are a number of minor changes around the pydantic v2 models API.
**Protected `model_` Namespace**
All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_".
Forunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must tell set the protected namespaces to an empty tuple.
```py
class IPAdapterModelField(BaseModel):
model_name: str = Field(description="Name of the IP-Adapter model")
base_model: BaseModelType = Field(description="Base model")
model_config = ConfigDict(protected_namespaces=())
```
**Model Serialization**
Pydantic models no longer have `Model.dict()` or `Model.json()`.
Instead, we use `Model.model_dump()` or `Model.model_dump_json()`.
**Model Deserialization**
Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions.
Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model.
```py
adapter_graph = TypeAdapter(Graph)
deserialized_graph_from_json = adapter_graph.validate_json(graph_json)
deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict)
```
**Field Customisation**
Pydantic `Field`s no longer accept arbitrary args.
Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field.
**Schema Customisation**
FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec.
This necessitates two changes:
- Our schema customization logic has been revised
- Schema parsing to build node templates has been revised
The specific aren't important, but this does present additional surface area for bugs.
**Performance Improvements**
Pydantic v2 is a full rewrite with a rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very very often - a couple times per node.
I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very larges graphs - like with massive iterators - are much, much faster.
2023-09-24 08:11:07 +00:00
|
|
|
control: Optional[Union[ControlField, list[ControlField]]] = InputField(
|
feat: polymorphic fields
Initial support for polymorphic field types. Polymorphic types are a single of or list of a specific type. For example, `Union[str, list[str]]`.
Polymorphics do not yet have support for direct input in the UI (will come in the future). They will be forcibly set as Connection-only fields, in which case users will not be able to provide direct input to the field.
If a polymorphic should present as a singleton type - which would allow direct input - the node must provide an explicit type hint.
For example, `DenoiseLatents`' `CFG Scale` is polymorphic, but in the node editor, we want to present this as a number input. In the node definition, the field is given `ui_type=UIType.Float`, which tells the UI to treat this as a `float` field.
The connection validation logic will prevent connecting a collection to `CFG Scale` in this situation, because it is typed as `float`. The workaround is to disable validation from the settings to make this specific connection. A future improvement will resolve this.
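As a rough illustration (field name and values assumed, not the exact `DenoiseLatents` source), a polymorphic field with a singleton type hint might be declared like this:
```py
# A polymorphic field: accepts a single float or a list of floats.
# ui_type=UIType.Float tells the UI to present it as a plain number input.
cfg_scale: Union[float, list[float]] = InputField(
    default=7.5,
    description="CFG scale, one value for all steps or one value per step",
    ui_type=UIType.Float,
)
```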
This also introduces better support for collection field types. Like polymorphics, collection types are parsed automatically by the client and do not need any specific type hints.
Also like polymorphics, there is no support yet for direct input of collection types in the UI.
- Disabling validation in workflow editor now displays the visual hints for valid connections, but lets you connect to anything.
- Added `ui_order: int` to `InputField` and `OutputField`. The UI will use this, if present, to order fields in a node UI. See usage in `DenoiseLatents` for an example.
- Updated the field colors - duplicate colors have just been lightened a bit. It's not perfect but it was a quick fix.
- Field handles for collections are the same color as their single counterparts, but have a dark dot in the center of them.
- Field handles for polymorphics are a rounded square with dot in the middle.
- Removed all fields that just render `null` from `InputFieldRenderer`, replaced with a single fallback
- Removed logic in `zValidatedWorkflow`, which checked for existence of node templates for each node in a workflow. This logic introduced a circular dependency, due to importing the global redux `store` in order to get the node templates within a zod schema. It's actually fine to just leave this out entirely; The case of a missing node template is handled by the UI. Fixing it otherwise would introduce a substantial headache.
- Fixed the `ControlNetInvocation.control_model` field default, which was a string when it shouldn't have been one.
2023-09-01 09:40:27 +00:00
|
|
|
default=None,
|
|
|
|
input=Input.Connection,
|
|
|
|
ui_order=5,
|
2023-08-22 06:23:20 +00:00
|
|
|
)
|
2023-09-21 21:46:05 +00:00
|
|
|
ip_adapter: Optional[Union[IPAdapterField, list[IPAdapterField]]] = InputField(
|
feat(api): chore: pydantic & fastapi upgrade
Upgrade pydantic and fastapi to latest.
- pydantic~=2.4.2
- fastapi~=0.103.2
- fastapi-events~=0.9.1
**Big Changes**
There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialize and deserialize models, but there are a few more complex changes.
**Invocations**
The biggest change relates to invocation creation, instantiation and validation.
Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie.
Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`.
With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problems with concurrent access to the invocation classes, and rebuilding is not a free operation.
This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and `BaseInvocation.invoke_internal()` method.
In the end, this implementation is cleaner.
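As a rough sketch of the idea (the `orig_required` flag appears in later commits; the surrounding logic here is assumed for illustration, not the actual `baseinvocation.py` code):
```py
# Keep fields Optional at the pydantic level so invocations can be built
# incrementally, but record the "really required" flag in json_schema_extra
# and enforce it only when invoke() is actually called.
def invoke_internal(self, context):
    for name, field in self.model_fields.items():
        extra = field.json_schema_extra or {}
        if isinstance(extra, dict) and extra.get("orig_required") and getattr(self, name) is None:
            raise ValueError(f"Missing required input field: {name}")
    return self.invoke(context)
```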
**Invocation Fields**
In pydantic v2, you can no longer directly add or remove fields from a model.
Previously, we did this to add the `type` field to invocations.
**Invocation Decorators**
With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper.
A similar technique is used for `invocation_output()`.
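A minimal sketch of the technique (illustrative only - the real `invocation()` wrapper does more than this):
```py
from typing import Literal

from pydantic import create_model


def with_type_field(cls, invocation_type: str):
    # Fields can't be added to an existing v2 model, so build a new model that
    # subclasses the invocation and adds a literal `type` field.
    return create_model(
        cls.__name__,
        __base__=cls,
        type=(Literal[invocation_type], invocation_type),  # (annotation, default)
    )
```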
**Minor Changes**
There are a number of minor changes around the pydantic v2 models API.
**Protected `model_` Namespace**
All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_".
Fortunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must set the protected namespaces to an empty tuple.
```py
class IPAdapterModelField(BaseModel):
model_name: str = Field(description="Name of the IP-Adapter model")
base_model: BaseModelType = Field(description="Base model")
model_config = ConfigDict(protected_namespaces=())
```
**Model Serialization**
Pydantic models no longer have `Model.dict()` or `Model.json()`.
Instead, we use `Model.model_dump()` or `Model.model_dump_json()`.
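For example, with a trivial made-up model:
```py
from pydantic import BaseModel


class Widget(BaseModel):
    name: str = "sprocket"


w = Widget()
data = w.model_dump()           # replaces w.dict()
json_str = w.model_dump_json()  # replaces w.json()
```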
**Model Deserialization**
Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions.
Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model.
```py
adapter_graph = TypeAdapter(Graph)
deserialized_graph_from_json = adapter_graph.validate_json(graph_json)
deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict)
```
**Field Customisation**
Pydantic `Field`s no longer accept arbitrary args.
Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field.
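For example (the `ui_order` key here is just an illustrative extra, not a pydantic built-in):
```py
from pydantic import BaseModel, Field


class Widget(BaseModel):
    # v1 allowed Field(description=..., ui_order=1); v2 requires the extras
    # to be collected under json_schema_extra.
    name: str = Field(description="Widget name", json_schema_extra={"ui_order": 1})
```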
**Schema Customisation**
FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec.
This necessitates two changes:
- Our schema customization logic has been revised
- Schema parsing to build node templates has been revised
The specifics aren't important, but this does present additional surface area for bugs.
**Performance Improvements**
Pydantic v2 is a full rewrite with a rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very, very often - a couple of times per node.
I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very large graphs - like those with massive iterators - are much, much faster.
2023-09-24 08:11:07 +00:00
|
|
|
description=FieldDescriptions.ip_adapter,
|
|
|
|
title="IP-Adapter",
|
|
|
|
default=None,
|
|
|
|
input=Input.Connection,
|
|
|
|
ui_order=6,
|
|
|
|
)
|
|
|
|
t2i_adapter: Optional[Union[T2IAdapterField, list[T2IAdapterField]]] = InputField(
|
|
|
|
description=FieldDescriptions.t2i_adapter,
|
|
|
|
title="T2I-Adapter",
|
|
|
|
default=None,
|
|
|
|
input=Input.Connection,
|
|
|
|
ui_order=7,
|
2023-09-06 17:36:00 +00:00
|
|
|
)
|
2023-11-30 09:55:20 +00:00
|
|
|
cfg_rescale_multiplier: float = InputField(
|
2024-01-02 20:41:59 +00:00
|
|
|
title="CFG Rescale Multiplier", default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
|
2023-11-30 09:55:20 +00:00
|
|
|
)
|
2023-09-24 08:11:07 +00:00
|
|
|
latents: Optional[LatentsField] = InputField(
|
feat(ui): add support for custom field types
Node authors may now create their own arbitrary/custom field types. Any pydantic model is supported.
Two notes:
1. Your field type's class name must be unique.
Suggest prefixing fields with something related to the node pack as a kind of namespace.
2. Custom field types function as connection-only fields.
For example, if your custom field has string attributes, you will not get a text input for that attribute when you give a node a field with your custom type.
This is the same behaviour as other complex fields that don't have custom UIs in the workflow editor - like, say, a string collection.
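A minimal sketch of a custom field type (hypothetical names; the `InputField` usage mirrors the patterns elsewhere in this codebase):
```py
from pydantic import BaseModel


class MyPackVideoField(BaseModel):
    """Hypothetical custom field type - the unique class name acts as the field type."""

    video_name: str
    frame_count: int
```
An invocation could then declare `video: MyPackVideoField = InputField(description="A video")`, and the field would behave as connection-only in the workflow editor.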
feat(ui): fix tooltips for custom types
We need to hold onto the original type of the field so they don't all just show up as "Unknown".
fix(ui): fix ts error with custom fields
feat(ui): custom field types connection validation
In the initial commit, a custom field's original type was added to the *field templates* only as `originalType`. Custom fields' `type` property was `"Custom"`*. This allowed for type safety throughout the UI logic.
*Actually, it was `"Unknown"`, but I changed it to custom for clarity.
Connection validation logic, however, uses the *field instance* of the node/field. Like the templates, *field instances* with custom types have their `type` set to `"Custom"`, but they didn't have an `originalType` property. As a result, all custom fields could be connected to all other custom fields.
To resolve this, we need to add `originalType` to the *field instances*, then switch the validation logic to use this instead of `type`.
This ended up needing a bit of finagling:
- If we make `originalType` a required property on field instances, existing workflows will break during connection validation, because they won't have this property. We'd need a new layer of logic to migrate the workflows, adding the new `originalType` property.
While this layer is probably needed anyways, typing `originalType` as optional is much simpler. Workflow migration logic can come later.
(Technically, we could remove all references to field types from the workflow files, and let the templates hold all this information. This feels like a significant change and I'm reluctant to do it now.)
- Because `originalType` is optional, anywhere we care about the type of a field, we need to use it over `type`. So there are a number of `field.originalType ?? field.type` expressions. This is a bit of a gotcha, we'll need to remember this in the future.
- We use `Array.prototype.includes()` often in the workflow editor, e.g. `COLLECTION_TYPES.includes(type)`. In these cases, the const array is of type `FieldType[]`, and `type` is `FieldType`.
Because we now support custom types, the arg `type` is now widened from `FieldType` to `string`.
This causes a TS error. This behaviour is somewhat controversial (see https://github.com/microsoft/TypeScript/issues/14520). These expressions are now rewritten as `COLLECTION_TYPES.some((t) => t === type)` to satisfy TS. It's logically equivalent.
fix(ui): typo
feat(ui): add CustomCollection and CustomPolymorphic field types
feat(ui): add validation for CustomCollection & CustomPolymorphic types
- Update connection validation for custom types
- Use simple string parsing to determine if a field is a collection or polymorphic type.
- No longer need to keep a list of collection and polymorphic types.
- Added runtime checks in `baseinvocation.py` to ensure no fields are named in such a way that it could mess up the new parsing
chore(ui): remove errant console.log
fix(ui): rename 'nodes.currentConnectionFieldType' -> 'nodes.connectionStartFieldType'
This was confusingly named and kept tripping me up. Renamed to be consistent with the `reactflow` `ConnectionStartParams` type.
fix(ui): fix ts error
feat(nodes): add runtime check for custom field names
"Custom", "CustomCollection" and "CustomPolymorphic" are reserved field names.
chore(ui): add TODO for revising field type names
wip refactor fieldtype structured
wip refactor field types
wip refactor types
wip refactor types
fix node layout
refactor field types
chore: mypy
organisation
organisation
organisation
fix(nodes): fix field orig_required, field_kind and input statuses
feat(nodes): remove broken implementation of default_factory on InputField
Use of this could break connection validation due to the difference between the node schema's required fields and invoke()'s required args.
Removed entirely for now. It wasn't ever actually used by the system, because all graphs always had values provided for fields where default_factory was used.
Also, pydantic is smart enough to not reuse the same object when specifying a default value - it clones the object first. So, the common pattern of `default_factory=list` is extraneous. It can just be `default=[]`.
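A quick illustration of that behaviour, assuming pydantic v2:
```py
from pydantic import BaseModel


class Foo(BaseModel):
    items: list[int] = []


a = Foo()
b = Foo()
a.items.append(1)
assert b.items == []  # each instance gets its own copy of the mutable default
```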
fix(nodes): fix InputField name validation
workflow validation
validation
chore: ruff
feat(nodes): fix up baseinvocation comments
fix(ui): improve typing & logic of buildFieldInputTemplate
improved error handling in parseFieldType
fix: back compat for deprecated default_factory and UIType
feat(nodes): do not show node packs loaded log if none loaded
chore(ui): typegen
2023-11-17 00:32:35 +00:00
|
|
|
default=None,
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
|
|
|
ui_order=4,
|
2023-10-05 05:29:16 +00:00
|
|
|
)
|
2023-08-26 17:50:13 +00:00
|
|
|
denoise_mask: Optional[DenoiseMaskField] = InputField(
|
2023-09-24 08:11:07 +00:00
|
|
|
default=None,
|
|
|
|
description=FieldDescriptions.mask,
|
|
|
|
input=Input.Connection,
|
|
|
|
ui_order=8,
|
2023-08-11 10:20:37 +00:00
|
|
|
)
|
2023-04-06 04:06:05 +00:00
|
|
|
|
2023-09-24 08:11:07 +00:00
|
|
|
@field_validator("cfg_scale")
|
2024-04-27 19:12:06 +00:00
|
|
|
def ge_one(cls, v: Union[List[float], float]) -> Union[List[float], float]:
|
Feat/easy param (#3504)
* Testing change to TextToLatents to allow setting different cfg_scale values per diffusion step.
* Adding first attempt at float param easing node, using Penner easing functions.
* Core implementation of ControlNet and MultiControlNet.
* Added support for ControlNet and MultiControlNet to legacy non-nodal Txt2Img in backend/generator. Although backend/generator will likely disappear by v3.x, right now they are very useful for testing core ControlNet and MultiControlNet functionality while node codebase is rapidly evolving.
* Added example of using ControlNet with legacy Txt2Img generator
* Resolving rebase conflict
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Resolving conflicts in rebase to origin/main
* Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke())
* changes to base class for controlnet nodes
* Added HED, LineArt, and OpenPose ControlNet nodes
* Added an additional "raw_processed_image" output port to controlnets, mainly so we could route ImageField to a ShowImage node
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff from image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* More rebase repair.
* Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this...
* Fixed use of ControlNet control_weight parameter
* Fixed lint-ish formatting error
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Refactored controlnet node to output ControlField that bundles control info.
* changes to base class for controlnet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff from image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Cleaning up TextToLatent arg testing
* Cleaning up mistakes after rebase.
* Removed last bits of dtype and device hardwiring from controlnet section
* Refactored ControlNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled.
* Added support for specifying which step iteration to start using
each ControlNet, and which step to end using each controlnet (specified as fraction of total steps)
* Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input.
* Added dependency on controlnet-aux v0.0.3
* Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it.
* Switched ControlNet node model name input from free text to default list of popular ControlNet model names.
* Fix to work with current stable release of controlnet_aux (v0.0.3). Turned off pre-processor params that were added post v0.0.3. Also changed defaults for shuffle.
* Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier.
* Cleaning up after ControlNet refactor in TextToLatentsInvocation
* Extended node-based ControlNet support to LatentsToLatentsInvocation.
* chore(ui): regen api client
* fix(ui): add value to conditioning field
* fix(ui): add control field type
* fix(ui): fix node ui type hints
* fix(nodes): controlnet input accepts list or single controlnet
* Moved to controlnet_aux v0.0.4, reinstated Zoe controlnet preprocessor. Also in pyproject.toml had to specify downgrade of timm to 0.6.13 _after_ controlnet-aux installs timm >= 0.9.2, because timm >0.6.13 breaks Zoe preprocessor.
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Switching to ControlField for output from controlnet nodes.
* Resolving conflicts in rebase to origin/main
* Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke())
* changes to base class for controlnet nodes
* Added HED, LineArt, and OpenPose ControlNet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff from image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this...
* Fixed use of ControlNet control_weight parameter
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Refactored controlnet node to output ControlField that bundles control info.
* changes to base class for controlnet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff from image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Cleaning up TextToLatent arg testing
* Cleaning up mistakes after rebase.
* Removed last bits of dtype and device hardwiring from controlnet section
* Refactored ControlNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled.
* Added support for specifying which step iteration to start using
each ControlNet, and which step to end using each controlnet (specified as fraction of total steps)
* Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input.
* Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it.
* Switched ControlNet node model name input from free text to default list of popular ControlNet model names.
* Fix to work with current stable release of controlnet_aux (v0.0.3). Turned off pre-processor params that were added post v0.0.3. Also changed defaults for shuffle.
* Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier.
* Cleaning up after ControlNet refactor in TextToLatentsInvocation
* Extended node-based ControlNet support to LatentsToLatentsInvocation.
* chore(ui): regen api client
* fix(ui): fix node ui type hints
* fix(nodes): controlnet input accepts list or single controlnet
* Added Mediapipe image processor for use as ControlNet preprocessor.
Also hacked in ability to specify HF subfolder when loading ControlNet models from string.
* Fixed bug where MediapipeFaceProcessorInvocation was ignoring max_faces and min_confidence params.
* Added nodes for float params: ParamFloatInvocation and FloatCollectionOutput. Also added FloatOutput.
* Added mediapipe install requirement. Should be able to remove once controlnet_aux package adds mediapipe to its requirements.
* Added float to FIELD_TYPE_MAP in constants.ts
* Progress toward improvement in fieldTemplateBuilder.ts getFieldType()
* Fixed controlnet preprocessors and controlnet handling in TextToLatents to work with revised Image services.
* Cleaning up from merge, re-adding cfg_scale to FIELD_TYPE_MAP
* Making sure cfg_scale of type list[float] can be used in image metadata, to support param easing for cfg_scale
* Fixed math for per-step param easing.
* Added option to show plot of param value at each step
* Just cleaning up after adding param easing plot option, removing vestigial code.
* Modified control_weight ControlNet param to be polymorphic --
can now be either a single float weight applied for all steps, or a list of floats of size total_steps, that specifies the weight for each step (see the sketch after this list).
* Added more informative error message when _validate_edge() throws an error.
* Just improving param easing bar chart title to include easing type.
* Added requirement for easing-functions package
* Taking out some diagnostic prints.
* Added option to use both easing function and mirror of easing function together.
* Fixed recently introduced problem (pulled in when merging main), triggered by num_steps in StepParamEasingInvocation not having a default value -- just added a default.
---------
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
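A minimal sketch of the polymorphic control_weight resolution and the begin/end step gating described in the list above (function and parameter names are assumed, not the actual implementation):
```py
from typing import Union


def resolve_control_weight(weight: Union[float, list[float]], step: int) -> float:
    # control_weight is polymorphic: a single float applied to every step,
    # or a list with one weight per step.
    return weight[step] if isinstance(weight, list) else weight


def controlnet_active(step: int, total_steps: int, begin_frac: float, end_frac: float) -> bool:
    # Begin/end points are given as fractions of the total step count.
    frac = step / max(total_steps - 1, 1)
    return begin_frac <= frac <= end_frac
```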
2023-06-11 06:27:44 +00:00
|
|
|
"""validate that all cfg_scale values are >= 1"""
|
|
|
|
if isinstance(v, list):
|
|
|
|
for i in v:
|
|
|
|
if i < 1:
|
2023-07-28 13:46:44 +00:00
|
|
|
raise ValueError("cfg_scale must be greater than 1")
|
2023-06-11 06:27:44 +00:00
|
|
|
else:
|
|
|
|
if v < 1:
|
2023-07-28 13:46:44 +00:00
|
|
|
raise ValueError("cfg_scale must be greater than 1")
|
2023-06-11 06:27:44 +00:00
|
|
|
return v

    def _get_text_embeddings_and_masks(
        self,
        cond_list: list[ConditioningField],
        context: InvocationContext,
        device: torch.device,
        dtype: torch.dtype,
    ) -> tuple[Union[list[BasicConditioningInfo], list[SDXLConditioningInfo]], list[Optional[torch.Tensor]]]:
        """Get the text embeddings and masks from the input conditioning fields."""
        text_embeddings: Union[list[BasicConditioningInfo], list[SDXLConditioningInfo]] = []
        text_embeddings_masks: list[Optional[torch.Tensor]] = []
        for cond in cond_list:
            cond_data = context.conditioning.load(cond.conditioning_name)
            text_embeddings.append(cond_data.conditionings[0].to(device=device, dtype=dtype))

            mask = cond.mask
            if mask is not None:
                mask = context.tensors.load(mask.tensor_name)
            text_embeddings_masks.append(mask)

        return text_embeddings, text_embeddings_masks
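
    # Note: a conditioning field without a mask yields a None entry in
    # text_embeddings_masks. When regional masks are in play,
    # _preprocess_regional_prompt_mask() (below) expands a None entry into an
    # all-ones mask over the full latent, so an unmasked prompt effectively
    # acts as a global prompt.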

    def _preprocess_regional_prompt_mask(
        self, mask: Optional[torch.Tensor], target_height: int, target_width: int, dtype: torch.dtype
    ) -> torch.Tensor:
        """Preprocess a regional prompt mask to match the target height and width.

        If mask is None, returns a mask of all ones with the target height and width.
        If mask is not None, resizes the mask to the target height and width using 'nearest' interpolation.

        Returns:
            torch.Tensor: The processed mask. shape: (1, 1, target_height, target_width).
        """
        if mask is None:
            return torch.ones((1, 1, target_height, target_width), dtype=dtype)

        mask = to_standard_float_mask(mask, out_dtype=dtype)

        tf = torchvision.transforms.Resize(
            (target_height, target_width), interpolation=torchvision.transforms.InterpolationMode.NEAREST
        )

        # Add a batch dimension to the mask, because torchvision expects shape (batch, channels, h, w).
        mask = mask.unsqueeze(0)  # Shape: (1, h, w) -> (1, 1, h, w)
        resized_mask = tf(mask)
        return resized_mask
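
    # Illustrative note: NEAREST interpolation (rather than bilinear) keeps a
    # binary region mask binary after resizing. For example, a (1, 512, 512)
    # mask prepared for a 64x64 latent comes back as a hard-edged
    # (1, 1, 64, 64) tensor.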

    def _concat_regional_text_embeddings(
        self,
        text_conditionings: Union[list[BasicConditioningInfo], list[SDXLConditioningInfo]],
        masks: Optional[list[Optional[torch.Tensor]]],
        latent_height: int,
        latent_width: int,
        dtype: torch.dtype,
    ) -> tuple[Union[BasicConditioningInfo, SDXLConditioningInfo], Optional[TextConditioningRegions]]:
        """Concatenate regional text embeddings into a single embedding and track the region masks accordingly."""
        if masks is None:
            masks = [None] * len(text_conditionings)
        assert len(text_conditionings) == len(masks)

        is_sdxl = type(text_conditionings[0]) is SDXLConditioningInfo

        all_masks_are_none = all(mask is None for mask in masks)

        text_embedding = []
        pooled_embedding = None
        add_time_ids = None
        cur_text_embedding_len = 0
        processed_masks = []
        embedding_ranges = []

        for prompt_idx, text_embedding_info in enumerate(text_conditionings):
            mask = masks[prompt_idx]

            if is_sdxl:
                # We choose a random SDXLConditioningInfo's pooled_embeds and add_time_ids here, with a preference
                # for prompts without a mask. We prefer prompts without a mask, because they are more likely to
                # contain global prompt information. In an ideal case, there should be exactly one global prompt
                # without a mask, but we don't enforce this.

                # HACK(ryand): The fact that we have to choose a single pooled_embedding and add_time_ids here is a
                # fundamental interface issue. The SDXL Compel nodes are not designed to be used in the way that we
                # use them for regional prompting. Ideally, the DenoiseLatents invocation should accept a single
                # pooled_embeds tensor and a list of standard text embeds with region masks. This change would be a
                # pretty major breaking change to a popular node, so for now we use this hack.
                if pooled_embedding is None or mask is None:
                    pooled_embedding = text_embedding_info.pooled_embeds
                if add_time_ids is None or mask is None:
                    add_time_ids = text_embedding_info.add_time_ids

            text_embedding.append(text_embedding_info.embeds)
            if not all_masks_are_none:
                embedding_ranges.append(
                    Range(
                        start=cur_text_embedding_len, end=cur_text_embedding_len + text_embedding_info.embeds.shape[1]
                    )
                )
                processed_masks.append(
                    self._preprocess_regional_prompt_mask(mask, latent_height, latent_width, dtype=dtype)
                )

            cur_text_embedding_len += text_embedding_info.embeds.shape[1]

        text_embedding = torch.cat(text_embedding, dim=1)
        assert len(text_embedding.shape) == 3  # batch_size, seq_len, token_len

        regions = None
        if not all_masks_are_none:
            regions = TextConditioningRegions(
                masks=torch.cat(processed_masks, dim=1),
                ranges=embedding_ranges,
            )

        if is_sdxl:
            return (
                SDXLConditioningInfo(embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids),
                regions,
            )
        return BasicConditioningInfo(embeds=text_embedding), regions
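
    # Worked example (illustrative; assumes two SD1.5 prompts with 77-token,
    # 768-dim embeddings): the embeddings concatenate to shape (1, 154, 768)
    # with tracked ranges [0, 77) and [77, 154), and the per-prompt masks stack
    # along dim=1 into a (1, 2, latent_height, latent_width) tensor inside
    # TextConditioningRegions.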

    def get_conditioning_data(
        self,
        context: InvocationContext,
        unet: UNet2DConditionModel,
        latent_height: int,
        latent_width: int,
    ) -> TextConditioningData:
        # Normalize self.positive_conditioning and self.negative_conditioning to lists.
        cond_list = self.positive_conditioning
        if not isinstance(cond_list, list):
            cond_list = [cond_list]
        uncond_list = self.negative_conditioning
        if not isinstance(uncond_list, list):
            uncond_list = [uncond_list]

        cond_text_embeddings, cond_text_embedding_masks = self._get_text_embeddings_and_masks(
            cond_list, context, unet.device, unet.dtype
        )
        uncond_text_embeddings, uncond_text_embedding_masks = self._get_text_embeddings_and_masks(
            uncond_list, context, unet.device, unet.dtype
        )

        cond_text_embedding, cond_regions = self._concat_regional_text_embeddings(
            text_conditionings=cond_text_embeddings,
            masks=cond_text_embedding_masks,
            latent_height=latent_height,
            latent_width=latent_width,
            dtype=unet.dtype,
        )
        uncond_text_embedding, uncond_regions = self._concat_regional_text_embeddings(
            text_conditionings=uncond_text_embeddings,
            masks=uncond_text_embedding_masks,
            latent_height=latent_height,
            latent_width=latent_width,
            dtype=unet.dtype,
        )

        if isinstance(self.cfg_scale, list):
            assert (
                len(self.cfg_scale) == self.steps
            ), "cfg_scale (list) must have the same length as the number of steps"

        conditioning_data = TextConditioningData(
            uncond_text=uncond_text_embedding,
            cond_text=cond_text_embedding,
            uncond_regions=uncond_regions,
            cond_regions=cond_regions,
            guidance_scale=self.cfg_scale,
            guidance_rescale_multiplier=self.cfg_rescale_multiplier,
        )
        return conditioning_data
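
    # Note: cfg_scale may be a single float or a per-step list. For example, with
    # steps=3, cfg_scale=[7.5, 6.0, 4.5] eases guidance downward over the run;
    # the assert above requires the list length to match the step count.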

    def create_pipeline(
        self,
        unet: UNet2DConditionModel,
        scheduler: Scheduler,
    ) -> StableDiffusionGeneratorPipeline:
        class FakeVae:
            class FakeVaeConfig:
                def __init__(self) -> None:
                    self.block_out_channels = [0]

            def __init__(self) -> None:
                self.config = FakeVae.FakeVaeConfig()

        return StableDiffusionGeneratorPipeline(
            vae=FakeVae(),  # TODO: oh...
            text_encoder=None,
            tokenizer=None,
            unet=unet,
            scheduler=scheduler,
            safety_checker=None,
            feature_extractor=None,
            requires_safety_checker=False,
        )
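
    # Note: this pipeline is used only for denoising, so a stand-in VAE with a
    # minimal config is sufficient here. Actual latent decoding is handled by a
    # separate invocation (see LatentsToImageInvocation below).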

    def prep_control_data(
        self,
        context: InvocationContext,
        control_input: Optional[Union[ControlField, List[ControlField]]],
        latents_shape: List[int],
        exit_stack: ExitStack,
        do_classifier_free_guidance: bool = True,
    ) -> Optional[List[ControlNetData]]:
        # Assuming fixed dimensional scaling of LATENT_SCALE_FACTOR.
        control_height_resize = latents_shape[2] * LATENT_SCALE_FACTOR
        control_width_resize = latents_shape[3] * LATENT_SCALE_FACTOR
        if control_input is None:
            control_list = None
        elif isinstance(control_input, list) and len(control_input) == 0:
            control_list = None
        elif isinstance(control_input, ControlField):
            control_list = [control_input]
        elif isinstance(control_input, list) and len(control_input) > 0 and isinstance(control_input[0], ControlField):
            control_list = control_input
        else:
            control_list = None
        if control_list is None:
            return None
        # After above handling, any control that is not None should now be of type list[ControlField].

        # FIXME: add checks to skip entry if model or image is None
        #        and if weight is None, populate with default 1.0?
        controlnet_data = []
        for control_info in control_list:
            control_model = exit_stack.enter_context(context.models.load(control_info.control_model))

            control_image_field = control_info.image
            input_image = context.images.get_pil(control_image_field.image_name)
            # FIXME: still need to test with different widths, heights, devices, dtypes
            #        and add in batch_size, num_images_per_prompt?
            #        and do real check for classifier_free_guidance?
            # prepare_control_image should return torch.Tensor of shape (batch_size, 3, height, width)
            control_image = prepare_control_image(
                image=input_image,
                do_classifier_free_guidance=do_classifier_free_guidance,
                width=control_width_resize,
                height=control_height_resize,
                # batch_size=batch_size * num_images_per_prompt,
                # num_images_per_prompt=num_images_per_prompt,
                device=control_model.device,
                dtype=control_model.dtype,
                control_mode=control_info.control_mode,
                resize_mode=control_info.resize_mode,
            )
            control_item = ControlNetData(
                model=control_model,  # model object
                image_tensor=control_image,
                weight=control_info.control_weight,
                begin_step_percent=control_info.begin_step_percent,
                end_step_percent=control_info.end_step_percent,
                control_mode=control_info.control_mode,
                # any resizing needed should currently be happening in prepare_control_image(),
                # but adding resize_mode to ControlNetData in case needed in the future
                resize_mode=control_info.resize_mode,
            )
            controlnet_data.append(control_item)
            # MultiControlNetModel has been refactored out, just need list[ControlNetData]

        return controlnet_data
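
    # Note: begin_step_percent/end_step_percent define the active window as a
    # fraction of total steps. For example, begin_step_percent=0.0 with
    # end_step_percent=0.5 applies a ControlNet only during the first half of
    # denoising.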

    def prep_ip_adapter_image_prompts(
        self,
        context: InvocationContext,
        ip_adapters: List[IPAdapterField],
    ) -> List[Tuple[torch.Tensor, torch.Tensor]]:
        """Run the IPAdapter CLIPVisionModel, returning image prompt embeddings."""
        image_prompts = []
        for single_ip_adapter in ip_adapters:
            with context.models.load(single_ip_adapter.ip_adapter_model) as ip_adapter_model:
                assert isinstance(ip_adapter_model, IPAdapter)
                image_encoder_model_info = context.models.load(single_ip_adapter.image_encoder_model)
                # `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here.
                single_ipa_image_fields = single_ip_adapter.image
                if not isinstance(single_ipa_image_fields, list):
                    single_ipa_image_fields = [single_ipa_image_fields]

                single_ipa_images = [context.images.get_pil(image.image_name) for image in single_ipa_image_fields]
                with image_encoder_model_info as image_encoder_model:
                    assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)
                    # Get image embeddings from CLIP and ImageProjModel.
                    image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds(
                        single_ipa_images, image_encoder_model
                    )
                    image_prompts.append((image_prompt_embeds, uncond_image_prompt_embeds))

        return image_prompts
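
    # Note: the returned (cond, uncond) embedding tuples are ordered to match
    # `ip_adapters`; prep_ip_adapter_data() (below) consumes them positionally
    # via pop(0), which is why it asserts that the two lists have equal length.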

    def prep_ip_adapter_data(
        self,
        context: InvocationContext,
        ip_adapters: List[IPAdapterField],
        image_prompts: List[Tuple[torch.Tensor, torch.Tensor]],
        exit_stack: ExitStack,
        latent_height: int,
        latent_width: int,
        dtype: torch.dtype,
    ) -> Optional[List[IPAdapterData]]:
        """If IP-Adapter is enabled, then this function loads the requisite models and adds the image prompt
        conditioning data."""
        ip_adapter_data_list = []
        assert len(ip_adapters) == len(image_prompts)
        for single_ip_adapter in ip_adapters:
            ip_adapter_model = exit_stack.enter_context(context.models.load(single_ip_adapter.ip_adapter_model))

            image_prompt_embeds, uncond_image_prompt_embeds = image_prompts.pop(0)

            mask_field = single_ip_adapter.mask
            mask = context.tensors.load(mask_field.tensor_name) if mask_field is not None else None
            mask = self._preprocess_regional_prompt_mask(mask, latent_height, latent_width, dtype=dtype)

            ip_adapter_data_list.append(
                IPAdapterData(
                    ip_adapter_model=ip_adapter_model,
                    weight=single_ip_adapter.weight,
                    target_blocks=single_ip_adapter.target_blocks,
                    begin_step_percent=single_ip_adapter.begin_step_percent,
                    end_step_percent=single_ip_adapter.end_step_percent,
                    ip_adapter_conditioning=IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds),
                    mask=mask,
                )
            )

        return ip_adapter_data_list if len(ip_adapter_data_list) > 0 else None

    def run_t2i_adapters(
        self,
        context: InvocationContext,
        t2i_adapter: Optional[Union[T2IAdapterField, list[T2IAdapterField]]],
        latents_shape: list[int],
        do_classifier_free_guidance: bool,
    ) -> Optional[list[T2IAdapterData]]:
        if t2i_adapter is None:
            return None

        # Handle the possibility that t2i_adapter could be a list or a single T2IAdapterField.
        if isinstance(t2i_adapter, T2IAdapterField):
            t2i_adapter = [t2i_adapter]

        if len(t2i_adapter) == 0:
            return None

        t2i_adapter_data = []
        for t2i_adapter_field in t2i_adapter:
            t2i_adapter_model_config = context.models.get_config(t2i_adapter_field.t2i_adapter_model.key)
            t2i_adapter_loaded_model = context.models.load(t2i_adapter_field.t2i_adapter_model)
            image = context.images.get_pil(t2i_adapter_field.image.image_name)

            # The max_unet_downscale is the maximum amount that the UNet model downscales the latent image
            # internally.
            if t2i_adapter_model_config.base == BaseModelType.StableDiffusion1:
                max_unet_downscale = 8
            elif t2i_adapter_model_config.base == BaseModelType.StableDiffusionXL:
                max_unet_downscale = 4
            else:
                raise ValueError(f"Unexpected T2I-Adapter base model type: '{t2i_adapter_model_config.base}'.")

            t2i_adapter_model: T2IAdapter
            with t2i_adapter_loaded_model as t2i_adapter_model:
                total_downscale_factor = t2i_adapter_model.total_downscale_factor

                # Resize the T2I-Adapter input image.
                # We select the resize dimensions so that after the T2I-Adapter's total_downscale_factor is
                # applied, the result will match the latent image's dimensions after max_unet_downscale is applied.
                t2i_input_height = latents_shape[2] // max_unet_downscale * total_downscale_factor
                t2i_input_width = latents_shape[3] // max_unet_downscale * total_downscale_factor

                # Note: We have hard-coded `do_classifier_free_guidance=False`. This is because we only want to
                # prepare a single image. If CFG is enabled, we will duplicate the resultant tensor after applying
                # the T2I-Adapter model.
                #
                # Note: We re-use the `prepare_control_image(...)` from ControlNet for T2I-Adapter, because it has
                # many of the same requirements (e.g. preserving binary masks during resize).
                t2i_image = prepare_control_image(
                    image=image,
                    do_classifier_free_guidance=False,
                    width=t2i_input_width,
                    height=t2i_input_height,
                    num_channels=t2i_adapter_model.config["in_channels"],  # mypy treats this as a FrozenDict
                    device=t2i_adapter_model.device,
                    dtype=t2i_adapter_model.dtype,
                    resize_mode=t2i_adapter_field.resize_mode,
                )

                adapter_state = t2i_adapter_model(t2i_image)

            if do_classifier_free_guidance:
                for idx, value in enumerate(adapter_state):
                    adapter_state[idx] = torch.cat([value] * 2, dim=0)

            t2i_adapter_data.append(
                T2IAdapterData(
                    adapter_state=adapter_state,
                    weight=t2i_adapter_field.weight,
                    begin_step_percent=t2i_adapter_field.begin_step_percent,
                    end_step_percent=t2i_adapter_field.end_step_percent,
                )
            )

        return t2i_adapter_data
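
    # Worked example (illustrative; assumes an SD1.5 adapter reporting
    # total_downscale_factor == 64): for a 512x512 generation the latents are
    # 64x64, so 64 // max_unet_downscale(8) * 64 = 512, i.e. the adapter input
    # is resized to 512x512 and its deepest feature map matches the UNet's
    # most-downscaled latent resolution.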

    # original idea by https://github.com/AmericanPresidentJimmyCarter
    # TODO: research more for second order schedulers timesteps
    def init_scheduler(
        self,
        scheduler: Union[Scheduler, ConfigMixin],
        device: torch.device,
        steps: int,
        denoising_start: float,
        denoising_end: float,
        seed: int,
    ) -> Tuple[int, List[int], int, Dict[str, Any]]:
        assert isinstance(scheduler, ConfigMixin)
        if scheduler.config.get("cpu_only", False):
            scheduler.set_timesteps(steps, device="cpu")
            timesteps = scheduler.timesteps.to(device=device)
        else:
            scheduler.set_timesteps(steps, device=device)
            timesteps = scheduler.timesteps

        # skip greater order timesteps
        _timesteps = timesteps[:: scheduler.order]

        # get start timestep index
        t_start_val = int(round(scheduler.config["num_train_timesteps"] * (1 - denoising_start)))
        t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, _timesteps)))

        # get end timestep index
        t_end_val = int(round(scheduler.config["num_train_timesteps"] * (1 - denoising_end)))
        t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, _timesteps[t_start_idx:])))

        # apply order to indexes
        t_start_idx *= scheduler.order
        t_end_idx *= scheduler.order

        init_timestep = timesteps[t_start_idx : t_start_idx + 1]
        timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
        num_inference_steps = len(timesteps) // scheduler.order

        scheduler_step_kwargs: Dict[str, Any] = {}
        scheduler_step_signature = inspect.signature(scheduler.step)
        if "generator" in scheduler_step_signature.parameters:
            # At some point, someone decided that schedulers that accept a generator should use the original seed with
            # all bits flipped. I don't know the original rationale for this, but now we must keep it like this for
            # reproducibility.
            scheduler_step_kwargs.update({"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)})
        if isinstance(scheduler, TCDScheduler):
            scheduler_step_kwargs.update({"eta": 1.0})

        return num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs
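
    # Worked example (illustrative): with steps=10, scheduler.order=1, and
    # num_train_timesteps=1000, the timesteps descend from ~999 in increments
    # of ~100. denoising_start=0.7 gives t_start_val=300, so the seven
    # timesteps >= 300 are skipped and only the last ~30% of the schedule
    # runs; this is the image-to-image case with strength 0.3.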

    def prep_inpaint_mask(
        self, context: InvocationContext, latents: torch.Tensor
    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], bool]:
        if self.denoise_mask is None:
            return None, None, False

        mask = context.tensors.load(self.denoise_mask.mask_name)
        mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
        if self.denoise_mask.masked_latents_name is not None:
            masked_latents = context.tensors.load(self.denoise_mask.masked_latents_name)
        else:
            masked_latents = torch.where(mask < 0.5, 0.0, latents)

        return 1 - mask, masked_latents, self.denoise_mask.gradient
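
    # Note: when no masked_latents are stored, the fallback zeroes latents
    # where the stored mask is < 0.5, and the mask itself is returned inverted
    # (1 - mask); i.e. the stored mask and the returned mask use opposite
    # conventions for marking the region being repainted.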

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        with SilenceWarnings():  # this quenches the NSFW nag from diffusers
            seed = None
            noise = None
            if self.noise is not None:
                noise = context.tensors.load(self.noise.latents_name)
                seed = self.noise.seed

            if self.latents is not None:
                latents = context.tensors.load(self.latents.latents_name)
                if seed is None:
                    seed = self.latents.seed

                if noise is not None and noise.shape[1:] != latents.shape[1:]:
                    raise Exception(f"Incompatible 'noise' and 'latents' shapes: {latents.shape=} {noise.shape=}")

            elif noise is not None:
                latents = torch.zeros_like(noise)
            else:
                raise Exception("'latents' or 'noise' must be provided!")

            if seed is None:
                seed = 0

            mask, masked_latents, gradient_mask = self.prep_inpaint_mask(context, latents)

            # TODO(ryand): I have hard-coded `do_classifier_free_guidance=True` to mirror the behaviour of
            # ControlNets, below. Investigate whether this is appropriate.
            t2i_adapter_data = self.run_t2i_adapters(
                context,
                self.t2i_adapter,
                latents.shape,
                do_classifier_free_guidance=True,
            )

            ip_adapters: List[IPAdapterField] = []
            if self.ip_adapter is not None:
                # ip_adapter could be a list or a single IPAdapterField. Normalize to a list here.
                if isinstance(self.ip_adapter, list):
                    ip_adapters = self.ip_adapter
                else:
                    ip_adapters = [self.ip_adapter]

            # If there are IP adapters, the following line runs the adapters' CLIPVision image encoders to return
            # a series of image conditioning embeddings. This is being done here rather than in the
            # big model context below in order to use less VRAM on low-VRAM systems.
            # The image prompts are then passed to prep_ip_adapter_data().
            image_prompts = self.prep_ip_adapter_image_prompts(context=context, ip_adapters=ip_adapters)

            # get the unet's config so that we can pass the base to dispatch_progress()
            unet_config = context.models.get_config(self.unet.unet.key)

            def step_callback(state: PipelineIntermediateState) -> None:
                context.util.sd_step_callback(state, unet_config.base)

            def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
                for lora in self.unet.loras:
                    lora_info = context.models.load(lora.lora)
                    assert isinstance(lora_info.model, LoRAModelRaw)
                    yield (lora_info.model, lora.weight)
                    del lora_info
                return

            unet_info = context.models.load(self.unet.unet)
            assert isinstance(unet_info.model, UNet2DConditionModel)
            with (
                ExitStack() as exit_stack,
                unet_info as unet,
                ModelPatcher.apply_freeu(unet, self.unet.freeu_config),
                set_seamless(unet, self.unet.seamless_axes),  # FIXME
                # Apply the LoRA after unet has been moved to its target device for faster patching.
                ModelPatcher.apply_lora_unet(unet, _lora_loader()),
            ):
                assert isinstance(unet, UNet2DConditionModel)
                latents = latents.to(device=unet.device, dtype=unet.dtype)
                if noise is not None:
                    noise = noise.to(device=unet.device, dtype=unet.dtype)
                if mask is not None:
                    mask = mask.to(device=unet.device, dtype=unet.dtype)
                if masked_latents is not None:
                    masked_latents = masked_latents.to(device=unet.device, dtype=unet.dtype)

                scheduler = get_scheduler(
                    context=context,
                    scheduler_info=self.unet.scheduler,
                    scheduler_name=self.scheduler,
                    seed=seed,
                )

                pipeline = self.create_pipeline(unet, scheduler)

                _, _, latent_height, latent_width = latents.shape
                conditioning_data = self.get_conditioning_data(
                    context=context, unet=unet, latent_height=latent_height, latent_width=latent_width
                )

                controlnet_data = self.prep_control_data(
                    context=context,
                    control_input=self.control,
                    latents_shape=latents.shape,
                    # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
                    do_classifier_free_guidance=True,
                    exit_stack=exit_stack,
                )

                ip_adapter_data = self.prep_ip_adapter_data(
                    context=context,
                    ip_adapters=ip_adapters,
                    image_prompts=image_prompts,
                    exit_stack=exit_stack,
                    latent_height=latent_height,
                    latent_width=latent_width,
                    dtype=unet.dtype,
                )

                num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs = self.init_scheduler(
                    scheduler,
                    device=unet.device,
                    steps=self.steps,
                    denoising_start=self.denoising_start,
                    denoising_end=self.denoising_end,
                    seed=seed,
                )

                result_latents = pipeline.latents_from_embeddings(
                    latents=latents,
                    timesteps=timesteps,
                    init_timestep=init_timestep,
                    noise=noise,
                    seed=seed,
                    mask=mask,
                    masked_latents=masked_latents,
                    gradient_mask=gradient_mask,
                    num_inference_steps=num_inference_steps,
                    scheduler_step_kwargs=scheduler_step_kwargs,
                    conditioning_data=conditioning_data,
                    control_data=controlnet_data,
                    ip_adapter_data=ip_adapter_data,
                    t2i_adapter_data=t2i_adapter_data,
                    callback=step_callback,
                )

            # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
            result_latents = result_latents.to("cpu")
            TorchDevice.empty_cache()

            name = context.tensors.save(tensor=result_latents)
            return LatentsOutput.build(latents_name=name, latents=result_latents, seed=None)


@invocation(
    "l2i",
    title="Latents to Image",
    tags=["latents", "image", "vae", "l2i"],
    category="latents",
    version="1.2.2",
)
class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Generates an image from latents."""

    latents: LatentsField = InputField(
        description=FieldDescriptions.latents,
        input=Input.Connection,
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )
    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
    fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        latents = context.tensors.load(self.latents.latents_name)

        vae_info = context.models.load(self.vae.vae)
        assert isinstance(vae_info.model, (UNet2DConditionModel, AutoencoderKL, AutoencoderTiny))
        with set_seamless(vae_info.model, self.vae.seamless_axes), vae_info as vae:
            assert isinstance(vae, torch.nn.Module)
            latents = latents.to(vae.device)
            if self.fp32:
                vae.to(dtype=torch.float32)

                use_torch_2_0_or_xformers = hasattr(vae.decoder, "mid_block") and isinstance(
                    vae.decoder.mid_block.attentions[0].processor,
                    (
                        AttnProcessor2_0,
                        XFormersAttnProcessor,
                        LoRAXFormersAttnProcessor,
                        LoRAAttnProcessor2_0,
                    ),
                )
                # If xformers or torch 2.0 attention is used, the attention block does not
                # need to be in float32, which can save lots of memory.
                if use_torch_2_0_or_xformers:
                    vae.post_quant_conv.to(latents.dtype)
                    vae.decoder.conv_in.to(latents.dtype)
                    vae.decoder.mid_block.to(latents.dtype)
                else:
                    latents = latents.float()

            else:
                vae.to(dtype=torch.float16)
                latents = latents.half()

            if self.tiled or context.config.get().force_tiled_decode:
                vae.enable_tiling()
            else:
                vae.disable_tiling()
Partial migration of UI to nodes API (#3195)
* feat(ui): add axios client generator and simple example
* fix(ui): update client & nodes test code w/ new Edge type
* chore(ui): organize generated files
* chore(ui): update .eslintignore, .prettierignore
* chore(ui): update openapi.json
* feat(backend): fixes for nodes/generator
* feat(ui): generate object args for api client
* feat(ui): more nodes api prototyping
* feat(ui): nodes cancel
* chore(ui): regenerate api client
* fix(ui): disable OG web server socket connection
* fix(ui): fix scrollbar styles typing and prop
just noticed the typo, and made the types stronger.
* feat(ui): add socketio types
* feat(ui): wip nodes
- extract api client method arg types instead of manually declaring them
- update example to display images
- general tidy up
* start building out node translations from frontend state and add notes about missing features
* use reference to sampler_name
* use reference to sampler_name
* add optional apiUrl prop
* feat(ui): start hooking up dynamic txt2img node generation, create middleware for session invocation
* feat(ui): write separate nodes socket layer, txt2img generating and rendering w single node
* feat(ui): img2img implementation
* feat(ui): get intermediate images working but types are stubbed out
* chore(ui): add support for package mode
* feat(ui): add nodes mode script
* feat(ui): handle random seeds
* fix(ui): fix middleware types
* feat(ui): add rtk action type guard
* feat(ui): disable NodeAPITest
This was polluting the network/socket logs.
* feat(ui): fix parameters panel border color
This commit should be elsewhere but I don't want to break my flow
* feat(ui): make thunk types more consistent
* feat(ui): add type guards for outputs
* feat(ui): load images on socket connect
Rudimentary
* chore(ui): bump redux-toolkit
* docs(ui): update readme
* chore(ui): regenerate api client
* chore(ui): add typescript as dev dependency
I am having trouble with TS versions after vscode updated; it now uses TS 5. `madge` has installed 3.9.10 and for whatever reason my vscode wants to use that. Manually specifying 4.9.5 and then setting vscode to use that as the workspace TS fixes the issue.
* feat(ui): begin migrating gallery to nodes
Along the way, migrate to use RTK `createEntityAdapter` for gallery images, and separate `results` and `uploads` into separate slices. Much cleaner this way.
* feat(ui): clean up & comment results slice
* fix(ui): separate thunk for initial gallery load so it properly gets index 0
* feat(ui): POST upload working
* fix(ui): restore removed type
* feat(ui): patch api generation for headers access
* chore(ui): regenerate api
* feat(ui): wip gallery migration
* feat(ui): wip gallery migration
* chore(ui): regenerate api
* feat(ui): wip refactor socket events
* feat(ui): disable panels based on app props
* feat(ui): invert logic to be disabled
* disable panels when app mounts
* feat(ui): add support to disableTabs
* docs(ui): organise and update docs
* lang(ui): add toast strings
* feat(ui): wip events, comments, and general refactoring
* feat(ui): add optional token for auth
* feat(ui): export StatusIndicator and ModelSelect for header use
* feat(ui): working on making socket URL dynamic
* feat(ui): dynamic middleware loading
* feat(ui): prep for socket jwt
* feat(ui): migrate cancelation
also updated action names to be event-like instead of declaration-like
sorry, i was scattered and this commit has a lot of unrelated stuff in it.
* fix(ui): fix img2img type
* chore(ui): regenerate api client
* feat(ui): improve InvocationCompleteEvent types
* feat(ui): increase StatusIndicator font size
* fix(ui): fix middleware order for multi-node graphs
* feat(ui): add exampleGraphs object w/ iterations example
* feat(ui): generate iterations graph
* feat(ui): update ModelSelect for nodes API
* feat(ui): add hi-res functionality for txt2img generations
* feat(ui): "subscribe" to particular nodes
feels like a dirty hack but oh well it works
* feat(ui): first steps to node editor ui
* fix(ui): disable event subscription
it is not fully baked just yet
* feat(ui): wip node editor
* feat(ui): remove extraneous field types
* feat(ui): nodes before deleting stuff
* feat(ui): cleanup nodes ui stuff
* feat(ui): hook up nodes to redux
* fix(ui): fix handle
* fix(ui): add basic node edges & connection validation
* feat(ui): add connection validation styling
* feat(ui): increase edge width
* feat(ui): it blends
* feat(ui): wip model handling and graph topology validation
* feat(ui): validate connections w/ graphlib
* docs(ui): update nodes doc
* feat(ui): wip node editor
* chore(ui): rebuild api, update types
* add redux-dynamic-middlewares as a dependency
* feat(ui): add url host transformation
* feat(ui): handle already-connected fields
* feat(ui): rewrite SqliteItemStore in sqlalchemy
* fix(ui): fix sqlalchemy dynamic model instantiation
* feat(ui, nodes): metadata wip
* feat(ui, nodes): models
* feat(ui, nodes): more metadata wip
* feat(ui): wip range/iterate
* fix(nodes): fix sqlite typing
* feat(ui): export new type for invoke component
* tests(nodes): fix test instantiation of ImageField
* feat(nodes): fix LoadImageInvocation
* feat(nodes): add `title` ui hint
* feat(nodes): make ImageField attrs optional
* feat(ui): wip nodes etc
* feat(nodes): roll back sqlalchemy
* fix(nodes): partially address feedback
* fix(backend): roll back changes to pngwriter
* feat(nodes): wip address metadata feedback
* feat(nodes): add seeded rng to RandomRange
* feat(nodes): address feedback
* feat(nodes): move GET images error handling to DiskImageStorage
* feat(nodes): move GET images error handling to DiskImageStorage
* fix(nodes): fix image output schema customization
* feat(ui): img2img/txt2img -> linear
- remove txt2img and img2img tabs
- add linear tab
- add initial image selection to linear parameters accordion
* feat(ui): tidy graph builders
* feat(ui): tidy misc
* feat(ui): improve invocation union types
* feat(ui): wip metadata viewer recall
* feat(ui): move fonts to normal deps
* feat(nodes): fix broken upload
* feat(nodes): add metadata module + tests, thumbnails
- `MetadataModule` is stateless and needed in places where the `InvocationContext` is not available, so have not made it a `service`
- Handles loading/parsing/building metadata, and creating png info objects
- added tests for MetadataModule
- Lifted thumbnail stuff to util
* fix(nodes): revert change to RandomRangeInvocation
* feat(nodes): address feedback
- make metadata a service
- rip out pydantic validation, implement metadata parsing as simple functions
- update tests
- address other minor feedback items
* fix(nodes): fix other tests
* fix(nodes): add metadata service to cli
* fix(nodes): fix latents/image field parsing
* feat(nodes): customise LatentsField schema
* feat(nodes): move metadata parsing to frontend
* fix(nodes): fix metadata test
---------
Co-authored-by: maryhipp <maryhipp@gmail.com>
Co-authored-by: Mary Hipp <maryhipp@Marys-MacBook-Air.local>
2023-04-22 03:10:20 +00:00
|
|
|
|
2023-05-14 00:06:26 +00:00
|
|
|
# clear memory as vae decode can request a lot
|
2024-04-15 13:12:49 +00:00
|
|
|
TorchDevice.empty_cache()
|
2023-05-14 00:06:26 +00:00
|
|
|
|
2023-05-13 13:08:03 +00:00
|
|
|
with torch.inference_mode():
|
|
|
|
# copied from diffusers pipeline
|
|
|
|
latents = latents / vae.config.scaling_factor
|
|
|
|
image = vae.decode(latents, return_dict=False)[0]
|
2023-07-05 02:37:16 +00:00
|
|
|
image = (image / 2 + 0.5).clamp(0, 1) # denormalize
|
2023-05-13 13:08:03 +00:00
|
|
|
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
|
|
|
np_image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
|
|
|
|
|
|
|
image = VaeImageProcessor.numpy_to_pil(np_image)[0]
|
|
|
|
|
2024-04-15 13:12:49 +00:00
|
|
|
TorchDevice.empty_cache()
|
2023-05-13 13:08:03 +00:00
|
|
|
|
2024-01-13 12:23:16 +00:00
|
|
|
image_dto = context.images.save(image=image)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2024-01-13 12:23:16 +00:00
|
|
|
return ImageOutput.build(image_dto)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2023-07-05 02:37:16 +00:00
|
|
|
|
2023-07-28 13:46:44 +00:00
|
|
|
LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"]
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
|
feat(api): chore: pydantic & fastapi upgrade
Upgrade pydantic and fastapi to latest.
- pydantic~=2.4.2
- fastapi~=103.2
- fastapi-events~=0.9.1
**Big Changes**
There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialize and deserialize models, but there are a few more complex changes.
**Invocations**
The biggest change relates to invocation creation, instantiation and validation.
Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie.
Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`.
With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problems with concurrent access to the invocation classes and is not a free operation.
This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and `BaseInvocation.invoke_internal()` method.
In the end, this implementation is cleaner.
**Invocation Fields**
In pydantic v2, you can no longer directly add or remove fields from a model.
Previously, we did this to add the `type` field to invocations.
**Invocation Decorators**
With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper.
A similar technique is used for `invocation_output()`.
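For illustration, a minimal sketch of the `create_model()` approach (the names here are hypothetical, not the exact ones used in `baseinvocation.py`):
```py
from pydantic import BaseModel, create_model

class ExampleInvocationBase(BaseModel):
    prompt: str = "a cat"

# Rebuild the model with an extra `type` field instead of mutating
# ExampleInvocationBase's fields directly (pydantic v2 forbids that).
ExampleInvocation = create_model(
    "ExampleInvocation",
    __base__=ExampleInvocationBase,
    type=(str, "example_invocation"),
)
```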
**Minor Changes**
There are a number of minor changes around the pydantic v2 models API.
**Protected `model_` Namespace**
All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_".
Fortunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must set the protected namespaces to an empty tuple.
```py
class IPAdapterModelField(BaseModel):
model_name: str = Field(description="Name of the IP-Adapter model")
base_model: BaseModelType = Field(description="Base model")
model_config = ConfigDict(protected_namespaces=())
```
**Model Serialization**
Pydantic models no longer have `Model.dict()` or `Model.json()`.
Instead, we use `Model.model_dump()` or `Model.model_dump_json()`.
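A quick sketch of the v2 serialization calls:
```py
from pydantic import BaseModel

class Point(BaseModel):
    x: int
    y: int

p = Point(x=1, y=2)
p.model_dump()       # {'x': 1, 'y': 2}
p.model_dump_json()  # '{"x":1,"y":2}'
```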
**Model Deserialization**
Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions.
Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model.
```py
adapter_graph = TypeAdapter(Graph)
deserialized_graph_from_json = adapter_graph.validate_json(graph_json)
deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict)
```
**Field Customisation**
Pydantic `Field`s no longer accept arbitrary args.
Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field.
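For example (the `ui_hidden` key is purely illustrative):
```py
from pydantic import BaseModel, Field

class ExampleModel(BaseModel):
    # Arbitrary schema metadata now lives under json_schema_extra.
    image_name: str = Field(description="Image name", json_schema_extra={"ui_hidden": True})
```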
**Schema Customisation**
FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec.
This necessitates two changes:
- Our schema customization logic has been revised
- Schema parsing to build node templates has been revised
The specifics aren't important, but this does present additional surface area for bugs.
**Performance Improvements**
Pydantic v2 is a full rewrite with a rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very very often - a couple times per node.
I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very large graphs - like with massive iterators - are much, much faster.
2023-09-24 08:11:07 +00:00
|
|
|
@invocation(
|
|
|
|
"lresize",
|
|
|
|
title="Resize Latents",
|
|
|
|
tags=["latents", "resize"],
|
|
|
|
category="latents",
|
2024-03-19 11:08:16 +00:00
|
|
|
version="1.0.2",
|
2023-09-24 08:11:07 +00:00
|
|
|
)
|
2023-04-24 12:07:53 +00:00
|
|
|
class ResizeLatentsInvocation(BaseInvocation):
|
2023-04-26 23:59:22 +00:00
|
|
|
"""Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8."""
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
latents: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
2023-07-28 13:46:44 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
width: int = InputField(
|
|
|
|
ge=64,
|
2023-11-27 16:12:15 +00:00
|
|
|
multiple_of=LATENT_SCALE_FACTOR,
|
2023-08-14 03:23:09 +00:00
|
|
|
description=FieldDescriptions.width,
|
|
|
|
)
|
|
|
|
height: int = InputField(
|
|
|
|
ge=64,
|
2023-11-27 16:12:15 +00:00
|
|
|
multiple_of=LATENT_SCALE_FACTOR,
|
2023-08-14 03:23:09 +00:00
|
|
|
description=FieldDescriptions.height,
|
|
|
|
)
|
|
|
|
mode: LATENTS_INTERPOLATION_MODE = InputField(default="bilinear", description=FieldDescriptions.interp_mode)
|
|
|
|
antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2024-02-05 23:22:58 +00:00
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
2024-02-07 12:30:46 +00:00
|
|
|
latents = context.tensors.load(self.latents.latents_name)
|
2024-04-15 13:12:49 +00:00
|
|
|
device = TorchDevice.choose_torch_device()
|
2023-07-18 13:20:25 +00:00
|
|
|
|
2023-04-24 12:07:53 +00:00
|
|
|
resized_latents = torch.nn.functional.interpolate(
|
2023-07-28 13:46:44 +00:00
|
|
|
latents.to(device),
|
2023-11-27 16:12:15 +00:00
|
|
|
size=(self.height // LATENT_SCALE_FACTOR, self.width // LATENT_SCALE_FACTOR),
|
2023-07-28 13:46:44 +00:00
|
|
|
mode=self.mode,
|
|
|
|
antialias=self.antialias if self.mode in ["bilinear", "bicubic"] else False,
|
2023-07-05 17:00:43 +00:00
|
|
|
)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
|
2023-07-18 13:20:25 +00:00
|
|
|
resized_latents = resized_latents.to("cpu")
|
2024-04-15 13:12:49 +00:00
|
|
|
|
|
|
|
TorchDevice.empty_cache()
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2024-02-07 06:41:23 +00:00
|
|
|
name = context.tensors.save(tensor=resized_latents)
|
2024-01-13 12:23:16 +00:00
|
|
|
return LatentsOutput.build(latents_name=name, latents=resized_latents, seed=self.latents.seed)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
|
2023-09-24 08:11:07 +00:00
|
|
|
@invocation(
|
|
|
|
"lscale",
|
|
|
|
title="Scale Latents",
|
|
|
|
tags=["latents", "resize"],
|
|
|
|
category="latents",
|
2024-03-19 11:08:16 +00:00
|
|
|
version="1.0.2",
|
2023-09-24 08:11:07 +00:00
|
|
|
)
|
2023-04-24 12:07:53 +00:00
|
|
|
class ScaleLatentsInvocation(BaseInvocation):
|
|
|
|
"""Scales latents by a given factor."""
|
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
latents: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
2023-07-28 13:46:44 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
scale_factor: float = InputField(gt=0, description=FieldDescriptions.scale_factor)
|
|
|
|
mode: LATENTS_INTERPOLATION_MODE = InputField(default="bilinear", description=FieldDescriptions.interp_mode)
|
|
|
|
antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2024-02-05 23:22:58 +00:00
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
2024-02-07 12:30:46 +00:00
|
|
|
latents = context.tensors.load(self.latents.latents_name)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2024-04-15 13:12:49 +00:00
|
|
|
device = TorchDevice.choose_torch_device()
|
2023-07-18 13:20:25 +00:00
|
|
|
|
2023-04-24 12:07:53 +00:00
|
|
|
# resizing
|
|
|
|
resized_latents = torch.nn.functional.interpolate(
|
2023-07-28 13:46:44 +00:00
|
|
|
latents.to(device),
|
|
|
|
scale_factor=self.scale_factor,
|
|
|
|
mode=self.mode,
|
|
|
|
antialias=self.antialias if self.mode in ["bilinear", "bicubic"] else False,
|
2023-07-05 17:00:43 +00:00
|
|
|
)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
|
2023-07-18 13:20:25 +00:00
|
|
|
resized_latents = resized_latents.to("cpu")
|
2024-04-15 13:12:49 +00:00
|
|
|
TorchDevice.empty_cache()
|
2023-04-24 12:07:53 +00:00
|
|
|
|
2024-02-07 06:41:23 +00:00
|
|
|
name = context.tensors.save(tensor=resized_latents)
|
2024-01-13 12:23:16 +00:00
|
|
|
return LatentsOutput.build(latents_name=name, latents=resized_latents, seed=self.latents.seed)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
|
|
|
|
2023-09-04 08:11:56 +00:00
|
|
|
@invocation(
|
2023-09-24 08:11:07 +00:00
|
|
|
"i2l",
|
|
|
|
title="Image to Latents",
|
|
|
|
tags=["latents", "image", "vae", "i2l"],
|
|
|
|
category="latents",
|
2024-03-19 11:08:16 +00:00
|
|
|
version="1.0.2",
|
2023-09-04 08:11:56 +00:00
|
|
|
)
|
2023-05-13 13:08:03 +00:00
|
|
|
class ImageToLatentsInvocation(BaseInvocation):
|
2023-05-05 05:15:55 +00:00
|
|
|
"""Encodes an image into latents."""
|
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
image: ImageField = InputField(
|
|
|
|
description="The image to encode",
|
|
|
|
)
|
2024-03-06 08:42:47 +00:00
|
|
|
vae: VAEField = InputField(
|
2023-08-14 03:23:09 +00:00
|
|
|
description=FieldDescriptions.vae,
|
|
|
|
input=Input.Connection,
|
|
|
|
)
|
|
|
|
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
|
|
|
|
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
2023-08-18 01:07:40 +00:00
|
|
|
@staticmethod
|
2024-02-10 23:09:45 +00:00
|
|
|
def vae_encode(vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor) -> torch.Tensor:
|
2023-05-14 00:06:26 +00:00
|
|
|
with vae_info as vae:
|
2024-02-10 23:09:45 +00:00
|
|
|
assert isinstance(vae, torch.nn.Module)
|
2023-07-16 03:00:37 +00:00
|
|
|
orig_dtype = vae.dtype
|
2023-08-18 01:07:40 +00:00
|
|
|
if upcast:
|
2023-07-16 03:00:37 +00:00
|
|
|
vae.to(dtype=torch.float32)
|
|
|
|
|
2024-03-12 16:11:38 +00:00
|
|
|
use_torch_2_0_or_xformers = hasattr(vae.decoder, "mid_block") and isinstance(
|
2023-07-16 03:00:37 +00:00
|
|
|
vae.decoder.mid_block.attentions[0].processor,
|
|
|
|
(
|
|
|
|
AttnProcessor2_0,
|
|
|
|
XFormersAttnProcessor,
|
|
|
|
LoRAXFormersAttnProcessor,
|
|
|
|
LoRAAttnProcessor2_0,
|
|
|
|
),
|
|
|
|
)
|
|
|
|
# If xformers or torch 2.0 is used, the attention block does not need
|
|
|
|
# to be in float32, which can save lots of memory
|
|
|
|
if use_torch_2_0_or_xformers:
|
|
|
|
vae.post_quant_conv.to(orig_dtype)
|
|
|
|
vae.decoder.conv_in.to(orig_dtype)
|
|
|
|
vae.decoder.mid_block.to(orig_dtype)
|
2023-07-28 13:46:44 +00:00
|
|
|
# else:
|
2023-07-16 03:00:37 +00:00
|
|
|
# latents = latents.float()
|
|
|
|
|
|
|
|
else:
|
|
|
|
vae.to(dtype=torch.float16)
|
2023-07-28 13:46:44 +00:00
|
|
|
# latents = latents.half()
|
2023-07-16 03:00:37 +00:00
|
|
|
|
2023-09-13 16:40:06 +00:00
|
|
|
if tiled:
|
|
|
|
vae.enable_tiling()
|
|
|
|
else:
|
|
|
|
vae.disable_tiling()
|
2023-05-13 13:08:03 +00:00
|
|
|
|
2023-05-14 00:06:26 +00:00
|
|
|
# Encode the image into non-noised latents
|
|
|
|
image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
|
|
|
|
with torch.inference_mode():
|
2023-09-01 03:12:00 +00:00
|
|
|
latents = ImageToLatentsInvocation._encode_to_tensor(vae, image_tensor)
|
2023-05-14 00:06:26 +00:00
|
|
|
|
2023-07-20 15:54:51 +00:00
|
|
|
latents = vae.config.scaling_factor * latents
|
2023-07-16 03:00:37 +00:00
|
|
|
latents = latents.to(dtype=orig_dtype)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
2023-08-18 01:07:40 +00:00
|
|
|
return latents
|
|
|
|
|
|
|
|
@torch.no_grad()
|
2024-02-05 23:22:58 +00:00
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
2024-01-13 12:23:16 +00:00
|
|
|
image = context.images.get_pil(self.image.image_name)
|
2023-08-18 01:07:40 +00:00
|
|
|
|
2024-03-06 08:37:15 +00:00
|
|
|
vae_info = context.models.load(self.vae.vae)
|
2023-08-18 01:07:40 +00:00
|
|
|
|
|
|
|
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
|
|
|
|
if image_tensor.dim() == 3:
|
|
|
|
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
|
|
|
|
|
|
|
latents = self.vae_encode(vae_info, self.fp32, self.tiled, image_tensor)
|
|
|
|
|
2023-07-18 13:20:25 +00:00
|
|
|
latents = latents.to("cpu")
|
2024-02-07 06:41:23 +00:00
|
|
|
name = context.tensors.save(tensor=latents)
|
2024-01-13 12:23:16 +00:00
|
|
|
return LatentsOutput.build(latents_name=name, latents=latents, seed=None)
|
2023-08-18 02:59:31 +00:00
|
|
|
|
2023-08-18 21:05:12 +00:00
|
|
|
@singledispatchmethod
|
2023-09-01 03:12:00 +00:00
|
|
|
@staticmethod
|
|
|
|
def _encode_to_tensor(vae: AutoencoderKL, image_tensor: torch.FloatTensor) -> torch.FloatTensor:
|
2024-02-10 23:09:45 +00:00
|
|
|
assert isinstance(vae, torch.nn.Module)
|
2023-08-18 02:59:31 +00:00
|
|
|
image_tensor_dist = vae.encode(image_tensor).latent_dist
|
2024-02-10 23:09:45 +00:00
|
|
|
latents: torch.Tensor = image_tensor_dist.sample().to(
|
|
|
|
dtype=vae.dtype
|
|
|
|
) # FIXME: uses torch.randn. make reproducible!
|
2023-08-18 02:59:31 +00:00
|
|
|
return latents
|
|
|
|
|
2023-08-18 21:05:12 +00:00
|
|
|
@_encode_to_tensor.register
|
2023-09-01 03:12:00 +00:00
|
|
|
@staticmethod
|
|
|
|
def _(vae: AutoencoderTiny, image_tensor: torch.FloatTensor) -> torch.FloatTensor:
|
2024-02-10 23:09:45 +00:00
|
|
|
assert isinstance(vae, torch.nn.Module)
|
|
|
|
latents: torch.FloatTensor = vae.encode(image_tensor).latents
|
|
|
|
return latents
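# Note (added for clarity): _encode_to_tensor dispatches on the VAE class.
# AutoencoderKL samples from the encoder's posterior (latent_dist), while
# AutoencoderTiny has no latent_dist and returns latents directly.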
|
2023-08-25 22:21:47 +00:00
|
|
|
|
2023-08-20 18:49:18 +00:00
|
|
|
|
2023-09-24 08:11:07 +00:00
|
|
|
@invocation(
|
|
|
|
"lblend",
|
|
|
|
title="Blend Latents",
|
|
|
|
tags=["latents", "blend"],
|
|
|
|
category="latents",
|
2024-03-19 11:08:16 +00:00
|
|
|
version="1.0.2",
|
2023-09-24 08:11:07 +00:00
|
|
|
)
|
2023-08-20 18:49:18 +00:00
|
|
|
class BlendLatentsInvocation(BaseInvocation):
|
|
|
|
"""Blend two latents using a given alpha. Latents must have same size."""
|
|
|
|
|
|
|
|
latents_a: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
|
|
|
)
|
|
|
|
latents_b: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
|
|
|
)
|
|
|
|
alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
|
|
|
|
|
2024-02-05 23:22:58 +00:00
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
2024-02-07 12:30:46 +00:00
|
|
|
latents_a = context.tensors.load(self.latents_a.latents_name)
|
|
|
|
latents_b = context.tensors.load(self.latents_b.latents_name)
|
2023-08-20 18:49:18 +00:00
|
|
|
|
|
|
|
if latents_a.shape != latents_b.shape:
|
2023-11-10 23:51:21 +00:00
|
|
|
raise Exception("Latents to blend must be the same size.")
|
2023-08-20 18:49:18 +00:00
|
|
|
|
2024-04-15 13:12:49 +00:00
|
|
|
device = TorchDevice.choose_torch_device()
|
2023-08-20 18:49:18 +00:00
|
|
|
|
2024-02-10 23:09:45 +00:00
|
|
|
def slerp(
|
|
|
|
t: Union[float, npt.NDArray[Any]], # FIXME: maybe use np.float32 here?
|
|
|
|
v0: Union[torch.Tensor, npt.NDArray[Any]],
|
|
|
|
v1: Union[torch.Tensor, npt.NDArray[Any]],
|
|
|
|
DOT_THRESHOLD: float = 0.9995,
|
|
|
|
) -> Union[torch.Tensor, npt.NDArray[Any]]:
|
2023-08-20 18:49:18 +00:00
|
|
|
"""
|
|
|
|
Spherical linear interpolation
|
|
|
|
Args:
|
|
|
|
t (float/np.ndarray): Float value between 0.0 and 1.0
|
|
|
|
v0 (np.ndarray): Starting vector
|
|
|
|
v1 (np.ndarray): Final vector
|
|
|
|
DOT_THRESHOLD (float): Threshold for considering the two vectors as
|
|
|
|
collinear. Not recommended to alter this.
|
|
|
|
Returns:
|
|
|
|
v2 (np.ndarray): Interpolation vector between v0 and v1
|
|
|
|
"""
|
|
|
|
inputs_are_torch = False
|
|
|
|
if not isinstance(v0, np.ndarray):
|
|
|
|
inputs_are_torch = True
|
|
|
|
v0 = v0.detach().cpu().numpy()
|
|
|
|
if not isinstance(v1, np.ndarray):
|
|
|
|
inputs_are_torch = True
|
|
|
|
v1 = v1.detach().cpu().numpy()
|
|
|
|
|
|
|
|
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
|
|
|
if np.abs(dot) > DOT_THRESHOLD:
|
|
|
|
v2 = (1 - t) * v0 + t * v1
|
|
|
|
else:
|
|
|
|
theta_0 = np.arccos(dot)
|
|
|
|
sin_theta_0 = np.sin(theta_0)
|
|
|
|
theta_t = theta_0 * t
|
|
|
|
sin_theta_t = np.sin(theta_t)
|
|
|
|
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
|
|
|
s1 = sin_theta_t / sin_theta_0
|
|
|
|
v2 = s0 * v0 + s1 * v1
|
|
|
|
|
|
|
|
if inputs_are_torch:
|
2024-02-10 23:09:45 +00:00
|
|
|
v2_torch: torch.Tensor = torch.from_numpy(v2).to(device)
|
|
|
|
return v2_torch
|
|
|
|
else:
|
|
|
|
assert isinstance(v2, np.ndarray)
|
|
|
|
return v2
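# Worked example (illustrative, not part of the runtime path):
# slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# -> theta_0 = pi/2 and s0 = s1 = sin(pi/4) / sin(pi/2) ~= 0.7071,
# giving [0.7071, 0.7071]: the midpoint on the arc, not on the chord.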
|
2023-08-20 18:49:18 +00:00
|
|
|
|
|
|
|
# blend
|
2024-02-10 23:09:45 +00:00
|
|
|
bl = slerp(self.alpha, latents_a, latents_b)
|
|
|
|
assert isinstance(bl, torch.Tensor)
|
|
|
|
blended_latents: torch.Tensor = bl # for type checking convenience
|
2023-08-20 18:49:18 +00:00
|
|
|
|
|
|
|
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
|
|
|
|
blended_latents = blended_latents.to("cpu")
|
2024-04-15 13:12:49 +00:00
|
|
|
|
|
|
|
TorchDevice.empty_cache()
|
2023-08-20 18:49:18 +00:00
|
|
|
|
2024-02-07 06:41:23 +00:00
|
|
|
name = context.tensors.save(tensor=blended_latents)
|
2024-01-13 12:23:16 +00:00
|
|
|
return LatentsOutput.build(latents_name=name, latents=blended_latents)
|
2023-11-27 16:02:10 +00:00
|
|
|
|
|
|
|
|
2023-11-27 17:05:55 +00:00
|
|
|
# The Crop Latents node was copied from @skunkworxdark's implementation here:
|
|
|
|
# https://github.com/skunkworxdark/XYGrid_nodes/blob/74647fa9c1fa57d317a94bd43ca689af7f0aae5e/images_to_grids.py#L1117C1-L1167C80
|
2023-11-27 16:02:10 +00:00
|
|
|
@invocation(
|
2023-11-27 17:05:55 +00:00
|
|
|
"crop_latents",
|
2023-11-27 16:02:10 +00:00
|
|
|
title="Crop Latents",
|
|
|
|
tags=["latents", "crop"],
|
|
|
|
category="latents",
|
2024-03-19 11:08:16 +00:00
|
|
|
version="1.0.2",
|
2023-11-27 16:02:10 +00:00
|
|
|
)
|
2023-11-27 17:05:55 +00:00
|
|
|
# TODO(ryand): Named `CropLatentsCoreInvocation` to prevent a conflict with custom node `CropLatentsInvocation`.
|
|
|
|
# Currently, if the class names conflict then 'GET /openapi.json' fails.
|
|
|
|
class CropLatentsCoreInvocation(BaseInvocation):
|
2023-11-27 16:30:00 +00:00
|
|
|
"""Crops a latent-space tensor to a box specified in image-space. The box dimensions and coordinates must be
|
|
|
|
divisible by the latent scale factor of 8.
|
|
|
|
"""
|
2023-11-27 16:02:10 +00:00
|
|
|
|
|
|
|
latents: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
|
|
|
)
|
2023-11-30 15:44:21 +00:00
|
|
|
x: int = InputField(
|
2023-11-27 16:02:10 +00:00
|
|
|
ge=0,
|
2023-11-27 16:12:15 +00:00
|
|
|
multiple_of=LATENT_SCALE_FACTOR,
|
2023-11-27 16:30:00 +00:00
|
|
|
description="The left x coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space.",
|
2023-11-27 16:02:10 +00:00
|
|
|
)
|
2023-11-30 15:44:21 +00:00
|
|
|
y: int = InputField(
|
2023-11-27 16:02:10 +00:00
|
|
|
ge=0,
|
2023-11-27 16:12:15 +00:00
|
|
|
multiple_of=LATENT_SCALE_FACTOR,
|
2023-11-27 16:30:00 +00:00
|
|
|
description="The top y coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space.",
|
2023-11-27 16:02:10 +00:00
|
|
|
)
|
2023-11-29 15:23:55 +00:00
|
|
|
width: int = InputField(
|
|
|
|
ge=1,
|
|
|
|
multiple_of=LATENT_SCALE_FACTOR,
|
|
|
|
description="The width (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space.",
|
|
|
|
)
|
|
|
|
height: int = InputField(
|
|
|
|
ge=1,
|
|
|
|
multiple_of=LATENT_SCALE_FACTOR,
|
|
|
|
description="The height (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space.",
|
|
|
|
)
|
2023-11-27 16:02:10 +00:00
|
|
|
|
2024-02-05 23:22:58 +00:00
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
2024-02-07 12:30:46 +00:00
|
|
|
latents = context.tensors.load(self.latents.latents_name)
|
2023-11-27 16:02:10 +00:00
|
|
|
|
2023-11-30 15:44:21 +00:00
|
|
|
x1 = self.x // LATENT_SCALE_FACTOR
|
|
|
|
y1 = self.y // LATENT_SCALE_FACTOR
|
2023-11-27 16:12:15 +00:00
|
|
|
x2 = x1 + (self.width // LATENT_SCALE_FACTOR)
|
|
|
|
y2 = y1 + (self.height // LATENT_SCALE_FACTOR)
|
2023-11-27 16:02:10 +00:00
|
|
|
|
2023-11-27 17:05:55 +00:00
|
|
|
cropped_latents = latents[..., y1:y2, x1:x2]
|
2023-11-27 16:02:10 +00:00
|
|
|
|
2024-02-07 06:41:23 +00:00
|
|
|
name = context.tensors.save(tensor=cropped_latents)
|
2023-11-27 16:02:10 +00:00
|
|
|
|
2024-01-13 12:23:16 +00:00
|
|
|
return LatentsOutput.build(latents_name=name, latents=cropped_latents)
|
2024-01-16 13:50:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
@invocation_output("ideal_size_output")
|
|
|
|
class IdealSizeOutput(BaseInvocationOutput):
|
|
|
|
"""Base class for invocations that output an image"""
|
|
|
|
|
2024-01-17 04:16:46 +00:00
|
|
|
width: int = OutputField(description="The ideal width of the image (in pixels)")
|
|
|
|
height: int = OutputField(description="The ideal height of the image (in pixels)")
|
2024-01-16 13:50:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
@invocation(
|
|
|
|
"ideal_size",
|
|
|
|
title="Ideal Size",
|
|
|
|
tags=["latents", "math", "ideal_size"],
|
2024-03-19 11:08:16 +00:00
|
|
|
version="1.0.3",
|
2024-01-16 13:50:36 +00:00
|
|
|
)
|
|
|
|
class IdealSizeInvocation(BaseInvocation):
|
|
|
|
"""Calculates the ideal size for generation to avoid duplication"""
|
|
|
|
|
2024-01-17 04:16:46 +00:00
|
|
|
width: int = InputField(default=1024, description="Final image width")
|
|
|
|
height: int = InputField(default=576, description="Final image height")
|
|
|
|
unet: UNetField = InputField(default=None, description=FieldDescriptions.unet)
|
|
|
|
multiplier: float = InputField(
|
|
|
|
default=1.0,
|
|
|
|
description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large)",
|
|
|
|
)
|
2024-01-16 13:50:36 +00:00
|
|
|
|
2024-02-10 23:09:45 +00:00
|
|
|
def trim_to_multiple_of(self, *args: int, multiple_of: int = LATENT_SCALE_FACTOR) -> Tuple[int, ...]:
|
2024-01-16 13:50:36 +00:00
|
|
|
return tuple((x - x % multiple_of) for x in args)
|
|
|
|
|
|
|
|
def invoke(self, context: InvocationContext) -> IdealSizeOutput:
|
2024-04-04 21:38:40 +00:00
|
|
|
unet_config = context.models.get_config(self.unet.unet.key)
|
2024-01-16 13:50:36 +00:00
|
|
|
aspect = self.width / self.height
|
2024-02-10 23:09:45 +00:00
|
|
|
dimension: float = 512
|
|
|
|
if unet_config.base == BaseModelType.StableDiffusion2:
|
2024-01-16 13:50:36 +00:00
|
|
|
dimension = 768
|
2024-02-10 23:09:45 +00:00
|
|
|
elif unet_config.base == BaseModelType.StableDiffusionXL:
|
2024-01-16 13:50:36 +00:00
|
|
|
dimension = 1024
|
|
|
|
dimension = dimension * self.multiplier
|
|
|
|
min_dimension = math.floor(dimension * 0.5)
|
|
|
|
model_area = dimension * dimension # hardcoded for now since all models are trained on square images
|
|
|
|
|
|
|
|
if aspect > 1.0:
|
|
|
|
init_height = max(min_dimension, math.sqrt(model_area / aspect))
|
|
|
|
init_width = init_height * aspect
|
|
|
|
else:
|
|
|
|
init_width = max(min_dimension, math.sqrt(model_area * aspect))
|
|
|
|
init_height = init_width / aspect
|
|
|
|
|
|
|
|
scaled_width, scaled_height = self.trim_to_multiple_of(
|
|
|
|
math.floor(init_width),
|
|
|
|
math.floor(init_height),
|
|
|
|
)
|
|
|
|
|
|
|
|
return IdealSizeOutput(width=scaled_width, height=scaled_height)
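# Worked example (a sketch assuming an SDXL base, so dimension = 1024, and
# the default multiplier of 1.0): width=1920, height=1080 gives
# aspect ~= 1.778 and model_area = 1024 * 1024, so
# init_height = sqrt(model_area / aspect) = 768 and init_width ~= 1365.3;
# trimming both to multiples of LATENT_SCALE_FACTOR yields (1360, 768).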
|