mirror of https://github.com/invoke-ai/InvokeAI
c238a7f18b
Upgrade pydantic and fastapi to latest.

- pydantic~=2.4.2
- fastapi~=0.103.2
- fastapi-events~=0.9.1

**Big Changes**

There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialize and deserialize models, but there are a few more complex changes.

**Invocations**

The biggest change relates to invocation creation, instantiation and validation. Because pydantic v2 moves all validation logic into the Rust `pydantic-core`, we may no longer directly stick our fingers into the validation pie. Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`.

With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the Rust validation logic - you have to rebuild the model. This causes problems with concurrent access to the invocation classes and is not a free operation.

This logic has been totally refactored so that we no longer need to change the model at all. The details are in `baseinvocation.py`, in the `InputField` function and the `BaseInvocation.invoke_internal()` method. In the end, this implementation is cleaner.

**Invocation Fields**

In pydantic v2, you can no longer directly add or remove fields from a model. Previously, we did this to add the `type` field to invocations.

**Invocation Decorators**

With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper. A similar technique is used for `invocation_output()`. (A minimal sketch of the technique appears at the end of this message.)

**Minor Changes**

There are a number of minor changes around the pydantic v2 models API.

**Protected `model_` Namespace**

All of the pydantic-provided methods and attributes on models are prefixed with `model_`, and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_". Fortunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must set the protected namespaces to an empty tuple.

```py
class IPAdapterModelField(BaseModel):
    model_name: str = Field(description="Name of the IP-Adapter model")
    base_model: BaseModelType = Field(description="Base model")

    model_config = ConfigDict(protected_namespaces=())
```

**Model Serialization**

Pydantic models no longer have `Model.dict()` or `Model.json()`. Instead, we use `Model.model_dump()` or `Model.model_dump_json()`.

**Model Deserialization**

Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and the `parse_raw_as()` and `parse_obj_as()` functions are gone. Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model.

```py
adapter_graph = TypeAdapter(Graph)
deserialized_graph_from_json = adapter_graph.validate_json(graph_json)
deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict)
```

**Field Customisation**

Pydantic `Field`s no longer accept arbitrary args. Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field, as sketched below.
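For example, a minimal sketch of moving an extra, non-pydantic arg into `json_schema_extra` (the `ui_hidden` key here is illustrative, not necessarily the exact metadata we use):

```py
from pydantic import BaseModel, Field


class ExampleModel(BaseModel):
    # Arbitrary extra args must now be nested under json_schema_extra.
    value: int = Field(default=0, description="A value", json_schema_extra={"ui_hidden": True})
```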
**Schema Customisation**

FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec. This necessitates two changes:

- Our schema customization logic has been revised
- Schema parsing to build node templates has been revised

The specifics aren't important, but this does present additional surface area for bugs.

**Performance Improvements**

Pydantic v2 is a full rewrite with a Rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x, depending on the task). We'll notice this most during serialization and deserialization of sessions/graphs, which happens very, very often - a couple of times per node. I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very large graphs - like those with massive iterators - are much, much faster.
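A toy harness one could use to sanity-check the validation speed (the `Graph` and `Node` models here are stand-ins, not InvokeAI's actual graph types):

```py
import timeit

from pydantic import BaseModel, TypeAdapter


class Node(BaseModel):
    id: str
    type: str


class Graph(BaseModel):
    nodes: list[Node]


adapter = TypeAdapter(Graph)
payload = Graph(nodes=[Node(id=str(i), type="noop") for i in range(1000)]).model_dump_json()

# Time the JSON -> model validation hot path, which runs a couple of times per node.
print(timeit.timeit(lambda: adapter.validate_json(payload), number=100))
```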
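And the promised sketch of the `create_model()` technique from the `invocation()` decorator section - a minimal, hypothetical version, not the real `baseinvocation.py` implementation:

```py
from typing import Literal

from pydantic import BaseModel, Field, create_model


class ExampleInvocationBase(BaseModel):
    """Stand-in for BaseInvocation."""


def example_invocation(invocation_type: str):
    """Rebuild the decorated class with a literal `type` field, since fields
    can no longer be added to a pydantic v2 model after it is created."""

    def wrapper(cls: type[ExampleInvocationBase]) -> type[ExampleInvocationBase]:
        # create_model builds a brand-new model inheriting from cls, with `type` added.
        type_field = (Literal[invocation_type], Field(default=invocation_type))
        return create_model(cls.__name__, __base__=cls, type=type_field)

    return wrapper


@example_invocation("esrgan_example")
class ExampleUpscale(ExampleInvocationBase):
    image_name: str = "input.png"


print(ExampleUpscale().type)  # -> "esrgan_example"
```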
134 lines
4.7 KiB
Python
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) & the InvokeAI Team
from pathlib import Path
from typing import Literal

import cv2 as cv
import numpy as np
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from PIL import Image
from pydantic import ConfigDict
from realesrgan import RealESRGANer

from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.backend.util.devices import choose_torch_device

from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation

# TODO: Populate this from disk?
# TODO: Use model manager to load?
ESRGAN_MODELS = Literal[
    "RealESRGAN_x4plus.pth",
    "RealESRGAN_x4plus_anime_6B.pth",
    "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
    "RealESRGAN_x2plus.pth",
]

if choose_torch_device() == torch.device("mps"):
    from torch import mps


@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.1.0")
class ESRGANInvocation(BaseInvocation):
    """Upscales an image using RealESRGAN."""

    image: ImageField = InputField(description="The input image")
    model_name: ESRGAN_MODELS = InputField(default="RealESRGAN_x4plus.pth", description="The Real-ESRGAN model to use")
    tile_size: int = InputField(
        default=400, ge=0, description="Tile size for tiled ESRGAN upscaling (0=tiling disabled)"
    )

    model_config = ConfigDict(protected_namespaces=())

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get_pil_image(self.image.image_name)
        models_path = context.services.configuration.models_path

        rrdbnet_model = None
        netscale = None
        esrgan_model_path = None

        if self.model_name in [
            "RealESRGAN_x4plus.pth",
            "ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
        ]:
            # x4 RRDBNet model
            rrdbnet_model = RRDBNet(
                num_in_ch=3,
                num_out_ch=3,
                num_feat=64,
                num_block=23,
                num_grow_ch=32,
                scale=4,
            )
            netscale = 4
        elif self.model_name in ["RealESRGAN_x4plus_anime_6B.pth"]:
            # x4 RRDBNet model, 6 blocks
            rrdbnet_model = RRDBNet(
                num_in_ch=3,
                num_out_ch=3,
                num_feat=64,
                num_block=6,  # 6 blocks
                num_grow_ch=32,
                scale=4,
            )
            netscale = 4
        elif self.model_name in ["RealESRGAN_x2plus.pth"]:
            # x2 RRDBNet model
            rrdbnet_model = RRDBNet(
                num_in_ch=3,
                num_out_ch=3,
                num_feat=64,
                num_block=23,
                num_grow_ch=32,
                scale=2,
            )
            netscale = 2
        else:
            msg = f"Invalid RealESRGAN model: {self.model_name}"
            context.services.logger.error(msg)
            raise ValueError(msg)

        esrgan_model_path = Path(f"core/upscaling/realesrgan/{self.model_name}")

        upsampler = RealESRGANer(
            scale=netscale,
            model_path=str(models_path / esrgan_model_path),
            model=rrdbnet_model,
            half=False,
            tile=self.tile_size,
        )

        # prepare image - Real-ESRGAN uses cv2 internally, and cv2 uses BGR vs RGB for PIL
        # TODO: This strips the alpha... is that okay?
        cv_image = cv.cvtColor(np.array(image.convert("RGB")), cv.COLOR_RGB2BGR)

        # We can pass an `outscale` value here, but it just resizes the image by that factor after
        # upscaling, so it's kinda pointless for our purposes. If you want something other than 4x
        # upscaling, you'll need to add a resize node after this one.
        upscaled_image, img_mode = upsampler.enhance(cv_image)

        # back to PIL
        pil_image = Image.fromarray(cv.cvtColor(upscaled_image, cv.COLOR_BGR2RGB)).convert("RGBA")

        torch.cuda.empty_cache()
        if choose_torch_device() == torch.device("mps"):
            mps.empty_cache()

        image_dto = context.services.images.create(
            image=pil_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
            workflow=self.workflow,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )