mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
78377469db
* Bump diffusers to 0.21.2. * Add T2IAdapterInvocation boilerplate. * Add T2I-Adapter model to model-management. * (minor) Tidy prepare_control_image(...). * Add logic to run the T2I-Adapter models at the start of the DenoiseLatentsInvocation. * Add logic for applying T2I-Adapter weights and accumulating. * Add T2IAdapter to MODEL_CLASSES map. * yarn typegen * Add model probes for T2I-Adapter models. * Add all of the frontend boilerplate required to use T2I-Adapter in the nodes editor. * Add T2IAdapterModel.convert_if_required(...). * Fix errors in T2I-Adapter input image sizing logic. * Fix bug with handling of multiple T2I-Adapters. * black / flake8 * Fix typo * yarn build * Add num_channels param to prepare_control_image(...). * Link to upstream diffusers bugfix PR that currently requires a workaround. * feat: Add Color Map Preprocessor Needed for the color T2I Adapter * feat: Add Color Map Preprocessor to Linear UI * Revert "feat: Add Color Map Preprocessor" This reverts commit a1119a00bf
. * Revert "feat: Add Color Map Preprocessor to Linear UI" This reverts commit bd8a9b82d8
. * Fix T2I-Adapter field rendering in workflow editor. * yarn build, yarn typegen --------- Co-authored-by: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
43 lines
1.3 KiB
Python
43 lines
1.3 KiB
Python
import numpy as np
|
|
import pytest
|
|
from PIL import Image
|
|
|
|
from invokeai.app.util.controlnet_utils import prepare_control_image
|
|
|
|
|
|
@pytest.mark.parametrize("num_channels", [1, 2, 3])
def test_prepare_control_image_num_channels(num_channels):
    """Verify that prepare_control_image(...) produces a tensor with the requested
    `num_channels` for each supported channel count.
    """
    # A black 256x256 RGB image is enough here; only the output channel count is under test.
    input_image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))

    result = prepare_control_image(
        image=input_image,
        width=256,
        height=256,
        num_channels=num_channels,
        device="cpu",
        do_classifier_free_guidance=False,
    )

    # Expect a single-item batch with the requested channel count and unchanged spatial size.
    assert result.shape == (1, num_channels, 256, 256)
|
|
|
|
|
|
@pytest.mark.parametrize("num_channels", [0, 4])
def test_prepare_control_image_num_channels_too_large(num_channels):
    """Verify that prepare_control_image(...) raises a ValueError when `num_channels`
    falls outside the supported range.
    """
    # Same trivial black RGB input as the happy-path test; only validation is exercised.
    input_image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))

    # Out-of-range channel counts must be rejected rather than silently coerced.
    with pytest.raises(ValueError):
        _ = prepare_control_image(
            image=input_image,
            width=256,
            height=256,
            num_channels=num_channels,
            device="cpu",
            do_classifier_free_guidance=False,
        )