Tidy names and locations of modules
- Rename old "model_management" directory to "model_management_OLD" in order to catch dangling references to original model manager. - Caught and fixed most dangling references (still checking) - Rename lora, textual_inversion and model_patcher modules - Introduce a RawModel base class to simplfy the Union returned by the model loaders. - Tidy up the model manager 2-related tests. Add useful fixtures, and a finalizer to the queue and installer fixtures that will stop the services and release threads.
Committed by: psychedelicious
Parent: ba1f8878dd
Commit: 2ad0752582
tests/backend/model_manager/util/test_hf_model_select.py (new file, 241 lines)
@@ -0,0 +1,241 @@
from pathlib import Path
from typing import List

import pytest

from invokeai.backend.model_manager.config import ModelRepoVariant
from invokeai.backend.model_manager.util.select_hf_files import filter_files


# This is the full list of model paths returned by the HF API for sdxl-base
@pytest.fixture
def sdxl_base_files() -> List[Path]:
    return [
        Path(x)
        for x in [
            ".gitattributes",
            "01.png",
            "LICENSE.md",
            "README.md",
            "comparison.png",
            "model_index.json",
            "pipeline.png",
            "scheduler/scheduler_config.json",
            "sd_xl_base_1.0.safetensors",
            "sd_xl_base_1.0_0.9vae.safetensors",
            "sd_xl_offset_example-lora_1.0.safetensors",
            "text_encoder/config.json",
            "text_encoder/flax_model.msgpack",
            "text_encoder/model.fp16.safetensors",
            "text_encoder/model.onnx",
            "text_encoder/model.safetensors",
            "text_encoder/openvino_model.bin",
            "text_encoder/openvino_model.xml",
            "text_encoder_2/config.json",
            "text_encoder_2/flax_model.msgpack",
            "text_encoder_2/model.fp16.safetensors",
            "text_encoder_2/model.onnx",
            "text_encoder_2/model.onnx_data",
            "text_encoder_2/model.safetensors",
            "text_encoder_2/openvino_model.bin",
            "text_encoder_2/openvino_model.xml",
            "tokenizer/merges.txt",
            "tokenizer/special_tokens_map.json",
            "tokenizer/tokenizer_config.json",
            "tokenizer/vocab.json",
            "tokenizer_2/merges.txt",
            "tokenizer_2/special_tokens_map.json",
            "tokenizer_2/tokenizer_config.json",
            "tokenizer_2/vocab.json",
            "unet/config.json",
            "unet/diffusion_flax_model.msgpack",
            "unet/diffusion_pytorch_model.fp16.safetensors",
            "unet/diffusion_pytorch_model.safetensors",
            "unet/model.onnx",
            "unet/model.onnx_data",
            "unet/openvino_model.bin",
            "unet/openvino_model.xml",
            "vae/config.json",
            "vae/diffusion_flax_model.msgpack",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "vae/diffusion_pytorch_model.safetensors",
            "vae_1_0/config.json",
            "vae_1_0/diffusion_pytorch_model.fp16.safetensors",
            "vae_1_0/diffusion_pytorch_model.safetensors",
            "vae_decoder/config.json",
            "vae_decoder/model.onnx",
            "vae_decoder/openvino_model.bin",
            "vae_decoder/openvino_model.xml",
            "vae_encoder/config.json",
            "vae_encoder/model.onnx",
            "vae_encoder/openvino_model.bin",
            "vae_encoder/openvino_model.xml",
        ]
    ]


# These are what we expect to get when various diffusers variants are requested
@pytest.mark.parametrize(
    "variant,expected_list",
    [
        (
            None,
            [
                "model_index.json",
                "scheduler/scheduler_config.json",
                "text_encoder/config.json",
                "text_encoder/model.safetensors",
                "text_encoder_2/config.json",
                "text_encoder_2/model.safetensors",
                "tokenizer/merges.txt",
                "tokenizer/special_tokens_map.json",
                "tokenizer/tokenizer_config.json",
                "tokenizer/vocab.json",
                "tokenizer_2/merges.txt",
                "tokenizer_2/special_tokens_map.json",
                "tokenizer_2/tokenizer_config.json",
                "tokenizer_2/vocab.json",
                "unet/config.json",
                "unet/diffusion_pytorch_model.safetensors",
                "vae/config.json",
                "vae/diffusion_pytorch_model.safetensors",
                "vae_1_0/config.json",
                "vae_1_0/diffusion_pytorch_model.safetensors",
            ],
        ),
        (
            ModelRepoVariant.DEFAULT,
            [
                "model_index.json",
                "scheduler/scheduler_config.json",
                "text_encoder/config.json",
                "text_encoder/model.safetensors",
                "text_encoder_2/config.json",
                "text_encoder_2/model.safetensors",
                "tokenizer/merges.txt",
                "tokenizer/special_tokens_map.json",
                "tokenizer/tokenizer_config.json",
                "tokenizer/vocab.json",
                "tokenizer_2/merges.txt",
                "tokenizer_2/special_tokens_map.json",
                "tokenizer_2/tokenizer_config.json",
                "tokenizer_2/vocab.json",
                "unet/config.json",
                "unet/diffusion_pytorch_model.safetensors",
                "vae/config.json",
                "vae/diffusion_pytorch_model.safetensors",
                "vae_1_0/config.json",
                "vae_1_0/diffusion_pytorch_model.safetensors",
            ],
        ),
        (
            ModelRepoVariant.OPENVINO,
            [
                "model_index.json",
                "scheduler/scheduler_config.json",
                "text_encoder/config.json",
                "text_encoder/openvino_model.bin",
                "text_encoder/openvino_model.xml",
                "text_encoder_2/config.json",
                "text_encoder_2/openvino_model.bin",
                "text_encoder_2/openvino_model.xml",
                "tokenizer/merges.txt",
                "tokenizer/special_tokens_map.json",
                "tokenizer/tokenizer_config.json",
                "tokenizer/vocab.json",
                "tokenizer_2/merges.txt",
                "tokenizer_2/special_tokens_map.json",
                "tokenizer_2/tokenizer_config.json",
                "tokenizer_2/vocab.json",
                "unet/config.json",
                "unet/openvino_model.bin",
                "unet/openvino_model.xml",
                "vae_decoder/config.json",
                "vae_decoder/openvino_model.bin",
                "vae_decoder/openvino_model.xml",
                "vae_encoder/config.json",
                "vae_encoder/openvino_model.bin",
                "vae_encoder/openvino_model.xml",
            ],
        ),
        (
            ModelRepoVariant.FP16,
            [
                "model_index.json",
                "scheduler/scheduler_config.json",
                "text_encoder/config.json",
                "text_encoder/model.fp16.safetensors",
                "text_encoder_2/config.json",
                "text_encoder_2/model.fp16.safetensors",
                "tokenizer/merges.txt",
                "tokenizer/special_tokens_map.json",
                "tokenizer/tokenizer_config.json",
                "tokenizer/vocab.json",
                "tokenizer_2/merges.txt",
                "tokenizer_2/special_tokens_map.json",
                "tokenizer_2/tokenizer_config.json",
                "tokenizer_2/vocab.json",
                "unet/config.json",
                "unet/diffusion_pytorch_model.fp16.safetensors",
                "vae/config.json",
                "vae/diffusion_pytorch_model.fp16.safetensors",
                "vae_1_0/config.json",
                "vae_1_0/diffusion_pytorch_model.fp16.safetensors",
            ],
        ),
        (
            ModelRepoVariant.ONNX,
            [
                "model_index.json",
                "scheduler/scheduler_config.json",
                "text_encoder/config.json",
                "text_encoder/model.onnx",
                "text_encoder_2/config.json",
                "text_encoder_2/model.onnx",
                "text_encoder_2/model.onnx_data",
                "tokenizer/merges.txt",
                "tokenizer/special_tokens_map.json",
                "tokenizer/tokenizer_config.json",
                "tokenizer/vocab.json",
                "tokenizer_2/merges.txt",
                "tokenizer_2/special_tokens_map.json",
                "tokenizer_2/tokenizer_config.json",
                "tokenizer_2/vocab.json",
                "unet/config.json",
                "unet/model.onnx",
                "unet/model.onnx_data",
                "vae_decoder/config.json",
                "vae_decoder/model.onnx",
                "vae_encoder/config.json",
                "vae_encoder/model.onnx",
            ],
        ),
        (
            ModelRepoVariant.FLAX,
            [
                "model_index.json",
                "scheduler/scheduler_config.json",
                "text_encoder/config.json",
                "text_encoder/flax_model.msgpack",
                "text_encoder_2/config.json",
                "text_encoder_2/flax_model.msgpack",
                "tokenizer/merges.txt",
                "tokenizer/special_tokens_map.json",
                "tokenizer/tokenizer_config.json",
                "tokenizer/vocab.json",
                "tokenizer_2/merges.txt",
                "tokenizer_2/special_tokens_map.json",
                "tokenizer_2/tokenizer_config.json",
                "tokenizer_2/vocab.json",
                "unet/config.json",
                "unet/diffusion_flax_model.msgpack",
                "vae/config.json",
                "vae/diffusion_flax_model.msgpack",
            ],
        ),
    ],
)
def test_select(sdxl_base_files: List[Path], variant: ModelRepoVariant, expected_list: List[Path]) -> None:
    print(f"testing variant {variant}")
    filtered_files = filter_files(sdxl_base_files, variant)
    assert set(filtered_files) == {Path(x) for x in expected_list}
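A note on the fixture: the sdxl_base_files list is a hard-coded snapshot of the repository listing so the test runs offline. For reference, a roughly equivalent live listing could be pulled from the Hub with huggingface_hub; this is a sketch (it assumes the stabilityai/stable-diffusion-xl-base-1.0 repo id and network access), not part of the test.

from pathlib import Path

from huggingface_hub import HfApi


def live_repo_files(repo_id: str = "stabilityai/stable-diffusion-xl-base-1.0") -> list[Path]:
    # list_repo_files returns every file path in the repo, much like the hard-coded fixture above
    return [Path(f) for f in HfApi().list_repo_files(repo_id)]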