mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
add tests for model installation events
@@ -326,6 +326,14 @@ class InvokeAIAppConfig(InvokeAISettings):
         cls.singleton_init = kwargs
         return cls.singleton_config
 
+    @classmethod
+    def clear_config(cls):
+        """
+        This removes the singleton InvokeAIAppConfig configuration object.
+        """
+        cls.singleton_config = None
+        cls.singleton_init = None
+
     @property
     def root_path(self) -> Path:
         """
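Note: clear_config() makes the settings singleton resettable, which the new tests rely on to avoid state leaking between test modules. A minimal sketch of how a test fixture might use it (the fixture itself is illustrative, not part of this commit):

    import pytest

    from invokeai.app.services.config import InvokeAIAppConfig


    @pytest.fixture(autouse=True)
    def fresh_config():
        # Drop the cached singleton before and after each test so that
        # every test builds its configuration from scratch.
        InvokeAIAppConfig.clear_config()
        yield
        InvokeAIAppConfig.clear_config()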
@@ -197,11 +197,13 @@ class DownloadQueue(DownloadQueueBase):
         Prune completed and errored queue items from the job list.
         """
         try:
             self._lock.acquire()
-            for job in self._jobs:
+            to_delete = set()
+            for job_id, job in self._jobs.items():
                 if self._in_terminal_state(job):
-                    self._jobs.remove(job)
+                    to_delete.add(job_id)
+            for job_id in to_delete:
+                del self._jobs[job_id]
         except KeyError as excp:
             raise UnknownJobIDException("Unrecognized job") from excp
         finally:
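The rewrite collects the doomed job IDs first and deletes them afterwards, because removing entries from a dict while iterating over it raises a RuntimeError. A standalone illustration of the pattern (not InvokeAI code):

    jobs = {1: "completed", 2: "running", 3: "error"}

    # Deleting during iteration would raise RuntimeError:
    #     for job_id in jobs:
    #         if jobs[job_id] != "running":
    #             del jobs[job_id]

    # Two-pass version, as in prune_jobs() above:
    to_delete = {job_id for job_id, status in jobs.items() if status != "running"}
    for job_id in to_delete:
        del jobs[job_id]

    assert jobs == {2: "running"}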
@@ -661,8 +663,11 @@ class DownloadQueue(DownloadQueueBase):
         source = Path(job.source).resolve()
         destination = Path(job.destination).resolve()
         try:
+            job.total_bytes = source.stat().st_size
+            self._update_job_status(job, DownloadJobStatus.RUNNING)
             if source != destination:
                 shutil.move(source, destination)
+            job.bytes = destination.stat().st_size
             self._update_job_status(job, DownloadJobStatus.COMPLETED)
         except OSError as excp:
             job.error = excp
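For a local file the job's byte counters can be read straight from stat(), so status events carry real sizes even though nothing is downloaded over the network. A standalone sketch of the same accounting (the function name is illustrative, not InvokeAI API):

    import shutil
    from pathlib import Path


    def move_with_accounting(source: Path, destination: Path) -> tuple[int, int]:
        # The total is known up front for a local file; read it before the move.
        total_bytes = source.stat().st_size
        if source != destination:
            shutil.move(source, destination)
        # After the move the destination holds everything that was "transferred".
        moved_bytes = destination.stat().st_size
        return total_bytes, moved_bytes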
@@ -525,7 +525,7 @@ class ModelInstall(ModelInstallBase):
             info.license = metadata.license
             info.thumbnail_url = metadata.thumbnail_url
             self._store.update_model(model_id, info)
-            self._async_installs[info.source] = model_id
+            self._async_installs[job.source] = model_id
             job.model_key = model_id
         elif job.status == "error":
             self._logger.warning(f"{job.source}: Model installation error: {job.error}")
@@ -544,7 +544,7 @@ class ModelInstall(ModelInstallBase):
             info.source = str(job.source)
             info.description = f"Imported model {info.name}"
             self._store.update_model(model_id, info)
-            self._async_installs[info.source] = model_id
+            self._async_installs[job.source] = model_id
             job.model_key = model_id
         elif job.status == "error":
             self._logger.warning(f"{job.source}: Model installation error: {job.error}")
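Both hunks above change the _async_installs key from info.source (the source as normalized and stored on the model record) to job.source (the exact value the caller passed in), so that the map returned by wait_for_installs() can be indexed with the caller's own source. A small illustration of the mismatch this avoids (the values are hypothetical):

    from pathlib import Path

    caller_source = Path("models/test_embedding.safetensors")  # what the caller passed
    normalized = str(caller_source.resolve())                   # what ends up on info.source

    id_map = {normalized: "model-key-123"}      # keyed by info.source
    assert caller_source not in id_map          # the caller's lookup fails

    id_map = {caller_source: "model-key-123"}   # keyed by job.source
    assert id_map[caller_source] == "model-key-123"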
@@ -236,9 +236,11 @@ class ModelLoad(ModelLoadBase):
             raise InvalidModelException(f"Files for model '{key}' not found at {model_path}")
 
         dst_convert_path = self._get_model_convert_cache_path(model_path)
-        model_path = model_class.convert_if_required(
-            model_config=model_config,
-            output_path=dst_convert_path,
+        model_path = self.resolve_model_path(
+            model_class.convert_if_required(
+                model_config=model_config,
+                output_path=dst_convert_path,
+            )
         )
 
         model_context = self._cache.get_model(
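convert_if_required() may hand back a path relative to the models directory; wrapping it in resolve_model_path() keeps the cache working with an absolute path. A sketch of what such resolution presumably does (this helper body is an assumption, not the actual implementation):

    from pathlib import Path


    def resolve_model_path(models_dir: Path, model_path: Path) -> Path:
        # Anchor a possibly-relative model path under the configured models directory.
        return model_path if model_path.is_absolute() else models_dir / model_path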
@@ -0,0 +1,80 @@
model:
  base_learning_rate: 7.5e-05
  target: invokeai.backend.models.diffusion.ddpm.LatentInpaintDiffusion
  params:
    parameterization: "v"
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: hybrid # important
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    finetune_keys: null

    scheduler_config: # 10000 warmup steps
      target: invokeai.backend.stable_diffusion.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    personalization_config:
      target: invokeai.backend.stable_diffusion.embedding_manager.EmbeddingManager
      params:
        placeholder_strings: ["*"]
        initializer_words: ['sculpture']
        per_image_tokens: false
        num_vectors_per_token: 8
        progressive_words: False

    unet_config:
      target: invokeai.backend.stable_diffusion.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 9 # 4 data + 4 downscaled image + 1 mask
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: invokeai.backend.stable_diffusion.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: invokeai.backend.stable_diffusion.encoders.modules.WeightedFrozenCLIPEmbedder
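A hedged sketch of how a legacy .yaml model config like this one is typically consumed via OmegaConf (the file name below is hypothetical; the test suite imports OmegaConf the same way):

    from omegaconf import OmegaConf

    conf = OmegaConf.load("v-inpainting-inference.yaml")  # hypothetical file name
    # The "target" strings name the classes to instantiate:
    print(conf.model.target)  # invokeai.backend.models.diffusion.ddpm.LatentInpaintDiffusion
    # 9 UNet input channels: 4 latent + 4 downscaled-image + 1 mask, per the comment above
    print(conf.model.params.unet_config.params.in_channels)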
@@ -6,6 +6,8 @@ import pytest
 from omegaconf import OmegaConf
 from pydantic import ValidationError
 
+from invokeai.app.services.config import InvokeAIAppConfig
+
 
 @pytest.fixture
 def patch_rootdir(tmp_path: Path, monkeypatch: Any) -> None:
@@ -55,7 +57,6 @@ def test_use_init(patch_rootdir):
     # note that we explicitly set omegaconf dict and argv here
     # so that the values aren't read from ~invokeai/invokeai.yaml and
     # sys.argv respectively.
-    from invokeai.app.services.config import InvokeAIAppConfig
 
     conf1 = InvokeAIAppConfig.get_config()
     assert conf1
@@ -73,8 +74,6 @@ def test_use_init(patch_rootdir):
 
 
 def test_legacy():
-    from invokeai.app.services.config import InvokeAIAppConfig
-
     conf = InvokeAIAppConfig.get_config()
     assert conf
     conf.parse_args(conf=init3, argv=[])
@@ -86,8 +85,6 @@ def test_legacy():
 
 
 def test_argv_override():
-    from invokeai.app.services.config import InvokeAIAppConfig
-
     conf = InvokeAIAppConfig.get_config()
     conf.parse_args(conf=init1, argv=["--always_use_cpu", "--max_cache=10"])
     assert conf.always_use_cpu
@@ -96,8 +93,6 @@ def test_argv_override():
 
 
 def test_env_override(patch_rootdir):
-    from invokeai.app.services.config import InvokeAIAppConfig
-
     # argv overrides
     conf = InvokeAIAppConfig()
     conf.parse_args(conf=init1, argv=["--max_cache=10"])
@@ -129,8 +124,6 @@ def test_env_override(patch_rootdir):
 
 
 def test_root_resists_cwd(patch_rootdir):
-    from invokeai.app.services.config import InvokeAIAppConfig
-
     previous = os.environ["INVOKEAI_ROOT"]
     cwd = Path(os.getcwd()).resolve()
 
@@ -146,8 +139,6 @@ def test_root_resists_cwd(patch_rootdir):
 
 
 def test_type_coercion(patch_rootdir):
-    from invokeai.app.services.config import InvokeAIAppConfig
-
     conf = InvokeAIAppConfig().get_config()
     conf.parse_args(argv=["--root=/tmp/foobar"])
     assert conf.root == Path("/tmp/foobar")
tests/x_model_manager/README.txt (new file, 8 lines)
@@ -0,0 +1,8 @@
These tests are placed in an "x_" folder so that they are run after
the node tests. If they run beforehand, the node tests blow up. I
suspect that there are conflicts arising from the in-memory
InvokeAIAppConfig object, but even when I take care to create a fresh
object each time, the problem persists, so perhaps it is something
else?

- Lincoln
@@ -310,6 +310,8 @@ def test_pause_cancel_url():  # this one is tricky because of potential race conditions
     assert Path(tmpdir, "mock9999.safetensors").exists() is False, "cancelled file should be deleted"
     assert Path(tmpdir, "mock54321.safetensors").exists()
 
+    assert len(queue.list_jobs()) == 3
+    queue.prune_jobs()
+    assert len(queue.list_jobs()) == 0
 
 
 def test_pause_cancel_repo_id():  # this one is tricky because of potential race conditions
tests/x_model_manager/test_model_install_service.py (new file, 80 lines)
@@ -0,0 +1,80 @@
import tempfile
from pathlib import Path
from typing import Any

from pydantic import BaseModel

from invokeai.app.services.config.invokeai_config import InvokeAIAppConfig
from invokeai.app.services.events import EventServiceBase
from invokeai.app.services.model_manager_service import ModelManagerService
from invokeai.backend.model_manager import BaseModelType, ModelType

# This is a very little embedding model that we can use to test installation
TEST_MODEL = "test_embedding.safetensors"


class DummyEvent(BaseModel):
    """Dummy Event to use with Dummy Event service."""

    event_name: str
    status: str


class DummyEventService(EventServiceBase):
    """Dummy event service for testing."""

    events: list

    def __init__(self):
        super().__init__()
        self.events = list()

    def dispatch(self, event_name: str, payload: Any) -> None:
        """Dispatch an event by appending it to self.events."""
        self.events.append(DummyEvent(event_name=event_name, status=payload["job"].status))


def test_install(datadir: Path):
    """Test installation of an itty-bitty embedding."""
    # create a temporary root directory for install to target
    with tempfile.TemporaryDirectory() as tmpdir:
        tmp_path = Path(tmpdir)
        (tmp_path / "models").mkdir()
        (tmp_path / "configs").mkdir()
        config = InvokeAIAppConfig(
            root=tmp_path,
            conf_path=tmp_path / "configs" / "models.yaml",
            models_dir=tmp_path / "models",
        )

        event_bus = DummyEventService()
        mm_service = ModelManagerService(config=config, event_bus=event_bus)

        source = datadir / TEST_MODEL
        mm_service.install_model(source=source)
        id_map = mm_service.wait_for_installs()
        print(id_map)
        assert source in id_map, "model did not install; id_map empty"
        assert id_map[source] is not None, "model did not install: source field empty"

        # test the events
        assert len(event_bus.events) > 0, "no events received"
        assert len(event_bus.events) == 3

        event_names = set([x.event_name for x in event_bus.events])
        assert "model_event" in event_names
        event_payloads = set([x.status for x in event_bus.events])
        assert "enqueued" in event_payloads
        assert "running" in event_payloads
        assert "completed" in event_payloads

        key = id_map[source]
        model = mm_service.model_info(key)  # may raise an exception here
        assert Path(config.models_path / model.path).exists(), "generated path incorrect"
        assert model.base_model == BaseModelType.StableDiffusion1, "probe of model base type didn't work"
        assert model.model_type == ModelType.TextualInversion, "probe of model type didn't work"

        model_info = mm_service.get_model(key)
        assert model_info, "model did not load"
        with model_info as model:
            assert model is not None, "model context not working"
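The three status payloads asserted above mirror the install job lifecycle (enqueued, running, completed) as published through the event bus. A standalone illustration of the dispatch contract the dummy service depends on, assuming DummyEventService from this file is importable (FakeJob is a stand-in for the real job object):

    class FakeJob:
        # Stand-in for the install job carried in the event payload.
        def __init__(self, status: str) -> None:
            self.status = status


    bus = DummyEventService()
    for stage in ("enqueued", "running", "completed"):
        bus.dispatch("model_event", {"job": FakeJob(stage)})

    assert [e.status for e in bus.events] == ["enqueued", "running", "completed"]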
Binary file not shown.
@@ -13,7 +13,7 @@ from invokeai.backend.model_manager.storage import ModelConfigStore, ModelConfig
 
 @pytest.fixture
 def store(datadir) -> ModelConfigStore:
-    InvokeAIAppConfig.get_config(root=datadir)
+    InvokeAIAppConfig(root=datadir)
     return ModelConfigStoreYAML(datadir / "configs" / "models.yaml")
 
 
@@ -14,7 +14,7 @@ from invokeai.backend.model_manager.storage import ModelConfigStore, ModelConfig
 
 @pytest.fixture
 def store(datadir) -> ModelConfigStore:
-    InvokeAIAppConfig.get_config(root=datadir)
+    InvokeAIAppConfig(root=datadir)
     return ModelConfigStoreSQL(datadir / "databases" / "models.db")
 
 
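Both storage-test fixtures now build a throwaway InvokeAIAppConfig instead of going through get_config(), which caches the instance as a process-wide singleton and may be what let earlier runs leak state into the node tests (see the README above). A minimal sketch of the difference, assuming datadir is a Path:

    # Singleton: this instance is cached; every later get_config() call in the
    # same process sees root=datadir, so state leaks across tests.
    shared = InvokeAIAppConfig.get_config(root=datadir)

    # Plain construction: a private instance; the cached singleton is untouched.
    private = InvokeAIAppConfig(root=datadir)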