From aa5d124d708fedd0454818d806fad3ff7b86667c Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Wed, 14 Feb 2024 09:51:11 -0500
Subject: [PATCH] References to context.services.model_manager.store.get_model
 can only accept keys, remove invalid assertion

---
 invokeai/app/invocations/latent.py             |  4 ++--
 .../load/model_cache/model_cache_default.py    | 22 +++++++++----------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index c3de521940..05293fdfee 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -681,7 +681,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
         source_node_id = graph_execution_state.prepared_source_mapping[self.id]
 
         # get the unet's config so that we can pass the base to dispatch_progress()
-        unet_config = context.services.model_manager.store.get_model(**self.unet.unet.model_dump())
+        unet_config = context.services.model_manager.store.get_model(self.unet.unet.key)
 
         def step_callback(state: PipelineIntermediateState) -> None:
             self.dispatch_progress(context, source_node_id, state, unet_config.base)
@@ -709,7 +709,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
             # Apply the LoRA after unet has been moved to its target device for faster patching.
             ModelPatcher.apply_lora_unet(unet, _lora_loader()),
         ):
-            assert isinstance(unet, torch.Tensor)
+            assert isinstance(unet, UNet2DConditionModel)
             latents = latents.to(device=unet.device, dtype=unet.dtype)
             if noise is not None:
                 noise = noise.to(device=unet.device, dtype=unet.dtype)
diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
index 786396062c..02ce1266c7 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
@@ -303,18 +303,18 @@ class ModelCache(ModelCacheBase[AnyModel]):
         in_vram_models = 0
         locked_in_vram_models = 0
         for cache_record in self._cached_models.values():
-            assert hasattr(cache_record.model, "device")
-            if cache_record.model.device == self.storage_device:
-                in_ram_models += 1
-            else:
-                in_vram_models += 1
-                if cache_record.locked:
-                    locked_in_vram_models += 1
+            if hasattr(cache_record.model, "device"):
+                if cache_record.model.device == self.storage_device:
+                    in_ram_models += 1
+                else:
+                    in_vram_models += 1
+                    if cache_record.locked:
+                        locked_in_vram_models += 1
 
-            self.logger.debug(
-                f"Current VRAM/RAM usage: {vram}/{ram}; models_in_ram/models_in_vram(locked) ="
-                f" {in_ram_models}/{in_vram_models}({locked_in_vram_models})"
-            )
+        self.logger.debug(
+            f"Current VRAM/RAM usage: {vram}/{ram}; models_in_ram/models_in_vram(locked) ="
+            f" {in_ram_models}/{in_vram_models}({locked_in_vram_models})"
+        )
 
     def make_room(self, model_size: int) -> None:
         """Make enough room in the cache to accommodate a new model of indicated size."""
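
For context on the second hunk, here is a minimal, self-contained sketch of the hasattr()-guarded counting pattern the patch adopts in place of the assertion. The CacheRecord, TorchLikeModel, and NonTorchModel classes and the string device values below are hypothetical stand-ins (InvokeAI's cache holds real torch modules and its own record type); only the guard-instead-of-assert structure and the three counters mirror the diff.

# Sketch of the guarded counting logic; stand-in types, not InvokeAI's API.
from dataclasses import dataclass
from typing import Any


@dataclass
class CacheRecord:
    model: Any
    locked: bool = False


@dataclass
class TorchLikeModel:
    device: str


class NonTorchModel:
    """A cached object with no .device attribute (this used to trip the assert)."""


def count_models(records: list[CacheRecord], storage_device: str = "cpu") -> tuple[int, int, int]:
    in_ram_models = 0
    in_vram_models = 0
    locked_in_vram_models = 0
    for cache_record in records:
        # Guard instead of assert: models without a .device attribute are
        # simply skipped rather than raising AssertionError.
        if hasattr(cache_record.model, "device"):
            if cache_record.model.device == storage_device:
                in_ram_models += 1
            else:
                in_vram_models += 1
                if cache_record.locked:
                    locked_in_vram_models += 1
    return in_ram_models, in_vram_models, locked_in_vram_models


records = [
    CacheRecord(TorchLikeModel("cpu")),
    CacheRecord(TorchLikeModel("cuda:0"), locked=True),
    CacheRecord(NonTorchModel()),
]
print(count_models(records))  # -> (1, 1, 1)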