Mirror of https://github.com/invoke-ai/InvokeAI
References to context.services.model_manager.store.get_model can only accept keys; remove the invalid assertion
Parent: 5cc73ec5dd
Commit: aa5d124d70
@@ -681,7 +681,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
         source_node_id = graph_execution_state.prepared_source_mapping[self.id]

         # get the unet's config so that we can pass the base to dispatch_progress()
-        unet_config = context.services.model_manager.store.get_model(**self.unet.unet.model_dump())
+        unet_config = context.services.model_manager.store.get_model(self.unet.unet.key)

         def step_callback(state: PipelineIntermediateState) -> None:
             self.dispatch_progress(context, source_node_id, state, unet_config.base)
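
For context, a minimal sketch of why the call site changed: per the commit message, the record store behind context.services.model_manager.store looks models up by their key alone, so expanding the whole identifier field into keyword arguments is not accepted. FakeModelRecordStore, its add helper, the sample key, and the field shape below are illustrative assumptions, not InvokeAI's real classes or data.

# Illustrative stand-in (assumption) for a key-only record store.
from typing import Any, Dict


class FakeModelRecordStore:
    def __init__(self) -> None:
        self._configs: Dict[str, Any] = {}

    def add(self, key: str, config: Any) -> None:
        self._configs[key] = config

    def get_model(self, key: str) -> Any:
        # Accepts a single key; extra keyword arguments would raise TypeError.
        return self._configs[key]


store = FakeModelRecordStore()
store.add("26d0d2a3", {"base": "sd-1"})

# Assumed shape of self.unet.unet.model_dump(): more fields than just the key.
unet_field = {"key": "26d0d2a3", "submodel_type": "unet"}

# store.get_model(**unet_field)               # old call style: TypeError on 'submodel_type'
config = store.get_model(unet_field["key"])   # new call style: key only
print(config["base"])                         # "sd-1"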
@@ -709,7 +709,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
             # Apply the LoRA after unet has been moved to its target device for faster patching.
             ModelPatcher.apply_lora_unet(unet, _lora_loader()),
         ):
-            assert isinstance(unet, torch.Tensor)
+            assert isinstance(unet, UNet2DConditionModel)
             latents = latents.to(device=unet.device, dtype=unet.dtype)
             if noise is not None:
                 noise = noise.to(device=unet.device, dtype=unet.dtype)
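
A small sanity check on the type relationship behind the corrected assert: a loaded UNet is a diffusers UNet2DConditionModel, which is an nn.Module subclass exposing the .device and .dtype properties used on the next line, and it is never a torch.Tensor, so the old assertion could not hold for any successfully loaded model. The tiny configuration below is the kind used in diffusers' fast tests and is assumed here purely for illustration.

import torch
from diffusers import UNet2DConditionModel

# A UNet is a module, not a tensor, so the old assert was unsatisfiable.
assert issubclass(UNet2DConditionModel, torch.nn.Module)
assert not issubclass(UNet2DConditionModel, torch.Tensor)

# Tiny illustrative config (assumption) to show the .device / .dtype
# properties that the code relies on right after the assert.
unet = UNet2DConditionModel(
    block_out_channels=(32, 64),
    layers_per_block=2,
    sample_size=32,
    in_channels=4,
    out_channels=4,
    down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
    up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
    cross_attention_dim=32,
)
assert isinstance(unet, UNet2DConditionModel)

latents = torch.randn(1, 4, 32, 32)
latents = latents.to(device=unet.device, dtype=unet.dtype)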
@@ -303,18 +303,18 @@ class ModelCache(ModelCacheBase[AnyModel]):
         in_vram_models = 0
         locked_in_vram_models = 0
         for cache_record in self._cached_models.values():
-            assert hasattr(cache_record.model, "device")
-            if cache_record.model.device == self.storage_device:
-                in_ram_models += 1
-            else:
-                in_vram_models += 1
-                if cache_record.locked:
-                    locked_in_vram_models += 1
+            if hasattr(cache_record.model, "device"):
+                if cache_record.model.device == self.storage_device:
+                    in_ram_models += 1
+                else:
+                    in_vram_models += 1
+                    if cache_record.locked:
+                        locked_in_vram_models += 1

         self.logger.debug(
             f"Current VRAM/RAM usage: {vram}/{ram}; models_in_ram/models_in_vram(locked) ="
             f" {in_ram_models}/{in_vram_models}({locked_in_vram_models})"
         )

     def make_room(self, model_size: int) -> None:
         """Make enough room in the cache to accommodate a new model of indicated size."""
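
This cache hunk swaps a hard assert for a hasattr guard, so cached objects that never expose a .device attribute are simply skipped when tallying RAM/VRAM residency instead of crashing the debug logging. A minimal sketch of that counting logic follows, with illustrative stand-ins (CacheRecord, DeviceModel, count_models) rather than InvokeAI's real classes.

from dataclasses import dataclass
from typing import Any, List, Tuple

import torch


@dataclass
class CacheRecord:  # illustrative stand-in, not InvokeAI's cache record
    model: Any
    locked: bool = False


class DeviceModel:  # minimal object that exposes a .device attribute
    def __init__(self, device: str) -> None:
        self.device = torch.device(device)


def count_models(records: List[CacheRecord], storage_device: torch.device) -> Tuple[int, int, int]:
    in_ram = in_vram = locked_in_vram = 0
    for record in records:
        if hasattr(record.model, "device"):  # the guard that replaced the assert
            if record.model.device == storage_device:
                in_ram += 1
            else:
                in_vram += 1
                if record.locked:
                    locked_in_vram += 1
    return in_ram, in_vram, locked_in_vram


records = [
    CacheRecord(DeviceModel("cpu")),                  # counted as "in RAM"
    CacheRecord(DeviceModel("cuda:0"), locked=True),  # counted as "in VRAM" and locked
    CacheRecord(object()),                            # no .device: skipped, not an error
]
print(count_models(records, storage_device=torch.device("cpu")))  # (1, 1, 1)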