From e3912e882672ae7ad99282e20ade050a9aaf7f8b Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 3 Oct 2023 15:36:23 -0400
Subject: [PATCH] replace config.ram_cache_size with config.ram and similarly
 for vram

---
 invokeai/backend/model_manager/loader.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/invokeai/backend/model_manager/loader.py b/invokeai/backend/model_manager/loader.py
index 39e836559c..6e4b585acb 100644
--- a/invokeai/backend/model_manager/loader.py
+++ b/invokeai/backend/model_manager/loader.py
@@ -158,13 +158,13 @@ class ModelLoad(ModelLoadBase):
         self._logger.info(f"Using models database {models_file}")
         self._logger.info(f"Rendering device = {device} ({device_name})")
-        self._logger.info(f"Maximum RAM cache size: {config.ram_cache_size}")
-        self._logger.info(f"Maximum VRAM cache size: {config.vram_cache_size}")
+        self._logger.info(f"Maximum RAM cache size: {config.ram}")
+        self._logger.info(f"Maximum VRAM cache size: {config.vram}")
         self._logger.info(f"Precision: {precision}")
 
         self._cache = ModelCache(
-            max_cache_size=config.ram_cache_size,
-            max_vram_cache_size=config.vram_cache_size,
+            max_cache_size=config.ram,
+            max_vram_cache_size=config.vram,
             lazy_offloading=config.lazy_offload,
             execution_device=device,
             precision=dtype,
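
For context, here is a minimal sketch of the config shape the patched code expects after the rename. The `AppConfig` class, its field types, and its default values are hypothetical illustrations (the patch does not show the real settings object); only the attribute names `ram`, `vram`, and `lazy_offload` are taken from the diff itself.

```python
from dataclasses import dataclass


# Hypothetical stand-in for the application config read by ModelLoad.
# After the patch, callers reference `ram` and `vram` instead of the
# old `ram_cache_size` / `vram_cache_size` attribute names.
@dataclass
class AppConfig:
    ram: float = 7.5            # maximum RAM model cache size (assumed GB)
    vram: float = 0.25          # maximum VRAM model cache size (assumed GB)
    lazy_offload: bool = True   # offload from VRAM only when space is needed


config = AppConfig()

# The patched call sites read the new attribute names:
print(f"Maximum RAM cache size: {config.ram}")
print(f"Maximum VRAM cache size: {config.vram}")
```

Note that this is a breaking rename: any external code or user config still referring to `ram_cache_size` or `vram_cache_size` would need to be updated to match.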