remove vram_cache and don't move VRAM models back into CPU

Author: Lincoln Stein
Date:   2024-03-31 16:37:13 -04:00
Parent: 24d73280ee
Commit: eaa2c68693

7 changed files with 107 additions and 230 deletions


@@ -1345,7 +1345,7 @@ from invokeai.app.services.model_load import ModelLoadService, ModelLoaderRegistry
 config = InvokeAIAppConfig.get_config()
 ram_cache = ModelCache(
-    max_cache_size=config.ram_cache_size, max_vram_cache_size=config.vram_cache_size, logger=logger
+    max_cache_size=config.ram_cache_size, logger=logger
 )
 convert_cache = ModelConvertCache(
     cache_path=config.models_convert_cache_path, max_size=config.convert_cache_size
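
For context, a minimal sketch of how the two call sites read after this commit. The import paths and the logger setup are assumptions (the hunk above does not show them); the substantive change is only that ModelCache loses its max_vram_cache_size argument.

    # Sketch of the post-commit call sites. Import paths are assumed,
    # since the diff excerpt above does not include them.
    from invokeai.app.services.config import InvokeAIAppConfig
    from invokeai.backend.util.logging import InvokeAILogger

    config = InvokeAIAppConfig.get_config()
    logger = InvokeAILogger.get_logger()

    # The separate VRAM budget is gone: ModelCache is sized only by the
    # RAM cache, and models loaded into VRAM are no longer moved back
    # into CPU memory to stay under a vram_cache_size limit.
    ram_cache = ModelCache(
        max_cache_size=config.ram_cache_size,
        logger=logger,
    )

    convert_cache = ModelConvertCache(
        cache_path=config.models_convert_cache_path,
        max_size=config.convert_cache_size,
    )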