Mirror of https://github.com/invoke-ai/InvokeAI
cap model cache size using bytes, not # models
@@ -59,7 +59,7 @@ def get_model_manager(config: Args, logger: types.ModuleType) -> ModelManager:
         config.conf,
         precision=dtype,
         device_type=device,
-        max_loaded_models=config.max_loaded_models,
+        max_cache_size=config.max_cache_size,
         # temporarily disabled until model manager stabilizes
         # embedding_path = Path(embedding_path),
         logger = logger,
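The hunk above only swaps the constructor argument; the cache logic itself lives elsewhere in ModelManager and is not shown in this commit. As a rough sketch of the technique the commit title describes (evicting by total memory footprint rather than by model count), here is a minimal, hypothetical LRU cache capped in bytes. The class name SizeCappedModelCache and its methods are illustrative assumptions, not InvokeAI's actual API.

from collections import OrderedDict

class SizeCappedModelCache:
    """Hypothetical LRU cache that caps total size in bytes
    instead of the number of resident models."""

    def __init__(self, max_cache_size: int) -> None:
        self.max_cache_size = max_cache_size   # budget in bytes
        self._models = OrderedDict()           # key -> (model, size_bytes)
        self._total_bytes = 0

    def get(self, key: str):
        # Re-insert on hit so the entry becomes most-recently-used.
        model, size = self._models.pop(key)    # raises KeyError on a miss
        self._models[key] = (model, size)
        return model

    def put(self, key: str, model, size_bytes: int) -> None:
        self._models[key] = (model, size_bytes)
        self._total_bytes += size_bytes
        # Evict least-recently-used entries until the byte budget fits;
        # always keep at least the entry that was just inserted.
        while self._total_bytes > self.max_cache_size and len(self._models) > 1:
            _, (_, evicted_size) = self._models.popitem(last=False)
            self._total_bytes -= evicted_size

Accounting in bytes instead of model count means one large checkpoint can no longer blow past the limit while technically staying under the model quota: every model is charged for its actual footprint against a single memory budget.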