Remove default model cache sizes from model_cache_default.py. These defaults were misleading, because the config defaults take precedence over them.
parent 3e569c8312
commit e064377c05
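The pitfall behind this commit is worth spelling out: ModelCache's callers always pass cache sizes read from the application config, so the module-level defaults could never take effect; they merely sat in the source looking authoritative. A minimal sketch of the pattern, using a simplified stand-in config (AppConfig and its ram field below are illustrative placeholders, not InvokeAI's actual config API):

# Minimal sketch of the pitfall this commit removes. `AppConfig` and `ram`
# are illustrative placeholders, not InvokeAI's actual config API.
from dataclasses import dataclass

DEFAULT_MAX_CACHE_SIZE = 6.0  # reads as the operative default, but is dead code


@dataclass
class AppConfig:
    ram: float = 7.5  # the config default that actually takes precedence


class ModelCache:
    def __init__(self, max_cache_size: float = DEFAULT_MAX_CACHE_SIZE) -> None:
        self.max_cache_size = max_cache_size


config = AppConfig()
cache = ModelCache(max_cache_size=config.ram)  # callers always pass the config value
assert cache.max_cache_size == 7.5  # the 6.0 default above never applies

Removing the parameter default turns this silent mismatch into a loud failure: any caller that stops passing a value now raises a TypeError instead of quietly running with a number nobody configured.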
model_cache_default.py

@@ -40,15 +40,8 @@ from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
 
-# Maximum size of the cache, in gigs
-# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
-DEFAULT_MAX_CACHE_SIZE = 6.0
-
-# amount of GPU memory to hold in reserve for use by generations (GB)
-DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75
-
-# actual size of a gig
-GIG = 1073741824
+# Size of a GB in bytes.
+GIG = 2**30
 
 # Size of a MB in bytes.
 MB = 2**20
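One incidental detail in this hunk: rewriting GIG from the literal 1073741824 to 2**30 is purely notational, since the two expressions are equal and the new spelling matches the existing MB = 2**20 convention.

assert 2**30 == 1073741824  # GIG keeps the same value; only the spelling changes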
@@ -59,8 +52,8 @@ class ModelCache(ModelCacheBase[AnyModel]):
 
     def __init__(
         self,
-        max_cache_size: float = DEFAULT_MAX_CACHE_SIZE,
-        max_vram_cache_size: float = DEFAULT_MAX_VRAM_CACHE_SIZE,
+        max_cache_size: float,
+        max_vram_cache_size: float,
         execution_device: torch.device = torch.device("cuda"),
         storage_device: torch.device = torch.device("cpu"),
         precision: torch.dtype = torch.float16,
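After this change, constructing a ModelCache requires both sizes to be supplied explicitly, making the application config the single source of truth. A hedged sketch of a call site (config.ram and config.vram are assumed field names standing in for whatever the app config actually exposes; they are not confirmed by this diff):

import torch

# Hypothetical call site; `config.ram` / `config.vram` are assumed names.
cache = ModelCache(
    max_cache_size=config.ram,        # CPU (RAM) cache size, in GB
    max_vram_cache_size=config.vram,  # GPU (VRAM) cache size, in GB
    execution_device=torch.device("cuda"),
    storage_device=torch.device("cpu"),
)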