Tidy up GIG -> GB and remove unused GIG constant.

Ryan Dick 2024-08-27 15:52:09 +00:00
parent c578b8df1e
commit 6ba9b1b6b0
3 changed files with 14 additions and 18 deletions
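For context, the renamed constant is only a byte-count multiplier. Below is a minimal sketch (illustrative, not part of this diff; the helper name format_gb is hypothetical) of the bytes-to-GB conversion pattern the cache code relies on after the rename:

    # Illustrative sketch only; mirrors the cache's bytes-to-GB logging pattern.
    GB = 2**30  # size of a GB (gibibyte) in bytes

    def format_gb(num_bytes: int) -> str:
        # e.g. format_gb(1610612736) -> "1.50GB"
        return f"{(num_bytes / GB):.2f}GB"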

View File

@@ -25,7 +25,7 @@ from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.logging import InvokeAILogger
 
 # Size of a GB in bytes.
-GIG = 2**30
+GB = 2**30
 
 # Size of a MB in bytes.
 MB = 2**20
@@ -196,7 +196,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
         # more stats
         if self.stats:
             stats_name = stats_name or key
-            self.stats.cache_size = int(self._max_cache_size * GIG)
+            self.stats.cache_size = int(self._max_cache_size * GB)
             self.stats.high_watermark = max(self.stats.high_watermark, self.cache_size())
             self.stats.in_cache = len(self._cached_models)
             self.stats.loaded_model_sizes[stats_name] = max(
@@ -228,9 +228,9 @@ class ModelCache(ModelCacheBase[AnyModel]):
         :param size_required: The amount of space to clear in the execution_device cache, in bytes.
         """
-        reserved = self._max_vram_cache_size * GIG
+        reserved = self._max_vram_cache_size * GB
         vram_in_use = torch.cuda.memory_allocated() + size_required
-        self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM needed for models; max allowed={(reserved/GIG):.2f}GB")
+        self.logger.debug(f"{(vram_in_use/GB):.2f}GB VRAM needed for models; max allowed={(reserved/GB):.2f}GB")
         for _, cache_entry in sorted(self._cached_models.items(), key=lambda x: x[1].size):
             if vram_in_use <= reserved:
                 break
@@ -241,7 +241,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
                 cache_entry.loaded = False
                 vram_in_use = torch.cuda.memory_allocated() + size_required
                 self.logger.debug(
-                    f"Removing {cache_entry.key} from VRAM to free {(cache_entry.size/GIG):.2f}GB; vram free = {(torch.cuda.memory_allocated()/GIG):.2f}GB"
+                    f"Removing {cache_entry.key} from VRAM to free {(cache_entry.size/GB):.2f}GB; vram free = {(torch.cuda.memory_allocated()/GB):.2f}GB"
                 )
 
         TorchDevice.empty_cache()
@@ -299,7 +299,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
         self.logger.debug(
             f"Moved model '{cache_entry.key}' from {source_device} to"
             f" {target_device} in {(end_model_to_time-start_model_to_time):.2f}s."
-            f"Estimated model size: {(cache_entry.size/GIG):.3f} GB."
+            f"Estimated model size: {(cache_entry.size/GB):.3f} GB."
             f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}"
         )
@@ -322,14 +322,14 @@ class ModelCache(ModelCacheBase[AnyModel]):
                 f"Moving model '{cache_entry.key}' from {source_device} to"
                 f" {target_device} caused an unexpected change in VRAM usage. The model's"
                 " estimated size may be incorrect. Estimated model size:"
-                f" {(cache_entry.size/GIG):.3f} GB.\n"
+                f" {(cache_entry.size/GB):.3f} GB.\n"
                 f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}"
             )
 
     def print_cuda_stats(self) -> None:
         """Log CUDA diagnostics."""
-        vram = "%4.2fG" % (torch.cuda.memory_allocated() / GIG)
-        ram = "%4.2fG" % (self.cache_size() / GIG)
+        vram = "%4.2fG" % (torch.cuda.memory_allocated() / GB)
+        ram = "%4.2fG" % (self.cache_size() / GB)
 
         in_ram_models = 0
         in_vram_models = 0
@@ -356,13 +356,13 @@ class ModelCache(ModelCacheBase[AnyModel]):
         garbage-collected.
         """
         bytes_needed = size
-        maximum_size = self.max_cache_size * GIG  # stored in GB, convert to bytes
+        maximum_size = self.max_cache_size * GB  # stored in GB, convert to bytes
         current_size = self.cache_size()
 
         if current_size + bytes_needed > maximum_size:
             self.logger.debug(
-                f"Max cache size exceeded: {(current_size/GIG):.2f}/{self.max_cache_size:.2f} GB, need an additional"
-                f" {(bytes_needed/GIG):.2f} GB"
+                f"Max cache size exceeded: {(current_size/GB):.2f}/{self.max_cache_size:.2f} GB, need an additional"
+                f" {(bytes_needed/GB):.2f} GB"
             )
 
         self.logger.debug(f"Before making_room: cached_models={len(self._cached_models)}")
@@ -379,7 +379,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
             if not cache_entry.locked:
                 self.logger.debug(
-                    f"Removing {model_key} from RAM cache to free at least {(size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)"
+                    f"Removing {model_key} from RAM cache to free at least {(size/GB):.2f} GB (-{(cache_entry.size/GB):.2f} GB)"
                 )
                 current_size -= cache_entry.size
                 models_cleared += 1

View File

@@ -3,10 +3,9 @@ Initialization file for invokeai.backend.util
 """
 
 from invokeai.backend.util.logging import InvokeAILogger
-from invokeai.backend.util.util import GIG, Chdir, directory_size
+from invokeai.backend.util.util import Chdir, directory_size
 
 __all__ = [
-    "GIG",
     "directory_size",
     "Chdir",
     "InvokeAILogger",

View File

@@ -7,9 +7,6 @@ from pathlib import Path
 from PIL import Image
 
-# actual size of a gig
-GIG = 1073741824
-
 
 def slugify(value: str, allow_unicode: bool = False) -> str:
     """