From b9fd2e9e760672e3a258ab269fb5b24c746566fc Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Tue, 3 Oct 2023 11:56:11 -0400
Subject: [PATCH] Improve get_pretty_snapshot_diff(...) message formatting.

---
 .../model_management/memory_snapshot.py     | 31 ++++++++++++++-----
 .../backend/model_management/model_cache.py | 18 +++++------
 2 files changed, 33 insertions(+), 16 deletions(-)

diff --git a/invokeai/backend/model_management/memory_snapshot.py b/invokeai/backend/model_management/memory_snapshot.py
index bfab67e77c..14ed10d385 100644
--- a/invokeai/backend/model_management/memory_snapshot.py
+++ b/invokeai/backend/model_management/memory_snapshot.py
@@ -66,13 +66,30 @@ class MemorySnapshot:
 
 def get_pretty_snapshot_diff(snapshot_1: MemorySnapshot, snapshot_2: MemorySnapshot) -> str:
     """Get a pretty string describing the difference between two `MemorySnapshot`s."""
-    ram_diff = snapshot_2.process_ram - snapshot_1.process_ram
-    msg = f"RAM ({(ram_diff/GB):+.2f}): {(snapshot_1.process_ram/GB):.2f}GB -> {(snapshot_2.process_ram/GB):.2f}GB"
-    vram_diff = None
-    if snapshot_1.vram is not None and snapshot_2.vram is not None:
-        vram_diff = snapshot_2.vram - snapshot_1.vram
+
+    def get_msg_line(prefix: str, val1: int, val2: int) -> str:
+        diff = val2 - val1
+        return f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB\n"
 
-        msg += f", VRAM ({(vram_diff/GB):+.2f}): {(snapshot_1.vram/GB):.2f}GB -> {(snapshot_2.vram/GB):.2f}GB"
+    msg = ""
 
-    return msg
+    msg += get_msg_line("Process RAM", snapshot_1.process_ram, snapshot_2.process_ram)
+
+    if snapshot_1.malloc_info is not None and snapshot_2.malloc_info is not None:
+        msg += get_msg_line("libc mmap allocated", snapshot_1.malloc_info.hblkhd, snapshot_2.malloc_info.hblkhd)
+        msg += get_msg_line("libc arena used", snapshot_1.malloc_info.uordblks, snapshot_2.malloc_info.uordblks)
+        msg += get_msg_line("libc arena free", snapshot_1.malloc_info.fordblks, snapshot_2.malloc_info.fordblks)
+
+        libc_total_allocated_1 = snapshot_1.malloc_info.arena + snapshot_1.malloc_info.hblkhd
+        libc_total_allocated_2 = snapshot_2.malloc_info.arena + snapshot_2.malloc_info.hblkhd
+        msg += get_msg_line("libc total allocated", libc_total_allocated_1, libc_total_allocated_2)
+
+        libc_total_used_1 = snapshot_1.malloc_info.uordblks + snapshot_1.malloc_info.hblkhd
+        libc_total_used_2 = snapshot_2.malloc_info.uordblks + snapshot_2.malloc_info.hblkhd
+        msg += get_msg_line("libc total used", libc_total_used_1, libc_total_used_2)
+
+    if snapshot_1.vram is not None and snapshot_2.vram is not None:
+        msg += get_msg_line("VRAM", snapshot_1.vram, snapshot_2.vram)
+
+    return msg
 
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 663fd8b627..f62e5ee315 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -230,10 +230,10 @@ class ModelCache(object):
             self_reported_model_size_after_load = model_info.get_size(submodel)
 
             self.logger.debug(
-                f"Moved model '{key}' from disk to cpu in {(end_load_time-start_load_time):.2f}s. Self-reported size"
-                f" before/after load: {(self_reported_model_size_before_load/GIG):.2f}GB /"
-                f" {(self_reported_model_size_after_load/GIG):.2f}GB."
-                f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}."
+ f"Moved model '{key}' from disk to cpu in {(end_load_time-start_load_time):.2f}s.\n" + f"Self-reported size before/after load: {(self_reported_model_size_before_load/GIG):.2f}GB /" + f" {(self_reported_model_size_after_load/GIG):.2f}GB.\n" + f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" ) # We only log a warning for over-reported (not under-reported) model sizes before load. There is a known @@ -282,9 +282,9 @@ class ModelCache(object): end_model_to_time = time.time() self.logger.debug( f"Moved model '{key}' from {source_device} to" - f" {target_device} in {(end_model_to_time-start_model_to_time):.2f}s." - f" Estimated model size: {(cache_entry.size/GIG):.2f} GB." - f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}." + f" {target_device} in {(end_model_to_time-start_model_to_time):.2f}s.\n" + f"Estimated model size: {(cache_entry.size/GIG):.2f} GB.\n" + f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" ) if snapshot_before.vram is not None and snapshot_after.vram is not None: @@ -301,8 +301,8 @@ class ModelCache(object): f"Moving model '{key}' from {source_device} to" f" {target_device} caused an unexpected change in VRAM usage. The model's" " estimated size may be incorrect. Estimated model size:" - f" {(cache_entry.size/GIG):.2f} GB." - f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}." + f" {(cache_entry.size/GIG):.2f} GB.\n" + f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" ) # Now, we will update our size estimate for `cache_entry` based on the change in VRAM usage. We only use the