From 119d26e102e223701c26b1c410f51ad6722a069e Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Thu, 7 Mar 2024 14:08:15 -0500
Subject: [PATCH] Remove manual memory management in hashlib_hasher in favor
 of using Python's built-in buffering

---
 invokeai/backend/model_manager/hash.py | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/invokeai/backend/model_manager/hash.py b/invokeai/backend/model_manager/hash.py
index e146924b97..89d4d954e6 100644
--- a/invokeai/backend/model_manager/hash.py
+++ b/invokeai/backend/model_manager/hash.py
@@ -163,13 +163,11 @@ class ModelHash:
         """
 
         def hashlib_hasher(file_path: Path) -> str:
-            """Hashes a file using a hashlib algorithm. Uses `memoryview` to avoid reading the entire file into memory."""
+            """Hashes a file using a hashlib algorithm."""
             hasher = hashlib.new(algorithm)
-            buffer = bytearray(128 * 1024)
-            mv = memoryview(buffer)
-            with open(file_path, "rb", buffering=0) as f:
-                while n := f.readinto(mv):
-                    hasher.update(mv[:n])
+            with open(file_path, "rb") as f:
+                for chunk in iter(lambda: f.read(128 * 1024), b""):
+                    hasher.update(chunk)
             return hasher.hexdigest()
 
         return hashlib_hasher
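
For reference, a minimal standalone sketch of the chunked-read hashing pattern this patch adopts. The hash_file name, the default "sha256" algorithm, and the example path are illustrative only and are not part of the InvokeAI API:

    import hashlib
    from pathlib import Path

    def hash_file(file_path: Path, algorithm: str = "sha256") -> str:
        """Hash a file in 128 KiB chunks, relying on Python's default buffered I/O."""
        hasher = hashlib.new(algorithm)
        with open(file_path, "rb") as f:
            # iter() with a sentinel keeps calling f.read(...) until it returns b"" at EOF.
            for chunk in iter(lambda: f.read(128 * 1024), b""):
                hasher.update(chunk)
        return hasher.hexdigest()

    if __name__ == "__main__":
        # Hypothetical path, used purely for illustration.
        print(hash_file(Path("model.safetensors")))

Compared with the removed bytearray/memoryview approach, this version lets the interpreter manage read buffers; the fixed 128 * 1024 chunk size keeps peak memory small regardless of file size.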