Commit 7726d312e1 (mirror of https://github.com/invoke-ai/InvokeAI)
For SSDs, `blake3` is about 10x faster than `blake3_single` - 30 files/second vs 3 files/second. For spinning HDDs, `blake3` is about 100x slower than `blake3_single` - 300 seconds/file vs 3 seconds/file - likely because its parallel reads force constant seeking on spinning media. For external drives, `blake3` is always worse, though the difference is highly variable; for external spinning drives it is probably far worse than for internal ones. The least offensive algorithm is `blake3_single`, and it's still _much_ faster than any other algorithm. A sketch of the two modes follows.
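The divergence comes down to how each mode reads the file. A minimal sketch of the two, assuming the official `blake3` Python bindings (which expose single-threaded and rayon-threadpool memory-mapped hashing); the file path is hypothetical:

from blake3 import blake3

path = "model.safetensors"  # hypothetical model file

# blake3_single: memory-map the file and hash it on a single thread.
# Reads are sequential, which spinning and external drives handle well.
single = blake3()
single.update_mmap(path)
print("single:", single.hexdigest())

# blake3: memory-map the file and hash it on a rayon thread pool.
# Many threads read the map at once - fast on SSDs, but the scattered
# reads can thrash a spinning disk's heads.
multi = blake3()
multi.update_mmap_rayon(path)
print("multi:", multi.hexdigest())

Both modes produce the same digest; only throughput differs, which is why the choice is purely a performance question.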
34 lines · 916 B · Python · Executable File
#!/bin/env python

"""Little command-line utility for probing a model on disk."""

import argparse
from pathlib import Path
from typing import get_args

from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
from invokeai.backend.model_manager import InvalidModelConfigException, ModelProbe

algos = ", ".join(set(get_args(HASHING_ALGORITHMS)))

parser = argparse.ArgumentParser(description="Probe model type")
parser.add_argument(
    "model_path",
    type=Path,
    nargs="+",
)
parser.add_argument(
    "--hash_algo",
    type=str,
    default="blake3_single",
    help=f"Hashing algorithm to use (default: blake3_single), one of: {algos}",
)
args = parser.parse_args()

for path in args.model_path:
    try:
        info = ModelProbe.probe(path, hash_algo=args.hash_algo)
        print(f"{path}:{info.model_dump_json(indent=4)}")
    except InvalidModelConfigException as exc:
        print(exc)
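Assuming the script is saved as probe_model.py and InvokeAI is importable from the current environment (both assumptions; the model path below is a placeholder), a run might look like:

python probe_model.py /path/to/model.safetensors --hash_algo blake3

For each path given, the script prints the probed model config as indented JSON, or the InvalidModelConfigException message if the file is not a recognizable model.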