Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Merge branch 'main' into bugfix/convert-v2-models
@@ -25,8 +25,6 @@ import torch
 import transformers
 from diffusers import AutoencoderKL
 from diffusers import logging as dlogging
-from diffusers.utils.logging import (get_verbosity, set_verbosity,
-                                     set_verbosity_error)
 from huggingface_hub import scan_cache_dir
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
@@ -49,9 +47,10 @@ class ModelManager(object):
     def __init__(
         self,
         config: OmegaConf,
-        device_type: str = "cpu",
+        device_type: str | torch.device = "cpu",
         precision: str = "float16",
         max_loaded_models=DEFAULT_MAX_MODELS,
+        sequential_offload = False
     ):
         """
         Initialize with the path to the models.yaml config file,
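
Note on the signature change above: widening `device_type` to `str | torch.device` lets callers pass either a device string or a ready-made `torch.device`. A minimal sketch of how such a parameter can be normalized (the `normalize_device` helper is illustrative, not part of this commit):

import torch

def normalize_device(device_type: str | torch.device) -> torch.device:
    # Accept either form and always hand back a torch.device object.
    if isinstance(device_type, torch.device):
        return device_type
    return torch.device(device_type)

assert normalize_device("cpu") == torch.device("cpu")
assert normalize_device(torch.device("cpu")) == torch.device("cpu")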
@@ -69,6 +68,7 @@ class ModelManager(object):
         self.models = {}
         self.stack = []  # this is an LRU FIFO
         self.current_model = None
+        self.sequential_offload = sequential_offload

     def valid_model(self, model_name: str) -> bool:
         """
@@ -529,7 +529,10 @@ class ModelManager(object):
         dlogging.set_verbosity(verbosity)
         assert pipeline is not None, OSError(f'"{name_or_path}" could not be loaded')

-        pipeline.to(self.device)
+        if self.sequential_offload:
+            pipeline.enable_offload_submodels(self.device)
+        else:
+            pipeline.to(self.device)

         model_hash = self._diffuser_sha256(name_or_path)
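
Note: with `sequential_offload` enabled, the pipeline keeps its submodels (text encoder, UNet, VAE) on the CPU and moves each onto the execution device only while it runs, trading generation speed for a much smaller VRAM footprint. `enable_offload_submodels` is InvokeAI's own pipeline method; stock diffusers offers the same trade-off through `enable_sequential_cpu_offload()`. A minimal sketch against the public diffusers API (model ID and prompt are illustrative; requires the accelerate package):

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# Submodels stay on the CPU and are paged onto the GPU one at a time,
# each just before its forward pass runs.
pipe.enable_sequential_cpu_offload()
image = pipe("a lighthouse at dawn").images[0]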
@@ -761,7 +764,7 @@ class ModelManager(object):
             return

         model_name = model_name or diffusers_path.name
-        model_description = model_description or "Optimized version of {model_name}"
+        model_description = model_description or f"Optimized version of {model_name}"
         print(f">> Optimizing {model_name} (30-60s)")
         try:
             # By passing the specified VAE to the conversion function, the autoencoder
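
Note: the fix above adds the missing `f` prefix. Without it, `{model_name}` is stored as literal braces instead of being interpolated:

model_name = "sd-2.1"
broken = "Optimized version of {model_name}"   # literal text, braces and all
fixed = f"Optimized version of {model_name}"   # interpolated at runtime

assert broken == "Optimized version of {model_name}"
assert fixed == "Optimized version of sd-2.1"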
@@ -801,15 +804,17 @@ class ModelManager(object):
         models_folder_safetensors = Path(search_folder).glob("**/*.safetensors")

         ckpt_files = [x for x in models_folder_ckpt if x.is_file()]
-        safetensor_files = [x for x in models_folder_safetensors if x.is_file]
+        safetensor_files = [x for x in models_folder_safetensors if x.is_file()]

         files = ckpt_files + safetensor_files

         found_models = []
         for file in files:
-            found_models.append(
-                {"name": file.stem, "location": str(file.resolve()).replace("\\", "/")}
-            )
+            location = str(file.resolve()).replace("\\", "/")
+            if 'model.safetensors' not in location and 'diffusion_pytorch_model.safetensors' not in location:
+                found_models.append(
+                    {"name": file.stem, "location": location}
+                )

         return search_folder, found_models
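
Note: two fixes land in this hunk. First, `x.is_file` (no parentheses) is a bound method object, which is always truthy, so the old filter admitted every glob result; `x.is_file()` performs the real filesystem check. Second, the new `location` test skips `model.safetensors` and `diffusion_pytorch_model.safetensors`, which are component weights inside diffusers model folders rather than standalone checkpoints. The truthiness pitfall in miniature:

from pathlib import Path

p = Path("does-not-exist.safetensors")
print(bool(p.is_file))    # True  -- the method object itself is truthy
print(bool(p.is_file()))  # False -- the actual check against the filesystem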
@@ -994,12 +999,12 @@ class ModelManager(object):
         if self.device == "cpu":
             return model

-        # diffusers really really doesn't like us moving a float16 model onto CPU
-        verbosity = get_verbosity()
-        set_verbosity_error()
+        if isinstance(model, StableDiffusionGeneratorPipeline):
+            model.offload_all()
+            return model
+
         model.cond_stage_model.device = "cpu"
         model.to("cpu")
-        set_verbosity(verbosity)

         for submodel in ("first_stage_model", "cond_stage_model", "model"):
             try:
@@ -1012,6 +1017,10 @@ class ModelManager(object):
         if self.device == "cpu":
             return model

+        if isinstance(model, StableDiffusionGeneratorPipeline):
+            model.ready()
+            return model
+
         model.to(self.device)
         model.cond_stage_model.device = self.device
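
Note: these two hunks delegate device placement to diffusers pipelines instead of calling .to() on them directly: `offload_all()` releases accelerator memory when a pipeline is evicted from the model cache, and `ready()` restores it before generation. Dropping the verbosity juggling here is also what lets the first hunk remove the `diffusers.utils.logging` import. A toy sketch of the delegation protocol (the class and its internals are illustrative, not InvokeAI's actual implementation):

import torch

class ManagedPipeline:
    # Owns its device placement, so a cache manager never calls .to() on it.
    def __init__(self, unet: torch.nn.Module, device: str = "cpu"):
        self.unet = unet
        self.device = device

    def offload_all(self) -> None:
        self.unet.to("cpu")        # evicted from cache: free accelerator memory

    def ready(self) -> None:
        self.unet.to(self.device)  # about to generate: bring weights back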
@@ -1162,7 +1171,7 @@ class ModelManager(object):
         strategy.execute()

     @staticmethod
-    def _abs_path(path: Union(str, Path)) -> Path:
+    def _abs_path(path: str | Path) -> Path:
         if path is None or Path(path).is_absolute():
             return path
         return Path(Globals.root, path).resolve()
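
Note: the old annotation called `typing.Union` like a function; `Union` must be subscripted, so `Union(str, Path)` raises a `TypeError` the moment the `def` line is evaluated. Both correct spellings, side by side:

from pathlib import Path
from typing import Union

OldStyle = Union[str, Path]  # square brackets: valid on all supported Pythons
NewStyle = str | Path        # PEP 604 syntax, Python 3.10+

# Union(str, Path) would raise:
# TypeError: Cannot instantiate typing.Union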