'''
Manage a cache of Stable Diffusion model files for fast switching.

Models are moved between GPU and CPU as necessary. When the number of
models cached in system RAM reaches a preset maximum, the least recently
used model is cleared and reloaded from disk when next needed.
'''

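# Illustrative usage sketch (comments only, nothing executed on import). The
# models.yaml path below is a hypothetical example; any OmegaConf-loadable
# config with per-model 'config', 'weights', 'width' and 'height' entries works:
#
#   config = OmegaConf.load('configs/models.yaml')
#   cache  = ModelCache(config, device_type='cuda', precision='float16')
#   sd     = cache.get_model(cache.default_model())  # dict with 'model', 'width', 'height', 'hash'
#   cache.print_models()                             # show load status of each configured model
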
import torch
import os
import io
import time
import gc
import hashlib
import psutil
import transformers
import traceback

from sys import getrefcount
from omegaconf import OmegaConf
from omegaconf.errors import ConfigAttributeError
from ldm.util import instantiate_from_config

DEFAULT_MAX_MODELS = 2


class ModelCache(object):
    def __init__(self, config:OmegaConf, device_type:str, precision:str, max_loaded_models=DEFAULT_MAX_MODELS):
        '''
        Initialize with the path to the models.yaml config file,
        the torch device type, and the precision. The optional
        max_loaded_models argument sets how many models may be kept
        in the system RAM cache at once (default 2). When the limit
        is reached, the least recently used model is purged.
        '''
        # prevent nasty-looking CLIP log message
        transformers.logging.set_verbosity_error()
        self.config = config
        self.precision = precision
        self.device = torch.device(device_type)
        self.max_loaded_models = max_loaded_models
        self.models = {}
        self.stack = []  # this is an LRU FIFO
        self.current_model = None

    def valid_model(self, model_name:str) -> bool:
        '''
        Given a model name, returns True if it is a valid
        identifier.
        '''
        return model_name in self.config

    def get_model(self, model_name:str):
        '''
        Given a model name identified in models.yaml, return
        the model object. If the model is cached in system RAM it
        will be moved into GPU VRAM; if it is only on disk, it will
        be loaded from there.
        '''
        if not self.valid_model(model_name):
            print(f'** "{model_name}" is not a known model name. Please check your models.yaml file')
            return self.current_model

        if self.current_model != model_name:
            if model_name not in self.models:  # make room for a new one
                self._make_cache_room()
            self.offload_model(self.current_model)

        if model_name in self.models:
            requested_model = self.models[model_name]['model']
            print(f'>> Retrieving model {model_name} from system RAM cache')
            self.models[model_name]['model'] = self._model_from_cpu(requested_model)
            width = self.models[model_name]['width']
            height = self.models[model_name]['height']
            hash = self.models[model_name]['hash']
        else:  # the model is not cached, so load it from disk
            try:
                requested_model, width, height, hash = self._load_model(model_name)
                self.models[model_name] = {}
                self.models[model_name]['model'] = requested_model
                self.models[model_name]['width'] = width
                self.models[model_name]['height'] = height
                self.models[model_name]['hash'] = hash
            except Exception as e:
                print(f'** model {model_name} could not be loaded: {str(e)}')
                print(traceback.format_exc())
                print(f'** restoring {self.current_model}')
                self.get_model(self.current_model)
                return None

        self.current_model = model_name
        self._push_newest_model(model_name)
        return {
            'model': requested_model,
            'width': width,
            'height': height,
            'hash': hash
        }

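    # A sketch of the structure returned by get_model(); the numeric values
    # shown are illustrative, not defaults:
    #
    #   {
    #       'model':  <ldm model instance, moved onto self.device>,
    #       'width':  512,
    #       'height': 512,
    #       'hash':   '<sha256 of the weights file>',
    #   }
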
    def default_model(self) -> str:
        '''
        Returns the name of the default model, or None
        if none is defined.
        '''
        for model_name in self.config:
            if self.config[model_name].get('default', False):
                return model_name
        return None

    def set_default_model(self, model_name:str):
        '''
        Set the default model. The change will not take
        effect until you call model_cache.commit()
        '''
        print(f'DEBUG: before set_default_model()\n{OmegaConf.to_yaml(self.config)}')
        assert model_name in self.config, f"unknown model '{model_name}'"
        config = self.config
        for model in config:
            config[model].pop('default', None)
        config[model_name]['default'] = True
        print(f'DEBUG: after set_default_model():\n{OmegaConf.to_yaml(self.config)}')

    def list_models(self) -> dict:
        '''
        Return a dict of models in the format:
        { model_name1: {'status': ('active'|'cached'|'not loaded'),
                        'description': description,
                       },
          model_name2: { etc }
        }
        '''
        result = {}
        for name in self.config:
            try:
                description = self.config[name].description
            except ConfigAttributeError:
                description = '<no description>'
            if self.current_model == name:
                status = 'active'
            elif name in self.models:
                status = 'cached'
            else:
                status = 'not loaded'
            result[name] = {}
            result[name]['status'] = status
            result[name]['description'] = description
        return result

    def print_models(self):
        '''
        Print a table of models, their descriptions, and load status
        '''
        models = self.list_models()
        for name in models:
            line = f'{name:25s} {models[name]["status"]:>10s} {models[name]["description"]}'
            if models[name]['status'] == 'active':
                print(f'\033[1m{line}\033[0m')
            else:
                print(line)

    def del_model(self, model_name:str) -> bool:
        '''
        Delete the named model.
        '''
        omega = self.config
        del omega[model_name]
        if model_name in self.stack:
            self.stack.remove(model_name)
        return True

    def add_model(self, model_name:str, model_attributes:dict, clobber=False) -> bool:
        '''
        Update the named model with a dictionary of attributes. Will fail with an
        assertion error if the name already exists. Pass clobber=True to overwrite.

        On a successful update, the config will be changed in memory and the
        method will return True. Will fail with an assertion error if provided
        attributes are incorrect or the model name is missing.
        '''
        omega = self.config
        # check that all the required fields are present
        for field in ('description', 'weights', 'height', 'width', 'config'):
            assert field in model_attributes, f'required field {field} is missing'

        assert (clobber or model_name not in omega), f'attempt to overwrite existing model definition "{model_name}"'

        config = omega[model_name] if model_name in omega else {}
        for field in model_attributes:
            config[field] = model_attributes[field]

        omega[model_name] = config
        if clobber:
            self._invalidate_cached_model(model_name)
        return True

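    # A minimal sketch of the attribute dict expected by add_model(); the model
    # name and file paths below are hypothetical examples, not shipped defaults:
    #
    #   cache.add_model('my-finetune', {
    #       'description': 'my fine-tuned model',
    #       'config':      'configs/stable-diffusion/v1-inference.yaml',
    #       'weights':     'models/ldm/stable-diffusion-v1/my-finetune.ckpt',
    #       'width':       512,
    #       'height':      512,
    #   })
    #   cache.commit('configs/models.yaml')   # persist the change to disk
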
    def _load_model(self, model_name:str):
        """Load and initialize the model from configuration variables passed at object creation time"""
        if model_name not in self.config:
            print(f'"{model_name}" is not a known model name. Please check your models.yaml file')
            return None

        mconfig = self.config[model_name]
        config = mconfig.config
        weights = mconfig.weights
        vae = mconfig.get('vae', None)
        width = mconfig.width
        height = mconfig.height

        print(f'>> Loading {model_name} from {weights}')

        # for usage statistics
        if self._has_cuda():
            torch.cuda.reset_peak_memory_stats()
            torch.cuda.empty_cache()

        tic = time.time()

        # this does the work
        c = OmegaConf.load(config)
        with open(weights, 'rb') as f:
            weight_bytes = f.read()
        model_hash = self._cached_sha256(weights, weight_bytes)
        pl_sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
        del weight_bytes
        sd = pl_sd['state_dict']
        model = instantiate_from_config(c.model)
        m, u = model.load_state_dict(sd, strict=False)

        if self.precision == 'float16':
            print(' | Using faster float16 precision')
            model.to(torch.float16)
        else:
            print(' | Using more accurate float32 precision')

        # look for and load a matching vae file. Code borrowed from AUTOMATIC1111 modules/sd_models.py
        if vae:
            if os.path.exists(vae):
                print(f' | Loading VAE weights from: {vae}')
                vae_ckpt = torch.load(vae, map_location="cpu")
                vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
                model.first_stage_model.load_state_dict(vae_dict, strict=False)
            else:
                print(f' | VAE file {vae} not found. Skipping.')

        model.to(self.device)
        # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here
        model.cond_stage_model.device = self.device

        model.eval()

        for m in model.modules():
            if isinstance(m, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
                m._orig_padding_mode = m.padding_mode

        # usage statistics
        toc = time.time()
        print('>> Model loaded in', '%4.2fs' % (toc - tic))
        if self._has_cuda():
            print(
                '>> Max VRAM used to load the model:',
                '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
                '\n>> Current VRAM usage:'
                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
            )
        return model, width, height, model_hash

    def offload_model(self, model_name:str):
        '''
        Offload the indicated model from GPU VRAM into system RAM.
        '''
        if model_name not in self.models:
            return

        message = f'>> Offloading {model_name} to CPU'
        print(message)
        model = self.models[model_name]['model']
        self.models[model_name]['model'] = self._model_to_cpu(model)

        gc.collect()
        if self._has_cuda():
            torch.cuda.empty_cache()

    def _make_cache_room(self):
        num_loaded_models = len(self.models)
        if num_loaded_models >= self.max_loaded_models:
            least_recent_model = self._pop_oldest_model()
            print(f'>> Cache limit (max={self.max_loaded_models}) reached. Purging {least_recent_model}')
            if least_recent_model is not None:
                del self.models[least_recent_model]
                gc.collect()

    def print_vram_usage(self):
        if self._has_cuda():
            print('>> Current VRAM usage: ', '%4.2fG' % (torch.cuda.memory_allocated() / 1e9))

    def commit(self, config_file_path:str):
        '''
        Write current configuration out to the indicated file.
        '''
        yaml_str = OmegaConf.to_yaml(self.config)
        tmpfile = os.path.join(os.path.dirname(config_file_path), 'new_config.tmp')
        with open(tmpfile, 'w') as outfile:
            outfile.write(self.preamble())
            outfile.write(yaml_str)
        os.rename(tmpfile, config_file_path)

    def preamble(self):
        '''
        Returns the preamble for the config file.
        '''
        return '''# This file describes the alternative machine learning models
# available to the InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
'''

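    # An illustrative models.yaml stanza of the kind this preamble introduces;
    # the model name and file paths are hypothetical examples:
    #
    #   stable-diffusion-1.5:
    #       description: Stable Diffusion version 1.5
    #       config: configs/stable-diffusion/v1-inference.yaml
    #       weights: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
    #       width: 512
    #       height: 512
    #       default: true
    #       vae: models/ldm/stable-diffusion-v1/custom-vae.ckpt   # optional
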
    def _invalidate_cached_model(self, model_name:str):
        self.offload_model(model_name)
        if model_name in self.stack:
            self.stack.remove(model_name)
        self.models.pop(model_name, None)

    def _model_to_cpu(self, model):
        if self.device.type != 'cpu':
            model.cond_stage_model.device = 'cpu'
            model.first_stage_model.to('cpu')
            model.cond_stage_model.to('cpu')
            model.model.to('cpu')
            return model.to('cpu')
        else:
            return model

    def _model_from_cpu(self, model):
        if self.device.type != 'cpu':
            model.to(self.device)
            model.first_stage_model.to(self.device)
            model.cond_stage_model.to(self.device)
            model.cond_stage_model.device = self.device
        return model

    def _pop_oldest_model(self):
        '''
        Remove and return the first element of the FIFO, which ought
        to be the least recently accessed model.
        '''
        return self.stack.pop(0)

    def _push_newest_model(self, model_name:str):
        '''
        Maintain a simple FIFO. First element is always the
        least recent, and last element is always the most recent.
        '''
        try:
            self.stack.remove(model_name)
        except ValueError:
            pass
        self.stack.append(model_name)

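    # How the LRU stack evolves, as a sketch: each get_model() call pushes the
    # requested model to the end of self.stack, so the purge candidate sits at
    # the front.
    #
    #   get_model('a')  ->  stack == ['a']
    #   get_model('b')  ->  stack == ['a', 'b']
    #   get_model('a')  ->  stack == ['b', 'a']   # 'a' moved to the newest slot
    #   _make_cache_room() purges stack.pop(0)    # i.e. 'b', the least recently used
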
    def _has_cuda(self):
        return self.device.type == 'cuda'

    def _cached_sha256(self, path, data):
        dirname = os.path.dirname(path)
        basename = os.path.basename(path)
        base, _ = os.path.splitext(basename)
        hashpath = os.path.join(dirname, base + '.sha256')
        if os.path.exists(hashpath) and os.path.getmtime(path) <= os.path.getmtime(hashpath):
            with open(hashpath) as f:
                hash = f.read()
            return hash
        print('>> Calculating sha256 hash of weights file')
        tic = time.time()
        sha = hashlib.sha256()
        sha.update(data)
        hash = sha.hexdigest()
        toc = time.time()
        print(f'>> sha256 = {hash}', '(%4.2fs)' % (toc - tic))
        with open(hashpath, 'w') as f:
            f.write(hash)
        return hash