Merge branch 'main' into lstein-import-safetensors

Lincoln Stein, 2023-01-23 21:58:07 -05:00 (committed by GitHub)
59 changed files with 2415 additions and 745 deletions


@@ -25,6 +25,7 @@ import torch
 import safetensors
 import transformers
 from diffusers import AutoencoderKL, logging as dlogging
+from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
@@ -36,7 +37,11 @@ from ldm.util import instantiate_from_config, ask_user
 DEFAULT_MAX_MODELS=2
 
 class ModelManager(object):
-    def __init__(self, config:OmegaConf, device_type:str, precision:str, max_loaded_models=DEFAULT_MAX_MODELS):
+    def __init__(self,
+                 config:OmegaConf,
+                 device_type:str='cpu',
+                 precision:str='float16',
+                 max_loaded_models=DEFAULT_MAX_MODELS):
         '''
         Initialize with the path to the models.yaml config file,
         the torch device type, and precision. The optional
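
With the new keyword defaults, a ModelManager can be constructed from just the parsed models.yaml config, with device_type and precision falling back to 'cpu' and 'float16'. A minimal usage sketch, assuming the import path and config location shown here (both are illustrative, not taken from this diff):

    from omegaconf import OmegaConf
    from ldm.invoke.model_manager import ModelManager  # assumed import path

    config = OmegaConf.load('configs/models.yaml')      # assumed location of the models.yaml file
    manager = ModelManager(config)                       # defaults: device_type='cpu', precision='float16'
    gpu_manager = ModelManager(config, device_type='cuda', max_loaded_models=3)  # explicit overrides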
@@ -541,7 +546,7 @@ class ModelManager(object):
             format='diffusers',
         )
         if isinstance(repo_or_path,Path) and repo_or_path.exists():
-            new_config.update(path=repo_or_path)
+            new_config.update(path=str(repo_or_path))
         else:
             new_config.update(repo_id=repo_or_path)
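
The only change here is wrapping the Path in str() before storing it in new_config, presumably so the value stays a plain primitive when the entry is later merged into the OmegaConf-backed models.yaml and written back to disk. A rough illustration of the failure this avoids (the keys and paths are made up for the example):

    from pathlib import Path
    from omegaconf import OmegaConf

    OmegaConf.create({'path': Path('models/example')})       # typically raises UnsupportedValueType: not a primitive
    OmegaConf.create({'path': str(Path('models/example'))})  # accepted: stored as the string 'models/example'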
@@ -833,11 +838,11 @@ class ModelManager(object):
             return model
 
         # diffusers really really doesn't like us moving a float16 model onto CPU
-        import logging
-        logging.getLogger('diffusers.pipeline_utils').setLevel(logging.CRITICAL)
+        verbosity = get_verbosity()
+        set_verbosity_error()
         model.cond_stage_model.device = 'cpu'
         model.to('cpu')
-        logging.getLogger('pipeline_utils').setLevel(logging.INFO)
+        set_verbosity(verbosity)
 
         for submodel in ('first_stage_model','cond_stage_model','model'):
             try:
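
Instead of reaching into the 'diffusers.pipeline_utils' logger by name and hard-coding INFO on the way out, the new code uses the verbosity helpers that diffusers.utils.logging exports: remember the current level, drop to error-only while the float16 model is moved, then restore whatever was set before. A standalone sketch of the same pattern, with a try/finally added as an extra safeguard that the diff itself does not include ('model' stands in for an already-loaded pipeline):

    from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error

    verbosity = get_verbosity()   # remember the caller's current diffusers log level
    set_verbosity_error()         # silence the float16-on-CPU warnings during the move
    try:
        model.to('cpu')           # 'model' is assumed to exist; any pipeline move works here
    finally:
        set_verbosity(verbosity)  # restore the previous level even if the move raises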