mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
add ability to specify location of config file (models.yaml)
This commit is contained in:
commit 063b4a1995
configs/models.yaml (new file, 18 lines added)
@@ -0,0 +1,18 @@
+# This file describes the alternative machine learning models
+# available to the dream script.
+#
+# To add a new model, follow the examples below. Each
+# model requires a model config file, a weights file,
+# and the width and height of the images it
+# was trained on.
+
+laion400m:
+    config: configs/latent-diffusion/txt2img-1p4B-eval.yaml
+    weights: models/ldm/text2img-large/model.ckpt
+    width: 256
+    height: 256
+stable-diffusion-1.4:
+    config: configs/stable-diffusion/v1-inference.yaml
+    weights: models/ldm/stable-diffusion-v1/model.ckpt
+    width: 512
+    height: 512
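The comments in the new file invite adding further entries. As a sketch, a hypothetical fine-tuned checkpoint could be registered with a stanza like the one below (the stanza name and weights path are illustrative, not part of this commit) and then selected at run time with --model my-finetune:

# Hypothetical example entry; name and paths are placeholders.
my-finetune:
    config: configs/stable-diffusion/v1-inference.yaml
    weights: models/ldm/my-finetune/model.ckpt
    width: 512
    height: 512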
scripts/dream.py
@@ -9,31 +9,34 @@ import sys
 import copy
 import warnings
 import time
-from ldm.dream.devices import choose_torch_device
 import ldm.dream.readline
 from ldm.dream.pngwriter import PngWriter, PromptFormatter
 from ldm.dream.server import DreamServer, ThreadingDreamServer
 from ldm.dream.image_util import make_grid
+from omegaconf import OmegaConf

 def main():
     """Initialize command-line parsers and the diffusion model"""
     arg_parser = create_argv_parser()
     opt = arg_parser.parse_args()

     if opt.laion400m:
-        # defaults suitable to the older latent diffusion weights
-        width = 256
-        height = 256
-        config = 'configs/latent-diffusion/txt2img-1p4B-eval.yaml'
-        weights = 'models/ldm/text2img-large/model.ckpt'
-    else:
-        # some defaults suitable for stable diffusion weights
-        width = 512
-        height = 512
-        config = 'configs/stable-diffusion/v1-inference.yaml'
-        if '.ckpt' in opt.weights:
-            weights = opt.weights
-        else:
-            weights = f'models/ldm/stable-diffusion-v1/{opt.weights}.ckpt'
+        print('--laion400m flag has been deprecated. Please use --model laion400m instead.')
+        sys.exit(-1)
+    if opt.weights != 'model':
+        print('--weights argument has been deprecated. Please configure ./configs/models.yaml, and call it using --model instead.')
+        sys.exit(-1)
+
+    try:
+        print(f'attempting to load {opt.config}')
+        models = OmegaConf.load(opt.config)
+        width = models[opt.model].width
+        height = models[opt.model].height
+        config = models[opt.model].config
+        weights = models[opt.model].weights
+    except (FileNotFoundError, IOError, KeyError) as e:
+        print(f'{e}. Aborting.')
+        sys.exit(-1)

     print('* Initializing, be patient...\n')
     sys.path.append('.')
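For reference, the model-selection logic this hunk adds to main() can be exercised on its own. The snippet below is a minimal sketch, assuming it is run from the repository root with the default configs/models.yaml shown above; the hard-coded model_name and config_path stand in for the --model and --config options:

# Minimal sketch of the lookup added to main() above.
import sys
from omegaconf import OmegaConf

model_name = 'stable-diffusion-1.4'    # value that --model would supply
config_path = 'configs/models.yaml'    # value that --config would supply

try:
    models = OmegaConf.load(config_path)
    width = models[model_name].width
    height = models[model_name].height
    config = models[model_name].config
    weights = models[model_name].weights
except (FileNotFoundError, IOError, KeyError) as e:
    print(f'{e}. Aborting.')
    sys.exit(-1)

print(f'{model_name}: {width}x{height}, config={config}, weights={weights}')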
@@ -348,8 +351,6 @@ def create_argv_parser():
         dest='full_precision',
         action='store_true',
         help='Use slower full precision math for calculations',
-        # MPS only functions with full precision, see https://github.com/lstein/stable-diffusion/issues/237
-        default=choose_torch_device() == 'mps',
     )
     parser.add_argument(
         '-g',
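With these two lines removed, full precision is no longer switched on automatically when the torch device is Apple's MPS backend. A rough sketch of how a caller could restore that behaviour explicitly is shown below; choose_torch_device is the repository helper referenced by the removed default, while the function name wants_full_precision is illustrative:

# Sketch only: re-apply the old MPS default at the call site rather than in argparse.
from ldm.dream.devices import choose_torch_device

def wants_full_precision(explicit_flag: bool) -> bool:
    # MPS only functions with full precision (see issue #237), so fall back to
    # full precision there even when the flag was not passed explicitly.
    return explicit_flag or choose_torch_device() == 'mps'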
@@ -429,6 +430,16 @@ def create_argv_parser():
         default='cuda',
         help="device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if available"
     )
+    parser.add_argument(
+        '--model',
+        default='stable-diffusion-1.4',
+        help='Indicates which diffusion model to load. (currently "stable-diffusion-1.4" (default) or "laion400m")',
+    )
+    parser.add_argument(
+        '--config',
+        default ='configs/models.yaml',
+        help ='Path to configuration file for alternate models.',
+    )
     return parser
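Taken together with the earlier hunks, the new flags let the models file live anywhere. A small sketch of how the two options parse, with illustrative command-line values (the /some/other path is a placeholder):

# Sketch: the two new options in isolation, parsed against a sample command line.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--model',
    default='stable-diffusion-1.4',
    help='Indicates which diffusion model to load.',
)
parser.add_argument(
    '--config',
    default='configs/models.yaml',
    help='Path to configuration file for alternate models.',
)

# Equivalent of: python scripts/dream.py --model laion400m --config /some/other/models.yaml
opt = parser.parse_args(['--model', 'laion400m', '--config', '/some/other/models.yaml'])
print(opt.model, opt.config)   # laion400m /some/other/models.yaml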