Complete several steps needed to make 3.0 installable

- invokeai-configure updated to work with new config system
- migrate invokeai.init to invokeai.yaml during configure
- replace legacy invokeai with invokeai-node-cli
- add ability to run an invocation directly from invokeai-node-cli command line
- update CI tests to work with new invokeai syntax
This commit is contained in:
Lincoln Stein 2023-05-17 14:13:12 -04:00
parent b7c5a39685
commit 7593dc19d6
6 changed files with 87 additions and 68 deletions

View File

@ -133,8 +133,10 @@ jobs:
invokeai invokeai
--no-patchmatch --no-patchmatch
--no-nsfw_checker --no-nsfw_checker
--from_file ${{ env.TEST_PROMPTS }} --precision=float32
--always_use_cpu
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }} --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
< ${{ env.TEST_PROMPTS }}
- name: Archive results - name: Archive results
id: archive-results id: archive-results

View File

@ -10,7 +10,7 @@ from typing import (
get_type_hints, get_type_hints,
) )
from pydantic import BaseModel from pydantic import BaseModel, ValidationError
from pydantic.fields import Field from pydantic.fields import Field
@ -188,7 +188,14 @@ def invoke_all(context: CliContext):
def invoke_cli(): def invoke_cli():
# this gets the basic configuration
config = get_invokeai_config() config = get_invokeai_config()
# get the optional list of invocations to execute on the command line
parser = config.get_parser()
parser.add_argument('commands',nargs='*')
invocation_commands = parser.parse_args().commands
model_manager = get_model_manager(config,logger=logger) model_manager = get_model_manager(config,logger=logger)
events = EventServiceBase() events = EventServiceBase()
@ -232,9 +239,16 @@ def invoke_cli():
context = CliContext(invoker, session, parser) context = CliContext(invoker, session, parser)
set_autocompleter(services) set_autocompleter(services)
while True: command_line_args_exist = len(invocation_commands) > 0
done = False
while not done:
try: try:
cmd_input = input("invoke> ") if command_line_args_exist:
cmd_input = invocation_commands.pop(0)
done = len(invocation_commands) == 0
else:
cmd_input = input("invoke> ")
except (KeyboardInterrupt, EOFError): except (KeyboardInterrupt, EOFError):
# Ctrl-c exits # Ctrl-c exits
break break
@ -358,6 +372,9 @@ def invoke_cli():
invoker.services.logger.warning('Invalid command, use "help" to list commands') invoker.services.logger.warning('Invalid command, use "help" to list commands')
continue continue
except ValidationError:
invoker.services.logger.warning('Invalid command arguments, run "<command> --help" for summary')
except SessionError: except SessionError:
# Start a new session # Start a new session
invoker.services.logger.warning("Session error: creating a new session") invoker.services.logger.warning("Session error: creating a new session")

View File

@ -472,19 +472,30 @@ class editOptsForm(npyscreen.FormMultiPage):
self.nextrely += 1 self.nextrely += 1
self.add_widget_intelligent( self.add_widget_intelligent(
npyscreen.FixedText, npyscreen.FixedText,
value="Directory containing embedding/textual inversion files:", value="Directories containing textual inversion and LoRA models (<tab> autocompletes, ctrl-N advances):",
editable=False, editable=False,
color="CONTROL", color="CONTROL",
) )
self.embedding_path = self.add_widget_intelligent( self.embedding_dir = self.add_widget_intelligent(
npyscreen.TitleFilename, npyscreen.TitleFilename,
name="(<tab> autocompletes, ctrl-N advances):", name=" Textual Inversion Embeddings:",
value=str(default_embedding_dir()), value=str(default_embedding_dir()),
select_dir=True, select_dir=True,
must_exist=False, must_exist=False,
use_two_lines=False, use_two_lines=False,
labelColor="GOOD", labelColor="GOOD",
begin_entry_at=40, begin_entry_at=32,
scroll_exit=True,
)
self.lora_dir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name=" LoRA and LyCORIS:",
value=str(default_lora_dir()),
select_dir=True,
must_exist=False,
use_two_lines=False,
labelColor="GOOD",
begin_entry_at=32,
scroll_exit=True, scroll_exit=True,
) )
self.nextrely += 1 self.nextrely += 1
@ -551,9 +562,9 @@ class editOptsForm(npyscreen.FormMultiPage):
bad_fields.append( bad_fields.append(
f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory." f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory."
) )
if not Path(opt.embedding_path).parent.exists(): if not Path(opt.embedding_dir).parent.exists():
bad_fields.append( bad_fields.append(
f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_path).parent)} is an existing directory." f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_dir).parent)} is an existing directory."
) )
if len(bad_fields) > 0: if len(bad_fields) > 0:
message = "The following problems were detected and must be corrected:\n" message = "The following problems were detected and must be corrected:\n"
@ -568,13 +579,14 @@ class editOptsForm(npyscreen.FormMultiPage):
new_opts = Namespace() new_opts = Namespace()
for attr in [ for attr in [
"outdir", "outdir",
"nsfw_checker", "nsfw_checker",
"free_gpu_mem", "free_gpu_mem",
"max_loaded_models", "max_loaded_models",
"xformers_enabled", "xformers_enabled",
"always_use_cpu", "always_use_cpu",
"embedding_path", "embedding_dir",
"lora_dir",
]: ]:
setattr(new_opts, attr, getattr(self, attr).value) setattr(new_opts, attr, getattr(self, attr).value)
@ -680,31 +692,21 @@ def run_console_ui(
# ------------------------------------- # -------------------------------------
def write_opts(opts: Namespace, init_file: Path): def write_opts(opts: Namespace, init_file: Path):
""" """
Update the invokeai.init file with values from current settings. Update the invokeai.yaml file with values from current settings.
""" """
if Path(init_file).exists(): # this will load current settings
config = OmegaConf.load(init_file) config = InvokeAIAppConfig()
else: for key,value in opts.__dict__.items():
config = OmegaConf.create() if hasattr(config,key):
setattr(config,key,value)
if not config.globals:
config.globals = dict()
globals = config.globals
fields = list(get_type_hints(InvokeAIAppConfig).keys())
for attr in fields:
if hasattr(opts,attr):
setattr(globals,attr,getattr(opts,attr))
with open(init_file,'w', encoding='utf-8') as file: with open(init_file,'w', encoding='utf-8') as file:
file.write(OmegaConf.to_yaml(config)) file.write(config.to_yaml())
if opts.hf_token: if opts.hf_token:
HfLogin(opts.hf_token) HfLogin(opts.hf_token)
# ------------------------------------- # -------------------------------------
def default_output_dir() -> Path: def default_output_dir() -> Path:
return config.root / "outputs" return config.root / "outputs"
@ -713,6 +715,10 @@ def default_output_dir() -> Path:
def default_embedding_dir() -> Path: def default_embedding_dir() -> Path:
return config.root / "embeddings" return config.root / "embeddings"
# -------------------------------------
def default_lora_dir() -> Path:
return config.root / "loras"
# ------------------------------------- # -------------------------------------
def write_default_options(program_opts: Namespace, initfile: Path): def write_default_options(program_opts: Namespace, initfile: Path):
opt = default_startup_options(initfile) opt = default_startup_options(initfile)
@ -725,35 +731,26 @@ def write_default_options(program_opts: Namespace, initfile: Path):
# the old init file and write out the new # the old init file and write out the new
# yaml format. # yaml format.
def migrate_init_file(legacy_format:Path): def migrate_init_file(legacy_format:Path):
old = legacy_parser.parse_args([f'@{str(legacy_format)}']) old = legacy_parser.parse_args([f'@{str(legacy_format)}'])
new = new = InvokeAIAppConfig(conf={})
new = OmegaConf.create()
new.globals = dict()
globals = new.globals
for attr in ['host','port']:
if hasattr(old,attr):
setattr(globals,attr,getattr(old,attr))
# change of name
globals.allow_origins = old.cors or []
fields = list(get_type_hints(InvokeAIAppConfig).keys()) fields = list(get_type_hints(InvokeAIAppConfig).keys())
for attr in fields: for attr in fields:
if hasattr(old,attr): if hasattr(old,attr):
setattr(globals,attr,getattr(old,attr)) setattr(new,attr,getattr(old,attr))
# a few places where the names have changed # a few places where the field names have changed and we have to
globals.nsfw_checker = old.nsfw_checker # manually add in the new names/values
globals.xformers_enabled = old.xformers new.nsfw_checker = old.safety_checker
globals.conf_path = old.conf new.xformers_enabled = old.xformers
globals.embedding_dir = old.embedding_path new.conf_path = old.conf
new.embedding_dir = old.embedding_path
new.globals = {key: globals[key] for key in sorted(globals)}
invokeai_yaml = legacy_format.parent / 'invokeai.yaml' invokeai_yaml = legacy_format.parent / 'invokeai.yaml'
with open(invokeai_yaml,"w", encoding="utf-8") as outfile: with open(invokeai_yaml,"w", encoding="utf-8") as outfile:
outfile.write(OmegaConf.to_yaml(new)) outfile.write(new.to_yaml())
legacy_format.replace(legacy_format.parent / 'invokeai.init.old')
# ------------------------------------- # -------------------------------------
def main(): def main():
@ -822,10 +819,10 @@ def main():
old_init_file = Path(config.root, 'invokeai.init') old_init_file = Path(config.root, 'invokeai.init')
new_init_file = Path(config.root, 'invokeai.yaml') new_init_file = Path(config.root, 'invokeai.yaml')
if old_init_file.exists() and not new_init_file.exists(): if old_init_file.exists() and not new_init_file.exists():
print('** MIGRATING OLD invokeai.init FILE TO NEW invokeai.yaml FORMAT') print('** Migrating invokeai.init to invokeai.yaml')
migrate_init_file(old_init_file) migrate_init_file(old_init_file)
# is it a good idea to re-read invokeai.yaml? config = get_invokeai_config() # reread defaults
config = get_invokeai_config()
if not config.model_conf_path.exists(): if not config.model_conf_path.exists():
initialize_rootdir(config.root, opt.yes_to_all) initialize_rootdir(config.root, opt.yes_to_all)

View File

@ -47,8 +47,7 @@ from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer from transformers import CLIPTextModel, CLIPTokenizer
# invokeai stuff # invokeai stuff
from ..args import ArgFormatter, PagingArgumentParser from invokeai.app.services.config import InvokeAIAppConfig
from ..globals import Globals, global_cache_dir
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = { PIL_INTERPOLATION = {
@ -90,6 +89,8 @@ def save_progress(
def parse_args(): def parse_args():
config = InvokeAIAppConfig()
parser = PagingArgumentParser( parser = PagingArgumentParser(
description="Textual inversion training", formatter_class=ArgFormatter description="Textual inversion training", formatter_class=ArgFormatter
) )
@ -112,7 +113,7 @@ def parse_args():
"--root_dir", "--root_dir",
"--root", "--root",
type=Path, type=Path,
default=Globals.root, default=config.root,
help="Path to the invokeai runtime directory", help="Path to the invokeai runtime directory",
) )
general_group.add_argument( general_group.add_argument(
@ -127,7 +128,7 @@ def parse_args():
general_group.add_argument( general_group.add_argument(
"--output_dir", "--output_dir",
type=Path, type=Path,
default=f"{Globals.root}/text-inversion-model", default=f"{config.root}/text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.", help="The output directory where the model predictions and checkpoints will be written.",
) )
model_group.add_argument( model_group.add_argument(
@ -580,7 +581,7 @@ def do_textual_inversion_training(
# setting up things the way invokeai expects them # setting up things the way invokeai expects them
if not os.path.isabs(output_dir): if not os.path.isabs(output_dir):
output_dir = os.path.join(Globals.root, output_dir) output_dir = os.path.join(config.root, output_dir)
logging_dir = output_dir / logging_dir logging_dir = output_dir / logging_dir
@ -628,7 +629,7 @@ def do_textual_inversion_training(
elif output_dir is not None: elif output_dir is not None:
os.makedirs(output_dir, exist_ok=True) os.makedirs(output_dir, exist_ok=True)
models_conf = OmegaConf.load(os.path.join(Globals.root, "configs/models.yaml")) models_conf = OmegaConf.load(os.path.join(config.root, "configs/models.yaml"))
model_conf = models_conf.get(model, None) model_conf = models_conf.get(model, None)
assert model_conf is not None, f"Unknown model: {model}" assert model_conf is not None, f"Unknown model: {model}"
assert ( assert (
@ -640,7 +641,7 @@ def do_textual_inversion_training(
assert ( assert (
pretrained_model_name_or_path pretrained_model_name_or_path
), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}" ), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}"
pipeline_args = dict(cache_dir=global_cache_dir("hub")) pipeline_args = dict(cache_dir=config.cache_dir())
# Load tokenizer # Load tokenizer
if tokenizer_name: if tokenizer_name:

View File

@ -60,6 +60,7 @@ def test_env_override():
conf = InvokeAIAppConfig(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10']) conf = InvokeAIAppConfig(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10'])
assert conf.nsfw_checker==False assert conf.nsfw_checker==False
assert conf.max_loaded_models==10
conf = InvokeAIAppConfig(conf=init1,argv=[],max_loaded_models=20) conf = InvokeAIAppConfig(conf=init1,argv=[],max_loaded_models=20)
assert conf.max_loaded_models==20 assert conf.max_loaded_models==20

View File

@ -1,3 +1,4 @@
banana sushi -Ak_lms -S42 -s5 t2i --positive_prompt 'banana sushi' --seed 42
banana sushi -Ak_heun -S42 -s5 compel --prompt 'strawberry sushi' | compel | noise | t2l --scheduler heun --steps 3 --scheduler ddim --link -3 conditioning positive_conditioning --link -2 conditioning negative_conditioning | l2i
banana sushi -Addim -S42 -s5 compel --prompt 'banana sushi' | compel | noise | t2i --scheduler heun --steps 3 --scheduler euler_a --link -3 conditioning positive_conditioning --link -2 conditioning negative_conditioning