Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
complete several steps needed to make 3.0 installable
- invokeai-configure updated to work with new config system
- migrate invokeai.init to invokeai.yaml during configure
- replace legacy invokeai with invokeai-node-cli
- add ability to run an invocation directly from invokeai-node-cli command line
- update CI tests to work with new invokeai syntax
This commit is contained in:
parent b7c5a39685
commit 7593dc19d6
.github/workflows/test-invoke-pip.yml (vendored, 4 changes)
@@ -133,8 +133,10 @@ jobs:
            invokeai
            --no-patchmatch
            --no-nsfw_checker
            --from_file ${{ env.TEST_PROMPTS }}
            --precision=float32
            --always_use_cpu
            --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
            < ${{ env.TEST_PROMPTS }}

      - name: Archive results
        id: archive-results
@@ -10,7 +10,7 @@ from typing import (
    get_type_hints,
)

from pydantic import BaseModel
from pydantic import BaseModel, ValidationError
from pydantic.fields import Field

@@ -188,7 +188,14 @@ def invoke_all(context: CliContext):


def invoke_cli():
    # this gets the basic configuration
    config = get_invokeai_config()

    # get the optional list of invocations to execute on the command line
    parser = config.get_parser()
    parser.add_argument('commands',nargs='*')
    invocation_commands = parser.parse_args().commands

    model_manager = get_model_manager(config,logger=logger)

    events = EventServiceBase()

@@ -232,8 +239,15 @@ def invoke_cli():
    context = CliContext(invoker, session, parser)
    set_autocompleter(services)

    while True:
    command_line_args_exist = len(invocation_commands) > 0
    done = False

    while not done:
        try:
            if command_line_args_exist:
                cmd_input = invocation_commands.pop(0)
                done = len(invocation_commands) == 0
            else:
                cmd_input = input("invoke> ")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-c exits

@@ -358,6 +372,9 @@ def invoke_cli():
                invoker.services.logger.warning('Invalid command, use "help" to list commands')
                continue

            except ValidationError:
                invoker.services.logger.warning('Invalid command arguments, run "<command> --help" for summary')

            except SessionError:
                # Start a new session
                invoker.services.logger.warning("Session error: creating a new session")
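Note: for orientation, here is a small, self-contained sketch of the flow that the invoke_cli changes above introduce. Commands passed on the command line are queued and executed first, and the interactive invoke> prompt is used only when no queued commands remain. The run_cli helper and the way the command is dispatched are illustrative assumptions, not code from this commit.

# Illustrative sketch of the "commands from argv, then interactive prompt" flow.
import argparse

def run_cli(argv=None):
    parser = argparse.ArgumentParser(prog="invokeai-node-cli")
    parser.add_argument("commands", nargs="*", help="optional invocations to run, then exit")
    invocation_commands = parser.parse_args(argv).commands

    command_line_args_exist = len(invocation_commands) > 0
    done = False
    while not done:
        try:
            if command_line_args_exist:
                cmd_input = invocation_commands.pop(0)
                done = len(invocation_commands) == 0
            else:
                cmd_input = input("invoke> ")
        except (KeyboardInterrupt, EOFError):
            break  # Ctrl-C / Ctrl-D exits
        print(f"would dispatch: {cmd_input}")  # stand-in for dispatching to the session

if __name__ == "__main__":
    # run a single invocation non-interactively; the command string is taken
    # from the test prompt file later in this commit
    run_cli(["t2i --positive_prompt 'banana sushi' --seed 42"])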
@@ -472,19 +472,30 @@ class editOptsForm(npyscreen.FormMultiPage):
        self.nextrely += 1
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="Directory containing embedding/textual inversion files:",
            value="Directories containing textual inversion and LoRA models (<tab> autocompletes, ctrl-N advances):",
            editable=False,
            color="CONTROL",
        )
        self.embedding_path = self.add_widget_intelligent(
        self.embedding_dir = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name="(<tab> autocompletes, ctrl-N advances):",
            name=" Textual Inversion Embeddings:",
            value=str(default_embedding_dir()),
            select_dir=True,
            must_exist=False,
            use_two_lines=False,
            labelColor="GOOD",
            begin_entry_at=40,
            begin_entry_at=32,
            scroll_exit=True,
        )
        self.lora_dir = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name=" LoRA and LyCORIS:",
            value=str(default_lora_dir()),
            select_dir=True,
            must_exist=False,
            use_two_lines=False,
            labelColor="GOOD",
            begin_entry_at=32,
            scroll_exit=True,
        )
        self.nextrely += 1
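Note: a minimal npyscreen sketch of the two directory pickers added above. The TitleFilename keyword arguments mirror the hunk; the surrounding form and application classes are assumed scaffolding so the sketch can run on its own, and the default directory values are placeholders rather than the real default_embedding_dir()/default_lora_dir() helpers.

# Hedged sketch only; not the actual invokeai-configure form.
import npyscreen

class DirectoriesForm(npyscreen.FormMultiPage):
    def create(self):
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="Directories containing textual inversion and LoRA models (<tab> autocompletes, ctrl-N advances):",
            editable=False,
        )
        self.embedding_dir = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name=" Textual Inversion Embeddings:",
            value="embeddings",          # placeholder for default_embedding_dir()
            select_dir=True,
            must_exist=False,
            use_two_lines=False,
            begin_entry_at=32,
            scroll_exit=True,
        )
        self.lora_dir = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name=" LoRA and LyCORIS:",
            value="loras",               # placeholder for default_lora_dir()
            select_dir=True,
            must_exist=False,
            use_two_lines=False,
            begin_entry_at=32,
            scroll_exit=True,
        )

class ConfigureApp(npyscreen.NPSAppManaged):
    def onStart(self):
        self.addForm("MAIN", DirectoriesForm, name="Directories")

if __name__ == "__main__":
    ConfigureApp().run()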
@@ -551,9 +562,9 @@ class editOptsForm(npyscreen.FormMultiPage):
            bad_fields.append(
                f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory."
            )
        if not Path(opt.embedding_path).parent.exists():
        if not Path(opt.embedding_dir).parent.exists():
            bad_fields.append(
                f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_path).parent)} is an existing directory."
                f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_dir).parent)} is an existing directory."
            )
        if len(bad_fields) > 0:
            message = "The following problems were detected and must be corrected:\n"

@@ -574,7 +585,8 @@ class editOptsForm(npyscreen.FormMultiPage):
            "max_loaded_models",
            "xformers_enabled",
            "always_use_cpu",
            "embedding_path",
            "embedding_dir",
            "lora_dir",
        ]:
            setattr(new_opts, attr, getattr(self, attr).value)
@@ -680,31 +692,21 @@ def run_console_ui(
# -------------------------------------
def write_opts(opts: Namespace, init_file: Path):
    """
    Update the invokeai.init file with values from current settings.
    Update the invokeai.yaml file with values from current settings.
    """

    if Path(init_file).exists():
        config = OmegaConf.load(init_file)
    else:
        config = OmegaConf.create()

    if not config.globals:
        config.globals = dict()

    globals = config.globals
    fields = list(get_type_hints(InvokeAIAppConfig).keys())
    for attr in fields:
        if hasattr(opts,attr):
            setattr(globals,attr,getattr(opts,attr))
    # this will load current settings
    config = InvokeAIAppConfig()
    for key,value in opts.__dict__.items():
        if hasattr(config,key):
            setattr(config,key,value)

    with open(init_file,'w', encoding='utf-8') as file:
        file.write(OmegaConf.to_yaml(config))
        file.write(config.to_yaml())

    if opts.hf_token:
        HfLogin(opts.hf_token)


# -------------------------------------
def default_output_dir() -> Path:
    return config.root / "outputs"
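Note: the rewritten write_opts above drops the OmegaConf globals block and instead copies matching attributes from the argparse Namespace onto the config object, then serializes it to YAML. Below is a hedged sketch of that copy-then-serialize pattern; ToyConfig, its fields, and the PyYAML call are stand-ins for the real InvokeAIAppConfig and its to_yaml() method.

# Illustrative sketch of the write_opts pattern; not the real implementation.
from argparse import Namespace
from dataclasses import asdict, dataclass
from pathlib import Path

import yaml  # PyYAML


@dataclass
class ToyConfig:
    outdir: str = "outputs"
    nsfw_checker: bool = True
    max_loaded_models: int = 2


def write_opts(opts: Namespace, init_file: Path) -> None:
    config = ToyConfig()  # start from current defaults
    # copy only the attributes the config actually defines
    for key, value in vars(opts).items():
        if hasattr(config, key):
            setattr(config, key, value)
    init_file.write_text(yaml.safe_dump(asdict(config)), encoding="utf-8")


write_opts(Namespace(nsfw_checker=False, max_loaded_models=10, unknown_field=1),
           Path("invokeai.yaml"))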
@@ -713,6 +715,10 @@ def default_output_dir() -> Path:
def default_embedding_dir() -> Path:
    return config.root / "embeddings"

# -------------------------------------
def default_lora_dir() -> Path:
    return config.root / "loras"

# -------------------------------------
def write_default_options(program_opts: Namespace, initfile: Path):
    opt = default_startup_options(initfile)
@@ -725,35 +731,26 @@ def write_default_options(program_opts: Namespace, initfile: Path):
# the old init file and write out the new
# yaml format.
def migrate_init_file(legacy_format:Path):

    old = legacy_parser.parse_args([f'@{str(legacy_format)}'])
    new = OmegaConf.create()

    new.globals = dict()
    globals = new.globals
    for attr in ['host','port']:
        if hasattr(old,attr):
            setattr(globals,attr,getattr(old,attr))
    # change of name
    globals.allow_origins = old.cors or []
    new = InvokeAIAppConfig(conf={})

    fields = list(get_type_hints(InvokeAIAppConfig).keys())
    for attr in fields:
        if hasattr(old,attr):
            setattr(globals,attr,getattr(old,attr))
            setattr(new,attr,getattr(old,attr))

    # a few places where the names have changed
    globals.nsfw_checker = old.nsfw_checker
    globals.xformers_enabled = old.xformers
    globals.conf_path = old.conf
    globals.embedding_dir = old.embedding_path

    new.globals = {key: globals[key] for key in sorted(globals)}
    # a few places where the field names have changed and we have to
    # manually add in the new names/values
    new.nsfw_checker = old.safety_checker
    new.xformers_enabled = old.xformers
    new.conf_path = old.conf
    new.embedding_dir = old.embedding_path

    invokeai_yaml = legacy_format.parent / 'invokeai.yaml'
    with open(invokeai_yaml,"w", encoding="utf-8") as outfile:
        outfile.write(OmegaConf.to_yaml(new))
        outfile.write(new.to_yaml())

    legacy_format.replace(legacy_format.parent / 'invokeai.init.old')

# -------------------------------------
def main():
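Note: a hedged sketch of the migration flow above. It uses argparse's fromfile ('@file') syntax to read the line-oriented invokeai.init, maps renamed fields, writes invokeai.yaml, and sets the old file aside. The legacy options declared here are examples; only the safety_checker to nsfw_checker, xformers to xformers_enabled, and embedding_path to embedding_dir renames are taken from the hunk itself.

# Illustrative migration sketch; not the actual migrate_init_file implementation.
import argparse
from pathlib import Path

import yaml  # PyYAML

# the legacy init file is read one argument per line via the '@file' syntax
legacy_parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
legacy_parser.add_argument("--outdir", default="outputs")
legacy_parser.add_argument("--safety_checker", action="store_true")
legacy_parser.add_argument("--xformers", action="store_true")
legacy_parser.add_argument("--embedding_path", default="embeddings")


def migrate_init_file(legacy_format: Path) -> Path:
    old = legacy_parser.parse_args([f"@{legacy_format}"])
    new = {
        "outdir": old.outdir,
        "nsfw_checker": old.safety_checker,    # renamed from safety_checker
        "xformers_enabled": old.xformers,      # renamed from xformers
        "embedding_dir": old.embedding_path,   # renamed from embedding_path
    }
    invokeai_yaml = legacy_format.parent / "invokeai.yaml"
    invokeai_yaml.write_text(yaml.safe_dump(new), encoding="utf-8")
    legacy_format.replace(legacy_format.parent / "invokeai.init.old")
    return invokeai_yaml


# usage: create a toy legacy file, then migrate it
Path("invokeai.init").write_text("--outdir\nmy_outputs\n--xformers\n", encoding="utf-8")
print(migrate_init_file(Path("invokeai.init")).read_text())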
@@ -822,10 +819,10 @@ def main():
    old_init_file = Path(config.root, 'invokeai.init')
    new_init_file = Path(config.root, 'invokeai.yaml')
    if old_init_file.exists() and not new_init_file.exists():
        print('** MIGRATING OLD invokeai.init FILE TO NEW invokeai.yaml FORMAT')
        print('** Migrating invokeai.init to invokeai.yaml')
        migrate_init_file(old_init_file)
        # is it a good idea to re-read invokeai.yaml?
        config = get_invokeai_config()
        config = get_invokeai_config()  # reread defaults

    if not config.model_conf_path.exists():
        initialize_rootdir(config.root, opt.yes_to_all)
@@ -47,8 +47,7 @@ from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer

# invokeai stuff
from ..args import ArgFormatter, PagingArgumentParser
from ..globals import Globals, global_cache_dir
from invokeai.app.services.config import InvokeAIAppConfig

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {

@@ -90,6 +89,8 @@ def save_progress(


def parse_args():
    config = InvokeAIAppConfig()

    parser = PagingArgumentParser(
        description="Textual inversion training", formatter_class=ArgFormatter
    )

@@ -112,7 +113,7 @@ def parse_args():
        "--root_dir",
        "--root",
        type=Path,
        default=Globals.root,
        default=config.root,
        help="Path to the invokeai runtime directory",
    )
    general_group.add_argument(

@@ -127,7 +128,7 @@ def parse_args():
    general_group.add_argument(
        "--output_dir",
        type=Path,
        default=f"{Globals.root}/text-inversion-model",
        default=f"{config.root}/text-inversion-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    model_group.add_argument(
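Note: a small sketch of the pattern in the parse_args changes above, where argparse defaults are drawn from a config object instead of the old Globals singleton. ToyConfig is a stand-in for InvokeAIAppConfig; only the --root_dir/--root and --output_dir arguments and their defaults reflect the hunk.

# Illustrative sketch only.
import argparse
from pathlib import Path


class ToyConfig:
    root = Path.home() / "invokeai"  # assumed default runtime directory


def parse_args(argv=None):
    config = ToyConfig()
    parser = argparse.ArgumentParser(description="Textual inversion training")
    parser.add_argument(
        "--root_dir",
        "--root",
        type=Path,
        default=config.root,
        help="Path to the invokeai runtime directory",
    )
    parser.add_argument(
        "--output_dir",
        type=Path,
        default=Path(f"{config.root}/text-inversion-model"),
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    return parser.parse_args(argv)


print(parse_args([]))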
@@ -580,7 +581,7 @@ def do_textual_inversion_training(

    # setting up things the way invokeai expects them
    if not os.path.isabs(output_dir):
        output_dir = os.path.join(Globals.root, output_dir)
        output_dir = os.path.join(config.root, output_dir)

    logging_dir = output_dir / logging_dir

@@ -628,7 +629,7 @@ def do_textual_inversion_training(
    elif output_dir is not None:
        os.makedirs(output_dir, exist_ok=True)

    models_conf = OmegaConf.load(os.path.join(Globals.root, "configs/models.yaml"))
    models_conf = OmegaConf.load(os.path.join(config.root, "configs/models.yaml"))
    model_conf = models_conf.get(model, None)
    assert model_conf is not None, f"Unknown model: {model}"
    assert (

@@ -640,7 +641,7 @@ def do_textual_inversion_training(
    assert (
        pretrained_model_name_or_path
    ), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}"
    pipeline_args = dict(cache_dir=global_cache_dir("hub"))
    pipeline_args = dict(cache_dir=config.cache_dir())

    # Load tokenizer
    if tokenizer_name:
@@ -60,6 +60,7 @@ def test_env_override():

    conf = InvokeAIAppConfig(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10'])
    assert conf.nsfw_checker==False
    assert conf.max_loaded_models==10

    conf = InvokeAIAppConfig(conf=init1,argv=[],max_loaded_models=20)
    assert conf.max_loaded_models==20
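Note: the test above exercises a precedence order of config-file values, then argv flags, then explicit keyword arguments. A hedged toy reconstruction of that ordering follows; ToyConfig and its two flags are illustrative, not the real InvokeAIAppConfig.

# Toy reconstruction of the override precedence the test relies on.
import argparse


class ToyConfig:
    def __init__(self, conf=None, argv=None, **kwargs):
        # defaults
        self.nsfw_checker = True
        self.max_loaded_models = 2

        # 1) values from the config mapping (stands in for invokeai.yaml)
        for key, value in (conf or {}).items():
            setattr(self, key, value)

        # 2) command-line overrides
        parser = argparse.ArgumentParser()
        parser.add_argument("--nsfw_checker", dest="nsfw_checker",
                            action="store_true", default=None)
        parser.add_argument("--no-nsfw_checker", dest="nsfw_checker",
                            action="store_false", default=None)
        parser.add_argument("--max_loaded", dest="max_loaded_models",
                            type=int, default=None)
        for key, value in vars(parser.parse_args(argv or [])).items():
            if value is not None:
                setattr(self, key, value)

        # 3) explicit keyword arguments win last
        for key, value in kwargs.items():
            setattr(self, key, value)


conf = ToyConfig(argv=["--no-nsfw_checker", "--max_loaded=10"])
assert conf.nsfw_checker is False and conf.max_loaded_models == 10

conf = ToyConfig(argv=[], max_loaded_models=20)
assert conf.max_loaded_models == 20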
@@ -1,3 +1,4 @@
banana sushi -Ak_lms -S42 -s5
banana sushi -Ak_heun -S42 -s5
banana sushi -Addim -S42 -s5
t2i --positive_prompt 'banana sushi' --seed 42
compel --prompt 'strawberry sushi' | compel | noise | t2l --scheduler heun --steps 3 --scheduler ddim --link -3 conditioning positive_conditioning --link -2 conditioning negative_conditioning | l2i
compel --prompt 'banana sushi' | compel | noise | t2i --scheduler heun --steps 3 --scheduler euler_a --link -3 conditioning positive_conditioning --link -2 conditioning negative_conditioning