mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

Compare commits: v2.3.4.pos ... invokeai-b
7 Commits (SHA1):
dbd2161601
1f83ac2eae
f7bb68d01c
8cddf9c5b3
9b546ccf06
73dbf73a95
18a1f3893f
@@ -17,6 +17,8 @@ if sys.platform == "darwin":
 
 import pyparsing # type: ignore
 
+print(f'DEBUG: [1] All system modules imported', file=sys.stderr)
+
 import ldm.invoke
 
 from ..generate import Generate
@@ -31,13 +33,21 @@ from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
 from ..util import url_attachment_name
 
+print(f'DEBUG: [2] All invokeai modules imported', file=sys.stderr)
+
 # global used in multiple functions (fix)
 infile = None
 
 def main():
     """Initialize command-line parsers and the diffusion model"""
     global infile
 
+    print('DEBUG: [3] Entered main()', file=sys.stderr)
+    print('DEBUG: INVOKEAI ENVIRONMENT:')
+    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+    print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+
     opt = Args()
     args = opt.parse_args()
     if not args:
@@ -66,9 +76,13 @@ def main():
     Globals.sequential_guidance = args.sequential_guidance
     Globals.ckpt_convert = True # always true as of 2.3.4 for LoRA support
 
+    print(f'DEBUG: [4] Globals initialized', file=sys.stderr)
+
     # run any post-install patches needed
     run_patches()
 
+    print(f'DEBUG: [5] Patches run', file=sys.stderr)
+
     print(f">> Internet connectivity is {Globals.internet_available}")
 
     if not args.conf:
@@ -84,8 +98,9 @@ def main():
     # loading here to avoid long delays on startup
     # these two lines prevent a horrible warning message from appearing
     # when the frozen CLIP tokenizer is imported
+    print(f'DEBUG: [6] Importing torch modules', file=sys.stderr)
     import transformers # type: ignore
 
     from ldm.generate import Generate
 
     transformers.logging.set_verbosity_error()
@@ -93,6 +108,7 @@ def main():
 
     diffusers.logging.set_verbosity_error()
 
+    print(f'DEBUG: [7] loading restoration models', file=sys.stderr)
     # Loading Face Restoration and ESRGAN Modules
     gfpgan, codeformer, esrgan = load_face_restoration(opt)
 
@@ -114,6 +130,7 @@ def main():
     Globals.lora_models_dir = opt.lora_path
 
     # migrate legacy models
+    print(f'DEBUG: [8] migrating models', file=sys.stderr)
     ModelManager.migrate_models()
 
     # load the infile as a list of lines
@@ -131,6 +148,7 @@ def main():
 
     model = opt.model or retrieve_last_used_model()
 
+    print(f'DEBUG: [9] Creating generate object', file=sys.stderr)
     # creating a Generate object:
     try:
         gen = Generate(
@@ -157,6 +175,7 @@ def main():
         print(">> changed to seamless tiling mode")
 
     # preload the model
+    print(f'DEBUG: [10] Loading default model', file=sys.stderr)
     try:
         gen.load_model()
     except KeyError:
@@ -204,6 +223,7 @@ def main():
 # TODO: main_loop() has gotten busy. Needs to be refactored.
 def main_loop(gen, opt, completer):
     """prompt/read/execute loop"""
+    print(f'DEBUG: [11] In main loop', file=sys.stderr)
     global infile
     done = False
     doneAfterInFile = infile is not None
@@ -1322,15 +1342,16 @@ def install_missing_config_files():
     install ckpt configuration files that may have been added to the
     distro after original root directory configuration
     """
-    import invokeai.configs as conf
-    from shutil import copyfile
-
-    root_configs = Path(global_config_dir(), 'stable-diffusion')
-    repo_configs = Path(conf.__path__[0], 'stable-diffusion')
-    for src in repo_configs.iterdir():
-        dest = root_configs / src.name
-        if not dest.exists():
-            copyfile(src,dest)
+    pass
+    # import invokeai.configs as conf
+    # from shutil import copyfile
+
+    # root_configs = Path(global_config_dir(), 'stable-diffusion')
+    # repo_configs = Path(conf.__path__[0], 'stable-diffusion')
+    # for src in repo_configs.iterdir():
+    #     dest = root_configs / src.name
+    #     if not dest.exists():
+    #         copyfile(src,dest)
 
 def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
     """
@@ -32,7 +32,8 @@ def expand_prompts(
     template_file: Path,
     run_invoke: bool = False,
     invoke_model: str = None,
-    invoke_outdir: Path = None,
+    invoke_outdir: str = None,
+    invoke_root: str = None,
     processes_per_gpu: int = 1,
 ):
     """
@@ -61,6 +62,8 @@ def expand_prompts(
     invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
     if invoke_model:
         invokeai_args.extend(("--model", invoke_model))
+    if invoke_root:
+        invokeai_args.extend(("--root", invoke_root))
     if invoke_outdir:
         outdir = os.path.expanduser(invoke_outdir)
         invokeai_args.extend(("--outdir", outdir))
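For illustration, with hypothetical values invoke_model='stable-diffusion-1.5', invoke_root='~/invokeai', and invoke_outdir='~/outputs' (example inputs only, not values from the diff), the assembled argument vector would look roughly like this:

import os
import shutil

invokeai_args = [shutil.which("invokeai"), "--from_file", "-",
                 "--model", "stable-diffusion-1.5",
                 "--root", "~/invokeai",
                 "--outdir", os.path.expanduser("~/outputs")]
# e.g. ['/usr/local/bin/invokeai', '--from_file', '-', '--model', ...]
# '--from_file -' makes each child process read prompts from stdin,
# which _run_invoke later rebinds to one end of a multiprocessing Pipe.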
@@ -79,6 +82,11 @@ def expand_prompts(
     )
     import ldm.invoke.CLI
 
+    print(f'DEBUG: BATCH PARENT ENVIRONMENT:')
+    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
+    print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
+
     parent_conn, child_conn = Pipe()
     children = set()
     for i in range(processes_to_launch):
@@ -111,6 +119,13 @@ def expand_prompts(
     for p in children:
         p.terminate()
 
+def _dummy_cli_main():
+    counter = 0
+    while line := sys.stdin.readline():
+        print(f'[{counter}] {os.getpid()} got command {line.rstrip()}\n')
+        counter += 1
+        time.sleep(1)
+
 def _get_fn_format(directory:str, sequence:int)->str:
     """
     Get a filename that doesn't exceed filename length restrictions
@@ -179,9 +194,9 @@ def _run_invoke(
     os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
     sys.argv = args
     sys.stdin = MessageToStdin(conn_in)
-    sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
-    with open(logfile, "w") as stderr, redirect_stderr(stderr):
-        entry_point()
+    # sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
+    # with open(logfile, "w") as stderr, redirect_stderr(stderr):
+    entry_point()
 
 
 def _filter_output(stream: TextIOBase):
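The hunk above rebinds the child's sys.stdin to a MessageToStdin wrapper around one end of the Pipe created in expand_prompts, so the invokeai child consumes prompts as if they were typed interactively. Below is a minimal, self-contained sketch of that pattern; the MessageToStdin class here is an illustrative stand-in written for this note, not the actual implementation in the file:

import os
import sys
from multiprocessing import Pipe, Process

class MessageToStdin:
    """File-like adapter: each readline() pulls one line from the connection."""
    def __init__(self, conn):
        self.conn = conn

    def readline(self) -> str:
        try:
            return self.conn.recv()  # blocks until the parent sends a line
        except EOFError:
            return ""                # treat a closed pipe as end-of-input

def _child(conn):
    sys.stdin = MessageToStdin(conn)
    # mirrors _dummy_cli_main: echo whatever "commands" arrive on stdin
    while line := sys.stdin.readline():
        print(f"{os.getpid()} got command {line.rstrip()}")

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=_child, args=(child_conn,))
    p.start()
    for prompt in ("a cat\n", "a dog\n"):
        parent_conn.send(prompt)
    parent_conn.send("")  # empty line: readline() returns "" and the loop ends
    p.join()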
@@ -238,6 +253,10 @@ def main():
         default=1,
         help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
     )
+    parser.add_argument(
+        '--root_dir',
+        default=None,
+        help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai' )
     opt = parser.parse_args()
 
     if opt.example:
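One detail worth noting in the new --root_dir option: argparse derives the attribute name from the long option string, so the parsed value comes back as opt.root_dir, and the next hunk forwards it under that name. A quick self-contained check:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default=None)
opt = parser.parse_args(['--root_dir', '~/invokeai'])
print(opt.root_dir)  # '~/invokeai' -- the attribute is root_dir, not root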
@@ -261,6 +280,7 @@ def main():
         run_invoke=opt.invoke,
         invoke_model=opt.model,
         invoke_outdir=opt.outdir,
+        invoke_root=opt.root_dir,
         processes_per_gpu=opt.processes_per_gpu,
     )