Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
tested and working on Ubuntu
- You can now achieve several effects:
  - `invokeai-configure`: uses a console-based UI to initialize invokeai.init, download support models, and choose and download SD models
  - `invokeai-configure --yes`: without activating the GUI, populates invokeai.init with default values, downloads support models and downloads the "recommended" SD models
  - `invokeai-configure --default_only`: as above, but only downloads the default SD model (currently SD-1.5)
  - `invokeai-model-install`: select and install models; this can be used to download arbitrary models from the Internet, install HuggingFace models using their repo_id, or watch a directory for models to load at startup time
  - `invokeai-model-install --yes`: import the recommended SD models without a GUI
  - `invokeai-model-install --default_only`: as above, but only import the default model
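In shell form, the invocations described above look like this (a sketch; it assumes the console scripts are on your PATH after installation):

```bash
# Console UI: initialize invokeai.init, download support models, choose SD models
invokeai-configure

# No GUI: default invokeai.init, support models, and the "recommended" SD models
invokeai-configure --yes

# As above, but only the default SD model (currently SD-1.5)
invokeai-configure --default_only

# Select and install models (arbitrary URLs, HuggingFace repo_ids, or a watched directory)
invokeai-model-install

# Import the recommended models without a GUI, or only the default model
invokeai-model-install --yes
invokeai-model-install --default_only
```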
Windows launcher script (.bat):

@@ -11,11 +11,12 @@ echo 1. command-line
 echo 2. browser-based UI
 echo 3. run textual inversion training
 echo 4. merge models (diffusers type only)
-echo 5. re-run the configure script to download new models
-echo 6. download more starter models from HuggingFace
-echo 7. open the developer console
-echo 8. command-line help
-set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] "
+echo 5. download and install models
+echo 6. change InvokeAI startup options
+echo 7. re-run the configure script to fix a broken install
+echo 8. open the developer console
+echo 9. command-line help
+set /P restore="Please enter 1-9: [2] "
 if not defined restore set restore=2
 IF /I "%restore%" == "1" (
 echo Starting the InvokeAI command-line..
@@ -25,17 +26,20 @@ IF /I "%restore%" == "1" (
 python .venv\Scripts\invokeai.exe --web %*
 ) ELSE IF /I "%restore%" == "3" (
 echo Starting textual inversion training..
-python .venv\Scripts\invokeai-ti.exe --gui %*
+python .venv\Scripts\invokeai-ti.exe --gui
 ) ELSE IF /I "%restore%" == "4" (
 echo Starting model merging script..
-python .venv\Scripts\invokeai-merge.exe --gui %*
+python .venv\Scripts\invokeai-merge.exe --gui
 ) ELSE IF /I "%restore%" == "5" (
-echo Running invokeai-configure...
-python .venv\Scripts\invokeai-configure.exe %*
-) ELSE IF /I "%restore%" == "6" (
 echo Running invokeai-model-install...
-python .venv\Scripts\invokeai-initial-models.exe %*
+python .venv\Scripts\invokeai-model-install.exe
+) ELSE IF /I "%restore%" == "6" (
+echo Running invokeai-configure...
+python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
 ) ELSE IF /I "%restore%" == "7" (
+echo Running invokeai-configure...
+python .venv\Scripts\invokeai-configure.exe --yes --default_only
+) ELSE IF /I "%restore%" == "8" (
 echo Developer Console
 echo Python command is:
 where python
Linux/macOS launcher script (.sh):

@@ -30,12 +30,13 @@ if [ "$0" != "bash" ]; then
 echo "2. browser-based UI"
 echo "3. run textual inversion training"
 echo "4. merge models (diffusers type only)"
-echo "5. re-run the configure script to fix a broken install"
-echo "6. download more starter models from HuggingFace"
-echo "7. open the developer console"
-echo "8. command-line help "
+echo "5. download and install models"
+echo "6. change InvokeAI startup options"
+echo "7. re-run the configure script to fix a broken install"
+echo "8. open the developer console"
+echo "9. command-line help "
 echo ""
-read -p "Please enter 1, 2, 3, 4, 5, 6, 7 or 8: [2] " yn
+read -p "Please enter 1-9: [2] " yn
 choice=${yn:='2'}
 case $choice in
 1)
@@ -55,17 +56,20 @@ if [ "$0" != "bash" ]; then
 exec invokeai-merge --gui $@
 ;;
 5)
-exec invokeai-configure --root ${INVOKEAI_ROOT}
-;;
-6)
 exec invokeai-model-install --root ${INVOKEAI_ROOT}
 ;;
-7)
+6)
+exec invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
+;;
+7)
+exec invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
+;;
+8)
 echo "Developer Console:"
 file_name=$(basename "${BASH_SOURCE[0]}")
 bash --init-file "$file_name"
 ;;
-8)
+9)
 exec invokeai --help
 ;;
 *)
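For reference, the renumbered launcher menu entries dispatch to the commands below, taken directly from the hunks above (the INVOKEAI_ROOT variable is the one the launcher script already sets; the Windows launcher calls the matching .exe wrappers):

```bash
invokeai-model-install --root ${INVOKEAI_ROOT}                                       # 5. download and install models
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models   # 6. change InvokeAI startup options
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only                      # 7. re-run configure to fix a broken install
invokeai --help                                                                      # 9. command-line help
```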
Command-line argument parser (class Args):

@@ -98,6 +98,7 @@ from ldm.invoke.globals import Globals
 from ldm.invoke.prompt_parser import split_weighted_subprompts
 from argparse import Namespace
 from pathlib import Path
+from typing import List

 APP_ID = ldm.invoke.__app_id__
 APP_NAME = ldm.invoke.__app_name__
@@ -172,10 +173,10 @@ class Args(object):
         self._arg_switches = self.parse_cmd('')  # fill in defaults
         self._cmd_switches = self.parse_cmd('')  # fill in defaults

-    def parse_args(self):
+    def parse_args(self, args: List[str]=None):
         '''Parse the shell switches and store.'''
+        sysargs = args if args is not None else sys.argv[1:]
         try:
-            sysargs = sys.argv[1:]
             # pre-parse before we do any initialization to get root directory
             # and intercept --version request
             switches = self._arg_parser.parse_args(sysargs)
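With the optional `args` parameter, the parser can now be driven programmatically instead of always reading `sys.argv`. A minimal sketch of that pattern (the import path is assumed from the hunk context; the configure script uses the empty-list form in its new `default_startup_options()` helper, shown further down):

```python
from ldm.invoke.args import Args  # import path assumed from the hunk context above

# Parse an explicit list of switches instead of sys.argv[1:] ...
opts = Args().parse_args(["--outdir", "/tmp/invokeai-out"])

# ... or pass an empty list to get pure defaults, which is what
# default_startup_options() in the configure script does.
defaults = Args().parse_args([])
defaults.outdir = "/tmp/invokeai-out"  # defaults can then be adjusted in place
```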
invokeai-configure console script:

@@ -36,8 +36,12 @@ from transformers import (

 import invokeai.configs as configs
 from ..args import Args, PRECISION_CHOICES
-from .model_install_backend import download_from_hf
-from .model_install import select_and_download_models
+from .model_install_backend import (
+    download_from_hf,
+    recommended_datasets,
+    default_dataset,
+)
+from .model_install import process_and_execute, addModelsForm
 from .widgets import IntTitleSlider
 from ..globals import Globals, global_config_dir
 from ..readline import generic_completer
@@ -72,15 +76,12 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file
 # -Ak_euler_a -C10.0
 """

+
 # --------------------------------------------
 def postscript(errors: None):
     if not any(errors):
         message = f"""
-** Model Installation Successful **
-
-You're all set!
-
----
+** INVOKEAI INSTALLATION SUCCESSFUL **
 If you installed manually from source or with 'pip install': activate the virtual environment
 then run one of the following commands to start InvokeAI.

@@ -90,15 +91,11 @@ Web UI:

 Command-line interface:
 invokeai
----
-
 If you installed using an installation script, run:
-{Globals.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}
+{Globals.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}

 Add the '--help' argument to see all of the command-line switches available for use.

-Have fun!
 """

     else:
@@ -109,6 +106,7 @@ Have fun!

     print(message)

+
 # ---------------------------------------------
 def yes_or_no(prompt: str, default_yes=True):
     completer.set_options(["yes", "no"])
@@ -120,6 +118,7 @@ def yes_or_no(prompt: str, default_yes=True):
     else:
         return response[0] in ("y", "Y")

+
 # ---------------------------------------------
 def HfLogin(access_token) -> str:
     """
@@ -136,6 +135,8 @@ def HfLogin(access_token) -> str:
         sys.stdout = sys.__stdout__
         print(exc)
         raise exc
+
+
 # -------------------------------------
 class ProgressBar:
     def __init__(self, model_name="file"):
@@ -154,6 +155,7 @@ class ProgressBar:
             )
         self.pbar.update(block_size)

+
 # ---------------------------------------------
 def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"):
     try:
@@ -172,11 +174,12 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"):
         print(f"Error downloading {label} model")
         print(traceback.format_exc())

+
 # ---------------------------------------------
 # this will preload the Bert tokenizer fles
 def download_bert():
     print(
-        "Installing bert tokenizer (ignore deprecation errors)...",
+        "Installing bert tokenizer...",
         end="",
         file=sys.stderr,
     )
@@ -190,7 +193,7 @@ def download_bert():

 # ---------------------------------------------
 def download_clip():
-    print("Installing CLIP model (ignore deprecation errors)...", file=sys.stderr)
+    print("Installing CLIP model...", file=sys.stderr)
     version = "openai/clip-vit-large-patch14"
     print("Tokenizer...", file=sys.stderr, end="")
     download_from_hf(CLIPTokenizer, version)
@@ -291,23 +294,23 @@ def get_root(root: str = None) -> str:


 class editOptsForm(npyscreen.FormMultiPage):

     def create(self):
-        old_opts = self.parentApp.old_opts
+        program_opts = self.parentApp.program_opts
+        old_opts = self.parentApp.invokeai_opts
         first_time = not (Globals.root / Globals.initfile).exists()
         access_token = HfFolder.get_token()

         window_height, window_width = curses.initscr().getmaxyx()
         for i in [
-            'Configure startup settings. You can come back and change these later.',
-            'Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.',
-            'Use cursor arrows to make a checkbox selection, and space to toggle.',
+            "Configure startup settings. You can come back and change these later.",
+            "Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.",
+            "Use cursor arrows to make a checkbox selection, and space to toggle.",
         ]:
             self.add_widget_intelligent(
                 npyscreen.FixedText,
                 value=i,
                 editable=False,
-                color='CONTROL',
+                color="CONTROL",
             )

         self.nextrely += 1
@@ -317,60 +320,60 @@ class editOptsForm(npyscreen.FormMultiPage):
             begin_entry_at=0,
             editable=False,
             color="CONTROL",
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.nextrely -= 1
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='Select an output directory for images:',
+            value="Select an output directory for images:",
             editable=False,
-            color='CONTROL',
+            color="CONTROL",
         )
         self.outdir = self.add_widget_intelligent(
             npyscreen.TitleFilename,
-            name='(<tab> autocompletes, ctrl-N advances):',
+            name="(<tab> autocompletes, ctrl-N advances):",
             value=old_opts.outdir or str(default_output_dir()),
             select_dir=True,
             must_exist=False,
             use_two_lines=False,
-            labelColor='GOOD',
+            labelColor="GOOD",
             begin_entry_at=40,
             scroll_exit=True,
         )
         self.nextrely += 1
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='Activate the NSFW checker to blur images showing potential sexual imagery:',
+            value="Activate the NSFW checker to blur images showing potential sexual imagery:",
             editable=False,
-            color='CONTROL'
+            color="CONTROL",
         )
         self.safety_checker = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name='NSFW checker',
+            name="NSFW checker",
             value=old_opts.safety_checker,
-            relx = 5,
+            relx=5,
             scroll_exit=True,
         )
         self.nextrely += 1
         for i in [
-            'If you have an account at HuggingFace you may paste your access token here',
+            "If you have an account at HuggingFace you may paste your access token here",
             'to allow InvokeAI to download styles & subjects from the "Concept Library".',
-            'See https://huggingface.co/settings/tokens',
+            "See https://huggingface.co/settings/tokens",
         ]:
             self.add_widget_intelligent(
                 npyscreen.FixedText,
                 value=i,
                 editable=False,
-                color='CONTROL',
+                color="CONTROL",
             )

         self.hf_token = self.add_widget_intelligent(
             npyscreen.TitlePassword,
-            name='Access Token (use shift-ctrl-V to paste):',
+            name="Access Token (use shift-ctrl-V to paste):",
             value=access_token,
             begin_entry_at=42,
             use_two_lines=False,
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.nextrely += 1
         self.add_widget_intelligent(
@@ -379,7 +382,7 @@ class editOptsForm(npyscreen.FormMultiPage):
             begin_entry_at=0,
             editable=False,
             color="CONTROL",
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.nextrely -= 1
         self.add_widget_intelligent(
@@ -388,37 +391,40 @@ class editOptsForm(npyscreen.FormMultiPage):
             begin_entry_at=0,
             editable=False,
             color="CONTROL",
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.nextrely -= 1
         self.free_gpu_mem = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name='Free GPU memory after each generation',
+            name="Free GPU memory after each generation",
             value=old_opts.free_gpu_mem,
             relx=5,
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.xformers = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name='Enable xformers support if available',
+            name="Enable xformers support if available",
             value=old_opts.xformers,
             relx=5,
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.always_use_cpu = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name='Force CPU to be used on GPU systems',
+            name="Force CPU to be used on GPU systems",
             value=old_opts.always_use_cpu,
             relx=5,
-            scroll_exit=True
+            scroll_exit=True,
+        )
+        precision = old_opts.precision or (
+            "float32" if program_opts.full_precision else "auto"
         )
         self.precision = self.add_widget_intelligent(
             npyscreen.TitleSelectOne,
-            name='Precision',
+            name="Precision",
             values=PRECISION_CHOICES,
-            value=PRECISION_CHOICES.index(old_opts.precision),
+            value=PRECISION_CHOICES.index(precision),
             begin_entry_at=3,
-            max_height=len(PRECISION_CHOICES)+1,
+            max_height=len(PRECISION_CHOICES) + 1,
             scroll_exit=True,
         )
         self.max_loaded_models = self.add_widget_intelligent(
@@ -428,23 +434,23 @@ class editOptsForm(npyscreen.FormMultiPage):
             out_of=10,
             lowest=1,
             begin_entry_at=4,
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.nextrely += 1
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='Directory containing embedding/textual inversion files:',
+            value="Directory containing embedding/textual inversion files:",
             editable=False,
-            color='CONTROL',
+            color="CONTROL",
         )
         self.embedding_path = self.add_widget_intelligent(
             npyscreen.TitleFilename,
-            name='(<tab> autocompletes, ctrl-N advances):',
+            name="(<tab> autocompletes, ctrl-N advances):",
             value=str(default_embedding_dir()),
             select_dir=True,
             must_exist=False,
             use_two_lines=False,
-            labelColor='GOOD',
+            labelColor="GOOD",
             begin_entry_at=40,
             scroll_exit=True,
         )
@@ -455,40 +461,49 @@ class editOptsForm(npyscreen.FormMultiPage):
             begin_entry_at=0,
             editable=False,
             color="CONTROL",
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.nextrely -= 1
         for i in [
-            'BY DOWNLOADING THE STABLE DIFFUSION WEIGHT FILES, YOU AGREE TO HAVE READ',
-            'AND ACCEPTED THE CREATIVEML RESPONSIBLE AI LICENSE LOCATED AT',
-            'https://huggingface.co/spaces/CompVis/stable-diffusion-license'
+            "BY DOWNLOADING THE STABLE DIFFUSION WEIGHT FILES, YOU AGREE TO HAVE READ",
+            "AND ACCEPTED THE CREATIVEML RESPONSIBLE AI LICENSE LOCATED AT",
+            "https://huggingface.co/spaces/CompVis/stable-diffusion-license",
         ]:
             self.add_widget_intelligent(
                 npyscreen.FixedText,
                 value=i,
                 editable=False,
-                color='CONTROL',
+                color="CONTROL",
             )
         self.license_acceptance = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name='I accept the CreativeML Responsible AI License',
+            name="I accept the CreativeML Responsible AI License",
             value=not first_time,
-            relx = 2,
-            scroll_exit=True
+            relx=2,
+            scroll_exit=True,
        )
         self.nextrely += 1
+        label = (
+            "DONE"
+            if program_opts.skip_sd_weights or program_opts.default_only
+            else "NEXT"
+        )
         self.ok_button = self.add_widget_intelligent(
             npyscreen.ButtonPress,
-            name='DONE',
-            relx= (window_width-len('DONE'))//2,
-            rely= -3,
-            when_pressed_function=self.on_ok
+            name=label,
+            relx=(window_width - len(label)) // 2,
+            rely=-3,
+            when_pressed_function=self.on_ok,
         )

     def on_ok(self):
         options = self.marshall_arguments()
         if self.validate_field_values(options):
-            self.parentApp.setNextForm(None)
+            self.parentApp.new_opts = options
+            if hasattr(self.parentApp,'model_select'):
+                self.parentApp.setNextForm("MODELS")
+            else:
+                self.parentApp.setNextForm(None)
             self.editing = False
         else:
             self.editing = True
@@ -497,15 +512,15 @@ class editOptsForm(npyscreen.FormMultiPage):
         bad_fields = []
         if not opt.license_acceptance:
             bad_fields.append(
-                'Please accept the license terms before proceeding to model downloads'
+                "Please accept the license terms before proceeding to model downloads"
             )
         if not Path(opt.outdir).parent.exists():
             bad_fields.append(
-                f'The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory.'
+                f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory."
             )
         if not Path(opt.embedding_path).parent.exists():
             bad_fields.append(
-                f'The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_path).parent)} is an existing directory.'
+                f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_path).parent)} is an existing directory."
             )
         if len(bad_fields) > 0:
             message = "The following problems were detected and must be corrected:\n"
@@ -519,8 +534,15 @@ class editOptsForm(npyscreen.FormMultiPage):
     def marshall_arguments(self):
         new_opts = Namespace()

-        for attr in ['outdir','safety_checker','free_gpu_mem','max_loaded_models',
-                     'xformers','always_use_cpu','embedding_path']:
+        for attr in [
+            "outdir",
+            "safety_checker",
+            "free_gpu_mem",
+            "max_loaded_models",
+            "xformers",
+            "always_use_cpu",
+            "embedding_path",
+        ]:
             setattr(new_opts, attr, getattr(self, attr).value)

         new_opts.hf_token = self.hf_token.value
@@ -531,26 +553,55 @@ class editOptsForm(npyscreen.FormMultiPage):


 class EditOptApplication(npyscreen.NPSAppManaged):
-    def __init__(self, old_opts=argparse.Namespace):
+    def __init__(self, program_opts: Namespace, invokeai_opts: Namespace):
         super().__init__()
-        self.old_opts=old_opts
+        self.program_opts = program_opts
+        self.invokeai_opts = invokeai_opts
+        self.user_cancelled = False
+        self.user_selections = default_user_selections(program_opts)

     def onStart(self):
         npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
-        self.main = self.addForm(
+        self.options = self.addForm(
             "MAIN",
             editOptsForm,
-            name='InvokeAI Startup Options',
+            name="InvokeAI Startup Options",
         )
+        if not (self.program_opts.skip_sd_weights or self.program_opts.default_only):
+            self.model_select = self.addForm(
+                "MODELS",
+                addModelsForm,
+                name="Add/Remove Models",
+                multipage=True,
+            )

     def new_opts(self):
-        return self.main.marshall_arguments()
+        return self.options.marshall_arguments()

-def edit_opts(old_opts: argparse.Namespace)->argparse.Namespace:
-    editApp = EditOptApplication(old_opts)
+def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Namespace:
+    editApp = EditOptApplication(program_opts, invokeai_opts)
     editApp.run()
     return editApp.new_opts()

+def default_startup_options()->Namespace:
+    opts = Args().parse_args([])
+    opts.outdir = str(default_output_dir())
+    opts.safety_checker = True
+    return opts
+
+def default_user_selections(program_opts: Namespace)->Namespace:
+    return Namespace(
+        starter_models=recommended_datasets()
+        if program_opts.yes_to_all
+        else default_dataset()
+        if program_opts.default_only
+        else dict(),
+        purge_deleted_models=False,
+        scan_directory=None,
+        autoscan_on_startup=None,
+        import_model_paths=None,
+        convert_to_diffusers=None,
+    )
 # -------------------------------------
 def initialize_rootdir(root: str, yes_to_all: bool = False):
     print("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
@@ -569,27 +620,27 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
     if not os.path.samefile(configs_src, configs_dest):
         shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)

-# -------------------------------------
-def do_edit_opt_form(old_opts: argparse.Namespace)->argparse.Namespace:
-    editApp = EditOptApplication(old_opts)
-    editApp.run()
-    return editApp.new_opts()

 # -------------------------------------
-def edit_options(init_file: Path):
-    # get current settings from initfile
-    opt = Args().parse_args()
-    new_opt = do_edit_opt_form(opt)
-    write_opts(new_opt, init_file)
+def run_console_ui(program_opts: Namespace) -> (Namespace, Namespace):
+    # parse_args() will read from init file if present
+    invokeai_opts = default_startup_options()
+    editApp = EditOptApplication(program_opts, invokeai_opts)
+    editApp.run()
+    if editApp.user_cancelled:
+        return (None, None)
+    else:
+        return (editApp.new_opts, editApp.user_selections)


 # -------------------------------------
 def write_opts(opts: Namespace, init_file: Path):
-    '''
+    """
     Update the invokeai.init file with values from opts Namespace
-    '''
+    """
     # touch file if it doesn't exist
     if not init_file.exists():
-        with open(init_file,'w') as f:
+        with open(init_file, "w") as f:
             f.write(INIT_FILE_PREAMBLE)

     # We want to write in the changed arguments without clobbering
@@ -598,15 +649,18 @@ def write_opts(opts: Namespace, init_file: Path):
     # argparse: i.e. --outdir could be --outdir, --out, or -o
     # initfile needs to be replaced with a fully structured format
     # such as yaml; this is a hack that will work much of the time
-    args_to_skip = re.compile('^--?(o|out|no-xformer|xformer|free|no-nsfw|nsfw|prec|max_load|embed)')
-    new_file = f'{init_file}.new'
+    args_to_skip = re.compile(
+        "^--?(o|out|no-xformer|xformer|free|no-nsfw|nsfw|prec|max_load|embed|always)"
+    )
+    new_file = f"{init_file}.new"
     try:
-        lines = open(init_file,'r').readlines()
-        with open(new_file,'w') as out_file:
+        lines = [x.strip() for x in open(init_file, "r").readlines()]
+        with open(new_file, "w") as out_file:
             for line in lines:
-                if not args_to_skip.match(line):
-                    out_file.write(line)
-            out_file.write(f'''
+                if len(line) > 0 and not args_to_skip.match(line):
+                    out_file.write(line + "\n")
+            out_file.write(
+                f"""
 --outdir={opts.outdir}
 --embedding_path={opts.embedding_path}
 --precision={opts.precision}
@@ -615,34 +669,34 @@ def write_opts(opts: Namespace, init_file: Path):
 --{'no-' if not opts.xformers else ''}xformers
 {'--free_gpu_mem' if opts.free_gpu_mem else ''}
 {'--always_use_cpu' if opts.always_use_cpu else ''}
-''')
+"""
+            )
     except OSError as e:
-        print(f'** An error occurred while writing the init file: {str(e)}')
+        print(f"** An error occurred while writing the init file: {str(e)}")

     os.replace(new_file, init_file)

     if opts.hf_token:
         HfLogin(opts.hf_token)

-# -------------------------------------
-def default_output_dir()->Path:
-    return Globals.root / 'outputs'

 # -------------------------------------
-def default_embedding_dir()->Path:
-    return Globals.root / 'embeddings'
+def default_output_dir() -> Path:
+    return Globals.root / "outputs"


 # -------------------------------------
-def write_default_options(initfile: Path):
-    opt = Namespace(
-        outdir=str(default_output_dir()),
-        embedding_path=str(default_embedding_dir()),
-        nsfw_checker=True,
-        max_loaded_models=2,
-        free_gpu_mem=True
-    )
+def default_embedding_dir() -> Path:
+    return Globals.root / "embeddings"
+
+
+# -------------------------------------
+def write_default_options(program_opts: Namespace, initfile: Path):
+    opt = default_startup_options()
+    opt.hf_token = HfFolder.get_token()
     write_opts(opt, initfile)


 # -------------------------------------
 def main():
     parser = argparse.ArgumentParser(description="InvokeAI model downloader")
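Putting the template above together, the block that write_opts() appends to invokeai.init would look roughly like the following. Only the switches visible in these hunks are shown, the paths and values are illustrative, and the last three lines change to `--no-xformers` or drop out entirely when the corresponding options are off:

```
--outdir=/home/user/invokeai/outputs
--embedding_path=/home/user/invokeai/embeddings
--precision=auto
--xformers
--free_gpu_mem
--always_use_cpu
```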
@@ -653,6 +707,13 @@ def main():
         default=False,
         help="skip downloading the large Stable Diffusion weight files",
     )
+    parser.add_argument(
+        "--skip-support-models",
+        dest="skip_support_models",
+        action=argparse.BooleanOptionalAction,
+        default=False,
+        help="skip downloading the support models",
+    )
     parser.add_argument(
         "--full-precision",
         dest="full_precision",
@@ -691,35 +752,45 @@ def main():
     opt = parser.parse_args()

     # setting a global here
-    Globals.root = os.path.expanduser(get_root(opt.root) or "")
+    Globals.root = Path(os.path.expanduser(get_root(opt.root) or ""))

     errors = set()

     try:
+        models_to_download = default_user_selections(opt)
+
         # We check for to see if the runtime directory is correctly initialized.
         init_file = Path(Globals.root, Globals.initfile)
         if not init_file.exists():
             initialize_rootdir(Globals.root, opt.yes_to_all)

         if opt.yes_to_all:
-            write_default_options(init_file)
+            write_default_options(opt, init_file)
         else:
-            edit_options(init_file)
+            init_options, models_to_download = run_console_ui(opt)
+            if init_options:
+                write_opts(init_options, init_file)
+            else:
+                print("\n** CANCELLED AT USER'S REQUEST. USE THE \"invoke.sh\" LAUNCHER TO RUN LATER **\n")
+                sys.exit(0)

-        print("\n** DOWNLOADING SUPPORT MODELS **")
-        download_bert()
-        download_clip()
-        download_realesrgan()
-        download_gfpgan()
-        download_codeformer()
-        download_clipseg()
-        download_safety_checker()
+        if opt.skip_support_models:
+            print("\n** SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST **")
+        else:
+            print("\n** DOWNLOADING SUPPORT MODELS **")
+            download_bert()
+            download_clip()
+            download_realesrgan()
+            download_gfpgan()
+            download_codeformer()
+            download_clipseg()
+            download_safety_checker()

         if opt.skip_sd_weights:
-            print("** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
-        else:
-            print("** DOWNLOADING DIFFUSION WEIGHTS **")
-            errors.add(select_and_download_models(opt))
+            print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
+        elif models_to_download:
+            print("\n** DOWNLOADING DIFFUSION WEIGHTS **")
+            process_and_execute(opt, models_to_download)

         postscript(errors=errors)
     except KeyboardInterrupt:
Model installer TUI (model_install module):

@@ -29,11 +29,11 @@ from ..globals import Globals
 from .widgets import MultiSelectColumns, TextBox
 from .model_install_backend import (Dataset_path, default_config_file,
                                     install_requested_models,
-                                    default_dataset, get_root
+                                    default_dataset, recommended_datasets, get_root
                                     )

-class addModelsForm(npyscreen.FormMultiPageAction):
-    def __init__(self, parentApp, name):
+class addModelsForm(npyscreen.FormMultiPage):
+    def __init__(self, parentApp, name, multipage=False):
         self.initial_models = OmegaConf.load(Dataset_path)
         try:
             self.existing_models = OmegaConf.load(default_config_file())
@@ -42,6 +42,7 @@ class addModelsForm(npyscreen.FormMultiPageAction):
         self.starter_model_list = [
             x for x in list(self.initial_models.keys()) if x not in self.existing_models
         ]
+        self.multipage = multipage
         self.installed_models=dict()
         super().__init__(parentApp, name)

@@ -85,7 +86,7 @@ class addModelsForm(npyscreen.FormMultiPageAction):
                 columns=columns,
                 values=self.installed_models,
                 value=[x for x in range(0,len(self.installed_models))],
-                max_height=2+len(self.installed_models) // columns,
+                max_height=1+len(self.installed_models) // columns,
                 relx = 4,
                 slow_scroll=True,
                 scroll_exit = True,
@@ -96,6 +97,7 @@ class addModelsForm(npyscreen.FormMultiPageAction):
                 value=False,
                 scroll_exit=True
             )
+        self.nextrely += 1
         self.add_widget_intelligent(
             npyscreen.TitleFixedText,
             name="== STARTER MODELS (recommended ones selected) ==",
@@ -169,8 +171,37 @@ class addModelsForm(npyscreen.FormMultiPageAction):
             values=['Keep original format','Convert to diffusers'],
             value=0,
             begin_entry_at=4,
+            max_height=4,
             scroll_exit=True,
         )
+        self.cancel = self.add_widget_intelligent(
+            npyscreen.ButtonPress,
+            name='CANCEL',
+            rely = -3,
+            when_pressed_function=self.on_cancel,
+        )
+        done_label = 'DONE'
+        back_label = 'BACK'
+        button_length = len(done_label)
+        button_offset = 0
+        if self.multipage:
+            button_length += len(back_label)+1
+            button_offset += len(back_label)+1
+            self.back_button = self.add_widget_intelligent(
+                npyscreen.ButtonPress,
+                name=back_label,
+                relx= (window_width-button_length)//2,
+                rely= -3,
+                when_pressed_function=self.on_back
+            )
+        self.ok_button = self.add_widget_intelligent(
+            npyscreen.ButtonPress,
+            name=done_label,
+            relx= button_offset + 1 + (window_width-button_length)//2,
+            rely= -3,
+            when_pressed_function=self.on_ok
+        )

         for i in [self.autoload_directory,self.autoscan_on_startup]:
             self.show_directory_fields.addVisibleWhenSelected(i)

@@ -204,11 +235,18 @@ class addModelsForm(npyscreen.FormMultiPageAction):
         self.parentApp.user_cancelled = False
         self.marshall_arguments()

-    def on_cancel(self):
-        self.parentApp.setNextForm(None)
-        self.parentApp.user_cancelled = True
+    def on_back(self):
+        self.parentApp.switchFormPrevious()
         self.editing = False

+    def on_cancel(self):
+        if npyscreen.notify_yes_no(
+            'Are you sure you want to cancel?\nYou may re-run this script later using the invoke.sh or invoke.bat command.\n'
+        ):
+            self.parentApp.setNextForm(None)
+            self.parentApp.user_cancelled = True
+            self.editing = False
+
     def marshall_arguments(self):
         '''
         Assemble arguments and store as attributes of the application:
@@ -255,7 +293,6 @@ class AddModelApplication(npyscreen.NPSAppManaged):
     def __init__(self):
         super().__init__()
         self.user_cancelled = False
-        self.models_to_install = None
         self.user_selections = Namespace(
             starter_models = None,
             purge_deleted_models = False,
@@ -270,7 +307,7 @@ class AddModelApplication(npyscreen.NPSAppManaged):
         self.main_form = self.addForm(
             "MAIN",
             addModelsForm,
-            name="Add/Remove Models",
+            name="Add/Remove Models"
         )

 # --------------------------------------------------------
@@ -296,11 +333,16 @@ def process_and_execute(opt: Namespace, selections: Namespace):

 # --------------------------------------------------------
 def select_and_download_models(opt: Namespace):
+    precision= 'float32' if opt.full_precision else choose_precision(torch.device(choose_torch_device())),
     if opt.default_only:
-        models_to_install = default_dataset()
         install_requested_models(
-            install_initial_models = models_to_install,
-            precision = 'float32' if opt.full_precision else choose_precision(torch.device(choose_torch_device())),
+            install_initial_models = default_dataset(),
+            precision = precision,
+        )
+    elif opt.yes_to_all:
+        install_requested_models(
+            install_initial_models = recommended_datasets(),
+            precision = precision,
         )
     else:
         installApp = AddModelApplication()