Merge branch 'main' into 2.3-documentation-fixes

Lincoln Stein 2023-02-08 12:47:27 -05:00
commit 4ecf016ace
12 changed files with 145 additions and 39 deletions

.github/CODEOWNERS

@@ -1,7 +1,50 @@
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @ebr
.github/workflows/ @mauwii
docker/ @mauwii
# continuous integration
/.github/workflows/ @mauwii
# documentation
/docs/ @lstein @mauwii @tildebyte
mkdocs.yml @lstein @mauwii
# installation and configuration
/pyproject.toml @mauwii @lstein @ebr
/docker/ @mauwii
/scripts/ @ebr @lstein
/installer/ @ebr @lstein @tildebyte
ldm/invoke/config @lstein @ebr
invokeai/assets @lstein @ebr
invokeai/configs @lstein @ebr
/ldm/invoke/_version.py @lstein @blessedcoolant
# web ui
/invokeai/frontend @blessedcoolant @psychedelicious
/invokeai/backend @blessedcoolant @psychedelicious
# generation and model management
/ldm/*.py @lstein
/ldm/generate.py @lstein @keturn
/ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein
/ldm/invoke/ckpt_generator @lstein
/ldm/invoke/CLI.py @lstein
/ldm/invoke/config @lstein @ebr @mauwii
/ldm/invoke/generator @keturn @damian0815
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein
/ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein
/ldm/invoke/patchmatch.py @Kyle0654
/ldm/invoke/restoration @lstein @blessedcoolant
# attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn
/ldm/modules @damian0815 @keturn
# Nodes
apps/ @Kyle0654
# legacy REST API
# is CapableWeb still engaged?
/ldm/invoke/pngwriter.py @CapableWeb
/ldm/invoke/server_legacy.py @CapableWeb
/scripts/legacy_api.py @CapableWeb
/tests/legacy_tests.sh @CapableWeb


@@ -92,6 +92,7 @@ You will need one of the following:
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.
- An AMD-based graphics card with 4GB or more VRAM memory. (Linux only)
We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM


@@ -9,10 +9,9 @@ from pathlib import Path
from prompt_toolkit import prompt
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.validation import Validator
from rich import box, print
from rich.console import Console, Group
from rich.console import Console, Group, group
from rich.panel import Panel
from rich.prompt import Confirm
from rich.style import Style
@@ -37,17 +36,21 @@ else:
def welcome():
@group()
def text():
if (platform_specific := _platform_specific_help()) != "":
yield platform_specific
yield ""
yield Text.from_markup("Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.", justify="center")
console.rule()
print(
Panel(
title="[bold wheat1]Welcome to the InvokeAI Installer",
renderable=Text(
"Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry.",
justify="center",
),
renderable=text(),
box=box.DOUBLE,
width=80,
expand=False,
expand=True,
padding=(1, 2),
style=Style(bgcolor="grey23", color="orange1"),
subtitle=f"[bold grey39]{OS}-{ARCH}",
@@ -200,7 +203,7 @@ def graphical_accelerator():
[
f"Detected the [gold1]{OS}-{ARCH}[/] platform",
"",
"See [steel_blue3]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
"See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
"",
"[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
]
@@ -294,3 +297,16 @@ def introduction() -> None:
)
)
console.line(2)
def _platform_specific_help()->str:
if OS == "Darwin":
text = Text.from_markup("""[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/].""")
elif OS == "Windows":
text = Text.from_markup("""[b wheat1]Windows Users![/]\n\nBefore you start, please do the following:
1. Double-click on the file [b wheat1]WinLongPathsEnabled.reg[/] in order to
enable long path support on your system.
2. Make sure you have the [b wheat1]Visual C++ core libraries[/] installed. If not, install from
[deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]""")
else:
text = ""
return text
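
For reference only (an illustrative sketch, not part of this commit): the welcome() change above relies on rich's group() decorator, which collects everything a generator yields into a single renderable that Panel can wrap. The strings and layout below are placeholders.

    from rich import print
    from rich.console import group
    from rich.panel import Panel
    from rich.text import Text

    @group()
    def body():
        # Platform-specific help is yielded first, followed by the general notice.
        yield Text("Platform-specific notes would go here.", justify="center")
        yield ""
        yield Text("Some installation steps take a long time to run. Please be patient.", justify="center")

    # body() returns a Group, so Panel renders all yielded items as one block.
    print(Panel(body(), title="Welcome", width=80, expand=True, padding=(1, 2)))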

File diff suppressed because one or more lines are too long


@@ -5,7 +5,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-34c8aef8.js"></script>
<script type="module" crossorigin src="./assets/index-4baf9db0.js"></script>
<link rel="stylesheet" href="./assets/index-b0bf79f4.css">
</head>


@@ -5,15 +5,24 @@ export function getPromptAndNegative(input_prompt: InvokeAI.Prompt) {
let prompt: string = promptToString(input_prompt);
let negativePrompt: string | null = null;
const negativePromptRegExp = new RegExp(/(?<=\[)[^\][]*(?=])/, 'gi');
const negativePromptMatches = [...prompt.matchAll(negativePromptRegExp)];
// Matches all negative prompts, 1st capturing group is the prompt itself
const negativePromptRegExp = new RegExp(/\[([^\][]*)]/, 'gi');
if (negativePromptMatches && negativePromptMatches.length > 0) {
negativePrompt = negativePromptMatches.join(', ');
prompt = prompt
.replaceAll(negativePromptRegExp, '')
.replaceAll('[]', '')
.trim();
// Grab the actual prompt matches (capturing group 1 is 1st index of match)
const negativePromptMatches = [...prompt.matchAll(negativePromptRegExp)].map(
(match) => match[1]
);
if (negativePromptMatches.length) {
// Build the negative prompt itself
negativePrompt = negativePromptMatches.join(' ');
// Replace each match, including its surrounding brackets
// Remove each pair of empty brackets
// Trim whitespace
negativePromptMatches.forEach((match) => {
prompt = prompt.replace(`[${match}]`, '').replaceAll('[]', '').trim();
});
}
return [prompt, negativePrompt];
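
For illustration only (not part of this commit), the same bracket-extraction logic the new regex implements, sketched in Python with the standard re module: capture the text inside each [...] pair, join the captures into the negative prompt, and strip the bracketed spans from the positive prompt.

    import re

    def split_negative(prompt: str) -> tuple[str, str | None]:
        # Capture group 1 is the text inside each bracket pair (no nested brackets).
        matches = re.findall(r"\[([^\][]*)\]", prompt)
        if not matches:
            return prompt.strip(), None
        negative = " ".join(matches)
        for m in matches:
            # Remove each match together with its surrounding brackets.
            prompt = prompt.replace(f"[{m}]", "")
        return " ".join(prompt.split()), negative

    print(split_negative("a cat [ugly] on a mat [blurry]"))
    # ('a cat on a mat', 'ugly blurry')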


@@ -23,6 +23,7 @@ const selector = createSelector(
shouldShowCanvasDebugInfo,
layer,
boundingBoxScaleMethod,
shouldPreserveMaskedArea,
} = canvas;
let boundingBoxColor = 'inherit';
@@ -56,6 +57,7 @@ const selector = createSelector(
shouldShowCanvasDebugInfo,
shouldShowBoundingBox: boundingBoxScaleMethod !== 'auto',
shouldShowScaledBoundingBox: boundingBoxScaleMethod !== 'none',
shouldPreserveMaskedArea,
};
},
{
@@ -79,6 +81,7 @@ const IAICanvasStatusText = () => {
canvasScaleString,
shouldShowCanvasDebugInfo,
shouldShowBoundingBox,
shouldPreserveMaskedArea,
} = useAppSelector(selector);
const { t } = useTranslation();
@@ -91,6 +94,15 @@ const IAICanvasStatusText = () => {
}}
>{`${t('unifiedcanvas:activeLayer')}: ${activeLayerString}`}</div>
<div>{`${t('unifiedcanvas:canvasScale')}: ${canvasScaleString}%`}</div>
{shouldPreserveMaskedArea && (
<div
style={{
color: 'var(--status-working-color)',
}}
>
Preserve Masked Area: On
</div>
)}
{shouldShowBoundingBox && (
<div
style={{


@@ -89,7 +89,7 @@ const DeleteImageModal = forwardRef(
() => {
shouldConfirmOnDelete ? onOpen() : handleDelete();
},
[image, shouldConfirmOnDelete]
[image, shouldConfirmOnDelete, isConnected, isProcessing]
);
const handleChangeShouldConfirmOnDelete = (

File diff suppressed because one or more lines are too long


@@ -1 +1 @@
__version__='2.3.0-rc5'
__version__='2.3.0-rc6'


@@ -230,7 +230,8 @@ class Args(object):
switches = ''
try:
self._cmd_switches = self._cmd_parser.parse_args(shlex.split(switches,comments=True))
setattr(self._cmd_switches,'prompt',prompt)
if not getattr(self._cmd_switches,'prompt'):
setattr(self._cmd_switches,'prompt',prompt)
return self._cmd_switches
except:
return None
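
As an illustration of the new fallback behavior (a hypothetical standalone parser, not the commit's code): a prompt already present in the parsed switches is kept, and the separately supplied prompt is only used when the switches did not contain one.

    import argparse
    import shlex

    parser = argparse.ArgumentParser()
    parser.add_argument("prompt", nargs="?", default=None)
    parser.add_argument("--steps", type=int, default=30)

    def parse_with_fallback(switches: str, prompt: str) -> argparse.Namespace:
        args = parser.parse_args(shlex.split(switches))
        if not getattr(args, "prompt"):
            # Only fall back to the separately supplied prompt when none was parsed.
            setattr(args, "prompt", prompt)
        return args

    print(parse_with_fallback("--steps 20", "a cat"))           # prompt falls back to "a cat"
    print(parse_with_fallback('"a dog" --steps 20', "a cat"))   # prompt from the switches wins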


@@ -10,6 +10,7 @@ print("Loading Python libraries...\n")
import argparse
import io
import os
import re
import shutil
import sys
import traceback
@@ -320,7 +321,7 @@ You may re-run the configuration script again in the future if you do not wish t
while again:
try:
access_token = getpass_asterisk.getpass_asterisk(prompt="HF Token ")
if access_token is None or len(access_token)==0:
if access_token is None or len(access_token) == 0:
raise EOFError
HfLogin(access_token)
access_token = HfFolder.get_token()
@@ -379,7 +380,7 @@ def download_weight_datasets(
migrate_models_ckpt()
successful = dict()
for mod in models.keys():
print(f"{mod}...", file=sys.stderr, end="")
print(f"Downloading {mod}:")
successful[mod] = _download_repo_or_file(
Datasets[mod], access_token, precision=precision
)
@@ -532,7 +533,7 @@ def update_config_file(successfully_downloaded: dict, opt: dict):
configs_dest = Default_config_file.parent
shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
yaml = new_config_file_contents(successfully_downloaded, config_file)
yaml = new_config_file_contents(successfully_downloaded, config_file, opt)
try:
backup = None
@@ -568,7 +569,7 @@ def update_config_file(successfully_downloaded: dict, opt: dict):
# ---------------------------------------------
def new_config_file_contents(successfully_downloaded: dict, config_file: Path) -> str:
def new_config_file_contents(successfully_downloaded: dict, config_file: Path, opt: dict) -> str:
if config_file.exists():
conf = OmegaConf.load(str(config_file.expanduser().resolve()))
else:
@@ -576,7 +577,14 @@ def new_config_file_contents(successfully_downloaded: dict, config_file: Path) -
default_selected = None
for model in successfully_downloaded:
stanza = conf[model] if model in conf else {}
# a bit hacky - what we are doing here is seeing whether a checkpoint
# version of the model was previously defined, and whether the current
# model is a diffusers (indicated with a path)
if conf.get(model) and Path(successfully_downloaded[model]).is_dir():
offer_to_delete_weights(model, conf[model], opt.yes_to_all)
stanza = {}
mod = Datasets[model]
stanza["description"] = mod["description"]
stanza["repo_id"] = mod["repo_id"]
@@ -599,8 +607,8 @@ def new_config_file_contents(successfully_downloaded: dict, config_file: Path) -
)
else:
stanza["vae"] = mod["vae"]
if mod.get('default',False):
stanza['default'] = True
if mod.get("default", False):
stanza["default"] = True
default_selected = True
conf[model] = stanza
@@ -612,7 +620,22 @@ def new_config_file_contents(successfully_downloaded: dict, config_file: Path) -
return OmegaConf.to_yaml(conf)
# ---------------------------------------------
def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool):
if not (weights := conf_stanza.get('weights')):
return
if re.match('/VAE/',conf_stanza.get('config')):
return
if yes_to_all or \
yes_or_no(f'\n** The checkpoint version of {model_name} is superseded by the diffusers version. Delete the original file {weights}?', default_yes=False):
weights = Path(weights)
if not weights.is_absolute():
weights = Path(Globals.root) / weights
try:
weights.unlink()
except OSError as e:
print(str(e))
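
To make the intent of the two changes above concrete, a simplified sketch (hypothetical stanzas and helper, not the commit's code): a model that already has a legacy 'weights' entry and whose fresh download is a diffusers directory is the case that triggers the offer to delete the old checkpoint.

    import tempfile
    from pathlib import Path

    def is_superseded_checkpoint(existing_stanza: dict, downloaded: str) -> bool:
        # The legacy entry points at a .ckpt file; a diffusers download is a directory.
        return bool(existing_stanza.get("weights")) and Path(downloaded).is_dir()

    old = {"weights": "models/ldm/stable-diffusion-v1/v1-5.ckpt",
           "config": "configs/stable-diffusion/v1-inference.yaml"}
    with tempfile.TemporaryDirectory() as diffusers_dir:
        print(is_superseded_checkpoint(old, diffusers_dir))  # True -> offer to delete the .ckpt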
# ---------------------------------------------
# this will preload the Bert tokenizer files
def download_bert():
@@ -641,7 +664,8 @@ def download_from_hf(
resume_download=True,
**kwargs,
)
return path if model else None
model_name = '--'.join(('models',*model_name.split('/')))
return path / model_name if model else None
# ---------------------------------------------
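
For context on the last change (an illustrative sketch, not the commit's code): huggingface_hub stores each downloaded repo under a folder named models--<owner>--<name>, which is what the '--'.join(...) expression reconstructs so the function can return the model's cache directory rather than the cache root.

    from pathlib import Path

    def hf_cache_dir(cache_root: str, repo_id: str) -> Path:
        # "runwayml/stable-diffusion-v1-5" -> "models--runwayml--stable-diffusion-v1-5"
        folder = "--".join(("models", *repo_id.split("/")))
        return Path(cache_root) / folder

    print(hf_cache_dir("~/invokeai/models/hub", "runwayml/stable-diffusion-v1-5"))
    # ~/invokeai/models/hub/models--runwayml--stable-diffusion-v1-5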