Merge branch 'main' into fix-optional

psychedelicious 2023-07-31 16:31:01 +10:00 committed by GitHub
commit 2c07f54b6e
15 changed files with 229 additions and 219 deletions

View File

@@ -14,20 +14,25 @@ The nodes linked below have been developed and contributed by members of the Inv
 ## List of Nodes
-### Face Mask
-**Description:** This node autodetects a face in the image using MediaPipe and masks it by making it transparent. Via outpainting you can swap faces with other faces, or invert the mask and swap things around the face with other things. Additionally, you can supply X and Y offset values to scale/change the shape of the mask for finer control. The node also outputs an all-white mask in the same dimensions as the input image. This is needed by the inpaint node (and unified canvas) for outpainting.
-**Node Link:** https://github.com/ymgenesis/InvokeAI/blob/facemaskmediapipe/invokeai/app/invocations/facemask.py
-**Example Node Graph:** https://www.mediafire.com/file/gohn5sb1bfp8use/21-July_2023-FaceMask.json/file
-**Output Examples**
-![2e3168cb-af6a-475d-bfac-c7b7fd67b4c2](https://github.com/ymgenesis/InvokeAI/assets/25252829/a5ad7d44-2ada-4b3c-a56e-a21f8244a1ac)
-![2_annotated](https://github.com/ymgenesis/InvokeAI/assets/25252829/53416c8a-a23b-4d76-bb6d-3cfd776e0096)
-![2fe2150c-fd08-4e26-8c36-f0610bf441bb](https://github.com/ymgenesis/InvokeAI/assets/25252829/b0f7ecfe-f093-4147-a904-b9f131b41dc9)
-![831b6b98-4f0f-4360-93c8-69a9c1338cbe](https://github.com/ymgenesis/InvokeAI/assets/25252829/fc7b0622-e361-4155-8a76-082894d084f0)
+### FaceTools
+**Description:** FaceTools is a collection of nodes created to manipulate faces as you would in Unified Canvas. It includes FaceMask, FaceOff, and FacePlace. FaceMask autodetects a face in the image using MediaPipe and creates a mask from it. FaceOff similarly detects a face, then takes the face off of the image by adding a square bounding box around it and cropping/scaling it. FacePlace puts the bounded face image from FaceOff back onto the original image. Using these nodes with other inpainting node(s), you can put new faces on existing things, put new things around existing faces, and work closer with a face as a bounded image. Additionally, you can supply X and Y offset values to scale/change the shape of the mask for finer control on FaceMask and FaceOff. See GitHub repository below for usage examples.
+**Node Link:** https://github.com/ymgenesis/FaceTools/
+**FaceMask Output Examples**
+![5cc8abce-53b0-487a-b891-3bf94dcc8960](https://github.com/invoke-ai/InvokeAI/assets/25252829/43f36d24-1429-4ab1-bd06-a4bedfe0955e)
+![b920b710-1882-49a0-8d02-82dff2cca907](https://github.com/invoke-ai/InvokeAI/assets/25252829/7660c1ed-bf7d-4d0a-947f-1fc1679557ba)
+![71a91805-fda5-481c-b380-264665703133](https://github.com/invoke-ai/InvokeAI/assets/25252829/f8f6a2ee-2b68-4482-87da-b90221d5c3e2)
+<hr>
+### Ideal Size
+**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
+**Node Link:** https://github.com/JPPhoto/ideal-size-node
 --------------------------------
 ### Super Cool Node Template

@@ -42,11 +47,5 @@ The nodes linked below have been developed and contributed by members of the Inv
 ![Invoke AI](https://invoke-ai.github.io/InvokeAI/assets/invoke_ai_banner.png)
-### Ideal Size
-**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
-**Node Link:** https://github.com/JPPhoto/ideal-size-node
 ## Help
 If you run into any issues with a node, please post in the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
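Note on the FaceMask idea described in the FaceTools entry above: the snippet below is a minimal, hypothetical sketch of detecting a face with MediaPipe and turning its bounding box into a white-on-black mask. It is not the FaceTools implementation; the real nodes operate on InvokeAI image fields and produce more refined masks, and the function name and libraries used here are illustrative assumptions.

```python
# Illustrative sketch only -- not the FaceTools code.
# Assumes mediapipe, numpy, and Pillow are installed.
import numpy as np
import mediapipe as mp
from PIL import Image

def face_bbox_mask(image_path: str) -> Image.Image:
    """Return a mask that is white over the first detected face's bounding box."""
    rgb = np.array(Image.open(image_path).convert("RGB"))
    height, width = rgb.shape[:2]
    with mp.solutions.face_detection.FaceDetection(model_selection=1) as detector:
        results = detector.process(rgb)
    mask = np.zeros((height, width), dtype=np.uint8)
    if results.detections:
        box = results.detections[0].location_data.relative_bounding_box
        x0, y0 = int(box.xmin * width), int(box.ymin * height)
        x1, y1 = x0 + int(box.width * width), y0 + int(box.height * height)
        mask[max(y0, 0):y1, max(x0, 0):x1] = 255  # white where the face was found
    return Image.fromarray(mask, mode="L")
```

In these terms, FaceOff corresponds to cropping and scaling that bounded region out of the image, and FacePlace to compositing it back onto the original.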

View File

@@ -13,7 +13,7 @@ from pathlib import Path
 from tempfile import TemporaryDirectory
 from typing import Union

-SUPPORTED_PYTHON = ">=3.9.0,<3.11"
+SUPPORTED_PYTHON = ">=3.9.0,<=3.11.100"
 INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
 BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"

@@ -249,6 +249,9 @@ class InvokeAiInstance:
     pip[
         "install",
         "--require-virtualenv",
+        "numpy~=1.24.0",  # choose versions that won't be uninstalled during phase 2
+        "urllib3~=1.26.0",
+        "requests~=2.28.0",
         "torch~=2.0.0",
         "torchmetrics==0.11.4",
         "torchvision>=0.14.1",

View File

@@ -41,7 +41,7 @@ IF /I "%choice%" == "1" (
     python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
 ) ELSE IF /I "%choice%" == "7" (
     echo Running invokeai-configure...
-    python .venv\Scripts\invokeai-configure.exe --yes --default_only
+    python .venv\Scripts\invokeai-configure.exe --yes --skip-sd-weight
 ) ELSE IF /I "%choice%" == "8" (
     echo Developer Console
     echo Python command is:

View File

@@ -82,7 +82,7 @@ do_choice() {
         7)
             clear
            printf "Re-run the configure script to fix a broken install or to complete a major upgrade\n"
-            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
+            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only --skip-sd-weights
             ;;
         8)
             clear

View File

@@ -274,7 +274,7 @@ class InvokeAISettings(BaseSettings):
     @classmethod
     def _excluded(self) -> List[str]:
         # internal fields that shouldn't be exposed as command line options
-        return ["type", "initconf"]
+        return ["type", "initconf", "cached_root"]

     @classmethod
     def _excluded_from_yaml(self) -> List[str]:

@@ -290,6 +290,7 @@ class InvokeAISettings(BaseSettings):
             "restore",
             "root",
             "nsfw_checker",
+            "cached_root",
         ]

     class Config:

@@ -423,6 +424,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging")
     version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
+    cached_root : Path = Field(default=None, description="internal use only", category="DEPRECATED")
     # fmt: on

     def parse_args(self, argv: List[str] = None, conf: DictConfig = None, clobber=False):

@@ -470,10 +472,15 @@ class InvokeAIAppConfig(InvokeAISettings):
         """
         Path to the runtime root directory
         """
-        if self.root:
-            return Path(self.root).expanduser().absolute()
+        # we cache value of root to protect against it being '.' and the cwd changing
+        if self.cached_root:
+            root = self.cached_root
+        elif self.root:
+            root = Path(self.root).expanduser().absolute()
         else:
-            return self.find_root()
+            root = self.find_root()
+        self.cached_root = root
+        return self.cached_root

     @property
     def root_dir(self) -> Path:
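The root_path hunk above caches the resolved root because a relative setting such as "." resolves to a different absolute path once the working directory changes. The standalone snippet below demonstrates the failure mode the cache guards against; it is illustrative only, not InvokeAI code:

```python
# Demonstrates why the resolved root is cached: Path('.') tracks the current directory.
import os
from pathlib import Path

root = Path(".")
first = root.expanduser().absolute()   # e.g. /home/user/invokeai
os.chdir("/tmp")
second = root.expanduser().absolute()  # now /tmp
print(first == second)                 # False (unless the process started in /tmp)
```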

View File

@@ -400,7 +400,7 @@ class ModelInstall(object):
             attributes.update(dict(config=str(legacy_conf)))
         return attributes

-    def relative_to_root(self, path: Path, root: None) -> Path:
+    def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path:
         root = root or self.config.root_path
         if path.is_relative_to(root):
             return path.relative_to(root)
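The signature fix above matters because `root: None` annotates the parameter as the literal type None and gives it no default, so the argument is required; `Optional[Path] = None` restores the intended "fall back to the configured root" behaviour. A self-contained sketch of the corrected pattern, where DEFAULT_ROOT is a stand-in for self.config.root_path rather than InvokeAI's API:

```python
# Sketch of the corrected signature; DEFAULT_ROOT is a hypothetical configured root.
from pathlib import Path
from typing import Optional

DEFAULT_ROOT = Path("/opt/invokeai")

def relative_to_root(path: Path, root: Optional[Path] = None) -> Path:
    root = root or DEFAULT_ROOT
    return path.relative_to(root) if path.is_relative_to(root) else path

print(relative_to_root(Path("/opt/invokeai/models/sd-1")))  # -> models/sd-1
```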

View File

@@ -256,6 +256,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         model_labels = [self.model_labels[x] for x in model_list]

         show_recommended = len(self.installed_models) == 0
+        truncated = False
         if len(model_list) > 0:
             max_width = max([len(x) for x in model_labels])
             columns = window_width // (max_width + 8)  # 8 characters for "[x] " and padding

@@ -274,10 +275,9 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
                 )
             )

-        truncation = False
         if len(model_labels) > MAX_OTHER_MODELS:
             model_labels = model_labels[0:MAX_OTHER_MODELS]
-            truncation = True
+            truncated = True

         widgets.update(
             models_selected=self.add_widget_intelligent(

@@ -297,7 +297,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
                 models=model_list,
             )
         )
-        if truncation:
+        if truncated:
             widgets.update(
                 warning_message=self.add_widget_intelligent(
                     npyscreen.FixedText,

@@ -650,7 +650,8 @@ def process_and_execute(
 ):
     # need to reinitialize config in subprocess
     config = InvokeAIAppConfig.get_config()
-    config.parse_args()
+    args = ["--root", opt.root] if opt.root else []
+    config.parse_args(args)

     # set up so that stderr is sent to conn_out
     if conn_out:

View File

@@ -320,7 +320,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
     def get_model_names(self, base_model: BaseModelType = None) -> List[str]:
         model_names = [
-            info["name"]
+            info["model_name"]
             for info in self.model_manager.list_models(model_type=ModelType.Main, base_model=base_model)
             if info["model_format"] == "diffusers"
         ]

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -12,7 +12,7 @@
       margin: 0;
     }
   </style>
-  <script type="module" crossorigin src="./assets/index-bad7ff83.js"></script>
+  <script type="module" crossorigin src="./assets/index-9bb68e3a.js"></script>
 </head>
 <body dir="ltr">

View File

@@ -1 +1 @@
-__version__ = "3.0.1"
+__version__ = "3.0.1post3"