From e159bb3dceb26d1bc3ab12f249e4ef4669fa1584 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sun, 11 Dec 2022 18:17:45 -0500
Subject: [PATCH 1/5] update installers for v2.2.4 tag (#1936)

---
 installer/install.bat.in | 4 ++--
 installer/install.sh.in  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/installer/install.bat.in b/installer/install.bat.in
index 02e09454de..51e47bb645 100644
--- a/installer/install.bat.in
+++ b/installer/install.bat.in
@@ -14,8 +14,8 @@ if "%1" == "use-cache" (
 
 @rem Config
 @rem this should be changed to the tagged release!
-set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
-@rem set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
+@rem set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
+set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
 set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
 set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
 set PYTHON_URL=https://www.python.org/downloads/windows/
diff --git a/installer/install.sh.in b/installer/install.sh.in
index 8c8598a465..0fd3de30a3 100644
--- a/installer/install.sh.in
+++ b/installer/install.sh.in
@@ -9,8 +9,8 @@ cd "$scriptdir"
 deactivate >/dev/null 2>&1
 
 # this should be changed to the tagged release!
-INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
-# INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
+# INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
+INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
 INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
 TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
 MINIMUM_PYTHON_VERSION=3.9.0

From 96a12099edcbcd8117b71928168b5c93728a4e5e Mon Sep 17 00:00:00 2001
From: rmagur1203
Date: Mon, 12 Dec 2022 23:14:09 +0900
Subject: [PATCH 2/5] Fix the mistake of not importing the gc (#1939)

---
 ldm/invoke/generator/txt2img2img.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ldm/invoke/generator/txt2img2img.py b/ldm/invoke/generator/txt2img2img.py
index d06b14e6fe..b8d0f81322 100644
--- a/ldm/invoke/generator/txt2img2img.py
+++ b/ldm/invoke/generator/txt2img2img.py
@@ -5,6 +5,7 @@ ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
 import torch
 import numpy as np
 import math
+import gc
 from ldm.invoke.generator.base import Generator
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.invoke.generator.omnibus import Omnibus
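Background for PATCH 2/5: the hunk above only adds the missing `import gc`; txt2img2img.py evidently references the gc module elsewhere (presumably a `gc.collect()` call on its free_gpu_mem path), which would raise a NameError at runtime without the import. For readers unfamiliar with the pattern, a minimal, self-contained sketch of offloading a model and reclaiming memory; the function and argument names here are illustrative, not InvokeAI's API:

    import gc

    import torch

    def offload_and_reclaim(module: torch.nn.Module) -> None:
        # Move the weights to system RAM so another pipeline stage can use the GPU.
        module.to('cpu')
        # Drop unreachable Python objects that may still hold tensor references.
        gc.collect()
        # Hand cached, now-unused CUDA blocks back to the driver.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

Calling gc.collect() before torch.cuda.empty_cache() matters: cached CUDA memory can only be released once no live tensor objects still reference it.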
From f4e7383490692249b6dc7651984e524c3b10c6e5 Mon Sep 17 00:00:00 2001
From: rmagur1203
Date: Mon, 12 Dec 2022 23:14:30 +0900
Subject: [PATCH 3/5] Load model in inpaint when using free_gpu_mem option (#1938)

* Load model in inpaint when using free_gpu_mem option

* Passing free_gpu_mem option to inpaint generator
---
 ldm/generate.py                 |  1 +
 ldm/invoke/generator/inpaint.py | 13 ++++++++-----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/ldm/generate.py b/ldm/generate.py
index 7346f8dfe2..db36717135 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -806,6 +806,7 @@ class Generate:
         if not self.generators.get('inpaint'):
             from ldm.invoke.generator.inpaint import Inpaint
             self.generators['inpaint'] = Inpaint(self.model, self.precision)
+            self.generators['inpaint'].free_gpu_mem = self.free_gpu_mem
         return self.generators['inpaint']
 
     # "omnibus" supports the runwayML custom inpainting model, which does
diff --git a/ldm/invoke/generator/inpaint.py b/ldm/invoke/generator/inpaint.py
index 7b4d151265..b86e21efe6 100644
--- a/ldm/invoke/generator/inpaint.py
+++ b/ldm/invoke/generator/inpaint.py
@@ -59,7 +59,7 @@ class Inpaint(Img2Img):
             writeable=False
         )
 
-    def infill_patchmatch(self, im: Image.Image) -> Image:
+    def infill_patchmatch(self, im: Image.Image) -> Image:
         if im.mode != 'RGBA':
             return im
 
@@ -128,7 +128,7 @@ class Inpaint(Img2Img):
 
         # Combine
        npmask = npgradient + npedge
-        
+
         # Expand
        npmask = cv.dilate(npmask, np.ones((3,3), np.uint8), iterations = int(edge_size / 2))
        new_mask = Image.fromarray(npmask)
@@ -221,7 +221,7 @@ class Inpaint(Img2Img):
             init_filled = init_filled.resize((inpaint_width, inpaint_height))
             debug_image(init_filled, "init_filled", debug_status=self.enable_image_debugging)
 
-        
+
         # Create init tensor
         init_image = self._image_to_tensor(init_filled.convert('RGB'))
 
@@ -254,7 +254,7 @@ class Inpaint(Img2Img):
                 f">> Using recommended DDIM sampler for inpainting."
             )
             sampler = DDIMSampler(self.model, device=self.model.device)
-        
+
         sampler.make_schedule(
             ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
         )
@@ -291,6 +291,9 @@ class Inpaint(Img2Img):
                 masked_region = (1.0-inpaint_replace) * inverted_mask * z_enc + inpaint_replace * inverted_mask * l_noise
                 z_enc = z_enc * mask_image + masked_region
 
+            if self.free_gpu_mem and self.model.model.device != self.model.device:
+                self.model.model.to(self.model.device)
+
             # decode it
             samples = sampler.decode(
                 z_enc,
@@ -353,7 +356,7 @@ class Inpaint(Img2Img):
 
         if self.pil_image is None or self.pil_mask is None:
             return gen_result
-        
+
         corrected_result = super().repaste_and_color_correct(gen_result, self.pil_image, self.pil_mask, self.mask_blur_radius)
         debug_image(corrected_result, "corrected_result", debug_status=self.enable_image_debugging)
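Background for PATCH 3/5: with --free_gpu_mem, the main model can be sitting in CPU memory when the inpaint stage starts, so the generate.py hunk copies the flag onto the Inpaint generator and the inpaint.py hunk moves the weights back to the target device just before sampler.decode() runs. The patch does this by comparing device attributes on the model wrapper (self.model.model.device versus self.model.device). A generic sketch of the same check-before-compute idea, written against a plain torch module rather than InvokeAI's wrapper (names are illustrative):

    import torch

    def ensure_on_device(module: torch.nn.Module, device: torch.device) -> torch.nn.Module:
        # See where the weights currently live; if an earlier stage offloaded them
        # to the CPU to save VRAM, move them back before the compute-heavy step.
        current = next(module.parameters()).device
        if current != device:
            module.to(device)
        return module

The same guard belongs in front of any stage that may run after the weights were offloaded. (The remaining inpaint.py hunks in this patch are whitespace-only cleanup.)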
+
+If you installed manually, activate the 'invokeai' environment
+(e.g. 'conda activate invokeai'), then run one of the following
+commands to start InvokeAI.
+
+Web UI:
+    python scripts/invoke.py --web # (connect to http://localhost:9090)
+Command-line interface:
+    python scripts/invoke.py
 
 Have fun!
 '''

From 7314f1a862ab5f2ec7340c3918259409efd67931 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Mon, 12 Dec 2022 13:16:15 -0500
Subject: [PATCH 5/5] add --karras_max option to invoke.py command line (#1762)

This addresses the image regression reported in #1754
---
 ldm/invoke/args.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py
index 4d5ea7b5cc..98a75c9886 100644
--- a/ldm/invoke/args.py
+++ b/ldm/invoke/args.py
@@ -583,6 +583,12 @@ class Args(object):
         action='store_true',
         help='Generates debugging image to display'
     )
+    render_group.add_argument(
+        '--karras_max',
+        type=int,
+        default=None,
+        help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29]."
+    )
     # Restoration related args
     postprocessing_group.add_argument(
         '--no_restore',