From 7933f27a7201d065e9daee298590024884f5d239 Mon Sep 17 00:00:00 2001
From: mauwii
Date: Sun, 5 Feb 2023 20:45:31 +0100
Subject: [PATCH 1/7] update `pypi_helper.py`

- don't rename requests
- remove dash in version (`2.3.0-rc3` becomes `2.3.0rc3`)
- read package_name instead of hardcoding it
---
 scripts/pypi_helper.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/scripts/pypi_helper.py b/scripts/pypi_helper.py
index 66560d11c6..5fc0d4d59c 100644
--- a/scripts/pypi_helper.py
+++ b/scripts/pypi_helper.py
@@ -1,26 +1,26 @@
-import requests as request
+import requests
 
-import ldm.invoke._version as version
+from ldm.invoke.__init__ import __app_name__, __version__
 
-local_version = str(version.__version__)
+local_version = str(__version__).replace("-", "")
+package_name = str(__app_name__)
 
 
-def get_pypi_versions(package_name="InvokeAI") -> list[str]:
+def get_pypi_versions(package_name=package_name) -> list[str]:
     """Get the versions of the package from PyPI"""
     url = f"https://pypi.org/pypi/{package_name}/json"
-    response = request.get(url).json()
+    response = requests.get(url).json()
     versions: list[str] = list(response["releases"].keys())
     return versions
 
 
-def local_on_pypi(package_name="InvokeAI", local_version=local_version) -> bool:
+def local_on_pypi(package_name=package_name, local_version=local_version) -> bool:
     """Compare the versions of the package from PyPI and the local package"""
     pypi_versions = get_pypi_versions(package_name)
     return local_version in pypi_versions
 
 
 if __name__ == "__main__":
-    package_name = "InvokeAI"
     if local_on_pypi():
         print(f"Package {package_name} is up to date")
     else:

From fc2a136eb00a8a161c5435f15c572a159a057c6f Mon Sep 17 00:00:00 2001
From: mauwii
Date: Sun, 5 Feb 2023 21:15:39 +0100
Subject: [PATCH 2/7] add requested change

---
 scripts/pypi_helper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/pypi_helper.py b/scripts/pypi_helper.py
index 5fc0d4d59c..08646aa572 100644
--- a/scripts/pypi_helper.py
+++ b/scripts/pypi_helper.py
@@ -1,6 +1,6 @@
 import requests
 
-from ldm.invoke.__init__ import __app_name__, __version__
+from ldm.invoke import __app_name__, __version__
 
 local_version = str(__version__).replace("-", "")
 package_name = str(__app_name__)
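A note on the dash stripping in the two patches above: PyPI's JSON API keys the `releases` mapping by PEP 440 normalized version strings, in which a pre-release such as `2.3.0-rc3` is spelled `2.3.0rc3`, so the raw local version string would never match. A minimal sketch of the behavior, with hypothetical sample values (the `packaging` check at the end is an illustrative assumption, not part of the patch):

```python
# Why local_version must drop the dash before the membership test:
# PyPI keys releases by PEP 440 normalized version strings.
local_version = "2.3.0-rc3"
pypi_releases = ["2.3.0rc3", "2.3.0"]  # hypothetical response["releases"] keys

print(local_version in pypi_releases)                   # False: raw string never matches
print(local_version.replace("-", "") in pypi_releases)  # True: matches after normalizing

# The packaging library agrees that the two spellings denote the same version:
from packaging.version import Version
assert Version("2.3.0-rc3") == Version("2.3.0rc3")
```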
From fc53f6d47c29eebfbb2a3c245b61aff30553a98d Mon Sep 17 00:00:00 2001
From: mauwii
Date: Sun, 5 Feb 2023 21:25:44 +0100
Subject: [PATCH 3/7] hotfix `build-container.yml`

---
 .github/workflows/build-container.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index 7e940ca015..41f7fc1a49 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -47,7 +47,7 @@ jobs:
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
-           type=raw,value='sha'-{{sha}}-${{ matrix.flavor}}
+           type=sha,enable=true,prefix=sha-,suffix=${{ matrix.flavor}},format=short
            type=raw,value={{branch}}-${{ matrix.flavor }}
          flavor: |
            latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}

From ab585aefaea760ce20094faea41fb6411db3f2cb Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Mon, 6 Feb 2023 09:07:44 +1100
Subject: [PATCH 4/7] Update README.md

---
 invokeai/frontend/README.md | 28 ++++++++++------------------
 1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/invokeai/frontend/README.md b/invokeai/frontend/README.md
index f597cc6f23..6723716db9 100644
--- a/invokeai/frontend/README.md
+++ b/invokeai/frontend/README.md
@@ -1,28 +1,20 @@
-# Stable Diffusion Web UI
+# InvokeAI UI dev setup
 
-## Run
+The UI is in `invokeai/frontend`.
 
-- `python scripts/dream.py --web` serves both frontend and backend at
-  http://localhost:9090
+## Environment set up
 
-## Evironment
-
-Install [node](https://nodejs.org/en/download/) (includes npm) and optionally
+Install [node](https://nodejs.org/en/download/) (includes npm) and
 [yarn](https://yarnpkg.com/getting-started/install).
 
-From `frontend/` run `npm install` / `yarn install` to install the frontend
-packages.
+From `invokeai/frontend/` run `yarn install` to get everything set up.
 
 ## Dev
 
-1. From `frontend/`, run `npm dev` / `yarn dev` to start the dev server.
-2. Run `python scripts/dream.py --web`.
-3. Navigate to the dev server address e.g. `http://localhost:5173/`.
+1. Start the dev server: `yarn dev`
+2. Start the InvokeAI UI per usual: `invokeai --web`
+3. Point your browser to the dev server address e.g. `http://localhost:5173/`
 
-To build for dev: `npm build-dev` / `yarn build-dev`
+To build for dev: `yarn build-dev`
 
-To build for production: `npm build` / `yarn build`
-
-## TODO
-
-- Search repo for "TODO"
+To build for production: `yarn build`
\ No newline at end of file

From 142016827f50e4c7dde0fe5d84516fc72a398a25 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sun, 5 Feb 2023 18:35:01 -0500
Subject: [PATCH 5/7] fix formatting bugs in both textual_inversion and merge
 front ends

- The issue is that if insufficient diffusers models are defined in
  models.yaml, the front end would crash ungracefully.
- Now it emits appropriate error messages telling the user what the
  problem is.
---
 ldm/invoke/merge_diffusers.py            | 140 +++++++++++++----------
 ldm/invoke/training/textual_inversion.py |  12 +-
 2 files changed, 93 insertions(+), 59 deletions(-)

diff --git a/ldm/invoke/merge_diffusers.py b/ldm/invoke/merge_diffusers.py
index 50b1f473f5..b5e7967ab1 100644
--- a/ldm/invoke/merge_diffusers.py
+++ b/ldm/invoke/merge_diffusers.py
@@ -15,20 +15,18 @@ from pathlib import Path
 from typing import List, Union
 
 import npyscreen
-from diffusers import DiffusionPipeline, logging as dlogging
+from diffusers import DiffusionPipeline
+from diffusers import logging as dlogging
+from npyscreen import widget
 from omegaconf import OmegaConf
 
-from ldm.invoke.globals import (
-    Globals,
-    global_cache_dir,
-    global_config_file,
-    global_models_dir,
-    global_set_root,
-)
+from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
+                                global_models_dir, global_set_root)
 from ldm.invoke.model_manager import ModelManager
 
 DEST_MERGED_MODEL_DIR = "merged_models"
 
+
 def merge_diffusion_models(
     model_ids_or_paths: List[Union[str, Path]],
     alpha: float = 0.5,
@@ -48,10 +46,10 @@ def merge_diffusion_models(
     cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
     """
     with warnings.catch_warnings():
-        warnings.simplefilter('ignore')
+        warnings.simplefilter("ignore")
         verbosity = dlogging.get_verbosity()
         dlogging.set_verbosity_error()
-
+
         pipe = DiffusionPipeline.from_pretrained(
             model_ids_or_paths[0],
             cache_dir=kwargs.get("cache_dir", global_cache_dir()),
@@ -188,13 +186,12 @@ class FloatTitleSlider(npyscreen.TitleText):
 
 
 class mergeModelsForm(npyscreen.FormMultiPageAction):
-
     interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"]
 
     def __init__(self, parentApp, name):
         self.parentApp = parentApp
-        self.ALLOW_RESIZE=True
-        self.FIX_MINIMUM_SIZE_WHEN_CREATED=False
+        self.ALLOW_RESIZE = True
+        self.FIX_MINIMUM_SIZE_WHEN_CREATED = False
         super().__init__(parentApp, name)
 
     @property
@@ -205,29 +202,29 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
     def model_manager(self):
         return self.parentApp.model_manager
 
     def afterEditing(self):
         self.parentApp.setNextForm(None)
 
     def create(self):
-        window_height,window_width=curses.initscr().getmaxyx()
-
+        window_height, window_width = curses.initscr().getmaxyx()
+
         self.model_names = self.get_model_names()
         max_width = max([len(x) for x in self.model_names])
         max_width += 6
-        horizontal_layout = max_width*3 < window_width
-
+        horizontal_layout = max_width * 3 < window_width
+
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            color='CONTROL',
+            color="CONTROL",
             value=f"Select two models to merge and optionally a third.",
             editable=False,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            color='CONTROL',
+            color="CONTROL",
             value=f"Use up and down arrows to move, <space> to select an item, and <tab> to move from one field to the next.",
             editable=False,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='MODEL 1',
-            color='GOOD',
+            value="MODEL 1",
+            color="GOOD",
             editable=False,
             rely=4 if horizontal_layout else None,
         )
         self.model1 = self.add_widget_intelligent(
             npyscreen.SelectOne,
             values=self.model_names,
             value=0,
             max_height=len(self.model_names),
             max_width=max_width,
             scroll_exit=True,
             rely=5 if horizontal_layout else None,
@@ -242,57 +239,57 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='MODEL 2',
-            color='GOOD',
+            value="MODEL 2",
+            color="GOOD",
             editable=False,
-            relx=max_width+3 if horizontal_layout else None,
+            relx=max_width + 3 if horizontal_layout else None,
             rely=4 if horizontal_layout else None,
         )
         self.model2 = self.add_widget_intelligent(
             npyscreen.SelectOne,
-            name='(2)',
+            name="(2)",
             values=self.model_names,
             value=1,
             max_height=len(self.model_names),
             max_width=max_width,
-            relx=max_width+3 if horizontal_layout else None,
+            relx=max_width + 3 if horizontal_layout else None,
             rely=5 if horizontal_layout else None,
             scroll_exit=True,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='MODEL 3',
-            color='GOOD',
+            value="MODEL 3",
+            color="GOOD",
             editable=False,
-            relx=max_width*2+3 if horizontal_layout else None,
+            relx=max_width * 2 + 3 if horizontal_layout else None,
             rely=4 if horizontal_layout else None,
         )
         models_plus_none = self.model_names.copy()
-        models_plus_none.insert(0,'None')
+        models_plus_none.insert(0, "None")
         self.model3 = self.add_widget_intelligent(
             npyscreen.SelectOne,
-            name='(3)',
+            name="(3)",
             values=models_plus_none,
             value=0,
-            max_height=len(self.model_names)+1,
+            max_height=len(self.model_names) + 1,
             max_width=max_width,
             scroll_exit=True,
-            relx=max_width*2+3 if horizontal_layout else None,
+            relx=max_width * 2 + 3 if horizontal_layout else None,
             rely=5 if horizontal_layout else None,
         )
-        for m in [self.model1,self.model2,self.model3]:
+        for m in [self.model1, self.model2, self.model3]:
             m.when_value_edited = self.models_changed
         self.merged_model_name = self.add_widget_intelligent(
             npyscreen.TitleText,
             name="Name for merged model:",
-            labelColor='CONTROL',
+            labelColor="CONTROL",
            value="",
             scroll_exit=True,
         )
         self.force = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Force merge of incompatible models",
-            labelColor='CONTROL',
+            labelColor="CONTROL",
             value=False,
             scroll_exit=True,
         )
         self.merge_method = self.add_widget_intelligent(
             npyscreen.TitleSelectOne,
             name="Merge Method:",
             values=self.interpolations,
             value=0,
-            labelColor='CONTROL',
+            labelColor="CONTROL",
             max_height=len(self.interpolations) + 1,
             scroll_exit=True,
         )
         self.alpha = self.add_widget_intelligent(
             FloatTitleSlider,
             name="Weight (alpha) to assign to second and third models:",
             out_of=1,
             step=0.05,
             lowest=0,
             value=0.5,
-            labelColor='CONTROL',
+            labelColor="CONTROL",
             scroll_exit=True,
         )
         self.model1.editing = True
 
@@ -322,43 +319,43 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         selected_model1 = self.model1.value[0]
         selected_model2 = self.model2.value[0]
         selected_model3 = self.model3.value[0]
-        merged_model_name = f'{models[selected_model1]}+{models[selected_model2]}'
+        merged_model_name = f"{models[selected_model1]}+{models[selected_model2]}"
         self.merged_model_name.value = merged_model_name
-
+
         if selected_model3 > 0:
-            self.merge_method.values=['add_difference'],
-            self.merged_model_name.value += f'+{models[selected_model3]}'
+            self.merge_method.values = (["add_difference"],)
+            self.merged_model_name.value += f"+{models[selected_model3]}"
         else:
-            self.merge_method.values=self.interpolations
-            self.merge_method.value=0
+            self.merge_method.values = self.interpolations
+            self.merge_method.value = 0
 
     def on_ok(self):
         if self.validate_field_values() and self.check_for_overwrite():
             self.parentApp.setNextForm(None)
             self.editing = False
             self.parentApp.merge_arguments = self.marshall_arguments()
-            npyscreen.notify('Starting the merge...')
+            npyscreen.notify("Starting the merge...")
         else:
             self.editing = True
 
     def on_cancel(self):
         sys.exit(0)
 
-    def marshall_arguments(self)->dict:
+    def marshall_arguments(self) -> dict:
         model_names = self.model_names
         models = [
             model_names[self.model1.value[0]],
             model_names[self.model2.value[0]],
-            ]
+        ]
         if self.model3.value[0] > 0:
-            models.append(model_names[self.model3.value[0]-1])
+            models.append(model_names[self.model3.value[0] - 1])
 
         args = dict(
             models=models,
-            alpha = self.alpha.value,
-            interp = self.interpolations[self.merge_method.value[0]],
-            force = self.force.value,
-            merged_model_name = self.merged_model_name.value,
+            alpha=self.alpha.value,
+            interp=self.interpolations[self.merge_method.value[0]],
+            force=self.force.value,
+            merged_model_name=self.merged_model_name.value,
         )
         return args
 
@@ -371,18 +368,22 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
             f"The chosen merged model destination, {model_out}, is already in use. Overwrite?"
         )
 
-    def validate_field_values(self)->bool:
+    def validate_field_values(self) -> bool:
         bad_fields = []
         model_names = self.model_names
-        selected_models = set((model_names[self.model1.value[0]],model_names[self.model2.value[0]]))
+        selected_models = set(
+            (model_names[self.model1.value[0]], model_names[self.model2.value[0]])
+        )
         if self.model3.value[0] > 0:
-            selected_models.add(model_names[self.model3.value[0]-1])
+            selected_models.add(model_names[self.model3.value[0] - 1])
         if len(selected_models) < 2:
-            bad_fields.append(f'Please select two or three DIFFERENT models to compare. You selected {selected_models}')
+            bad_fields.append(
+                f"Please select two or three DIFFERENT models to compare. You selected {selected_models}"
+            )
         if len(bad_fields) > 0:
-            message = 'The following problems were detected and must be corrected:'
+            message = "The following problems were detected and must be corrected:"
             for problem in bad_fields:
-                message += f'\n* {problem}'
+                message += f"\n* {problem}"
             npyscreen.notify_confirm(message)
             return False
         else:
             return True
 
@@ -410,6 +411,7 @@ class Mergeapp(npyscreen.NPSAppManaged):
         npyscreen.setTheme(npyscreen.Themes.ElegantTheme)
         self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings")
 
+
 def run_gui(args: Namespace):
     mergeapp = Mergeapp()
     mergeapp.run()
@@ -450,5 +452,27 @@ def main():
     ] = cache_dir  # because not clear the merge pipeline is honoring cache_dir
     args.cache_dir = cache_dir
 
+    try:
+        if args.front_end:
+            run_gui(args)
+        else:
+            run_cli(args)
+        print(f">> Conversion successful. New model is named {args.merged_model_name}")
+    except widget.NotEnoughSpaceForWidget as e:
+        if str(e).startswith("Height of 1 allocated"):
+            print(
+                "** You need to have at least two diffusers models defined in models.yaml in order to merge"
+            )
+        else:
+            print(f"** A layout error has occurred: {str(e)}")
+        sys.exit(-1)
+    except Exception as e:
+        print(">> An error occurred:")
+        traceback.print_exc()
+        sys.exit(-1)
+    except KeyboardInterrupt:
+        sys.exit(-1)
+
+
 if __name__ == "__main__":
     main()
diff --git a/ldm/invoke/training/textual_inversion.py b/ldm/invoke/training/textual_inversion.py
index 5402e05ec9..d1a39f32ae 100755
--- a/ldm/invoke/training/textual_inversion.py
+++ b/ldm/invoke/training/textual_inversion.py
@@ -17,6 +17,7 @@ from pathlib import Path
 from typing import List, Tuple
 
 import npyscreen
+from npyscreen import widget
 from omegaconf import OmegaConf
 
 from ldm.invoke.globals import Globals, global_set_root
@@ -295,7 +296,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             for idx in range(len(model_names))
             if "default" in conf[model_names[idx]]
         ]
-        default = defaults[0] if len(defaults)>0 else 0
+        default = defaults[0] if len(defaults) > 0 else 0
         return (model_names, default)
 
     def marshall_arguments(self) -> dict:
@@ -438,11 +439,20 @@ def main():
             do_front_end(args)
         else:
             do_textual_inversion_training(**vars(args))
+    except widget.NotEnoughSpaceForWidget as e:
+        if str(e).startswith("Height of 1 allocated"):
+            print(
+                "** You need to have at least one diffusers model defined in models.yaml in order to train"
+            )
+        else:
+            print(f"** A layout error has occurred: {str(e)}")
+        sys.exit(-1)
     except AssertionError as e:
         print(str(e))
         sys.exit(-1)
     except KeyboardInterrupt:
         pass
+
 
 if __name__ == "__main__":
     main()
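The error handling added to both front ends in PATCH 5 follows one pattern: npyscreen raises `widget.NotEnoughSpaceForWidget` when a widget cannot be laid out, which is also what happens when a selection list is built from too few defined models, so the exception is translated into an actionable message instead of a traceback. A minimal sketch of the pattern in isolation (the `run_tui` wrapper and its message text are illustrative, not InvokeAI code):

```python
import sys

import npyscreen
from npyscreen import widget


def run_tui(app: npyscreen.NPSAppManaged) -> None:
    """Run a TUI app, translating layout errors into actionable messages."""
    try:
        app.run()
    except widget.NotEnoughSpaceForWidget as e:
        # A SelectOne built from an empty or one-item model list is allocated
        # a height of 1, which npyscreen reports as a layout failure.
        if str(e).startswith("Height of 1 allocated"):
            print("** Define more models in models.yaml before launching")
        else:
            print(f"** A layout error has occurred: {str(e)}")
        sys.exit(-1)
```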
From 024065636167c8e2834300707cc9aca827e7f5c6 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sun, 5 Feb 2023 22:55:08 -0500
Subject: [PATCH 6/7] fix crash in txt2img and img2img w/ inpainting models
 and perlin > 0

- get_perlin_noise() was returning 9 channels; fixed code to return
  noise for just the 4 image channels and not the mask ones.
- Closes Issue #2541
---
 ldm/invoke/generator/base.py    | 31 ++++++++++++++++++++++++++++++-
 ldm/invoke/generator/img2img.py | 19 -------------------
 ldm/invoke/generator/txt2img.py | 22 ----------------------
 3 files changed, 30 insertions(+), 42 deletions(-)

diff --git a/ldm/invoke/generator/base.py b/ldm/invoke/generator/base.py
index bab63b6261..da2ade2f0c 100644
--- a/ldm/invoke/generator/base.py
+++ b/ldm/invoke/generator/base.py
@@ -240,7 +240,12 @@ class Generator:
 
     def get_perlin_noise(self,width,height):
         fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
-        noise = torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
+        # limit noise to only the diffusion image channels, not the mask channels
+        input_channels = min(self.latent_channels, 4)
+        noise = torch.stack([
+            rand_perlin_2d((height, width),
+                           (8, 8),
+                           device = self.model.device).to(fixdevice) for _ in range(input_channels)], dim=0).to(self.model.device)
         return noise
 
     def new_seed(self):
@@ -341,3 +346,27 @@ class Generator:
 
     def torch_dtype(self)->torch.dtype:
         return torch.float16 if self.precision == 'float16' else torch.float32
+
+    # returns a tensor filled with random numbers from a normal distribution
+    def get_noise(self,width,height):
+        device = self.model.device
+        # limit noise to only the diffusion image channels, not the mask channels
+        input_channels = min(self.latent_channels, 4)
+        if self.use_mps_noise or device.type == 'mps':
+            x = torch.randn([1,
+                             input_channels,
+                             height // self.downsampling_factor,
+                             width // self.downsampling_factor],
+                            dtype=self.torch_dtype(),
+                            device='cpu').to(device)
+        else:
+            x = torch.randn([1,
+                             input_channels,
+                             height // self.downsampling_factor,
+                             width // self.downsampling_factor],
+                            dtype=self.torch_dtype(),
+                            device=device)
+        if self.perlin > 0.0:
+            perlin_noise = self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
+            x = (1-self.perlin)*x + self.perlin*perlin_noise
+        return x
diff --git a/ldm/invoke/generator/img2img.py b/ldm/invoke/generator/img2img.py
index fedf6d3abc..bfa50617ef 100644
--- a/ldm/invoke/generator/img2img.py
+++ b/ldm/invoke/generator/img2img.py
@@ -63,22 +63,3 @@ class Img2Img(Generator):
             shape = like.shape
             x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2])
         return x
-
-    def get_noise(self,width,height):
-        # copy of the Txt2Img.get_noise
-        device = self.model.device
-        if self.use_mps_noise or device.type == 'mps':
-            x = torch.randn([1,
-                             self.latent_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            device='cpu').to(device)
-        else:
-            x = torch.randn([1,
-                             self.latent_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            device=device)
-        if self.perlin > 0.0:
-            x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
-        return x
diff --git a/ldm/invoke/generator/txt2img.py b/ldm/invoke/generator/txt2img.py
index 77b16a734e..6578794fa7 100644
--- a/ldm/invoke/generator/txt2img.py
+++ b/ldm/invoke/generator/txt2img.py
@@ -51,26 +51,4 @@ class Txt2Img(Generator):
 
         return make_image
 
-    # returns a tensor filled with random numbers from a normal distribution
-    def get_noise(self,width,height):
-        device = self.model.device
-        # limit noise to only the diffusion image channels, not the mask channels
-        input_channels = min(self.latent_channels, 4)
-        if self.use_mps_noise or device.type == 'mps':
-            x = torch.randn([1,
-                             input_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            dtype=self.torch_dtype(),
-                            device='cpu').to(device)
-        else:
-            x = torch.randn([1,
-                             input_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            dtype=self.torch_dtype(),
-                            device=device)
-        if self.perlin > 0.0:
-            x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
-        return x
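The channel arithmetic behind PATCH 6: an inpainting UNet reports 9 latent input channels (4 noisy-image latents, 4 masked-image latents, and 1 mask), but the initial noise may only cover the 4 image latents, so `min(self.latent_channels, 4)` caps the count and the perlin blend no longer hits a shape mismatch. A standalone sketch of the shapes (the sizes and the stand-in perlin tensor are illustrative assumptions):

```python
import torch

latent_channels = 9        # inpainting model: 4 image + 4 masked-image + 1 mask
downsampling_factor = 8
width, height = 512, 512

# Draw noise for the image latent channels only, as the patch does.
input_channels = min(latent_channels, 4)
x = torch.randn(
    1, input_channels, height // downsampling_factor, width // downsampling_factor
)
print(x.shape)  # torch.Size([1, 4, 64, 64])

# Blending in perlin noise now works because both tensors are 4-channel.
perlin = torch.randn_like(x)  # stand-in for per-channel rand_perlin_2d output
strength = 0.5
x = (1 - strength) * x + strength * perlin
```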
From b7ab025f406cf6ae30beab5068e322d0c2d672d9 Mon Sep 17 00:00:00 2001
From: Jonathan <34005131+JPPhoto@users.noreply.github.com>
Date: Sun, 5 Feb 2023 23:14:35 -0600
Subject: [PATCH 7/7] Update base.py (#2543)

Free up CUDA cache right after each image is generated. VRAM usage drops
down to pre-generation levels.
---
 ldm/invoke/generator/base.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/ldm/invoke/generator/base.py b/ldm/invoke/generator/base.py
index da2ade2f0c..f30ab256ae 100644
--- a/ldm/invoke/generator/base.py
+++ b/ldm/invoke/generator/base.py
@@ -122,6 +122,10 @@ class Generator:
 
             seed = self.new_seed()
 
+            # Free up memory from the last generation.
+            if self.model.device.type == 'cuda':
+                torch.cuda.empty_cache()
+
         return results
 
     def sample_to_image(self,samples)->Image.Image:
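A note on the `empty_cache()` call in PATCH 7: PyTorch's caching allocator holds freed blocks for reuse, so VRAM as reported by the driver stays at its generation-time peak until the cache is explicitly released; tensors that are still referenced are unaffected. A minimal sketch of the same pattern (the wrapper function is illustrative, not part of the patch):

```python
import torch


def generate_and_release(generate_fn):
    """Run one generation, then return cached VRAM blocks to the driver."""
    results = generate_fn()
    if torch.cuda.is_available():
        # Releases unused cached memory only; referenced tensors are untouched.
        torch.cuda.empty_cache()
    return results
```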