From 57db66634d4a43ec8783c495482dfe8d2bacc7f9 Mon Sep 17 00:00:00 2001
From: Damian Stewart
Date: Wed, 8 Mar 2023 14:25:48 +0100
Subject: [PATCH 01/18] longer prompts wip

---
 invokeai/backend/prompting/conditioning.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py
index 04051084c0..4c9b0f8adf 100644
--- a/invokeai/backend/prompting/conditioning.py
+++ b/invokeai/backend/prompting/conditioning.py
@@ -71,6 +71,7 @@ def get_uc_and_c_and_ec(
         text_encoder=text_encoder,
         textual_inversion_manager=model.textual_inversion_manager,
         dtype_for_device_getter=torch_dtype,
+        truncate_too_long_prompts=False
     )

     # get rid of any newline characters
@@ -82,12 +83,12 @@ def get_uc_and_c_and_ec(
     legacy_blend = try_parse_legacy_blend(
         positive_prompt_string, skip_normalize_legacy_blend
     )
-    positive_prompt: FlattenedPrompt | Blend
+    positive_prompt: Union[FlattenedPrompt, Blend]
     if legacy_blend is not None:
         positive_prompt = legacy_blend
     else:
         positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
-    negative_prompt: FlattenedPrompt | Blend = Compel.parse_prompt_string(
+    negative_prompt: Union[FlattenedPrompt, Blend] = Compel.parse_prompt_string(
         negative_prompt_string
     )

@@ -96,6 +97,7 @@ def get_uc_and_c_and_ec(

     c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
     uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
+    c, uc = compel.pad_conditioning_tensors_to_same_length(c, uc)

     tokens_count = get_max_token_count(tokenizer, positive_prompt)

From 768e969c902678bc5c614a5eea167a96f39c267b Mon Sep 17 00:00:00 2001
From: damian
Date: Wed, 8 Mar 2023 18:00:54 +0100
Subject: [PATCH 02/18] cleanup and fix kwarg

---
 invokeai/backend/prompting/conditioning.py | 8 ++++----
 pyproject.toml                             | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py
index 4c9b0f8adf..6f6eb4690d 100644
--- a/invokeai/backend/prompting/conditioning.py
+++ b/invokeai/backend/prompting/conditioning.py
@@ -17,7 +17,7 @@ from compel.prompt_parser import (
     Fragment,
     PromptParser,
 )
-from transformers import CLIPTextModel, CLIPTokenizer
+from transformers import CLIPTokenizer

 from invokeai.backend.globals import Globals

@@ -71,7 +71,7 @@ def get_uc_and_c_and_ec(
         text_encoder=text_encoder,
         textual_inversion_manager=model.textual_inversion_manager,
         dtype_for_device_getter=torch_dtype,
-        truncate_too_long_prompts=False
+        truncate_long_prompts=False
     )

     # get rid of any newline characters
@@ -118,12 +118,12 @@ def get_prompt_structure(
     legacy_blend = try_parse_legacy_blend(
         positive_prompt_string, skip_normalize_legacy_blend
     )
-    positive_prompt: FlattenedPrompt | Blend
+    positive_prompt: Union[FlattenedPrompt, Blend]
     if legacy_blend is not None:
         positive_prompt = legacy_blend
     else:
         positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
-    negative_prompt: FlattenedPrompt | Blend = Compel.parse_prompt_string(
+    negative_prompt: Union[FlattenedPrompt, Blend] = Compel.parse_prompt_string(
         negative_prompt_string
     )

diff --git a/pyproject.toml b/pyproject.toml
index 4c2d903316..97df08a244 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,7 +38,7 @@ dependencies = [
   "albumentations",
   "click",
   "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
-  "compel==0.1.7",
+  "compel @ https://github.com/damian0815/compel/archive/no_max_token_limit.zip",
   "datasets",
   "diffusers[torch]~=0.14",
   "dnspython==2.2.1",

From a38b75572fc67b2fa8dbb1595aa6dadd3577767b Mon Sep 17 00:00:00 2001
From: Damian Stewart
Date: Wed, 8 Mar 2023 20:00:18 +0100
Subject: [PATCH 03/18] don't log excess tokens as truncated

---
 invokeai/backend/prompting/conditioning.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py
index 6f6eb4690d..b7c4f2c14e 100644
--- a/invokeai/backend/prompting/conditioning.py
+++ b/invokeai/backend/prompting/conditioning.py
@@ -131,7 +131,7 @@


 def get_max_token_count(
-    tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=True
+    tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=False
 ) -> int:
     if type(prompt) is Blend:
         blend: Blend = prompt
@@ -247,7 +247,7 @@ def log_tokenization_for_prompt_object(
     )


-def log_tokenization_for_text(text, tokenizer, display_label=None):
+def log_tokenization_for_text(text, tokenizer, display_label=None, truncate_if_too_long=False):
     """shows how the prompt is tokenized
     # usually tokens have '</w>' to indicate end-of-word,
     # but for readability it has been replaced with ' '
@@ -262,11 +262,11 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
         token = tokens[i].replace("</w>", " ")
         # alternate color
         s = (usedTokens % 6) + 1
-        if i < tokenizer.model_max_length:
+        if truncate_if_too_long and i >= tokenizer.model_max_length:
+            discarded = discarded + f"\x1b[0;3{s};40m{token}"
+        else:
             tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
             usedTokens += 1
-        else:  # over max token length
-            discarded = discarded + f"\x1b[0;3{s};40m{token}"

     if usedTokens > 0:
         print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')

From 69e2dc0404391c5cb82f98cb9f8ed9395e0a260b Mon Sep 17 00:00:00 2001
From: damian
Date: Wed, 8 Mar 2023 20:45:01 +0100
Subject: [PATCH 04/18] update for compel changes

---
 invokeai/backend/prompting/conditioning.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py
index b7c4f2c14e..5f94b0f975 100644
--- a/invokeai/backend/prompting/conditioning.py
+++ b/invokeai/backend/prompting/conditioning.py
@@ -97,7 +97,7 @@ def get_uc_and_c_and_ec(

     c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
     uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
-    c, uc = compel.pad_conditioning_tensors_to_same_length(c, uc)
+    [c, uc] = compel.pad_conditioning_tensors_to_same_length([c, uc])

     tokens_count = get_max_token_count(tokenizer, positive_prompt)

From 88cb63e4a1a53ad830f5ccc97cdfee2b1fd96436 Mon Sep 17 00:00:00 2001
From: Damian Stewart
Date: Wed, 8 Mar 2023 23:24:30 +0100
Subject: [PATCH 05/18] pin new compel version

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 97df08a244..4b15b5977c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,7 +38,7 @@ dependencies = [
   "albumentations",
   "click",
   "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
-  "compel @ https://github.com/damian0815/compel/archive/no_max_token_limit.zip",
+  "compel==0.1.9",
   "datasets",
   "diffusers[torch]~=0.14",
   "dnspython==2.2.1",

From 65fc9a6e0e78a13b70e46b2afdbc8808eab854eb Mon Sep 17 00:00:00 2001
From: Damian Stewart
Date: Thu, 9 Mar 2023 10:28:07 +0100
Subject: [PATCH 06/18] bump compel version to address issues

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 4b15b5977c..db29c53883 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,7 +38,7 @@ dependencies = [
   "albumentations",
   "click",
   "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
-  "compel==0.1.9",
+  "compel==0.1.10",
   "datasets",
   "diffusers[torch]~=0.14",
   "dnspython==2.2.1",

From 5418bd3b242a76e571439240d5f664744119ae5e Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Thu, 9 Mar 2023 09:22:29 -0500
Subject: [PATCH 07/18] (ci) unlabel stale issues when commented

---
 .github/workflows/close-inactive-issues.yml | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
index dbb89cc8f2..89c98c1c3f 100644
--- a/.github/workflows/close-inactive-issues.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -2,6 +2,8 @@ name: Close inactive issues
 on:
   schedule:
     - cron: "00 6 * * *"
+  issue_comment:
+    types: [ "created" ]

 env:
   DAYS_BEFORE_ISSUE_STALE: 14
@@ -10,6 +12,7 @@ env:
 jobs:
   close-issues:
     runs-on: ubuntu-latest
+    if: ${{ !github.event.issue.pull_request }}
     permissions:
       issues: write
       pull-requests: write
@@ -18,9 +21,9 @@ jobs:
         with:
           days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
           days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
-          stale-issue-label: "Inactive Issue"
-          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
-          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
+          stale-issue-label: "stale"
+          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. Please reply with a comment to keep the issue open. We recommend testing with the latest release to make sure it hasn't been already fixed."
+          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please open a new one and reference issue ${{ github.event.issue.number }}."
           days-before-pr-stale: -1
           days-before-pr-close: -1
           repo-token: ${{ secrets.GITHUB_TOKEN }}

From 507e12520ee1ebd65a23e969c839208456423691 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 9 Mar 2023 19:21:57 +0100
Subject: [PATCH 08/18] Make sure command also works with Oh-my-zsh

Many people use oh-my-zsh for their command line: https://ohmyz.sh/

Adding `""` should work both on ohmyzsh and native bash
---
 docs/installation/020_INSTALL_MANUAL.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md
index 711df0f8f9..401560e76c 100644
--- a/docs/installation/020_INSTALL_MANUAL.md
+++ b/docs/installation/020_INSTALL_MANUAL.md
@@ -148,7 +148,7 @@ manager, please follow these steps:
     === "CUDA (NVidia)"

         ```bash
-        pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+        pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
         ```

     === "ROCm (AMD)"

From 55d36eaf4fcb3a963ddd055d74fff90ba603834d Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:31:05 -0800
Subject: [PATCH 09/18] fix: image_resized_to_grid_as_tensor: reconnect dropped multiple_of argument

---
 invokeai/backend/stable_diffusion/diffusers_pipeline.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 6bd1fe339d..cb842356e4 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -175,7 +175,7 @@ def image_resized_to_grid_as_tensor(
     :param normalize: scale the range to [-1, 1] instead of [0, 1]
     :param multiple_of: resize the input so both dimensions are a multiple of this
     """
-    w, h = trim_to_multiple_of(*image.size)
+    w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of)
     transformation = T.Compose(
         [
             T.Resize((h, w), T.InterpolationMode.LANCZOS),

From 081397737b2fc4cc1444e9c142a44c9e094867c2 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:33:06 -0800
Subject: [PATCH 10/18] typo: docstring spelling fixes

looks like they've already been corrected in the upstream copy
---
 invokeai/backend/stable_diffusion/diffusers_pipeline.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index cb842356e4..8e73cc0bb2 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -290,10 +290,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
         scheduler ([`SchedulerMixin`]):
-            A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
+            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
         safety_checker ([`StableDiffusionSafetyChecker`]):
-            Classification module that estimates whether generated images could be considered offsensive or harmful.
+            Classification module that estimates whether generated images could be considered offensive or harmful.
             Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
         feature_extractor ([`CLIPFeatureExtractor`]):
             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
@@ -436,7 +436,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         """
         Ready this pipeline's models.

-        i.e. pre-load them to the GPU if appropriate.
+        i.e. preload them to the GPU if appropriate.
         """
         self._model_group.ready()

From faa2558e2f5c56d402aeedd18f77f3b3b15aad37 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:34:41 -0800
Subject: [PATCH 11/18] chore: add new argument to overridden method to match new signature upstream

---
 invokeai/backend/stable_diffusion/diffusers_pipeline.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 8e73cc0bb2..3857cf2def 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -440,7 +440,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         """
         self._model_group.ready()

-    def to(self, torch_device: Optional[Union[str, torch.device]] = None):
+    def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False):
         # overridden method; types match the superclass.
         if torch_device is None:
             return self

From 42355b70c2ef2d16dc2e2f55bbb3fb877f1f1961 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:35:54 -0800
Subject: [PATCH 12/18] fix(Pipeline.debug_latents): fix import for moved utility function

---
 .../backend/stable_diffusion/diffusers_pipeline.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 3857cf2def..c97b122728 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -942,11 +942,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         return super().decode_latents(latents)

     def debug_latents(self, latents, msg):
+        from invokeai.backend.image_util import debug_image
         with torch.inference_mode():
-            from ldm.util import debug_image
-
             decoded = self.numpy_to_pil(self.decode_latents(latents))
-            for i, img in enumerate(decoded):
-                debug_image(
-                    img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
-                )
+        for i, img in enumerate(decoded):
+            debug_image(
+                img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
+            )

From ad7b1fa6fb41d0c1813660be96ec6876b8cc9ce8 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 17:59:55 -0800
Subject: [PATCH 13/18] model_manager: model to/from CPU methods are implemented on the Pipeline

---
 .../backend/model_management/model_manager.py | 40 +------
 1 file changed, 2 insertions(+), 38 deletions(-)

diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index b362500ff7..7639e79362 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -104,7 +104,7 @@ class ModelManager(object):
         if model_name in self.models:
             requested_model = self.models[model_name]["model"]
             print(f">> Retrieving model {model_name} from system RAM cache")
-            self.models[model_name]["model"] = self._model_from_cpu(requested_model)
+            requested_model.ready()
             width = self.models[model_name]["width"]
             height = self.models[model_name]["height"]
             hash = self.models[model_name]["hash"]
@@ -499,6 +499,7 @@ class ModelManager(object):

         print(f">> Offloading {model_name} to CPU")
         model = self.models[model_name]["model"]
+        model.offload_all()
         self.models[model_name]["model"] = self._model_to_cpu(model)

         gc.collect()
@@ -1044,43 +1045,6 @@ class ModelManager(object):
             self.stack.remove(model_name)
         self.models.pop(model_name, None)

-    def _model_to_cpu(self, model):
-        if self.device == CPU_DEVICE:
-            return model
-
-        if isinstance(model, StableDiffusionGeneratorPipeline):
-            model.offload_all()
-            return model
-
-        model.cond_stage_model.device = CPU_DEVICE
-        model.to(CPU_DEVICE)
-
-        for submodel in ("first_stage_model", "cond_stage_model", "model"):
-            try:
-                getattr(model, submodel).to(CPU_DEVICE)
-            except AttributeError:
-                pass
-        return model
-
-    def _model_from_cpu(self, model):
-        if self.device == CPU_DEVICE:
-            return model
-
-        if isinstance(model, StableDiffusionGeneratorPipeline):
-            model.ready()
-            return model
-
-        model.to(self.device)
-        model.cond_stage_model.device = self.device
-
-        for submodel in ("first_stage_model", "cond_stage_model", "model"):
-            try:
-                getattr(model, submodel).to(self.device)
-            except AttributeError:
-                pass
-
-        return model
-
     def _pop_oldest_model(self):
         """
         Remove the first element of the FIFO, which ought

From 9d339e94f25178b8bf317679cfc9a790480cea74 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 18:01:37 -0800
Subject: [PATCH 14/18] backend..conditioning: remove code for legacy model

---
 invokeai/backend/prompting/__init__.py       |  1 -
 invokeai/backend/prompting/conditioning.py   | 38 ++------------------
 invokeai/backend/web/invoke_ai_web_server.py |  3 +-
 3 files changed, 4 insertions(+), 38 deletions(-)

diff --git a/invokeai/backend/prompting/__init__.py b/invokeai/backend/prompting/__init__.py
index 152edf646b..b52206dd94 100644
--- a/invokeai/backend/prompting/__init__.py
+++ b/invokeai/backend/prompting/__init__.py
@@ -3,7 +3,6 @@ Initialization file for invokeai.backend.prompting
 """
 from .conditioning import (
     get_prompt_structure,
-    get_tokenizer,
     get_tokens_for_prompt_object,
     get_uc_and_c_and_ec,
     split_weighted_subprompts,
diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py
index 5f94b0f975..1ddae1e93d 100644
--- a/invokeai/backend/prompting/conditioning.py
+++ b/invokeai/backend/prompting/conditioning.py
@@ -7,7 +7,7 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an
 """
 import re

-from typing import Any, Optional, Union
+from typing import Optional, Union

 from compel import Compel
 from compel.prompt_parser import (
@@ -17,7 +17,6 @@ from compel.prompt_parser import (
     Fragment,
     PromptParser,
 )
-from transformers import CLIPTokenizer

 from invokeai.backend.globals import Globals

@@ -25,36 +24,6 @@ from ..stable_diffusion import InvokeAIDiffuserComponent
 from ..util import torch_dtype


-def get_tokenizer(model) -> CLIPTokenizer:
-    # TODO remove legacy ckpt fallback handling
-    return (
-        getattr(model, "tokenizer", None)  # diffusers
-        or model.cond_stage_model.tokenizer
-    )  # ldm
-
-
-def get_text_encoder(model) -> Any:
-    # TODO remove legacy ckpt fallback handling
-    return getattr(
-        model, "text_encoder", None
-    ) or UnsqueezingLDMTransformer(  # diffusers
-        model.cond_stage_model.transformer
-    )  # ldm
-
-
-class UnsqueezingLDMTransformer:
-    def __init__(self, ldm_transformer):
-        self.ldm_transformer = ldm_transformer
-
-    @property
-    def device(self):
-        return self.ldm_transformer.device
-
-    def __call__(self, *args, **kwargs):
-        insufficiently_unsqueezed_tensor = self.ldm_transformer(*args, **kwargs)
-        return insufficiently_unsqueezed_tensor.unsqueeze(0)
-
-
 def get_uc_and_c_and_ec(
     prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False
 ):
@@ -64,11 +33,10 @@ def get_uc_and_c_and_ec(
         prompt_string
     )

-    tokenizer = get_tokenizer(model)
-    text_encoder = get_text_encoder(model)
+    tokenizer = model.tokenizer
     compel = Compel(
         tokenizer=tokenizer,
-        text_encoder=text_encoder,
+        text_encoder=model.text_encoder,
         textual_inversion_manager=model.textual_inversion_manager,
         dtype_for_device_getter=torch_dtype,
         truncate_long_prompts=False
diff --git a/invokeai/backend/web/invoke_ai_web_server.py b/invokeai/backend/web/invoke_ai_web_server.py
index a192073b73..dc77ff4723 100644
--- a/invokeai/backend/web/invoke_ai_web_server.py
+++ b/invokeai/backend/web/invoke_ai_web_server.py
@@ -29,7 +29,6 @@ from ..image_util import PngWriter, retrieve_metadata
 from ...frontend.merge.merge_diffusers import merge_diffusion_models
 from ..prompting import (
     get_prompt_structure,
-    get_tokenizer,
     get_tokens_for_prompt_object,
 )
 from ..stable_diffusion import PipelineIntermediateState
@@ -1274,7 +1273,7 @@ class InvokeAIWebServer:
                 None
                 if type(parsed_prompt) is Blend
                 else get_tokens_for_prompt_object(
-                    get_tokenizer(self.generate.model), parsed_prompt
+                    self.generate.model.tokenizer, parsed_prompt
                 )
             )
             attention_maps_image_base64_url = (

From 1a829bb998f3f28f35f8d3801d1447e9c3abea10 Mon Sep 17 00:00:00 2001
From: Kevin Turner <83819+keturn@users.noreply.github.com>
Date: Thu, 9 Mar 2023 18:04:11 -0800
Subject: [PATCH 15/18] pipeline: remove code for legacy model

---
 invokeai/backend/generate.py                 | 12 ----------
 .../stable_diffusion/diffusers_pipeline.py   | 24 -------------------
 2 files changed, 36 deletions(-)

diff --git a/invokeai/backend/generate.py b/invokeai/backend/generate.py
index ee5241bca1..35dba41ffb 100644
--- a/invokeai/backend/generate.py
+++ b/invokeai/backend/generate.py
@@ -495,18 +495,6 @@ class Generate:
             torch.cuda.reset_peak_memory_stats()

         results = list()
-        init_image = None
-        mask_image = None
-
-        try:
-            if (
-                self.free_gpu_mem
-                and self.model.cond_stage_model.device != self.model.device
-            ):
-                self.model.cond_stage_model.device = self.model.device
-                self.model.cond_stage_model.to(self.model.device)
-        except AttributeError:
-            pass

         try:
             uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index c97b122728..51e7b1ee1d 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -54,16 +54,6 @@ class PipelineIntermediateState:
     attention_map_saver: Optional[AttentionMapSaver] = None


-# copied from configs/stable-diffusion/v1-inference.yaml
-_default_personalization_config_params = dict(
-    placeholder_strings=["*"],
-    initializer_wods=["sculpture"],
-    per_image_tokens=False,
-    num_vectors_per_token=1,
-    progressive_words=False,
-)
-
-
 @dataclass
 class AddsMaskLatents:
     """Add the channels required for inpainting model input.
@@ -917,20 +907,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             device=self._model_group.device_for(self.unet),
         )

-    @property
-    def cond_stage_model(self):
-        return self.embeddings_provider
-
-    @torch.inference_mode()
-    def _tokenize(self, prompt: Union[str, List[str]]):
-        return self.tokenizer(
-            prompt,
-            padding="max_length",
-            max_length=self.tokenizer.model_max_length,
-            truncation=True,
-            return_tensors="pt",
-        )
-
     @property
     def channels(self) -> int:
         """Compatible with DiffusionWrapper"""

From 14c8738a71efc9ac26f89978c5f807861467b4bb Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 9 Mar 2023 21:41:45 -0500
Subject: [PATCH 16/18] fix dangling reference to _model_to_cpu and missing variable model_description

---
 invokeai/backend/model_management/model_manager.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 7639e79362..4627f283f5 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -500,7 +500,6 @@ class ModelManager(object):
         print(f">> Offloading {model_name} to CPU")
         model = self.models[model_name]["model"]
         model.offload_all()
-        self.models[model_name]["model"] = self._model_to_cpu(model)

         gc.collect()
         if self._has_cuda():
@@ -558,7 +557,7 @@ class ModelManager(object):
         """
         model_name = model_name or Path(repo_or_path).stem
         model_description = (
-            model_description or f"Imported diffusers model {model_name}"
+            description or f"Imported diffusers model {model_name}"
         )
         new_config = dict(
             description=model_description,

From bb3d1bb6cb2a7a9a69861bfb9266072411176d3b Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 9 Mar 2023 22:24:43 -0500
Subject: [PATCH 17/18] Revert "Remove label from stale issues on comment event"

---
 .github/workflows/close-inactive-issues.yml | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
index 89c98c1c3f..dbb89cc8f2 100644
--- a/.github/workflows/close-inactive-issues.yml
+++ b/.github/workflows/close-inactive-issues.yml
@@ -2,8 +2,6 @@ name: Close inactive issues
 on:
   schedule:
     - cron: "00 6 * * *"
-  issue_comment:
-    types: [ "created" ]

 env:
   DAYS_BEFORE_ISSUE_STALE: 14
@@ -12,7 +10,6 @@ env:
 jobs:
   close-issues:
     runs-on: ubuntu-latest
-    if: ${{ !github.event.issue.pull_request }}
     permissions:
       issues: write
       pull-requests: write
@@ -21,9 +18,9 @@ jobs:
         with:
           days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
           days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
-          stale-issue-label: "stale"
-          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. Please reply with a comment to keep the issue open. We recommend testing with the latest release to make sure it hasn't been already fixed."
-          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please open a new one and reference issue ${{ github.event.issue.number }}."
+          stale-issue-label: "Inactive Issue"
+          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
+          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
           days-before-pr-stale: -1
           days-before-pr-close: -1
           repo-token: ${{ secrets.GITHUB_TOKEN }}

From 685df33584be6dbf4bd7af5db974923103962e32 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Fri, 10 Mar 2023 13:11:32 -0500
Subject: [PATCH 18/18] fix bug that caused black images when converting ckpts to diffusers in RAM (#2914)

Cause of the problem was inadvertent activation of the safety checker.
When conversion occurs on disk, the safety checker is disabled during
loading. However, when converting in RAM, the safety checker was not
removed, resulting in it activating even when user specified
--no-nsfw_checker.

This PR fixes the problem by detecting when the caller has requested the
InvokeAi StableDiffusionGeneratorPipeline class to be returned and
setting safety checker to None. Do not do this with diffusers models
destined for disk because then they will be incompatible with the merge
script!!

Closes #2836
---
 invokeai/backend/model_management/convert_ckpt_to_diffusers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index ad2c6afe9e..ae5550880a 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -1274,7 +1274,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
             tokenizer=tokenizer,
             unet=unet.to(precision),
             scheduler=scheduler,
-            safety_checker=safety_checker.to(precision),
+            safety_checker=None if return_generator_pipeline else safety_checker.to(precision),
             feature_extractor=feature_extractor,
         )
     else:
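
A minimal usage sketch of the in-RAM conversion path touched by PATCH 18 follows. The function name and the return_generator_pipeline flag come from the diff above; the checkpoint path, config path, and the remainder of the call signature are assumptions for illustration and are not confirmed by this patch series.

# Hypothetical sketch (not part of the patch series above): convert a .ckpt
# into an in-memory InvokeAI pipeline and confirm that the safety checker is
# disabled, which is the behavior PATCH 18 is meant to guarantee.
# Keyword arguments other than return_generator_pipeline are assumed.
from invokeai.backend.model_management.convert_ckpt_to_diffusers import (
    load_pipeline_from_original_stable_diffusion_ckpt,
)

pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
    checkpoint_path="models/ldm/my-model.ckpt",         # hypothetical path
    original_config_file="configs/v1-inference.yaml",   # hypothetical path
    return_generator_pipeline=True,  # request the InvokeAI pipeline class
)

# With the fix applied, the in-RAM pipeline carries no safety checker, so
# --no-nsfw_checker behaves the same as the on-disk conversion path.
assert pipeline.safety_checker is None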