From 75b62d6ca8abacb4174212cb17cb5a130c8a7f05 Mon Sep 17 00:00:00 2001
From: whosawhatsis
Date: Sat, 4 Feb 2023 19:56:20 -0800
Subject: [PATCH 1/6] Add --log_tokenization to sysargs

This allows the --log_tokenization option to be used as a command line
argument (or from invokeai.init), making it possible to view tokenization
information in the terminal when using the web interface.
---
 ldm/invoke/args.py         | 9 ++++++++-
 ldm/invoke/conditioning.py | 5 +++--
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/ldm/invoke/args.py b/ldm/invoke/args.py
index 3904d2f573..4707565424 100644
--- a/ldm/invoke/args.py
+++ b/ldm/invoke/args.py
@@ -196,6 +196,7 @@ class Args(object):
         elif os.path.exists(legacyinit):
             print(f'>> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init.')
             sysargs.insert(0,f'@{legacyinit}')
+        Globals.log_tokenization = self._arg_parser.parse_args(sysargs).log_tokenization
         self._arg_switches = self._arg_parser.parse_args(sysargs)
         return self._arg_switches

@@ -599,6 +600,12 @@ class Args(object):
             help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
             default='k_lms',
         )
+        render_group.add_argument(
+            '--log_tokenization',
+            '-t',
+            action='store_true',
+            help='shows how the prompt is split into tokens'
+        )
         render_group.add_argument(
             '-f',
             '--strength',
@@ -744,7 +751,7 @@
             invoke> !fetch 0000015.8929913.png
             invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
             invoke> !fetch /path/to/images/*.png prompts.txt
-
+
             !replay /path/to/prompts.txt
             Replays all the prompts contained in the file prompts.txt.

diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py
index fec3c7e7b1..54ed10bc57 100644
--- a/ldm/invoke/conditioning.py
+++ b/ldm/invoke/conditioning.py
@@ -17,6 +17,7 @@ from ..models.diffusion import cross_attention_control
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ..modules.encoders.modules import WeightedFrozenCLIPEmbedder
 from ..modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter
+from ldm.invoke.globals import Globals


 def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
@@ -92,7 +93,7 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
     """

-    if log_tokens:
+    if log_tokens or Globals.log_tokenization:
         print(f">> Parsed prompt to {parsed_prompt}")
         print(f">> Parsed negative prompt to {parsed_negative_prompt}")

@@ -235,7 +236,7 @@ def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedProm
     fragments = [x.text for x in flattened_prompt.children]
     weights = [x.weight for x in flattened_prompt.children]
     embeddings, tokens = model.get_learned_conditioning([fragments], return_tokens=True, fragment_weights=[weights])
-    if log_tokens:
+    if log_tokens or Globals.log_tokenization:
         text = " ".join(fragments)
         log_tokenization(text, model, display_label=log_display_label)


From f1dd76c20b9dd853aa36f95e066c235a4c33f589 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:10 +1300
Subject: [PATCH 2/6] Remove Deprecation Warning from Diffusers Pipeline

---
 ldm/invoke/generator/diffusers_pipeline.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ldm/invoke/generator/diffusers_pipeline.py b/ldm/invoke/generator/diffusers_pipeline.py
index a63159b118..f8efe03762 100644
--- a/ldm/invoke/generator/diffusers_pipeline.py
+++ b/ldm/invoke/generator/diffusers_pipeline.py
@@ -4,7 +4,6 @@ import dataclasses
 import inspect
 import secrets
 import sys
-import warnings
 from dataclasses import dataclass, field
 from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any

@@ -641,7 +640,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):

     @property
     def cond_stage_model(self):
-        warnings.warn("legacy compatibility layer", DeprecationWarning)
         return self.prompt_fragments_to_embeddings_converter

     @torch.inference_mode()

From f7532cdfd4b0ea08ef5d2c196815583d58992128 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:29 +1300
Subject: [PATCH 3/6] Beautify Token Log Outputs

---
 ldm/invoke/conditioning.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py
index 54ed10bc57..99722ff388 100644
--- a/ldm/invoke/conditioning.py
+++ b/ldm/invoke/conditioning.py
@@ -94,8 +94,8 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     """

     if log_tokens or Globals.log_tokenization:
-        print(f">> Parsed prompt to {parsed_prompt}")
-        print(f">> Parsed negative prompt to {parsed_negative_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Prompt: {parsed_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {parsed_negative_prompt}")

     conditioning = None
     cac_args: cross_attention_control.Arguments = None
@@ -274,12 +274,12 @@ def log_tokenization(text, model, display_label=None):
     # usually tokens have '</w>' to indicate end-of-word,
     # but for readability it has been replaced with ' '
     """
-
     tokens = model.cond_stage_model.tokenizer.tokenize(text)
     tokenized = ""
     discarded = ""
     usedTokens = 0
     totalTokens = len(tokens)
+
     for i in range(0, totalTokens):
         token = tokens[i].replace('</w>', ' ')
         # alternate color
         s = (usedTokens % 6) + 1
         if i < model.cond_stage_model.max_length:
             tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
             usedTokens += 1
         else:  # over max token length
             discarded = discarded + f"\x1b[0;3{s};40m{token}"
-    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
+
+    if usedTokens > 0:
+        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f'{tokenized}\x1b[0m')
+
     if discarded != "":
-        print(
-            f">> Tokens Discarded ({totalTokens - usedTokens}):\n{discarded}\x1b[0m"
-        )
+        print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
+        print(f'{discarded}\x1b[0m')
\ No newline at end of file

From bf4344be5137868a6ca04c4b0d52aaccb1dd49e7 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:40 +1300
Subject: [PATCH 4/6] Beautify Usage Stats Log

---
 ldm/generate.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldm/generate.py b/ldm/generate.py
index c421a52802..002ba47a97 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -574,7 +574,7 @@ class Generate:
                 print('>> Could not generate image.')

             toc = time.time()
-            print('>> Usage stats:')
+            print('\n>> Usage stats:')
             print(
                 f'>> {len(results)} image(s) generated in', '%4.2fs' % (
                     toc - tic)

From 9c8fcaaf864a4ca00b875150dbb14379a5099a71 Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sun, 5 Feb 2023 22:55:57 +1300
Subject: [PATCH 5/6] Beautify & Cleanup WebUI Logs

---
 invokeai/backend/invoke_ai_web_server.py | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py
index 6ec8098f59..9dd18ebe65 100644
--- a/invokeai/backend/invoke_ai_web_server.py
+++ b/invokeai/backend/invoke_ai_web_server.py
@@ -626,9 +626,10 @@ class InvokeAIWebServer:
                     printable_parameters["init_mask"][:64] + "..."
                 )

-            print(
-                f">> Image generation requested: {printable_parameters}\nESRGAN parameters: {esrgan_parameters}\nFacetool parameters: {facetool_parameters}"
-            )
+            print(f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
+            print(f'>> ESRGAN Parameters: {esrgan_parameters}')
+            print(f'>> Facetool Parameters: {facetool_parameters}')
+
             self.generate_images(
                 generation_parameters,
                 esrgan_parameters,
@@ -1154,7 +1155,7 @@ class InvokeAIWebServer:
                     image, os.path.basename(path), self.thumbnail_image_path
                 )

-                print(f'>> Image generated: "{path}"')
+                print(f'\n\n>> Image generated: "{path}"\n')
                 self.write_log_message(f'[Generated] "{path}": {command}')

                 if progress.total_iterations > progress.current_iteration:
@@ -1193,8 +1194,6 @@ class InvokeAIWebServer:

             progress.set_current_iteration(progress.current_iteration + 1)

-            print(generation_parameters)
-
             def diffusers_step_callback_adapter(*cb_args, **kwargs):
                 if isinstance(cb_args[0], PipelineIntermediateState):
                     progress_state: PipelineIntermediateState = cb_args[0]
@@ -1305,8 +1304,6 @@ class InvokeAIWebServer:

         rfc_dict["variations"] = variations

-        print(parameters)
-
         if rfc_dict["type"] == "img2img":
             rfc_dict["strength"] = parameters["strength"]
             rfc_dict["fit"] = parameters["fit"]  # TODO: Noncompliant

From 9307ce3dc39da744758bba0fe4166b4190e2c034 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sun, 5 Feb 2023 11:05:33 -0500
Subject: [PATCH 6/6] this fixes a crash in the TI frontend (#2527)

- This fixes an edge case crash when the textual inversion frontend tried
  to display the list of models and no default model defined in models.yaml

Co-authored-by: Jonathan <34005131+JPPhoto@users.noreply.github.com>
---
 ldm/invoke/training/textual_inversion.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ldm/invoke/training/textual_inversion.py b/ldm/invoke/training/textual_inversion.py
index 835ad2d04e..5402e05ec9 100755
--- a/ldm/invoke/training/textual_inversion.py
+++ b/ldm/invoke/training/textual_inversion.py
@@ -295,7 +295,8 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             for idx in range(len(model_names))
             if "default" in conf[model_names[idx]]
         ]
-        return (model_names, defaults[0])
+        default = defaults[0] if len(defaults)>0 else 0
+        return (model_names, default)

     def marshall_arguments(self) -> dict:
         args = dict()
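
For reference, the listing below is a minimal standalone sketch of the [TOKENLOG] output that patch 1 enables (via --log_tokenization on the command line or in invokeai.init) and patch 3 reformats. It is illustrative only, not InvokeAI code: log_tokenization_demo, the whitespace tokenizer, and MAX_TOKENS are hypothetical stand-ins for the real log_tokenization(), the CLIP tokenizer, and model.cond_stage_model.max_length.

# Standalone approximation of the alternating-ANSI-color token log.
# Assumptions: a whitespace split stands in for the CLIP tokenizer,
# and MAX_TOKENS stands in for model.cond_stage_model.max_length.

MAX_TOKENS = 77  # assumed stand-in for the CLIP context length


def log_tokenization_demo(text: str, display_label: str = "") -> None:
    tokens = text.split()                      # toy tokenizer
    tokenized, discarded = "", ""
    used_tokens = 0

    for i, token in enumerate(tokens):
        color = (used_tokens % 6) + 1          # cycle through six ANSI colors
        if i < MAX_TOKENS:
            tokenized += f"\x1b[0;3{color};40m{token} "
            used_tokens += 1
        else:                                  # over the token limit
            discarded += f"\x1b[0;3{color};40m{token} "

    if used_tokens > 0:
        print(f'\n>> [TOKENLOG] Tokens {display_label} ({used_tokens}):')
        print(f'{tokenized}\x1b[0m')

    if discarded:
        print(f'\n>> [TOKENLOG] Tokens Discarded ({len(tokens) - used_tokens}):')
        print(f'{discarded}\x1b[0m')


if __name__ == "__main__":
    log_tokenization_demo("a fantastic alien landscape, highly detailed", "(prompt)")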