Merge branch 'main' into 2.3.0rc4

commit 2e230774c2
Lincoln Stein, 2023-02-05 12:44:44 -05:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in the database)
6 changed files with 29 additions and 22 deletions

View File

@@ -626,9 +626,10 @@ class InvokeAIWebServer:
                 printable_parameters["init_mask"][:64] + "..."
             )
 
-        print(
-            f">> Image generation requested: {printable_parameters}\nESRGAN parameters: {esrgan_parameters}\nFacetool parameters: {facetool_parameters}"
-        )
+        print(f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
+        print(f'>> ESRGAN Parameters: {esrgan_parameters}')
+        print(f'>> Facetool Parameters: {facetool_parameters}')
+
         self.generate_images(
             generation_parameters,
             esrgan_parameters,
@@ -1154,7 +1155,7 @@ class InvokeAIWebServer:
                 image, os.path.basename(path), self.thumbnail_image_path
             )
 
-            print(f'>> Image generated: "{path}"')
+            print(f'\n\n>> Image generated: "{path}"\n')
             self.write_log_message(f'[Generated] "{path}": {command}')
 
             if progress.total_iterations > progress.current_iteration:
@@ -1193,8 +1194,6 @@ class InvokeAIWebServer:
             progress.set_current_iteration(progress.current_iteration + 1)
 
-            print(generation_parameters)
-
             def diffusers_step_callback_adapter(*cb_args, **kwargs):
                 if isinstance(cb_args[0], PipelineIntermediateState):
                     progress_state: PipelineIntermediateState = cb_args[0]
@@ -1305,8 +1304,6 @@ class InvokeAIWebServer:
         rfc_dict["variations"] = variations
 
-        print(parameters)
-
         if rfc_dict["type"] == "img2img":
             rfc_dict["strength"] = parameters["strength"]
             rfc_dict["fit"] = parameters["fit"]  # TODO: Noncompliant

View File

@@ -574,7 +574,7 @@ class Generate:
                 print('>> Could not generate image.')
 
             toc = time.time()
-            print('>> Usage stats:')
+            print('\n>> Usage stats:')
             print(
                 f'>> {len(results)} image(s) generated in', '%4.2fs' % (
                     toc - tic)
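
For context, the tic/toc bookkeeping this hunk touches is plain wall-clock timing around the generation loop. A self-contained sketch, with generate_some_images() standing in for the real call:

    import time

    def generate_some_images():
        # placeholder for the actual generation loop
        return ['0001.png', '0002.png']

    tic = time.time()
    results = generate_some_images()
    toc = time.time()
    print('\n>> Usage stats:')
    print(f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic))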

View File

@@ -196,6 +196,7 @@ class Args(object):
             elif os.path.exists(legacyinit):
                 print(f'>> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init.')
                 sysargs.insert(0,f'@{legacyinit}')
+        Globals.log_tokenization = self._arg_parser.parse_args(sysargs).log_tokenization
         self._arg_switches = self._arg_parser.parse_args(sysargs)
         return self._arg_switches
@@ -599,6 +600,12 @@ class Args(object):
             help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
             default='k_lms',
         )
+        render_group.add_argument(
+            '--log_tokenization',
+            '-t',
+            action='store_true',
+            help='shows how the prompt is split into tokens'
+        )
         render_group.add_argument(
             '-f',
             '--strength',
@@ -744,7 +751,7 @@ class Args(object):
    invoke> !fetch 0000015.8929913.png
    invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
    invoke> !fetch /path/to/images/*.png prompts.txt

    !replay /path/to/prompts.txt
    Replays all the prompts contained in the file prompts.txt.
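
The new --log_tokenization / -t switch is a standard argparse boolean flag: action='store_true' makes it default to False and flip to True when present, and the first hunk mirrors the parsed value into Globals so distant modules (like conditioning.py below) can consult it without threading the option through every call. A runnable sketch of that mechanism (the parser and Globals here are stand-ins, not the real Args plumbing):

    import argparse

    class Globals:
        log_tokenization = False   # stand-in for ldm.invoke.globals.Globals

    parser = argparse.ArgumentParser()
    render_group = parser.add_argument_group('render')
    render_group.add_argument(
        '--log_tokenization', '-t',
        action='store_true',                 # False unless the flag is given
        help='shows how the prompt is split into tokens',
    )

    opts = parser.parse_args(['-t'])
    Globals.log_tokenization = opts.log_tokenization
    assert Globals.log_tokenization is True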

View File

@@ -17,6 +17,7 @@ from ..models.diffusion import cross_attention_control
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ..modules.encoders.modules import WeightedFrozenCLIPEmbedder
 from ..modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter
+from ldm.invoke.globals import Globals
 
 
 def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
@@ -92,9 +93,9 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
     """
-    if log_tokens:
-        print(f">> Parsed prompt to {parsed_prompt}")
-        print(f">> Parsed negative prompt to {parsed_negative_prompt}")
+    if log_tokens or Globals.log_tokenization:
+        print(f"\n>> [TOKENLOG] Parsed Prompt: {parsed_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {parsed_negative_prompt}")
 
     conditioning = None
     cac_args: cross_attention_control.Arguments = None
@@ -235,7 +236,7 @@ def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedProm
     fragments = [x.text for x in flattened_prompt.children]
     weights = [x.weight for x in flattened_prompt.children]
     embeddings, tokens = model.get_learned_conditioning([fragments], return_tokens=True, fragment_weights=[weights])
-    if log_tokens:
+    if log_tokens or Globals.log_tokenization:
         text = " ".join(fragments)
         log_tokenization(text, model, display_label=log_display_label)
@@ -273,12 +274,12 @@ def log_tokenization(text, model, display_label=None):
     # usually tokens have '</w>' to indicate end-of-word,
     # but for readability it has been replaced with ' '
     """
     tokens = model.cond_stage_model.tokenizer.tokenize(text)
     tokenized = ""
     discarded = ""
     usedTokens = 0
     totalTokens = len(tokens)
 
     for i in range(0, totalTokens):
         token = tokens[i].replace('</w>', ' ')
         # alternate color
@@ -288,8 +289,11 @@ def log_tokenization(text, model, display_label=None):
             usedTokens += 1
         else:  # over max token length
             discarded = discarded + f"\x1b[0;3{s};40m{token}"
-    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
+
+    if usedTokens > 0:
+        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f'{tokenized}\x1b[0m')
+
     if discarded != "":
-        print(
-            f">> Tokens Discarded ({totalTokens - usedTokens}):\n{discarded}\x1b[0m"
-        )
+        print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
+        print(f'{discarded}\x1b[0m')
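
The [TOKENLOG] output colors each token with an alternating ANSI escape so adjacent tokens stay visually distinct, and anything past the model's token budget is reported as discarded. A standalone sketch of that coloring scheme, assuming a plain list of token strings and a 75-token budget (CLIP's 77-token window minus the begin/end markers; show_tokens is an illustrative name, not the real function):

    # sketch: alternate ANSI colors per token, in the spirit of log_tokenization
    def show_tokens(tokens, display_label=None, max_tokens=75):
        tokenized, discarded = "", ""
        for i, token in enumerate(tokens):
            s = (i % 6) + 1                        # cycle through six ANSI foreground colors
            token = token.replace('</w>', ' ')     # '</w>' marks end-of-word in CLIP's BPE
            if i < max_tokens:
                tokenized += f"\x1b[0;3{s};40m{token}"
            else:
                discarded += f"\x1b[0;3{s};40m{token}"
        used = min(len(tokens), max_tokens)
        if used > 0:
            print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({used}):')
            print(f'{tokenized}\x1b[0m')           # \x1b[0m resets the terminal colors
        if discarded:
            print(f'\n>> [TOKENLOG] Tokens Discarded ({len(tokens) - used}):')
            print(f'{discarded}\x1b[0m')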

View File

@@ -4,7 +4,6 @@ import dataclasses
 import inspect
 import secrets
 import sys
-import warnings
 from dataclasses import dataclass, field
 from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any
@@ -641,7 +640,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @property
     def cond_stage_model(self):
-        warnings.warn("legacy compatibility layer", DeprecationWarning)
         return self.prompt_fragments_to_embeddings_converter
 
     @torch.inference_mode()
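
Dropping the warnings.warn call means the cond_stage_model property no longer emits a DeprecationWarning on every access. For reference, the removed pattern looks like the sketch below (Legacy is an illustrative class, not from the codebase); note that Python suppresses DeprecationWarning by default outside __main__, so such a warning is easy to miss while still costing a call on each property read:

    import warnings

    class Legacy:
        @property
        def value(self):
            # fires every time the property is read
            warnings.warn("legacy compatibility layer", DeprecationWarning)
            return 42

    obj = Legacy()
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")   # surface warnings that are hidden by default
        _ = obj.value
    assert caught[0].category is DeprecationWarning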

View File

@@ -295,7 +295,8 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             for idx in range(len(model_names))
             if "default" in conf[model_names[idx]]
         ]
-        return (model_names, defaults[0])
+        default = defaults[0] if len(defaults)>0 else 0
+        return (model_names, default)
 
     def marshall_arguments(self) -> dict:
         args = dict()
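
The fix guards against a configuration in which no model is marked "default": indexing defaults[0] on an empty list raises IndexError, so the comprehension's result is checked first and index 0 is used as the fallback selection. A tiny sketch of the failure mode and the guard (the config contents are illustrative):

    conf = {'model-a': {}, 'model-b': {}}      # no entry carries a "default" key
    model_names = list(conf.keys())
    defaults = [idx for idx in range(len(model_names))
                if 'default' in conf[model_names[idx]]]

    # defaults == [], so defaults[0] would raise IndexError; fall back to index 0
    default = defaults[0] if len(defaults) > 0 else 0
    assert (model_names, default) == (['model-a', 'model-b'], 0)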