diff --git a/ldm/invoke/conditioning.py b/ldm/invoke/conditioning.py
index 54ed10bc57..99722ff388 100644
--- a/ldm/invoke/conditioning.py
+++ b/ldm/invoke/conditioning.py
@@ -94,8 +94,8 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     """
 
     if log_tokens or Globals.log_tokenization:
-        print(f">> Parsed prompt to {parsed_prompt}")
-        print(f">> Parsed negative prompt to {parsed_negative_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Prompt: {parsed_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {parsed_negative_prompt}")
 
     conditioning = None
     cac_args: cross_attention_control.Arguments = None
@@ -274,12 +274,12 @@ def log_tokenization(text, model, display_label=None):
     # usually tokens have '</w>' to indicate end-of-word,
     # but for readability it has been replaced with ' '
     """
-
     tokens = model.cond_stage_model.tokenizer.tokenize(text)
     tokenized = ""
     discarded = ""
     usedTokens = 0
     totalTokens = len(tokens)
+
     for i in range(0, totalTokens):
         token = tokens[i].replace('</w>', ' ')
         # alternate color
@@ -289,8 +289,11 @@ def log_tokenization(text, model, display_label=None):
             usedTokens += 1
         else:  # over max token length
             discarded = discarded + f"\x1b[0;3{s};40m{token}"
-    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
+
+    if usedTokens > 0:
+        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f'{tokenized}\x1b[0m')
+
     if discarded != "":
-        print(
-            f">> Tokens Discarded ({totalTokens - usedTokens}):\n{discarded}\x1b[0m"
-        )
+        print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
+        print(f'{discarded}\x1b[0m')
\ No newline at end of file
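
Demonstration (not part of the patch): a minimal, self-contained sketch of the colorized token logging that the patched log_tokenization() produces. It substitutes a plain token list for model.cond_stage_model.tokenizer and a hard-coded context length for model.cond_stage_model.max_length, so log_tokenization_demo, MAX_LENGTH, and the sample tokens are illustrative assumptions, not InvokeAI code.

# Minimal sketch of the patched log_tokenization() output, runnable
# without InvokeAI. MAX_LENGTH stands in for
# model.cond_stage_model.max_length (assumed CLIP-style 77 here).
MAX_LENGTH = 77

def log_tokenization_demo(tokens, display_label=None, max_length=MAX_LENGTH):
    tokenized = ""
    discarded = ""
    usedTokens = 0
    totalTokens = len(tokens)

    for i in range(0, totalTokens):
        # CLIP BPE tokens end with '</w>'; replace it for readability
        token = tokens[i].replace('</w>', ' ')
        # cycle through ANSI foreground colors 1-6 on a black background
        s = (usedTokens % 6) + 1
        if i < max_length:
            tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
            usedTokens += 1
        else:  # over max token length
            discarded = discarded + f"\x1b[0;3{s};40m{token}"

    if usedTokens > 0:
        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
        print(f'{tokenized}\x1b[0m')  # \x1b[0m resets the terminal color

    if discarded != "":
        print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
        print(f'{discarded}\x1b[0m')

log_tokenization_demo(['a</w>', 'banana</w>', 'sushi</w>'], display_label='(demo)')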