From 829b9ad66bb8f95c19b9aa2744d15112760009e4 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Mon, 27 May 2024 10:53:12 -0400
Subject: [PATCH] Add a callout about the hackiness of dropping tokens in the
 TextualInversionManager.

---
 invokeai/backend/textual_inversion.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py
index 005031c95b..98104f769e 100644
--- a/invokeai/backend/textual_inversion.py
+++ b/invokeai/backend/textual_inversion.py
@@ -109,6 +109,9 @@ class TextualInversionManager(BaseTextualInversionManager):
         # compel.embeddings_provider.get_token_ids(), which first removes and then adds back the start and end tokens.
         max_length = self.tokenizer.model_max_length - 2
         if len(new_token_ids) > max_length:
+            # HACK: If TI token expansion causes us to exceed the max text encoder input length, we silently discard
+            # tokens. Token expansion should happen in a way that is compatible with compel's default handling of long
+            # prompts.
             new_token_ids = new_token_ids[0:max_length]
 
         return new_token_ids
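
Note (not part of the patch): below is a minimal, self-contained sketch of the behaviour the HACK comment calls out. The function name, the expansion table, and the example values are hypothetical; the sketch only mimics how expanding a textual-inversion trigger token into several embedding tokens can push a prompt past model_max_length - 2, after which the trailing token IDs are silently truncated.

# Hypothetical illustration of the silent truncation described in the HACK comment above.
def expand_and_truncate(
    token_ids: list[int],
    ti_expansions: dict[int, list[int]],
    model_max_length: int,
) -> list[int]:
    """Expand each TI trigger token into its embedding tokens, then truncate to fit."""
    new_token_ids: list[int] = []
    for token_id in token_ids:
        if token_id in ti_expansions:
            # A single trigger token may expand into several embedding tokens.
            new_token_ids.extend(ti_expansions[token_id])
        else:
            new_token_ids.append(token_id)

    # Reserve 2 positions for the start/end tokens that compel removes and re-adds.
    max_length = model_max_length - 2
    if len(new_token_ids) > max_length:
        # Anything past max_length is silently discarded -- the behaviour the patch documents.
        new_token_ids = new_token_ids[:max_length]
    return new_token_ids


if __name__ == "__main__":
    # 75 ordinary prompt tokens plus one TI trigger (id 999) that expands into 4 tokens.
    prompt = list(range(75)) + [999]
    expansions = {999: [1001, 1002, 1003, 1004]}
    result = expand_and_truncate(prompt, expansions, model_max_length=77)
    print(len(result))  # 75: all four expanded embedding tokens were silently dropped.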