diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index 1fb7832031..9494701565 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -16,8 +16,6 @@ from compel.prompt_parser import (
     Fragment,
 )
 
-from invokeai.backend.globals import Globals
-
 
 class ConditioningField(BaseModel):
     conditioning_name: Optional[str] = Field(default=None, description="The name of conditioning data")
@@ -102,7 +100,7 @@ class CompelInvocation(BaseInvocation):
 
         prompt: Union[FlattenedPrompt, Blend] = Compel.parse_prompt_string(prompt_str)
 
-        if getattr(Globals, "log_tokenization", False):
+        if context.services.configuration.log_tokenization:
             log_tokenization_for_prompt_object(prompt, tokenizer)
 
         c, options = compel.build_conditioning_tensor_for_prompt_object(prompt)