From aae60b6142935544e2a5fa0b2f4ec9bdb4da1302 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 3 Jul 2023 10:08:10 -0400 Subject: [PATCH] quash memory leak when compel invocation is called --- invokeai/app/invocations/compel.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 8c6b23944c..0421841e8a 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -2,6 +2,7 @@ from typing import Literal, Optional, Union from pydantic import BaseModel, Field from contextlib import ExitStack import re +import torch from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig from .model import ClipField @@ -56,6 +57,7 @@ class CompelInvocation(BaseInvocation): }, } + @torch.no_grad() def invoke(self, context: InvocationContext) -> CompelOutput: tokenizer_info = context.services.model_manager.get_model(