mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
quash memory leak when compel invocation called
This commit is contained in:
parent 252c790969
commit 4d2c7806fc
@@ -2,6 +2,7 @@ from typing import Literal, Optional, Union
 from pydantic import BaseModel, Field
 from contextlib import ExitStack
 import re
+import torch
 
 from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
 from .model import ClipField
@@ -56,6 +57,7 @@ class CompelInvocation(BaseInvocation):
             },
         }
 
+    @torch.no_grad()
     def invoke(self, context: InvocationContext) -> CompelOutput:
 
         tokenizer_info = context.services.model_manager.get_model(
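
The decorator is the substance of the fix: torch.no_grad() stops autograd from recording the text-encoder forward pass inside invoke, so the conditioning tensors the node hands back no longer keep a computation graph (and all of its intermediate activations) alive between invocations, which is the growth the commit message calls a memory leak. A minimal sketch of the same pattern follows; the TextEncoderInvocation class, layer sizes, and token shapes are made up for illustration and are not InvokeAI code.

# Illustrative only: a toy stand-in for the CLIP text encoder used by
# CompelInvocation, showing the effect of decorating an inference-only
# invoke() with torch.no_grad().
import torch
import torch.nn as nn

class TextEncoderInvocation:
    def __init__(self) -> None:
        # Toy encoder; shapes are arbitrary.
        self.encoder = nn.Sequential(nn.Embedding(49408, 768), nn.Linear(768, 768))

    @torch.no_grad()  # the same decorator the commit adds to CompelInvocation.invoke
    def invoke(self, token_ids: torch.Tensor) -> torch.Tensor:
        # With no_grad active, the returned embeddings carry no autograd graph,
        # so nothing from this forward pass is retained after the call.
        return self.encoder(token_ids)

if __name__ == "__main__":
    node = TextEncoderInvocation()
    tokens = torch.randint(0, 49408, (1, 77))
    out = node.invoke(tokens)
    print(out.requires_grad)  # False: no graph to keep alive

Without the decorator, caching the returned conditioning anywhere that outlives the call would pin the whole graph in memory on every invocation.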