From 5e09dd380dce3c14e86bb06ef647f5359f9297c9 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Thu, 4 May 2023 14:10:03 +1000
Subject: [PATCH] Revert "feat(nodes): free gpu mem after invocation"

This reverts commit 99cb33f477306d5dcc455efe04053ce41b8d85bd.
---
 invokeai/app/services/processor.py | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/invokeai/app/services/processor.py b/invokeai/app/services/processor.py
index 0c2ad28bf5..35cbcd5068 100644
--- a/invokeai/app/services/processor.py
+++ b/invokeai/app/services/processor.py
@@ -1,15 +1,11 @@
-import gc
 import traceback
 from threading import Event, Thread, BoundedSemaphore
 
-import torch
-
 from ..invocations.baseinvocation import InvocationContext
 from .invocation_queue import InvocationQueueItem
 from .invoker import InvocationProcessorABC, Invoker
 from ..models.exceptions import CanceledException
 
-
 class DefaultInvocationProcessor(InvocationProcessorABC):
     __invoker_thread: Thread
     __stop_event: Event
@@ -26,7 +22,9 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
             target=self.__process,
             kwargs=dict(stop_event=self.__stop_event),
         )
-        self.__invoker_thread.daemon = True  # TODO: make async and do not use threads
+        self.__invoker_thread.daemon = (
+            True  # TODO: make async and do not use threads
+        )
         self.__invoker_thread.start()
 
     def stop(self, *args, **kwargs) -> None:
@@ -50,15 +48,13 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                 )
 
                 # get the source node id to provide to clients (the prepared node id is not as useful)
-                source_node_id = graph_execution_state.prepared_source_mapping[
-                    invocation.id
-                ]
+                source_node_id = graph_execution_state.prepared_source_mapping[invocation.id]
 
                 # Send starting event
                 self.__invoker.services.events.emit_invocation_started(
                     graph_execution_state_id=graph_execution_state.id,
                     node=invocation.dict(),
-                    source_node_id=source_node_id,
+                    source_node_id=source_node_id
                 )
 
                 # Invoke
@@ -118,12 +114,11 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     )
                     pass
 
-                finally:
-                    gc.collect()
-                    torch.cuda.empty_cache()
 
                 # Check queue to see if this is canceled, and skip if so
-                if self.__invoker.services.queue.is_canceled(graph_execution_state.id):
+                if self.__invoker.services.queue.is_canceled(
+                    graph_execution_state.id
+                ):
                     continue
 
                 # Queue any further commands if invoking all
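
For context, the reverted feature ran a Python garbage-collection pass and
released the CUDA allocator cache after every invocation. A minimal sketch of
that pattern follows; the wrapper name `run_invocation` is hypothetical, and
only the two cleanup calls in the `finally:` block come from the deleted lines
above.

    import gc

    import torch

    def run_invocation(invocation, context):
        # Hypothetical wrapper illustrating the cleanup pattern this
        # patch removes from DefaultInvocationProcessor.__process.
        try:
            return invocation.invoke(context)
        finally:
            # Run a full Python GC pass, then release cached CUDA blocks
            # back to the driver so the VRAM is visible to other processes.
            gc.collect()
            torch.cuda.empty_cache()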