Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Just comment out cache emptying
This commit is contained in:
parent db5f1c8623
commit 52568c7329
@@ -860,9 +860,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata):
             vae.disable_tiling()

         # clear memory as vae decode can request a lot
-        torch.cuda.empty_cache()
-        if choose_torch_device() == torch.device("mps"):
-            mps.empty_cache()
+        # torch.cuda.empty_cache()
+        # if choose_torch_device() == torch.device("mps"):
+        #     mps.empty_cache()

         with torch.inference_mode():
             # copied from diffusers pipeline
@@ -874,9 +874,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata):

         image = VaeImageProcessor.numpy_to_pil(np_image)[0]

-        torch.cuda.empty_cache()
-        if choose_torch_device() == torch.device("mps"):
-            mps.empty_cache()
+        # torch.cuda.empty_cache()
+        # if choose_torch_device() == torch.device("mps"):
+        #     mps.empty_cache()

         image_dto = context.services.images.create(
             image=image,
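For context, the lines commented out here implement device-conditional cache emptying around the VAE decode: free the CUDA caching allocator's unused blocks, or, on Apple silicon, the MPS equivalent. Below is a minimal standalone sketch of that pattern; clear_device_cache is a hypothetical name, and the plain availability checks stand in for InvokeAI's choose_torch_device() helper, which this diff references but does not show.

import torch

def clear_device_cache() -> None:
    """Release cached, unused allocator blocks on the active accelerator."""
    if torch.cuda.is_available():
        # Returns unused blocks held by the CUDA caching allocator to the
        # driver, lowering reserved VRAM before a large upcoming allocation.
        torch.cuda.empty_cache()
    elif torch.backends.mps.is_available():
        # Same idea for the Metal Performance Shaders backend on macOS.
        torch.mps.empty_cache()

# Before this commit, equivalent calls ran just before and just after
# decoding latents to a full-resolution image, since that decode can
# request a large temporary allocation.
clear_device_cache()

Commenting these calls out trades that memory headroom for speed: emptying the cache forces the allocator to re-request memory from the driver on subsequent operations, which is a plausible (though unstated) motivation for this change.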