Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Switch to using max for attention slicing in all cases for the time being. (#2569)
This commit is contained in:
parent 3efe9899c2
commit f0f3520bca
@@ -317,7 +317,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             # fix is in https://github.com/kulinseth/pytorch/pull/222 but no idea when it will get merged to pytorch mainline.
             pass
         else:
-            self.enable_attention_slicing(slice_size='auto')
+            self.enable_attention_slicing(slice_size='max')
 
     def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
                               conditioning_data: ConditioningData,
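For context, a minimal sketch of what the changed call does, assuming a plain diffusers StableDiffusionPipeline rather than InvokeAI's subclass (the model ID is illustrative, not from the commit). In diffusers, slice_size='max' computes attention one slice at a time, minimizing peak memory at some speed cost, while 'auto' slices by half the number of attention heads.

# Hedged sketch: standalone diffusers usage assumed; not the InvokeAI wrapper code itself.
import torch
from diffusers import StableDiffusionPipeline

# Model ID chosen for illustration only.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
)

# slice_size="max" slices attention as finely as possible (slice size 1),
# matching the behavior this commit switches to; "auto" was the previous value.
pipe.enable_attention_slicing(slice_size="max")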