mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
Fix crashing when using 2.1 model
We now require more free memory before skipping attention slicing: leaving only 17.5% of free memory as headroom (threshold of 82.5%, i.e. 3.3/4.0) was not sufficient, so the threshold is lowered to 75% (3.0/4.0), requiring 25% headroom.
This commit is contained in:
parent
3ab9d02883
commit
71bbd78574
@ -330,7 +330,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
|
|||||||
16 * \
|
16 * \
|
||||||
latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \
|
latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \
|
||||||
bytes_per_element_needed_for_baddbmm_duplication
|
bytes_per_element_needed_for_baddbmm_duplication
|
||||||
if max_size_required_for_baddbmm > (mem_free * 3.3 / 4.0): # 3.3 / 4.0 is from old Invoke code
|
if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code
|
||||||
self.enable_attention_slicing(slice_size='max')
|
self.enable_attention_slicing(slice_size='max')
|
||||||
else:
|
else:
|
||||||
self.disable_attention_slicing()
|
self.disable_attention_slicing()
|
||||||
|
Loading…
x
Reference in New Issue
Block a user