Fix crashing when using the 2.1 model

We now require more free memory to avoid attention slicing. 17.5% free was not sufficient headroom, so now we require 25%.
This commit is contained in:
Jonathan 2023-02-21 12:35:03 -06:00 committed by GitHub
parent 3ab9d02883
commit 71bbd78574
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -330,7 +330,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
16 * \
latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \
bytes_per_element_needed_for_baddbmm_duplication
if max_size_required_for_baddbmm > (mem_free * 3.3 / 4.0): # 3.3 / 4.0 is from old Invoke code
if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # requires 25% headroom; was 3.3 / 4.0 (17.5% headroom) from old Invoke code
self.enable_attention_slicing(slice_size='max')
else:
self.disable_attention_slicing()