diff --git a/ldm/invoke/generator/base.py b/ldm/invoke/generator/base.py
index cb2a7e2df8..88c51d8fbd 100644
--- a/ldm/invoke/generator/base.py
+++ b/ldm/invoke/generator/base.py
@@ -114,7 +114,8 @@ class Generator:
 
                 results.append([image, seed])
 
                 if image_callback is not None:
-                    image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_images[-1])
+                    attention_maps_image = None if len(attention_maps_images)==0 else attention_maps_images[-1]
+                    image_callback(image, seed, first_seed=first_seed, attention_maps_image=attention_maps_image)
 
                 seed = self.new_seed()
diff --git a/ldm/models/diffusion/cross_attention_control.py b/ldm/models/diffusion/cross_attention_control.py
index 1c932be186..b32ccecae7 100644
--- a/ldm/models/diffusion/cross_attention_control.py
+++ b/ldm/models/diffusion/cross_attention_control.py
@@ -281,7 +281,7 @@ class InvokeAICrossAttentionMixin:
             return self.einsum_op_slice_dim1(q, k, v, slice_size)
 
         # fallback for when there is no saved strategy, or saved strategy does not slice
-        mem_free_total = self.cached_mem_free_total or get_mem_free_total(q.device)
+        mem_free_total = get_mem_free_total(q.device)
 
         # Divide factor of safety as there's copying and fragmentation
         return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20))
diff --git a/tests/pr_prompt.txt b/tests/pr_prompt.txt
deleted file mode 100644
index c1a8e00cbe..0000000000
--- a/tests/pr_prompt.txt
+++ /dev/null
@@ -1 +0,0 @@
-banana sushi -Ak_lms -S42 -s10
diff --git a/tests/validate_pr_prompt.txt b/tests/validate_pr_prompt.txt
index c1a8e00cbe..06c1b1ca4d 100644
--- a/tests/validate_pr_prompt.txt
+++ b/tests/validate_pr_prompt.txt
@@ -1 +1,3 @@
-banana sushi -Ak_lms -S42 -s10
+banana sushi -Ak_lms -S42 -s5
+banana sushi -Ak_heun -S42 -s5
+banana sushi -Addim -S42 -s5