Merge branch 'main' into fix/inpaint_gen
@@ -33,7 +33,7 @@ from .diffusion import (
     PostprocessingSettings,
     BasicConditioningInfo,
 )
-from ..util import normalize_device
+from ..util import normalize_device, auto_detect_slice_size
 
 
 @dataclass

@@ -291,6 +291,24 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         if xformers is available, use it, otherwise use sliced attention.
         """
         config = InvokeAIAppConfig.get_config()
+        if config.attention_type == "xformers":
+            self.enable_xformers_memory_efficient_attention()
+            return
+        elif config.attention_type == "sliced":
+            slice_size = config.attention_slice_size
+            if slice_size == "auto":
+                slice_size = auto_detect_slice_size(latents)
+            elif slice_size == "balanced":
+                slice_size = "auto"
+            self.enable_attention_slicing(slice_size=slice_size)
+            return
+        elif config.attention_type == "normal":
+            self.disable_attention_slicing()
+            return
+        elif config.attention_type == "torch-sdp":
+            raise Exception("torch-sdp attention slicing not yet implemented")
+
+        # the remainder of this code is called when attention_type=='auto'
         if self.unet.device.type == "cuda":
             if is_xformers_available() and not config.disable_xformers:
                 self.enable_xformers_memory_efficient_attention()
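For readers skimming the diff, below is a minimal, self-contained sketch of the new dispatch order: an explicitly configured attention_type short-circuits, and only "auto" falls through to device-based detection. The names _StubConfig and choose_attention are illustrative stand-ins, not InvokeAI APIs, and the non-CUDA fallback at the end is an assumption, since the diff is truncated after the CUDA branch.

from dataclasses import dataclass


@dataclass
class _StubConfig:
    """Stand-in for InvokeAIAppConfig; only the fields the dispatch reads."""
    attention_type: str = "auto"
    attention_slice_size: str = "auto"
    disable_xformers: bool = False


def choose_attention(config: _StubConfig, device_type: str, xformers_available: bool) -> str:
    """Mirror the branch order of the diff: explicit settings win, 'auto' falls through."""
    if config.attention_type == "xformers":
        return "xformers"
    elif config.attention_type == "sliced":
        slice_size = config.attention_slice_size
        # in the diff, "auto" is resolved by auto_detect_slice_size(latents),
        # while "balanced" maps to diffusers' built-in "auto" slicing heuristic
        if slice_size == "balanced":
            slice_size = "auto"
        return f"sliced(slice_size={slice_size})"
    elif config.attention_type == "normal":
        return "normal (slicing disabled)"
    elif config.attention_type == "torch-sdp":
        raise NotImplementedError("torch-sdp attention not yet implemented")

    # attention_type == 'auto': pick based on device and xformers availability
    if device_type == "cuda" and xformers_available and not config.disable_xformers:
        return "xformers"
    # assumed fallback; the diff ends inside the CUDA branch
    return "sliced(slice_size=auto)"


if __name__ == "__main__":
    print(choose_attention(_StubConfig(attention_type="sliced", attention_slice_size="balanced"), "cuda", True))
    print(choose_attention(_StubConfig(), "cuda", True))  # auto -> xformers
    print(choose_attention(_StubConfig(), "mps", True))   # auto, non-CUDA -> assumed sliced fallback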