add --sequential_guidance option for low-RAM tradeoff

This commit is contained in:
Kevin Turner 2023-02-19 21:21:14 -08:00
parent 6c8d4b091e
commit 2dded68267
4 changed files with 24 additions and 11 deletions

View File

@@ -60,6 +60,7 @@ def main():
     Globals.always_use_cpu = args.always_use_cpu
     Globals.internet_available = args.internet_available and check_internet()
     Globals.disable_xformers = not args.xformers
+    Globals.sequential_guidance = args.sequential_guidance
     Globals.ckpt_convert = args.ckpt_convert
     print(f">> Internet connectivity is {Globals.internet_available}")
@@ -749,7 +750,7 @@ def import_ckpt_model(
     base_name = Path(url_attachment_name(path_or_url)).name if is_a_url else Path(path_or_url).name
     default_name = Path(base_name).stem
     default_description = f"Imported model {default_name}"
     model_name, model_description = _get_model_name_and_desc(
         manager,
         completer,
@@ -834,7 +835,7 @@ def _ask_for_config_file(model_path: Union[str,Path], completer, plural: bool=False):
         '2': 'v2-inference-v.yaml',
         '3': 'v1-inpainting-inference.yaml',
     }
     prompt = '''What type of models are these?:
[1] Models based on Stable Diffusion 1.X
[2] Models based on Stable Diffusion 2.X
@@ -843,7 +844,7 @@ def _ask_for_config_file(model_path: Union[str,Path], completer, plural: bool=False):
[1] A model based on Stable Diffusion 1.X
[2] A model based on Stable Diffusion 2.X
[3] An inpainting models based on Stable Diffusion 1.X
[4] Something else'''
     print(prompt)
     choice = input(f'Your choice: [{default}] ')
     choice = choice.strip() or default

View File

@@ -91,14 +91,14 @@ import pydoc
 import re
 import shlex
 import sys
-import ldm.invoke
-import ldm.invoke.pngwriter
-from ldm.invoke.globals import Globals
-from ldm.invoke.prompt_parser import split_weighted_subprompts
 from argparse import Namespace
 from pathlib import Path
+import ldm.invoke
+import ldm.invoke.pngwriter
+from ldm.invoke.globals import Globals
+from ldm.invoke.prompt_parser import split_weighted_subprompts

 APP_ID = ldm.invoke.__app_id__
 APP_NAME = ldm.invoke.__app_name__
 APP_VERSION = ldm.invoke.__version__
@@ -484,6 +484,13 @@ class Args(object):
             action='store_true',
             help='Force free gpu memory before final decoding',
         )
+        model_group.add_argument(
+            '--sequential_guidance',
+            dest='sequential_guidance',
+            action='store_true',
+            help="Calculate guidance in serial instead of in parallel, lowering memory requirement "
+                 "at the expense of speed",
+        )
         model_group.add_argument(
             '--xformers',
             action=argparse.BooleanOptionalAction,

View File

@@ -13,8 +13,8 @@ the attributes:
 import os
 import os.path as osp
-from pathlib import Path
 from argparse import Namespace
+from pathlib import Path
 from typing import Union

 Globals = Namespace()
@@ -48,6 +48,9 @@ Globals.internet_available = True
 # Whether to disable xformers
 Globals.disable_xformers = False

+# Low-memory tradeoff for guidance calculations.
+Globals.sequential_guidance = False
+
 # whether we are forcing full precision
 Globals.full_precision = False

View File

@@ -8,6 +8,7 @@ import torch
 from diffusers.models.cross_attention import AttnProcessor
 from typing_extensions import TypeAlias

+from ldm.invoke.globals import Globals
 from ldm.models.diffusion.cross_attention_control import Arguments, \
     restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \
     CrossAttentionType, SwapCrossAttnContext
@@ -35,7 +36,7 @@ class InvokeAIDiffuserComponent:
     * Hybrid conditioning (used for inpainting)
     '''
     debug_thresholding = False
-    sequential_conditioning = False
+    sequential_guidance = False

     @dataclass
     class ExtraConditioningInfo:
@@ -60,6 +61,7 @@ class InvokeAIDiffuserComponent:
         self.is_running_diffusers = is_running_diffusers
         self.model_forward_callback = model_forward_callback
         self.cross_attention_control_context = None
+        self.sequential_guidance = Globals.sequential_guidance

     @contextmanager
     def custom_attention_context(self,
@@ -154,7 +156,7 @@ class InvokeAIDiffuserComponent:
                 unconditioning,
                 conditioning,
                 cross_attention_control_types_to_do)
-        elif self.sequential_guidance:
+        elif self.sequential_guidance:
             unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning_sequentially(
                 x, sigma, unconditioning, conditioning)