mirror of https://github.com/invoke-ai/InvokeAI

add --sequential_guidance option for low-RAM tradeoff

parent 6c8d4b091e
commit 2dded68267
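For context on the tradeoff named in the commit message: classifier-free guidance normally evaluates the conditioned and unconditioned predictions in a single doubled batch, which roughly doubles peak activation memory; the serial alternative runs two smaller forward passes instead. The following is a minimal self-contained sketch of the two strategies, not InvokeAI's actual code: toy_model, the tensor shapes, and the scale value are illustrative assumptions.

import torch

def toy_model(x, sigma, c):
    # Stand-in for a UNet: a cheap per-sample function of x, sigma,
    # and the conditioning tensor, so the sketch runs without weights.
    return x * sigma.view(-1, 1, 1, 1) + c.mean(dim=(1, 2)).view(-1, 1, 1, 1)

def guided_noise_batched(model, x, sigma, uncond, cond, scale):
    # Parallel path: one forward pass over a doubled batch. Fast, but
    # peak activation memory is roughly twice a single-sample pass.
    x2 = torch.cat([x, x])
    sigma2 = torch.cat([sigma, sigma])
    unconditioned, conditioned = model(x2, sigma2, torch.cat([uncond, cond])).chunk(2)
    return unconditioned + scale * (conditioned - unconditioned)

def guided_noise_sequential(model, x, sigma, uncond, cond, scale):
    # Serial path: two forward passes at the original batch size.
    # About half the peak memory, at roughly twice the evaluation time.
    unconditioned = model(x, sigma, uncond)
    conditioned = model(x, sigma, cond)
    return unconditioned + scale * (conditioned - unconditioned)

if __name__ == "__main__":
    x = torch.randn(1, 4, 64, 64)       # latent
    sigma = torch.ones(1)               # noise level
    uncond = torch.randn(1, 77, 768)    # unconditioned embedding
    cond = torch.randn(1, 77, 768)      # prompt embedding
    a = guided_noise_batched(toy_model, x, sigma, uncond, cond, scale=7.5)
    b = guided_noise_sequential(toy_model, x, sigma, uncond, cond, scale=7.5)
    assert torch.allclose(a, b, atol=1e-5)  # same result either way

Both paths compute the same guided prediction; only the batching differs, which is why the commit can expose the choice as a pure speed-for-memory knob.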
@@ -60,6 +60,7 @@ def main():
     Globals.always_use_cpu = args.always_use_cpu
     Globals.internet_available = args.internet_available and check_internet()
     Globals.disable_xformers = not args.xformers
+    Globals.sequential_guidance = args.sequential_guidance
     Globals.ckpt_convert = args.ckpt_convert
 
     print(f">> Internet connectivity is {Globals.internet_available}")
@@ -91,14 +91,14 @@ import pydoc
 import re
 import shlex
 import sys
-import ldm.invoke
-import ldm.invoke.pngwriter
-
-from ldm.invoke.globals import Globals
-from ldm.invoke.prompt_parser import split_weighted_subprompts
 from argparse import Namespace
 from pathlib import Path
 
+import ldm.invoke
+import ldm.invoke.pngwriter
+from ldm.invoke.globals import Globals
+from ldm.invoke.prompt_parser import split_weighted_subprompts
+
 APP_ID = ldm.invoke.__app_id__
 APP_NAME = ldm.invoke.__app_name__
 APP_VERSION = ldm.invoke.__version__
@@ -484,6 +484,13 @@ class Args(object):
             action='store_true',
             help='Force free gpu memory before final decoding',
         )
+        model_group.add_argument(
+            '--sequential_guidance',
+            dest='sequential_guidance',
+            action='store_true',
+            help="Calculate guidance in serial instead of in parallel, lowering memory requirement "
+                 "at the expense of speed",
+        )
         model_group.add_argument(
             '--xformers',
             action=argparse.BooleanOptionalAction,
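Because the option is registered with action='store_true', it defaults to False and is enabled only when the flag is present on the command line; for example, assuming the usual launch script of this era (the script path is an assumption, it does not appear in the diff):

python scripts/invoke.py --sequential_guidance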
@@ -13,8 +13,8 @@ the attributes:
 
 import os
 import os.path as osp
-from pathlib import Path
 from argparse import Namespace
+from pathlib import Path
 from typing import Union
 
 Globals = Namespace()
@@ -48,6 +48,9 @@ Globals.internet_available = True
 # Whether to disable xformers
 Globals.disable_xformers = False
 
+# Low-memory tradeoff for guidance calculations.
+Globals.sequential_guidance = False
+
 # whether we are forcing full precision
 Globals.full_precision = False
 
@@ -8,6 +8,7 @@ import torch
 from diffusers.models.cross_attention import AttnProcessor
 from typing_extensions import TypeAlias
 
+from ldm.invoke.globals import Globals
 from ldm.models.diffusion.cross_attention_control import Arguments, \
     restore_default_cross_attention, override_cross_attention, Context, get_cross_attention_modules, \
     CrossAttentionType, SwapCrossAttnContext
@@ -35,7 +36,7 @@ class InvokeAIDiffuserComponent:
     * Hybrid conditioning (used for inpainting)
     '''
     debug_thresholding = False
-    sequential_conditioning = False
+    sequential_guidance = False
 
     @dataclass
     class ExtraConditioningInfo:
@@ -60,6 +61,7 @@ class InvokeAIDiffuserComponent:
         self.is_running_diffusers = is_running_diffusers
         self.model_forward_callback = model_forward_callback
         self.cross_attention_control_context = None
+        self.sequential_guidance = Globals.sequential_guidance
 
     @contextmanager
     def custom_attention_context(self,
@@ -154,7 +156,7 @@ class InvokeAIDiffuserComponent:
                 unconditioning,
                 conditioning,
                 cross_attention_control_types_to_do)
-        elif self.sequential_conditioning:
+        elif self.sequential_guidance:
             unconditioned_next_x, conditioned_next_x = self._apply_standard_conditioning_sequentially(
                 x, sigma, unconditioning, conditioning)
 
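Stepping back, the whole commit is one flag-plumbing pattern: parse the option once, stash it on the shared Globals namespace, and let each consumer snapshot it at construction time (the elif above now reads self.sequential_guidance, matching the renamed class attribute). Below is a minimal self-contained sketch of that pattern; the names parse_cli and DiffuserComponent are illustrative stand-ins, not InvokeAI's modules.

import argparse
from argparse import Namespace

# Shared settings namespace, in the spirit of ldm.invoke.globals.
Globals = Namespace()
Globals.sequential_guidance = False  # low-memory tradeoff, off by default

def parse_cli(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--sequential_guidance',
        dest='sequential_guidance',
        action='store_true',
        help="Calculate guidance in serial instead of in parallel, "
             "lowering memory requirement at the expense of speed",
    )
    return parser.parse_args(argv)

class DiffuserComponent:
    def __init__(self):
        # Consumers snapshot the global at construction time, so the
        # flag must be copied onto Globals before components are built.
        self.sequential_guidance = Globals.sequential_guidance

if __name__ == "__main__":
    args = parse_cli(['--sequential_guidance'])
    Globals.sequential_guidance = args.sequential_guidance
    assert DiffuserComponent().sequential_guidance is True

One consequence of this design, visible in the constructor hunk above: components read the global once in __init__, so toggling Globals.sequential_guidance after a component exists has no effect on it.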