Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Multiple small fixes
This commit is contained in:
parent: fd8d1c12d4
commit: 9f088d1bf5
@@ -57,7 +57,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
 )
 from invokeai.backend.stable_diffusion.diffusion.custom_atttention import CustomAttnProcessor2_0
 from invokeai.backend.stable_diffusion.diffusion_backend import StableDiffusionBackend
-from invokeai.backend.stable_diffusion.extensions import PreviewExt
+from invokeai.backend.stable_diffusion.extensions.preview import PreviewExt
 from invokeai.backend.stable_diffusion.extensions_manager import ExtensionsManager
 from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP
 from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES
@@ -723,7 +723,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
     @torch.no_grad()
     @SilenceWarnings()  # This quenches the NSFW nag from diffusers.
     def _new_invoke(self, context: InvocationContext) -> LatentsOutput:
-        with ExitStack() as exit_stack:
+        # TODO: remove suppression when extensions which use models are added
+        with ExitStack() as exit_stack:  # noqa: F841
             ext_manager = ExtensionsManager()

             device = TorchDevice.choose_torch_device()
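A note on this hunk: F841 is the pyflakes/Ruff check "local variable is assigned to but never used". The exit_stack name is unused in the new code path for now, so the noqa silences the lint until extensions that load models start entering contexts on it. A minimal sketch of the intended pattern, where the commented-out call is a hypothetical illustration rather than InvokeAI's actual API:

    from contextlib import ExitStack

    def run_denoise() -> None:
        # ExitStack collects context managers entered during setup and
        # unwinds them all when the block exits, even on error.
        with ExitStack() as exit_stack:  # noqa: F841 - unused until extensions land
            # Hypothetical future use: an extension enters its model-load
            # context on the shared stack so everything is released together.
            # model = exit_stack.enter_context(load_model("unet"))
            pass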
@@ -804,7 +805,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
             result_latents = sd_backend.latents_from_embeddings(denoise_ctx, ext_manager)

             # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
-            result_latents = result_latents.to("cpu")  # TODO: detach?
+            result_latents = result_latents.detach().to("cpu")
             TorchDevice.empty_cache()

             name = context.tensors.save(tensor=result_latents)
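A note on this hunk: Tensor.detach() returns a tensor with no grad_fn, so it does not keep the autograd graph (and the GPU buffers the graph references) alive; .to("cpu") then copies the result off-device before the cache is emptied. Since _new_invoke runs under @torch.no_grad(), no graph should be recorded here anyway, so the detach reads as defensive; it resolves the old "TODO: detach?". A minimal standalone sketch of the behavior:

    import torch

    x = torch.randn(4, 4, requires_grad=True)
    y = (x * 2).sum()  # y carries a grad_fn into the autograd graph

    # detach() drops the graph reference; to("cpu") makes a CPU copy.
    y_cpu = y.detach().to("cpu")
    assert y_cpu.grad_fn is None and not y_cpu.requires_grad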
@@ -23,20 +23,11 @@ from invokeai.app.services.config.config_default import get_config
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import IPAdapterData, TextConditioningData
 from invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from invokeai.backend.stable_diffusion.diffusion.unet_attention_patcher import UNetAttentionPatcher, UNetIPAdapterData
-from invokeai.backend.stable_diffusion.extensions import PipelineIntermediateState
+from invokeai.backend.stable_diffusion.extensions.preview import PipelineIntermediateState
 from invokeai.backend.util.attention import auto_detect_slice_size
 from invokeai.backend.util.devices import TorchDevice
 from invokeai.backend.util.hotfixes import ControlNetModel

-# @dataclass
-# class PipelineIntermediateState:
-#     step: int
-#     order: int
-#     total_steps: int
-#     timestep: int
-#     latents: torch.Tensor
-#     predicted_original: Optional[torch.Tensor] = None
-

 @dataclass
 class AddsMaskGuidance:
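For reference, the commented-out dataclass deleted above matches the live PipelineIntermediateState that the import now pulls from extensions.preview. Reconstructed from the removed comment lines (field set exactly as shown there; not checked against the current definition in preview.py):

    from dataclasses import dataclass
    from typing import Optional

    import torch

    @dataclass
    class PipelineIntermediateState:
        step: int
        order: int
        total_steps: int
        timestep: int
        latents: torch.Tensor
        predicted_original: Optional[torch.Tensor] = None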
@ -1,12 +0,0 @@
|
||||
"""
|
||||
Initialization file for the invokeai.backend.stable_diffusion.extensions package
|
||||
"""
|
||||
|
||||
from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase
|
||||
from invokeai.backend.stable_diffusion.extensions.preview import PipelineIntermediateState, PreviewExt
|
||||
|
||||
__all__ = [
|
||||
"ExtensionBase",
|
||||
"PipelineIntermediateState",
|
||||
"PreviewExt",
|
||||
]
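With the package __init__.py deleted, its re-exports are gone, which is why the earlier hunks repoint their imports at the defining modules. A minimal migration sketch based only on the names the deleted file exported:

    # Before: via the package re-exports (now removed)
    # from invokeai.backend.stable_diffusion.extensions import (
    #     ExtensionBase,
    #     PipelineIntermediateState,
    #     PreviewExt,
    # )

    # After: import each name from its defining module
    from invokeai.backend.stable_diffusion.extensions.base import ExtensionBase
    from invokeai.backend.stable_diffusion.extensions.preview import PipelineIntermediateState, PreviewExt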