diff --git a/invokeai/app/invocations/constants.py b/invokeai/app/invocations/constants.py
index cebe0eb30f..e01589be81 100644
--- a/invokeai/app/invocations/constants.py
+++ b/invokeai/app/invocations/constants.py
@@ -1,6 +1,7 @@
 from typing import Literal
 
 from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP
+from invokeai.backend.util.devices import TorchDevice
 
 LATENT_SCALE_FACTOR = 8
 """
@@ -15,3 +16,5 @@ SCHEDULER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())]
 
 IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
 """A literal type for PIL image modes supported by Invoke"""
+
+DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
diff --git a/invokeai/app/invocations/create_denoise_mask.py b/invokeai/app/invocations/create_denoise_mask.py
index d128e0efec..2d66c20dbd 100644
--- a/invokeai/app/invocations/create_denoise_mask.py
+++ b/invokeai/app/invocations/create_denoise_mask.py
@@ -6,7 +6,7 @@ from PIL import Image
 from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
 from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
 from invokeai.app.invocations.model import VAEField
@@ -30,7 +30,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
     mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
     fp32: bool = InputField(
-        default=DEFAULT_PRECISION == "float32",
+        default=DEFAULT_PRECISION == torch.float32,
         description=FieldDescriptions.fp32,
         ui_order=4,
     )
diff --git a/invokeai/app/invocations/create_gradient_mask.py b/invokeai/app/invocations/create_gradient_mask.py
index 2d2b13fdcc..089313463b 100644
--- a/invokeai/app/invocations/create_gradient_mask.py
+++ b/invokeai/app/invocations/create_gradient_mask.py
@@ -7,7 +7,7 @@ from PIL import Image, ImageFilter
 from torchvision.transforms.functional import resize as tv_resize
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,
@@ -74,7 +74,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
     )
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
     fp32: bool = InputField(
-        default=DEFAULT_PRECISION == "float32",
+        default=DEFAULT_PRECISION == torch.float32,
         description=FieldDescriptions.fp32,
         ui_order=9,
     )
diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py
index 3851caa647..7260fcb168 100644
--- a/invokeai/app/invocations/denoise_latents.py
+++ b/invokeai/app/invocations/denoise_latents.py
@@ -59,8 +59,6 @@ from .baseinvocation import BaseInvocation, invocation
 from .controlnet_image_processors import ControlField
 from .model import ModelIdentifierField, UNetField
 
-DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
-
 
 def get_scheduler(
     context: InvocationContext,
diff --git a/invokeai/app/invocations/image_to_latents.py b/invokeai/app/invocations/image_to_latents.py
index bf2eb414e1..06de530154 100644
--- a/invokeai/app/invocations/image_to_latents.py
+++ b/invokeai/app/invocations/image_to_latents.py
@@ -12,7 +12,7 @@ from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
 from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,
@@ -44,7 +44,7 @@ class ImageToLatentsInvocation(BaseInvocation):
         input=Input.Connection,
     )
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
-    fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
+    fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
 
     @staticmethod
     def vae_encode(vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor) -> torch.Tensor:
diff --git a/invokeai/app/invocations/latents_to_image.py b/invokeai/app/invocations/latents_to_image.py
index 648ee7ac68..202e8bfa1b 100644
--- a/invokeai/app/invocations/latents_to_image.py
+++ b/invokeai/app/invocations/latents_to_image.py
@@ -11,7 +11,7 @@ from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
 from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
+from invokeai.app.invocations.constants import DEFAULT_PRECISION
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     Input,
@@ -46,7 +46,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         input=Input.Connection,
     )
     tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
-    fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
+    fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
 
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ImageOutput:
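Note (not part of the patch): a minimal sketch of the behavior this change corrects. TorchDevice.choose_torch_dtype() returns a torch.dtype, not a string, so the old default expression DEFAULT_PRECISION == "float32" could never evaluate to True, and the fp32 fields defaulted to False even when the chosen precision was float32. Comparing against torch.float32 restores the intended default; relocating DEFAULT_PRECISION into constants.py also lets the VAE and mask invocations import it without pulling in denoise_latents.

    import torch
    from invokeai.backend.util.devices import TorchDevice

    # torch.float16, torch.bfloat16, or torch.float32, depending on the device
    DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()

    # Old default: a torch.dtype never compares equal to a str, so this was always False.
    old_default = DEFAULT_PRECISION == "float32"

    # New default: True exactly when the chosen precision is float32.
    new_default = DEFAULT_PRECISION == torch.float32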