Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
e93f4d632d
* introduce new abstraction layer for GPU devices
* add unit test for device abstraction
* fix ruff
* convert TorchDeviceSelect into a stateless class
* move logic to select context-specific execution device into context API
* add mock hardware environments to pytest
* remove dangling mocker fixture
* fix unit test for running on non-CUDA systems
* remove unimplemented get_execution_device() call
* remove autocast precision
* Multiple changes:
  1. Remove TorchDeviceSelect.get_execution_device(), as well as calls to context.models.get_execution_device().
  2. Rename TorchDeviceSelect to TorchDevice.
  3. Add back the legacy public API defined in `invocation_api`, including choose_precision().
  4. Add a config file migration script to accommodate removal of precision=autocast.
* add deprecation warnings to choose_torch_device() and choose_precision()
* fix test crash
* remove app_config argument from choose_torch_device() and choose_torch_dtype()

---------

Co-authored-by: Lincoln Stein <lstein@gmail.com>
127 lines
3.5 KiB
Python
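
The commit message above replaces the old module-level device helpers with a stateless TorchDevice class and leaves choose_torch_device() and choose_precision() behind only as deprecated shims. A minimal sketch of the new call pattern, as the file below uses it (the absolute import path and the return types are inferred from the relative import and the call sites in this file, not confirmed against the rest of the codebase):

    import torch

    from invokeai.backend.util.devices import TorchDevice

    # Pick the execution device without an app_config argument (removed in this commit),
    # then derive a matching dtype for tensors created for that device.
    device: torch.device = TorchDevice.choose_torch_device()
    dtype: torch.dtype = TorchDevice.choose_torch_dtype(device=device)

    latents = torch.randn([1, 4, 64, 64], device=device, dtype=dtype)
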
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) & the InvokeAI Team

import torch
from pydantic import field_validator

from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
from invokeai.app.invocations.fields import FieldDescriptions, InputField, LatentsField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.misc import SEED_MAX

from ...backend.util.devices import TorchDevice
from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    invocation,
    invocation_output,
)

"""
Utilities
"""


def get_noise(
    width: int,
    height: int,
    device: torch.device,
    seed: int = 0,
    latent_channels: int = 4,
    downsampling_factor: int = 8,
    use_cpu: bool = True,
    perlin: float = 0.0,
):
    """Generate noise for a given image size."""
    # Generating on the CPU (the default) keeps the noise reproducible across platforms.
    noise_device_type = "cpu" if use_cpu else device.type

    # limit noise to only the diffusion image channels, not the mask channels
    input_channels = min(latent_channels, 4)
    generator = torch.Generator(device=noise_device_type).manual_seed(seed)

    noise_tensor = torch.randn(
        [
            1,
            input_channels,
            height // downsampling_factor,
            width // downsampling_factor,
        ],
        # the dtype follows the requested execution device, but the tensor is returned on the CPU
        dtype=TorchDevice.choose_torch_dtype(device=device),
        device=noise_device_type,
        generator=generator,
    ).to("cpu")

    return noise_tensor
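
# Illustrative usage sketch (not part of the original module): with the defaults above, a
# 512x512 request seeded on the CPU yields a latent-space tensor of shape
# [1, 4, 512 // 8, 512 // 8] == [1, 4, 64, 64].
#
#   example_noise = get_noise(
#       width=512,
#       height=512,
#       device=TorchDevice.choose_torch_device(),
#       seed=42,
#   )
#   assert example_noise.shape == (1, 4, 64, 64)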


"""
Nodes
"""


@invocation_output("noise_output")
class NoiseOutput(BaseInvocationOutput):
    """Invocation noise output"""

    noise: LatentsField = OutputField(description=FieldDescriptions.noise)
    width: int = OutputField(description=FieldDescriptions.width)
    height: int = OutputField(description=FieldDescriptions.height)

    @classmethod
    def build(cls, latents_name: str, latents: torch.Tensor, seed: int) -> "NoiseOutput":
        return cls(
            noise=LatentsField(latents_name=latents_name, seed=seed),
            width=latents.size()[3] * LATENT_SCALE_FACTOR,
            height=latents.size()[2] * LATENT_SCALE_FACTOR,
        )
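
# For illustration (assuming LATENT_SCALE_FACTOR == 8, matching the downsampling_factor used
# by get_noise above): a [1, 4, 64, 64] latent tensor produced for a 512x512 request is
# reported by NoiseOutput.build() as width = 64 * 8 = 512 and height = 64 * 8 = 512.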


@invocation(
    "noise",
    title="Noise",
    tags=["latents", "noise"],
    category="latents",
    version="1.0.2",
)
class NoiseInvocation(BaseInvocation):
    """Generates latent noise."""

    seed: int = InputField(
        default=0,
        ge=0,
        le=SEED_MAX,
        description=FieldDescriptions.seed,
    )
    width: int = InputField(
        default=512,
        multiple_of=LATENT_SCALE_FACTOR,
        gt=0,
        description=FieldDescriptions.width,
    )
    height: int = InputField(
        default=512,
        multiple_of=LATENT_SCALE_FACTOR,
        gt=0,
        description=FieldDescriptions.height,
    )
    use_cpu: bool = InputField(
        default=True,
        description="Use CPU for noise generation (for reproducible results across platforms)",
    )

    @field_validator("seed", mode="before")
    def modulo_seed(cls, v):
        """Return the seed modulo (SEED_MAX + 1) to ensure it is within the valid range."""
        return v % (SEED_MAX + 1)

    def invoke(self, context: InvocationContext) -> NoiseOutput:
        noise = get_noise(
            width=self.width,
            height=self.height,
            device=TorchDevice.choose_torch_device(),
            seed=self.seed,
            use_cpu=self.use_cpu,
        )
        # Persist the tensor via the invocation context and return a reference to it by name.
        name = context.tensors.save(tensor=noise)
        return NoiseOutput.build(latents_name=name, latents=noise, seed=self.seed)
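
# Illustrative note (not part of the original module): because modulo_seed runs in "before"
# mode, an out-of-range seed is wrapped into [0, SEED_MAX] before validation, e.g.
#
#   NoiseInvocation(seed=SEED_MAX + 5, width=512, height=512)
#   # -> the stored seed is (SEED_MAX + 5) % (SEED_MAX + 1) == 4
#
# (this assumes the remaining BaseInvocation fields, such as the node id, have defaults).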