# invokeai/app/invocations/noise.py
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) & the InvokeAI Team
from typing import Literal

import torch
from pydantic import validator

from invokeai.app.invocations.latent import LatentsField
from invokeai.app.util.misc import SEED_MAX, get_random_seed

from ...backend.util.devices import choose_torch_device, torch_dtype
from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    FieldDescriptions,
    InputField,
    InvocationContext,
    OutputField,
    tags,
    title,
)
"""
Utilities
"""
def get_noise(
    width: int,
    height: int,
    device: torch.device,
    seed: int = 0,
    latent_channels: int = 4,
    downsampling_factor: int = 8,
    use_cpu: bool = True,
    perlin: float = 0.0,
):
    """Generate a seeded latent-noise tensor for an image of the given pixel size.

    Args:
        width: Image width in pixels; latent width is ``width // downsampling_factor``.
        height: Image height in pixels; latent height is ``height // downsampling_factor``.
        device: Device whose dtype (via ``torch_dtype``) is used for the tensor.
        seed: RNG seed for reproducible noise.
        latent_channels: Requested channel count; capped at 4 below.
        downsampling_factor: Pixel-to-latent scale factor (VAE downsampling).
        use_cpu: Generate on the CPU so identical seeds give identical results
            across platforms/backends.
        perlin: Accepted for interface compatibility; not used by this
            implementation.

    Returns:
        A ``(1, C, height // downsampling_factor, width // downsampling_factor)``
        tensor, always moved to the CPU before being returned.
    """
    generation_device = "cpu" if use_cpu else device.type

    # Limit noise to only the diffusion image channels, not the mask channels.
    channels = min(latent_channels, 4)

    rng = torch.Generator(device=generation_device).manual_seed(seed)
    shape = [
        1,
        channels,
        height // downsampling_factor,
        width // downsampling_factor,
    ]
    noise_tensor = torch.randn(
        shape,
        dtype=torch_dtype(device),
        device=generation_device,
        generator=rng,
    )
    # Hand the result back on the CPU regardless of where it was generated.
    return noise_tensor.to("cpu")
"""
Nodes
"""
class NoiseOutput(BaseInvocationOutput):
    """Output produced by a noise-generating invocation: a reference to the
    saved noise latents plus the pixel-space dimensions they correspond to."""

    type: Literal["noise_output"] = "noise_output"

    # Outputs
    noise: LatentsField = OutputField(default=None, description=FieldDescriptions.noise)
    width: int = OutputField(description=FieldDescriptions.width)
    height: int = OutputField(description=FieldDescriptions.height)


def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int):
    """Assemble a NoiseOutput referencing an already-saved latents tensor.

    Width/height are reported in pixel space: the latent spatial dimensions
    (dims 3 and 2 of a NCHW tensor) scaled by the VAE downsampling factor of 8.
    """
    size = latents.size()
    return NoiseOutput(
        noise=LatentsField(latents_name=latents_name, seed=seed),
        width=size[3] * 8,
        height=size[2] * 8,
    )
@title("Noise")
@tags("latents", "noise")
class NoiseInvocation(BaseInvocation):
    """Generates latent noise."""

    type: Literal["noise"] = "noise"

    # Inputs
    seed: int = InputField(
        ge=0,
        le=SEED_MAX,
        description=FieldDescriptions.seed,
        default_factory=get_random_seed,
    )
    width: int = InputField(
        default=512,
        multiple_of=8,
        gt=0,
        description=FieldDescriptions.width,
    )
    height: int = InputField(
        default=512,
        multiple_of=8,
        gt=0,
        description=FieldDescriptions.height,
    )
    use_cpu: bool = InputField(
        default=True,
        description="Use CPU for noise generation (for reproducible results across platforms)",
    )

    @validator("seed", pre=True)
    def modulo_seed(cls, v):
        """Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range."""
        return v % (SEED_MAX + 1)

    def invoke(self, context: InvocationContext) -> NoiseOutput:
        # Generate the noise tensor on the chosen device (CPU by default for
        # reproducibility), persist it under a graph-unique name, and return
        # a reference to it alongside its pixel-space dimensions.
        noise = get_noise(
            width=self.width,
            height=self.height,
            device=choose_torch_device(),
            seed=self.seed,
            use_cpu=self.use_cpu,
        )
        latents_name = f"{context.graph_execution_state_id}__{self.id}"
        context.services.latents.save(latents_name, noise)
        return build_noise_output(latents_name=latents_name, latents=noise, seed=self.seed)