# invokeai/app/invocations/latent.py
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from contextlib import ExitStack
from typing import List, Literal, Optional, Union

import einops
import numpy as np
import torch
import torchvision.transforms as T
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.schedulers import DPMSolverSDEScheduler
from diffusers.schedulers import SchedulerMixin as Scheduler
from pydantic import validator
from torchvision.transforms.functional import resize as tv_resize

from invokeai.app.invocations.metadata import CoreMetadata
from invokeai.app.invocations.primitives import (
    DenoiseMaskField,
    DenoiseMaskOutput,
    ImageField,
    ImageOutput,
    LatentsField,
    LatentsOutput,
    build_latents_output,
)
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_management.models import ModelType, SilenceWarnings
from ...backend.model_management.lora import ModelPatcher
from ...backend.model_management.seamless import set_seamless
from ...backend.model_management.models import BaseModelType
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (
    ConditioningData,
    ControlNetData,
    StableDiffusionGeneratorPipeline,
    image_resized_to_grid_as_tensor,
)
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import choose_precision, choose_torch_device
from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import BaseInvocation, FieldDescriptions, Input, InputField, InvocationContext, UIType, tags, title
from .compel import ConditioningField
from .controlnet_image_processors import ControlField
from .model import ModelInfo, UNetField, VaeField

DEFAULT_PRECISION = choose_precision(choose_torch_device())

SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]
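
# Illustration (hedged, not in the original source): SAMPLER_NAME_VALUES expands to a
# Literal over the keys of SCHEDULER_MAP, i.e. something like
#   Literal["ddim", "dpmpp_2m", "dpmpp_sde", "euler", "euler_a", ...]
# so invalid scheduler names are rejected at pydantic field-validation time rather
# than deep inside the denoising loop.
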
@title("Create Denoise Mask")
@tags("mask", "denoise")
class CreateDenoiseMaskInvocation(BaseInvocation):
"""Creates mask for denoising model run."""
2023-08-18 01:07:40 +00:00
# Metadata
2023-08-26 17:50:13 +00:00
type: Literal["create_denoise_mask"] = "create_denoise_mask"
2023-08-18 01:07:40 +00:00
# Inputs
2023-08-26 18:17:08 +00:00
vae: VaeField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0)
2023-08-26 18:14:35 +00:00
image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32, ui_order=4)
2023-08-18 01:07:40 +00:00
def prep_mask_tensor(self, mask_image):
if mask_image.mode != "L":
mask_image = mask_image.convert("L")
mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
if mask_tensor.dim() == 3:
mask_tensor = mask_tensor.unsqueeze(0)
# if shape is not None:
2023-08-18 01:07:40 +00:00
# mask_tensor = tv_resize(mask_tensor, shape, T.InterpolationMode.BILINEAR)
return mask_tensor
@torch.no_grad()
2023-08-26 17:50:13 +00:00
def invoke(self, context: InvocationContext) -> DenoiseMaskOutput:
2023-08-18 01:07:40 +00:00
if self.image is not None:
image = context.services.images.get_pil_image(self.image.image_name)
image = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image.dim() == 3:
image = image.unsqueeze(0)
else:
image = None
mask = self.prep_mask_tensor(
context.services.images.get_pil_image(self.mask.image_name),
)
2023-08-18 01:07:40 +00:00
if image is not None:
vae_info = context.services.model_manager.get_model(
**self.vae.vae.dict(),
context=context,
)
img_mask = tv_resize(mask, image.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
2023-08-18 01:07:40 +00:00
masked_image = image * torch.where(img_mask < 0.5, 0.0, 1.0)
# TODO:
masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())
masked_latents_name = f"{context.graph_execution_state_id}__{self.id}_masked_latents"
context.services.latents.save(masked_latents_name, masked_latents)
else:
masked_latents_name = None
mask_name = f"{context.graph_execution_state_id}__{self.id}_mask"
context.services.latents.save(mask_name, mask)
2023-08-26 17:50:13 +00:00
return DenoiseMaskOutput(
denoise_mask=DenoiseMaskField(
2023-08-18 01:07:40 +00:00
mask_name=mask_name,
masked_latents_name=masked_latents_name,
),
)
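

# Hedged usage sketch (not part of the original module): invoking the node directly,
# outside a graph. The `context` and `vae_field` arguments and the image names are
# assumptions; in a real session the graph executor supplies these via connections.
def _example_create_denoise_mask(context: InvocationContext, vae_field: VaeField) -> DenoiseMaskOutput:
    """Illustrative only: build a CreateDenoiseMaskInvocation and run it."""
    node = CreateDenoiseMaskInvocation(
        id="example_mask_node",  # hypothetical node id
        vae=vae_field,  # normally wired from a VAE loader node via Input.Connection
        image=ImageField(image_name="init_image.png"),  # hypothetical image name
        mask=ImageField(image_name="inpaint_mask.png"),  # hypothetical mask name
    )
    return node.invoke(context)

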
def get_scheduler(
    context: InvocationContext,
    scheduler_info: ModelInfo,
    scheduler_name: str,
    seed: int,
) -> Scheduler:
    scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"])
    orig_scheduler_info = context.services.model_manager.get_model(
        **scheduler_info.dict(),
        context=context,
    )
    with orig_scheduler_info as orig_scheduler:
        scheduler_config = orig_scheduler.config

    if "_backup" in scheduler_config:
        scheduler_config = scheduler_config["_backup"]
    scheduler_config = {
        **scheduler_config,
        **scheduler_extra_config,
        "_backup": scheduler_config,
    }

    # make dpmpp_sde reproducible (the seed can only be passed to its initializer)
    if scheduler_class is DPMSolverSDEScheduler:
        scheduler_config["noise_sampler_seed"] = seed

    scheduler = scheduler_class.from_config(scheduler_config)

    # hack copied over from generate.py
    if not hasattr(scheduler, "uses_inpainting_model"):
        scheduler.uses_inpainting_model = lambda: False
    return scheduler
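

# Hedged usage sketch: resolving a seeded scheduler the way DenoiseLatentsInvocation
# (below) does. The scheduler name and seed are illustrative assumptions; the seeded
# branch only matters for "dpmpp_sde", whose DPMSolverSDEScheduler accepts the seed
# in its initializer.
def _example_get_scheduler(context: InvocationContext, scheduler_info: ModelInfo) -> Scheduler:
    """Illustrative only: look up and configure a scheduler by name."""
    return get_scheduler(
        context=context,
        scheduler_info=scheduler_info,  # normally the UNetField's scheduler sub-model info
        scheduler_name="dpmpp_sde",
        seed=123,
    )

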
@title("Denoise Latents")
@tags("latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l")
class DenoiseLatentsInvocation(BaseInvocation):
"""Denoises noisy latents to decodable images"""
type: Literal["denoise_latents"] = "denoise_latents"
# Inputs
positive_conditioning: ConditioningField = InputField(
description=FieldDescriptions.positive_cond, input=Input.Connection, ui_order=0
)
negative_conditioning: ConditioningField = InputField(
description=FieldDescriptions.negative_cond, input=Input.Connection, ui_order=1
)
noise: Optional[LatentsField] = InputField(description=FieldDescriptions.noise, input=Input.Connection, ui_order=3)
steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
cfg_scale: Union[float, List[float]] = InputField(
2023-08-17 11:37:28 +00:00
default=7.5, ge=1, description=FieldDescriptions.cfg_scale, ui_type=UIType.Float, title="CFG Scale"
)
denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start)
denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
scheduler: SAMPLER_NAME_VALUES = InputField(
default="euler", description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler
)
unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet", ui_order=2)
control: Union[ControlField, list[ControlField]] = InputField(
default=None, description=FieldDescriptions.control, input=Input.Connection, ui_order=5
)
latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection)
2023-08-26 17:50:13 +00:00
denoise_mask: Optional[DenoiseMaskField] = InputField(
default=None,
description=FieldDescriptions.mask,
)
Feat/easy param (#3504) * Testing change to LatentsToText to allow setting different cfg_scale values per diffusion step. * Adding first attempt at float param easing node, using Penner easing functions. * Core implementation of ControlNet and MultiControlNet. * Added support for ControlNet and MultiControlNet to legacy non-nodal Txt2Img in backend/generator. Although backend/generator will likely disappear by v3.x, right now they are very useful for testing core ControlNet and MultiControlNet functionality while node codebase is rapidly evolving. * Added example of using ControlNet with legacy Txt2Img generator * Resolving rebase conflict * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Resolving conflicts in rebase to origin/main * Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke()) * changes to base class for controlnet nodes * Added HED, LineArt, and OpenPose ControlNet nodes * Added an additional "raw_processed_image" output port to controlnets, mainly so could route ImageField to a ShowImage node * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * More rebase repair. * Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this... * Fixed use of ControlNet control_weight parameter * Fixed lint-ish formatting error * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Refactored controlnet node to output ControlField that bundles control info. * changes to base class for controlnet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Cleaning up TextToLatent arg testing * Cleaning up mistakes after rebase. * Removed last bits of dtype and and device hardwiring from controlnet section * Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled. 
* Added support for specifying which step iteration to start using each ControlNet, and which step to end using each controlnet (specified as fraction of total steps) * Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input. * Added dependency on controlnet-aux v0.0.3 * Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it. * Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names. * Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle. * Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier. * Cleaning up after ControlNet refactor in TextToLatentsInvocation * Extended node-based ControlNet support to LatentsToLatentsInvocation. * chore(ui): regen api client * fix(ui): add value to conditioning field * fix(ui): add control field type * fix(ui): fix node ui type hints * fix(nodes): controlnet input accepts list or single controlnet * Moved to controlnet_aux v0.0.4, reinstated Zoe controlnet preprocessor. Also in pyproject.toml had to specify downgrade of timm to 0.6.13 _after_ controlnet-aux installs timm >= 0.9.2, because timm >0.6.13 breaks Zoe preprocessor. * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Switching to ControlField for output from controlnet nodes. * Resolving conflicts in rebase to origin/main * Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke()) * changes to base class for controlnet nodes * Added HED, LineArt, and OpenPose ControlNet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this... * Fixed use of ControlNet control_weight parameter * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Refactored controlnet node to output ControlField that bundles control info. * changes to base class for controlnet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. 
* Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Cleaning up TextToLatent arg testing * Cleaning up mistakes after rebase. * Removed last bits of dtype and and device hardwiring from controlnet section * Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled. * Added support for specifying which step iteration to start using each ControlNet, and which step to end using each controlnet (specified as fraction of total steps) * Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input. * Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it. * Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names. * Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle. * Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier. * Cleaning up after ControlNet refactor in TextToLatentsInvocation * Extended node-based ControlNet support to LatentsToLatentsInvocation. * chore(ui): regen api client * fix(ui): fix node ui type hints * fix(nodes): controlnet input accepts list or single controlnet * Added Mediapipe image processor for use as ControlNet preprocessor. Also hacked in ability to specify HF subfolder when loading ControlNet models from string. * Fixed bug where MediapipFaceProcessorInvocation was ignoring max_faces and min_confidence params. * Added nodes for float params: ParamFloatInvocation and FloatCollectionOutput. Also added FloatOutput. * Added mediapipe install requirement. Should be able to remove once controlnet_aux package adds mediapipe to its requirements. * Added float to FIELD_TYPE_MAP ins constants.ts * Progress toward improvement in fieldTemplateBuilder.ts getFieldType() * Fixed controlnet preprocessors and controlnet handling in TextToLatents to work with revised Image services. * Cleaning up from merge, re-adding cfg_scale to FIELD_TYPE_MAP * Making sure cfg_scale of type list[float] can be used in image metadata, to support param easing for cfg_scale * Fixed math for per-step param easing. * Added option to show plot of param value at each step * Just cleaning up after adding param easing plot option, removing vestigial code. * Modified control_weight ControlNet param to be polistmorphic -- can now be either a single float weight applied for all steps, or a list of floats of size total_steps, that specifies weight for each step. * Added more informative error message when _validat_edge() throws an error. * Just improving parm easing bar chart title to include easing type. * Added requirement for easing-functions package * Taking out some diagnostic prints. * Added option to use both easing function and mirror of easing function together. * Fixed recently introduced problem (when pulled in main), triggered by num_steps in StepParamEasingInvocation not having a default value -- just added default. --------- Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2023-06-11 06:27:44 +00:00
@validator("cfg_scale")
def ge_one(cls, v):
"""validate that all cfg_scale values are >= 1"""
if isinstance(v, list):
for i in v:
if i < 1:
2023-07-28 13:46:44 +00:00
raise ValueError("cfg_scale must be greater than 1")
Feat/easy param (#3504) * Testing change to LatentsToText to allow setting different cfg_scale values per diffusion step. * Adding first attempt at float param easing node, using Penner easing functions. * Core implementation of ControlNet and MultiControlNet. * Added support for ControlNet and MultiControlNet to legacy non-nodal Txt2Img in backend/generator. Although backend/generator will likely disappear by v3.x, right now they are very useful for testing core ControlNet and MultiControlNet functionality while node codebase is rapidly evolving. * Added example of using ControlNet with legacy Txt2Img generator * Resolving rebase conflict * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Resolving conflicts in rebase to origin/main * Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke()) * changes to base class for controlnet nodes * Added HED, LineArt, and OpenPose ControlNet nodes * Added an additional "raw_processed_image" output port to controlnets, mainly so could route ImageField to a ShowImage node * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * More rebase repair. * Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this... * Fixed use of ControlNet control_weight parameter * Fixed lint-ish formatting error * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Refactored controlnet node to output ControlField that bundles control info. * changes to base class for controlnet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Cleaning up TextToLatent arg testing * Cleaning up mistakes after rebase. * Removed last bits of dtype and and device hardwiring from controlnet section * Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled. 
* Added support for specifying which step iteration to start using each ControlNet, and which step to end using each controlnet (specified as fraction of total steps) * Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input. * Added dependency on controlnet-aux v0.0.3 * Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it. * Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names. * Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle. * Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier. * Cleaning up after ControlNet refactor in TextToLatentsInvocation * Extended node-based ControlNet support to LatentsToLatentsInvocation. * chore(ui): regen api client * fix(ui): add value to conditioning field * fix(ui): add control field type * fix(ui): fix node ui type hints * fix(nodes): controlnet input accepts list or single controlnet * Moved to controlnet_aux v0.0.4, reinstated Zoe controlnet preprocessor. Also in pyproject.toml had to specify downgrade of timm to 0.6.13 _after_ controlnet-aux installs timm >= 0.9.2, because timm >0.6.13 breaks Zoe preprocessor. * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Switching to ControlField for output from controlnet nodes. * Resolving conflicts in rebase to origin/main * Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke()) * changes to base class for controlnet nodes * Added HED, LineArt, and OpenPose ControlNet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this... * Fixed use of ControlNet control_weight parameter * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Refactored controlnet node to output ControlField that bundles control info. * changes to base class for controlnet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. 
* Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Cleaning up TextToLatent arg testing * Cleaning up mistakes after rebase. * Removed last bits of dtype and and device hardwiring from controlnet section * Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled. * Added support for specifying which step iteration to start using each ControlNet, and which step to end using each controlnet (specified as fraction of total steps) * Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input. * Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it. * Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names. * Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle. * Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier. * Cleaning up after ControlNet refactor in TextToLatentsInvocation * Extended node-based ControlNet support to LatentsToLatentsInvocation. * chore(ui): regen api client * fix(ui): fix node ui type hints * fix(nodes): controlnet input accepts list or single controlnet * Added Mediapipe image processor for use as ControlNet preprocessor. Also hacked in ability to specify HF subfolder when loading ControlNet models from string. * Fixed bug where MediapipFaceProcessorInvocation was ignoring max_faces and min_confidence params. * Added nodes for float params: ParamFloatInvocation and FloatCollectionOutput. Also added FloatOutput. * Added mediapipe install requirement. Should be able to remove once controlnet_aux package adds mediapipe to its requirements. * Added float to FIELD_TYPE_MAP ins constants.ts * Progress toward improvement in fieldTemplateBuilder.ts getFieldType() * Fixed controlnet preprocessors and controlnet handling in TextToLatents to work with revised Image services. * Cleaning up from merge, re-adding cfg_scale to FIELD_TYPE_MAP * Making sure cfg_scale of type list[float] can be used in image metadata, to support param easing for cfg_scale * Fixed math for per-step param easing. * Added option to show plot of param value at each step * Just cleaning up after adding param easing plot option, removing vestigial code. * Modified control_weight ControlNet param to be polistmorphic -- can now be either a single float weight applied for all steps, or a list of floats of size total_steps, that specifies weight for each step. * Added more informative error message when _validat_edge() throws an error. * Just improving parm easing bar chart title to include easing type. * Added requirement for easing-functions package * Taking out some diagnostic prints. * Added option to use both easing function and mirror of easing function together. * Fixed recently introduced problem (when pulled in main), triggered by num_steps in StepParamEasingInvocation not having a default value -- just added default. --------- Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2023-06-11 06:27:44 +00:00
else:
if v < 1:
2023-07-28 13:46:44 +00:00
raise ValueError("cfg_scale must be greater than 1")
Feat/easy param (#3504) * Testing change to LatentsToText to allow setting different cfg_scale values per diffusion step. * Adding first attempt at float param easing node, using Penner easing functions. * Core implementation of ControlNet and MultiControlNet. * Added support for ControlNet and MultiControlNet to legacy non-nodal Txt2Img in backend/generator. Although backend/generator will likely disappear by v3.x, right now they are very useful for testing core ControlNet and MultiControlNet functionality while node codebase is rapidly evolving. * Added example of using ControlNet with legacy Txt2Img generator * Resolving rebase conflict * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Resolving conflicts in rebase to origin/main * Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke()) * changes to base class for controlnet nodes * Added HED, LineArt, and OpenPose ControlNet nodes * Added an additional "raw_processed_image" output port to controlnets, mainly so could route ImageField to a ShowImage node * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * More rebase repair. * Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this... * Fixed use of ControlNet control_weight parameter * Fixed lint-ish formatting error * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Refactored controlnet node to output ControlField that bundles control info. * changes to base class for controlnet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Cleaning up TextToLatent arg testing * Cleaning up mistakes after rebase. * Removed last bits of dtype and and device hardwiring from controlnet section * Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled. 
* Added support for specifying which step iteration to start using each ControlNet, and which step to end using each controlnet (specified as fraction of total steps) * Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input. * Added dependency on controlnet-aux v0.0.3 * Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it. * Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names. * Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle. * Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier. * Cleaning up after ControlNet refactor in TextToLatentsInvocation * Extended node-based ControlNet support to LatentsToLatentsInvocation. * chore(ui): regen api client * fix(ui): add value to conditioning field * fix(ui): add control field type * fix(ui): fix node ui type hints * fix(nodes): controlnet input accepts list or single controlnet * Moved to controlnet_aux v0.0.4, reinstated Zoe controlnet preprocessor. Also in pyproject.toml had to specify downgrade of timm to 0.6.13 _after_ controlnet-aux installs timm >= 0.9.2, because timm >0.6.13 breaks Zoe preprocessor. * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Switching to ControlField for output from controlnet nodes. * Resolving conflicts in rebase to origin/main * Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke()) * changes to base class for controlnet nodes * Added HED, LineArt, and OpenPose ControlNet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. * Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this... * Fixed use of ControlNet control_weight parameter * Core implementation of ControlNet and MultiControlNet. * Added first controlnet preprocessor node for canny edge detection. * Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node * Switching to ControlField for output from controlnet nodes. * Refactored controlnet node to output ControlField that bundles control info. * changes to base class for controlnet nodes * Added more preprocessor nodes for: MidasDepth ZoeDepth MLSD NormalBae Pidi LineartAnime ContentShuffle Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup. 
* Prep for splitting pre-processor and controlnet nodes * Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes. * Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue. * Cleaning up TextToLatent arg testing * Cleaning up mistakes after rebase. * Removed last bits of dtype and and device hardwiring from controlnet section * Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled. * Added support for specifying which step iteration to start using each ControlNet, and which step to end using each controlnet (specified as fraction of total steps) * Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input. * Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it. * Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names. * Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle. * Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier. * Cleaning up after ControlNet refactor in TextToLatentsInvocation * Extended node-based ControlNet support to LatentsToLatentsInvocation. * chore(ui): regen api client * fix(ui): fix node ui type hints * fix(nodes): controlnet input accepts list or single controlnet * Added Mediapipe image processor for use as ControlNet preprocessor. Also hacked in ability to specify HF subfolder when loading ControlNet models from string. * Fixed bug where MediapipFaceProcessorInvocation was ignoring max_faces and min_confidence params. * Added nodes for float params: ParamFloatInvocation and FloatCollectionOutput. Also added FloatOutput. * Added mediapipe install requirement. Should be able to remove once controlnet_aux package adds mediapipe to its requirements. * Added float to FIELD_TYPE_MAP ins constants.ts * Progress toward improvement in fieldTemplateBuilder.ts getFieldType() * Fixed controlnet preprocessors and controlnet handling in TextToLatents to work with revised Image services. * Cleaning up from merge, re-adding cfg_scale to FIELD_TYPE_MAP * Making sure cfg_scale of type list[float] can be used in image metadata, to support param easing for cfg_scale * Fixed math for per-step param easing. * Added option to show plot of param value at each step * Just cleaning up after adding param easing plot option, removing vestigial code. * Modified control_weight ControlNet param to be polistmorphic -- can now be either a single float weight applied for all steps, or a list of floats of size total_steps, that specifies weight for each step. * Added more informative error message when _validat_edge() throws an error. * Just improving parm easing bar chart title to include easing type. * Added requirement for easing-functions package * Taking out some diagnostic prints. * Added option to use both easing function and mirror of easing function together. * Fixed recently introduced problem (when pulled in main), triggered by num_steps in StepParamEasingInvocation not having a default value -- just added default. --------- Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
2023-06-11 06:27:44 +00:00
return v
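
    # Hedged illustration: cfg_scale may be a single float or a per-step list, e.g.
    #   cfg_scale=7.5                # one value applied at every step
    #   cfg_scale=[7.5, 7.0, 6.5]    # one value per step (length == steps)
    #   cfg_scale=0.5                # rejected by ge_one() with a ValueError
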
# TODO: pass this an emitter method or something? or a session for dispatching?
def dispatch_progress(
2023-07-05 17:00:43 +00:00
self,
context: InvocationContext,
source_node_id: str,
intermediate_state: PipelineIntermediateState,
2023-08-07 18:27:32 +00:00
base_model: BaseModelType,
2023-07-05 17:00:43 +00:00
) -> None:
Partial migration of UI to nodes API (#3195) * feat(ui): add axios client generator and simple example * fix(ui): update client & nodes test code w/ new Edge type * chore(ui): organize generated files * chore(ui): update .eslintignore, .prettierignore * chore(ui): update openapi.json * feat(backend): fixes for nodes/generator * feat(ui): generate object args for api client * feat(ui): more nodes api prototyping * feat(ui): nodes cancel * chore(ui): regenerate api client * fix(ui): disable OG web server socket connection * fix(ui): fix scrollbar styles typing and prop just noticed the typo, and made the types stronger. * feat(ui): add socketio types * feat(ui): wip nodes - extract api client method arg types instead of manually declaring them - update example to display images - general tidy up * start building out node translations from frontend state and add notes about missing features * use reference to sampler_name * use reference to sampler_name * add optional apiUrl prop * feat(ui): start hooking up dynamic txt2img node generation, create middleware for session invocation * feat(ui): write separate nodes socket layer, txt2img generating and rendering w single node * feat(ui): img2img implementation * feat(ui): get intermediate images working but types are stubbed out * chore(ui): add support for package mode * feat(ui): add nodes mode script * feat(ui): handle random seeds * fix(ui): fix middleware types * feat(ui): add rtk action type guard * feat(ui): disable NodeAPITest This was polluting the network/socket logs. * feat(ui): fix parameters panel border color This commit should be elsewhere but I don't want to break my flow * feat(ui): make thunk types more consistent * feat(ui): add type guards for outputs * feat(ui): load images on socket connect Rudimentary * chore(ui): bump redux-toolkit * docs(ui): update readme * chore(ui): regenerate api client * chore(ui): add typescript as dev dependency I am having trouble with TS versions after vscode updated and now uses TS 5. `madge` has installed 3.9.10 and for whatever reason my vscode wants to use that. Manually specifying 4.9.5 and then setting vscode to use that as the workspace TS fixes the issue. * feat(ui): begin migrating gallery to nodes Along the way, migrate to use RTK `createEntityAdapter` for gallery images, and separate `results` and `uploads` into separate slices. Much cleaner this way. * feat(ui): clean up & comment results slice * fix(ui): separate thunk for initial gallery load so it properly gets index 0 * feat(ui): POST upload working * fix(ui): restore removed type * feat(ui): patch api generation for headers access * chore(ui): regenerate api * feat(ui): wip gallery migration * feat(ui): wip gallery migration * chore(ui): regenerate api * feat(ui): wip refactor socket events * feat(ui): disable panels based on app props * feat(ui): invert logic to be disabled * disable panels when app mounts * feat(ui): add support to disableTabs * docs(ui): organise and update docs * lang(ui): add toast strings * feat(ui): wip events, comments, and general refactoring * feat(ui): add optional token for auth * feat(ui): export StatusIndicator and ModelSelect for header use * feat(ui) working on making socket URL dynamic * feat(ui): dynamic middleware loading * feat(ui): prep for socket jwt * feat(ui): migrate cancelation also updated action names to be event-like instead of declaration-like sorry, i was scattered and this commit has a lot of unrelated stuff in it. 
* fix(ui): fix img2img type * chore(ui): regenerate api client * feat(ui): improve InvocationCompleteEvent types * feat(ui): increase StatusIndicator font size * fix(ui): fix middleware order for multi-node graphs * feat(ui): add exampleGraphs object w/ iterations example * feat(ui): generate iterations graph * feat(ui): update ModelSelect for nodes API * feat(ui): add hi-res functionality for txt2img generations * feat(ui): "subscribe" to particular nodes feels like a dirty hack but oh well it works * feat(ui): first steps to node editor ui * fix(ui): disable event subscription it is not fully baked just yet * feat(ui): wip node editor * feat(ui): remove extraneous field types * feat(ui): nodes before deleting stuff * feat(ui): cleanup nodes ui stuff * feat(ui): hook up nodes to redux * fix(ui): fix handle * fix(ui): add basic node edges & connection validation * feat(ui): add connection validation styling * feat(ui): increase edge width * feat(ui): it blends * feat(ui): wip model handling and graph topology validation * feat(ui): validation connections w/ graphlib * docs(ui): update nodes doc * feat(ui): wip node editor * chore(ui): rebuild api, update types * add redux-dynamic-middlewares as a dependency * feat(ui): add url host transformation * feat(ui): handle already-connected fields * feat(ui): rewrite SqliteItemStore in sqlalchemy * fix(ui): fix sqlalchemy dynamic model instantiation * feat(ui, nodes): metadata wip * feat(ui, nodes): models * feat(ui, nodes): more metadata wip * feat(ui): wip range/iterate * fix(nodes): fix sqlite typing * feat(ui): export new type for invoke component * tests(nodes): fix test instantiation of ImageField * feat(nodes): fix LoadImageInvocation * feat(nodes): add `title` ui hint * feat(nodes): make ImageField attrs optional * feat(ui): wip nodes etc * feat(nodes): roll back sqlalchemy * fix(nodes): partially address feedback * fix(backend): roll back changes to pngwriter * feat(nodes): wip address metadata feedback * feat(nodes): add seeded rng to RandomRange * feat(nodes): address feedback * feat(nodes): move GET images error handling to DiskImageStorage * feat(nodes): move GET images error handling to DiskImageStorage * fix(nodes): fix image output schema customization * feat(ui): img2img/txt2img -> linear - remove txt2img and img2img tabs - add linear tab - add initial image selection to linear parameters accordion * feat(ui): tidy graph builders * feat(ui): tidy misc * feat(ui): improve invocation union types * feat(ui): wip metadata viewer recall * feat(ui): move fonts to normal deps * feat(nodes): fix broken upload * feat(nodes): add metadata module + tests, thumbnails - `MetadataModule` is stateless and needed in places where the `InvocationContext` is not available, so have not made it a `service` - Handles loading/parsing/building metadata, and creating png info objects - added tests for MetadataModule - Lifted thumbnail stuff to util * fix(nodes): revert change to RandomRangeInvocation * feat(nodes): address feedback - make metadata a service - rip out pydantic validation, implement metadata parsing as simple functions - update tests - address other minor feedback items * fix(nodes): fix other tests * fix(nodes): add metadata service to cli * fix(nodes): fix latents/image field parsing * feat(nodes): customise LatentsField schema * feat(nodes): move metadata parsing to frontend * fix(nodes): fix metadata test --------- Co-authored-by: maryhipp <maryhipp@gmail.com> Co-authored-by: Mary Hipp <maryhipp@Marys-MacBook-Air.local>
2023-04-22 03:10:20 +00:00
stable_diffusion_step_callback(
context=context,
intermediate_state=intermediate_state,
node=self.dict(),
source_node_id=source_node_id,
2023-08-07 18:27:32 +00:00
base_model=base_model,
)
def get_conditioning_data(
self,
context: InvocationContext,
scheduler,
unet,
seed,
) -> ConditioningData:
positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
c = positive_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype)
extra_conditioning_info = c.extra_conditioning
negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name)
uc = negative_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype)
conditioning_data = ConditioningData(
unconditioned_embeddings=uc,
text_embeddings=c,
guidance_scale=self.cfg_scale,
extra=extra_conditioning_info,
postprocessing_settings=PostprocessingSettings(
threshold=0.0, # threshold,
warmup=0.2, # warmup,
h_symmetry_time_pct=None, # h_symmetry_time_pct,
v_symmetry_time_pct=None, # v_symmetry_time_pct,
),
)
conditioning_data = conditioning_data.add_scheduler_args_if_applicable(
scheduler,
# for ddim scheduler
eta=0.0, # ddim_eta
# for ancestral and sde schedulers
# flip all bits to have noise different from initial
generator=torch.Generator(device=unet.device).manual_seed(seed ^ 0xFFFFFFFF),
)
return conditioning_data
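# How cfg_scale is consumed downstream (a sketch of standard classifier-free
# guidance, not this pipeline's exact internals): each denoising step combines
# the two noise predictions as
#   noise_pred = noise_uncond + cfg_scale * (noise_cond - noise_uncond)
# so cfg_scale == 1.0 effectively ignores the negative prompt, and larger
# values pull the sample harder toward the positive conditioning.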
def create_pipeline(
self,
unet,
scheduler,
) -> StableDiffusionGeneratorPipeline:
# TODO:
# configure_model_padding(
# unet,
# self.seamless,
# self.seamless_axes,
# )
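# FakeVae lets us construct the pipeline without loading a real VAE (decoding
# is handled by a separate invocation). Assumption: the pipeline only reads
# vae.config here, e.g. deriving its scale factor from len(block_out_channels).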
class FakeVae:
class FakeVaeConfig:
def __init__(self):
self.block_out_channels = [0]
def __init__(self):
self.config = FakeVae.FakeVaeConfig()
return StableDiffusionGeneratorPipeline(
vae=FakeVae(), # TODO: oh...
text_encoder=None,
tokenizer=None,
unet=unet,
scheduler=scheduler,
safety_checker=None,
feature_extractor=None,
requires_safety_checker=False,
)
def prep_control_data(
self,
context: InvocationContext,
# really only need model for dtype and device
model: StableDiffusionGeneratorPipeline,
control_input: List[ControlField],
latents_shape: List[int],
exit_stack: ExitStack,
do_classifier_free_guidance: bool = True,
) -> List[ControlNetData]:
# assuming fixed dimensional scaling of 8:1 for image:latents
control_height_resize = latents_shape[2] * 8
control_width_resize = latents_shape[3] * 8
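# e.g. a (1, 4, 64, 96) latent (NCHW) yields a 512x768 control image:
# control_height_resize = 64 * 8, control_width_resize = 96 * 8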
if control_input is None:
control_list = None
elif isinstance(control_input, list) and len(control_input) == 0:
control_list = None
elif isinstance(control_input, ControlField):
control_list = [control_input]
elif isinstance(control_input, list) and len(control_input) > 0 and isinstance(control_input[0], ControlField):
control_list = control_input
else:
control_list = None
if control_list is None:
control_data = None
# after the handling above, any non-None control input is a list[ControlField]
else:
# FIXME: add checks to skip entry if model or image is None
# and if weight is None, populate with default 1.0?
control_data = []
control_models = []
for control_info in control_list:
control_model = exit_stack.enter_context(
context.services.model_manager.get_model(
model_name=control_info.control_model.model_name,
model_type=ModelType.ControlNet,
base_model=control_info.control_model.base_model,
context=context,
)
)
control_models.append(control_model)
control_image_field = control_info.image
input_image = context.services.images.get_pil_image(control_image_field.image_name)
# self.image.image_type, self.image.image_name
# FIXME: still need to test with different widths, heights, devices, dtypes
# and add in batch_size, num_images_per_prompt?
# and do real check for classifier_free_guidance?
# prepare_control_image should return torch.Tensor of shape(batch_size, 3, height, width)
control_image = prepare_control_image(
image=input_image,
do_classifier_free_guidance=do_classifier_free_guidance,
width=control_width_resize,
height=control_height_resize,
# batch_size=batch_size * num_images_per_prompt,
# num_images_per_prompt=num_images_per_prompt,
device=control_model.device,
dtype=control_model.dtype,
control_mode=control_info.control_mode,
resize_mode=control_info.resize_mode,
)
control_item = ControlNetData(
model=control_model,
image_tensor=control_image,
weight=control_info.control_weight,
begin_step_percent=control_info.begin_step_percent,
end_step_percent=control_info.end_step_percent,
control_mode=control_info.control_mode,
# any resizing needed should currently be happening in prepare_control_image(),
# but adding resize_mode to ControlNetData in case needed in the future
resize_mode=control_info.resize_mode,
)
control_data.append(control_item)
# MultiControlNetModel has been refactored out, just need list[ControlNetData]
return control_data
# original idea by https://github.com/AmericanPresidentJimmyCarter
# TODO: research more for second order schedulers timesteps
def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end):
if scheduler.config.get("cpu_only", False):
scheduler.set_timesteps(steps, device="cpu")
timesteps = scheduler.timesteps.to(device=device)
else:
scheduler.set_timesteps(steps, device=device)
timesteps = scheduler.timesteps
# for higher-order schedulers, only every scheduler.order-th timestep is an actual step; skip the rest
_timesteps = timesteps[:: scheduler.order]
# get start timestep index
t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start)))
t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, _timesteps)))
# get end timestep index
t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end)))
t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, _timesteps[t_start_idx:])))
# apply order to indexes
t_start_idx *= scheduler.order
t_end_idx *= scheduler.order
init_timestep = timesteps[t_start_idx : t_start_idx + 1]
timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
num_inference_steps = len(timesteps) // scheduler.order
return num_inference_steps, timesteps, init_timestep
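# Worked example of the slicing above (assuming a first-order scheduler,
# order == 1, and num_train_timesteps == 1000): with steps=10, timesteps is
# roughly [999, 899, ..., 99]. denoising_start=0.7 gives t_start_val = 300,
# so the 7 entries >= 300 are skipped (t_start_idx = 7) and init_timestep is
# [299]; denoising_end=1.0 gives t_end_val = 0, keeping the remaining
# [299, 199, 99] for an img2img-style partial denoise (num_inference_steps=3).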
def prep_inpaint_mask(self, context, latents):
if self.denoise_mask is None:
return None, None
mask = context.services.latents.get(self.denoise_mask.mask_name)
mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
if self.denoise_mask.masked_latents_name is not None:
masked_latents = context.services.latents.get(self.denoise_mask.masked_latents_name)
else:
masked_latents = None
return 1 - mask, masked_latents
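# Note the inversion: the stored mask and the denoising pipeline use opposite
# conventions for which value marks the region to change, hence 1 - mask
# (assumption inferred from the flip here, not from the pipeline's docs).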
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
with SilenceWarnings(): # this quenches NSFW nag from diffusers
seed = None
noise = None
if self.noise is not None:
noise = context.services.latents.get(self.noise.latents_name)
seed = self.noise.seed
if self.latents is not None:
latents = context.services.latents.get(self.latents.latents_name)
if seed is None:
seed = self.latents.seed
else:
latents = torch.zeros_like(noise)
if seed is None:
seed = 0
mask, masked_latents = self.prep_inpaint_mask(context, latents)
# Get the source node id (we are invoking the prepared node)
graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
source_node_id = graph_execution_state.prepared_source_mapping[self.id]
def step_callback(state: PipelineIntermediateState):
self.dispatch_progress(context, source_node_id, state, self.unet.unet.base_model)
def _lora_loader():
for lora in self.unet.loras:
lora_info = context.services.model_manager.get_model(
**lora.dict(exclude={"weight"}),
context=context,
)
yield (lora_info.context.model, lora.weight)
del lora_info
return
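# _lora_loader is a generator, so each LoRA's weights are fetched from the
# model manager one at a time as ModelPatcher.apply_lora_unet (below) consumes
# them, and the local reference is dropped immediately after the yield.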
unet_info = context.services.model_manager.get_model(
**self.unet.unet.dict(),
context=context,
)
with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
unet_info.context.model, _lora_loader()
), set_seamless(unet_info.context.model, self.unet.seamless_axes), unet_info as unet:
latents = latents.to(device=unet.device, dtype=unet.dtype)
if noise is not None:
noise = noise.to(device=unet.device, dtype=unet.dtype)
if mask is not None:
mask = mask.to(device=unet.device, dtype=unet.dtype)
if masked_latents is not None:
masked_latents = masked_latents.to(device=unet.device, dtype=unet.dtype)
scheduler = get_scheduler(
context=context,
scheduler_info=self.unet.scheduler,
scheduler_name=self.scheduler,
seed=seed,
)
pipeline = self.create_pipeline(unet, scheduler)
conditioning_data = self.get_conditioning_data(context, scheduler, unet, seed)
control_data = self.prep_control_data(
model=pipeline,
context=context,
control_input=self.control,
latents_shape=latents.shape,
# do_classifier_free_guidance=(self.cfg_scale >= 1.0))
do_classifier_free_guidance=True,
exit_stack=exit_stack,
)
num_inference_steps, timesteps, init_timestep = self.init_scheduler(
scheduler,
device=unet.device,
steps=self.steps,
denoising_start=self.denoising_start,
denoising_end=self.denoising_end,
)
result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
latents=latents,
timesteps=timesteps,
init_timestep=init_timestep,
noise=noise,
seed=seed,
mask=mask,
masked_latents=masked_latents,
num_inference_steps=num_inference_steps,
conditioning_data=conditioning_data,
control_data=control_data, # list[ControlNetData]
callback=step_callback,
)
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
result_latents = result_latents.to("cpu")
torch.cuda.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.save(name, result_latents)
return build_latents_output(latents_name=name, latents=result_latents, seed=seed)
@title("Latents to Image")
@tags("latents", "image", "vae", "l2i")
class LatentsToImageInvocation(BaseInvocation):
"""Generates an image from latents."""
type: Literal["l2i"] = "l2i"
# Inputs
latents: LatentsField = InputField(
description=FieldDescriptions.latents,
input=Input.Connection,
)
vae: VaeField = InputField(
description=FieldDescriptions.vae,
input=Input.Connection,
)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
metadata: CoreMetadata = InputField(
default=None,
description=FieldDescriptions.core_metadata,
ui_hidden=True,
)
@torch.no_grad()
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.services.latents.get(self.latents.latents_name)
vae_info = context.services.model_manager.get_model(
**self.vae.vae.dict(),
context=context,
)
with set_seamless(vae_info.context.model, self.vae.seamless_axes), vae_info as vae:
latents = latents.to(vae.device)
if self.fp32:
vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch 2.0 attention is used, the attention block does not
# need to run in float32, which can save a lot of memory
if use_torch_2_0_or_xformers:
vae.post_quant_conv.to(latents.dtype)
vae.decoder.conv_in.to(latents.dtype)
vae.decoder.mid_block.to(latents.dtype)
else:
latents = latents.float()
else:
vae.to(dtype=torch.float16)
latents = latents.half()
if self.tiled or context.services.configuration.tiled_decode:
vae.enable_tiling()
else:
vae.disable_tiling()
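# Tiled decoding (diffusers' VAE tiling) decodes the latent in overlapping
# tiles to cut peak VRAM on large images, at the cost of a little speed and
# possible subtle seams where tiles are blended.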
# clear memory as vae decode can request a lot
torch.cuda.empty_cache()
with torch.inference_mode():
# copied from diffusers pipeline
latents = latents / vae.config.scaling_factor
image = vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clamp(0, 1) # denormalize
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
np_image = image.cpu().permute(0, 2, 3, 1).float().numpy()
image = VaeImageProcessor.numpy_to_pil(np_image)[0]
torch.cuda.empty_cache()
image_dto = context.services.images.create(
image=image,
image_origin=ResourceOrigin.INTERNAL,
image_category=ImageCategory.GENERAL,
node_id=self.id,
session_id=context.graph_execution_state_id,
is_intermediate=self.is_intermediate,
metadata=self.metadata.dict() if self.metadata else None,
workflow=self.workflow,
)
return ImageOutput(
image=ImageField(image_name=image_dto.image_name),
width=image_dto.width,
height=image_dto.height,
)
LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"]
@title("Resize Latents")
@tags("latents", "resize")
class ResizeLatentsInvocation(BaseInvocation):
"""Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8."""
type: Literal["lresize"] = "lresize"
# Inputs
latents: LatentsField = InputField(
description=FieldDescriptions.latents,
input=Input.Connection,
)
width: int = InputField(
ge=64,
multiple_of=8,
description=FieldDescriptions.width,
)
height: int = InputField(
ge=64,
multiple_of=8,
description=FieldDescriptions.height,
)
mode: LATENTS_INTERPOLATION_MODE = InputField(default="bilinear", description=FieldDescriptions.interp_mode)
antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias)
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
# TODO:
device = choose_torch_device()
resized_latents = torch.nn.functional.interpolate(
latents.to(device),
size=(self.height // 8, self.width // 8),
mode=self.mode,
antialias=self.antialias if self.mode in ["bilinear", "bicubic"] else False,
)
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
resized_latents = resized_latents.to("cpu")
torch.cuda.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
# context.services.latents.set(name, resized_latents)
context.services.latents.save(name, resized_latents)
return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed)
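# Rough arithmetic for the resize above: latents are 1/8th image resolution,
# so width=600, height=512 gives a latent size of (512 // 8, 600 // 8) =
# (64, 75); since both inputs must be multiples of 8, decoding returns an
# image at exactly the requested 600x512.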
@title("Scale Latents")
@tags("latents", "resize")
class ScaleLatentsInvocation(BaseInvocation):
"""Scales latents by a given factor."""
type: Literal["lscale"] = "lscale"
# Inputs
latents: LatentsField = InputField(
description=FieldDescriptions.latents,
input=Input.Connection,
)
scale_factor: float = InputField(gt=0, description=FieldDescriptions.scale_factor)
mode: LATENTS_INTERPOLATION_MODE = InputField(default="bilinear", description=FieldDescriptions.interp_mode)
antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias)
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.services.latents.get(self.latents.latents_name)
# TODO:
device = choose_torch_device()
# resizing
resized_latents = torch.nn.functional.interpolate(
latents.to(device),
scale_factor=self.scale_factor,
mode=self.mode,
antialias=self.antialias if self.mode in ["bilinear", "bicubic"] else False,
)
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
resized_latents = resized_latents.to("cpu")
torch.cuda.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
# context.services.latents.set(name, resized_latents)
context.services.latents.save(name, resized_latents)
return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed)
@title("Image to Latents")
@tags("latents", "image", "vae", "i2l")
class ImageToLatentsInvocation(BaseInvocation):
"""Encodes an image into latents."""
type: Literal["i2l"] = "i2l"
# Inputs
image: ImageField = InputField(
description="The image to encode",
)
vae: VaeField = InputField(
description=FieldDescriptions.vae,
input=Input.Connection,
)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
@staticmethod
def vae_encode(vae_info, upcast, tiled, image_tensor):
with vae_info as vae:
orig_dtype = vae.dtype
if upcast:
vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch 2.0 attention is used, the attention block does not
# need to run in float32, which can save a lot of memory
if use_torch_2_0_or_xformers:
vae.post_quant_conv.to(orig_dtype)
vae.decoder.conv_in.to(orig_dtype)
vae.decoder.mid_block.to(orig_dtype)
# else:
# latents = latents.float()
else:
vae.to(dtype=torch.float16)
# latents = latents.half()
if tiled:
vae.enable_tiling()
else:
vae.disable_tiling()
# non_noised_latents_from_image
image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
with torch.inference_mode():
image_tensor_dist = vae.encode(image_tensor).latent_dist
latents = image_tensor_dist.sample().to(dtype=vae.dtype) # FIXME: uses torch.randn. make reproducible!
latents = vae.config.scaling_factor * latents
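# scaling_factor normalizes latents to roughly unit variance
# (0.18215 for SD 1.x VAEs; other model families may differ)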
latents = latents.to(dtype=orig_dtype)
return latents
@torch.no_grad()
def invoke(self, context: InvocationContext) -> LatentsOutput:
image = context.services.images.get_pil_image(self.image.image_name)
vae_info = context.services.model_manager.get_model(
**self.vae.vae.dict(),
context=context,
)
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image_tensor.dim() == 3:
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
latents = self.vae_encode(vae_info, self.fp32, self.tiled, image_tensor)
name = f"{context.graph_execution_state_id}__{self.id}"
latents = latents.to("cpu")
context.services.latents.save(name, latents)
return build_latents_output(latents_name=name, latents=latents, seed=None)
@title("Blend Latents")
@tags("latents", "blend")
class BlendLatentsInvocation(BaseInvocation):
"""Blend two latents using a given alpha. Latents must have same size."""
type: Literal["lblend"] = "lblend"
# Inputs
latents_a: LatentsField = InputField(
description=FieldDescriptions.latents,
input=Input.Connection,
)
latents_b: LatentsField = InputField(
description=FieldDescriptions.latents,
input=Input.Connection,
)
alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents_a = context.services.latents.get(self.latents_a.latents_name)
latents_b = context.services.latents.get(self.latents_b.latents_name)
if latents_a.shape != latents_b.shape:
raise "Latents to blend must be the same size."
# TODO:
device = choose_torch_device()
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
"""
Spherical linear interpolation
Args:
t (float/np.ndarray): Float value between 0.0 and 1.0
v0 (np.ndarray): Starting vector
v1 (np.ndarray): Final vector
DOT_THRESHOLD (float): Threshold for considering the two vectors as
collinear. Not recommended to alter this.
Returns:
v2 (np.ndarray): Interpolation vector between v0 and v1
"""
inputs_are_torch = False
if not isinstance(v0, np.ndarray):
inputs_are_torch = True
v0 = v0.detach().cpu().numpy()
if not isinstance(v1, np.ndarray):
inputs_are_torch = True
v1 = v1.detach().cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
if np.abs(dot) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2).to(device)
return v2
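# Usage sketch (illustrative values): for unit vectors at 90 degrees,
# slerp(0.5, v0, v1) returns the midpoint on the arc, where plain lerp would
# cut the chord and shrink the norm; nearly-parallel inputs
# (|dot| > DOT_THRESHOLD) fall back to lerp above, where slerp is numerically
# unstable.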
# blend
blended_latents = slerp(self.alpha, latents_a, latents_b)
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
blended_latents = blended_latents.to("cpu")
torch.cuda.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
# context.services.latents.set(name, blended_latents)
context.services.latents.save(name, blended_latents)
return build_latents_output(latents_name=name, latents=blended_latents)