# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from contextlib import ExitStack
from typing import List, Literal, Optional, Union

import einops
import torch
import torchvision.transforms as T
from diffusers.image_processor import VaeImageProcessor
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.schedulers import DPMSolverSDEScheduler
from diffusers.schedulers import SchedulerMixin as Scheduler
from pydantic import validator
from torchvision.transforms.functional import resize as tv_resize

from invokeai.app.invocations.metadata import CoreMetadata
from invokeai.app.invocations.primitives import (
    ImageField,
    ImageOutput,
    LatentsField,
    LatentsOutput,
    build_latents_output,
)
from invokeai.app.util.controlnet_utils import prepare_control_image
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_management.models import ModelType, SilenceWarnings

from ...backend.model_management.models import BaseModelType
from ...backend.model_management.lora import ModelPatcher
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (
    ConditioningData,
    ControlNetData,
    StableDiffusionGeneratorPipeline,
    image_resized_to_grid_as_tensor,
)
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import choose_precision, choose_torch_device
from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import (
    BaseInvocation,
    FieldDescriptions,
    Input,
    InputField,
    InvocationContext,
    UIType,
    tags,
    title,
)
from .compel import ConditioningField
from .controlnet_image_processors import ControlField
from .model import ModelInfo, UNetField, VaeField

DEFAULT_PRECISION = choose_precision(choose_torch_device())
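# Resolved once at import time; choose_precision() typically selects half
# precision on CUDA devices and full precision on CPU.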

SAMPLER_NAME_VALUES = Literal[tuple(list(SCHEDULER_MAP.keys()))]
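# A Literal type built from the scheduler registry at import time, so the set
# of valid scheduler names (e.g. "ddim", "euler") always tracks SCHEDULER_MAP.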


def get_scheduler(
    context: InvocationContext,
    scheduler_info: ModelInfo,
    scheduler_name: str,
    seed: int,
) -> Scheduler:
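    """Instantiate a diffusers scheduler for `scheduler_name`.

    Unknown names fall back to the "ddim" entry of SCHEDULER_MAP, and any
    per-scheduler extra config from the map is merged into the loaded model's
    scheduler config.
    """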
    scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"])
    orig_scheduler_info = context.services.model_manager.get_model(
        **scheduler_info.dict(),
        context=context,
    )
    with orig_scheduler_info as orig_scheduler:
        scheduler_config = orig_scheduler.config
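
    # SCHEDULER_MAP's extra config is merged over the model's own scheduler
    # config below; the untouched original is stashed under "_backup" so a
    # later call can restore it instead of compounding overrides.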
    if "_backup" in scheduler_config:
        scheduler_config = scheduler_config["_backup"]
    scheduler_config = {
        **scheduler_config,
        **scheduler_extra_config,
        "_backup": scheduler_config,
    }

    # make dpmpp_sde reproducible (the seed can only be passed to
    # DPMSolverSDEScheduler in its initializer)
    if scheduler_class is DPMSolverSDEScheduler:
        scheduler_config["noise_sampler_seed"] = seed

    scheduler = scheduler_class.from_config(scheduler_config)

    # hack copied over from generate.py
    if not hasattr(scheduler, "uses_inpainting_model"):
        scheduler.uses_inpainting_model = lambda: False
    return scheduler
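
# A minimal usage sketch, assuming a live InvocationContext `ctx` and a
# scheduler ModelInfo `info` taken from a main-model loader:
#
#   scheduler = get_scheduler(ctx, info, "euler", seed=0)
#   scheduler.set_timesteps(num_inference_steps=20)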


@title("Denoise Latents")
@tags("latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l")
class DenoiseLatentsInvocation(BaseInvocation):
    """Denoises noisy latents to decodable images"""

    type: Literal["denoise_latents"] = "denoise_latents"

    # Inputs
    positive_conditioning: ConditioningField = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection
    )
    negative_conditioning: ConditioningField = InputField(
        description=FieldDescriptions.negative_cond, input=Input.Connection
    )
    noise: Optional[LatentsField] = InputField(description=FieldDescriptions.noise, input=Input.Connection)
    steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
    cfg_scale: Union[float, List[float]] = InputField(
        default=7.5, ge=1, description=FieldDescriptions.cfg_scale, ui_type=UIType.Float
    )
    denoising_start: float = InputField(default=0.0, ge=0, le=1, description=FieldDescriptions.denoising_start)
    denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end)
    scheduler: SAMPLER_NAME_VALUES = InputField(default="euler", description=FieldDescriptions.scheduler)
    unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection)
    control: Union[ControlField, list[ControlField]] = InputField(
        default=None, description=FieldDescriptions.control, input=Input.Connection
    )
    latents: Optional[LatentsField] = InputField(description=FieldDescriptions.latents, input=Input.Connection)
    mask: Optional[ImageField] = InputField(
        default=None,
        description=FieldDescriptions.mask,
    )
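
    # Note: denoising_start/denoising_end select the fraction of the noise
    # schedule to run (0.0-1.0); partial runs such as img2img-style refinement
    # start from the `latents` input rather than pure noise.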
@validator("cfg_scale")
|
|
|
|
def ge_one(cls, v):
|
|
|
|
"""validate that all cfg_scale values are >= 1"""
|
|
|
|
if isinstance(v, list):
|
|
|
|
for i in v:
|
|
|
|
if i < 1:
|
2023-07-28 13:46:44 +00:00
|
|
|
raise ValueError("cfg_scale must be greater than 1")
        else:
            if v < 1:
                raise ValueError("cfg_scale must be greater than or equal to 1")
        return v
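
    # e.g. cfg_scale=7.5 or cfg_scale=[7.5, 6.0, 4.5] pass validation; any
    # value below 1 raises a ValueError when the node is constructed.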

    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
        base_model: BaseModelType,
    ) -> None:
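        """Relay one step of denoising progress to listeners via the step callback."""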
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
            base_model=base_model,
|
        )

    def get_conditioning_data(
        self,
        context: InvocationContext,
        scheduler,
        unet,
        seed,
    ) -> ConditioningData:
        positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
        c = positive_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype)
        extra_conditioning_info = c.extra_conditioning

        negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name)
        uc = negative_cond_data.conditionings[0].to(device=unet.device, dtype=unet.dtype)

        conditioning_data = ConditioningData(
            unconditioned_embeddings=uc,
            text_embeddings=c,
            guidance_scale=self.cfg_scale,
            extra=extra_conditioning_info,
            postprocessing_settings=PostprocessingSettings(
                threshold=0.0,  # threshold,
                warmup=0.2,  # warmup,
                h_symmetry_time_pct=None,  # h_symmetry_time_pct,
                v_symmetry_time_pct=None,  # v_symmetry_time_pct,
            ),
        )

        conditioning_data = conditioning_data.add_scheduler_args_if_applicable(
            scheduler,
            # for ddim scheduler
            eta=0.0,  # ddim_eta
            # for ancestral and sde schedulers
            # flip all bits to have noise different from initial
            generator=torch.Generator(device=unet.device).manual_seed(seed ^ 0xFFFFFFFF),
        )
        return conditioning_data
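
    # A minimal illustrative sketch (not used by any invocation): why the seed
    # is XOR-ed above. The initial latent noise comes from a generator seeded
    # with `seed`; ancestral/SDE schedulers draw their own per-step noise, so
    # seeding that generator with every bit flipped gives a different, but
    # still reproducible, stream.
    @staticmethod
    def _example_scheduler_noise_seed() -> None:
        seed = 1234
        initial = torch.randn(4, generator=torch.Generator().manual_seed(seed))
        per_step = torch.randn(4, generator=torch.Generator().manual_seed(seed ^ 0xFFFFFFFF))
        assert not torch.equal(initial, per_step)  # distinct streams from one user seed
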
    def create_pipeline(
        self,
        unet,
        scheduler,
    ) -> StableDiffusionGeneratorPipeline:
        # TODO:
        # configure_model_padding(
        #    unet,
        #    self.seamless,
        #    self.seamless_axes,
        # )

        class FakeVae:
            class FakeVaeConfig:
                def __init__(self):
                    self.block_out_channels = [0]

            def __init__(self):
                self.config = FakeVae.FakeVaeConfig()

        return StableDiffusionGeneratorPipeline(
            vae=FakeVae(),  # TODO: oh...
            text_encoder=None,
            tokenizer=None,
            unet=unet,
            scheduler=scheduler,
            safety_checker=None,
            feature_extractor=None,
            requires_safety_checker=False,
        )

    def prep_control_data(
        self,
        context: InvocationContext,
        # really only need model for dtype and device
        model: StableDiffusionGeneratorPipeline,
        control_input: List[ControlField],
        latents_shape: List[int],
        exit_stack: ExitStack,
        do_classifier_free_guidance: bool = True,
    ) -> List[ControlNetData]:
        # assuming fixed dimensional scaling of 8:1 for image:latents
        control_height_resize = latents_shape[2] * 8
        control_width_resize = latents_shape[3] * 8
        if control_input is None:
            control_list = None
        elif isinstance(control_input, list) and len(control_input) == 0:
            control_list = None
        elif isinstance(control_input, ControlField):
            control_list = [control_input]
        elif isinstance(control_input, list) and len(control_input) > 0 and isinstance(control_input[0], ControlField):
            control_list = control_input
        else:
            control_list = None
        if control_list is None:
            control_data = None
            # from above handling, any control that is not None should now be of type list[ControlField]
        else:
            # FIXME: add checks to skip entry if model or image is None
            # and if weight is None, populate with default 1.0?
            control_data = []
            control_models = []
            for control_info in control_list:
                control_model = exit_stack.enter_context(
                    context.services.model_manager.get_model(
                        model_name=control_info.control_model.model_name,
                        model_type=ModelType.ControlNet,
                        base_model=control_info.control_model.base_model,
                        context=context,
                    )
                )

                control_models.append(control_model)
                control_image_field = control_info.image
                input_image = context.services.images.get_pil_image(control_image_field.image_name)
                # self.image.image_type, self.image.image_name
                # FIXME: still need to test with different widths, heights, devices, dtypes
                # and add in batch_size, num_images_per_prompt?
                # and do real check for classifier_free_guidance?
                # prepare_control_image should return torch.Tensor of shape (batch_size, 3, height, width)
                control_image = prepare_control_image(
                    image=input_image,
                    do_classifier_free_guidance=do_classifier_free_guidance,
                    width=control_width_resize,
                    height=control_height_resize,
                    # batch_size=batch_size * num_images_per_prompt,
                    # num_images_per_prompt=num_images_per_prompt,
                    device=control_model.device,
                    dtype=control_model.dtype,
                    control_mode=control_info.control_mode,
                    resize_mode=control_info.resize_mode,
                )
                control_item = ControlNetData(
                    model=control_model,
                    image_tensor=control_image,
                    weight=control_info.control_weight,
                    begin_step_percent=control_info.begin_step_percent,
                    end_step_percent=control_info.end_step_percent,
                    control_mode=control_info.control_mode,
                    # any resizing needed should currently be happening in prepare_control_image(),
                    # but adding resize_mode to ControlNetData in case needed in the future
                    resize_mode=control_info.resize_mode,
                )
                control_data.append(control_item)
        # MultiControlNetModel has been refactored out, just need list[ControlNetData]
        return control_data
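
    # Illustrative sketch only: the fixed 8:1 image:latent scaling assumed
    # above means a (1, 4, 64, 96) latent maps to a 512x768 control image.
    @staticmethod
    def _example_control_dimensions() -> None:
        latents_shape = [1, 4, 64, 96]  # (batch, channels, height // 8, width // 8)
        control_height_resize = latents_shape[2] * 8
        control_width_resize = latents_shape[3] * 8
        assert (control_height_resize, control_width_resize) == (512, 768)
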
    # original idea by https://github.com/AmericanPresidentJimmyCarter
    # TODO: research more for second order schedulers timesteps
    def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end):
        num_inference_steps = steps
        if scheduler.config.get("cpu_only", False):
            scheduler.set_timesteps(num_inference_steps, device="cpu")
            timesteps = scheduler.timesteps.to(device=device)
        else:
            scheduler.set_timesteps(num_inference_steps, device=device)
            timesteps = scheduler.timesteps

        # apply denoising_start
        t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start)))
        t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, timesteps)))
        timesteps = timesteps[t_start_idx:]
        if scheduler.order == 2 and t_start_idx > 0:
            timesteps = timesteps[1:]

        # save start timestep to apply noise
        init_timestep = timesteps[:1]

        # apply denoising_end
        t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end)))
        t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, timesteps)))
        if scheduler.order == 2 and t_end_idx > 0:
            t_end_idx += 1
        timesteps = timesteps[:t_end_idx]

        # calculate step count based on scheduler order
        num_inference_steps = len(timesteps)
        if scheduler.order == 2:
            num_inference_steps += num_inference_steps % 2
            num_inference_steps = num_inference_steps // 2

        return num_inference_steps, timesteps, init_timestep
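
    # Illustrative sketch only: how denoising_start/denoising_end trim a toy
    # 1000-timestep schedule sampled at 10 steps. The names and numbers here
    # are hypothetical, chosen to mirror the arithmetic in init_scheduler above.
    @staticmethod
    def _example_denoising_window() -> None:
        num_train_timesteps = 1000
        timesteps = torch.arange(999, -1, -100)  # [999, 899, ..., 99]
        denoising_start, denoising_end = 0.3, 0.8

        t_start_val = int(round(num_train_timesteps * (1 - denoising_start)))  # 700
        t_start_idx = int((timesteps >= t_start_val).sum())  # the first 3 steps are skipped
        window = timesteps[t_start_idx:]
        t_end_val = int(round(num_train_timesteps * (1 - denoising_end)))  # 200
        window = window[: int((window >= t_end_val).sum())]
        assert window.tolist() == [699, 599, 499, 399, 299]  # only the 30%..80% noise range is denoised
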
    def prep_mask_tensor(self, mask, context, latents):
        if mask is None:
            return None

        mask_image = context.services.images.get_pil_image(mask.image_name)
        if mask_image.mode != "L":
            # FIXME: why do we get passed an RGB image here? We can only use single-channel.
            mask_image = mask_image.convert("L")
        mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
        if mask_tensor.dim() == 3:
            mask_tensor = mask_tensor.unsqueeze(0)
        mask_tensor = tv_resize(mask_tensor, latents.shape[-2:], T.InterpolationMode.BILINEAR)
        # invert so the mask convention matches what latents_from_embeddings expects
        return 1 - mask_tensor
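
    # Illustrative sketch only: an L-mode PIL mask becomes a (1, 1, H/8, W/8)
    # tensor aligned with the latent grid. ToTensor stands in for
    # image_resized_to_grid_as_tensor here; the sizes are arbitrary.
    @staticmethod
    def _example_mask_to_latent_grid() -> None:
        from PIL import Image

        mask_image = Image.new("L", (512, 512), color=255)  # all-white mask
        mask_tensor = T.ToTensor()(mask_image).unsqueeze(0)  # (1, 1, 512, 512) in [0, 1]
        mask_tensor = tv_resize(mask_tensor, [512 // 8, 512 // 8], T.InterpolationMode.BILINEAR)
        assert mask_tensor.shape == (1, 1, 64, 64)
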
    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        with SilenceWarnings():  # this quenches NSFW nag from diffusers
            seed = None
            noise = None
            if self.noise is not None:
                noise = context.services.latents.get(self.noise.latents_name)
                seed = self.noise.seed

            if self.latents is not None:
                latents = context.services.latents.get(self.latents.latents_name)
                if seed is None:
                    seed = self.latents.seed
            else:
                latents = torch.zeros_like(noise)

            if seed is None:
                seed = 0

            mask = self.prep_mask_tensor(self.mask, context, latents)

            # Get the source node id (we are invoking the prepared node)
            graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
            source_node_id = graph_execution_state.prepared_source_mapping[self.id]

            def step_callback(state: PipelineIntermediateState):
                self.dispatch_progress(context, source_node_id, state, self.unet.unet.base_model)

            def _lora_loader():
                for lora in self.unet.loras:
                    lora_info = context.services.model_manager.get_model(
                        **lora.dict(exclude={"weight"}),
                        context=context,
                    )
                    yield (lora_info.context.model, lora.weight)
                    del lora_info
                return

            unet_info = context.services.model_manager.get_model(
                **self.unet.unet.dict(),
                context=context,
            )
            with ExitStack() as exit_stack, ModelPatcher.apply_lora_unet(
                unet_info.context.model, _lora_loader()
            ), unet_info as unet:
                latents = latents.to(device=unet.device, dtype=unet.dtype)
                if noise is not None:
                    noise = noise.to(device=unet.device, dtype=unet.dtype)
                if mask is not None:
                    mask = mask.to(device=unet.device, dtype=unet.dtype)

                scheduler = get_scheduler(
                    context=context,
                    scheduler_info=self.unet.scheduler,
                    scheduler_name=self.scheduler,
                    seed=seed,
                )

                pipeline = self.create_pipeline(unet, scheduler)
                conditioning_data = self.get_conditioning_data(context, scheduler, unet, seed)

                control_data = self.prep_control_data(
                    model=pipeline,
                    context=context,
                    control_input=self.control,
                    latents_shape=latents.shape,
                    # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
                    do_classifier_free_guidance=True,
                    exit_stack=exit_stack,
                )

                num_inference_steps, timesteps, init_timestep = self.init_scheduler(
                    scheduler,
                    device=unet.device,
                    steps=self.steps,
                    denoising_start=self.denoising_start,
                    denoising_end=self.denoising_end,
                )

                result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
                    latents=latents,
                    timesteps=timesteps,
                    init_timestep=init_timestep,
                    noise=noise,
                    seed=seed,
                    mask=mask,
                    num_inference_steps=num_inference_steps,
                    conditioning_data=conditioning_data,
                    control_data=control_data,  # list[ControlNetData]
                    callback=step_callback,
                )

            # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
            result_latents = result_latents.to("cpu")
            torch.cuda.empty_cache()

            name = f"{context.graph_execution_state_id}__{self.id}"
            context.services.latents.save(name, result_latents)
            return build_latents_output(latents_name=name, latents=result_latents, seed=seed)

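
# A minimal sketch of the lazy-loading generator pattern used by _lora_loader
# above: each LoRA payload is only materialized as the consumer iterates, and
# the local reference is dropped when the generator resumes, so one patched
# model can be released before the next loads. `load_model` is a hypothetical
# stand-in for context.services.model_manager.get_model.
def _example_lora_loader(lora_specs: list, load_model):
    for spec in lora_specs:
        weight = spec.get("weight", 1.0)
        info = load_model(**{k: v for k, v in spec.items() if k != "weight"})
        yield (info.context.model, weight)
        del info  # runs when the consumer asks for the next item

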
@title("Latents to Image")
|
|
|
|
@tags("latents", "image", "vae")
|
2023-05-13 13:08:03 +00:00
|
|
|
class LatentsToImageInvocation(BaseInvocation):
|
2023-04-06 04:06:05 +00:00
|
|
|
"""Generates an image from latents."""
|
|
|
|
|
|
|
|
type: Literal["l2i"] = "l2i"
|
|
|
|
|
|
|
|
# Inputs
|
2023-08-14 03:23:09 +00:00
|
|
|
latents: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
|
|
|
)
|
|
|
|
vae: VaeField = InputField(
|
|
|
|
description=FieldDescriptions.vae,
|
|
|
|
input=Input.Connection,
|
|
|
|
)
|
|
|
|
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
|
|
|
|
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
|
|
|
|
metadata: CoreMetadata = InputField(
|
|
|
|
default=None,
|
|
|
|
description=FieldDescriptions.core_metadata,
|
|
|
|
ui_hidden=True,
|
2023-07-28 13:46:44 +00:00
|
|
|
)
|
2023-04-10 09:07:48 +00:00
|
|
|
|
2023-04-06 04:06:05 +00:00
|
|
|
@torch.no_grad()
|
|
|
|
def invoke(self, context: InvocationContext) -> ImageOutput:
|
|
|
|
latents = context.services.latents.get(self.latents.latents_name)
|
|
|
|
|
2023-05-13 13:08:03 +00:00
|
|
|
vae_info = context.services.model_manager.get_model(
|
2023-07-28 13:46:44 +00:00
|
|
|
**self.vae.vae.dict(),
|
|
|
|
context=context,
|
2023-05-13 13:08:03 +00:00
|
|
|
)
|
2023-04-06 04:06:05 +00:00
|
|
|
|
2023-05-14 00:06:26 +00:00
|
|
|
with vae_info as vae:
|
2023-07-18 13:20:25 +00:00
|
|
|
latents = latents.to(vae.device)
|
2023-07-11 15:19:36 +00:00
|
|
|
if self.fp32:
|
|
|
|
vae.to(dtype=torch.float32)
|
|
|
|
|
|
|
|
use_torch_2_0_or_xformers = isinstance(
|
|
|
|
vae.decoder.mid_block.attentions[0].processor,
|
|
|
|
(
|
|
|
|
AttnProcessor2_0,
|
|
|
|
XFormersAttnProcessor,
|
|
|
|
LoRAXFormersAttnProcessor,
|
|
|
|
LoRAAttnProcessor2_0,
|
|
|
|
),
|
|
|
|
)
|
|
|
|
# if xformers or torch_2_0 is used attention block does not need
|
|
|
|
# to be in float32 which can save lots of memory
|
|
|
|
if use_torch_2_0_or_xformers:
|
|
|
|
vae.post_quant_conv.to(latents.dtype)
|
|
|
|
vae.decoder.conv_in.to(latents.dtype)
|
|
|
|
vae.decoder.mid_block.to(latents.dtype)
|
|
|
|
else:
|
|
|
|
latents = latents.float()
|
|
|
|
|
|
|
|
else:
|
|
|
|
vae.to(dtype=torch.float16)
|
|
|
|
latents = latents.half()
|
|
|
|
|
2023-05-26 03:28:15 +00:00
|
|
|
if self.tiled or context.services.configuration.tiled_decode:
|
2023-05-13 13:08:03 +00:00
|
|
|
vae.enable_tiling()
|
|
|
|
else:
|
|
|
|
vae.disable_tiling()
|

            # clear memory as vae decode can request a lot
            torch.cuda.empty_cache()

            with torch.inference_mode():
                # copied from diffusers pipeline
                latents = latents / vae.config.scaling_factor
                image = vae.decode(latents, return_dict=False)[0]
                image = (image / 2 + 0.5).clamp(0, 1)  # denormalize
                # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
                np_image = image.cpu().permute(0, 2, 3, 1).float().numpy()

                image = VaeImageProcessor.numpy_to_pil(np_image)[0]

        torch.cuda.empty_cache()

        image_dto = context.services.images.create(
            image=image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
            metadata=self.metadata.dict() if self.metadata else None,
        )

        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )

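
# A hedged, standalone sketch of the decode path above: latents are rescaled by
# the VAE's scaling factor, decoded, denormalized from [-1, 1] to [0, 1], and
# converted to PIL. `vae` and `latents` are assumed to be prepared the same way
# as in LatentsToImageInvocation.invoke.
def _example_decode_latents(vae, latents):
    with torch.inference_mode():
        scaled = latents.to(vae.device) / vae.config.scaling_factor
        image = vae.decode(scaled, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)  # denormalize to [0, 1]
        np_image = image.cpu().permute(0, 2, 3, 1).float().numpy()
    return VaeImageProcessor.numpy_to_pil(np_image)[0]

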
LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"]


@title("Resize Latents")
|
|
|
|
@tags("latents", "resize")
|
2023-04-24 12:07:53 +00:00
|
|
|
class ResizeLatentsInvocation(BaseInvocation):
|
2023-04-26 23:59:22 +00:00
|
|
|
"""Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8."""
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
type: Literal["lresize"] = "lresize"
|
|
|
|
|
|
|
|
# Inputs
|
2023-08-14 03:23:09 +00:00
|
|
|
latents: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
2023-07-28 13:46:44 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
width: int = InputField(
|
|
|
|
ge=64,
|
|
|
|
multiple_of=8,
|
|
|
|
description=FieldDescriptions.width,
|
|
|
|
)
|
|
|
|
height: int = InputField(
|
|
|
|
ge=64,
|
|
|
|
multiple_of=8,
|
|
|
|
description=FieldDescriptions.width,
|
|
|
|
)
|
|
|
|
mode: LATENTS_INTERPOLATION_MODE = InputField(default="bilinear", description=FieldDescriptions.interp_mode)
|
|
|
|
antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
|
|
|
latents = context.services.latents.get(self.latents.latents_name)
|
2023-04-26 11:45:05 +00:00
|
|
|
|
2023-07-18 13:20:25 +00:00
|
|
|
# TODO:
|
2023-07-28 13:46:44 +00:00
|
|
|
device = choose_torch_device()
|
2023-07-18 13:20:25 +00:00
|
|
|
|
2023-04-24 12:07:53 +00:00
|
|
|
resized_latents = torch.nn.functional.interpolate(
|
2023-07-28 13:46:44 +00:00
|
|
|
latents.to(device),
|
|
|
|
size=(self.height // 8, self.width // 8),
|
|
|
|
mode=self.mode,
|
|
|
|
antialias=self.antialias if self.mode in ["bilinear", "bicubic"] else False,
|
2023-07-05 17:00:43 +00:00
|
|
|
)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
|
2023-07-18 13:20:25 +00:00
|
|
|
resized_latents = resized_latents.to("cpu")
|
2023-04-24 12:07:53 +00:00
|
|
|
torch.cuda.empty_cache()
|
|
|
|
|
|
|
|
name = f"{context.graph_execution_state_id}__{self.id}"
|
2023-05-26 23:47:27 +00:00
|
|
|
# context.services.latents.set(name, resized_latents)
|
2023-05-21 07:26:46 +00:00
|
|
|
context.services.latents.save(name, resized_latents)
|
2023-08-08 01:00:33 +00:00
|
|
|
return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
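
# Illustrative sketch only: the pixel dimensions above are floor-divided by 8
# to land on the latent grid, so a 513x769 request resolves to the same 64x96
# latent that 512x768 would.
def _example_resize_latents() -> None:
    latents = torch.randn(1, 4, 32, 32)
    resized = torch.nn.functional.interpolate(
        latents, size=(769 // 8, 513 // 8), mode="bilinear", antialias=False
    )
    assert resized.shape == (1, 4, 96, 64)  # (batch, channels, height // 8, width // 8)

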
@title("Scale Latents")
|
|
|
|
@tags("latents", "resize")
|
2023-04-24 12:07:53 +00:00
|
|
|
class ScaleLatentsInvocation(BaseInvocation):
|
|
|
|
"""Scales latents by a given factor."""
|
|
|
|
|
|
|
|
type: Literal["lscale"] = "lscale"
|
|
|
|
|
|
|
|
# Inputs
|
2023-08-14 03:23:09 +00:00
|
|
|
latents: LatentsField = InputField(
|
|
|
|
description=FieldDescriptions.latents,
|
|
|
|
input=Input.Connection,
|
2023-07-28 13:46:44 +00:00
|
|
|
)
|
2023-08-14 03:23:09 +00:00
|
|
|
scale_factor: float = InputField(gt=0, description=FieldDescriptions.scale_factor)
|
|
|
|
mode: LATENTS_INTERPOLATION_MODE = InputField(default="bilinear", description=FieldDescriptions.interp_mode)
|
|
|
|
antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
|
|
|
latents = context.services.latents.get(self.latents.latents_name)
|
|
|
|
|
2023-07-18 13:20:25 +00:00
|
|
|
# TODO:
|
2023-07-28 13:46:44 +00:00
|
|
|
device = choose_torch_device()
|
2023-07-18 13:20:25 +00:00
|
|
|
|
2023-04-24 12:07:53 +00:00
|
|
|
# resizing
|
|
|
|
resized_latents = torch.nn.functional.interpolate(
|
2023-07-28 13:46:44 +00:00
|
|
|
latents.to(device),
|
|
|
|
scale_factor=self.scale_factor,
|
|
|
|
mode=self.mode,
|
|
|
|
antialias=self.antialias if self.mode in ["bilinear", "bicubic"] else False,
|
2023-07-05 17:00:43 +00:00
|
|
|
)
|
2023-04-24 12:07:53 +00:00
|
|
|
|
|
|
|
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
|
2023-07-18 13:20:25 +00:00
|
|
|
resized_latents = resized_latents.to("cpu")
|
2023-04-24 12:07:53 +00:00
|
|
|
torch.cuda.empty_cache()
|
|
|
|
|
|
|
|
name = f"{context.graph_execution_state_id}__{self.id}"
|
2023-05-26 23:47:27 +00:00
|
|
|
# context.services.latents.set(name, resized_latents)
|
2023-05-21 07:26:46 +00:00
|
|
|
context.services.latents.save(name, resized_latents)
|
2023-08-08 01:00:33 +00:00
|
|
|
return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
|
|
|
|
2023-08-14 03:23:09 +00:00
|
|
|
@title("Image to Latents")
|
|
|
|
@tags("latents", "image", "vae")
|
2023-05-13 13:08:03 +00:00
|
|
|
class ImageToLatentsInvocation(BaseInvocation):
|
2023-05-05 05:15:55 +00:00
|
|
|
"""Encodes an image into latents."""
|
|
|
|
|
|
|
|
type: Literal["i2l"] = "i2l"
|
|
|
|
|
|
|
|
# Inputs
|
2023-08-14 03:23:09 +00:00
|
|
|
image: ImageField = InputField(
|
|
|
|
description="The image to encode",
|
|
|
|
)
|
|
|
|
vae: VaeField = InputField(
|
|
|
|
description=FieldDescriptions.vae,
|
|
|
|
input=Input.Connection,
|
|
|
|
)
|
|
|
|
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
|
|
|
|
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
def invoke(self, context: InvocationContext) -> LatentsOutput:
|
2023-05-26 23:47:27 +00:00
|
|
|
# image = context.services.images.get(
|
|
|
|
# self.image.image_type, self.image.image_name
|
|
|
|
# )
|
2023-06-14 11:40:09 +00:00
|
|
|
image = context.services.images.get_pil_image(self.image.image_name)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
2023-07-28 13:46:44 +00:00
|
|
|
# vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
|
2023-05-13 13:08:03 +00:00
|
|
|
vae_info = context.services.model_manager.get_model(
|
2023-07-28 13:46:44 +00:00
|
|
|
**self.vae.vae.dict(),
|
|
|
|
context=context,
|
2023-05-13 13:08:03 +00:00
|
|
|
)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
|
|
|
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
|
|
|
|
if image_tensor.dim() == 3:
|
|
|
|
image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
|
|
|
|
|
2023-05-14 00:06:26 +00:00
|
|
|
with vae_info as vae:
|
2023-07-16 03:00:37 +00:00
|
|
|
orig_dtype = vae.dtype
|
|
|
|
if self.fp32:
|
|
|
|
vae.to(dtype=torch.float32)
|
|
|
|
|
|
|
|
use_torch_2_0_or_xformers = isinstance(
|
|
|
|
vae.decoder.mid_block.attentions[0].processor,
|
|
|
|
(
|
|
|
|
AttnProcessor2_0,
|
|
|
|
XFormersAttnProcessor,
|
|
|
|
LoRAXFormersAttnProcessor,
|
|
|
|
LoRAAttnProcessor2_0,
|
|
|
|
),
|
|
|
|
)
|
|
|
|
# if xformers or torch_2_0 is used attention block does not need
|
|
|
|
# to be in float32 which can save lots of memory
|
|
|
|
if use_torch_2_0_or_xformers:
|
|
|
|
vae.post_quant_conv.to(orig_dtype)
|
|
|
|
vae.decoder.conv_in.to(orig_dtype)
|
|
|
|
vae.decoder.mid_block.to(orig_dtype)
|
2023-07-28 13:46:44 +00:00
|
|
|
# else:
|
2023-07-16 03:00:37 +00:00
|
|
|
# latents = latents.float()
|
|
|
|
|
|
|
|
else:
|
|
|
|
vae.to(dtype=torch.float16)
|
2023-07-28 13:46:44 +00:00
|
|
|
# latents = latents.half()
|
2023-07-16 03:00:37 +00:00
|
|
|
|
2023-05-13 13:08:03 +00:00
|
|
|
if self.tiled:
|
|
|
|
vae.enable_tiling()
|
|
|
|
else:
|
|
|
|
vae.disable_tiling()
|
|
|
|
|
2023-05-14 00:06:26 +00:00
|
|
|
# non_noised_latents_from_image
|
|
|
|
image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
|
|
|
|
with torch.inference_mode():
|
|
|
|
image_tensor_dist = vae.encode(image_tensor).latent_dist
|
2023-07-28 13:46:44 +00:00
|
|
|
latents = image_tensor_dist.sample().to(dtype=vae.dtype) # FIXME: uses torch.randn. make reproducible!
|
2023-05-14 00:06:26 +00:00
|
|
|
|
2023-07-20 15:54:51 +00:00
|
|
|
latents = vae.config.scaling_factor * latents
|
2023-07-16 03:00:37 +00:00
|
|
|
latents = latents.to(dtype=orig_dtype)
|
2023-05-05 05:15:55 +00:00
|
|
|
|
|
|
|
name = f"{context.graph_execution_state_id}__{self.id}"
|
2023-07-18 13:20:25 +00:00
|
|
|
latents = latents.to("cpu")
|
2023-05-21 07:26:46 +00:00
|
|
|
context.services.latents.save(name, latents)
|
2023-08-08 01:00:33 +00:00
|
|
|
return build_latents_output(latents_name=name, latents=latents, seed=None)
|
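
# A hedged sketch of how the FIXME above could be addressed: diffusers'
# DiagonalGaussianDistribution.sample() accepts an optional torch.Generator,
# so seeding one makes the encode deterministic. `vae` and `image_tensor` are
# assumed to be prepared as in ImageToLatentsInvocation.invoke.
def _example_reproducible_encode(vae, image_tensor, seed: int = 0):
    generator = torch.Generator(device=image_tensor.device).manual_seed(seed)
    with torch.inference_mode():
        latents = vae.encode(image_tensor).latent_dist.sample(generator=generator)
    return vae.config.scaling_factor * latents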