# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from contextlib import ExitStack
from typing import List, Literal, Optional, Union

import einops
import torch
from diffusers import ControlNetModel
from diffusers.image_processor import VaeImageProcessor
from diffusers.schedulers import SchedulerMixin as Scheduler
from pydantic import BaseModel, Field, validator

from invokeai.app.invocations.metadata import CoreMetadata
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_management.models.base import ModelType

from ...backend.model_management.lora import ModelPatcher
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (
    ConditioningData, ControlNetData, StableDiffusionGeneratorPipeline,
    image_resized_to_grid_as_tensor)
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import \
    PostprocessingSettings
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import choose_torch_device, torch_dtype, choose_precision
from ..models.image import ImageCategory, ImageField, ResourceOrigin
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
                             InvocationConfig, InvocationContext)
from .compel import ConditioningField
from .controlnet_image_processors import ControlField
from .image import ImageOutput
from .model import ModelInfo, UNetField, VaeField
from invokeai.app.util.controlnet_utils import prepare_control_image

from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)

DEFAULT_PRECISION = choose_precision(choose_torch_device())
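# NOTE: choose_precision() is assumed to return a dtype name such as "float16"
# (CUDA) or "float32" (CPU), so DEFAULT_PRECISION captures a per-device default
# once at import time for invocations to reuse.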


class LatentsField(BaseModel):
    """A latents field used for passing latents between invocations"""

    latents_name: Optional[str] = Field(
        default=None, description="The name of the latents")

    class Config:
        schema_extra = {"required": ["latents_name"]}


class LatentsOutput(BaseInvocationOutput):
    """Base class for invocations that output latents"""
    # fmt: off
    type: Literal["latents_output"] = "latents_output"

    # Inputs
    latents: LatentsField = Field(default=None, description="The output latents")
    width: int = Field(description="The width of the latents in pixels")
    height: int = Field(description="The height of the latents in pixels")
    # fmt: on


def build_latents_output(latents_name: str, latents: torch.Tensor):
    return LatentsOutput(
        latents=LatentsField(latents_name=latents_name),
        width=latents.size()[3] * 8,
        height=latents.size()[2] * 8,
    )
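
# Example (illustrative, not executed at import time): the latents' spatial dims
# are 1/8 of the image resolution, so build_latents_output scales them back up to
# report pixel width/height:
#
#     latents = torch.zeros(1, 4, 64, 96)                   # (batch, channels, height/8, width/8)
#     out = build_latents_output("some_latents", latents)
#     # out.width == 96 * 8 == 768, out.height == 64 * 8 == 512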

SAMPLER_NAME_VALUES = Literal[
    tuple(list(SCHEDULER_MAP.keys()))
]
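# SAMPLER_NAME_VALUES is built dynamically from SCHEDULER_MAP, so the accepted
# scheduler names (e.g. "ddim", "euler", ...) track whatever the backend registers
# rather than being hard-coded here.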

def get_scheduler(
    context: InvocationContext,
    scheduler_info: ModelInfo,
    scheduler_name: str,
) -> Scheduler:
    scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(
        scheduler_name, SCHEDULER_MAP['ddim']
    )
    orig_scheduler_info = context.services.model_manager.get_model(
        **scheduler_info.dict(), context=context,
    )
    with orig_scheduler_info as orig_scheduler:
        scheduler_config = orig_scheduler.config

    if "_backup" in scheduler_config:
        scheduler_config = scheduler_config["_backup"]
    scheduler_config = {
        **scheduler_config,
        **scheduler_extra_config,
        "_backup": scheduler_config,
    }
    scheduler = scheduler_class.from_config(scheduler_config)

    # hack copied over from generate.py
    if not hasattr(scheduler, 'uses_inpainting_model'):
        scheduler.uses_inpainting_model = lambda: False
    return scheduler
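
# Illustrative call (the caller attributes shown are hypothetical for this sketch):
#
#     scheduler = get_scheduler(
#         context=context,                     # InvocationContext of the running node
#         scheduler_info=self.unet.scheduler,  # ModelInfo describing the scheduler submodel
#         scheduler_name=self.scheduler,       # e.g. "euler"; unknown names fall back to "ddim"
#     )
#
# The pristine diffusers config is stashed under "_backup" before the per-scheduler
# overrides are merged in, so a subsequent call can rebuild from the original config
# instead of layering overrides on top of overrides.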


# Text to image
class TextToLatentsInvocation(BaseInvocation):
    """Generates latents from conditionings."""

    type: Literal["t2l"] = "t2l"

    # Inputs
    # fmt: off
    positive_conditioning: Optional[ConditioningField] = Field(description="Positive conditioning for generation")
    negative_conditioning: Optional[ConditioningField] = Field(description="Negative conditioning for generation")
    noise: Optional[LatentsField] = Field(description="The noise to use")
    steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
    cfg_scale: Union[float, List[float]] = Field(default=7.5, ge=1, description="The Classifier-Free Guidance scale; higher values may give results closer to the prompt", )
    scheduler: SAMPLER_NAME_VALUES = Field(default="euler", description="The scheduler to use")
    unet: UNetField = Field(default=None, description="UNet submodel")
    control: Union[ControlField, list[ControlField]] = Field(default=None, description="The control to use")
    # seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
    # seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
    # fmt: on
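
    # Example (illustrative; values are arbitrary): a "t2l" node in a session graph
    # might be serialised roughly as
    #
    #     {
    #         "id": "t2l_node",
    #         "type": "t2l",
    #         "positive_conditioning": {"conditioning_name": "..."},
    #         "noise": {"latents_name": "..."},
    #         "steps": 20,
    #         "cfg_scale": 7.5,
    #         "scheduler": "euler",
    #     }
    #
    # cfg_scale may also be a per-step list (e.g. [7.5, 7.0, 6.5, ...]) thanks to the
    # Union[float, List[float]] annotation above.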

    @validator("cfg_scale")
    def ge_one(cls, v):
        """validate that all cfg_scale values are >= 1"""
        if isinstance(v, list):
            for i in v:
                if i < 1:
                    raise ValueError('cfg_scale must be greater than or equal to 1')
        else:
            if v < 1:
                raise ValueError('cfg_scale must be greater than or equal to 1')
        return v
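
    # Illustrative behaviour (assuming standard pydantic validation; not executed here):
    # an explicit cfg_scale below 1 is rejected, whether given as a scalar or a per-step list:
    #
    #     TextToLatentsInvocation(id="t2l_1", cfg_scale=[7.5, 6.0, 5.5])  # accepted
    #     TextToLatentsInvocation(id="t2l_1", cfg_scale=0.5)              # validation error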

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Text To Latents",
                "tags": ["latents"],
                "type_hints": {
                    "model": "model",
                    "control": "control",
                    # "cfg_scale": "float",
                    "cfg_scale": "number"
                }
            },
        }

    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
    ) -> None:
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )
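
    # Note: stable_diffusion_step_callback is assumed to package the intermediate
    # latents into a progress event keyed by source_node_id, so a frontend subscribed
    # to this node can render per-step previews; see its implementation for the exact
    # event payload.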

    def get_conditioning_data(
        self,
        context: InvocationContext,
        scheduler,
        unet,
    ) -> ConditioningData:
        positive_cond_data = context.services.latents.get(self.positive_conditioning.conditioning_name)
        c = positive_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype)
        extra_conditioning_info = positive_cond_data.conditionings[0].extra_conditioning

        negative_cond_data = context.services.latents.get(self.negative_conditioning.conditioning_name)
        uc = negative_cond_data.conditionings[0].embeds.to(device=unet.device, dtype=unet.dtype)

        conditioning_data = ConditioningData(
            unconditioned_embeddings=uc,
            text_embeddings=c,
            guidance_scale=self.cfg_scale,
            extra=extra_conditioning_info,
            postprocessing_settings=PostprocessingSettings(
                threshold=0.0,  # threshold,
                warmup=0.2,  # warmup,
                h_symmetry_time_pct=None,  # h_symmetry_time_pct,
                v_symmetry_time_pct=None,  # v_symmetry_time_pct,
            ),
        )

        conditioning_data = conditioning_data.add_scheduler_args_if_applicable(
            scheduler,
            # for ddim scheduler
            eta=0.0,  # ddim_eta
            # for ancestral and sde schedulers
            generator=torch.Generator(device=unet.device).manual_seed(0),
        )
        return conditioning_data
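
    # Sketch of how this data is consumed (standard classifier-free guidance; the
    # actual combination happens inside the pipeline, not here): at each denoising
    # step the UNet is evaluated with both embeddings and the predictions are mixed
    # roughly as
    #     noise_pred = uc_pred + guidance_scale * (c_pred - uc_pred)
    # where guidance_scale is self.cfg_scale. add_scheduler_args_if_applicable is
    # assumed to forward only the kwargs the chosen scheduler actually accepts
    # (eta for DDIM, a seeded generator for ancestral/SDE schedulers).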

    def create_pipeline(
        self,
        unet,
        scheduler,
    ) -> StableDiffusionGeneratorPipeline:
        # TODO:
        # configure_model_padding(
        #    unet,
        #    self.seamless,
        #    self.seamless_axes,
        # )

        class FakeVae:
            class FakeVaeConfig:
                def __init__(self):
                    self.block_out_channels = [0]

            def __init__(self):
                self.config = FakeVae.FakeVaeConfig()

        return StableDiffusionGeneratorPipeline(
            vae=FakeVae(),  # TODO: oh...
            text_encoder=None,
            tokenizer=None,
            unet=unet,
            scheduler=scheduler,
            safety_checker=None,
            feature_extractor=None,
            requires_safety_checker=False,
            precision="float16" if unet.dtype == torch.float16 else "float32",
        )
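
    # Note on the FakeVae stub above (an inference, not original commentary): this
    # invocation only produces latents, and decoding to an image happens in a separate
    # node, so a dummy VAE exposing a minimal config appears to be enough to satisfy
    # the StableDiffusionGeneratorPipeline constructor.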

    def prep_control_data(
        self,
        context: InvocationContext,
        # really only need model for dtype and device
        model: StableDiffusionGeneratorPipeline,
        control_input: List[ControlField],
        latents_shape: List[int],
        exit_stack: ExitStack,
        do_classifier_free_guidance: bool = True,
    ) -> List[ControlNetData]:
        # assuming fixed dimensional scaling of 8:1 for image:latents
        control_height_resize = latents_shape[2] * 8
        control_width_resize = latents_shape[3] * 8
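        # Worked example (illustrative values only): a noise tensor of shape
        # (1, 4, 64, 64) gives latents_shape[2] * 8 == 512 and
        # latents_shape[3] * 8 == 512, so every control image is resized to the
        # 512x512 output resolution before being fed to its ControlNet.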

        if control_input is None:
            control_list = None
        elif isinstance(control_input, list) and len(control_input) == 0:
            control_list = None
        elif isinstance(control_input, ControlField):
            control_list = [control_input]
        elif isinstance(control_input, list) and len(control_input) > 0 and isinstance(control_input[0], ControlField):
            control_list = control_input
        else:
            control_list = None

        if control_list is None:
            control_data = None
        # from above handling, any control that is not None should now be of type list[ControlField]
        else:
            # FIXME: add checks to skip entry if model or image is None
            # and if weight is None, populate with default 1.0?
            control_data = []
            control_models = []
            for control_info in control_list:
                control_model = exit_stack.enter_context(
                    context.services.model_manager.get_model(
                        model_name=control_info.control_model.model_name,
                        model_type=ModelType.ControlNet,
                        base_model=control_info.control_model.base_model,
                        context=context,
                    )
                )
                control_models.append(control_model)
                control_image_field = control_info.image
                input_image = context.services.images.get_pil_image(
                    control_image_field.image_name
                )
                # self.image.image_type, self.image.image_name
                # FIXME: still need to test with different widths, heights, devices, dtypes
                #        and add in batch_size, num_images_per_prompt?
                #        and do real check for classifier_free_guidance?
                # prepare_control_image should return torch.Tensor of shape (batch_size, 3, height, width)
                control_image = prepare_control_image(
                    image=input_image,
                    do_classifier_free_guidance=do_classifier_free_guidance,
                    width=control_width_resize,
                    height=control_height_resize,
                    # batch_size=batch_size * num_images_per_prompt,
                    # num_images_per_prompt=num_images_per_prompt,
                    device=control_model.device,
                    dtype=control_model.dtype,
                    control_mode=control_info.control_mode,
                    resize_mode=control_info.resize_mode,
                )
                control_item = ControlNetData(
                    model=control_model,
                    image_tensor=control_image,
                    weight=control_info.control_weight,
                    begin_step_percent=control_info.begin_step_percent,
                    end_step_percent=control_info.end_step_percent,
                    control_mode=control_info.control_mode,
                    # any resizing needed should currently be happening in prepare_control_image(),
                    # but adding resize_mode to ControlNetData in case needed in the future
                    resize_mode=control_info.resize_mode,
                )
                control_data.append(control_item)
            # MultiControlNetModel has been refactored out, just need list[ControlNetData]
        return control_data
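
    # Rough shape of a ControlField entry, inferred from the attribute accesses in
    # prep_control_data() above (the authoritative definition lives with the node's
    # field types):
    #     control_model -> model_name, base_model
    #     image         -> image_name
    #     control_weight, begin_step_percent, end_step_percent,
    #     control_mode, resize_mode
    # Each entry becomes one ControlNetData record consumed by the denoising loop.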

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        noise = context.services.latents.get(self.noise.latents_name)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(
            context.graph_execution_state_id
        )
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        def step_callback(state: PipelineIntermediateState):
            self.dispatch_progress(context, source_node_id, state)

        def _lora_loader():
            for lora in self.unet.loras:
                lora_info = context.services.model_manager.get_model(
                    **lora.dict(exclude={"weight"}), context=context,
                )
                yield (lora_info.context.model, lora.weight)
                del lora_info
            return

        unet_info = context.services.model_manager.get_model(
            **self.unet.unet.dict(), context=context,
        )
        with ExitStack() as exit_stack, \
                ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), \
                unet_info as unet:
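            # The nested context managers are assumed to behave as follows: exit_stack
            # keeps any ControlNet models loaded by prep_control_data() resident until
            # this block exits, ModelPatcher.apply_lora_unet() applies the
            # (model, weight) pairs yielded by _lora_loader() to the UNet for the
            # duration of the block, and unet_info yields the loaded UNet itself.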
            noise = noise.to(device=unet.device, dtype=unet.dtype)

            scheduler = get_scheduler(
                context=context,
                scheduler_info=self.unet.scheduler,
                scheduler_name=self.scheduler,
            )

            pipeline = self.create_pipeline(unet, scheduler)
            conditioning_data = self.get_conditioning_data(context, scheduler, unet)

            control_data = self.prep_control_data(
                model=pipeline, context=context, control_input=self.control,
                latents_shape=noise.shape,
                # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
                do_classifier_free_guidance=True,
                exit_stack=exit_stack,
            )

            # TODO: Verify the noise is the right size
            result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
                latents=torch.zeros_like(noise, dtype=torch_dtype(unet.device)),
                noise=noise,
                num_inference_steps=self.steps,
                conditioning_data=conditioning_data,
                control_data=control_data,  # list[ControlNetData]
                callback=step_callback,
            )
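            # For the text-to-latents case the initial latents are all zeros, so the
            # signal comes entirely from the supplied noise tensor; the pipeline is
            # assumed to combine the two before running self.steps denoising steps,
            # invoking step_callback after each one.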

        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
        result_latents = result_latents.to("cpu")
        torch.cuda.empty_cache()

        name = f'{context.graph_execution_state_id}__{self.id}'
        context.services.latents.save(name, result_latents)
        return build_latents_output(latents_name=name, latents=result_latents)
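
        # Downstream usage (pattern taken from this module's own invocations): a later
        # node reloads the saved tensor with
        #     context.services.latents.get(latents_name)
        # using the latents_name carried in the LatentsOutput returned above.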


class LatentsToLatentsInvocation(TextToLatentsInvocation):
    """Generates latents using latents as base image."""

    type: Literal["l2l"] = "l2l"

    # Inputs
    latents: Optional[LatentsField] = Field(
        description="The latents to use as a base image")
    strength: float = Field(
        default=0.7, ge=0, le=1,
        description="The strength of the latents to use")

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Latent To Latents",
                "tags": ["latents"],
                "type_hints": {
                    "model": "model",
                    "control": "control",
Feat/easy param (#3504)
* Testing change to LatentsToText to allow setting different cfg_scale values per diffusion step.
* Adding first attempt at float param easing node, using Penner easing functions.
* Core implementation of ControlNet and MultiControlNet.
* Added support for ControlNet and MultiControlNet to legacy non-nodal Txt2Img in backend/generator. Although backend/generator will likely disappear by v3.x, right now they are very useful for testing core ControlNet and MultiControlNet functionality while node codebase is rapidly evolving.
* Added example of using ControlNet with legacy Txt2Img generator
* Resolving rebase conflict
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Resolving conflicts in rebase to origin/main
* Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke())
* changes to base class for controlnet nodes
* Added HED, LineArt, and OpenPose ControlNet nodes
* Added an additional "raw_processed_image" output port to controlnets, mainly so could route ImageField to a ShowImage node
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* More rebase repair.
* Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this...
* Fixed use of ControlNet control_weight parameter
* Fixed lint-ish formatting error
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Refactored controlnet node to output ControlField that bundles control info.
* changes to base class for controlnet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Cleaning up TextToLatent arg testing
* Cleaning up mistakes after rebase.
* Removed last bits of dtype and and device hardwiring from controlnet section
* Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled.
* Added support for specifying which step iteration to start using
each ControlNet, and which step to end using each controlnet (specified as fraction of total steps)
* Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input.
* Added dependency on controlnet-aux v0.0.3
* Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it.
* Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names.
* Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle.
* Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier.
* Cleaning up after ControlNet refactor in TextToLatentsInvocation
* Extended node-based ControlNet support to LatentsToLatentsInvocation.
* chore(ui): regen api client
* fix(ui): add value to conditioning field
* fix(ui): add control field type
* fix(ui): fix node ui type hints
* fix(nodes): controlnet input accepts list or single controlnet
* Moved to controlnet_aux v0.0.4, reinstated Zoe controlnet preprocessor. Also in pyproject.toml had to specify downgrade of timm to 0.6.13 _after_ controlnet-aux installs timm >= 0.9.2, because timm >0.6.13 breaks Zoe preprocessor.
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Switching to ControlField for output from controlnet nodes.
* Resolving conflicts in rebase to origin/main
* Refactored ControlNet nodes so they subclass from PreprocessedControlInvocation, and only need to override run_processor(image) (instead of reimplementing invoke())
* changes to base class for controlnet nodes
* Added HED, LineArt, and OpenPose ControlNet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Added support for using multiple control nets. Unfortunately this breaks direct usage of Control node output port ==> TextToLatent control input port -- passing through a Collect node is now required. Working on fixing this...
* Fixed use of ControlNet control_weight parameter
* Core implementation of ControlNet and MultiControlNet.
* Added first controlnet preprocessor node for canny edge detection.
* Initial port of controlnet node support from generator-based TextToImageInvocation node to latent-based TextToLatentsInvocation node
* Switching to ControlField for output from controlnet nodes.
* Refactored controlnet node to output ControlField that bundles control info.
* changes to base class for controlnet nodes
* Added more preprocessor nodes for:
MidasDepth
ZoeDepth
MLSD
NormalBae
Pidi
LineartAnime
ContentShuffle
Removed pil_output options, ControlNet preprocessors should always output as PIL. Removed diagnostics and other general cleanup.
* Prep for splitting pre-processor and controlnet nodes
* Refactored controlnet nodes: split out controlnet stuff into separate node, stripped controlnet stuff form image processing/analysis nodes.
* Added resizing of controlnet image based on noise latent. Fixes a tensor mismatch issue.
* Cleaning up TextToLatent arg testing
* Cleaning up mistakes after rebase.
* Removed last bits of dtype and and device hardwiring from controlnet section
* Refactored ControNet support to consolidate multiple parameters into data struct. Also redid how multiple controlnets are handled.
* Added support for specifying which step iteration to start using
each ControlNet, and which step to end using each controlnet (specified as fraction of total steps)
* Cleaning up prior to submitting ControlNet PR. Mostly turning off diagnostic printing. Also fixed error when there is no controlnet input.
* Commented out ZoeDetector. Will re-instate once there's a controlnet-aux release that supports it.
* Switched CotrolNet node modelname input from free text to default list of popular ControlNet model names.
* Fix to work with current stable release of controlnet_aux (v0.0.3). Turned of pre-processor params that were added post v0.0.3. Also change defaults for shuffle.
* Refactored most of controlnet code into its own method to declutter TextToLatents.invoke(), and make upcoming integration with LatentsToLatents easier.
* Cleaning up after ControlNet refactor in TextToLatentsInvocation
* Extended node-based ControlNet support to LatentsToLatentsInvocation.
* chore(ui): regen api client
* fix(ui): fix node ui type hints
* fix(nodes): controlnet input accepts list or single controlnet
* Added Mediapipe image processor for use as ControlNet preprocessor.
Also hacked in ability to specify HF subfolder when loading ControlNet models from string.
* Fixed bug where MediapipFaceProcessorInvocation was ignoring max_faces and min_confidence params.
* Added nodes for float params: ParamFloatInvocation and FloatCollectionOutput. Also added FloatOutput.
* Added mediapipe install requirement. Should be able to remove once controlnet_aux package adds mediapipe to its requirements.
* Added float to FIELD_TYPE_MAP ins constants.ts
* Progress toward improvement in fieldTemplateBuilder.ts getFieldType()
* Fixed controlnet preprocessors and controlnet handling in TextToLatents to work with revised Image services.
* Cleaning up from merge, re-adding cfg_scale to FIELD_TYPE_MAP
* Making sure cfg_scale of type list[float] can be used in image metadata, to support param easing for cfg_scale
* Fixed math for per-step param easing.
* Added option to show plot of param value at each step
* Just cleaning up after adding param easing plot option, removing vestigial code.
* Modified control_weight ControlNet param to be polistmorphic --
can now be either a single float weight applied for all steps, or a list of floats of size total_steps, that specifies weight for each step.
* Added more informative error message when _validat_edge() throws an error.
* Just improving parm easing bar chart title to include easing type.
* Added requirement for easing-functions package
* Taking out some diagnostic prints.
* Added option to use both easing function and mirror of easing function together.
* Fixed recently introduced problem (when pulled in main), triggered by num_steps in StepParamEasingInvocation not having a default value -- just added default.
---------
Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
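The two ControlNet scheduling ideas described above -- a polymorphic control_weight (single float or per-step list) and begin/end step fractions per ControlNet -- can be illustrated with a small standalone sketch. This is a hypothetical example for clarity only, not the node's actual implementation; resolve_control_weight and control_is_active are made-up helper names, and the real logic lives in the ControlNet handling inside the denoising pipeline.

# Hypothetical sketch of the polymorphic control_weight and begin/end step
# fractions described above -- not the node's actual code.
from typing import List, Union

def resolve_control_weight(weight: Union[float, List[float]], step: int, total_steps: int) -> float:
    # A single float applies to every step; a list must supply one weight per step.
    if isinstance(weight, (int, float)):
        return float(weight)
    if len(weight) != total_steps:
        raise ValueError("per-step weight list must have length total_steps")
    return float(weight[step])

def control_is_active(step: int, total_steps: int, begin_fraction: float, end_fraction: float) -> bool:
    # Begin/end are expressed as fractions of the total number of steps.
    fraction = step / max(total_steps - 1, 1)
    return begin_fraction <= fraction <= end_fraction

# Example: a weight ramp over 10 steps, applied only during the first half.
total = 10
ramp = [i / (total - 1) for i in range(total)]
effective = [
    resolve_control_weight(ramp, s, total) if control_is_active(s, total, 0.0, 0.5) else 0.0
    for s in range(total)
]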
2023-06-11 06:27:44 +00:00
" cfg_scale " : " number " ,
2023-05-26 23:47:27 +00:00
}
2023-04-10 09:07:48 +00:00
} ,
}
2023-07-05 04:39:15 +00:00
@torch.no_grad ( )
2023-04-14 06:41:06 +00:00
def invoke ( self , context : InvocationContext ) - > LatentsOutput :
noise = context . services . latents . get ( self . noise . latents_name )
latent = context . services . latents . get ( self . latents . latents_name )
2023-04-22 03:10:20 +00:00
        # Get the source node id (we are invoking the prepared node)
2023-07-05 02:37:16 +00:00
        graph_execution_state = context.services.graph_execution_manager.get(
2023-07-05 17:00:43 +00:00
            context.graph_execution_state_id
        )
2023-04-22 03:10:20 +00:00
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]
2023-04-14 06:41:06 +00:00
        def step_callback(state: PipelineIntermediateState):
2023-04-22 03:10:20 +00:00
            self.dispatch_progress(context, source_node_id, state)
2023-04-14 06:41:06 +00:00
2023-07-05 02:37:16 +00:00
        def _lora_loader():
            for lora in self.unet.loras:
                lora_info = context.services.model_manager.get_model(
2023-07-15 16:12:01 +00:00
                    **lora.dict(exclude={"weight"}), context=context,
2023-07-05 17:00:43 +00:00
                )
2023-07-05 02:37:16 +00:00
                yield (lora_info.context.model, lora.weight)
                del lora_info
            return
2023-05-06 04:44:12 +00:00
2023-07-05 02:37:16 +00:00
        unet_info = context.services.model_manager.get_model(
2023-07-15 16:12:01 +00:00
            **self.unet.unet.dict(), context=context,
2023-07-05 17:00:43 +00:00
        )
        with ExitStack() as exit_stack, \
                ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), \
2023-07-05 02:37:16 +00:00
                unet_info as unet:
2023-05-29 22:11:00 +00:00
2023-07-18 13:51:16 +00:00
            noise = noise.to(device=unet.device, dtype=unet.dtype)
            latent = latent.to(device=unet.device, dtype=unet.dtype)
2023-05-13 13:08:03 +00:00
            scheduler = get_scheduler(
                context=context,
                scheduler_info=self.unet.scheduler,
                scheduler_name=self.scheduler,
            )
2023-05-06 04:44:12 +00:00
2023-05-13 13:08:03 +00:00
            pipeline = self.create_pipeline(unet, scheduler)
2023-07-18 13:20:25 +00:00
            conditioning_data = self.get_conditioning_data(context, scheduler, unet)
2023-07-05 02:37:16 +00:00
2023-06-13 21:26:37 +00:00
            control_data = self.prep_control_data(
                model=pipeline, context=context, control_input=self.control,
                latents_shape=noise.shape,
                # do_classifier_free_guidance=(self.cfg_scale >= 1.0))
                do_classifier_free_guidance=True,
2023-07-05 17:00:43 +00:00
                exit_stack=exit_stack,
2023-06-13 21:26:37 +00:00
            )
2023-05-13 13:08:03 +00:00
            # TODO: Verify the noise is the right size
2023-05-06 04:44:12 +00:00
            initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
2023-07-05 17:00:43 +00:00
                latent, device=unet.device, dtype=latent.dtype
            )
2023-05-06 04:44:12 +00:00
2023-05-13 13:08:03 +00:00
            timesteps, _ = pipeline.get_img2img_timesteps(
2023-05-06 04:44:12 +00:00
                self.steps,
                self.strength,
2023-05-13 13:08:03 +00:00
                device=unet.device,
2023-05-06 04:44:12 +00:00
            )
2023-07-05 02:37:16 +00:00
            result_latents, result_attention_map_saver = pipeline.latents_from_embeddings(
                latents=initial_latents,
                timesteps=timesteps,
                noise=noise,
                num_inference_steps=self.steps,
                conditioning_data=conditioning_data,
                control_data=control_data,  # list[ControlNetData]
                callback=step_callback
            )
2023-04-06 04:06:05 +00:00
        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
2023-07-18 13:20:25 +00:00
        result_latents = result_latents.to("cpu")
2023-04-06 04:06:05 +00:00
        torch.cuda.empty_cache()
        name = f'{context.graph_execution_state_id}__{self.id}'
2023-05-21 07:26:46 +00:00
        context.services.latents.save(name, result_latents)
2023-05-11 10:23:02 +00:00
        return build_latents_output(latents_name=name, latents=result_latents)
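A note on the strength handling above: get_img2img_timesteps presumably follows the usual diffusers-style img2img convention, where strength controls how much of the noise schedule actually runs and therefore how strongly the input latent is re-noised. A minimal, self-contained sketch of that convention (an assumption for illustration, not the pipeline's actual code):

# Rough sketch of conventional img2img timestep selection -- an assumption
# about what pipeline.get_img2img_timesteps does, not its implementation.
def img2img_timestep_window(num_inference_steps: int, strength: float):
    # strength=1.0 keeps the whole schedule; smaller values drop the earliest
    # (noisiest) steps, so the input latent is only partially re-noised.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps  # then slice scheduler.timesteps[t_start:]

# e.g. 30 steps at strength 0.6 -> skip the first 12 steps and run the last 18
assert img2img_timestep_window(30, 0.6) == (12, 30)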
2023-04-06 04:06:05 +00:00
# Latent to image
2023-05-13 13:08:03 +00:00
class LatentsToImageInvocation(BaseInvocation):
2023-04-06 04:06:05 +00:00
    """Generates an image from latents."""
    type: Literal["l2i"] = "l2i"
    # Inputs
2023-07-05 02:37:16 +00:00
    latents: Optional[LatentsField] = Field(
        description="The latents to generate an image from")
2023-05-13 13:08:03 +00:00
    vae: VaeField = Field(default=None, description="Vae submodel")
2023-07-05 02:37:16 +00:00
    tiled: bool = Field(
        default=False,
2023-07-24 03:32:08 +00:00
description = " Decode latents by overlapping tiles(less memory consumption) " )
2023-07-19 15:55:37 +00:00
    fp32: bool = Field(DEFAULT_PRECISION == 'float32', description="Decode in full precision")
2023-07-12 15:14:22 +00:00
    metadata: Optional[CoreMetadata] = Field(default=None, description="Optional core metadata to be written to the image")
2023-04-06 04:06:05 +00:00
2023-04-10 09:07:48 +00:00
    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
2023-07-18 14:26:45 +00:00
                "title": "Latents To Image",
2023-04-10 09:07:48 +00:00
                "tags": ["latents", "image"],
            },
        }
2023-04-06 04:06:05 +00:00
    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        latents = context.services.latents.get(self.latents.latents_name)
2023-05-13 13:08:03 +00:00
        vae_info = context.services.model_manager.get_model(
2023-07-15 16:12:01 +00:00
            **self.vae.vae.dict(), context=context,
2023-05-13 13:08:03 +00:00
        )
2023-04-06 04:06:05 +00:00
2023-05-14 00:06:26 +00:00
        with vae_info as vae:
2023-07-18 13:20:25 +00:00
            latents = latents.to(vae.device)
2023-07-11 15:19:36 +00:00
            if self.fp32:
                vae.to(dtype=torch.float32)
                use_torch_2_0_or_xformers = isinstance(
                    vae.decoder.mid_block.attentions[0].processor,
                    (
                        AttnProcessor2_0,
                        XFormersAttnProcessor,
                        LoRAXFormersAttnProcessor,
                        LoRAAttnProcessor2_0,
                    ),
                )
                # if xformers or torch_2_0 is used attention block does not need
                # to be in float32 which can save lots of memory
                if use_torch_2_0_or_xformers:
                    vae.post_quant_conv.to(latents.dtype)
                    vae.decoder.conv_in.to(latents.dtype)
                    vae.decoder.mid_block.to(latents.dtype)
                else:
                    latents = latents.float()
            else:
                vae.to(dtype=torch.float16)
                latents = latents.half()
2023-05-26 03:28:15 +00:00
            if self.tiled or context.services.configuration.tiled_decode:
2023-05-13 13:08:03 +00:00
                vae.enable_tiling()
            else:
                vae.disable_tiling()
2023-04-22 03:10:20 +00:00
2023-05-14 00:06:26 +00:00
            # clear memory as vae decode can request a lot
            torch.cuda.empty_cache()
2023-05-13 13:08:03 +00:00
            with torch.inference_mode():
                # copied from diffusers pipeline
                latents = latents / vae.config.scaling_factor
                image = vae.decode(latents, return_dict=False)[0]
2023-07-05 02:37:16 +00:00
                image = (image / 2 + 0.5).clamp(0, 1)  # denormalize
2023-05-13 13:08:03 +00:00
                # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
                np_image = image.cpu().permute(0, 2, 3, 1).float().numpy()
                image = VaeImageProcessor.numpy_to_pil(np_image)[0]
        torch.cuda.empty_cache()
2023-05-26 02:40:45 +00:00
        image_dto = context.services.images.create(
            image=image,
2023-06-01 22:09:49 +00:00
            image_origin=ResourceOrigin.INTERNAL,
2023-05-26 02:40:45 +00:00
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
2023-07-12 15:14:22 +00:00
            is_intermediate=self.is_intermediate,
            metadata=self.metadata.dict() if self.metadata else None,
2023-05-13 13:08:03 +00:00
        )
2023-04-24 12:07:53 +00:00
2023-05-26 02:40:45 +00:00
        return ImageOutput(
2023-06-14 14:29:01 +00:00
            image=ImageField(image_name=image_dto.image_name),
2023-05-26 02:40:45 +00:00
            width=image_dto.width,
            height=image_dto.height,
        )
2023-04-24 12:07:53 +00:00
2023-07-05 02:37:16 +00:00
LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear",
                                     "bilinear", "bicubic", "trilinear", "area", "nearest-exact"]
2023-04-24 12:07:53 +00:00
class ResizeLatentsInvocation(BaseInvocation):
2023-04-26 23:59:22 +00:00
    """Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8."""
2023-04-24 12:07:53 +00:00
    type: Literal["lresize"] = "lresize"
    # Inputs
2023-07-05 02:37:16 +00:00
    latents: Optional[LatentsField] = Field(
        description="The latents to resize")
2023-07-14 05:04:02 +00:00
    width: Union[int, None] = Field(default=512,
2023-07-05 02:37:16 +00:00
        ge=64, multiple_of=8, description="The width to resize to (px)")
2023-07-14 05:04:02 +00:00
    height: Union[int, None] = Field(default=512,
2023-07-05 02:37:16 +00:00
        ge=64, multiple_of=8, description="The height to resize to (px)")
    mode: LATENTS_INTERPOLATION_MODE = Field(
        default="bilinear", description="The interpolation mode")
    antialias: bool = Field(
        default=False,
        description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
2023-07-20 02:26:49 +00:00
2023-07-18 14:26:45 +00:00
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Resize Latents",
                "tags": ["latents", "resize"]
            },
        }
2023-04-24 12:07:53 +00:00
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        latents = context.services.latents.get(self.latents.latents_name)
2023-04-26 11:45:05 +00:00
2023-07-18 13:20:25 +00:00
        # TODO:
        device = choose_torch_device()
2023-04-24 12:07:53 +00:00
        resized_latents = torch.nn.functional.interpolate(
2023-07-18 13:20:25 +00:00
            latents.to(device), size=(self.height // 8, self.width // 8),
2023-07-05 02:37:16 +00:00
            mode=self.mode, antialias=self.antialias
2023-07-05 17:00:43 +00:00
            if self.mode in ["bilinear", "bicubic"] else False,
        )
2023-04-24 12:07:53 +00:00
        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
2023-07-18 13:20:25 +00:00
        resized_latents = resized_latents.to("cpu")
2023-04-24 12:07:53 +00:00
        torch.cuda.empty_cache()
        name = f"{context.graph_execution_state_id}__{self.id}"
2023-05-26 23:47:27 +00:00
        # context.services.latents.set(name, resized_latents)
2023-05-21 07:26:46 +00:00
        context.services.latents.save(name, resized_latents)
2023-05-11 10:23:02 +00:00
        return build_latents_output(latents_name=name, latents=resized_latents)
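A standalone illustration of the behaviour documented above, using torch directly rather than the invocation itself: pixel dimensions are floor-divided by 8 to obtain the latent grid size, and antialiasing is only honoured for the bilinear/bicubic modes. The helper name below is hypothetical.

# Standalone illustration of the resize behaviour above (not the node itself).
import torch

def resize_latents_like_node(latents: torch.Tensor, width: int, height: int,
                             mode: str = "bilinear", antialias: bool = False) -> torch.Tensor:
    # Pixel width/height map to a latent grid that is 8x smaller in each dimension.
    return torch.nn.functional.interpolate(
        latents,
        size=(height // 8, width // 8),
        mode=mode,
        antialias=antialias if mode in ["bilinear", "bicubic"] else False,
    )

sample = torch.randn(1, 4, 64, 64)        # latents of a 512x512 image
resized = resize_latents_like_node(sample, width=768, height=512)
assert resized.shape == (1, 4, 64, 96)    # (height // 8, width // 8)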
2023-04-24 12:07:53 +00:00
class ScaleLatentsInvocation(BaseInvocation):
    """Scales latents by a given factor."""
    type: Literal["lscale"] = "lscale"
    # Inputs
2023-07-05 02:37:16 +00:00
    latents: Optional[LatentsField] = Field(
        description="The latents to scale")
    scale_factor: float = Field(
        gt=0, description="The factor by which to scale the latents")
    mode: LATENTS_INTERPOLATION_MODE = Field(
        default="bilinear", description="The interpolation mode")
    antialias: bool = Field(
        default=False,
        description="Whether or not to antialias (applied in bilinear and bicubic modes only)")
2023-07-20 02:26:49 +00:00
2023-07-18 14:26:45 +00:00
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "title": "Scale Latents",
                "tags": ["latents", "scale"]
            },
        }
2023-04-24 12:07:53 +00:00
    def invoke(self, context: InvocationContext) -> LatentsOutput:
        latents = context.services.latents.get(self.latents.latents_name)
2023-07-18 13:20:25 +00:00
        # TODO:
        device = choose_torch_device()
2023-04-24 12:07:53 +00:00
        # resizing
        resized_latents = torch.nn.functional.interpolate(
2023-07-18 13:20:25 +00:00
            latents.to(device), scale_factor=self.scale_factor, mode=self.mode,
2023-07-05 02:37:16 +00:00
            antialias=self.antialias
2023-07-05 17:00:43 +00:00
            if self.mode in ["bilinear", "bicubic"] else False,
        )
2023-04-24 12:07:53 +00:00
        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
2023-07-18 13:20:25 +00:00
        resized_latents = resized_latents.to("cpu")
2023-04-24 12:07:53 +00:00
        torch.cuda.empty_cache()
        name = f"{context.graph_execution_state_id}__{self.id}"
2023-05-26 23:47:27 +00:00
        # context.services.latents.set(name, resized_latents)
2023-05-21 07:26:46 +00:00
        context.services.latents.save(name, resized_latents)
2023-05-11 10:23:02 +00:00
        return build_latents_output(latents_name=name, latents=resized_latents)
2023-05-05 05:15:55 +00:00
2023-05-13 13:08:03 +00:00
class ImageToLatentsInvocation(BaseInvocation):
2023-05-05 05:15:55 +00:00
    """Encodes an image into latents."""
    type: Literal["i2l"] = "i2l"
    # Inputs
2023-07-03 16:17:45 +00:00
    image: Optional[ImageField] = Field(description="The image to encode")
2023-05-13 13:08:03 +00:00
    vae: VaeField = Field(default=None, description="Vae submodel")
2023-07-05 02:37:16 +00:00
    tiled: bool = Field(
        default=False,
        description="Encode latents by overlapping tiles (less memory consumption)")
2023-07-19 15:55:37 +00:00
    fp32: bool = Field(DEFAULT_PRECISION == 'float32', description="Encode in full precision")
2023-07-16 03:00:37 +00:00
2023-05-05 05:15:55 +00:00
    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
2023-07-18 14:26:45 +00:00
                "title": "Image To Latents",
                "tags": ["latents", "image"]
2023-05-05 05:15:55 +00:00
            },
        }
    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> LatentsOutput:
2023-05-26 23:47:27 +00:00
        # image = context.services.images.get(
        #     self.image.image_type, self.image.image_name
        # )
2023-06-14 11:40:09 +00:00
        image = context.services.images.get_pil_image(self.image.image_name)
2023-05-05 05:15:55 +00:00
2023-05-13 13:08:03 +00:00
        # vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
        vae_info = context.services.model_manager.get_model(
2023-07-15 16:12:01 +00:00
            **self.vae.vae.dict(), context=context,
2023-05-13 13:08:03 +00:00
        )
2023-05-05 05:15:55 +00:00
        image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
        if image_tensor.dim() == 3:
            image_tensor = einops.rearrange(image_tensor, "c h w -> 1 c h w")
2023-05-14 00:06:26 +00:00
        with vae_info as vae:
2023-07-16 03:00:37 +00:00
            orig_dtype = vae.dtype
            if self.fp32:
                vae.to(dtype=torch.float32)
                use_torch_2_0_or_xformers = isinstance(
                    vae.decoder.mid_block.attentions[0].processor,
                    (
                        AttnProcessor2_0,
                        XFormersAttnProcessor,
                        LoRAXFormersAttnProcessor,
                        LoRAAttnProcessor2_0,
                    ),
                )
                # if xformers or torch_2_0 is used attention block does not need
                # to be in float32 which can save lots of memory
                if use_torch_2_0_or_xformers:
                    vae.post_quant_conv.to(orig_dtype)
                    vae.decoder.conv_in.to(orig_dtype)
                    vae.decoder.mid_block.to(orig_dtype)
                # else:
                #     latents = latents.float()
            else:
                vae.to(dtype=torch.float16)
                # latents = latents.half()
2023-05-13 13:08:03 +00:00
            if self.tiled:
                vae.enable_tiling()
            else:
                vae.disable_tiling()
2023-05-14 00:06:26 +00:00
            # non_noised_latents_from_image
            image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
            with torch.inference_mode():
                image_tensor_dist = vae.encode(image_tensor).latent_dist
                latents = image_tensor_dist.sample().to(
                    dtype=vae.dtype
                )  # FIXME: uses torch.randn. make reproducible!
2023-07-20 15:54:51 +00:00
            latents = vae.config.scaling_factor * latents
2023-07-16 03:00:37 +00:00
            latents = latents.to(dtype=orig_dtype)
2023-05-05 05:15:55 +00:00
        name = f"{context.graph_execution_state_id}__{self.id}"
2023-07-18 13:20:25 +00:00
        latents = latents.to("cpu")
2023-05-21 07:26:46 +00:00
        context.services.latents.save(name, latents)
2023-05-11 10:23:02 +00:00
        return build_latents_output(latents_name=name, latents=latents)
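Regarding the FIXME above about the VAE encode sampling not being reproducible: one way it could be addressed is to thread a seeded torch.Generator through the sampling call. This is a hedged sketch, not the invocation's actual code, and it assumes that the diffusers latent distribution's sample() accepts an optional generator argument.

# Sketch of one way to make the VAE encode sampling reproducible (see the
# FIXME above). Assumes diffusers' latent distribution .sample() accepts an
# optional torch.Generator; this is not the invocation's actual code.
import torch

def encode_reproducibly(vae, image_tensor: torch.Tensor, seed: int) -> torch.Tensor:
    # A CPU generator keeps the draw deterministic regardless of the VAE's device.
    generator = torch.Generator(device="cpu").manual_seed(seed)
    with torch.inference_mode():
        dist = vae.encode(image_tensor.to(device=vae.device, dtype=vae.dtype)).latent_dist
        latents = dist.sample(generator=generator).to(dtype=vae.dtype)
    return vae.config.scaling_factor * latents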