Compare commits


484 Commits

Author SHA1 Message Date
056c56d322 chore: release v4.2.9.dev20240824 2024-08-24 12:36:25 +10:00
afc6f83d72 fix(ui): lint & fix issues with adding regional ip adapters 2024-08-24 12:32:38 +10:00
c776ac3af2 feat(ui): add knipignore tag
I'm not ready to delete some things but still want to build the app.
2024-08-24 12:32:00 +10:00
b7b3683bef feat(ui): duplicate entity 2024-08-24 12:20:35 +10:00
fb26b6824a feat(ui): autocomplete on getPrefixedId 2024-08-24 12:20:26 +10:00
63d8ad912f feat(ui): paste canvas gens back on source in generate mode 2024-08-24 11:56:24 +10:00
bbd7d7fc17 chore(ui): typegen 2024-08-24 11:55:50 +10:00
6507a78182 feat(nodes): CanvasV2MaskAndCropInvocation can paste generated image back on source
This is needed for `Generate` mode.
2024-08-24 11:55:43 +10:00
22f46517f4 fix(ui): extraneous entity preview updates 2024-08-24 11:28:05 +10:00
45596e1f94 fix(ui): newly-added entities are selected 2024-08-24 11:14:58 +10:00
6de0dbe854 feat(ui): add crosshair to color picker 2024-08-24 10:51:34 +10:00
011827fa29 fix(ui): color picker ignores alpha 2024-08-24 10:16:27 +10:00
fc6d244071 fix(ui): calculate renderable entities correctly in tool module 2024-08-24 10:10:21 +10:00
cd3da886d6 feat(ui): better color picker 2024-08-24 10:10:04 +10:00
c013c55d92 feat(ui): colored mask preview image 2024-08-24 08:54:20 +10:00
cd3dd7db0d fix(ui): new rectangles don't trigger rerender 2024-08-23 23:24:16 +10:00
1fdcce9429 chore: bump version v4.2.9.dev20240823 2024-08-23 20:52:16 +10:00
181e40926d feat(ui): disable most interaction while filtering 2024-08-23 20:32:49 +10:00
c62ede5878 fix(ui): filter preview offset 2024-08-23 20:24:40 +10:00
a2ad5f1a9a feat(ui): tweak layout of staging area toolbar 2024-08-23 19:55:02 +10:00
ff74a5356f chore(ui): typegen 2024-08-23 19:52:37 +10:00
92dc30dace tidy(app): clean up app changes for canvas v2 2024-08-23 19:52:04 +10:00
3af577b210 feat(ui): use singleton for clear q confirm dialog 2024-08-23 19:47:51 +10:00
d0464330f7 fix(ui): rip out broken recall logic, NO TS ERRORS 2024-08-23 19:47:51 +10:00
dd3ef4a80f chore(ui): lint 2024-08-23 19:47:51 +10:00
0ced891944 fix(ui): staging area interaction scopes 2024-08-23 19:47:51 +10:00
10a5452df9 fix(ui): staging area actions 2024-08-23 19:47:51 +10:00
cb97969bbc tidy(ui): more cleanup 2024-08-23 19:47:51 +10:00
71e742e238 fix(ui): upscale tab graph 2024-08-23 19:47:51 +10:00
fadd20fb8e fix(ui): sdxl graph builder 2024-08-23 19:47:51 +10:00
01b9ca78e4 fix(ui): select next entity in the list when deleting 2024-08-23 19:47:51 +10:00
2baf825f34 feat(ui): fix delete layer hotkey 2024-08-23 19:47:51 +10:00
1fa8048509 tidy(ui): "eye dropper" -> "color picker" 2024-08-23 19:47:51 +10:00
a000ad75f6 tidy(ui): regional guidance buttons 2024-08-23 19:47:51 +10:00
aefb2339bb feat(ui): update entity list menu 2024-08-23 19:47:51 +10:00
a4f8671f86 feat(ui): add log debug button 2024-08-23 19:47:51 +10:00
73530ba54f chore(ui): lint 2024-08-23 19:47:51 +10:00
685eb9927d chore(ui): prettier 2024-08-23 19:47:51 +10:00
ee57302fc3 chore(ui): eslint 2024-08-23 19:47:51 +10:00
c1fb9cdb93 tidy(ui): remove unused stuff 4 2024-08-23 19:47:36 +10:00
aa6d441552 tidy(ui): remove unused stuff 3 2024-08-23 19:47:01 +10:00
25d8d4c2e9 tidy(ui): remove unused pkg @chakra-ui/react-use-size 2024-08-23 19:47:01 +10:00
427ea6da5c feat(ui): revise graph building for control layers, fix issues w/ invocation complete events 2024-08-23 19:47:01 +10:00
d9f4266630 feat(ui): use unique id for metadata in Graph class 2024-08-23 19:47:01 +10:00
96f6e9e683 tidy(ui): remove unused stuff 2 2024-08-23 19:47:01 +10:00
f10248e3f5 tidy(ui): remove unused stuff 2024-08-23 19:47:01 +10:00
6a21f5fde1 tidy(ui): reduce use of parseify util 2024-08-23 19:47:01 +10:00
ff20dd509a feat(ui): refine canvas entity list items & menus 2024-08-23 19:47:01 +10:00
78a59b5b78 feat(ui): canvas layer preview, revised reactivity for adapters 2024-08-23 19:47:01 +10:00
46bfbbbc87 feat(ui): add SyncableMap
Can be used with `useSyncExternalStore` to make a `Map` reactive.
2024-08-23 19:47:01 +10:00
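
A minimal sketch of how such a map can satisfy the `useSyncExternalStore` contract (illustrative, assuming a snapshot-caching design; the real class may differ):

```ts
import { useSyncExternalStore } from 'react';

// A Map wrapper that notifies subscribers on mutation and caches its snapshot
// so useSyncExternalStore sees a stable reference between mutations.
class SyncableMap<K, V> {
  private map = new Map<K, V>();
  private listeners = new Set<() => void>();
  private snapshot: V[] | null = null;

  set = (key: K, value: V): void => {
    this.map.set(key, value);
    this.snapshot = null; // invalidate the cached snapshot
    this.listeners.forEach((listener) => listener());
  };

  subscribe = (listener: () => void): (() => void) => {
    this.listeners.add(listener);
    return () => {
      this.listeners.delete(listener);
    };
  };

  getSnapshot = (): V[] => {
    if (this.snapshot === null) {
      this.snapshot = Array.from(this.map.values());
    }
    return this.snapshot;
  };
}

// Usage in a component: re-renders whenever the map is mutated.
const useMapValues = <K, V>(map: SyncableMap<K, V>) =>
  useSyncExternalStore(map.subscribe, map.getSnapshot);
```

Caching the snapshot matters: `useSyncExternalStore` compares snapshots by reference, so returning a fresh array on every call would cause an infinite re-render loop.
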
a6d73d0773 tidy(ui): removed unused transform methods from canvasmanager 2024-08-23 19:47:01 +10:00
6578e8bef8 feat(ui): transform tool ux 2024-08-23 19:47:01 +10:00
0596d25e07 feat(ui): rough out canvas mode 2024-08-23 19:47:01 +10:00
86e8ce9139 feat(ui): add canvas autosave checkbox 2024-08-23 19:47:01 +10:00
5aa2957da4 fix(ui): memory leak when getting image DTO
must unsubscribe!
2024-08-23 19:47:01 +10:00
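
For context, a sketch of the pattern (`store` and `imagesApi` are assumed names): an imperative RTK Query fetch via `initiate` creates a cache subscription that must be released.

```ts
// Hypothetical helper illustrating the fix described above.
async function getImageDTOOnce(imageName: string) {
  const req = store.dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
  try {
    return await req.unwrap();
  } finally {
    req.unsubscribe(); // without this, the cache entry (and its memory) is retained forever
  }
}
```
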
82f0cb2c8c feat(ui): rework settings menu 2024-08-23 19:47:01 +10:00
fa48145cbc feat(ui): no entities fallback buttons 2024-08-23 19:47:01 +10:00
6d1edc330d perf(ui): optimize gallery image delete button rendering 2024-08-23 19:47:01 +10:00
97c0d3f6be feat(ui): remove "solid" background option 2024-08-23 19:47:01 +10:00
a79a25ad63 tidy(ui): organise files and classes 2024-08-23 19:47:01 +10:00
6a8ceef404 tidy(ui): abstract compositing logic to module 2024-08-23 19:47:01 +10:00
3539670d93 fix(ui): fix canvas cache property access 2024-08-23 19:47:01 +10:00
c54bc32ef6 tidy(ui): clean up CanvasFilter class 2024-08-23 19:47:01 +10:00
fee293e289 tidy(ui): clean up a few bits and bobs 2024-08-23 19:47:01 +10:00
747eef9ccc tidy(ui): abstract canvas rendering logic to module 2024-08-23 19:47:01 +10:00
7d2df399ed tidy(ui): abstract caching logic to module 2024-08-23 19:47:01 +10:00
68fad5cdcc tidy(ui): abstract worker logic to module 2024-08-23 19:47:01 +10:00
b4d656c203 tidy(ui): abstract stage logic into module 2024-08-23 19:47:01 +10:00
3136d89d52 feat(ui): add entity group hiding 2024-08-23 19:47:01 +10:00
27e829b955 feat(ui): move all caching out of redux
While we lose the benefit of the caches persisting across reloads, this is a much simpler way to handle things. If we need a persistent cache, we can explore it in the future.
2024-08-23 19:47:01 +10:00
e03e870d5b feat(ui): revised rasterization caching
- use `stable-hash` to generate stable, non-crypto hashes for cache entries, instead of using deep object comparisons
- use an object to store image name caches
2024-08-23 19:47:01 +10:00
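
A sketch of the idea using the `stable-hash` package (the cache shape here is assumed):

```ts
import hash from 'stable-hash';

// Key rasterization results by a stable, non-cryptographic hash of the layer's
// state, avoiding repeated deep object comparisons.
const rasterCache = new Map<string, string>(); // state hash -> cached image name

const getCachedImageName = (layerState: object): string | undefined =>
  rasterCache.get(hash(layerState));

const setCachedImageName = (layerState: object, imageName: string): void => {
  rasterCache.set(hash(layerState), imageName);
};
```
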
9465ff450b feat(ui): revise filter implementation 2024-08-23 19:47:01 +10:00
92906a9575 fix(ui): add button to delete inpaint mask 2024-08-23 19:47:01 +10:00
77f206abe4 feat(ui): add contexts/hooks to access entity adapters directly 2024-08-23 19:47:01 +10:00
44a3f61580 feat(ui): add CanvasManagerProviderGate
This context waits to render its children until the canvas manager is available. Then its children have access to the manager directly via a hook.
2024-08-23 19:47:01 +10:00
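
A sketch of the gate (assuming the `$canvasManager` atom from the following commit and an illustrative `CanvasManager` type):

```tsx
import { useStore } from '@nanostores/react';
import { createContext, useContext, type PropsWithChildren } from 'react';

const CanvasManagerContext = createContext<CanvasManager | null>(null);

export const CanvasManagerProviderGate = ({ children }: PropsWithChildren) => {
  // $canvasManager is only set once the manager is fully constructed.
  const canvasManager = useStore($canvasManager);
  if (!canvasManager) {
    return null; // render nothing until the manager exists
  }
  return <CanvasManagerContext.Provider value={canvasManager}>{children}</CanvasManagerContext.Provider>;
};

export const useCanvasManager = (): CanvasManager => {
  const manager = useContext(CanvasManagerContext);
  if (!manager) {
    throw new Error('useCanvasManager must be used within CanvasManagerProviderGate');
  }
  return manager;
};
```
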
0a2afed08b feat(ui): do not set $canvasManager until ready 2024-08-23 19:47:01 +10:00
9b3b961105 fix(ui): inpaint mask naming 2024-08-23 19:47:01 +10:00
9b1828e1aa feat(ui): efficient canvas compositing
Also solves issue of exporting layers at different opacities than what is visible
2024-08-23 19:47:01 +10:00
5101873f49 feat(ui): allow multiple inpaint masks
This is easier than making it a nullable singleton
2024-08-23 19:47:01 +10:00
c612f18114 fix(ui): missing rasterization cache invalidations 2024-08-23 19:47:01 +10:00
7e400d876f feat(ui): iterate on filter UI, flow 2024-08-23 19:47:01 +10:00
677dddcfc9 fix(ui): rehydration data loss 2024-08-23 19:47:01 +10:00
0792b9175e feat(ui): sort log namespaces 2024-08-23 19:47:01 +10:00
e4829f80af fix(ui): do not merge arrays by index during rehydration 2024-08-23 19:47:01 +10:00
bb760f3eb4 fix(ui): clone parsed data during state rehydration
Without this, the objects and arrays in `parsed` could be mutated, and the log statement would show the mutated data.
2024-08-23 19:47:01 +10:00
388c65287b fix(ui): fix logger filter
It was accidentally replacing the filter instead of appending to it.
2024-08-23 19:47:01 +10:00
12cd41e05c fix(ui): race condition queue status
Sequence of events causing the race condition:
- Enqueue batch
- Invalidate `SessionQueueStatus` tag
- Request updated queue status via HTTP - batch still processing at this point
- Batch completes
- Event emitted saying so
- Optimistically update the queue status cache, it is correct
- HTTP request makes it back and overwrites the optimistic update, indicating the batch is still in progress

Fixed by not invalidating the cache.
2024-08-23 19:47:01 +10:00
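
A sketch of the resulting pattern (endpoint and payload names assumed): patch the cached status directly from the socket event instead of invalidating the tag, so a slow HTTP refetch can never clobber newer state.

```ts
socket.on('queue_item_status_changed', (event) => {
  store.dispatch(
    queueApi.util.updateQueryData('getQueueStatus', undefined, (draft) => {
      // Write the event's counts straight into the cache - no refetch, no race.
      Object.assign(draft.queue, event.queue_status);
    }),
  );
});
```
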
7765c03949 fix(ui): handle opacity for masks 2024-08-23 19:47:01 +10:00
3daa80c57f feat(ui): default background to checkerboard 2024-08-23 19:47:01 +10:00
5dbbef4ebd feat(ui): clean up logging namespaces, allow skipping namespaces 2024-08-23 19:47:01 +10:00
db33b3f7b5 chore(ui): bump ui library 2024-08-23 19:47:01 +10:00
8ffcf2a6be fix(ui): do not allow drawing if layer disabled 2024-08-23 19:47:01 +10:00
2e7ae6a07e fix(ui): stale state causing race conditions & extraneous renders 2024-08-23 19:47:01 +10:00
fea1711f0c fix(ui): do not clear buffer when rendering "real" objects 2024-08-23 19:47:01 +10:00
2a3546db97 tidy(ui): remove "filter" from CanvasImageState 2024-08-23 19:47:01 +10:00
285c266612 feat(ui): better editable title 2024-08-23 19:47:01 +10:00
426ad54c53 fix(ui): stroke eraserline 2024-08-23 19:47:01 +10:00
fc75f7919f feat(ui): restore transparency effect for control layers 2024-08-23 19:47:01 +10:00
6c6b1aaff6 feat(ui): use text cursor for entity title 2024-08-23 19:47:01 +10:00
c319d653ac tidy(ui): remove extraneous logging in CanvasStateApi 2024-08-23 19:47:01 +10:00
d887e474e7 feat(ui): better buffer commit logic 2024-08-23 19:47:01 +10:00
da7b52d6ba feat(ui): render buffer separately from "real" objects 2024-08-23 19:47:01 +10:00
b5aa308593 fix(ui): pixelRect should always be integer 2024-08-23 19:47:01 +10:00
0b7ceb3bb6 fix(ui): only update stage attrs when stage itself is dragged 2024-08-23 19:47:01 +10:00
3a70cefda2 feat(ui): add line simplification
This fixes some awkward issues where line segments stack up.
2024-08-23 19:47:01 +10:00
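
One common simplification scheme, sketched here (not necessarily the exact algorithm used): drop points that fall within a tolerance of the last kept point, which prevents dense stacks of near-duplicate segments.

```ts
// Radial-distance line simplification: keep a point only if it is at least
// `tolerance` px away from the last kept point; always keep the endpoints.
function simplifyLine(points: { x: number; y: number }[], tolerance: number) {
  if (points.length <= 2) {
    return points;
  }
  const out = [points[0]];
  for (let i = 1; i < points.length - 1; i++) {
    const prev = out[out.length - 1];
    const d = Math.hypot(points[i].x - prev.x, points[i].y - prev.y);
    if (d >= tolerance) {
      out.push(points[i]);
    }
  }
  out.push(points[points.length - 1]);
  return out;
}
```
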
4b609251e1 fix(ui): various things listening when they need not listen 2024-08-23 19:47:01 +10:00
0839eac0f7 feat(ui): layer opacity via caching 2024-08-23 19:47:01 +10:00
5f2a7feeee feat(ui): reset view fits all visible objects 2024-08-23 19:47:01 +10:00
982535eb92 fix(ui): rerenders when changing canvas scale 2024-08-23 19:47:01 +10:00
0c2b8edc8d fix(ui): do not render rasterized layer unless renderObjects=true 2024-08-23 19:47:01 +10:00
f78f4ca25f feat(ui): revise app layout strategy, add interaction scopes for hotkeys 2024-08-23 19:47:01 +10:00
d6b3e6c07d feat(ui): tweak mask patterns 2024-08-23 19:47:01 +10:00
071ff8e74a fix(ui): dynamic prompts recalcs when presets are loaded 2024-08-23 19:47:01 +10:00
1ea8aafca1 fix(ui): use style preset prompts correctly 2024-08-23 19:46:05 +10:00
533dd221f8 fix(ui): discard selected staging image not all other images 2024-08-23 19:46:05 +10:00
2b325c6683 fix(ui): respect image size in staging preview 2024-08-23 19:46:05 +10:00
3845b1b3e6 tidy(ui): cleanup after events change 2024-08-23 19:46:05 +10:00
cea7890a67 feat(ui): move socket event handling out of redux
Download events and invocation status events (including progress images) are very frequent. There's no real need for these to pass through redux. Handling them outside redux is a significant performance win - far fewer store subscription calls, far fewer trips through middleware.

All event handling is moved outside middleware. Cleanup of unused actions and listeners to follow.
2024-08-23 19:46:05 +10:00
c38fe8025d fix(ui): rebase conflicts 2024-08-23 19:46:05 +10:00
f1de95349c fix(ui): update compositing rect when fill changes 2024-08-23 19:46:05 +10:00
2950775fa7 feat(ui): add canvas background style 2024-08-23 19:46:05 +10:00
cb293fd7ac feat(ui): mask layers choose own opacity 2024-08-23 19:46:05 +10:00
43b3fab6be feat(ui): mask fill patterns 2024-08-23 19:46:05 +10:00
d4b0dbce49 build(ui): add vite types to tsconfig 2024-08-23 19:46:05 +10:00
137b810669 fix(ui): do not smooth pixel data when using eyeDropper 2024-08-23 19:46:05 +10:00
c172657324 tidy(ui): tool components & translations 2024-08-23 19:46:05 +10:00
97c966b04f feat(ui): rough out eyedropper tool
It's a bit slow because we are converting the stage to canvas on every mouse move. Also need to improve the visual, but it works.
2024-08-23 19:46:05 +10:00
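
A sketch of the approach described above: rasterize the stage and read one pixel. Converting the whole stage on every mouse move is the slow part.

```ts
import Konva from 'konva';

// Pick the composited color under (x, y) by rendering the stage to a canvas.
function pickColor(stage: Konva.Stage, x: number, y: number) {
  const canvas = stage.toCanvas();
  const ctx = canvas.getContext('2d');
  if (!ctx) {
    return null;
  }
  const [r, g, b, a] = ctx.getImageData(x, y, 1, 1).data;
  return { r, g, b, a };
}
```
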
7178fc6253 fix(ui): ip adapters work 2024-08-23 19:46:05 +10:00
4adb2eabf5 feat(ui): rename layers 2024-08-23 19:46:05 +10:00
9f2c815e13 feat(ui): revise entity menus 2024-08-23 19:46:05 +10:00
1435557d1d feat(ui): split control layers from raster layers for UI and internal state, same rendering as raster layers 2024-08-23 19:46:05 +10:00
96abf687f6 feat(ui): implement cache for image rasterization, rip out some old controladapters code 2024-08-23 19:46:05 +10:00
636d9a7209 feat(ui, app): use layer as control (wip) 2024-08-23 19:46:05 +10:00
3b36eb0223 feat(ui): add contextmenu for canvas entities 2024-08-23 19:46:05 +10:00
388c97bff0 feat(ui): more better logging & naming 2024-08-23 19:46:05 +10:00
b1cb018695 feat(ui): better logging w/ path 2024-08-23 19:46:05 +10:00
df78dd7953 feat(ui): always show marks on canvas scale slider 2024-08-23 19:46:05 +10:00
0dc344a22e fix(ui): do not import button from chakra 2024-08-23 19:46:05 +10:00
350d7f6f14 fix(ui): scaled bbox preview 2024-08-23 19:46:05 +10:00
11059ee2d4 feat(ui): tidy up atoms 2024-08-23 19:46:05 +10:00
c90d3f3bb9 feat(ui): convert all my pubsubs to atoms
it's the same but better
2024-08-23 19:46:05 +10:00
7f6d439fd1 feat(ui): add translation 2024-08-23 19:46:05 +10:00
783a78f069 fix(ui): give up on thumbnail loading, causes flash during transformer 2024-08-23 19:46:05 +10:00
0ff031950d fix(ui): depth anything v2 2024-08-23 19:46:05 +10:00
d7e8f3d756 tidy(ui): remove unused code, comments 2024-08-23 19:46:05 +10:00
4668ea449b fix(ui): staging area works 2024-08-23 19:46:05 +10:00
30d318d021 feat(nodes): temp disable canvas output crop 2024-08-23 19:46:05 +10:00
de96f97e5f fix(ui): max scale 1 when reset view 2024-08-23 19:46:05 +10:00
57c0a2dfb1 feat(ui): better scale changer component, reset view functionality 2024-08-23 19:46:05 +10:00
cd4e464bde fix(ui): img2img 2024-08-23 19:46:05 +10:00
49e48c3eb7 feat(ui): add manual scale controls 2024-08-23 19:46:05 +10:00
edd3b3bce9 fix(ui): do not await clearBuffer 2024-08-23 19:46:04 +10:00
f8bfb66108 feat(ui): dnd image into layer 2024-08-23 19:46:04 +10:00
3b6a76cbf3 fix(ui): do not await commitBuffer 2024-08-23 19:46:04 +10:00
e0b60e4320 fix(ui): properly destroy entities in manager cleanup 2024-08-23 19:46:04 +10:00
2159319035 tidy(ui): clearer component names for regional guidance 2024-08-23 19:46:04 +10:00
b170fc232e tidy(ui): clearer component names for ip adapter 2024-08-23 19:46:04 +10:00
594da60f2f tidy(ui): clearer component names for inpaint mask 2024-08-23 19:46:04 +10:00
6a432f6518 tidy(ui): clearer component names for control adapters 2024-08-23 19:46:04 +10:00
eb8eacfec6 feat(ui): simplify canvas list item headers 2024-08-23 19:46:04 +10:00
c8d04d42e2 fix(ui): ip adapter list item 2024-08-23 19:46:04 +10:00
d39c9de81e tidy(ui): clean up unused logic 2024-08-23 19:46:04 +10:00
a27d39b9ff feat(ui): clean up state, add mutex for image loading, add thumbnail loading 2024-08-23 19:46:04 +10:00
6b385614f0 chore(ui): add async-mutex dep 2024-08-23 19:46:04 +10:00
3ae7250ef7 feat(ui): txt2img, img2img, inpaint & outpaint working 2024-08-23 19:46:04 +10:00
a42d0ce1d2 feat(ui): no padding on transformer outlines 2024-08-23 19:46:04 +10:00
d9131f7563 feat(ui): restore object count to layer titles 2024-08-23 19:46:04 +10:00
bdce958f29 tidy(ui): "useIsEntitySelected" -> "useEntityIsSelected" 2024-08-23 19:46:04 +10:00
3c86f1e979 tidy(ui): move transformer statics into class 2024-08-23 19:46:04 +10:00
894b8a29b9 tidy(ui): massive cleanup
- create a context for entity identifiers, massively simplifying UI for each entity in the list
- consolidate common redux actions
- remove now-unused code
2024-08-23 19:46:04 +10:00
8436a44973 perf(ui): do not add duplicate points to lines 2024-08-23 19:46:04 +10:00
f9f9ec3688 feat(ui): up line tension to 0.3 2024-08-23 19:46:04 +10:00
5a98d7a1f6 perf(ui): disable stroke, perfect draw on compositing rect 2024-08-23 19:46:04 +10:00
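
The Konva-level tweaks these perf commits describe look roughly like this (the exact config is illustrative):

```ts
import Konva from 'konva';

const compositingRect = new Konva.Rect({
  listening: false,          // skip hit detection for this node
  perfectDrawEnabled: false, // skip the extra offscreen-canvas drawing pass
  strokeEnabled: false,      // no stroke means cheaper redraws
});
```
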
f9bc96e497 tidy(ui): remove unused code, initial image 2024-08-23 19:46:04 +10:00
56350ff5dc tidy(ui): remove unused state & actions 2024-08-23 19:46:04 +10:00
6c1139340c feat(ui): region mask rendering 2024-08-23 19:46:04 +10:00
641b1a7e6f feat(ui): esc cancels drawing buffer
maybe this is not wanted? we'll see
2024-08-23 19:46:04 +10:00
674a3f462f fix(ui): render transformer over objects, fix issue w/ inpaint rect color 2024-08-23 19:46:04 +10:00
2283186d3a fix(ui): brush preview fill for inpaint/region 2024-08-23 19:46:04 +10:00
340af1fe50 fix(ui): no objects rendered until vis toggled 2024-08-23 19:46:04 +10:00
9378656d78 feat(ui): inpaint mask transform 2024-08-23 19:46:04 +10:00
5f0413e222 fix(ui): layer accidental early set isFirstRender=false 2024-08-23 19:46:04 +10:00
c3e47515b1 fix(ui): inpaint mask rendering 2024-08-23 19:46:04 +10:00
5dcef6fa0d feat(ui): wip inpaint mask uses new API 2024-08-23 19:46:04 +10:00
31ac02cd93 feat(ui): move updatePosition to transformer 2024-08-23 19:46:04 +10:00
ab16976084 feat(ui): move resetScale to transformer 2024-08-23 19:46:04 +10:00
8e2b7845e1 tidy(ui): more imperative naming 2024-08-23 19:46:04 +10:00
3973bce342 tidy(ui): use imperative names for setters in stateapi 2024-08-23 19:46:04 +10:00
f63847a504 fix(ui): commit drawing buffer on tool change, fixing bbox not calculating 2024-08-23 19:46:04 +10:00
07e3529948 fix(ui): sync transformer when requesting bbox calc 2024-08-23 19:46:04 +10:00
03e1c60694 tidy(ui): rename union CanvasEntity -> CanvasEntityState 2024-08-23 19:46:04 +10:00
d766ed71fc fix(ui): request rect calc immediately on transform, hiding rect 2024-08-23 19:46:04 +10:00
ae68ef142a feat(ui): move bbox calculation to transformer 2024-08-23 19:46:04 +10:00
20f55768c4 feat(ui): use set for transformer subscriptions 2024-08-23 19:46:04 +10:00
c4fad4456e tidy(ui): clean up worker tasks when complete 2024-08-23 19:46:04 +10:00
78f5ec44ad tidy(ui): remove unused code in CanvasTool 2024-08-23 19:46:04 +10:00
e14ba86942 feat(ui): use pubsub for isTransforming on manager 2024-08-23 19:46:04 +10:00
d4e7720f6b docs(ui): update transformer docstrings 2024-08-23 19:46:04 +10:00
a3f0e7e1cb feat(ui): revised event pubsub, transformer logic split out 2024-08-23 19:46:04 +10:00
30a696c476 feat(ui): add simple pubsub 2024-08-23 19:46:04 +10:00
66d6c64e16 feat(ui): document & clean up object renderer 2024-08-23 19:46:04 +10:00
d15be9b57c feat(ui): split out object renderer 2024-08-23 19:46:04 +10:00
e5da902fd0 fix(ui): unable to hold shift while transforming to retain ratio 2024-08-23 19:46:04 +10:00
fc558094c2 tidy(ui): rename canvas stuff 2024-08-23 19:46:04 +10:00
ad9312e989 tidy(ui): consolidate getLoggingContext builders 2024-08-23 19:46:04 +10:00
8e1a70b008 fix(ui): align all tools to 1px grid
- Offset brush tool by 0.5px when width is odd, ensuring each stroke edge is exactly on a pixel boundary
- Round the rect tool also
2024-08-23 19:46:04 +10:00
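
A sketch of the brush offset rule described above:

```ts
// Snap a brush position so both stroke edges land exactly on pixel boundaries.
function alignBrushPos(value: number, brushWidth: number): number {
  const rounded = Math.round(value);
  // An odd stroke width centered on an integer puts its edges at .5 boundaries;
  // offsetting by 0.5 puts both edges exactly on the pixel grid.
  return brushWidth % 2 === 1 ? rounded + 0.5 : rounded;
}
```
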
17f88cd5ad feat(ui): disable image smoothing on layers 2024-08-23 19:46:04 +10:00
298f1919fa fix(ui): round position when rasterizing layer 2024-08-23 19:46:04 +10:00
4d20cc11d4 feat(ui): continue modularizing transform 2024-08-23 19:46:04 +10:00
14f249a2f0 feat(ui): fix a few things that didn't unsubscribe correctly, add helper to manage subscriptions 2024-08-23 19:46:04 +10:00
b9746a6c2c feat(ui): merge bbox outline into transformer 2024-08-23 19:46:04 +10:00
94f298a6f4 fix(ui): update parent's pos not transformers 2024-08-23 19:46:04 +10:00
8d3a8178da feat(ui): merge interaction rect into transformer class 2024-08-23 19:46:04 +10:00
cad4212fe8 feat(ui): prepare staging area 2024-08-23 19:46:04 +10:00
cff28dfaa9 feat(ui): typing for logging context 2024-08-23 19:46:04 +10:00
70d7509fcc feat(ui): remove inheritance of CanvasObject
JS is terrible
2024-08-23 19:46:04 +10:00
cf83af7a27 feat(ui): split & document transformer logic, iterate on class structures 2024-08-23 19:46:04 +10:00
5c5a405c0f feat(ui): rotation snap to nearest 45deg when holding shift 2024-08-23 19:46:04 +10:00
0208e4b232 feat(ui): expose subscribe method for nanostores 2024-08-23 19:46:04 +10:00
e940754795 tidy(ui): remove layer scaling reducers 2024-08-23 19:46:04 +10:00
dc9fa1a735 fix(ui): pixel-perfect transforms 2024-08-23 19:46:04 +10:00
08591fbf6d fix(ui): layer visibility toggle 2024-08-23 19:46:04 +10:00
74db71bb5d fix(nodes): fix canvas mask erode
it wasn't eroding enough and caused incorrect transparency in result images
2024-08-23 19:46:04 +10:00
60dbe798a5 fix(ui): do not reset layer on first render 2024-08-23 19:46:04 +10:00
0e676605fe feat(ui): revised logging and naming setup, fix staging area 2024-08-23 19:46:04 +10:00
3f781016f6 feat(ui): add repr methods to layer and object classes 2024-08-23 19:46:04 +10:00
17cd2f6b02 feat(ui): use nanoid(10) instead of uuidv4 for canvas
Shorter ids are much more readable.
2024-08-23 19:46:04 +10:00
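
A sketch of the id helper (a `getPrefixedId` helper is mentioned elsewhere in this log; its exact form is assumed here):

```ts
import { nanoid } from 'nanoid';

// e.g. "layer:V1StGXR8_Z" - short enough to eyeball in logs, unlike a UUID.
const getPrefixedId = (prefix: string): string => `${prefix}:${nanoid(10)}`;
```
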
99102a1b34 build(ui): add nanoid as explicit dep 2024-08-23 19:46:04 +10:00
8d72e7d9e8 fix(ui): move CanvasImage's konva image to correct object 2024-08-23 19:46:04 +10:00
0b6b6f97ad fix(ui): prevent flash when applying transform 2024-08-23 19:46:04 +10:00
fb2f6382b1 build(ui): add eslint rules for async stuff 2024-08-23 19:46:04 +10:00
1ddea87c35 feat(ui): trying to fix flicker after transform 2024-08-23 19:46:04 +10:00
ea02323095 feat(ui): transform cleanup 2024-08-23 19:46:04 +10:00
49733091c7 feat(ui): fix transform when rotated 2024-08-23 19:46:04 +10:00
cf833fd6e2 fix(ui): use pixel bbox when image is in layer 2024-08-23 19:46:04 +10:00
ba5cf07ab8 fix(ui): transforming when axes flipped 2024-08-23 19:46:04 +10:00
d15321a373 feat(ui): hallelujah (???) 2024-08-23 19:46:04 +10:00
de597a5eb4 feat(ui): add debug button 2024-08-23 19:46:04 +10:00
e5f5cbdf5c fix(ui): transformer padding 2024-08-23 19:46:04 +10:00
7d4342bbff feat(ui): wip transform mode 2 2024-08-23 19:46:04 +10:00
7f8a1d8d20 feat(ui): wip transform mode 2024-08-23 19:46:04 +10:00
65353ac1e1 feat(ui): wip transform mode 2024-08-23 19:46:04 +10:00
7f9a31ca4a fix(ui): dnd to canvas broke 2024-08-23 19:46:04 +10:00
592eb2886c fix(ui): conflicts after rebasing 2024-08-23 19:46:04 +10:00
c220dd8987 fix(ui): imageDropped listener 2024-08-23 19:46:04 +10:00
a263beb0d5 wip 2024-08-23 19:46:04 +10:00
46b7c510eb fix(ui): transform tool seems to be working 2024-08-23 19:46:04 +10:00
f405e472ea fix(ui): move tool fixes, add transform tool 2024-08-23 19:46:04 +10:00
7bdfd3ef5f feat(ui): move tool now only moves 2024-08-23 19:46:04 +10:00
778ee2c679 feat(ui): layer bbox calc in worker 2024-08-23 19:46:04 +10:00
e70339ff3e feat(ui): tweaked entity & group selection styles 2024-08-23 19:46:04 +10:00
88c57a9750 feat(ui): canvas entity list headers 2024-08-23 19:46:04 +10:00
137252128b tidy(ui): CanvasRegion 2024-08-23 19:46:04 +10:00
d4297b1345 tidy(ui): CanvasRect 2024-08-23 19:46:04 +10:00
6059bc7b47 tidy(ui): CanvasLayer 2024-08-23 19:46:04 +10:00
c3ff3eb51f tidy(ui): CanvasInpaintMask 2024-08-23 19:46:04 +10:00
0b7751c413 tidy(ui): CanvasInitialImage 2024-08-23 19:46:04 +10:00
d7f1c30624 tidy(ui): CanvasImage 2024-08-23 19:46:04 +10:00
3f4d7dbeea tidy(ui): CanvasEraserLine 2024-08-23 19:46:04 +10:00
19b6ae2907 tidy(ui): CanvasControlAdapter 2024-08-23 19:46:04 +10:00
769f96ff9f tidy(ui): CanvasBrushLine 2024-08-23 19:46:04 +10:00
fdaf75faa4 tidy(ui): CanvasBbox 2024-08-23 19:46:04 +10:00
1380bb7ae6 tidy(ui): CanvasBackground 2024-08-23 19:46:04 +10:00
9483c8cc29 tidy(ui): update canvas classes, organise location of konva nodes 2024-08-23 19:46:04 +10:00
2ef8a8cf5a feat(ui): add names to all konva objects
Makes troubleshooting much simpler
2024-08-23 19:46:04 +10:00
d296ec1932 fix(ui): do not await creating new canvas image
If you await this, it causes a race condition where multiple images are created.
2024-08-23 19:46:04 +10:00
444ad3dae1 feat(ui): use position and dimensions instead of separate x,y,width,height attrs 2024-08-23 19:46:04 +10:00
8cdcc71378 fix(ui): remove weird rtkq hook wrapper
I do not understand why I did that initially but it doesn't work with TS.
2024-08-23 19:46:04 +10:00
e8bc06cfd3 feat(ui): rename types size and position to dimensions and coordinate 2024-08-23 19:46:04 +10:00
67a0a024e9 tidy(ui): hide layer settings by default 2024-08-23 19:46:04 +10:00
bd2c46c267 fix(ui): layer rendering when starting as disabled 2024-08-23 19:46:04 +10:00
5acb27f350 feat(invocation): reduce canvas v2 mask & crop mask dilation 2024-08-23 19:46:04 +10:00
7271b12d0f feat(ui): de-jank staging area and progress images 2024-08-23 19:46:04 +10:00
4a79467a33 feat(ui): update staging handling to work w/ cropped mask 2024-08-23 19:46:04 +10:00
5501bb87a3 chore(ui): typegen 2024-08-23 19:46:04 +10:00
561610e296 feat(app): update CanvasV2MaskAndCropInvocation 2024-08-23 19:46:04 +10:00
b76609ef18 feat(ui): use new canvas output node 2024-08-23 19:46:04 +10:00
070b78501b chore(ui): typegen 2024-08-23 19:46:04 +10:00
50df4f4ab6 feat(app): add CanvasV2MaskAndCropInvocation & CanvasV2MaskAndCropOutput
This handles some masking and cropping that the canvas needs.
2024-08-23 19:46:04 +10:00
9bbf430125 fix(ui): restore nodes output tracking 2024-08-23 19:46:04 +10:00
384a90958a feat(ui): rip out document size
barely knew ye
2024-08-23 19:46:04 +10:00
0e4a25b029 feat(ui): convert initial image to layer when starting canvas session 2024-08-23 19:46:04 +10:00
4a44e171fd fix(ui): fix layer transparency calculation 2024-08-23 19:46:04 +10:00
9bc57a6f59 fix(ui): reset initial image when resetting canvas 2024-08-23 19:46:03 +10:00
4341ed7ab4 fix(ui): reset node executions states when loading workflow 2024-08-23 19:46:03 +10:00
97ce72c542 fix(ui): entity display list 2024-08-23 19:46:03 +10:00
a2c78a57a7 feat(ui): img2img working 2024-08-23 19:46:03 +10:00
044a713dc9 feat(ui): rough out img2img on canvas 2024-08-23 19:46:03 +10:00
b8479c5fe2 UNDO ME WIP 2024-08-23 19:46:03 +10:00
4e5d056824 feat(ui): log invocation source id on socket event 2024-08-23 19:46:03 +10:00
118278b372 feat(ui): restore document size overlay renderer 2024-08-23 19:46:03 +10:00
8e8c255f3f feat(ui): make document size a rect 2024-08-23 19:46:03 +10:00
1575bee401 refactor(ui): remove modular imagesize components
This is no longer necessary with canvas v2, and it added a ton of extraneous redux actions when changing the image size. Also renamed to document size.
2024-08-23 19:46:03 +10:00
249bbfc883 feat(ui): initialState is for generation mode 2024-08-23 19:46:03 +10:00
3993ae410f feat(ui): split out canvas entity list component 2024-08-23 19:46:03 +10:00
edf040e3d2 feat(ui): hide bbox button when no canvas session active 2024-08-23 19:46:03 +10:00
66fd077ee7 tidy(ui): remove unused naming objects/utils
The canvas manager means we don't need to worry about konva node names as we never directly select konva nodes.
2024-08-23 19:46:03 +10:00
b93462ebb6 feat(ui): split up tool chooser buttons
Prep for distinct toolbars for generation vs canvas modes
2024-08-23 19:46:03 +10:00
aae60d0cdc feat(ui): add useAssertSingleton util hook
This simple hook asserts that it is only ever called once. Particularly useful for things like hotkeys hooks.
2024-08-23 19:46:03 +10:00
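
A minimal sketch of such a hook (implementation assumed):

```ts
import { useEffect } from 'react';

// Track which keys are currently mounted, at module scope.
const activeKeys = new Set<string>();

// Throws if two instances with the same key are ever mounted at once.
export function useAssertSingleton(key: string): void {
  useEffect(() => {
    if (activeKeys.has(key)) {
      throw new Error(`There should only be one instance of "${key}" mounted at a time`);
    }
    activeKeys.add(key);
    return () => {
      activeKeys.delete(key);
    };
  }, [key]);
}
```
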
d4da00e607 feat(ui): "stagingArea" -> "session" 2024-08-23 19:46:03 +10:00
0c539ff00b feat(ui): add reset button to canvas 2024-08-23 19:46:03 +10:00
5983cbf26c feat(ui): add snapToRect util 2024-08-23 19:46:03 +10:00
c513d6e3af fix(ui): fiddle with control adapter filters
some jank still
2024-08-23 19:46:03 +10:00
9d57c0e631 feat(ui): temp disable doc size overlay 2024-08-23 19:46:03 +10:00
a1923a8966 feat(ui): no animation on layer selection
Felt sluggish
2024-08-23 19:46:03 +10:00
d988e18731 feat(ui): use canvas as source for control images (wip) 2024-08-23 19:46:03 +10:00
51008da2dd fix(ui): control adapter translate & scale 2024-08-23 19:46:03 +10:00
6ccc1f5672 tidy(ui): removed unused state related to non-buffered drawing 2024-08-23 19:46:03 +10:00
4a556f84e0 feat(ui): control adapter image rendering 2024-08-23 19:46:03 +10:00
2f21a2220d fix(ui): do not floor bbox calc, it cuts off the last pixels 2024-08-23 19:46:03 +10:00
91a420b13e feat(ui): fix issue where creating line needs 2 points 2024-08-23 19:46:03 +10:00
c27da3581b fix(ui): edge cases when holding shift and drawing lines 2024-08-23 19:46:03 +10:00
961dfbce93 fix(ui): set buffered rect color to full alpha 2024-08-23 19:46:03 +10:00
df39c825ae fix(ui): handle mouseup correctly 2024-08-23 19:46:03 +10:00
3f6ee1b7a4 feat(ui): buffered rect drawing 2024-08-23 19:46:03 +10:00
908e504a6f fix(ui): buffered drawing edge cases 2024-08-23 19:46:03 +10:00
f2fa41afc5 perf(ui): do not use stage.find 2024-08-23 19:46:03 +10:00
440ff40ad5 perf(ui): object groups do not listen 2024-08-23 19:46:03 +10:00
5c15458e15 perf(ui): buffered drawing (wip) 2024-08-23 19:46:03 +10:00
be5b474f1e tidy(ui): organise files 2024-08-23 19:46:03 +10:00
cee178c2b6 tidy(ui): organise files 2024-08-23 19:46:03 +10:00
27657f8b7a tidy(ui): organise files 2024-08-23 19:46:03 +10:00
e0cde3815a fix(ui): background rendering 2024-08-23 19:46:03 +10:00
09d0421de4 pkg(ui): remove unused deps react-konva & use-image 2024-08-23 19:46:03 +10:00
47b94d563c feat(ui): organize konva state and files 2024-08-23 19:46:03 +10:00
0b5d20c9f0 fix(ui): merge conflicts in image deletion listener 2024-08-23 19:46:03 +10:00
80e7e1293a fix(ui): region rendering 2024-08-23 19:46:03 +10:00
3a82b0cbc1 fix(ui): inpaint mask rendering 2024-08-23 19:46:03 +10:00
a27cbc13b6 fix(ui): staging area rendering 2024-08-23 19:46:03 +10:00
a8f962eb3f fix(ui): stale selected entity 2024-08-23 19:46:03 +10:00
7f40d23f19 fix(ui): staging area image offset 2024-08-23 19:46:03 +10:00
918354cd9d feat(ui): tweak layer ui component 2024-08-23 19:46:03 +10:00
eef9278ee6 fix(ui): resetting layer resets position 2024-08-23 19:46:03 +10:00
2c32e2e5c1 feat(ui): updated layer list component styling 2024-08-23 19:46:03 +10:00
6f05654db5 feat(ui): transformable layers 2024-08-23 19:46:03 +10:00
1d31b6902f feat(ui): move tool icon is pointer like in other apps 2024-08-23 19:46:03 +10:00
5a7d615e64 feat(ui): do not floor cursor position 2024-08-23 19:46:03 +10:00
1dbf9e4ed4 feat(ui): disable gallery hotkeys while staging 2024-08-23 19:46:03 +10:00
5dcc6ee203 feat(ui): revised canvas progress & staging image handling 2024-08-23 19:46:03 +10:00
84a4e6ae3f feat(ui): show queue item origin in queue list 2024-08-23 19:46:03 +10:00
f283bfd68f chore(ui): typegen 2024-08-23 19:46:03 +10:00
6e5ff7b79c feat(app): add origin to session queue
The origin is an optional field indicating the queue item's origin. For example, "canvas" when the queue item originated from the canvas or "workflows" when the queue item originated from the workflows tab. If omitted, we assume the queue item originated from the API directly.

- Add migration to add the nullable column to the `session_queue` table.
- Update relevant event payloads with the new field.
- Add `cancel_by_origin` method to `session_queue` service and corresponding route. This is required for the canvas to bail out early when staging images.
- Add `origin` to both `SessionQueueItem` and `Batch` - it needs to be provided initially via the batch and then passed onto the queue item.
2024-08-23 19:46:03 +10:00
7c3800d03f fix(ui): denoise start on outpainting 2024-08-23 19:46:03 +10:00
941db90518 feat(ui): add redux events for queue cleared & batch enqueued socket events 2024-08-23 19:46:03 +10:00
0d9ecf0f90 feat(ui): canvas staging area works 2024-08-23 19:46:03 +10:00
9c77023a11 feat(ui): switch to view tool when staging 2024-08-23 19:46:03 +10:00
b55378c63c tidy(ui): disable preview images on every enqueue 2024-08-23 19:46:03 +10:00
946c2a49ab feat(ui): rough out save staging image 2024-08-23 19:46:03 +10:00
b823c31ec6 feat(ui): staging area image visibility toggle 2024-08-23 19:46:03 +10:00
ec6361e5cb fix(ui): batch building after removing canvas files 2024-08-23 19:46:03 +10:00
0c26d28278 feat(ui): make Graph class's getMetadataNode public 2024-08-23 19:46:03 +10:00
c5172d4c5a tidy(ui): remove old canvas graphs 2024-08-23 19:46:03 +10:00
89de04775e fix(ui): do not select already-selected entity 2024-08-23 19:46:03 +10:00
b4c3c940b5 tidy(ui): naming things 2024-08-23 19:46:03 +10:00
aee2aad959 tidy(ui): file organisation 2024-08-23 19:46:03 +10:00
5ca48a8a5f fix(ui): reset cursor pos when fitting document 2024-08-23 19:46:03 +10:00
1806aa187b feat(ui): staging area works more better 2024-08-23 19:46:03 +10:00
7824cb7a1a feat(ui): staging area barely works 2024-08-23 19:46:03 +10:00
9807a896f4 feat(ui): consolidate konva API 2024-08-23 19:46:03 +10:00
19866f057d feat(ui): consolidate konva API 2024-08-23 19:46:03 +10:00
ec4eae3c9c feat(ui): staging area (rendering wip) 2024-08-23 19:46:03 +10:00
bea0cba038 tidy(ui): type "Dimensions" -> "Size" 2024-08-23 19:46:03 +10:00
48ee75af9c feat(ui): add updateNode to Graph 2024-08-23 19:46:03 +10:00
929c593d2f feat(ui): sdxl graphs 2024-08-23 19:46:03 +10:00
221f32eca7 feat(ui): sd1 outpaint graph 2024-08-23 19:46:03 +10:00
c3acc15e8b tests(ui): add missing tests for Graph class 2024-08-23 19:46:03 +10:00
1b653278fc feat(ui): add Graph.getid() util 2024-08-23 19:46:03 +10:00
cc9062ee46 feat(ui): outpaint graph, organize builder a bit 2024-08-23 19:46:03 +10:00
91c0feb0ad feat(ui): inpaint sd1 graph 2024-08-23 19:46:03 +10:00
ae60292ac8 feat(ui): temp disable image caching while testing 2024-08-23 19:46:03 +10:00
a6ca17b19d feat(ui): txt2img & img2img graphs 2024-08-23 19:46:03 +10:00
6a4a5ece74 feat(ui): minor change to canvas bbox state type 2024-08-23 19:46:03 +10:00
9b81860307 feat(ui): simplified konva node to blob/imagedata utils 2024-08-23 19:46:03 +10:00
5f4a3928d2 feat(ui): node manager getter/setter 2024-08-23 19:46:03 +10:00
b703884763 feat(ui): generation mode calculation, fudged graphs 2024-08-23 19:46:03 +10:00
32da98ab8f feat(ui): add utils for getting images from canvas 2024-08-23 19:46:03 +10:00
bd5a85bf70 feat(ui): even more simplified API - lean on the konva node manager to abstract imperative state API & rendering 2024-08-23 19:46:03 +10:00
d045f24014 feat(ui): revised docstrings for renderers & simplified api 2024-08-23 19:46:03 +10:00
2aad3f89c3 feat(ui): inpaint mask UI components 2024-08-23 19:46:03 +10:00
dd54d19f00 feat(ui): inpaint mask rendering (wip) 2024-08-23 19:46:03 +10:00
0ed6591d8c fix(ui): models loaded handler 2024-08-23 19:46:03 +10:00
712e090134 feat(ui): internal state for inpaint mask 2024-08-23 19:46:03 +10:00
8fc2a1d1cf refactor(ui): divvy up canvas state a bit 2024-08-23 19:46:03 +10:00
cc15c1593e feat(ui): get region and base layer canvas to blob logic working 2024-08-23 19:46:03 +10:00
9997d3abda refactor(ui): node manager handles more tedious annoying stuff 2024-08-23 19:46:03 +10:00
031471e785 feat(ui): use node manager for addRegions 2024-08-23 19:46:03 +10:00
2e860c6791 feat(ui): persist bbox 2024-08-23 19:46:03 +10:00
d071a9e17d fix(ui): fix generation graphs 2024-08-23 19:46:03 +10:00
ed53d33321 feat(ui): add toggle for clipToBbox 2024-08-23 19:46:03 +10:00
382bc6d978 feat(ui): rename konva node manager 2024-08-23 19:46:03 +10:00
dab42e258c refactor(ui): create classes to abstract mgmt of konva nodes 2024-08-23 19:46:03 +10:00
81556410bb tidy(ui): organise renderers 2024-08-23 19:46:03 +10:00
1f2dfd473c refactor(ui): create entity to konva node map abstraction (wip)
Instead of chaining konva `find` and `findOne` methods, all konva nodes are added to a mapping object. Finding and manipulating them is much simpler.

Done for regions and layers, wip for control adapters.
2024-08-23 19:46:03 +10:00
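
A sketch of the mapping abstraction (class and method names illustrative):

```ts
import Konva from 'konva';

// Register konva nodes by entity id instead of searching the stage
// with find()/findOne().
class EntityKonvaMap<T extends Konva.Node> {
  private nodes = new Map<string, T>();

  add(id: string, node: T): void {
    this.nodes.set(id, node);
  }

  get(id: string): T | undefined {
    return this.nodes.get(id);
  }

  destroy(id: string): void {
    this.nodes.get(id)?.destroy(); // remove the konva node from the stage
    this.nodes.delete(id);
  }
}
```
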
8f0f51be2c perf(ui): fix lag w/ region rendering
Needed to memoize these selectors
2024-08-23 19:46:03 +10:00
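
A sketch of the memoization (the state shape is simplified and assumed):

```ts
import { createSelector } from '@reduxjs/toolkit';

// Assumed, simplified state shape for illustration only.
type RootState = { canvas: { regions: { id: string }[] } };

// Without memoization, a selector that computes a fresh result on every call
// forces subscribers to re-render on every store update; createSelector caches it.
const selectRegionById = createSelector(
  [(state: RootState) => state.canvas.regions, (_state: RootState, id: string) => id],
  (regions, id) => regions.find((region) => region.id === id),
);
```
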
7179e250ed feat(ui): move canvas fill color picker to right 2024-08-23 19:46:03 +10:00
5bec091fd6 refactor(ui): remove unused ellipse & polygon objects 2024-08-23 19:46:03 +10:00
2c5896cb0c fix(ui): incorrect rect/brush/eraser positions 2024-08-23 19:46:03 +10:00
93ff252dc0 refactor(ui): enable global debugging flag 2024-08-23 19:46:03 +10:00
ac52224455 refactor(ui): disable the preview renderer for now 2024-08-23 19:46:03 +10:00
4087cad23d tweak(ui): canvas editor layout 2024-08-23 19:46:03 +10:00
e936b1ff8f perf(ui): memoize layeractionsmenu valid actions 2024-08-23 19:46:03 +10:00
b7f9c5e221 refactor(ui): decouple konva renderer from react
Subscribe to redux store directly, skipping all the react overhead.

With react in dev mode, a typical frame while using the brush tool on almost-empty canvas is reduced from ~7.5ms to ~3.5ms. All things considered, this still feels slow, but it's a massive improvement.
2024-08-23 19:46:03 +10:00
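
A sketch of the subscription loop (selector and renderer names assumed):

```ts
// The renderer subscribes to the store directly and re-renders only when its
// slice's reference changes, skipping react-redux entirely.
let prevCanvasState = selectCanvasSlice(store.getState());

const unsubscribe = store.subscribe(() => {
  const canvasState = selectCanvasSlice(store.getState());
  if (canvasState === prevCanvasState) {
    return; // reference equality: reducers produce new objects only on change
  }
  prevCanvasState = canvasState;
  renderCanvas(canvasState); // imperative konva rendering
});
```
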
fc5467150e feat(ui): clip lines to bbox 2024-08-23 19:46:03 +10:00
4dcab357a0 fix(ui): document fit positioning 2024-08-23 19:46:03 +10:00
695e464255 feat(ui): document bounds overlay 2024-08-23 19:46:03 +10:00
9999b60c3b tidy(ui): background layer 2024-08-23 19:46:03 +10:00
e7df53e260 refactor(ui): use "entity" instead of "data" for canvas 2024-08-23 19:46:03 +10:00
844590a571 feat(ui): brush size border radius = 1 2024-08-23 19:46:03 +10:00
9622beaa0d fix(ui): canvas HUD doesn't interrupt tool 2024-08-23 19:46:03 +10:00
007e2553a8 refactor(ui): split up canvas entity renderers, temp disable preview 2024-08-23 19:46:03 +10:00
15ad4e3f5e fix(ui): delete all layers button 2024-08-23 19:46:03 +10:00
be5094fcb4 fix(ui): ignore keyboard shortcuts in input/textarea elements 2024-08-23 19:46:03 +10:00
a20a861680 fix(ui): canvas entity ids getting clobbered 2024-08-23 19:46:03 +10:00
396d0a4bc0 fix(ui): move lora followup fixes 2024-08-23 19:46:03 +10:00
ca9314e077 chore(ui): lint 2024-08-23 19:46:03 +10:00
4b848798e7 refactor(ui): move loras to canvas slice 2024-08-23 19:46:03 +10:00
083bcbc77d fix(ui): layer is selected when added 2024-08-23 19:46:03 +10:00
e8cdc9ae62 feat(ui): r to center & fit stage on document 2024-08-23 19:46:03 +10:00
8abfa759a4 feat(ui): better HUD 2024-08-23 19:46:03 +10:00
f6a324b633 fix(ui): always use current brush width when making straight lines 2024-08-23 19:46:03 +10:00
f083be9391 feat(ui): hold shift w/ brush to draw straight line 2024-08-23 19:46:03 +10:00
091e2fb751 fix(ui): update bg on canvas resize 2024-08-23 19:46:03 +10:00
d8539daf1f refactor(ui): better hud 2024-08-23 19:46:03 +10:00
7ec059f5fa refactor(ui): scaled tool preview border 2024-08-23 19:46:03 +10:00
4f2ecdefd2 refactor(ui): port remaining canvasV1 rendering logic to V2, remove old code 2024-08-23 19:46:03 +10:00
e8891a1988 refactor(ui): fix more types 2024-08-23 19:46:03 +10:00
37d2607f34 refactor(ui): metadata recall (wip)
just enough to let the app run
2024-08-23 19:46:03 +10:00
0e7b10d3d9 refactor(ui): undo/redo button temp fix 2024-08-23 19:46:03 +10:00
1f85888638 refactor(ui): fix renderer stuff 2024-08-23 19:46:03 +10:00
c1f9a129fa refactor(ui): fix misc types 2024-08-23 19:46:03 +10:00
7ccc5ba398 refactor(ui): fix gallery stuff 2024-08-23 19:46:03 +10:00
5e1a6ae334 refactor(ui): fix delete image stuff 2024-08-23 19:46:03 +10:00
3f6cf638f9 refactor(ui): fix useIsReadyToEnqueue for new adapterType field 2024-08-23 19:46:03 +10:00
46e062a828 refactor(ui): update generation tab graphs 2024-08-23 19:46:02 +10:00
cc3a0b5d6c refactor(ui): add adapterType to ControlAdapterData 2024-08-23 19:46:02 +10:00
775479ab7b refactor(ui): update components & logic to use new unified slice (again) 2024-08-23 19:46:02 +10:00
6b9e0e6d63 refactor(ui): update components & logic to use new unified slice 2024-08-23 19:46:02 +10:00
83a5c87f5e refactor(ui): merge compositing, params into canvasV2 slice 2024-08-23 19:46:02 +10:00
84fde74331 refactor(ui): add scaled bbox state 2024-08-23 19:46:02 +10:00
a517e29b39 refactor(ui): update dnd/image upload 2024-08-23 19:46:02 +10:00
ccceba7565 refactor(ui): update size/prompts state 2024-08-23 19:46:02 +10:00
5fc7a03669 refactor(ui): rip out old control adapter implementation 2024-08-23 19:46:02 +10:00
8864ad1b50 refactor(ui): canvas v2 (wip)
fix entity count select
2024-08-23 19:46:02 +10:00
f2989885fb refactor(ui): canvas v2 (wip)
delete unused file
2024-08-23 19:46:02 +10:00
19c66e5c76 refactor(ui): canvas v2 (wip)
merge all canvas state reducers into one big slice (but with the logic split across files so it's not hell)
2024-08-23 19:46:02 +10:00
8a6690a57c refactor(ui): canvas v2 (wip)
Fix a few more components
2024-08-23 19:46:02 +10:00
2cc60f253a refactor(ui): canvas v2 (wip)
missed a spot
2024-08-23 19:46:02 +10:00
cb69872dd3 refactor(ui): canvas v2 (wip)
Redo all UI components for different canvas entity types
2024-08-23 19:46:02 +10:00
ba66d7c9a6 refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
9fe727c9f8 refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
58c656224f refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
c51253f5f6 refactor(ui): canvas v2 (wip) 2024-08-23 19:46:02 +10:00
6c1d1588fc feat(ui): bbox tool 2024-08-23 19:46:02 +10:00
95d6183a6c fix(ui): rect tool preview 2024-08-23 19:46:02 +10:00
f4c9facdaf fix(ui): multiple stages 2024-08-23 19:46:02 +10:00
a274e6f165 feat(ui): decouple konva logic from nanostores 2024-08-23 19:46:02 +10:00
154e3e6f64 feat(ui): store all stage attrs in nanostores 2024-08-23 19:46:02 +10:00
2c3ac972e5 feat(ui): round stage scale 2024-08-23 19:46:02 +10:00
2e2e072b0b chore(ui): bump konva 2024-08-23 19:46:02 +10:00
9d8dd2bf66 feat(ui): generation bbox transformation working
whew
2024-08-23 19:46:02 +10:00
9047f6db30 feat(ui): wip generation bbox 2024-08-23 19:46:02 +10:00
5ab345ee63 feat(ui): wip generation bbox 2024-08-23 19:46:02 +10:00
d8a83acd3a feat(ui): CL zoom and pan, some rendering optimizations 2024-08-23 19:46:02 +10:00
1f58e5756b Revert "feat(ui): add x,y,scaleX,scaleY,rotation to objects"
This reverts commit 53318b396c967c72326a7e4dea09667b2ab20bdd.
2024-08-23 19:46:02 +10:00
744acb8f07 feat(ui): layers manage their own bbox 2024-08-23 19:46:02 +10:00
ae7228d821 docs(ui): konva image object docstrings 2024-08-23 19:46:02 +10:00
99d8b3a7bf feat(ui): add x,y,scaleX,scaleY,rotation to objects 2024-08-23 19:46:02 +10:00
3fbe65bbcf fix(ui): show color picker when using rect tool 2024-08-23 19:46:02 +10:00
f5d879d8e7 feat(ui): image loading fallback for raster layers 2024-08-23 19:46:02 +10:00
cbc5a4f8e6 feat(ui): bbox calc for raster layers 2024-08-23 19:46:02 +10:00
37ac7d8ed5 feat(ui): do not fill brush preview when drawing 2024-08-23 19:46:02 +10:00
bee3fa339d fix(ui): brush spacing handling 2024-08-23 19:46:02 +10:00
c171fe2b96 fix(ui): jank when starting a shape when not already focused on stage 2024-08-23 19:46:02 +10:00
1fa8032fdb feat(ui): wip raster layers
I meant to split this up into smaller commits and undo some of it, but I committed afterwards and it's tedious to undo.
2024-08-23 19:46:02 +10:00
5c0676bcc2 feat(ui): support image objects on raster layers
Just the UI and internal state, not rendering yet.
2024-08-23 19:46:02 +10:00
cefd9a027c tidy(ui): clean up event handlers
Separate logic for each tool in preparation for ellipse and polygon tools.
2024-08-23 19:46:02 +10:00
1bce156de1 feat(ui): raster layer reset, object group util 2024-08-23 19:46:02 +10:00
cd4f63f2fd feat(ui): rect shape preview now has fill 2024-08-23 19:46:02 +10:00
3c7140cbf3 feat(ui): cancel shape drawing on esc 2024-08-23 19:46:02 +10:00
b71ba63b5a feat(ui): temp disable history on CL 2024-08-23 19:46:02 +10:00
d540e2c0d3 feat(ui): raster layer logic
- Deduplicate shared logic
- Split up giant renderers file into separate cohesive files
- Tons of cleanup
- Progress on raster layer functionality
2024-08-23 19:46:02 +10:00
d79fafc5f5 feat(ui): add raster layer rendering and interaction (WIP) 2024-08-23 19:46:02 +10:00
9e93fa2092 feat(ui): scaffold out raster layers
Raster layers may have images, lines and shapes. These will replace initial image layers and provide sketching functionality like we have on canvas.
2024-08-23 19:46:02 +10:00
392e9b4882 refactor(ui): revise types for line and rect objects
- Create separate object types for brush and eraser lines, instead of a single type that has a `tool` field.
- Create new object type for rect shapes.
- Add logic to schemas to migrate old object types to new.
- Update renderers & reducers.
2024-08-23 19:46:02 +10:00
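
A sketch of the revised union (field names assumed):

```ts
// Separate brush and eraser line types instead of one line type with a `tool`
// field, plus a new type for rect shapes.
type CanvasBrushLineState = {
  type: 'brush_line';
  points: number[];
  strokeWidth: number;
  color: string;
};

type CanvasEraserLineState = {
  type: 'eraser_line';
  points: number[];
  strokeWidth: number;
};

type CanvasRectState = {
  type: 'rect_shape';
  x: number;
  y: number;
  width: number;
  height: number;
  color: string;
};

type CanvasObjectState = CanvasBrushLineState | CanvasEraserLineState | CanvasRectState;
```
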
459 changed files with 4987 additions and 10167 deletions

View File

@@ -60,7 +60,7 @@ jobs:
           extra-index-url: 'https://download.pytorch.org/whl/cpu'
           github-env: $GITHUB_ENV
         - platform: macos-default
-          os: macOS-14
+          os: macOS-12
           github-env: $GITHUB_ENV
         - platform: windows-cpu
           os: windows-2022

View File

@@ -40,7 +40,6 @@ class UIType(str, Enum, metaclass=MetaEnum):

     # region Model Field Types
     MainModel = "MainModelField"
-    FluxMainModel = "FluxMainModelField"
     SDXLMainModel = "SDXLMainModelField"
     SDXLRefinerModel = "SDXLRefinerModelField"
     ONNXModel = "ONNXModelField"
@@ -49,7 +48,6 @@ class UIType(str, Enum, metaclass=MetaEnum):
     ControlNetModel = "ControlNetModelField"
     IPAdapterModel = "IPAdapterModelField"
     T2IAdapterModel = "T2IAdapterModelField"
-    T5EncoderModel = "T5EncoderModelField"
     SpandrelImageToImageModel = "SpandrelImageToImageModelField"
     # endregion

@@ -127,16 +125,13 @@ class FieldDescriptions:
     negative_cond = "Negative conditioning tensor"
     noise = "Noise tensor"
     clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
-    t5_encoder = "T5 tokenizer and text encoder"
     unet = "UNet (scheduler, LoRAs)"
-    transformer = "Transformer"
     vae = "VAE"
     cond = "Conditioning tensor"
     controlnet_model = "ControlNet model to load"
     vae_model = "VAE model to load"
     lora_model = "LoRA model to load"
     main_model = "Main model (UNet, VAE, CLIP) to load"
-    flux_model = "Flux model (Transformer) to load"
     sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
     sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
     onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
@@ -236,12 +231,6 @@ class ColorField(BaseModel):
         return (self.r, self.g, self.b, self.a)


-class FluxConditioningField(BaseModel):
-    """A conditioning tensor primitive value"""
-
-    conditioning_name: str = Field(description="The name of conditioning tensor")
-
-
 class ConditioningField(BaseModel):
     """A conditioning tensor primitive value"""

View File

@@ -1,86 +0,0 @@
from typing import Literal

import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import FluxConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.conditioner import HFEncoder
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo


@invocation(
    "flux_text_encoder",
    title="FLUX Text Encoding",
    tags=["prompt", "conditioning", "flux"],
    category="conditioning",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxTextEncoderInvocation(BaseInvocation):
    """Encodes and preps a prompt for a flux image."""

    clip: CLIPField = InputField(
        title="CLIP",
        description=FieldDescriptions.clip,
        input=Input.Connection,
    )
    t5_encoder: T5EncoderField = InputField(
        title="T5Encoder",
        description=FieldDescriptions.t5_encoder,
        input=Input.Connection,
    )
    t5_max_seq_len: Literal[256, 512] = InputField(
        description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
    )
    prompt: str = InputField(description="Text prompt to encode.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
        t5_embeddings, clip_embeddings = self._encode_prompt(context)
        conditioning_data = ConditioningFieldData(
            conditionings=[FLUXConditioningInfo(clip_embeds=clip_embeddings, t5_embeds=t5_embeddings)]
        )

        conditioning_name = context.conditioning.save(conditioning_data)
        return FluxConditioningOutput.build(conditioning_name)

    def _encode_prompt(self, context: InvocationContext) -> tuple[torch.Tensor, torch.Tensor]:
        # Load CLIP.
        clip_tokenizer_info = context.models.load(self.clip.tokenizer)
        clip_text_encoder_info = context.models.load(self.clip.text_encoder)

        # Load T5.
        t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
        t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)

        prompt = [self.prompt]

        with (
            t5_text_encoder_info as t5_text_encoder,
            t5_tokenizer_info as t5_tokenizer,
        ):
            assert isinstance(t5_text_encoder, T5EncoderModel)
            assert isinstance(t5_tokenizer, T5Tokenizer)

            t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)

            prompt_embeds = t5_encoder(prompt)

        with (
            clip_text_encoder_info as clip_text_encoder,
            clip_tokenizer_info as clip_tokenizer,
        ):
            assert isinstance(clip_text_encoder, CLIPTextModel)
            assert isinstance(clip_tokenizer, CLIPTokenizer)

            clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)

            pooled_prompt_embeds = clip_encoder(prompt)

        assert isinstance(prompt_embeds, torch.Tensor)
        assert isinstance(pooled_prompt_embeds, torch.Tensor)
        return prompt_embeds, pooled_prompt_embeds

View File

@@ -1,172 +0,0 @@
import torch
from einops import rearrange
from PIL import Image

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    FluxConditioningField,
    Input,
    InputField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import TransformerField, VAEField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.session_processor.session_processor_common import CanceledException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.sampling import denoise, get_noise, get_schedule, prepare_latent_img_patches, unpack
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "flux_text_to_image",
    title="FLUX Text to Image",
    tags=["image", "flux"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Text-to-image generation using a FLUX model."""

    transformer: TransformerField = InputField(
        description=FieldDescriptions.flux_model,
        input=Input.Connection,
        title="Transformer",
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )
    positive_text_conditioning: FluxConditioningField = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection
    )
    width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
    height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
    num_steps: int = InputField(
        default=4, description="Number of diffusion steps. Recommend values are schnell: 4, dev: 50."
    )
    guidance: float = InputField(
        default=4.0,
        description="The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell.",
    )
    seed: int = InputField(default=0, description="Randomness seed for reproducibility.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the conditioning data.
        cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
        assert len(cond_data.conditionings) == 1
        flux_conditioning = cond_data.conditionings[0]
        assert isinstance(flux_conditioning, FLUXConditioningInfo)

        latents = self._run_diffusion(context, flux_conditioning.clip_embeds, flux_conditioning.t5_embeds)
        image = self._run_vae_decoding(context, latents)
        image_dto = context.images.save(image=image)
        return ImageOutput.build(image_dto)

    def _run_diffusion(
        self,
        context: InvocationContext,
        clip_embeddings: torch.Tensor,
        t5_embeddings: torch.Tensor,
    ):
        transformer_info = context.models.load(self.transformer.transformer)
        inference_dtype = torch.bfloat16

        # Prepare input noise.
        x = get_noise(
            num_samples=1,
            height=self.height,
            width=self.width,
            device=TorchDevice.choose_torch_device(),
            dtype=inference_dtype,
            seed=self.seed,
        )

        img, img_ids = prepare_latent_img_patches(x)

        is_schnell = "schnell" in transformer_info.config.config_path

        timesteps = get_schedule(
            num_steps=self.num_steps,
            image_seq_len=img.shape[1],
            shift=not is_schnell,
        )

        bs, t5_seq_len, _ = t5_embeddings.shape
        txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())

        # HACK(ryand): Manually empty the cache. Currently we don't check the size of the model before loading it from
        # disk. Since the transformer model is large (24GB), there's a good chance that it will OOM on 32GB RAM systems
        # if the cache is not empty.
        context.models._services.model_manager.load.ram_cache.make_room(24 * 2**30)

        with transformer_info as transformer:
            assert isinstance(transformer, Flux)

            def step_callback() -> None:
                if context.util.is_canceled():
                    raise CanceledException

                # TODO: Make this look like the image before re-enabling
                # latent_image = unpack(img.float(), self.height, self.width)
                # latent_image = latent_image.squeeze()  # Remove unnecessary dimensions
                # flattened_tensor = latent_image.reshape(-1)  # Flatten to shape [48*128*128]

                # # Create a new tensor of the required shape [255, 255, 3]
                # latent_image = flattened_tensor[: 255 * 255 * 3].reshape(255, 255, 3)  # Reshape to RGB format

                # # Convert to a NumPy array and then to a PIL Image
                # image = Image.fromarray(latent_image.cpu().numpy().astype(np.uint8))

                # (width, height) = image.size
                # width *= 8
                # height *= 8

                # dataURL = image_to_dataURL(image, image_format="JPEG")

                # # TODO: move this whole function to invocation context to properly reference these variables
                # context._services.events.emit_invocation_denoise_progress(
                #     context._data.queue_item,
                #     context._data.invocation,
                #     state,
                #     ProgressImage(dataURL=dataURL, width=width, height=height),
                # )

            x = denoise(
                model=transformer,
                img=img,
                img_ids=img_ids,
                txt=t5_embeddings,
                txt_ids=txt_ids,
                vec=clip_embeddings,
                timesteps=timesteps,
                step_callback=step_callback,
                guidance=self.guidance,
            )

        x = unpack(x.float(), self.height, self.width)
        return x

    def _run_vae_decoding(
        self,
        context: InvocationContext,
        latents: torch.Tensor,
    ) -> Image.Image:
        vae_info = context.models.load(self.vae.vae)
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            latents = latents.to(dtype=TorchDevice.choose_torch_dtype())
            img = vae.decode(latents)

        img = img.clamp(-1, 1)
        img = rearrange(img[0], "c h w -> h w c")
        img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())

        return img_pil

View File

@ -1,5 +1,5 @@
import copy
from typing import List, Literal, Optional
from typing import List, Optional
from pydantic import BaseModel, Field
@ -13,14 +13,7 @@ from invokeai.app.invocations.baseinvocation import (
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.flux.util import max_seq_lengths
from invokeai.backend.model_manager.config import (
AnyModelConfig,
BaseModelType,
CheckpointConfigBase,
ModelType,
SubModelType,
)
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType
class ModelIdentifierField(BaseModel):
@ -67,15 +60,6 @@ class CLIPField(BaseModel):
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
class TransformerField(BaseModel):
transformer: ModelIdentifierField = Field(description="Info to load Transformer submodel")
class T5EncoderField(BaseModel):
tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
class VAEField(BaseModel):
vae: ModelIdentifierField = Field(description="Info to load vae submodel")
seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')
@ -138,112 +122,6 @@ class ModelIdentifierInvocation(BaseInvocation):
return ModelIdentifierOutput(model=self.model)
@invocation_output("flux_model_loader_output")
class FluxModelLoaderOutput(BaseInvocationOutput):
"""Flux base model loader output"""
transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
max_seq_len: Literal[256, 512] = OutputField(
description="The max sequence length to used for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
title="Max Seq Length",
)
@invocation(
"flux_model_loader",
title="Flux Main Model",
tags=["model", "flux"],
category="model",
version="1.0.3",
classification=Classification.Prototype,
)
class FluxModelLoaderInvocation(BaseInvocation):
"""Loads a flux base model, outputting its submodels."""
model: ModelIdentifierField = InputField(
description=FieldDescriptions.flux_model,
ui_type=UIType.FluxMainModel,
input=Input.Direct,
)
t5_encoder: ModelIdentifierField = InputField(
description=FieldDescriptions.t5_encoder,
ui_type=UIType.T5EncoderModel,
input=Input.Direct,
)
def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
model_key = self.model.key
if not context.models.exists(model_key):
raise ValueError(f"Unknown model: {model_key}")
transformer = self._get_model(context, SubModelType.Transformer)
tokenizer = self._get_model(context, SubModelType.Tokenizer)
tokenizer2 = self._get_model(context, SubModelType.Tokenizer2)
clip_encoder = self._get_model(context, SubModelType.TextEncoder)
t5_encoder = self._get_model(context, SubModelType.TextEncoder2)
vae = self._get_model(context, SubModelType.VAE)
transformer_config = context.models.get_config(transformer)
assert isinstance(transformer_config, CheckpointConfigBase)
return FluxModelLoaderOutput(
transformer=TransformerField(transformer=transformer),
clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
vae=VAEField(vae=vae),
max_seq_len=max_seq_lengths[transformer_config.config_path],
)
def _get_model(self, context: InvocationContext, submodel: SubModelType) -> ModelIdentifierField:
match submodel:
case SubModelType.Transformer:
return self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
case SubModelType.VAE:
return self._pull_model_from_mm(
context,
SubModelType.VAE,
"FLUX.1-schnell_ae",
ModelType.VAE,
BaseModelType.Flux,
)
case submodel if submodel in [SubModelType.Tokenizer, SubModelType.TextEncoder]:
return self._pull_model_from_mm(
context,
submodel,
"clip-vit-large-patch14",
ModelType.CLIPEmbed,
BaseModelType.Any,
)
case submodel if submodel in [SubModelType.Tokenizer2, SubModelType.TextEncoder2]:
return self._pull_model_from_mm(
context,
submodel,
self.t5_encoder.name,
ModelType.T5Encoder,
BaseModelType.Any,
)
case _:
raise Exception(f"{submodel.value} is not a supported submodule for a flux model")
def _pull_model_from_mm(
self,
context: InvocationContext,
submodel: SubModelType,
name: str,
type: ModelType,
base: BaseModelType,
):
if models := context.models.search_by_attrs(name=name, base=base, type=type):
if len(models) != 1:
raise Exception(f"Multiple models detected for selected model with name {name}")
return ModelIdentifierField.from_config(models[0]).model_copy(update={"submodel_type": submodel})
else:
raise ValueError(f"Please install the {base}:{type} model named {name} via starter models")
@invocation(
"main_model_loader",
title="Main Model",

View File

@ -12,7 +12,6 @@ from invokeai.app.invocations.fields import (
ConditioningField,
DenoiseMaskField,
FieldDescriptions,
FluxConditioningField,
ImageField,
Input,
InputField,
@ -415,17 +414,6 @@ class MaskOutput(BaseInvocationOutput):
height: int = OutputField(description="The height of the mask in pixels.")
@invocation_output("flux_conditioning_output")
class FluxConditioningOutput(BaseInvocationOutput):
"""Base class for nodes that output a single conditioning tensor"""
conditioning: FluxConditioningField = OutputField(description=FieldDescriptions.cond)
@classmethod
def build(cls, conditioning_name: str) -> "FluxConditioningOutput":
return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
@invocation_output("conditioning_output")
class ConditioningOutput(BaseInvocationOutput):
"""Base class for nodes that output a single conditioning tensor"""

View File

@ -783,9 +783,8 @@ class ModelInstallService(ModelInstallServiceBase):
# So what we do is to synthesize a folder named "sdxl-turbo_vae" here.
if subfolder:
top = Path(remote_files[0].path.parts[0]) # e.g. "sdxl-turbo/"
path_to_remove = top / subfolder # sdxl-turbo/vae/
subfolder_rename = subfolder.name.replace("/", "_").replace("\\", "_")
path_to_add = Path(f"{top}_{subfolder_rename}")
path_to_remove = top / subfolder.parts[-1] # sdxl-turbo/vae/
path_to_add = Path(f"{top}_{subfolder}")
else:
path_to_remove = Path(".")
path_to_add = Path(".")

View File

@ -77,7 +77,6 @@ class ModelRecordChanges(BaseModelExcludeNull):
type: Optional[ModelType] = Field(description="Type of model", default=None)
key: Optional[str] = Field(description="Database ID for this model", default=None)
hash: Optional[str] = Field(description="hash of model file", default=None)
format: Optional[str] = Field(description="format of model file", default=None)
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
default_settings: Optional[MainModelDefaultSettings | ControlAdapterDefaultSettings] = Field(
description="Default settings for this model", default=None

View File

@ -1,266 +0,0 @@
{
"name": "FLUX Text to Image",
"author": "InvokeAI",
"description": "A simple text-to-image workflow using FLUX dev or schnell models. Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
"version": "1.0.0",
"contact": "",
"tags": "text2image, flux",
"notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
"exposedFields": [
{
"nodeId": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"fieldName": "model"
},
{
"nodeId": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"fieldName": "prompt"
},
{
"nodeId": "159bdf1b-79e7-4174-b86e-d40e646964c8",
"fieldName": "num_steps"
},
{
"nodeId": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"fieldName": "t5_encoder"
}
],
"meta": {
"version": "3.0.0",
"category": "default"
},
"nodes": [
{
"id": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"type": "invocation",
"data": {
"id": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"type": "flux_model_loader",
"version": "1.0.3",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": false,
"inputs": {
"model": {
"name": "model",
"label": "Model (Starter Models can be found in Model Manager)",
"value": {
"key": "f04a7a2f-c74d-4538-8d5e-879a53501662",
"hash": "random:4875da7a9508444ffa706f61961c260d0c6729f6181a86b31fad06df1277b850",
"name": "FLUX Dev (Quantized)",
"base": "flux",
"type": "main"
}
},
"t5_encoder": {
"name": "t5_encoder",
"label": "T 5 Encoder (Starter Models can be found in Model Manager)",
"value": {
"key": "20dcd9ec-5fbb-4012-8401-049e707da5e5",
"hash": "random:f986be43ff3502169e4adbdcee158afb0e0a65a1edc4cab16ae59963630cfd8f",
"name": "t5_bnb_int8_quantized_encoder",
"base": "any",
"type": "t5_encoder"
}
}
}
},
"position": {
"x": 337.09365228062825,
"y": 40.63469521079861
}
},
{
"id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"type": "invocation",
"data": {
"id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"type": "flux_text_encoder",
"version": "1.0.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": true,
"inputs": {
"clip": {
"name": "clip",
"label": ""
},
"t5_encoder": {
"name": "t5_encoder",
"label": ""
},
"t5_max_seq_len": {
"name": "t5_max_seq_len",
"label": "T5 Max Seq Len",
"value": 256
},
"prompt": {
"name": "prompt",
"label": "",
"value": "a cat"
}
}
},
"position": {
"x": 824.1970602278849,
"y": 146.98251001061735
}
},
{
"id": "4754c534-a5f3-4ad0-9382-7887985e668c",
"type": "invocation",
"data": {
"id": "4754c534-a5f3-4ad0-9382-7887985e668c",
"type": "rand_int",
"version": "1.0.1",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": true,
"useCache": false,
"inputs": {
"low": {
"name": "low",
"label": "",
"value": 0
},
"high": {
"name": "high",
"label": "",
"value": 2147483647
}
}
},
"position": {
"x": 822.9899179655476,
"y": 360.9657214885052
}
},
{
"id": "159bdf1b-79e7-4174-b86e-d40e646964c8",
"type": "invocation",
"data": {
"id": "159bdf1b-79e7-4174-b86e-d40e646964c8",
"type": "flux_text_to_image",
"version": "1.0.0",
"label": "",
"notes": "",
"isOpen": true,
"isIntermediate": false,
"useCache": true,
"inputs": {
"board": {
"name": "board",
"label": ""
},
"metadata": {
"name": "metadata",
"label": ""
},
"transformer": {
"name": "transformer",
"label": ""
},
"vae": {
"name": "vae",
"label": ""
},
"positive_text_conditioning": {
"name": "positive_text_conditioning",
"label": ""
},
"width": {
"name": "width",
"label": "",
"value": 1024
},
"height": {
"name": "height",
"label": "",
"value": 1024
},
"num_steps": {
"name": "num_steps",
"label": "Steps (Recommend 30 for Dev, 4 for Schnell)",
"value": 30
},
"guidance": {
"name": "guidance",
"label": "",
"value": 4
},
"seed": {
"name": "seed",
"label": "",
"value": 0
}
}
},
"position": {
"x": 1216.3900791301849,
"y": 5.500841807102248
}
}
],
"edges": [
{
"id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33amax_seq_len-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_max_seq_len",
"type": "default",
"source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "max_seq_len",
"targetHandle": "t5_max_seq_len"
},
{
"id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33avae-159bdf1b-79e7-4174-b86e-d40e646964c8vae",
"type": "default",
"source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
"sourceHandle": "vae",
"targetHandle": "vae"
},
{
"id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33atransformer-159bdf1b-79e7-4174-b86e-d40e646964c8transformer",
"type": "default",
"source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
"sourceHandle": "transformer",
"targetHandle": "transformer"
},
{
"id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33at5_encoder-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_encoder",
"type": "default",
"source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "t5_encoder",
"targetHandle": "t5_encoder"
},
{
"id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33aclip-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cclip",
"type": "default",
"source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
"target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"sourceHandle": "clip",
"targetHandle": "clip"
},
{
"id": "reactflow__edge-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cconditioning-159bdf1b-79e7-4174-b86e-d40e646964c8positive_text_conditioning",
"type": "default",
"source": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
"target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
"sourceHandle": "conditioning",
"targetHandle": "positive_text_conditioning"
},
{
"id": "reactflow__edge-4754c534-a5f3-4ad0-9382-7887985e668cvalue-159bdf1b-79e7-4174-b86e-d40e646964c8seed",
"type": "default",
"source": "4754c534-a5f3-4ad0-9382-7887985e668c",
"target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
"sourceHandle": "value",
"targetHandle": "seed"
}
]
}

View File

@ -1,32 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
import torch
from einops import rearrange
from torch import Tensor
def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
q, k = apply_rope(q, k, pe)
x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
x = rearrange(x, "B H L D -> B L (H D)")
return x
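For orientation (an illustrative shape walk-through with hypothetical sizes): scaled_dot_product_attention keeps the (B, H, L, D) layout, and the final rearrange merges the heads back into the model dimension, e.g. 24 heads of dim 128 into a 3072-wide feature.

import torch
from einops import rearrange

B, H, L, D = 1, 24, 4096, 128  # hypothetical sizes matching flux-dev
x = torch.zeros(B, H, L, D)    # attention output layout
x = rearrange(x, "B H L D -> B L (H D)")
print(x.shape)                 # torch.Size([1, 4096, 3072])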
def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
assert dim % 2 == 0
scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
omega = 1.0 / (theta**scale)
out = torch.einsum("...n,d->...nd", pos, omega)
out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
return out.float()
def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
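The stacked [cos, -sin, sin, cos] entries in rope() form per-frequency 2x2 rotation matrices, and apply_rope() applies them to (real, imag) feature pairs, which is equivalent to multiplying by exp(i * theta) in the complex plane. A hypothetical standalone check of that equivalence:

import cmath
import math

import torch

theta = 0.3
pair = torch.tensor([1.0, 2.0])  # one (real, imag) feature pair
rot = torch.tensor([
    [math.cos(theta), -math.sin(theta)],
    [math.sin(theta), math.cos(theta)],
])  # same layout rope() builds
rotated = rot @ pair
z = complex(pair[0].item(), pair[1].item()) * cmath.exp(1j * theta)
assert torch.allclose(rotated, torch.tensor([z.real, z.imag]))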

View File

@ -1,117 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from dataclasses import dataclass
import torch
from torch import Tensor, nn
from invokeai.backend.flux.modules.layers import (
DoubleStreamBlock,
EmbedND,
LastLayer,
MLPEmbedder,
SingleStreamBlock,
timestep_embedding,
)
@dataclass
class FluxParams:
in_channels: int
vec_in_dim: int
context_in_dim: int
hidden_size: int
mlp_ratio: float
num_heads: int
depth: int
depth_single_blocks: int
axes_dim: list[int]
theta: int
qkv_bias: bool
guidance_embed: bool
class Flux(nn.Module):
"""
Transformer model for flow matching on sequences.
"""
def __init__(self, params: FluxParams):
super().__init__()
self.params = params
self.in_channels = params.in_channels
self.out_channels = self.in_channels
if params.hidden_size % params.num_heads != 0:
raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
pe_dim = params.hidden_size // params.num_heads
if sum(params.axes_dim) != pe_dim:
raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
self.hidden_size = params.hidden_size
self.num_heads = params.num_heads
self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
self.guidance_in = (
MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
)
self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)
self.double_blocks = nn.ModuleList(
[
DoubleStreamBlock(
self.hidden_size,
self.num_heads,
mlp_ratio=params.mlp_ratio,
qkv_bias=params.qkv_bias,
)
for _ in range(params.depth)
]
)
self.single_blocks = nn.ModuleList(
[
SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio)
for _ in range(params.depth_single_blocks)
]
)
self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)
def forward(
self,
img: Tensor,
img_ids: Tensor,
txt: Tensor,
txt_ids: Tensor,
timesteps: Tensor,
y: Tensor,
guidance: Tensor | None = None,
) -> Tensor:
if img.ndim != 3 or txt.ndim != 3:
raise ValueError("Input img and txt tensors must have 3 dimensions.")
# project the packed image sequence into the hidden size
img = self.img_in(img)
vec = self.time_in(timestep_embedding(timesteps, 256))
if self.params.guidance_embed:
if guidance is None:
raise ValueError("Didn't get guidance strength for guidance distilled model.")
vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
vec = vec + self.vector_in(y)
txt = self.txt_in(txt)
ids = torch.cat((txt_ids, img_ids), dim=1)
pe = self.pe_embedder(ids)
for block in self.double_blocks:
img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
img = torch.cat((txt, img), 1)
for block in self.single_blocks:
img = block(img, vec=vec, pe=pe)
img = img[:, txt.shape[1] :, ...]
img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
return img
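Shape sketch (hypothetical sizes): after the double-stream blocks, the text and image tokens are concatenated so the single-stream blocks attend over both, then the image tokens are sliced back out before the final layer.

import torch

txt = torch.zeros(1, 512, 3072)   # e.g. T5 tokens at dev's max_seq_len
img = torch.zeros(1, 4096, 3072)  # packed latent tokens for a 1024x1024 image
x = torch.cat((txt, img), 1)
print(x.shape)                          # torch.Size([1, 4608, 3072])
print(x[:, txt.shape[1] :, ...].shape)  # torch.Size([1, 4096, 3072])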

View File

@ -1,310 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from dataclasses import dataclass
import torch
from einops import rearrange
from torch import Tensor, nn
@dataclass
class AutoEncoderParams:
resolution: int
in_channels: int
ch: int
out_ch: int
ch_mult: list[int]
num_res_blocks: int
z_channels: int
scale_factor: float
shift_factor: float
class AttnBlock(nn.Module):
def __init__(self, in_channels: int):
super().__init__()
self.in_channels = in_channels
self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)
def attention(self, h_: Tensor) -> Tensor:
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous()
k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous()
v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous()
h_ = nn.functional.scaled_dot_product_attention(q, k, v)
return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
def forward(self, x: Tensor) -> Tensor:
return x + self.proj_out(self.attention(x))
class ResnetBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, x):
h = x
h = self.norm1(h)
h = torch.nn.functional.silu(h)
h = self.conv1(h)
h = self.norm2(h)
h = torch.nn.functional.silu(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
x = self.nin_shortcut(x)
return x + h
class Downsample(nn.Module):
def __init__(self, in_channels: int):
super().__init__()
# no asymmetric padding in torch conv, must do it ourselves
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
def forward(self, x: Tensor):
pad = (0, 1, 0, 1)
x = nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
return x
class Upsample(nn.Module):
def __init__(self, in_channels: int):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x: Tensor):
x = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
x = self.conv(x)
return x
class Encoder(nn.Module):
def __init__(
self,
resolution: int,
in_channels: int,
ch: int,
ch_mult: list[int],
num_res_blocks: int,
z_channels: int,
):
super().__init__()
self.ch = ch
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
curr_res = resolution
in_ch_mult = (1,) + tuple(ch_mult)
self.in_ch_mult = in_ch_mult
self.down = nn.ModuleList()
block_in = self.ch
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch * in_ch_mult[i_level]
block_out = ch * ch_mult[i_level]
for _ in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
block_in = block_out
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions - 1:
down.downsample = Downsample(block_in)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
# end
self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)
def forward(self, x: Tensor) -> Tensor:
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1])
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions - 1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h)
h = self.mid.attn_1(h)
h = self.mid.block_2(h)
# end
h = self.norm_out(h)
h = torch.nn.functional.silu(h)
h = self.conv_out(h)
return h
class Decoder(nn.Module):
def __init__(
self,
ch: int,
out_ch: int,
ch_mult: list[int],
num_res_blocks: int,
in_channels: int,
resolution: int,
z_channels: int,
):
super().__init__()
self.ch = ch
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.ffactor = 2 ** (self.num_resolutions - 1)
# compute in_ch_mult, block_in and curr_res at lowest res
block_in = ch * ch_mult[self.num_resolutions - 1]
curr_res = resolution // 2 ** (self.num_resolutions - 1)
self.z_shape = (1, z_channels, curr_res, curr_res)
# z to block_in
self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch * ch_mult[i_level]
for _ in range(self.num_res_blocks + 1):
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
block_in = block_out
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)
def forward(self, z: Tensor) -> Tensor:
# z to block_in
h = self.conv_in(z)
# middle
h = self.mid.block_1(h)
h = self.mid.attn_1(h)
h = self.mid.block_2(h)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = self.up[i_level].block[i_block](h)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
h = self.norm_out(h)
h = torch.nn.functional.silu(h)
h = self.conv_out(h)
return h
class DiagonalGaussian(nn.Module):
def __init__(self, sample: bool = True, chunk_dim: int = 1):
super().__init__()
self.sample = sample
self.chunk_dim = chunk_dim
def forward(self, z: Tensor) -> Tensor:
mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim)
if self.sample:
std = torch.exp(0.5 * logvar)
return mean + std * torch.randn_like(mean)
else:
return mean
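DiagonalGaussian is the usual reparameterization: the encoder emits 2 * z_channels channels, which are chunked into mean and log-variance, and a sample is drawn as mean + exp(0.5 * logvar) * eps. A toy illustration with hypothetical sizes:

import torch

z = torch.randn(1, 32, 8, 8)  # 2 * z_channels, with z_channels = 16
mean, logvar = torch.chunk(z, 2, dim=1)
sample = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)
print(sample.shape)           # torch.Size([1, 16, 8, 8])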
class AutoEncoder(nn.Module):
def __init__(self, params: AutoEncoderParams):
super().__init__()
self.encoder = Encoder(
resolution=params.resolution,
in_channels=params.in_channels,
ch=params.ch,
ch_mult=params.ch_mult,
num_res_blocks=params.num_res_blocks,
z_channels=params.z_channels,
)
self.decoder = Decoder(
resolution=params.resolution,
in_channels=params.in_channels,
ch=params.ch,
out_ch=params.out_ch,
ch_mult=params.ch_mult,
num_res_blocks=params.num_res_blocks,
z_channels=params.z_channels,
)
self.reg = DiagonalGaussian()
self.scale_factor = params.scale_factor
self.shift_factor = params.shift_factor
def encode(self, x: Tensor) -> Tensor:
z = self.reg(self.encoder(x))
z = self.scale_factor * (z - self.shift_factor)
return z
def decode(self, z: Tensor) -> Tensor:
z = z / self.scale_factor + self.shift_factor
return self.decoder(z)
def forward(self, x: Tensor) -> Tensor:
return self.decode(self.encode(x))

View File

@ -1,33 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from torch import Tensor, nn
from transformers import PreTrainedModel, PreTrainedTokenizer
class HFEncoder(nn.Module):
def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool, max_length: int):
super().__init__()
self.max_length = max_length
self.is_clip = is_clip
self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
self.tokenizer = tokenizer
self.hf_module = encoder
self.hf_module = self.hf_module.eval().requires_grad_(False)
def forward(self, text: list[str]) -> Tensor:
batch_encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_length,
return_length=False,
return_overflowing_tokens=False,
padding="max_length",
return_tensors="pt",
)
outputs = self.hf_module(
input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
attention_mask=None,
output_hidden_states=False,
)
return outputs[self.output_key]

View File

@ -1,253 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
import math
from dataclasses import dataclass
import torch
from einops import rearrange
from torch import Tensor, nn
from invokeai.backend.flux.math import attention, rope
class EmbedND(nn.Module):
def __init__(self, dim: int, theta: int, axes_dim: list[int]):
super().__init__()
self.dim = dim
self.theta = theta
self.axes_dim = axes_dim
def forward(self, ids: Tensor) -> Tensor:
n_axes = ids.shape[-1]
emb = torch.cat(
[rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
dim=-3,
)
return emb.unsqueeze(1)
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
"""
Create sinusoidal timestep embeddings.
:param t: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:param time_factor: multiplier applied to t before embedding (defaults to 1000).
:return: an (N, D) Tensor of positional embeddings.
"""
t = time_factor * t
half = dim // 2
freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)
args = t[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
if torch.is_floating_point(t):
embedding = embedding.to(t)
return embedding
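A quick self-contained illustration of the embedding above (hypothetical values): one fractional timestep embedded into dim = 8 features by concatenating cosines and sines over a geometric frequency ladder.

import math

import torch

t = torch.tensor([0.25])  # fractional timestep
dim, max_period, time_factor = 8, 10000, 1000.0
half = dim // 2
freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
args = (time_factor * t)[:, None] * freqs[None]
emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
print(emb.shape)          # torch.Size([1, 8])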
class MLPEmbedder(nn.Module):
def __init__(self, in_dim: int, hidden_dim: int):
super().__init__()
self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
self.silu = nn.SiLU()
self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)
def forward(self, x: Tensor) -> Tensor:
return self.out_layer(self.silu(self.in_layer(x)))
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int):
super().__init__()
self.scale = nn.Parameter(torch.ones(dim))
def forward(self, x: Tensor):
x_dtype = x.dtype
x = x.float()
rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
return (x * rrms).to(dtype=x_dtype) * self.scale
class QKNorm(torch.nn.Module):
def __init__(self, dim: int):
super().__init__()
self.query_norm = RMSNorm(dim)
self.key_norm = RMSNorm(dim)
def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
q = self.query_norm(q)
k = self.key_norm(k)
return q.to(v), k.to(v)
class SelfAttention(nn.Module):
def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.norm = QKNorm(head_dim)
self.proj = nn.Linear(dim, dim)
def forward(self, x: Tensor, pe: Tensor) -> Tensor:
qkv = self.qkv(x)
q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
q, k = self.norm(q, k, v)
x = attention(q, k, v, pe=pe)
x = self.proj(x)
return x
@dataclass
class ModulationOut:
shift: Tensor
scale: Tensor
gate: Tensor
class Modulation(nn.Module):
def __init__(self, dim: int, double: bool):
super().__init__()
self.is_double = double
self.multiplier = 6 if double else 3
self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)
def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]:
out = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)
return (
ModulationOut(*out[:3]),
ModulationOut(*out[3:]) if self.is_double else None,
)
class DoubleStreamBlock(nn.Module):
def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False):
super().__init__()
mlp_hidden_dim = int(hidden_size * mlp_ratio)
self.num_heads = num_heads
self.hidden_size = hidden_size
self.img_mod = Modulation(hidden_size, double=True)
self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)
self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.img_mlp = nn.Sequential(
nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
nn.GELU(approximate="tanh"),
nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
)
self.txt_mod = Modulation(hidden_size, double=True)
self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)
self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.txt_mlp = nn.Sequential(
nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
nn.GELU(approximate="tanh"),
nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
)
def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor) -> tuple[Tensor, Tensor]:
img_mod1, img_mod2 = self.img_mod(vec)
txt_mod1, txt_mod2 = self.txt_mod(vec)
# prepare image for attention
img_modulated = self.img_norm1(img)
img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
img_qkv = self.img_attn.qkv(img_modulated)
img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)
# prepare txt for attention
txt_modulated = self.txt_norm1(txt)
txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
txt_qkv = self.txt_attn.qkv(txt_modulated)
txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)
# run actual attention
q = torch.cat((txt_q, img_q), dim=2)
k = torch.cat((txt_k, img_k), dim=2)
v = torch.cat((txt_v, img_v), dim=2)
attn = attention(q, k, v, pe=pe)
txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
# calculate the img blocks
img = img + img_mod1.gate * self.img_attn.proj(img_attn)
img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)
# calculate the txt blocks
txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
return img, txt
class SingleStreamBlock(nn.Module):
"""
A DiT block with parallel linear layers as described in
https://arxiv.org/abs/2302.05442, with an adapted modulation interface.
"""
def __init__(
self,
hidden_size: int,
num_heads: int,
mlp_ratio: float = 4.0,
qk_scale: float | None = None,
):
super().__init__()
self.hidden_dim = hidden_size
self.num_heads = num_heads
head_dim = hidden_size // num_heads
self.scale = qk_scale or head_dim**-0.5
self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
# qkv and mlp_in
self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
# proj and mlp_out
self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)
self.norm = QKNorm(head_dim)
self.hidden_size = hidden_size
self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.mlp_act = nn.GELU(approximate="tanh")
self.modulation = Modulation(hidden_size, double=False)
def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
mod, _ = self.modulation(vec)
x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)
q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
q, k = self.norm(q, k, v)
# compute attention
attn = attention(q, k, v, pe=pe)
# compute activation in mlp stream, cat again and run second linear layer
output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
return x + mod.gate * output
class LastLayer(nn.Module):
def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
super().__init__()
self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))
def forward(self, x: Tensor, vec: Tensor) -> Tensor:
shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
x = self.linear(x)
return x

View File

@ -1,176 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
import math
from typing import Callable
import torch
from einops import rearrange, repeat
from torch import Tensor
from tqdm import tqdm
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.conditioner import HFEncoder
def get_noise(
num_samples: int,
height: int,
width: int,
device: torch.device,
dtype: torch.dtype,
seed: int,
):
# We always generate noise on the same device and dtype, then cast, to ensure consistency across devices/dtypes.
rand_device = "cpu"
rand_dtype = torch.float16
return torch.randn(
num_samples,
16,
# allow for packing
2 * math.ceil(height / 16),
2 * math.ceil(width / 16),
device=rand_device,
dtype=rand_dtype,
generator=torch.Generator(device=rand_device).manual_seed(seed),
).to(device=device, dtype=dtype)
def prepare(t5: HFEncoder, clip: HFEncoder, img: Tensor, prompt: str | list[str]) -> dict[str, Tensor]:
bs, c, h, w = img.shape
if bs == 1 and not isinstance(prompt, str):
bs = len(prompt)
img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
if img.shape[0] == 1 and bs > 1:
img = repeat(img, "1 ... -> bs ...", bs=bs)
img_ids = torch.zeros(h // 2, w // 2, 3)
img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
if isinstance(prompt, str):
prompt = [prompt]
txt = t5(prompt)
if txt.shape[0] == 1 and bs > 1:
txt = repeat(txt, "1 ... -> bs ...", bs=bs)
txt_ids = torch.zeros(bs, txt.shape[1], 3)
vec = clip(prompt)
if vec.shape[0] == 1 and bs > 1:
vec = repeat(vec, "1 ... -> bs ...", bs=bs)
return {
"img": img,
"img_ids": img_ids.to(img.device),
"txt": txt.to(img.device),
"txt_ids": txt_ids.to(img.device),
"vec": vec.to(img.device),
}
def time_shift(mu: float, sigma: float, t: Tensor):
return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
def get_lin_function(x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15) -> Callable[[float], float]:
m = (y2 - y1) / (x2 - x1)
b = y1 - m * x1
return lambda x: m * x + b
def get_schedule(
num_steps: int,
image_seq_len: int,
base_shift: float = 0.5,
max_shift: float = 1.15,
shift: bool = True,
) -> list[float]:
# extra step for zero
timesteps = torch.linspace(1, 0, num_steps + 1)
# shifting the schedule to favor high timesteps for higher signal images
if shift:
# estimate mu based on linear interpolation between two points
mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
timesteps = time_shift(mu, 1.0, timesteps)
return timesteps.tolist()
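To make the shifting concrete (an illustrative sketch mirroring the helpers above): a 1024x1024 image packs into 4096 tokens, so get_lin_function()(4096) gives mu = 1.15, and a 4-step schedule is pushed toward high timesteps.

import math

import torch

mu = 1.15                            # get_lin_function()(4096) for a 1024x1024 latent
timesteps = torch.linspace(1, 0, 5)  # 4 steps + the extra zero
shifted = math.exp(mu) / (math.exp(mu) + (1 / timesteps - 1) ** 1.0)  # time_shift(mu, 1.0, t)
print(shifted)
# ~ tensor([1.0000, 0.9045, 0.7595, 0.5128, 0.0000]); compared with the
# unshifted [1.00, 0.75, 0.50, 0.25, 0.00], more time is spent at high noise.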
def denoise(
model: Flux,
# model input
img: Tensor,
img_ids: Tensor,
txt: Tensor,
txt_ids: Tensor,
vec: Tensor,
# sampling parameters
timesteps: list[float],
step_callback: Callable[[], None],
guidance: float = 4.0,
):
dtype = model.txt_in.bias.dtype
# TODO(ryand): This shouldn't be necessary if we manage the dtypes properly in the caller.
img = img.to(dtype=dtype)
img_ids = img_ids.to(dtype=dtype)
txt = txt.to(dtype=dtype)
txt_ids = txt_ids.to(dtype=dtype)
vec = vec.to(dtype=dtype)
# this is ignored for schnell
guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
pred = model(
img=img,
img_ids=img_ids,
txt=txt,
txt_ids=txt_ids,
y=vec,
timesteps=t_vec,
guidance=guidance_vec,
)
img = img + (t_prev - t_curr) * pred
step_callback()
return img
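The update img = img + (t_prev - t_curr) * pred is a plain Euler step on the flow ODE: t decreases over the schedule, so the coefficient is negative and the sample moves against the predicted velocity. A minimal illustration with hypothetical tensors:

import torch

img = torch.zeros(1, 4096, 64)  # toy packed latent
pred = torch.ones_like(img)     # pretend the model predicted unit velocity
t_curr, t_prev = 1.0, 0.75
img = img + (t_prev - t_curr) * pred  # one Euler step
print(img.mean())               # tensor(-0.2500)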
def unpack(x: Tensor, height: int, width: int) -> Tensor:
return rearrange(
x,
"b (h w) (c ph pw) -> b c (h ph) (w pw)",
h=math.ceil(height / 16),
w=math.ceil(width / 16),
ph=2,
pw=2,
)
def prepare_latent_img_patches(latent_img: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""Convert an input image in latent space to patches for diffusion.
This implementation was extracted from:
https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/sampling.py#L32
Returns:
tuple[Tensor, Tensor]: (img, img_ids), as defined in the original flux repo.
"""
bs, c, h, w = latent_img.shape
# Pixel unshuffle with a scale of 2, and flatten the height/width dimensions to get an array of patches.
img = rearrange(latent_img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
if img.shape[0] == 1 and bs > 1:
img = repeat(img, "1 ... -> bs ...", bs=bs)
# Generate patch position ids.
img_ids = torch.zeros(h // 2, w // 2, 3, device=img.device)
img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=img.device)[:, None]
img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=img.device)[None, :]
img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
return img, img_ids
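A concrete shape check (illustrative only): a 1024x1024 image has a 16-channel 128x128 latent, which packs into 64 * 64 = 4096 patches of 16 * 2 * 2 = 64 features each.

import torch
from einops import rearrange

latent = torch.zeros(1, 16, 128, 128)
img = rearrange(latent, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
print(img.shape)  # torch.Size([1, 4096, 64])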

View File

@ -1,71 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux
from dataclasses import dataclass
from typing import Dict, Literal
from invokeai.backend.flux.model import FluxParams
from invokeai.backend.flux.modules.autoencoder import AutoEncoderParams
@dataclass
class ModelSpec:
params: FluxParams
ae_params: AutoEncoderParams
ckpt_path: str | None
ae_path: str | None
repo_id: str | None
repo_flow: str | None
repo_ae: str | None
max_seq_lengths: Dict[str, Literal[256, 512]] = {
"flux-dev": 512,
"flux-schnell": 256,
}
ae_params = {
"flux": AutoEncoderParams(
resolution=256,
in_channels=3,
ch=128,
out_ch=3,
ch_mult=[1, 2, 4, 4],
num_res_blocks=2,
z_channels=16,
scale_factor=0.3611,
shift_factor=0.1159,
)
}
params = {
"flux-dev": FluxParams(
in_channels=64,
vec_in_dim=768,
context_in_dim=4096,
hidden_size=3072,
mlp_ratio=4.0,
num_heads=24,
depth=19,
depth_single_blocks=38,
axes_dim=[16, 56, 56],
theta=10_000,
qkv_bias=True,
guidance_embed=True,
),
"flux-schnell": FluxParams(
in_channels=64,
vec_in_dim=768,
context_in_dim=4096,
hidden_size=3072,
mlp_ratio=4.0,
num_heads=24,
depth=19,
depth_single_blocks=38,
axes_dim=[16, 56, 56],
theta=10_000,
qkv_bias=True,
guidance_embed=False,
),
}

View File

@ -52,7 +52,6 @@ class BaseModelType(str, Enum):
StableDiffusion2 = "sd-2"
StableDiffusionXL = "sdxl"
StableDiffusionXLRefiner = "sdxl-refiner"
Flux = "flux"
# Kandinsky2_1 = "kandinsky-2.1"
@ -67,9 +66,7 @@ class ModelType(str, Enum):
TextualInversion = "embedding"
IPAdapter = "ip_adapter"
CLIPVision = "clip_vision"
CLIPEmbed = "clip_embed"
T2IAdapter = "t2i_adapter"
T5Encoder = "t5_encoder"
SpandrelImageToImage = "spandrel_image_to_image"
@ -77,7 +74,6 @@ class SubModelType(str, Enum):
"""Submodel type."""
UNet = "unet"
Transformer = "transformer"
TextEncoder = "text_encoder"
TextEncoder2 = "text_encoder_2"
Tokenizer = "tokenizer"
@ -108,9 +104,6 @@ class ModelFormat(str, Enum):
EmbeddingFile = "embedding_file"
EmbeddingFolder = "embedding_folder"
InvokeAI = "invokeai"
T5Encoder = "t5_encoder"
BnbQuantizedLlmInt8b = "bnb_quantized_int8b"
BnbQuantizednf4b = "bnb_quantized_nf4b"
class SchedulerPredictionType(str, Enum):
@ -193,9 +186,7 @@ class ModelConfigBase(BaseModel):
class CheckpointConfigBase(ModelConfigBase):
"""Model config for checkpoint-style models."""
format: Literal[ModelFormat.Checkpoint, ModelFormat.BnbQuantizednf4b] = Field(
description="Format of the provided checkpoint model", default=ModelFormat.Checkpoint
)
format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint
config_path: str = Field(description="path to the checkpoint model config file")
converted_at: Optional[float] = Field(
description="When this model was last converted to diffusers", default_factory=time.time
@ -214,26 +205,6 @@ class LoRAConfigBase(ModelConfigBase):
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
class T5EncoderConfigBase(ModelConfigBase):
type: Literal[ModelType.T5Encoder] = ModelType.T5Encoder
class T5EncoderConfig(T5EncoderConfigBase):
format: Literal[ModelFormat.T5Encoder] = ModelFormat.T5Encoder
@staticmethod
def get_tag() -> Tag:
return Tag(f"{ModelType.T5Encoder.value}.{ModelFormat.T5Encoder.value}")
class T5EncoderBnbQuantizedLlmInt8bConfig(T5EncoderConfigBase):
format: Literal[ModelFormat.BnbQuantizedLlmInt8b] = ModelFormat.BnbQuantizedLlmInt8b
@staticmethod
def get_tag() -> Tag:
return Tag(f"{ModelType.T5Encoder.value}.{ModelFormat.BnbQuantizedLlmInt8b.value}")
class LoRALyCORISConfig(LoRAConfigBase):
"""Model config for LoRA/Lycoris models."""
@ -258,6 +229,7 @@ class VAECheckpointConfig(CheckpointConfigBase):
"""Model config for standalone VAE models."""
type: Literal[ModelType.VAE] = ModelType.VAE
format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint
@staticmethod
def get_tag() -> Tag:
@ -296,6 +268,7 @@ class ControlNetCheckpointConfig(CheckpointConfigBase, ControlAdapterConfigBase)
"""Model config for ControlNet models (diffusers version)."""
type: Literal[ModelType.ControlNet] = ModelType.ControlNet
format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint
@staticmethod
def get_tag() -> Tag:
@ -344,21 +317,6 @@ class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase):
return Tag(f"{ModelType.Main.value}.{ModelFormat.Checkpoint.value}")
class MainBnbQuantized4bCheckpointConfig(CheckpointConfigBase, MainConfigBase):
"""Model config for main checkpoint models."""
prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
upcast_attention: bool = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.format = ModelFormat.BnbQuantizednf4b
@staticmethod
def get_tag() -> Tag:
return Tag(f"{ModelType.Main.value}.{ModelFormat.BnbQuantizednf4b.value}")
class MainDiffusersConfig(DiffusersConfigBase, MainConfigBase):
"""Model config for main diffusers models."""
@ -392,17 +350,6 @@ class IPAdapterCheckpointConfig(IPAdapterBaseConfig):
return Tag(f"{ModelType.IPAdapter.value}.{ModelFormat.Checkpoint.value}")
class CLIPEmbedDiffusersConfig(DiffusersConfigBase):
"""Model config for Clip Embeddings."""
type: Literal[ModelType.CLIPEmbed] = ModelType.CLIPEmbed
format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers
@staticmethod
def get_tag() -> Tag:
return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}")
class CLIPVisionDiffusersConfig(DiffusersConfigBase):
"""Model config for CLIPVision."""
@ -461,15 +408,12 @@ AnyModelConfig = Annotated[
Union[
Annotated[MainDiffusersConfig, MainDiffusersConfig.get_tag()],
Annotated[MainCheckpointConfig, MainCheckpointConfig.get_tag()],
Annotated[MainBnbQuantized4bCheckpointConfig, MainBnbQuantized4bCheckpointConfig.get_tag()],
Annotated[VAEDiffusersConfig, VAEDiffusersConfig.get_tag()],
Annotated[VAECheckpointConfig, VAECheckpointConfig.get_tag()],
Annotated[ControlNetDiffusersConfig, ControlNetDiffusersConfig.get_tag()],
Annotated[ControlNetCheckpointConfig, ControlNetCheckpointConfig.get_tag()],
Annotated[LoRALyCORISConfig, LoRALyCORISConfig.get_tag()],
Annotated[LoRADiffusersConfig, LoRADiffusersConfig.get_tag()],
Annotated[T5EncoderConfig, T5EncoderConfig.get_tag()],
Annotated[T5EncoderBnbQuantizedLlmInt8bConfig, T5EncoderBnbQuantizedLlmInt8bConfig.get_tag()],
Annotated[TextualInversionFileConfig, TextualInversionFileConfig.get_tag()],
Annotated[TextualInversionFolderConfig, TextualInversionFolderConfig.get_tag()],
Annotated[IPAdapterInvokeAIConfig, IPAdapterInvokeAIConfig.get_tag()],
@ -477,7 +421,6 @@ AnyModelConfig = Annotated[
Annotated[T2IAdapterConfig, T2IAdapterConfig.get_tag()],
Annotated[SpandrelImageToImageConfig, SpandrelImageToImageConfig.get_tag()],
Annotated[CLIPVisionDiffusersConfig, CLIPVisionDiffusersConfig.get_tag()],
Annotated[CLIPEmbedDiffusersConfig, CLIPEmbedDiffusersConfig.get_tag()],
],
Discriminator(get_model_discriminator_value),
]

View File

@ -1,234 +0,0 @@
# Copyright (c) 2024, Brandon W. Rising and the InvokeAI Development Team
"""Class for Flux model loading in InvokeAI."""
from pathlib import Path
from typing import Optional
import accelerate
import torch
from safetensors.torch import load_file
from transformers import AutoConfig, AutoModelForTextEncoding, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.util import ae_params, params
from invokeai.backend.model_manager import (
AnyModel,
AnyModelConfig,
BaseModelType,
ModelFormat,
ModelType,
SubModelType,
)
from invokeai.backend.model_manager.config import (
CheckpointConfigBase,
CLIPEmbedDiffusersConfig,
MainBnbQuantized4bCheckpointConfig,
MainCheckpointConfig,
T5EncoderBnbQuantizedLlmInt8bConfig,
T5EncoderConfig,
VAECheckpointConfig,
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.util.silence_warnings import SilenceWarnings
try:
from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
from invokeai.backend.quantization.bnb_nf4 import quantize_model_nf4
bnb_available = True
except ImportError:
bnb_available = False
app_config = get_config()
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.VAE, format=ModelFormat.Checkpoint)
class FluxVAELoader(ModelLoader):
"""Class to load VAE models."""
def _load_model(
self,
config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> AnyModel:
if not isinstance(config, VAECheckpointConfig):
raise ValueError("Only VAECheckpointConfig models are currently supported here.")
model_path = Path(config.path)
with SilenceWarnings():
model = AutoEncoder(ae_params[config.config_path])
sd = load_file(model_path)
model.load_state_dict(sd, assign=True)
model.to(dtype=self._torch_dtype)
return model
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPEmbed, format=ModelFormat.Diffusers)
class ClipCheckpointModel(ModelLoader):
"""Class to load main models."""
def _load_model(
self,
config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> AnyModel:
if not isinstance(config, CLIPEmbedDiffusersConfig):
raise ValueError("Only CLIPEmbedDiffusersConfig models are currently supported here.")
match submodel_type:
case SubModelType.Tokenizer:
return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer")
case SubModelType.TextEncoder:
return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder")
raise ValueError(
f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.BnbQuantizedLlmInt8b)
class BnbQuantizedLlmInt8bCheckpointModel(ModelLoader):
"""Class to load main models."""
def _load_model(
self,
config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> AnyModel:
if not isinstance(config, T5EncoderBnbQuantizedLlmInt8bConfig):
raise ValueError("Only T5EncoderBnbQuantizedLlmInt8bConfig models are currently supported here.")
if not bnb_available:
raise ImportError(
"The bnb modules are not available. Please install bitsandbytes if available on your platform."
)
match submodel_type:
case SubModelType.Tokenizer2:
return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
case SubModelType.TextEncoder2:
te2_model_path = Path(config.path) / "text_encoder_2"
model_config = AutoConfig.from_pretrained(te2_model_path)
with accelerate.init_empty_weights():
model = AutoModelForTextEncoding.from_config(model_config)
model = quantize_model_llm_int8(model, modules_to_not_convert=set())
state_dict_path = te2_model_path / "bnb_llm_int8_model.safetensors"
state_dict = load_file(state_dict_path)
self._load_state_dict_into_t5(model, state_dict)
return model
raise ValueError(
f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
)
@classmethod
def _load_state_dict_into_t5(cls, model: T5EncoderModel, state_dict: dict[str, torch.Tensor]):
# There is a shared reference to a single weight tensor in the model.
# Both "encoder.embed_tokens.weight" and "shared.weight" refer to the same tensor, so only the latter should
# be present in the state_dict.
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False, assign=True)
assert len(unexpected_keys) == 0
assert set(missing_keys) == {"encoder.embed_tokens.weight"}
# Assert that the layers we expect to be shared are actually shared.
assert model.encoder.embed_tokens.weight is model.shared.weight
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.T5Encoder)
class T5EncoderCheckpointModel(ModelLoader):
"""Class to load main models."""
def _load_model(
self,
config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> AnyModel:
if not isinstance(config, T5EncoderConfig):
raise ValueError("Only T5EncoderConfig models are currently supported here.")
match submodel_type:
case SubModelType.Tokenizer2:
return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
case SubModelType.TextEncoder2:
return T5EncoderModel.from_pretrained(Path(config.path) / "text_encoder_2")
raise ValueError(
f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
)
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.Main, format=ModelFormat.Checkpoint)
class FluxCheckpointModel(ModelLoader):
"""Class to load main models."""
def _load_model(
self,
config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> AnyModel:
if not isinstance(config, CheckpointConfigBase):
raise ValueError("Only CheckpointConfigBase models are currently supported here.")
match submodel_type:
case SubModelType.Transformer:
return self._load_from_singlefile(config)
raise ValueError(
f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
)
def _load_from_singlefile(
self,
config: AnyModelConfig,
) -> AnyModel:
assert isinstance(config, MainCheckpointConfig)
model_path = Path(config.path)
with SilenceWarnings():
model = Flux(params[config.config_path])
sd = load_file(model_path)
model.load_state_dict(sd, assign=True)
return model
@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.Main, format=ModelFormat.BnbQuantizednf4b)
class FluxBnbQuantizednf4bCheckpointModel(ModelLoader):
"""Class to load main models."""
def _load_model(
self,
config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> AnyModel:
if not isinstance(config, CheckpointConfigBase):
raise ValueError("Only CheckpointConfigBase models are currently supported here.")
match submodel_type:
case SubModelType.Transformer:
return self._load_from_singlefile(config)
raise ValueError(
f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
)
def _load_from_singlefile(
self,
config: AnyModelConfig,
) -> AnyModel:
assert isinstance(config, MainBnbQuantized4bCheckpointConfig)
if not bnb_available:
raise ImportError(
"The bnb modules are not available. Please install bitsandbytes if available on your platform."
)
model_path = Path(config.path)
with SilenceWarnings():
with accelerate.init_empty_weights():
model = Flux(params[config.config_path])
model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.bfloat16)
sd = load_file(model_path)
model.load_state_dict(sd, assign=True)
return model
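The quantized loaders above share a pattern worth noting (a generic sketch of the technique, not InvokeAI code, requires PyTorch >= 2.1): build the module on the meta device so no weight memory is allocated, then let load_state_dict(..., assign=True) attach the real tensors directly instead of copying them into preallocated storage.

import torch

with torch.device("meta"):
    layer = torch.nn.Linear(4, 4)  # structure only, no weight storage
state_dict = {"weight": torch.randn(4, 4), "bias": torch.randn(4)}
layer.load_state_dict(state_dict, assign=True)  # tensors attached, not copied
print(layer.weight.device)         # cpu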

View File

@ -78,12 +78,7 @@ class GenericDiffusersLoader(ModelLoader):
# TO DO: Add exception handling
def _hf_definition_to_type(self, module: str, class_name: str) -> ModelMixin: # fix with correct type
if module in [
"diffusers",
"transformers",
"invokeai.backend.quantization.fast_quantized_transformers_model",
"invokeai.backend.quantization.fast_quantized_diffusion_model",
]:
if module in ["diffusers", "transformers"]:
res_type = sys.modules[module]
else:
res_type = sys.modules["diffusers"].pipelines

View File

@ -36,18 +36,8 @@ VARIANT_TO_IN_CHANNEL_MAP = {
}
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(
base=BaseModelType.StableDiffusionXLRefiner, type=ModelType.Main, format=ModelFormat.Diffusers
)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(
base=BaseModelType.StableDiffusionXLRefiner, type=ModelType.Main, format=ModelFormat.Checkpoint
)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Main, format=ModelFormat.Checkpoint)
class StableDiffusionDiffusersModel(GenericDiffusersLoader):
"""Class to load main models."""

View File

@ -9,7 +9,7 @@ from typing import Optional
import torch
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from transformers import CLIPTokenizer, T5Tokenizer, T5TokenizerFast
from transformers import CLIPTokenizer
from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import DepthAnythingPipeline
from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
@ -50,17 +50,6 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
),
):
return model.calc_size()
elif isinstance(
model,
(
T5TokenizerFast,
T5Tokenizer,
),
):
# HACK(ryand): len(model) just returns the vocabulary size, so this is blatantly wrong. It should be small
# relative to the text encoder that it's used with, so shouldn't matter too much, but we should fix this at some
# point.
return len(model)
else:
# TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the
# supported model types.

View File

@ -95,7 +95,6 @@ class ModelProbe(object):
}
CLASS2TYPE = {
"FluxPipeline": ModelType.Main,
"StableDiffusionPipeline": ModelType.Main,
"StableDiffusionInpaintPipeline": ModelType.Main,
"StableDiffusionXLPipeline": ModelType.Main,
@ -107,7 +106,6 @@ class ModelProbe(object):
"ControlNetModel": ModelType.ControlNet,
"CLIPVisionModelWithProjection": ModelType.CLIPVision,
"T2IAdapter": ModelType.T2IAdapter,
"CLIPModel": ModelType.CLIPEmbed,
}
@classmethod
@ -163,7 +161,7 @@ class ModelProbe(object):
fields["description"] = (
fields.get("description") or f"{fields['base'].value} {model_type.value} model {fields['name']}"
)
fields["format"] = ModelFormat(fields.get("format")) if "format" in fields else probe.get_format()
fields["format"] = fields.get("format") or probe.get_format()
fields["hash"] = fields.get("hash") or ModelHash(algorithm=hash_algo).hash(model_path)
fields["default_settings"] = fields.get("default_settings")
@ -178,10 +176,10 @@ class ModelProbe(object):
fields["repo_variant"] = fields.get("repo_variant") or probe.get_repo_variant()
# additional fields needed for main and controlnet models
if fields["type"] in [ModelType.Main, ModelType.ControlNet, ModelType.VAE] and fields["format"] in [
ModelFormat.Checkpoint,
ModelFormat.BnbQuantizednf4b,
]:
if (
fields["type"] in [ModelType.Main, ModelType.ControlNet, ModelType.VAE]
and fields["format"] is ModelFormat.Checkpoint
):
ckpt_config_path = cls._get_checkpoint_config_path(
model_path,
model_type=fields["type"],
@ -224,8 +222,7 @@ class ModelProbe(object):
ckpt = ckpt.get("state_dict", ckpt)
for key in [str(k) for k in ckpt.keys()]:
if key.startswith(("cond_stage_model.", "first_stage_model.", "model.diffusion_model.", "double_blocks.")):
# Keys starting with double_blocks are associated with Flux models
if key.startswith(("cond_stage_model.", "first_stage_model.", "model.diffusion_model.")):
return ModelType.Main
elif key.startswith(("encoder.conv_in", "decoder.conv_in")):
return ModelType.VAE
@ -324,27 +321,10 @@ class ModelProbe(object):
return possible_conf.absolute()
if model_type is ModelType.Main:
if base_type == BaseModelType.Flux:
# TODO: Decide between dev/schnell
checkpoint = ModelProbe._scan_and_load_checkpoint(model_path)
state_dict = checkpoint.get("state_dict") or checkpoint
if "guidance_in.out_layer.weight" in state_dict:
# For flux, this is a key in invokeai.backend.flux.util.params
# Because model type and format are the discriminators for model configs, this key
# is used rather than attempting to support flux with separate model types and formats.
# If this changes in the future, please fix me.
config_file = "flux-dev"
else:
# For flux, this is a key in invokeai.backend.flux.util.params
# Because model type and format are the discriminators for model configs, this key
# is used rather than attempting to support flux with separate model types and formats.
# If this changes in the future, please fix me.
config_file = "flux-schnell"
else:
config_file = LEGACY_CONFIGS[base_type][variant_type]
if isinstance(config_file, dict): # need another tier for sd-2.x models
config_file = config_file[prediction_type]
config_file = f"stable-diffusion/{config_file}"
config_file = LEGACY_CONFIGS[base_type][variant_type]
if isinstance(config_file, dict): # need another tier for sd-2.x models
config_file = config_file[prediction_type]
config_file = f"stable-diffusion/{config_file}"
elif model_type is ModelType.ControlNet:
config_file = (
"controlnet/cldm_v15.yaml"
@ -353,13 +333,7 @@ class ModelProbe(object):
)
elif model_type is ModelType.VAE:
config_file = (
# For flux, this is a key in invokeai.backend.flux.util.ae_params
# Because model type and format are the discriminators for model configs, this key
# is used rather than attempting to support flux with separate model types and formats.
# If this changes in the future, please fix me.
"flux"
if base_type is BaseModelType.Flux
else "stable-diffusion/v1-inference.yaml"
"stable-diffusion/v1-inference.yaml"
if base_type is BaseModelType.StableDiffusion1
else "stable-diffusion/sd_xl_base.yaml"
if base_type is BaseModelType.StableDiffusionXL
@ -442,15 +416,11 @@ class CheckpointProbeBase(ProbeBase):
self.checkpoint = ModelProbe._scan_and_load_checkpoint(model_path)
def get_format(self) -> ModelFormat:
state_dict = self.checkpoint.get("state_dict") or self.checkpoint
if "double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict:
return ModelFormat.BnbQuantizednf4b
return ModelFormat("checkpoint")
def get_variant_type(self) -> ModelVariantType:
model_type = ModelProbe.get_model_type_from_checkpoint(self.model_path, self.checkpoint)
base_type = self.get_base_type()
if model_type != ModelType.Main or base_type == BaseModelType.Flux:
if model_type != ModelType.Main:
return ModelVariantType.Normal
state_dict = self.checkpoint.get("state_dict") or self.checkpoint
in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1]
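For reference, the in_channels value read above is what distinguishes the checkpoint variants, mirroring the VARIANT_TO_IN_CHANNEL_MAP context shown earlier. A small sketch of the correspondence (the channel counts are the conventional SD values and are an assumption here, as the hunk is truncated before the mapping):

```python
from invokeai.backend.model_manager.config import ModelVariantType

# Conventional SD UNet input-channel counts -> variant (assumed values).
IN_CHANNEL_TO_VARIANT = {
    4: ModelVariantType.Normal,   # plain txt2img/img2img UNet
    5: ModelVariantType.Depth,    # depth-conditioned UNet (SD 2.x depth)
    9: ModelVariantType.Inpaint,  # inpainting UNet (extra mask + image latents)
}

def variant_from_state_dict(state_dict) -> ModelVariantType:
    in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1]
    return IN_CHANNEL_TO_VARIANT.get(in_channels, ModelVariantType.Normal)
```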
@ -470,8 +440,6 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
def get_base_type(self) -> BaseModelType:
checkpoint = self.checkpoint
state_dict = self.checkpoint.get("state_dict") or checkpoint
if "double_blocks.0.img_attn.norm.key_norm.scale" in state_dict:
return BaseModelType.Flux
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
if key_name in state_dict and state_dict[key_name].shape[-1] == 768:
return BaseModelType.StableDiffusion1
@ -514,7 +482,6 @@ class VaeCheckpointProbe(CheckpointProbeBase):
(r"xl", BaseModelType.StableDiffusionXL),
(r"sd2", BaseModelType.StableDiffusion2),
(r"vae", BaseModelType.StableDiffusion1),
(r"FLUX.1-schnell_ae", BaseModelType.Flux),
]:
if re.search(regexp, self.model_path.name, re.IGNORECASE):
return basetype
@ -746,11 +713,6 @@ class TextualInversionFolderProbe(FolderProbeBase):
return TextualInversionCheckpointProbe(path).get_base_type()
class T5EncoderFolderProbe(FolderProbeBase):
def get_format(self) -> ModelFormat:
return ModelFormat.T5Encoder
class ONNXFolderProbe(PipelineFolderProbe):
def get_base_type(self) -> BaseModelType:
# Due to the way the installer is set up, the configuration file for safetensors
@ -843,11 +805,6 @@ class CLIPVisionFolderProbe(FolderProbeBase):
return BaseModelType.Any
class CLIPEmbedFolderProbe(FolderProbeBase):
def get_base_type(self) -> BaseModelType:
return BaseModelType.Any
class SpandrelImageToImageFolderProbe(FolderProbeBase):
def get_base_type(self) -> BaseModelType:
raise NotImplementedError()
@ -878,10 +835,8 @@ ModelProbe.register_probe("diffusers", ModelType.Main, PipelineFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.VAE, VaeFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.LoRA, LoRAFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.TextualInversion, TextualInversionFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.T5Encoder, T5EncoderFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.ControlNet, ControlNetFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.IPAdapter, IPAdapterFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.CLIPEmbed, CLIPEmbedFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.CLIPVision, CLIPVisionFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.T2IAdapter, T2IAdapterFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.SpandrelImageToImage, SpandrelImageToImageFolderProbe)

View File

@ -2,7 +2,7 @@ from typing import Optional
from pydantic import BaseModel
from invokeai.backend.model_manager.config import BaseModelType, ModelFormat, ModelType
from invokeai.backend.model_manager.config import BaseModelType, ModelType
class StarterModelWithoutDependencies(BaseModel):
@ -11,7 +11,6 @@ class StarterModelWithoutDependencies(BaseModel):
name: str
base: BaseModelType
type: ModelType
format: Optional[ModelFormat] = None
is_installed: bool = False
@ -52,76 +51,10 @@ cyberrealistic_negative = StarterModel(
type=ModelType.TextualInversion,
)
t5_base_encoder = StarterModel(
name="t5_base_encoder",
base=BaseModelType.Any,
source="InvokeAI/t5-v1_1-xxl::bfloat16",
description="T5-XXL text encoder (used in FLUX pipelines). ~8GB",
type=ModelType.T5Encoder,
)
t5_8b_quantized_encoder = StarterModel(
name="t5_bnb_int8_quantized_encoder",
base=BaseModelType.Any,
source="InvokeAI/t5-v1_1-xxl::bnb_llm_int8",
description="T5-XXL text encoder with bitsandbytes LLM.int8() quantization (used in FLUX pipelines). ~5GB",
type=ModelType.T5Encoder,
format=ModelFormat.BnbQuantizedLlmInt8b,
)
clip_l_encoder = StarterModel(
name="clip-vit-large-patch14",
base=BaseModelType.Any,
source="InvokeAI/clip-vit-large-patch14-text-encoder::bfloat16",
description="CLIP-L text encoder (used in FLUX pipelines). ~250MB",
type=ModelType.CLIPEmbed,
)
flux_vae = StarterModel(
name="FLUX.1-schnell_ae",
base=BaseModelType.Flux,
source="black-forest-labs/FLUX.1-schnell::ae.safetensors",
description="FLUX VAE compatible with both schnell and dev variants.",
type=ModelType.VAE,
)
# List of starter models, displayed on the frontend.
# The order/sort of this list is not changed by the frontend - set it how you want it here.
STARTER_MODELS: list[StarterModel] = [
# region: Main
StarterModel(
name="FLUX Schnell (Quantized)",
base=BaseModelType.Flux,
source="InvokeAI/flux_schnell::transformer/bnb_nf4/flux1-schnell-bnb_nf4.safetensors",
description="FLUX schnell transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
),
StarterModel(
name="FLUX Dev (Quantized)",
base=BaseModelType.Flux,
source="InvokeAI/flux_dev::transformer/bnb_nf4/flux1-dev-bnb_nf4.safetensors",
description="FLUX dev transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
type=ModelType.Main,
dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
),
StarterModel(
name="FLUX Schnell",
base=BaseModelType.Flux,
source="InvokeAI/flux_schnell::transformer/base/flux1-schnell.safetensors",
description="FLUX schnell transformer in bfloat16. Total size with dependencies: ~33GB",
type=ModelType.Main,
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
),
StarterModel(
name="FLUX Dev",
base=BaseModelType.Flux,
source="InvokeAI/flux_dev::transformer/base/flux1-dev.safetensors",
description="FLUX dev transformer in bfloat16. Total size with dependencies: ~33GB",
type=ModelType.Main,
dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
),
StarterModel(
name="CyberRealistic v4.1",
base=BaseModelType.StableDiffusion1,
@ -192,7 +125,6 @@ STARTER_MODELS: list[StarterModel] = [
# endregion
# region VAE
sdxl_fp16_vae_fix,
flux_vae,
# endregion
# region LoRA
StarterModel(
@ -518,11 +450,6 @@ STARTER_MODELS: list[StarterModel] = [
type=ModelType.SpandrelImageToImage,
),
# endregion
# region TextEncoders
t5_base_encoder,
t5_8b_quantized_encoder,
clip_l_encoder,
# endregion
]
assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"

View File

@ -54,7 +54,6 @@ def filter_files(
"lora_weights.safetensors",
"weights.pb",
"onnx_data",
"spiece.model", # Added for `black-forest-labs/FLUX.1-schnell`.
)
):
paths.append(file)
@ -63,13 +62,13 @@ def filter_files(
# downloading random checkpoints that might also be in the repo. However, there is no guarantee
# that a checkpoint doesn't contain "model" in its name, and no guarantee that future diffusers models
# will adhere to this naming convention, so this is an area to be careful of.
elif re.search(r"model.*\.(safetensors|bin|onnx|xml|pth|pt|ckpt|msgpack)$", file.name):
elif re.search(r"model(\.[^.]+)?\.(safetensors|bin|onnx|xml|pth|pt|ckpt|msgpack)$", file.name):
paths.append(file)
# limit search to subfolder if requested
if subfolder:
subfolder = root / subfolder
paths = [x for x in paths if Path(subfolder) in x.parents]
paths = [x for x in paths if x.parent == Path(subfolder)]
# _filter_by_variant uniquifies the paths and returns a set
return sorted(_filter_by_variant(paths, variant))
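To make the regex change above concrete: the old `model.*\.` pattern matched any filename containing "model" followed by arbitrary text and a weights extension, while the new `model(\.[^.]+)?\.` pattern permits at most one dotted variant segment (e.g. `.fp16`) before the extension. A quick check (sample filenames are illustrative):

```python
import re

EXTS = r"(safetensors|bin|onnx|xml|pth|pt|ckpt|msgpack)$"
OLD = re.compile(r"model.*\." + EXTS)
NEW = re.compile(r"model(\.[^.]+)?\." + EXTS)

for name in [
    "model.safetensors",            # OLD: match, NEW: match
    "model.fp16.safetensors",       # OLD: match, NEW: match (one variant segment)
    "model_ema.ckpt",               # OLD: match, NEW: no match (arbitrary junk after "model")
    "diffusion_pytorch_model.bin",  # OLD: match, NEW: match (prefix before "model" is fine)
]:
    print(f"{name}: old={bool(OLD.search(name))} new={bool(NEW.search(name))}")
```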
@ -98,9 +97,7 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
if variant == ModelRepoVariant.Flax:
result.add(path)
# Note: '.model' was added to support:
# https://huggingface.co/black-forest-labs/FLUX.1-schnell/blob/768d12a373ed5cc9ef9a9dea7504dc09fcc14842/tokenizer_2/spiece.model
elif path.suffix in [".json", ".txt", ".model"]:
elif path.suffix in [".json", ".txt"]:
result.add(path)
elif variant in [
@ -143,23 +140,6 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
continue
for candidate_list in subfolder_weights.values():
# Check if at least one of the files has the explicit fp16 variant.
at_least_one_fp16 = False
for candidate in candidate_list:
if len(candidate.path.suffixes) == 2 and candidate.path.suffixes[0] == ".fp16":
at_least_one_fp16 = True
break
if not at_least_one_fp16:
# If none of the candidates in this candidate_list have the explicit fp16 variant label, then this
# candidate_list probably doesn't adhere to the variant naming convention that we expected. In this case,
# we'll simply keep all the candidates. An example of a model that hits this case is
# `black-forest-labs/FLUX.1-schnell` (as of commit 012d2fd).
for candidate in candidate_list:
result.add(candidate.path)
continue
# The candidate_list seems to have the expected variant naming convention. We'll select the highest-scoring
# candidate.
highest_score_candidate = max(candidate_list, key=lambda candidate: candidate.score)
if highest_score_candidate:
result.add(highest_score_candidate.path)
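The fp16 check in the removed block keys off `Path.suffixes`: a candidate only counts as explicitly variant-labelled when it carries exactly two suffixes with `.fp16` first. A tiny illustration (filenames are examples):

```python
from pathlib import Path

def has_explicit_fp16_label(path: Path) -> bool:
    # e.g. "model.fp16.safetensors" -> suffixes == [".fp16", ".safetensors"]
    return len(path.suffixes) == 2 and path.suffixes[0] == ".fp16"

print(has_explicit_fp16_label(Path("unet/model.fp16.safetensors")))  # True
print(has_explicit_fp16_label(Path("unet/model.safetensors")))       # False -> keep every candidate
```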

View File

@ -1,125 +0,0 @@
import bitsandbytes as bnb
import torch
# This file contains utils for working with models that use bitsandbytes LLM.int8() quantization.
# The utils in this file are partially inspired by:
# https://github.com/Lightning-AI/pytorch-lightning/blob/1551a16b94f5234a4a78801098f64d0732ef5cb5/src/lightning/fabric/plugins/precision/bitsandbytes.py
# NOTE(ryand): All of the custom state_dict manipulation logic in this file is pretty hacky. This could be made much
# cleaner by re-implementing bnb.nn.Linear8bitLt with proper use of buffers and less magic. But, for now, we try to
# stick close to the bitsandbytes classes to make interoperability easier with other models that might use bitsandbytes.
class InvokeInt8Params(bnb.nn.Int8Params):
"""We override cuda() to avoid re-quantizing the weights in the following cases:
- We loaded quantized weights from a state_dict on the CPU, and then moved the model to the GPU.
- We are moving the model back and forth between the CPU and GPU.
"""
def cuda(self, device):
if self.has_fp16_weights:
return super().cuda(device)
elif self.CB is not None and self.SCB is not None:
self.data = self.data.cuda()
self.CB = self.data
self.SCB = self.SCB.cuda()
else:
# we store the 8-bit row-major weight
# we convert this weight to the Turing/Ampere weight during the first inference pass
B = self.data.contiguous().half().cuda(device)
CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
del CBt
del SCBt
self.data = CB
self.CB = CB
self.SCB = SCB
return self
class InvokeLinear8bitLt(bnb.nn.Linear8bitLt):
def _load_from_state_dict(
self,
state_dict: dict[str, torch.Tensor],
prefix: str,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
weight = state_dict.pop(prefix + "weight")
bias = state_dict.pop(prefix + "bias", None)
# See `bnb.nn.Linear8bitLt._save_to_state_dict()` for the serialization logic of SCB and weight_format.
scb = state_dict.pop(prefix + "SCB", None)
# weight_format is unused, but we pop it so we can validate that there are no unexpected keys.
_weight_format = state_dict.pop(prefix + "weight_format", None)
# TODO(ryand): Technically, we should be using `strict`, `missing_keys`, `unexpected_keys`, and `error_msgs`
# rather than raising an exception to correctly implement this API.
assert len(state_dict) == 0
if scb is not None:
# We are loading a pre-quantized state dict.
self.weight = InvokeInt8Params(
data=weight,
requires_grad=self.weight.requires_grad,
has_fp16_weights=False,
# Note: After quantization, CB is the same as weight.
CB=weight,
SCB=scb,
)
self.bias = bias if bias is None else torch.nn.Parameter(bias)
else:
# We are loading a non-quantized state dict.
# We could simply call the `super()._load_from_state_dict()` method here, but then we wouldn't be able to
# load from a state_dict into a model on the "meta" device. Attempting to load into a model on the "meta"
# device requires setting `assign=True`; doing this with the default `super()._load_from_state_dict()`
# implementation causes `Int8Params` to be replaced by a `torch.nn.Parameter`. By initializing a new
# `InvokeInt8Params` object, we work around this issue. It's a bit hacky, but it gets the job done.
self.weight = InvokeInt8Params(
data=weight,
requires_grad=self.weight.requires_grad,
has_fp16_weights=False,
CB=None,
SCB=None,
)
self.bias = bias if bias is None else torch.nn.Parameter(bias)
def _convert_linear_layers_to_llm_8bit(
module: torch.nn.Module, ignore_modules: set[str], outlier_threshold: float, prefix: str = ""
) -> None:
"""Convert all linear layers in the module to bnb.nn.Linear8bitLt layers."""
for name, child in module.named_children():
fullname = f"{prefix}.{name}" if prefix else name
if isinstance(child, torch.nn.Linear) and not any(fullname.startswith(s) for s in ignore_modules):
has_bias = child.bias is not None
replacement = InvokeLinear8bitLt(
child.in_features,
child.out_features,
bias=has_bias,
has_fp16_weights=False,
threshold=outlier_threshold,
)
replacement.weight.data = child.weight.data
if has_bias:
replacement.bias.data = child.bias.data
replacement.requires_grad_(False)
module.__setattr__(name, replacement)
else:
_convert_linear_layers_to_llm_8bit(
child, ignore_modules, outlier_threshold=outlier_threshold, prefix=fullname
)
def quantize_model_llm_int8(model: torch.nn.Module, modules_to_not_convert: set[str], outlier_threshold: float = 6.0):
"""Apply bitsandbytes LLM.8bit() quantization to the model."""
_convert_linear_layers_to_llm_8bit(
module=model, ignore_modules=modules_to_not_convert, outlier_threshold=outlier_threshold
)
return model
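The NF4 sibling module below documents the intended call sequence in its `quantize_model_nf4` docstring; the same flow should apply to this LLM.int8() variant. A minimal sketch, assuming a hypothetical `MyModel` class with a `from_config` constructor and a `state_dict` loaded elsewhere:

```python
import accelerate
import torch

# Initialize the model on the meta device so no real weights are allocated.
with accelerate.init_empty_weights():
    model = MyModel.from_config(...)  # hypothetical model class

# Swap Linear layers for InvokeLinear8bitLt shells, still on the meta device.
with accelerate.init_empty_weights():
    model = quantize_model_llm_int8(model, modules_to_not_convert=set())

# Works with either a pre-quantized or an unquantized state dict.
model.load_state_dict(state_dict, strict=True, assign=True)

# If the weights were unquantized, moving to CUDA is where quantization happens.
model = model.to("cuda")
```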

View File

@ -1,156 +0,0 @@
import bitsandbytes as bnb
import torch
# This file contains utils for working with models that use bitsandbytes NF4 quantization.
# The utils in this file are partially inspired by:
# https://github.com/Lightning-AI/pytorch-lightning/blob/1551a16b94f5234a4a78801098f64d0732ef5cb5/src/lightning/fabric/plugins/precision/bitsandbytes.py
# NOTE(ryand): All of the custom state_dict manipulation logic in this file is pretty hacky. This could be made much
# cleaner by re-implementing bnb.nn.LinearNF4 with proper use of buffers and less magic. But, for now, we try to stick
# close to the bitsandbytes classes to make interoperability easier with other models that might use bitsandbytes.
class InvokeLinearNF4(bnb.nn.LinearNF4):
"""A class that extends `bnb.nn.LinearNF4` to add the following functionality:
- Ability to load Linear NF4 layers from a pre-quantized state_dict.
- Ability to load Linear NF4 layers from a state_dict when the model is on the "meta" device.
"""
def _load_from_state_dict(
self,
state_dict: dict[str, torch.Tensor],
prefix: str,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
"""This method is based on the logic in the bitsandbytes serialization unit tests for `Linear4bit`:
https://github.com/bitsandbytes-foundation/bitsandbytes/blob/6d714a5cce3db5bd7f577bc447becc7a92d5ccc7/tests/test_linear4bit.py#L52-L71
"""
weight = state_dict.pop(prefix + "weight")
bias = state_dict.pop(prefix + "bias", None)
# We expect the remaining keys to be quant_state keys.
quant_state_sd = state_dict
# During serialization, the quant_state is stored as subkeys of "weight." (See
# `bnb.nn.LinearNF4._save_to_state_dict()`). We validate that they at least have the correct prefix.
# TODO(ryand): Technically, we should be using `strict`, `missing_keys`, `unexpected_keys`, and `error_msgs`
# rather than raising an exception to correctly implement this API.
assert all(k.startswith(prefix + "weight.") for k in quant_state_sd.keys())
if len(quant_state_sd) > 0:
# We are loading a pre-quantized state dict.
self.weight = bnb.nn.Params4bit.from_prequantized(
data=weight, quantized_stats=quant_state_sd, device=weight.device
)
self.bias = bias if bias is None else torch.nn.Parameter(bias, requires_grad=False)
else:
# We are loading a non-quantized state dict.
# We could simply call the `super()._load_from_state_dict()` method here, but then we wouldn't be able to
# load from a state_dict into a model on the "meta" device. Attempting to load into a model on the "meta"
# device requires setting `assign=True`; doing this with the default `super()._load_from_state_dict()`
# implementation causes `Params4bit` to be replaced by a `torch.nn.Parameter`. By initializing a new
# `Params4bit` object, we work around this issue. It's a bit hacky, but it gets the job done.
self.weight = bnb.nn.Params4bit(
data=weight,
requires_grad=self.weight.requires_grad,
compress_statistics=self.weight.compress_statistics,
quant_type=self.weight.quant_type,
quant_storage=self.weight.quant_storage,
module=self,
)
self.bias = bias if bias is None else torch.nn.Parameter(bias)
def _replace_param(
param: torch.nn.Parameter | bnb.nn.Params4bit,
data: torch.Tensor,
) -> torch.nn.Parameter:
"""A helper function to replace the data of a model parameter with new data in a way that allows replacing params on
the "meta" device.
Supports both `torch.nn.Parameter` and `bnb.nn.Params4bit` parameters.
"""
if param.device.type == "meta":
# Doing `param.data = data` raises a RuntimeError if param.data was on the "meta" device, so we need to
# re-create the param instead of overwriting the data.
if isinstance(param, bnb.nn.Params4bit):
return bnb.nn.Params4bit(
data,
requires_grad=data.requires_grad,
quant_state=param.quant_state,
compress_statistics=param.compress_statistics,
quant_type=param.quant_type,
)
return torch.nn.Parameter(data, requires_grad=data.requires_grad)
param.data = data
return param
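As the comment above notes, assigning to `param.data` raises a RuntimeError when the parameter lives on the "meta" device, which is why `_replace_param` re-creates the parameter instead of overwriting it. A tiny illustration of the two paths:

```python
import torch

meta_param = torch.nn.Parameter(torch.empty(2, 2, device="meta"))
real_data = torch.randn(2, 2)

# meta_param.data = real_data  # raises RuntimeError per the comment above

# Re-create the parameter instead, preserving requires_grad:
replaced = torch.nn.Parameter(real_data, requires_grad=meta_param.requires_grad)

# Off the meta device, overwriting .data directly is fine:
cpu_param = torch.nn.Parameter(torch.randn(2, 2))
cpu_param.data = real_data
```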
def _convert_linear_layers_to_nf4(
module: torch.nn.Module,
ignore_modules: set[str],
compute_dtype: torch.dtype,
compress_statistics: bool = False,
prefix: str = "",
) -> None:
"""Convert all linear layers in the model to NF4 quantized linear layers.
Args:
module: All linear layers in this module will be converted.
ignore_modules: A set of module prefixes to ignore when converting linear layers.
compute_dtype: The dtype to use for computation in the quantized linear layers.
compress_statistics: Whether to enable nested quantization (aka double quantization) where the quantization
constants from the first quantization are quantized again.
prefix: The prefix of the current module in the model. Used to call this function recursively.
"""
for name, child in module.named_children():
fullname = f"{prefix}.{name}" if prefix else name
if isinstance(child, torch.nn.Linear) and not any(fullname.startswith(s) for s in ignore_modules):
has_bias = child.bias is not None
replacement = InvokeLinearNF4(
child.in_features,
child.out_features,
bias=has_bias,
compute_dtype=compute_dtype,
compress_statistics=compress_statistics,
)
if has_bias:
replacement.bias = _replace_param(replacement.bias, child.bias.data)
replacement.weight = _replace_param(replacement.weight, child.weight.data)
replacement.requires_grad_(False)
module.__setattr__(name, replacement)
else:
_convert_linear_layers_to_nf4(child, ignore_modules, compute_dtype=compute_dtype, prefix=fullname)
def quantize_model_nf4(model: torch.nn.Module, modules_to_not_convert: set[str], compute_dtype: torch.dtype):
"""Apply bitsandbytes nf4 quantization to the model.
You likely want to call this function inside an `accelerate.init_empty_weights()` context.
Example usage:
```
# Initialize the model from a config on the meta device.
with accelerate.init_empty_weights():
model = ModelClass.from_config(...)
# Add NF4 quantization linear layers to the model - still on the meta device.
with accelerate.init_empty_weights():
model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.float16)
# Load a state_dict into the model. (Could be either a prequantized or non-quantized state_dict.)
model.load_state_dict(state_dict, strict=True, assign=True)
# Move the model to the "cuda" device. If the model was non-quantized, this is where the weight quantization takes
# place.
model.to("cuda")
```
"""
_convert_linear_layers_to_nf4(module=model, ignore_modules=modules_to_not_convert, compute_dtype=compute_dtype)
return model

View File

@ -1,79 +0,0 @@
from pathlib import Path
import accelerate
from safetensors.torch import load_file, save_file
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.util import params
from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time
def main():
"""A script for quantizing a FLUX transformer model using the bitsandbytes LLM.int8() quantization method.
This script is primarily intended for reference. The script params (e.g. the model_path, modules_to_not_convert,
etc.) are hardcoded and would need to be modified for other use cases.
"""
# Load the FLUX transformer model onto the meta device.
model_path = Path(
"/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
)
with log_time("Intialize FLUX transformer on meta device"):
# TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
p = params["flux-schnell"]
# Initialize the model on the "meta" device.
with accelerate.init_empty_weights():
model = Flux(p)
# TODO(ryand): We may want to add some modules to not quantize here (e.g. the proj_out layer). See the accelerate
# `get_keys_to_not_convert(...)` function for a heuristic to determine which modules to not quantize.
modules_to_not_convert: set[str] = set()
model_int8_path = model_path.parent / "bnb_llm_int8.safetensors"
if model_int8_path.exists():
# The quantized model already exists, load it and return it.
print(f"A pre-quantized model already exists at '{model_int8_path}'. Attempting to load it...")
# Replace the linear layers with LLM.int8() quantized linear layers (still on the meta device).
with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)
with log_time("Load state dict into model"):
sd = load_file(model_int8_path)
model.load_state_dict(sd, strict=True, assign=True)
with log_time("Move model to cuda"):
model = model.to("cuda")
print(f"Successfully loaded pre-quantized model from '{model_int8_path}'.")
else:
# The quantized model does not exist, quantize the model and save it.
print(f"No pre-quantized model found at '{model_int8_path}'. Quantizing the model...")
with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)
with log_time("Load state dict into model"):
state_dict = load_file(model_path)
# TODO(ryand): Cast the state_dict to the appropriate dtype?
model.load_state_dict(state_dict, strict=True, assign=True)
with log_time("Move model to cuda and quantize"):
model = model.to("cuda")
with log_time("Save quantized model"):
model_int8_path.parent.mkdir(parents=True, exist_ok=True)
save_file(model.state_dict(), model_int8_path)
print(f"Successfully quantized and saved model to '{model_int8_path}'.")
assert isinstance(model, Flux)
return model
if __name__ == "__main__":
main()

View File

@ -1,96 +0,0 @@
import time
from contextlib import contextmanager
from pathlib import Path
import accelerate
import torch
from safetensors.torch import load_file, save_file
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.util import params
from invokeai.backend.quantization.bnb_nf4 import quantize_model_nf4
@contextmanager
def log_time(name: str):
"""Helper context manager to log the time taken by a block of code."""
start = time.time()
try:
yield None
finally:
end = time.time()
print(f"'{name}' took {end - start:.4f} secs")
def main():
"""A script for quantizing a FLUX transformer model using the bitsandbytes NF4 quantization method.
This script is primarily intended for reference. The script params (e.g. the model_path, modules_to_not_convert,
etc.) are hardcoded and would need to be modified for other use cases.
"""
model_path = Path(
"/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
)
# inference_dtype = torch.bfloat16
with log_time("Intialize FLUX transformer on meta device"):
# TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
p = params["flux-schnell"]
# Initialize the model on the "meta" device.
with accelerate.init_empty_weights():
model = Flux(p)
# TODO(ryand): We may want to add some modules to not quantize here (e.g. the proj_out layer). See the accelerate
# `get_keys_to_not_convert(...)` function for a heuristic to determine which modules to not quantize.
modules_to_not_convert: set[str] = set()
model_nf4_path = model_path.parent / "bnb_nf4.safetensors"
if model_nf4_path.exists():
# The quantized model already exists, load it and return it.
print(f"A pre-quantized model already exists at '{model_nf4_path}'. Attempting to load it...")
# Replace the linear layers with NF4 quantized linear layers (still on the meta device).
with log_time("Replace linear layers with NF4 layers"), accelerate.init_empty_weights():
model = quantize_model_nf4(
model, modules_to_not_convert=modules_to_not_convert, compute_dtype=torch.bfloat16
)
with log_time("Load state dict into model"):
state_dict = load_file(model_nf4_path)
model.load_state_dict(state_dict, strict=True, assign=True)
with log_time("Move model to cuda"):
model = model.to("cuda")
print(f"Successfully loaded pre-quantized model from '{model_nf4_path}'.")
else:
# The quantized model does not exist, quantize the model and save it.
print(f"No pre-quantized model found at '{model_nf4_path}'. Quantizing the model...")
with log_time("Replace linear layers with NF4 layers"), accelerate.init_empty_weights():
model = quantize_model_nf4(
model, modules_to_not_convert=modules_to_not_convert, compute_dtype=torch.bfloat16
)
with log_time("Load state dict into model"):
state_dict = load_file(model_path)
# TODO(ryand): Cast the state_dict to the appropriate dtype?
model.load_state_dict(state_dict, strict=True, assign=True)
with log_time("Move model to cuda and quantize"):
model = model.to("cuda")
with log_time("Save quantized model"):
model_nf4_path.parent.mkdir(parents=True, exist_ok=True)
save_file(model.state_dict(), model_nf4_path)
print(f"Successfully quantized and saved model to '{model_nf4_path}'.")
assert isinstance(model, Flux)
return model
if __name__ == "__main__":
main()

View File

@ -1,92 +0,0 @@
from pathlib import Path
import accelerate
from safetensors.torch import load_file, save_file
from transformers import AutoConfig, AutoModelForTextEncoding, T5EncoderModel
from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time
def load_state_dict_into_t5(model: T5EncoderModel, state_dict: dict):
# There is a shared reference to a single weight tensor in the model.
# Both "encoder.embed_tokens.weight" and "shared.weight" refer to the same tensor, so only the latter should
# be present in the state_dict.
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False, assign=True)
assert len(unexpected_keys) == 0
assert set(missing_keys) == {"encoder.embed_tokens.weight"}
# Assert that the layers we expect to be shared are actually shared.
assert model.encoder.embed_tokens.weight is model.shared.weight
def main():
"""A script for quantizing a T5 text encoder model using the bitsandbytes LLM.int8() quantization method.
This script is primarily intended for reference. The script params (e.g. the model_path, modules_to_not_convert,
etc.) are hardcoded and would need to be modified for other use cases.
"""
model_path = Path("/data/misc/text_encoder_2")
with log_time("Intialize T5 on meta device"):
model_config = AutoConfig.from_pretrained(model_path)
with accelerate.init_empty_weights():
model = AutoModelForTextEncoding.from_config(model_config)
# TODO(ryand): We may want to add some modules to not quantize here (e.g. the proj_out layer). See the accelerate
# `get_keys_to_not_convert(...)` function for a heuristic to determine which modules to not quantize.
modules_to_not_convert: set[str] = set()
model_int8_path = model_path / "bnb_llm_int8.safetensors"
if model_int8_path.exists():
# The quantized model already exists, load it and return it.
print(f"A pre-quantized model already exists at '{model_int8_path}'. Attempting to load it...")
# Replace the linear layers with LLM.int8() quantized linear layers (still on the meta device).
with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)
with log_time("Load state dict into model"):
sd = load_file(model_int8_path)
load_state_dict_into_t5(model, sd)
with log_time("Move model to cuda"):
model = model.to("cuda")
print(f"Successfully loaded pre-quantized model from '{model_int8_path}'.")
else:
# The quantized model does not exist, quantize the model and save it.
print(f"No pre-quantized model found at '{model_int8_path}'. Quantizing the model...")
with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)
with log_time("Load state dict into model"):
# Load sharded state dict.
files = list(model_path.glob("*.safetensors"))
state_dict = {}
for file in files:
sd = load_file(file)
state_dict.update(sd)
load_state_dict_into_t5(model, state_dict)
with log_time("Move model to cuda and quantize"):
model = model.to("cuda")
with log_time("Save quantized model"):
model_int8_path.parent.mkdir(parents=True, exist_ok=True)
state_dict = model.state_dict()
state_dict.pop("encoder.embed_tokens.weight")
save_file(state_dict, model_int8_path)
# This handling of shared weights could also be achieved with save_model(...), but then we'd lose control
# over which keys are kept. And, the corresponding load_model(...) function does not support assign=True.
# save_model(model, model_int8_path)
print(f"Successfully quantized and saved model to '{model_int8_path}'.")
assert isinstance(model, T5EncoderModel)
return model
if __name__ == "__main__":
main()

View File

@ -25,6 +25,11 @@ class BasicConditioningInfo:
return self
@dataclass
class ConditioningFieldData:
conditionings: List[BasicConditioningInfo]
@dataclass
class SDXLConditioningInfo(BasicConditioningInfo):
"""SDXL text conditioning information produced by Compel."""
@ -38,17 +43,6 @@ class SDXLConditioningInfo(BasicConditioningInfo):
return super().to(device=device, dtype=dtype)
@dataclass
class FLUXConditioningInfo:
clip_embeds: torch.Tensor
t5_embeds: torch.Tensor
@dataclass
class ConditioningFieldData:
conditionings: List[BasicConditioningInfo] | List[SDXLConditioningInfo] | List[FLUXConditioningInfo]
@dataclass
class IPAdapterConditioningInfo:
cond_image_prompt_embeds: torch.Tensor

View File

@ -1,5 +1,5 @@
import { PropsWithChildren, memo, useEffect } from 'react';
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
import { modelChanged } from '../src/features/controlLayers/store/canvasV2Slice';
import { useAppDispatch } from '../src/app/store/storeHooks';
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
/**

View File

@ -58,7 +58,7 @@
"@dnd-kit/sortable": "^8.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@fontsource-variable/inter": "^5.0.20",
"@invoke-ai/ui-library": "^0.0.32",
"@invoke-ai/ui-library": "^0.0.31",
"@nanostores/react": "^0.7.3",
"@reduxjs/toolkit": "2.2.3",
"@roarr/browser-log-writer": "^1.3.0",

View File

@ -24,8 +24,8 @@ dependencies:
specifier: ^5.0.20
version: 5.0.20
'@invoke-ai/ui-library':
specifier: ^0.0.32
version: 0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
specifier: ^0.0.31
version: 0.0.31(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
'@nanostores/react':
specifier: ^0.7.3
version: 0.7.3(nanostores@0.11.2)(react@18.3.1)
@ -40,7 +40,7 @@ dependencies:
version: 0.5.0
chakra-react-select:
specifier: ^4.9.1
version: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.3)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
version: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.0)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
compare-versions:
specifier: ^6.1.1
version: 6.1.1
@ -1752,13 +1752,6 @@ packages:
dependencies:
regenerator-runtime: 0.14.1
/@babel/runtime@7.25.4:
resolution: {integrity: sha512-DSgLeL/FNcpXuzav5wfYvHCGvynXkJbn3Zvc3823AEe9nPwW9IK4UoCSS5yGymmQzN0pCPvivtgS6/8U2kkm1w==}
engines: {node: '>=6.9.0'}
dependencies:
regenerator-runtime: 0.14.1
dev: false
/@babel/template@7.24.0:
resolution: {integrity: sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==}
engines: {node: '>=6.9.0'}
@ -1845,7 +1838,7 @@ packages:
'@chakra-ui/react-use-controllable-state': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
@ -1861,7 +1854,7 @@ packages:
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -1879,7 +1872,7 @@ packages:
'@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -1892,7 +1885,7 @@ packages:
'@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -1912,7 +1905,7 @@ packages:
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -1923,7 +1916,7 @@ packages:
react: '>=18'
dependencies:
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -1942,7 +1935,7 @@ packages:
'@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/visually-hidden': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@zag-js/focus-visible': 0.16.0
react: 18.3.1
@ -1965,7 +1958,7 @@ packages:
react: '>=18'
dependencies:
'@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -1984,7 +1977,7 @@ packages:
'@chakra-ui/system': '>=2.0.0'
react: '>=18'
dependencies:
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -1999,13 +1992,13 @@ packages:
react: 18.3.1
dev: false
/@chakra-ui/css-reset@2.3.0(@emotion/react@11.13.3)(react@18.3.1):
/@chakra-ui/css-reset@2.3.0(@emotion/react@11.13.0)(react@18.3.1):
resolution: {integrity: sha512-cQwwBy5O0jzvl0K7PLTLgp8ijqLPKyuEMiDXwYzl95seD3AoeuoCLyzZcJtVqaUZ573PiBdAbY/IlZcwDOItWg==}
peerDependencies:
'@emotion/react': '>=10.0.35'
react: '>=18'
dependencies:
'@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
'@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
react: 18.3.1
dev: false
@ -2038,7 +2031,7 @@ packages:
'@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2069,7 +2062,7 @@ packages:
'@chakra-ui/react-types': 2.0.7(react@18.3.1)
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2092,7 +2085,7 @@ packages:
react: '>=18'
dependencies:
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2103,7 +2096,7 @@ packages:
react: '>=18'
dependencies:
'@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2115,7 +2108,7 @@ packages:
dependencies:
'@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2130,7 +2123,7 @@ packages:
'@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2146,7 +2139,7 @@ packages:
'@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2171,7 +2164,7 @@ packages:
'@chakra-ui/breakpoint-utils': 2.0.8
'@chakra-ui/react-env': 3.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2196,7 +2189,7 @@ packages:
'@chakra-ui/react-use-outside-click': 2.2.0(react@18.3.1)
'@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
@ -2223,7 +2216,7 @@ packages:
'@chakra-ui/react-use-outside-click': 2.2.0(react@18.3.1)
'@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/transition': 2.1.0(framer-motion@11.3.24)(react@18.3.1)
framer-motion: 11.3.24(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
@ -2244,7 +2237,7 @@ packages:
'@chakra-ui/react-types': 2.0.7(react@18.3.1)
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
aria-hidden: 1.2.4
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
@ -2273,7 +2266,7 @@ packages:
'@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2297,7 +2290,7 @@ packages:
'@chakra-ui/react-use-controllable-state': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2319,7 +2312,7 @@ packages:
'@chakra-ui/react-use-focus-on-pointer-down': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
dev: false
@ -2354,11 +2347,11 @@ packages:
react: '>=18'
dependencies:
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
/@chakra-ui/provider@2.4.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1):
/@chakra-ui/provider@2.4.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-w0Tef5ZCJK1mlJorcSjItCSbyvVuqpvyWdxZiVQmE6fvSJR83wZof42ux0+sfWD+I7rHSfj+f9nzhNaEWClysw==}
peerDependencies:
'@emotion/react': ^11.0.0
@ -2366,13 +2359,13 @@ packages:
react: '>=18'
react-dom: '>=18'
dependencies:
'@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.3)(react@18.3.1)
'@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.0)(react@18.3.1)
'@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
'@chakra-ui/react-env': 3.1.0(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/utils': 2.0.15
'@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
'@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
dev: false
@ -2388,7 +2381,7 @@ packages:
'@chakra-ui/react-types': 2.0.7(react@18.3.1)
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@zag-js/focus-visible': 0.16.0
react: 18.3.1
dev: false
@ -2588,7 +2581,7 @@ packages:
react: 18.3.1
dev: false
/@chakra-ui/react@2.8.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1):
/@chakra-ui/react@2.8.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-Hn0moyxxyCDKuR9ywYpqgX8dvjqwu9ArwpIb9wHNYjnODETjLwazgNIliCVBRcJvysGRiV51U2/JtJVrpeCjUQ==}
peerDependencies:
'@emotion/react': ^11.0.0
@ -2607,7 +2600,7 @@ packages:
'@chakra-ui/close-button': 2.1.1(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/control-box': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/counter': 2.1.0(react@18.3.1)
'@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.3)(react@18.3.1)
'@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.0)(react@18.3.1)
'@chakra-ui/editable': 3.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/focus-lock': 2.1.0(@types/react@18.3.3)(react@18.3.1)
'@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
@ -2626,7 +2619,7 @@ packages:
'@chakra-ui/popper': 3.1.0(react@18.3.1)
'@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
'@chakra-ui/progress': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/provider': 2.4.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1)
'@chakra-ui/provider': 2.4.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1)
'@chakra-ui/radio': 2.1.2(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/react-env': 3.1.0(react@18.3.1)
'@chakra-ui/select': 2.1.2(@chakra-ui/system@2.6.2)(react@18.3.1)
@ -2638,7 +2631,7 @@ packages:
'@chakra-ui/stepper': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/styled-system': 2.9.2
'@chakra-ui/switch': 2.1.2(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/table': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/tabs': 3.0.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/tag': 3.1.1(@chakra-ui/system@2.6.2)(react@18.3.1)
@ -2650,8 +2643,8 @@ packages:
'@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
'@chakra-ui/utils': 2.0.15
'@chakra-ui/visually-hidden': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
'@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
@ -2667,7 +2660,7 @@ packages:
dependencies:
'@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2684,7 +2677,7 @@ packages:
'@chakra-ui/media-query': 3.3.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/react-use-previous': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2694,7 +2687,7 @@ packages:
'@chakra-ui/system': '>=2.0.0'
react: '>=18'
dependencies:
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2714,7 +2707,7 @@ packages:
'@chakra-ui/react-use-pan-event': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-size': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2725,7 +2718,7 @@ packages:
react: '>=18'
dependencies:
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2738,7 +2731,7 @@ packages:
'@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2751,7 +2744,7 @@ packages:
'@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2772,12 +2765,12 @@ packages:
dependencies:
'@chakra-ui/checkbox': 2.3.2(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
dev: false
/@chakra-ui/system@2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1):
/@chakra-ui/system@2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1):
resolution: {integrity: sha512-EGtpoEjLrUu4W1fHD+a62XR+hzC5YfsWm+6lO0Kybcga3yYEij9beegO0jZgug27V+Rf7vns95VPVP6mFd/DEQ==}
peerDependencies:
'@emotion/react': ^11.0.0
@ -2790,8 +2783,8 @@ packages:
'@chakra-ui/styled-system': 2.9.2
'@chakra-ui/theme-utils': 2.0.21
'@chakra-ui/utils': 2.0.15
'@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
'@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
react: 18.3.1
react-fast-compare: 3.2.2
dev: false
@ -2804,7 +2797,7 @@ packages:
dependencies:
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2823,7 +2816,7 @@ packages:
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2835,7 +2828,7 @@ packages:
dependencies:
'@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/react-context': 2.1.0(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@ -2847,7 +2840,7 @@ packages:
dependencies:
'@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@@ -2898,7 +2891,7 @@ packages:
'@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/styled-system': 2.9.2
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/theme': 3.3.1(@chakra-ui/styled-system@2.9.2)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
@@ -2921,7 +2914,7 @@ packages:
'@chakra-ui/react-use-event-listener': 2.1.0(react@18.3.1)
'@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
'@chakra-ui/shared-utils': 2.0.5
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
@@ -2964,7 +2957,7 @@ packages:
'@chakra-ui/system': '>=2.0.0'
react: '>=18'
dependencies:
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
react: 18.3.1
dev: false
@@ -3047,10 +3040,10 @@ packages:
resolution: {integrity: sha512-y2WQb+oP8Jqvvclh8Q55gLUyb7UFvgv7eJfsj7td5TToBrIUtPay2kMrZi4xjq9qw2vD0ZR5fSho0yqoFgX7Rw==}
dependencies:
'@babel/helper-module-imports': 7.24.7
'@babel/runtime': 7.25.4
'@babel/runtime': 7.25.0
'@emotion/hash': 0.9.2
'@emotion/memoize': 0.9.0
'@emotion/serialize': 1.3.1
'@emotion/serialize': 1.3.0
babel-plugin-macros: 3.1.0
convert-source-map: 1.9.0
escape-string-regexp: 4.0.0
@@ -3138,8 +3131,8 @@ packages:
react: 18.3.1
dev: false
/@emotion/react@11.13.3(@types/react@18.3.3)(react@18.3.1):
resolution: {integrity: sha512-lIsdU6JNrmYfJ5EbUCf4xW1ovy5wKQ2CkPRM4xogziOxH1nXxBSjpC9YqbFAP7circxMfYp+6x676BqWcEiixg==}
/@emotion/react@11.13.0(@types/react@18.3.3)(react@18.3.1):
resolution: {integrity: sha512-WkL+bw1REC2VNV1goQyfxjx1GYJkcc23CRQkXX+vZNLINyfI7o+uUn/rTGPt/xJ3bJHd5GcljgnxHf4wRw5VWQ==}
peerDependencies:
'@types/react': '*'
react: '>=16.8.0'
@@ -3147,10 +3140,10 @@ packages:
'@types/react':
optional: true
dependencies:
'@babel/runtime': 7.25.4
'@babel/runtime': 7.25.0
'@emotion/babel-plugin': 11.12.0
'@emotion/cache': 11.13.1
'@emotion/serialize': 1.3.1
'@emotion/serialize': 1.3.0
'@emotion/use-insertion-effect-with-fallbacks': 1.1.0(react@18.3.1)
'@emotion/utils': 1.4.0
'@emotion/weak-memoize': 0.4.0
@@ -3171,12 +3164,12 @@ packages:
csstype: 3.1.3
dev: false
/@emotion/serialize@1.3.1:
resolution: {integrity: sha512-dEPNKzBPU+vFPGa+z3axPRn8XVDetYORmDC0wAiej+TNcOZE70ZMJa0X7JdeoM6q/nWTMZeLpN/fTnD9o8MQBA==}
/@emotion/serialize@1.3.0:
resolution: {integrity: sha512-jACuBa9SlYajnpIVXB+XOXnfJHyckDfe6fOpORIM6yhBDlqGuExvDdZYHDQGoDf3bZXGv7tNr+LpLjJqiEQ6EA==}
dependencies:
'@emotion/hash': 0.9.2
'@emotion/memoize': 0.9.0
'@emotion/unitless': 0.10.0
'@emotion/unitless': 0.9.0
'@emotion/utils': 1.4.0
csstype: 3.1.3
dev: false
@@ -3189,7 +3182,7 @@ packages:
resolution: {integrity: sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==}
dev: false
/@emotion/styled@11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1):
/@emotion/styled@11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1):
resolution: {integrity: sha512-tkzkY7nQhW/zC4hztlwucpT8QEZ6eUzpXDRhww/Eej4tFfO0FxQYWRyg/c5CCXa4d/f174kqeXYjuQRnhzf6dA==}
peerDependencies:
'@emotion/react': ^11.0.0-rc.0
@@ -3199,11 +3192,11 @@ packages:
'@types/react':
optional: true
dependencies:
'@babel/runtime': 7.25.4
'@babel/runtime': 7.25.0
'@emotion/babel-plugin': 11.12.0
'@emotion/is-prop-valid': 1.3.0
'@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
'@emotion/serialize': 1.3.1
'@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
'@emotion/serialize': 1.3.0
'@emotion/use-insertion-effect-with-fallbacks': 1.1.0(react@18.3.1)
'@emotion/utils': 1.4.0
'@types/react': 18.3.3
@@ -3212,14 +3205,14 @@ packages:
- supports-color
dev: false
/@emotion/unitless@0.10.0:
resolution: {integrity: sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==}
dev: false
/@emotion/unitless@0.8.1:
resolution: {integrity: sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==}
dev: false
/@emotion/unitless@0.9.0:
resolution: {integrity: sha512-TP6GgNZtmtFaFcsOgExdnfxLLpRDla4Q66tnenA9CktvVSdNKDvMVuUah4QvWPIpNjrWsGg3qeGo9a43QooGZQ==}
dev: false
/@emotion/use-insertion-effect-with-fallbacks@1.0.1(react@18.3.1):
resolution: {integrity: sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw==}
peerDependencies:
@@ -3571,8 +3564,8 @@ packages:
prettier: 3.3.3
dev: true
/@invoke-ai/ui-library@0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-JxAoblrDu/cZ4ha9KO4ry5OWvyLUE1Dj28i+ciMaDNUpC/cN+IyiTbUBoFoPaoN5JP8Zpd/MYCcmF2qsziHDzg==}
/@invoke-ai/ui-library@0.0.31(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-7LtOUN/bcGHc8jCRd2m22DvP2eeogqwM/shdXQpLH5RY2FzWJNXlWdVT4hIPGDu7znnk3xvXlZvo6tiGSjbnCQ==}
peerDependencies:
'@fontsource-variable/inter': ^5.0.16
react: ^18.2.0
@@ -3582,14 +3575,14 @@ packages:
'@chakra-ui/icons': 2.1.1(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
'@chakra-ui/react': 2.8.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1)
'@chakra-ui/react': 2.8.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1)
'@chakra-ui/styled-system': 2.9.2
'@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
'@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
'@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
'@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
'@fontsource-variable/inter': 5.0.20
'@nanostores/react': 0.7.3(nanostores@0.11.2)(react@18.3.1)
chakra-react-select: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.3)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
chakra-react-select: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.0)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
lodash-es: 4.17.21
nanostores: 0.11.2
@@ -5781,7 +5774,7 @@ packages:
resolution: {integrity: sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==}
engines: {node: '>=10'}
dependencies:
tslib: 2.7.0
tslib: 2.6.3
dev: false
/aria-query@5.3.0:
@@ -6133,7 +6126,7 @@ packages:
type-detect: 4.0.8
dev: true
/chakra-react-select@4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.3)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1):
/chakra-react-select@4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.0)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1):
resolution: {integrity: sha512-jmgfN+S/wnTaCp3pW30GYDIZ5J8jWcT1gIbhpw6RdKV+atm/U4/sT+gaHOHHhRL8xeaYip+iI/m8MPGREkve0w==}
peerDependencies:
'@chakra-ui/form-control': ^2.0.0
@@ -6153,8 +6146,8 @@ packages:
'@chakra-ui/media-query': 3.3.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/menu': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.3.24)(react@18.3.1)
'@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
'@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
'@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
'@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
react-select: 5.8.0(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
@@ -7579,7 +7572,7 @@ packages:
resolution: {integrity: sha512-QFaHbhv9WPUeLYBDe/PAuLKJ4Dd9OPvKs9xZBr3yLXnUrDNaVXKu2baDBXe3naPY30hgHYSsf2JW4jzas2mDEQ==}
engines: {node: '>=10'}
dependencies:
tslib: 2.7.0
tslib: 2.6.3
dev: false
/for-each@0.3.3:
@@ -7618,7 +7611,7 @@ packages:
dependencies:
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
tslib: 2.7.0
tslib: 2.6.3
optionalDependencies:
'@emotion/is-prop-valid': 0.8.8
dev: false
@@ -9626,7 +9619,7 @@ packages:
peerDependencies:
react: ^15.3.0 || ^16.0.0 || ^17.0.0 || ^18.0.0
dependencies:
'@babel/runtime': 7.25.4
'@babel/runtime': 7.25.0
react: 18.3.1
dev: false
@@ -9721,7 +9714,7 @@ packages:
'@types/react':
optional: true
dependencies:
'@babel/runtime': 7.25.4
'@babel/runtime': 7.25.0
'@types/react': 18.3.3
focus-lock: 1.3.5
prop-types: 15.8.1
@@ -9783,7 +9776,7 @@ packages:
react-native:
optional: true
dependencies:
'@babel/runtime': 7.25.4
'@babel/runtime': 7.25.0
html-parse-stringify: 3.0.1
i18next: 23.12.2
react: 18.3.1
@@ -9845,7 +9838,7 @@ packages:
'@types/react': 18.3.3
react: 18.3.1
react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
tslib: 2.7.0
tslib: 2.6.3
dev: false
/react-remove-scroll@2.5.10(@types/react@18.3.3)(react@18.3.1):
@@ -9862,7 +9855,7 @@ packages:
react: 18.3.1
react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1)
react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
tslib: 2.7.0
tslib: 2.6.3
use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1)
use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
dev: false
@@ -9912,7 +9905,7 @@ packages:
get-nonce: 1.0.1
invariant: 2.2.4
react: 18.3.1
tslib: 2.7.0
tslib: 2.6.3
dev: false
/react-transition-group@4.4.5(react-dom@18.3.1)(react@18.3.1):
@@ -11052,10 +11045,6 @@ packages:
/tslib@2.6.3:
resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==}
/tslib@2.7.0:
resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==}
dev: false
/tsutils@3.21.0(typescript@5.5.4):
resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==}
engines: {node: '>= 6'}
@@ -11303,7 +11292,7 @@ packages:
dependencies:
'@types/react': 18.3.3
react: 18.3.1
tslib: 2.7.0
tslib: 2.6.3
dev: false
/use-debounce@10.0.2(react@18.3.1):
@@ -11349,7 +11338,7 @@ packages:
'@types/react': 18.3.3
detect-node-es: 1.1.0
react: 18.3.1
tslib: 2.7.0
tslib: 2.6.3
dev: false
/use-sync-external-store@1.2.0(react@18.3.1):

View File

@@ -790,7 +790,6 @@
"simpleModelPlaceholder": "URL or path to a local file or diffusers folder",
"source": "Source",
"starterModels": "Starter Models",
"starterModelsInModelManager": "Starter Models can be found in Model Manager",
"syncModels": "Sync Models",
"textualInversions": "Textual Inversions",
"triggerPhrases": "Trigger Phrases",
@@ -1646,7 +1645,6 @@
"storeNotInitialized": "Store is not initialized"
},
"controlLayers": {
"clearHistory": "Clear History",
"generateMode": "Generate",
"generateModeDesc": "Create individual images. Generated images are added directly to the gallery.",
"composeMode": "Compose",
@@ -1654,6 +1652,7 @@
"autoSave": "Auto-save to Gallery",
"resetCanvas": "Reset Canvas",
"resetAll": "Reset All",
"deleteAll": "Delete All",
"clearCaches": "Clear Caches",
"recalculateRects": "Recalculate Rects",
"clipToBbox": "Clip Strokes to Bbox",
@@ -1676,7 +1675,7 @@
"resetRegion": "Reset Region",
"debugLayers": "Debug Layers",
"rectangle": "Rectangle",
"maskFill": "Mask Fill",
"maskPreviewColor": "Mask Preview Color",
"addPositivePrompt": "Add $t(common.positivePrompt)",
"addNegativePrompt": "Add $t(common.negativePrompt)",
"addIPAdapter": "Add $t(common.ipAdapter)",
@@ -1730,10 +1729,6 @@
"showingType": "Showing {{type}}",
"dynamicGrid": "Dynamic Grid",
"logDebugInfo": "Log Debug Info",
"locked": "Locked",
"unlocked": "Unlocked",
"deleteSelected": "Delete Selected",
"deleteAll": "Delete All",
"fill": {
"fillStyle": "Fill Style",
"solid": "Solid",

View File

@@ -17,7 +17,7 @@ import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterM
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { languageSelector } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
import { setActiveTab } from 'features/ui/store/uiSlice';
import type { TabName } from 'features/ui/store/uiTypes';
@@ -45,7 +45,7 @@ interface Props {
}
const App = ({ config = DEFAULT_CONFIG, selectedImage, selectedWorkflowId, destination }: Props) => {
const language = useAppSelector(selectLanguage);
const language = useAppSelector(languageSelector);
const logger = useLogger('system');
const dispatch = useAppDispatch();
const clearStorage = useClearStorage();

View File

@@ -1,7 +1,5 @@
import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import newGithubIssueUrl from 'new-github-issue-url';
import InvokeLogoYellow from 'public/assets/images/invoke-symbol-ylw-lrg.svg';
@@ -15,11 +13,9 @@ type Props = {
resetErrorBoundary: () => void;
};
const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);
const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
const { t } = useTranslation();
const isLocal = useAppSelector(selectIsLocal);
const isLocal = useAppSelector((s) => s.config.isLocal);
const handleCopy = useCallback(() => {
const text = JSON.stringify(serializeError(error), null, 2);

View File

@@ -1,10 +1,5 @@
import { createLogWriter } from '@roarr/browser-log-writer';
import { useAppSelector } from 'app/store/storeHooks';
import {
selectSystemLogIsEnabled,
selectSystemLogLevel,
selectSystemLogNamespaces,
} from 'features/system/store/systemSlice';
import { useEffect, useMemo } from 'react';
import { ROARR, Roarr } from 'roarr';
@@ -12,9 +7,9 @@ import type { LogNamespace } from './logger';
import { $logger, BASE_CONTEXT, LOG_LEVEL_MAP, logger } from './logger';
export const useLogger = (namespace: LogNamespace) => {
const logLevel = useAppSelector(selectSystemLogLevel);
const logNamespaces = useAppSelector(selectSystemLogNamespaces);
const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
const logLevel = useAppSelector((s) => s.system.logLevel);
const logNamespaces = useAppSelector((s) => s.system.logNamespaces);
const logIsEnabled = useAppSelector((s) => s.system.logIsEnabled);
// The provided Roarr browser log writer uses localStorage to config logging to console
useEffect(() => {

View File

@@ -1,3 +1,2 @@
export const STORAGE_PREFIX = '@@invokeai-';
export const EMPTY_ARRAY = [];
export const EMPTY_OBJECT = {};
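These module-level empty constants are a deliberate Redux pattern rather than dead code: a selector that falls back to a fresh [] or {} returns a new reference on every call, defeating memoization and re-rendering every subscriber. A minimal sketch of the idea, with an illustrative selector and an assumed import path (neither is part of this compare):

import { EMPTY_ARRAY } from 'app/store/constants'; // path assumed for the sketch
import type { RootState } from 'app/store/store';

// Falling back to the shared constant keeps the result reference-equal across
// calls, so useAppSelector subscribers skip re-renders when the value is absent.
const selectSelection = (state: RootState) => state.gallery.selection ?? EMPTY_ARRAY;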

View File

@@ -1,11 +1,10 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import {
rasterLayerAdded,
sessionStagingAreaImageAccepted,
sessionStagingAreaReset,
} from 'features/controlLayers/store/canvasSessionSlice';
import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
} from 'features/controlLayers/store/canvasV2Slice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';
@@ -56,10 +55,10 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
effect: (action, api) => {
const { index } = action.payload;
const state = api.getState();
const stagingAreaImage = state.canvasSession.stagedImages[index];
const stagingAreaImage = state.canvasV2.session.stagedImages[index];
assert(stagingAreaImage, 'No staged image found to accept');
const { x, y } = selectCanvasSlice(state).bbox.rect;
const { x, y } = state.canvasV2.bbox.rect;
const { imageDTO, offsetX, offsetY } = stagingAreaImage;
const imageObject = imageDTOToImageObject(imageDTO);

View File

@@ -1,5 +1,5 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
import { setInfillMethod } from 'features/controlLayers/store/canvasV2Slice';
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';
@@ -8,7 +8,7 @@ export const addAppConfigReceivedListener = (startAppListening: AppStartListenin
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
effect: (action, { getState, dispatch }) => {
const { infill_methods = [], nsfw_methods = [], watermarking_methods = [] } = action.payload;
const infillMethod = getState().params.infillMethod;
const infillMethod = getState().canvasV2.compositing.infillMethod;
if (!infill_methods.includes(infillMethod)) {
// if there is no infill method, set it to the first one

View File

@@ -1,8 +1,6 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getImageUsage } from 'features/deleteImageModal/store/selectors';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { imagesApi } from 'services/api/endpoints/images';
export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppStartListening) => {
@@ -15,12 +13,10 @@ export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppS
let wasNodeEditorReset = false;
const state = getState();
const nodes = selectNodesSlice(state);
const canvas = selectCanvasSlice(state);
const { nodes, canvasV2 } = getState();
deleted_images.forEach((image_name) => {
const imageUsage = getImageUsage(nodes, canvas, image_name);
const imageUsage = getImageUsage(nodes.present, canvasV2, image_name);
if (imageUsage.isNodesImage && !wasNodeEditorReset) {
dispatch(nodeEditorReset());

View File

@@ -1,11 +1,8 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import type { Result } from 'common/util/result';
import { isErr, withResult, withResultAsync } from 'common/util/result';
import { $canvasManager } from 'features/controlLayers/konva/CanvasManager';
import { sessionStagingAreaReset, sessionStartedStaging } from 'features/controlLayers/store/canvasSessionSlice';
import { sessionStagingAreaReset, sessionStartedStaging } from 'features/controlLayers/store/canvasV2Slice';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';
@@ -23,77 +20,55 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
enqueueRequested.match(action) && action.payload.tabName === 'generation',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const model = state.params.model;
const model = state.canvasV2.params.model;
const { prepend } = action.payload;
const manager = $canvasManager.get();
assert(manager, 'No model found in state');
let didStartStaging = false;
if (!state.canvasSession.isStaging && state.canvasSession.mode === 'compose') {
if (!state.canvasV2.session.isStaging && state.canvasV2.session.mode === 'compose') {
dispatch(sessionStartedStaging());
didStartStaging = true;
}
const abortStaging = () => {
if (didStartStaging && getState().canvasSession.isStaging) {
try {
let g: Graph;
let noise: Invocation<'noise'>;
let posCond: Invocation<'compel' | 'sdxl_compel_prompt'>;
assert(model, 'No model found in state');
const base = model.base;
if (base === 'sdxl') {
const result = await buildSDXLGraph(state, manager);
g = result.g;
noise = result.noise;
posCond = result.posCond;
} else if (base === 'sd-1' || base === 'sd-2') {
const result = await buildSD1Graph(state, manager);
g = result.g;
noise = result.noise;
posCond = result.posCond;
} else {
assert(false, `No graph builders for base ${base}`);
}
const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond);
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
fixedCacheKey: 'enqueueBatch',
})
);
req.reset();
await req.unwrap();
} catch (error) {
log.error({ error: serializeError(error) }, 'Failed to enqueue batch');
if (didStartStaging && getState().canvasV2.session.isStaging) {
dispatch(sessionStagingAreaReset());
}
};
let buildGraphResult: Result<
{ g: Graph; noise: Invocation<'noise'>; posCond: Invocation<'compel' | 'sdxl_compel_prompt'> },
Error
>;
assert(model, 'No model found in state');
const base = model.base;
switch (base) {
case 'sdxl':
buildGraphResult = await withResultAsync(() => buildSDXLGraph(state, manager));
break;
case 'sd-1':
case `sd-2`:
buildGraphResult = await withResultAsync(() => buildSD1Graph(state, manager));
break;
default:
assert(false, `No graph builders for base ${base}`);
}
if (isErr(buildGraphResult)) {
log.error({ error: serializeError(buildGraphResult.error) }, 'Failed to build graph');
abortStaging();
return;
}
const { g, noise, posCond } = buildGraphResult.value;
const prepareBatchResult = withResult(() => prepareLinearUIBatch(state, g, prepend, noise, posCond));
if (isErr(prepareBatchResult)) {
log.error({ error: serializeError(prepareBatchResult.error) }, 'Failed to prepare batch');
abortStaging();
return;
}
const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, {
fixedCacheKey: 'enqueueBatch',
})
);
req.reset();
const enqueueResult = await withResultAsync(() => req.unwrap());
if (isErr(enqueueResult)) {
log.error({ error: serializeError(enqueueResult.error) }, 'Failed to enqueue batch');
abortStaging();
return;
}
log.debug({ batchConfig: prepareBatchResult.value } as SerializableObject, 'Enqueued batch');
},
});
};
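One side of this hunk threads every fallible step through the Result helpers (withResult, withResultAsync, isErr) from common/util/result, whose full source appears later in this compare, instead of a single try/catch. The shape of that control flow, condensed into a self-contained sketch; the two step functions are hypothetical stand-ins for the real graph-build and batch-prepare calls:

import { isErr, withResult, withResultAsync } from 'common/util/result';

// Hypothetical stand-ins for buildSDXLGraph / prepareLinearUIBatch.
const buildStep = async (): Promise<number> => 42;
const prepareStep = (n: number): string => `batch-${n}`;

export const run = async (): Promise<void> => {
  const built = await withResultAsync(buildStep);
  if (isErr(built)) {
    return; // log and clean up, as abortStaging() does above
  }
  const prepared = withResult(() => prepareStep(built.value));
  if (isErr(prepared)) {
    return;
  }
  // Both steps succeeded; prepared.value is ready to dispatch.
};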

View File

@@ -1,6 +1,5 @@
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
import { queueApi } from 'services/api/endpoints/queue';
@@ -12,12 +11,12 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
enqueueRequested.match(action) && action.payload.tabName === 'workflows',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const nodes = selectNodesSlice(state);
const { nodes, edges } = state.nodes.present;
const workflow = state.workflow;
const graph = buildNodesGraph(nodes);
const graph = buildNodesGraph(state.nodes.present);
const builtWorkflow = buildWorkflowWithValidation({
nodes: nodes.nodes,
edges: nodes.edges,
nodes,
edges,
workflow,
});
@@ -30,7 +29,7 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
batch: {
graph,
workflow: builtWorkflow,
runs: state.params.iterations,
runs: state.canvasV2.params.iterations,
origin: 'workflows',
},
prepend: action.payload.prepend,

View File

@@ -1,9 +1,7 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppDispatch, RootState } from 'app/store/store';
import { entityDeleted, ipaImageChanged } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { entityDeleted, ipaImageChanged } from 'features/controlLayers/store/canvasV2Slice';
import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';
@@ -41,7 +39,7 @@ const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: Im
};
// const deleteControlAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
// state.canvas.present.controlAdapters.entities.forEach(({ id, imageObject, processedImageObject }) => {
// state.canvasV2.controlAdapters.entities.forEach(({ id, imageObject, processedImageObject }) => {
// if (
// imageObject?.image.image_name === imageDTO.image_name ||
// processedImageObject?.image.image_name === imageDTO.image_name
@@ -53,15 +51,15 @@ const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: Im
// };
const deleteIPAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).ipAdapters.entities.forEach((entity) => {
if (entity.ipAdapter.image?.image_name === imageDTO.image_name) {
dispatch(ipaImageChanged({ entityIdentifier: getEntityIdentifier(entity), imageDTO: null }));
state.canvasV2.ipAdapters.entities.forEach(({ id, ipAdapter }) => {
if (ipAdapter.image?.image_name === imageDTO.image_name) {
dispatch(ipaImageChanged({ id, imageDTO: null }));
}
});
};
const deleteLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).rasterLayers.entities.forEach(({ id, objects }) => {
state.canvasV2.rasterLayers.entities.forEach(({ id, objects }) => {
let shouldDelete = false;
for (const obj of objects) {
if (obj.type === 'image' && obj.image.image_name === imageDTO.image_name) {

View File

@@ -6,8 +6,7 @@ import {
ipaImageChanged,
rasterLayerAdded,
rgIPAdapterImageChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
} from 'features/controlLayers/store/canvasV2Slice';
import type { CanvasControlLayerState, CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';
@@ -52,9 +51,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const { id } = overData.context;
dispatch(
ipaImageChanged({ entityIdentifier: { id, type: 'ip_adapter' }, imageDTO: activeData.payload.imageDTO })
);
dispatch(ipaImageChanged({ id, imageDTO: activeData.payload.imageDTO }));
return;
}
@@ -67,13 +64,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const { id, ipAdapterId } = overData.context;
dispatch(
rgIPAdapterImageChanged({
entityIdentifier: { id, type: 'regional_guidance' },
ipAdapterId,
imageDTO: activeData.payload.imageDTO,
})
);
dispatch(rgIPAdapterImageChanged({ id, ipAdapterId, imageDTO: activeData.payload.imageDTO }));
return;
}
@@ -86,7 +77,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
const { x, y } = getState().canvasV2.bbox.rect;
const overrides: Partial<CanvasRasterLayerState> = {
objects: [imageObject],
position: { x, y },
@@ -104,7 +95,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
const { x, y } = getState().canvasV2.bbox.rect;
const overrides: Partial<CanvasControlLayerState> = {
objects: [imageObject],
position: { x, y },

View File

@@ -1,6 +1,6 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { ipaImageChanged, rgIPAdapterImageChanged } from 'features/controlLayers/store/canvasSlice';
import { ipaImageChanged, rgIPAdapterImageChanged } from 'features/controlLayers/store/canvasV2Slice';
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';
@@ -89,16 +89,14 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis
if (postUploadAction?.type === 'SET_IPA_IMAGE') {
const { id } = postUploadAction;
dispatch(ipaImageChanged({ entityIdentifier: { id, type: 'ip_adapter' }, imageDTO }));
dispatch(ipaImageChanged({ id, imageDTO }));
toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
return;
}
if (postUploadAction?.type === 'SET_RG_IP_ADAPTER_IMAGE') {
const { id, ipAdapterId } = postUploadAction;
dispatch(
rgIPAdapterImageChanged({ entityIdentifier: { id, type: 'regional_guidance' }, ipAdapterId, imageDTO })
);
dispatch(rgIPAdapterImageChanged({ id, ipAdapterId, imageDTO }));
toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
return;
}

View File

@@ -1,7 +1,6 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { loraDeleted, modelChanged, vaeSelected } from 'features/controlLayers/store/canvasV2Slice';
import { modelSelected } from 'features/parameters/store/actions';
import { zParameterModel } from 'features/parameters/types/parameterSchemas';
import { toast } from 'features/toast/toast';
@@ -24,14 +23,14 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
const newModel = result.data;
const newBaseModel = newModel.base;
const didBaseModelChange = state.params.model?.base !== newBaseModel;
const didBaseModelChange = state.canvasV2.params.model?.base !== newBaseModel;
if (didBaseModelChange) {
// we may need to reset some incompatible submodels
let modelsCleared = 0;
// handle incompatible loras
state.loras.loras.forEach((lora) => {
state.canvasV2.loras.forEach((lora) => {
if (lora.model.base !== newBaseModel) {
dispatch(loraDeleted({ id: lora.id }));
modelsCleared += 1;
@@ -39,14 +38,14 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
});
// handle incompatible vae
const { vae } = state.params;
const { vae } = state.canvasV2.params;
if (vae && vae.base !== newBaseModel) {
dispatch(vaeSelected(null));
modelsCleared += 1;
}
// handle incompatible controlnets
// state.canvas.present.controlAdapters.entities.forEach((ca) => {
// state.canvasV2.controlAdapters.entities.forEach((ca) => {
// if (ca.model?.base !== newBaseModel) {
// modelsCleared += 1;
// if (ca.isEnabled) {
@@ -67,7 +66,7 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
}
}
dispatch(modelChanged({ model: newModel, previousModel: state.params.model }));
dispatch(modelChanged({ model: newModel, previousModel: state.canvasV2.params.model }));
},
});
};

View File

@@ -7,12 +7,12 @@ import {
bboxWidthChanged,
controlLayerModelChanged,
ipaModelChanged,
loraDeleted,
modelChanged,
refinerModelChanged,
rgIPAdapterModelChanged,
} from 'features/controlLayers/store/canvasSlice';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, refinerModelChanged, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
vaeSelected,
} from 'features/controlLayers/store/canvasV2Slice';
import { calculateNewSize } from 'features/parameters/components/DocumentSize/calculateNewSize';
import { postProcessingModelChanged, upscaleModelChanged } from 'features/parameters/store/upscaleSlice';
import { zParameterModel, zParameterVAEModel } from 'features/parameters/types/parameterSchemas';
@@ -62,7 +62,7 @@ type ModelHandler = (
) => undefined;
const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
const currentModel = state.params.model;
const currentModel = state.canvasV2.params.model;
const mainModels = models.filter(isNonRefinerMainModelConfig);
if (mainModels.length === 0) {
// No models loaded at all
@@ -82,12 +82,15 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
const result = zParameterModel.safeParse(defaultModelInList);
if (result.success) {
dispatch(modelChanged({ model: defaultModelInList, previousModel: currentModel }));
const { bbox } = selectCanvasSlice(state);
const optimalDimension = getOptimalDimension(defaultModelInList);
if (getIsSizeOptimal(bbox.rect.width, bbox.rect.height, optimalDimension)) {
if (getIsSizeOptimal(state.canvasV2.bbox.rect.width, state.canvasV2.bbox.rect.height, optimalDimension)) {
return;
}
const { width, height } = calculateNewSize(bbox.aspectRatio.value, optimalDimension * optimalDimension);
const { width, height } = calculateNewSize(
state.canvasV2.bbox.aspectRatio.value,
optimalDimension * optimalDimension
);
dispatch(bboxWidthChanged({ width }));
dispatch(bboxHeightChanged({ height }));
@@ -106,7 +109,7 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
};
const handleRefinerModels: ModelHandler = (models, state, dispatch, _log) => {
const currentRefinerModel = state.params.refinerModel;
const currentRefinerModel = state.canvasV2.params.refinerModel;
const refinerModels = models.filter(isRefinerMainModelModelConfig);
if (models.length === 0) {
// No models loaded at all
@@ -125,7 +128,7 @@ const handleRefinerModels: ModelHandler = (models, state, dispatch, _log) => {
};
const handleVAEModels: ModelHandler = (models, state, dispatch, log) => {
const currentVae = state.params.vae;
const currentVae = state.canvasV2.params.vae;
if (currentVae === null) {
// null is a valid VAE! it means "use the default with the main model"
@@ -159,7 +162,7 @@ const handleVAEModels: ModelHandler = (models, state, dispatch, log) => {
const handleLoRAModels: ModelHandler = (models, state, dispatch, _log) => {
const loraModels = models.filter(isLoRAModelConfig);
state.loras.loras.forEach((lora) => {
state.canvasV2.loras.forEach((lora) => {
const isLoRAAvailable = loraModels.some((m) => m.key === lora.model.key);
if (isLoRAAvailable) {
return;
@@ -170,34 +173,32 @@ const handleLoRAModels: ModelHandler = (models, state, dispatch, _log) => {
const handleControlAdapterModels: ModelHandler = (models, state, dispatch, _log) => {
const caModels = models.filter(isControlNetOrT2IAdapterModelConfig);
selectCanvasSlice(state).controlLayers.entities.forEach((entity) => {
state.canvasV2.controlLayers.entities.forEach((entity) => {
const isModelAvailable = caModels.some((m) => m.key === entity.controlAdapter.model?.key);
if (isModelAvailable) {
return;
}
dispatch(controlLayerModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
dispatch(controlLayerModelChanged({ id: entity.id, modelConfig: null }));
});
};
const handleIPAdapterModels: ModelHandler = (models, state, dispatch, _log) => {
const ipaModels = models.filter(isIPAdapterModelConfig);
selectCanvasSlice(state).ipAdapters.entities.forEach((entity) => {
state.canvasV2.ipAdapters.entities.forEach((entity) => {
const isModelAvailable = ipaModels.some((m) => m.key === entity.ipAdapter.model?.key);
if (isModelAvailable) {
return;
}
dispatch(ipaModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
dispatch(ipaModelChanged({ id: entity.id, modelConfig: null }));
});
selectCanvasSlice(state).regions.entities.forEach((entity) => {
entity.ipAdapters.forEach(({ id: ipAdapterId, model }) => {
state.canvasV2.regions.entities.forEach(({ id, ipAdapters }) => {
ipAdapters.forEach(({ id: ipAdapterId, model }) => {
const isModelAvailable = ipaModels.some((m) => m.key === model?.key);
if (isModelAvailable) {
return;
}
dispatch(
rgIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), ipAdapterId, modelConfig: null })
);
dispatch(rgIPAdapterModelChanged({ id, ipAdapterId, modelConfig: null }));
});
});
};

View File

@@ -1,6 +1,6 @@
import { isAnyOf } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { positivePromptChanged } from 'features/controlLayers/store/paramsSlice';
import { positivePromptChanged } from 'features/controlLayers/store/canvasV2Slice';
import {
combinatorialToggled,
isErrorChanged,

View File

@@ -1,13 +1,14 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
import {
bboxHeightChanged,
bboxWidthChanged,
setCfgRescaleMultiplier,
setCfgScale,
setScheduler,
setSteps,
vaePrecisionChanged,
vaeSelected,
} from 'features/controlLayers/store/paramsSlice';
} from 'features/controlLayers/store/canvasV2Slice';
import { setDefaultSettings } from 'features/parameters/store/actions';
import {
isParameterCFGRescaleMultiplier,
@@ -30,7 +31,7 @@ export const addSetDefaultSettingsListener = (startAppListening: AppStartListeni
effect: async (action, { dispatch, getState }) => {
const state = getState();
const currentModel = state.params.model;
const currentModel = state.canvasV2.params.model;
if (!currentModel) {
return;

View File

@@ -2,7 +2,6 @@ import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { updateAllNodesRequested } from 'features/nodes/store/actions';
import { $templates, nodesChanged } from 'features/nodes/store/nodesSlice';
import { selectNodes } from 'features/nodes/store/selectors';
import { NodeUpdateError } from 'features/nodes/types/error';
import { isInvocationNode } from 'features/nodes/types/invocation';
import { getNeedsUpdate, updateNode } from 'features/nodes/util/node/nodeUpdate';
@@ -15,7 +14,7 @@ export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartLi
startAppListening({
actionCreator: updateAllNodesRequested,
effect: (action, { dispatch, getState }) => {
const nodes = selectNodes(getState());
const { nodes } = getState().nodes.present;
const templates = $templates.get();
let unableToUpdateCount = 0;

View File

@@ -6,12 +6,7 @@ import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
import type { SerializableObject } from 'common/types';
import { deepClone } from 'common/util/deepClone';
import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
import { canvasSessionPersistConfig, canvasSessionSlice } from 'features/controlLayers/store/canvasSessionSlice';
import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { canvasPersistConfig, canvasSlice, canvasUndoableConfig } from 'features/controlLayers/store/canvasSlice';
import { lorasPersistConfig, lorasSlice } from 'features/controlLayers/store/lorasSlice';
import { paramsPersistConfig, paramsSlice } from 'features/controlLayers/store/paramsSlice';
import { toolPersistConfig, toolSlice } from 'features/controlLayers/store/toolSlice';
import { canvasV2PersistConfig, canvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { deleteImageModalSlice } from 'features/deleteImageModal/store/slice';
import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice';
@@ -58,15 +53,10 @@ const allReducers = {
[queueSlice.name]: queueSlice.reducer,
[workflowSlice.name]: workflowSlice.reducer,
[hrfSlice.name]: hrfSlice.reducer,
[canvasSlice.name]: undoable(canvasSlice.reducer, canvasUndoableConfig),
[canvasV2Slice.name]: canvasV2Slice.reducer,
[workflowSettingsSlice.name]: workflowSettingsSlice.reducer,
[upscaleSlice.name]: upscaleSlice.reducer,
[stylePresetSlice.name]: stylePresetSlice.reducer,
[paramsSlice.name]: paramsSlice.reducer,
[toolSlice.name]: toolSlice.reducer,
[canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
[canvasSessionSlice.name]: canvasSessionSlice.reducer,
[lorasSlice.name]: lorasSlice.reducer,
};
const rootReducer = combineReducers(allReducers);
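One note on undoable(canvasSlice.reducer, canvasUndoableConfig) in the hunk above: redux-undo wraps a reducer so its state becomes { past, present, future }, which is why other hunks in this compare read through .present (e.g. getState().nodes.present). A minimal sketch assuming redux-undo's standard API, with an illustrative counter reducer:

import { combineReducers } from '@reduxjs/toolkit';
import undoable from 'redux-undo';

// Any plain reducer can be wrapped. Reads then go through `.present`, and
// dispatching redux-undo's built-in UNDO/REDO actions walks the history.
const counter = (state: number = 0, action: { type: string }): number =>
  action.type === 'counter/incremented' ? state + 1 : state;

const exampleReducer = combineReducers({ counter: undoable(counter) });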
@@ -104,15 +94,10 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
[dynamicPromptsPersistConfig.name]: dynamicPromptsPersistConfig,
[modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig,
[hrfPersistConfig.name]: hrfPersistConfig,
[canvasPersistConfig.name]: canvasPersistConfig,
[canvasV2PersistConfig.name]: canvasV2PersistConfig,
[workflowSettingsPersistConfig.name]: workflowSettingsPersistConfig,
[upscalePersistConfig.name]: upscalePersistConfig,
[stylePresetPersistConfig.name]: stylePresetPersistConfig,
[paramsPersistConfig.name]: paramsPersistConfig,
[toolPersistConfig.name]: toolPersistConfig,
[canvasSettingsPersistConfig.name]: canvasSettingsPersistConfig,
[canvasSessionPersistConfig.name]: canvasSessionPersistConfig,
[lorasPersistConfig.name]: lorasPersistConfig,
};
const unserialize: UnserializeFunction = (data, key) => {

View File

@@ -13,9 +13,8 @@ import {
Spacer,
Text,
} from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectSystemSlice, setShouldEnableInformationalPopovers } from 'features/system/store/systemSlice';
import { setShouldEnableInformationalPopovers } from 'features/system/store/systemSlice';
import { toast } from 'features/toast/toast';
import { merge, omit } from 'lodash-es';
import type { ReactElement } from 'react';
@@ -32,13 +31,8 @@ type Props = {
children: ReactElement;
};
const selectShouldEnableInformationalPopovers = createSelector(
selectSystemSlice,
(system) => system.shouldEnableInformationalPopovers
);
export const InformationalPopover = memo(({ feature, children, inPortal = true, ...rest }: Props) => {
const shouldEnableInformationalPopovers = useAppSelector(selectShouldEnableInformationalPopovers);
const shouldEnableInformationalPopovers = useAppSelector((s) => s.system.shouldEnableInformationalPopovers);
const data = useMemo(() => POPOVER_DATA[feature], [feature]);

View File

@@ -1,6 +1,5 @@
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { toast } from 'features/toast/toast';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { useCallback, useEffect, useState } from 'react';
@@ -27,7 +26,7 @@ const selectPostUploadAction = createMemoizedSelector(selectActiveTab, (activeTa
export const useFullscreenDropzone = () => {
const { t } = useTranslation();
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
const [isHandlingUpload, setIsHandlingUpload] = useState<boolean>(false);
const postUploadAction = useAppSelector(selectPostUploadAction);
const [uploadImage] = useUploadImageMutation();

View File

@@ -1,8 +1,6 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import type { GroupBase } from 'chakra-react-select';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { groupBy, reduce } from 'lodash-es';
import { useCallback, useMemo } from 'react';
@@ -30,13 +28,11 @@ const groupByBaseFunc = <T extends AnyModelConfig>(model: T) => model.base.toUpp
const groupByBaseAndTypeFunc = <T extends AnyModelConfig>(model: T) =>
`${model.base.toUpperCase()} / ${model.type.replaceAll('_', ' ').toUpperCase()}`;
const selectBaseWithSDXLFallback = createSelector(selectParamsSlice, (params) => params.model?.base ?? 'sdxl');
export const useGroupedModelCombobox = <T extends AnyModelConfig>(
arg: UseGroupedModelComboboxArg<T>
): UseGroupedModelComboboxReturn => {
const { t } = useTranslation();
const base = useAppSelector(selectBaseWithSDXLFallback);
const base_model = useAppSelector((s) => s.canvasV2.params.model?.base ?? 'sdxl');
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, groupByType = false } = arg;
const options = useMemo<GroupBase<ComboboxOption>[]>(() => {
if (!modelConfigs) {
@@ -58,9 +54,9 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
},
[] as GroupBase<ComboboxOption>[]
);
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base) ? -1 : 1));
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base_model) ? -1 : 1));
return _options;
}, [modelConfigs, groupByType, getIsDisabled, base]);
}, [modelConfigs, groupByType, getIsDisabled, base_model]);
const value = useMemo(
() =>

View File

@@ -1,5 +1,4 @@
import { useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { useCallback } from 'react';
import { useDropzone } from 'react-dropzone';
import { useUploadImageMutation } from 'services/api/endpoints/images';
@@ -30,7 +29,7 @@ type UseImageUploadButtonArgs = {
* <input {...getUploadInputProps()} /> // hidden, handles native upload functionality
*/
export const useImageUploadButton = ({ postUploadAction, isDisabled }: UseImageUploadButtonArgs) => {
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
const [uploadImage] = useUploadImageMutation();
const onDropAccepted = useCallback(
(files: File[]) => {

View File

@@ -2,12 +2,10 @@ import { useStore } from '@nanostores/react';
import { $isConnected } from 'app/hooks/useSocketIO';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { $templates } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { $templates, selectNodesSlice } from 'features/nodes/store/nodesSlice';
import type { Templates } from 'features/nodes/store/types';
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
import { isInvocationNode } from 'features/nodes/types/invocation';
@@ -35,15 +33,14 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
selectNodesSlice,
selectWorkflowSettingsSlice,
selectDynamicPromptsSlice,
selectCanvasSlice,
selectParamsSlice,
selectCanvasV2Slice,
selectUpscalelice,
selectConfigSlice,
selectActiveTab,
],
(system, nodes, workflowSettings, dynamicPrompts, canvas, params, upscale, config, activeTabName) => {
const { bbox } = canvas;
const { model, positivePrompt } = params;
(system, nodes, workflowSettings, dynamicPrompts, canvasV2, upscale, config, activeTabName) => {
const { bbox } = canvasV2;
const { model, positivePrompt } = canvasV2.params;
const reasons: { prefix?: string; content: string }[] = [];
@@ -125,7 +122,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
}
canvas.controlLayers.entities
canvasV2.controlLayers.entities
.filter((controlLayer) => controlLayer.isEnabled)
.forEach((controlLayer, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');
@@ -155,7 +152,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
}
});
canvas.ipAdapters.entities
canvasV2.ipAdapters.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');
@@ -183,7 +180,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
}
});
canvas.regions.entities
canvasV2.regions.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');
@@ -220,7 +217,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
}
});
canvas.rasterLayers.entities
canvasV2.rasterLayers.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');

View File

@@ -1,72 +0,0 @@
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { describe, expect, it } from 'vitest';
import type { ErrResult, OkResult } from './result';
import { Err, isErr, isOk, Ok, withResult, withResultAsync } from './result'; // Adjust import as needed
const promiseify = <T>(fn: () => T): (() => Promise<T>) => {
return () =>
new Promise((resolve) => {
resolve(fn());
});
};
describe('Result Utility Functions', () => {
it('Ok() should create an OkResult', () => {
const result = Ok(42);
expect(result).toEqual({ type: 'Ok', value: 42 });
expect(isOk(result)).toBe(true);
expect(isErr(result)).toBe(false);
assert<Equals<OkResult<number>, typeof result>>(result);
});
it('Err() should create an ErrResult', () => {
const error = new Error('Something went wrong');
const result = Err(error);
expect(result).toEqual({ type: 'Err', error });
expect(isOk(result)).toBe(false);
expect(isErr(result)).toBe(true);
assert<Equals<ErrResult<Error>, typeof result>>(result);
});
it('withResult() should return Ok on success', () => {
const fn = () => 42;
const result = withResult(fn);
expect(isOk(result)).toBe(true);
if (isOk(result)) {
expect(result.value).toBe(42);
}
});
it('withResult() should return Err on exception', () => {
const fn = () => {
throw new Error('Failure');
};
const result = withResult(fn);
expect(isErr(result)).toBe(true);
if (isErr(result)) {
expect(result.error.message).toBe('Failure');
}
});
it('withResultAsync() should return Ok on success', async () => {
const fn = promiseify(() => 42);
const result = await withResultAsync(fn);
expect(isOk(result)).toBe(true);
if (isOk(result)) {
expect(result.value).toBe(42);
}
});
it('withResultAsync() should return Err on exception', async () => {
const fn = promiseify(() => {
throw new Error('Async failure');
});
const result = await withResultAsync(fn);
expect(isErr(result)).toBe(true);
if (isErr(result)) {
expect(result.error.message).toBe('Async failure');
}
});
});

View File

@@ -1,89 +0,0 @@
/**
* Represents a successful result.
* @template T The type of the value.
*/
export type OkResult<T> = { type: 'Ok'; value: T };
/**
* Represents a failed result.
* @template E The type of the error.
*/
export type ErrResult<E> = { type: 'Err'; error: E };
/**
* A union type that represents either a successful result (`Ok`) or a failed result (`Err`).
* @template T The type of the value in the `Ok` case.
* @template E The type of the error in the `Err` case.
*/
export type Result<T, E = Error> = OkResult<T> | ErrResult<E>;
/**
* Creates a successful result.
* @template T The type of the value.
* @param {T} value The value to wrap in an `Ok` result.
* @returns {OkResult<T>} The `Ok` result containing the value.
*/
export function Ok<T>(value: T): OkResult<T> {
return { type: 'Ok', value };
}
/**
* Creates a failed result.
* @template E The type of the error.
* @param {E} error The error to wrap in an `Err` result.
* @returns {ErrResult<E>} The `Err` result containing the error.
*/
export function Err<E>(error: E): ErrResult<E> {
return { type: 'Err', error };
}
/**
* Wraps a synchronous function in a try-catch block, returning a `Result`.
* @template T The type of the value returned by the function.
* @param {() => T} fn The function to execute.
* @returns {Result<T>} An `Ok` result if the function succeeds, or an `Err` result if it throws an error.
*/
export function withResult<T>(fn: () => T): Result<T> {
try {
return Ok(fn());
} catch (error) {
return Err(error instanceof Error ? error : new Error(String(error)));
}
}
/**
* Wraps an asynchronous function in a try-catch block, returning a `Promise` of a `Result`.
* @template T The type of the value returned by the function.
* @param {() => Promise<T>} fn The asynchronous function to execute.
* @returns {Promise<Result<T>>} A `Promise` resolving to an `Ok` result if the function succeeds, or an `Err` result if it throws an error.
*/
export async function withResultAsync<T>(fn: () => Promise<T>): Promise<Result<T>> {
try {
const result = await fn();
return Ok(result);
} catch (error) {
return Err(error instanceof Error ? error : new Error(String(error)));
}
}
/**
* Type guard to check if a `Result` is an `Ok` result.
* @template T The type of the value in the `Ok` result.
* @template E The type of the error in the `Err` result.
* @param {Result<T, E>} result The result to check.
* @returns {result is OkResult<T>} `true` if the result is an `Ok` result, otherwise `false`.
*/
export function isOk<T, E>(result: Result<T, E>): result is OkResult<T> {
return result.type === 'Ok';
}
/**
* Type guard to check if a `Result` is an `Err` result.
* @template T The type of the value in the `Ok` result.
* @template E The type of the error in the `Err` result.
* @param {Result<T, E>} result The result to check.
* @returns {result is ErrResult<E>} `true` if the result is an `Err` result, otherwise `false`.
*/
export function isErr<T, E>(result: Result<T, E>): result is ErrResult<E> {
return result.type === 'Err';
}
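For a concrete feel of the API beyond the tests above, a short usage sketch; the parseConfig wrapper is illustrative and assumes the helpers defined in this module:

// Wrap a throwing API once, then branch on the discriminant instead of try/catch.
const parseConfig = (raw: string) => withResult(() => JSON.parse(raw) as { name: string });

const parsed = parseConfig('{"name":"invoke"}');
if (isOk(parsed)) {
  console.log(parsed.value.name); // "invoke"
} else {
  console.error(parsed.error.message); // thrown non-Error values are normalized to Error
}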

View File

@@ -1,3 +1,7 @@
export const stopPropagation = (e: React.MouseEvent) => {
e.stopPropagation();
};
export const preventDefault = (e: React.MouseEvent) => {
e.preventDefault();
};
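The added preventDefault mirrors stopPropagation: exporting module-level handlers gives every consumer the same function identity across renders, unlike inline arrows. A small TSX sketch; the component, its props, and the import path are assumed for illustration:

import { preventDefault, stopPropagation } from 'common/util/stopPropagation'; // path assumed
import { memo } from 'react';

// Stable handler references mean memoized children receive identical props
// on every render and can skip reconciliation.
export const Swatch = memo(({ onSelect }: { onSelect: () => void }) => (
  <div onClick={onSelect} onContextMenu={preventDefault} onMouseDown={stopPropagation} />
));
Swatch.displayName = 'Swatch';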

View File

@@ -1,6 +1,5 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox, ConfirmationAlertDialog, Flex, FormControl, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
@@ -19,17 +18,12 @@ const selectImagesToChange = createMemoizedSelector(
(changeBoardModal) => changeBoardModal.imagesToChange
);
const selectIsModalOpen = createSelector(
selectChangeBoardModalSlice,
(changeBoardModal) => changeBoardModal.isModalOpen
);
const ChangeBoardModal = () => {
const dispatch = useAppDispatch();
const [selectedBoard, setSelectedBoard] = useState<string | null>();
const queryArgs = useAppSelector(selectListBoardsQueryArgs);
const { data: boards, isFetching } = useListAllBoardsQuery(queryArgs);
const isModalOpen = useAppSelector(selectIsModalOpen);
const isModalOpen = useAppSelector((s) => s.changeBoardModal.isModalOpen);
const imagesToChange = useAppSelector(selectImagesToChange);
const [addImagesToBoard] = useAddImagesToBoardMutation();
const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation();
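
A note on the selector change above: selecting a primitive inline is safe because useAppSelector re-renders only when the selected value changes, while derived arrays and objects still need memoization so each store update does not hand back a fresh reference. A sketch with an illustrative state shape:

import { createSelector } from '@reduxjs/toolkit';

type RootState = { changeBoardModal: { isModalOpen: boolean; imagesToChange: string[] } };

// Fine inline: booleans compare by value, so no memoization is needed.
const selectIsModalOpen = (s: RootState) => s.changeBoardModal.isModalOpen;

// Needs memoization: .filter() would otherwise return a new array on every call.
const selectPendingImages = createSelector(
  (s: RootState) => s.changeBoardModal.imagesToChange,
  (images) => images.filter((name) => name.length > 0)
);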

View File

@@ -6,7 +6,7 @@ import {
ipaAdded,
rasterLayerAdded,
rgAdded,
} from 'features/controlLayers/store/canvasSlice';
} from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

View File

@@ -1,5 +1,6 @@
import { Flex } from '@invoke-ai/ui-library';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { CanvasEntityOpacity } from 'features/controlLayers/components/common/CanvasEntityOpacity';
import { ControlLayerEntityList } from 'features/controlLayers/components/ControlLayer/ControlLayerEntityList';
import { InpaintMaskList } from 'features/controlLayers/components/InpaintMask/InpaintMaskList';
import { IPAdapterList } from 'features/controlLayers/components/IPAdapter/IPAdapterList';
@@ -10,7 +11,8 @@ import { memo } from 'react';
export const CanvasEntityList = memo(() => {
return (
<ScrollableContent>
<Flex flexDir="column" gap={2} data-testid="control-layers-layer-list" w="full" h="full">
<Flex flexDir="column" gap={4} pt={2} data-testid="control-layers-layer-list">
<CanvasEntityOpacity />
<InpaintMaskList />
<RegionalGuidanceEntityList />
<IPAdapterList />

View File

@@ -1,20 +0,0 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { EntityListActionBarAddLayerButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuButton';
import { EntityListActionBarDeleteButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarDeleteButton';
import { EntityListActionBarSelectedEntityFill } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityFill';
import { SelectedEntityOpacity } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityOpacity';
import { memo } from 'react';
export const EntityListActionBar = memo(() => {
return (
<Flex w="full" py={1} px={1} gap={2} alignItems="center">
<SelectedEntityOpacity />
<Spacer />
<EntityListActionBarSelectedEntityFill />
<EntityListActionBarAddLayerButton />
<EntityListActionBarDeleteButton />
</Flex>
);
});
EntityListActionBar.displayName = 'EntityListActionBar';

View File

@@ -1,28 +0,0 @@
import { IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { CanvasEntityListMenuItems } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuItems';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
export const EntityListActionBarAddLayerButton = memo(() => {
const { t } = useTranslation();
return (
<Menu>
<MenuButton
as={IconButton}
size="sm"
tooltip={t('controlLayers.addLayer')}
aria-label={t('controlLayers.addLayer')}
icon={<PiPlusBold />}
variant="ghost"
data-testid="control-layers-add-layer-menu-button"
/>
<MenuList>
<CanvasEntityListMenuItems />
</MenuList>
</Menu>
);
});
EntityListActionBarAddLayerButton.displayName = 'EntityListActionBarAddLayerButton';

View File

@@ -1,54 +0,0 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import {
controlLayerAdded,
inpaintMaskAdded,
ipaAdded,
rasterLayerAdded,
rgAdded,
} from 'features/controlLayers/store/canvasSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
export const CanvasEntityListMenuItems = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const addInpaintMask = useCallback(() => {
dispatch(inpaintMaskAdded({ isSelected: true }));
}, [dispatch]);
const addRegionalGuidance = useCallback(() => {
dispatch(rgAdded({ isSelected: true }));
}, [dispatch]);
const addRasterLayer = useCallback(() => {
dispatch(rasterLayerAdded({ isSelected: true }));
}, [dispatch]);
const addControlLayer = useCallback(() => {
dispatch(controlLayerAdded({ isSelected: true }));
}, [dispatch]);
const addIPAdapter = useCallback(() => {
dispatch(ipaAdded({ isSelected: true }));
}, [dispatch]);
return (
<>
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
{t('controlLayers.inpaintMask', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
{t('controlLayers.regionalGuidance', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
{t('controlLayers.rasterLayer', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
{t('controlLayers.controlLayer', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
{t('controlLayers.ipAdapter', { count: 1 })}
</MenuItem>
</>
);
});
CanvasEntityListMenuItems.displayName = 'CanvasEntityListMenuItems';
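
Incidentally, the { count: 1 } option in the t() calls above relies on i18next plural resolution to pick the singular form of each label. A self-contained sketch (the resource keys are illustrative, not the app's real locale files):

import i18next from 'i18next';

i18next
  .init({
    lng: 'en',
    resources: {
      en: { translation: { inpaintMask_one: 'Inpaint Mask', inpaintMask_other: 'Inpaint Masks' } },
    },
  })
  .then(() => {
    console.log(i18next.t('inpaintMask', { count: 1 })); // "Inpaint Mask"
    console.log(i18next.t('inpaintMask', { count: 3 })); // "Inpaint Masks"
  });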

View File

@@ -1,39 +0,0 @@
import { IconButton, useShiftModifier } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { allEntitiesDeleted, entityDeleted } from 'features/controlLayers/store/canvasSlice';
import { selectEntityCount, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';
export const EntityListActionBarDeleteButton = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const entityCount = useAppSelector(selectEntityCount);
const shift = useShiftModifier();
const onClick = useCallback(() => {
if (shift) {
dispatch(allEntitiesDeleted());
return;
}
if (!selectedEntityIdentifier) {
return;
}
dispatch(entityDeleted({ entityIdentifier: selectedEntityIdentifier }));
}, [dispatch, selectedEntityIdentifier, shift]);
return (
<IconButton
onClick={onClick}
isDisabled={shift ? entityCount === 0 : !selectedEntityIdentifier}
size="sm"
variant="ghost"
aria-label={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
tooltip={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
icon={<PiTrashSimpleFill />}
/>
);
});
EntityListActionBarDeleteButton.displayName = 'EntityListActionBarDeleteButton';
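
useShiftModifier comes from @invoke-ai/ui-library; here is a hedged re-implementation of what such a hook plausibly does, to make the shift-click-deletes-all behavior above self-explanatory (a sketch, not the library's actual code):

import { useEffect, useState } from 'react';

export const useShiftModifierSketch = (): boolean => {
  const [shift, setShift] = useState(false);
  useEffect(() => {
    // Both events carry the current modifier state, so one handler covers press and release.
    const onKey = (e: KeyboardEvent) => setShift(e.shiftKey);
    window.addEventListener('keydown', onKey);
    window.addEventListener('keyup', onKey);
    return () => {
      window.removeEventListener('keydown', onKey);
      window.removeEventListener('keyup', onKey);
    };
  }, []);
  return shift;
};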

View File

@@ -1,70 +0,0 @@
import { Box, Flex, Popover, PopoverBody, PopoverContent, PopoverTrigger, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import RgbColorPicker from 'common/components/RgbColorPicker';
import { rgbColorToString } from 'common/util/colorCodeTransformers';
import { MaskFillStyle } from 'features/controlLayers/components/common/MaskFillStyle';
import { entityFillColorChanged, entityFillStyleChanged } from 'features/controlLayers/store/canvasSlice';
import { selectSelectedEntityFill, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { type FillStyle, isMaskEntityIdentifier, type RgbColor } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const EntityListActionBarSelectedEntityFill = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
const fill = useAppSelector(selectSelectedEntityFill);
const onChangeFillColor = useCallback(
(color: RgbColor) => {
if (!selectedEntityIdentifier) {
return;
}
if (!isMaskEntityIdentifier(selectedEntityIdentifier)) {
return;
}
dispatch(entityFillColorChanged({ entityIdentifier: selectedEntityIdentifier, color }));
},
[dispatch, selectedEntityIdentifier]
);
const onChangeFillStyle = useCallback(
(style: FillStyle) => {
if (!selectedEntityIdentifier) {
return;
}
if (!isMaskEntityIdentifier(selectedEntityIdentifier)) {
return;
}
dispatch(entityFillStyleChanged({ entityIdentifier: selectedEntityIdentifier, style }));
},
[dispatch, selectedEntityIdentifier]
);
if (!selectedEntityIdentifier || !fill) {
return null;
}
return (
<Popover isLazy>
<PopoverTrigger>
<Flex role="button" aria-label={t('controlLayers.maskFill')} tabIndex={-1} w={8} h={8}>
<Tooltip label={t('controlLayers.maskFill')}>
<Flex w="full" h="full" alignItems="center" justifyContent="center">
<Box borderRadius="full" w={6} h={6} borderWidth={1} bg={rgbColorToString(fill.color)} />
</Flex>
</Tooltip>
</Flex>
</PopoverTrigger>
<PopoverContent>
<PopoverBody minH={64}>
<Flex flexDir="column" gap={4}>
<RgbColorPicker color={fill.color} onChange={onChangeFillColor} withNumberInput />
<MaskFillStyle style={fill.style} onChange={onChangeFillStyle} />
</Flex>
</PopoverBody>
</PopoverContent>
</Popover>
);
});
EntityListActionBarSelectedEntityFill.displayName = 'EntityListActionBarSelectedEntityFill';

View File

@@ -0,0 +1,77 @@
import { IconButton, Menu, MenuButton, MenuDivider, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
allEntitiesDeleted,
controlLayerAdded,
inpaintMaskAdded,
ipaAdded,
rasterLayerAdded,
rgAdded,
} from 'features/controlLayers/store/canvasV2Slice';
import { selectEntityCount } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDotsThreeOutlineFill, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';
export const CanvasEntityListMenu = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const hasEntities = useAppSelector((s) => {
const count = selectEntityCount(s);
return count > 0;
});
const addInpaintMask = useCallback(() => {
dispatch(inpaintMaskAdded({ isSelected: true }));
}, [dispatch]);
const addRegionalGuidance = useCallback(() => {
dispatch(rgAdded({ isSelected: true }));
}, [dispatch]);
const addRasterLayer = useCallback(() => {
dispatch(rasterLayerAdded({ isSelected: true }));
}, [dispatch]);
const addControlLayer = useCallback(() => {
dispatch(controlLayerAdded({ isSelected: true }));
}, [dispatch]);
const addIPAdapter = useCallback(() => {
dispatch(ipaAdded({ isSelected: true }));
}, [dispatch]);
const deleteAll = useCallback(() => {
dispatch(allEntitiesDeleted());
}, [dispatch]);
return (
<Menu>
<MenuButton
as={IconButton}
aria-label={t('accessibility.menu')}
icon={<PiDotsThreeOutlineFill />}
variant="link"
data-testid="control-layers-add-layer-menu-button"
alignSelf="stretch"
/>
<MenuList>
<MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
{t('controlLayers.inpaintMask', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
{t('controlLayers.regionalGuidance', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
{t('controlLayers.rasterLayer', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
{t('controlLayers.controlLayer', { count: 1 })}
</MenuItem>
<MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
{t('controlLayers.ipAdapter', { count: 1 })}
</MenuItem>
<MenuDivider />
<MenuItem onClick={deleteAll} icon={<PiTrashSimpleBold />} color="error.300" isDisabled={!hasEntities}>
{t('controlLayers.deleteAll', { count: 1 })}
</MenuItem>
</MenuList>
</Menu>
);
});
CanvasEntityListMenu.displayName = 'CanvasEntityListMenu';

View File

@@ -1,16 +1,13 @@
import { Button, ButtonGroup } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectCanvasSessionSlice, sessionModeChanged } from 'features/controlLayers/store/canvasSessionSlice';
import { sessionModeChanged } from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
const selectCanvasMode = createSelector(selectCanvasSessionSlice, (canvasSession) => canvasSession.mode);
export const CanvasModeSwitcher = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const mode = useAppSelector(selectCanvasMode);
const mode = useAppSelector((s) => s.canvasV2.session.mode);
const onClickGenerate = useCallback(() => dispatch(sessionModeChanged({ mode: 'generate' })), [dispatch]);
const onClickCompose = useCallback(() => dispatch(sessionModeChanged({ mode: 'compose' })), [dispatch]);

View File

@@ -1,22 +1,16 @@
import { Divider, Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasAddEntityButtons } from 'features/controlLayers/components/CanvasAddEntityButtons';
import { CanvasEntityList } from 'features/controlLayers/components/CanvasEntityList/CanvasEntityList';
import { EntityListActionBar } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBar';
import { CanvasEntityList } from 'features/controlLayers/components/CanvasEntityList';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectHasEntities } from 'features/controlLayers/store/selectors';
import { selectEntityCount } from 'features/controlLayers/store/selectors';
import { memo } from 'react';
export const CanvasPanelContent = memo(() => {
const hasEntities = useAppSelector(selectHasEntities);
const hasEntities = useAppSelector((s) => selectEntityCount(s) > 0);
return (
<CanvasManagerProviderGate>
<Flex flexDir="column" gap={2} w="full" h="full">
<EntityListActionBar />
<Divider py={0} />
{!hasEntities && <CanvasAddEntityButtons />}
{hasEntities && <CanvasEntityList />}
</Flex>
{!hasEntities && <CanvasAddEntityButtons />}
{hasEntities && <CanvasEntityList />}
</CanvasManagerProviderGate>
);
});

View File

@@ -14,9 +14,10 @@ import {
PopoverTrigger,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { $canvasManager } from 'features/controlLayers/konva/CanvasManager';
import { MAX_CANVAS_SCALE, MIN_CANVAS_SCALE } from 'features/controlLayers/konva/constants';
import { snapToNearest } from 'features/controlLayers/konva/util';
import { $stageAttrs } from 'features/controlLayers/store/canvasV2Slice';
import { clamp, round } from 'lodash-es';
import { computed } from 'nanostores';
import type { KeyboardEvent } from 'react';
@@ -71,10 +72,12 @@ const sliderDefaultValue = mapScaleToSliderValue(100);
const snapCandidates = marks.slice(1, marks.length - 1);
const $scale = computed($stageAttrs, (attrs) => attrs.scale);
export const CanvasScale = memo(() => {
const { t } = useTranslation();
const canvasManager = useCanvasManager();
const scale = useStore(computed(canvasManager.stateApi.$stageAttrs, (attrs) => attrs.scale));
const canvasManager = useStore($canvasManager);
const scale = useStore($scale);
const [localScale, setLocalScale] = useState(scale * 100);
const onChangeSlider = useCallback(
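
The $scale = computed($stageAttrs, ...) line above is the nanostores pattern for deriving a value from an atom at module scope rather than inside the component. A standalone sketch (the store shape is illustrative):

import { atom, computed } from 'nanostores';

const $stageAttrs = atom({ x: 0, y: 0, scale: 1 });
const $scale = computed($stageAttrs, (attrs) => attrs.scale);

// subscribe fires immediately with 1, then again with 0.5 after set().
const unsubscribe = $scale.subscribe((scale) => console.log('scale:', scale));
$stageAttrs.set({ x: 0, y: 0, scale: 0.5 });
unsubscribe();

In a component, useStore($scale) from @nanostores/react reads the same store and re-renders only when the derived scale actually changes.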

View File

@@ -2,7 +2,6 @@ import { Spacer } from '@invoke-ai/ui-library';
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
@@ -29,7 +28,6 @@ export const ControlLayer = memo(({ id }: Props) => {
<CanvasEntityEditableTitle />
<Spacer />
<ControlLayerBadges />
<CanvasEntityIsLockedToggle />
<CanvasEntityEnabledToggle />
</CanvasEntityHeader>
<CanvasEntitySettingsWrapper>

View File

@@ -1,15 +1,15 @@
import { Badge } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { selectControlLayerEntityOrThrow } from 'features/controlLayers/store/controlLayersReducers';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
export const ControlLayerBadges = memo(() => {
const entityIdentifier = useEntityIdentifierContext('control_layer');
const { id } = useEntityIdentifierContext();
const { t } = useTranslation();
const withTransparencyEffect = useAppSelector(
(s) => selectEntityOrThrow(selectCanvasSlice(s), entityIdentifier).withTransparencyEffect
(s) => selectControlLayerEntityOrThrow(s.canvasV2, id).withTransparencyEffect
);
return (

View File

@@ -11,42 +11,42 @@ import {
controlLayerControlModeChanged,
controlLayerModelChanged,
controlLayerWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
} from 'features/controlLayers/store/canvasV2Slice';
import type { ControlModeV2 } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import type { ControlNetModelConfig, T2IAdapterModelConfig } from 'services/api/types';
export const ControlLayerControlAdapter = memo(() => {
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('control_layer');
const entityIdentifier = useEntityIdentifierContext();
const controlAdapter = useControlLayerControlAdapter(entityIdentifier);
const onChangeBeginEndStepPct = useCallback(
(beginEndStepPct: [number, number]) => {
dispatch(controlLayerBeginEndStepPctChanged({ entityIdentifier, beginEndStepPct }));
dispatch(controlLayerBeginEndStepPctChanged({ id: entityIdentifier.id, beginEndStepPct }));
},
[dispatch, entityIdentifier]
[dispatch, entityIdentifier.id]
);
const onChangeControlMode = useCallback(
(controlMode: ControlModeV2) => {
dispatch(controlLayerControlModeChanged({ entityIdentifier, controlMode }));
dispatch(controlLayerControlModeChanged({ id: entityIdentifier.id, controlMode }));
},
[dispatch, entityIdentifier]
[dispatch, entityIdentifier.id]
);
const onChangeWeight = useCallback(
(weight: number) => {
dispatch(controlLayerWeightChanged({ entityIdentifier, weight }));
dispatch(controlLayerWeightChanged({ id: entityIdentifier.id, weight }));
},
[dispatch, entityIdentifier]
[dispatch, entityIdentifier.id]
);
const onChangeModel = useCallback(
(modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => {
dispatch(controlLayerModelChanged({ entityIdentifier, modelConfig }));
dispatch(controlLayerModelChanged({ id: entityIdentifier.id, modelConfig }));
},
[dispatch, entityIdentifier]
[dispatch, entityIdentifier.id]
);
return (

View File

@@ -3,7 +3,6 @@ import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
@@ -19,7 +18,7 @@ export const ControlLayerControlAdapterModel = memo(({ modelKey, onChange: onCha
const { t } = useTranslation();
const entityIdentifier = useEntityIdentifierContext();
const canvasManager = useCanvasManager();
const currentBaseModel = useAppSelector(selectBase);
const currentBaseModel = useAppSelector((s) => s.canvasV2.params.model?.base);
const [modelConfigs, { isLoading }] = useControlNetAndT2IAdapterModels();
const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);

View File

@@ -1,22 +1,17 @@
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
import { ControlLayer } from 'features/controlLayers/components/ControlLayer/ControlLayer';
import { mapId } from 'features/controlLayers/konva/util';
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { memo } from 'react';
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
return canvas.controlLayers.entities.map(mapId).reverse();
});
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
return selectedEntityIdentifier?.type === 'control_layer';
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
return canvasV2.controlLayers.entities.map(mapId).reverse();
});
export const ControlLayerEntityList = memo(() => {
const isSelected = useAppSelector(selectIsSelected);
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'control_layer'));
const layerIds = useAppSelector(selectEntityIds);
if (layerIds.length === 0) {

View File

@@ -1,7 +1,7 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { controlLayerConvertedToRasterLayer } from 'features/controlLayers/store/canvasSlice';
import { controlLayerConvertedToRasterLayer } from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiLightningBold } from 'react-icons/pi';
@@ -9,11 +9,11 @@ import { PiLightningBold } from 'react-icons/pi';
export const ControlLayerMenuItemsControlToRaster = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('control_layer');
const entityIdentifier = useEntityIdentifierContext();
const convertControlLayerToRasterLayer = useCallback(() => {
dispatch(controlLayerConvertedToRasterLayer({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
dispatch(controlLayerConvertedToRasterLayer({ id: entityIdentifier.id }));
}, [dispatch, entityIdentifier.id]);
return (
<MenuItem onClick={convertControlLayerToRasterLayer} icon={<PiLightningBold />}>

View File

@@ -2,8 +2,11 @@ import { MenuItem } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { controlLayerWithTransparencyEffectToggled } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import {
controlLayerWithTransparencyEffectToggled,
selectCanvasV2Slice,
} from 'features/controlLayers/store/canvasV2Slice';
import { selectControlLayerEntityOrThrow } from 'features/controlLayers/store/controlLayersReducers';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDropHalfBold } from 'react-icons/pi';
@@ -11,18 +14,18 @@ import { PiDropHalfBold } from 'react-icons/pi';
export const ControlLayerMenuItemsTransparencyEffect = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('control_layer');
const entityIdentifier = useEntityIdentifierContext();
const selectWithTransparencyEffect = useMemo(
() =>
createSelector(selectCanvasSlice, (canvas) => {
const entity = selectEntityOrThrow(canvas, entityIdentifier);
createSelector(selectCanvasV2Slice, (canvasV2) => {
const entity = selectControlLayerEntityOrThrow(canvasV2, entityIdentifier.id);
return entity.withTransparencyEffect;
}),
[entityIdentifier]
[entityIdentifier.id]
);
const withTransparencyEffect = useAppSelector(selectWithTransparencyEffect);
const onToggle = useCallback(() => {
dispatch(controlLayerWithTransparencyEffectToggled({ entityIdentifier }));
dispatch(controlLayerWithTransparencyEffectToggled({ id: entityIdentifier.id }));
}, [dispatch, entityIdentifier]);
return (

View File

@@ -33,11 +33,9 @@ export const CanvasEditor = memo(() => {
<ControlLayersToolbar />
<StageComponent />
<Flex position="absolute" bottom={16} gap={2} align="center" justify="center">
<CanvasManagerProviderGate>
<StagingAreaIsStagingGate>
<StagingAreaToolbar />
</StagingAreaIsStagingGate>
</CanvasManagerProviderGate>
<StagingAreaIsStagingGate>
<StagingAreaToolbar />
</StagingAreaIsStagingGate>
</Flex>
<Flex position="absolute" bottom={16}>
<CanvasManagerProviderGate>

View File

@@ -1,12 +1,14 @@
/* eslint-disable i18next/no-literal-string */
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasModeSwitcher } from 'features/controlLayers/components/CanvasModeSwitcher';
import { CanvasResetViewButton } from 'features/controlLayers/components/CanvasResetViewButton';
import { CanvasScale } from 'features/controlLayers/components/CanvasScale';
import { CanvasSettingsPopover } from 'features/controlLayers/components/Settings/CanvasSettingsPopover';
import { ToolBrushWidth } from 'features/controlLayers/components/Tool/ToolBrushWidth';
import { ToolChooser } from 'features/controlLayers/components/Tool/ToolChooser';
import { ToolEraserWidth } from 'features/controlLayers/components/Tool/ToolEraserWidth';
import { ToolFillColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
import { ToolSettings } from 'features/controlLayers/components/Tool/ToolSettings';
import { UndoRedoButtonGroup } from 'features/controlLayers/components/UndoRedoButtonGroup';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton';
@@ -14,13 +16,15 @@ import { ViewerToggleMenu } from 'features/gallery/components/ImageViewer/Viewer
import { memo } from 'react';
export const ControlLayersToolbar = memo(() => {
const tool = useAppSelector((s) => s.canvasV2.tool.selected);
return (
<CanvasManagerProviderGate>
<Flex w="full" gap={2} alignItems="center">
<ToggleProgressButton />
<ToolChooser />
<Spacer />
<ToolSettings />
{tool === 'brush' && <ToolBrushWidth />}
{tool === 'eraser' && <ToolEraserWidth />}
<Spacer />
<CanvasScale />
<CanvasResetViewButton />

View File

@@ -1,17 +1,20 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { FilterConfig } from 'features/controlLayers/store/types';
import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/types';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { configSelector } from 'features/system/store/configSelectors';
import { includes, map } from 'lodash-es';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';
const selectDisabledProcessors = createSelector(selectConfigSlice, (config) => config.sd.disabledControlNetProcessors);
const selectDisabledProcessors = createMemoizedSelector(
configSelector,
(config) => config.sd.disabledControlNetProcessors
);
type Props = {
filterType: FilterConfig['type'];

View File

@@ -1,23 +1,25 @@
import { Box, Flex, Text } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import {
$isDrawing,
$isMouseDown,
$lastAddedPoint,
$lastCursorPos,
$lastMouseDownPos,
$stageAttrs,
} from 'features/controlLayers/store/canvasV2Slice';
import { round } from 'lodash-es';
import { memo } from 'react';
const selectBbox = createSelector(selectCanvasSlice, (canvas) => canvas.bbox);
export const HeadsUpDisplay = memo(() => {
const canvasManager = useCanvasManager();
const stageAttrs = useStore(canvasManager.stateApi.$stageAttrs);
const cursorPos = useStore(canvasManager.stateApi.$lastCursorPos);
const isDrawing = useStore(canvasManager.stateApi.$isDrawing);
const isMouseDown = useStore(canvasManager.stateApi.$isMouseDown);
const lastMouseDownPos = useStore(canvasManager.stateApi.$lastMouseDownPos);
const lastAddedPoint = useStore(canvasManager.stateApi.$lastAddedPoint);
const bbox = useAppSelector(selectBbox);
const stageAttrs = useStore($stageAttrs);
const cursorPos = useStore($lastCursorPos);
const isDrawing = useStore($isDrawing);
const isMouseDown = useStore($isMouseDown);
const lastMouseDownPos = useStore($lastMouseDownPos);
const lastAddedPoint = useStore($lastAddedPoint);
const bbox = useAppSelector((s) => s.canvasV2.bbox);
return (
<Flex flexDir="column" bg="blackAlpha.400" borderBottomEndRadius="base" p={2} minW={64} gap={2}>

View File

@@ -5,7 +5,7 @@ import { $isConnected } from 'app/hooks/useSocketIO';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasV2Slice';
import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
import type { ImageWithDims } from 'features/controlLayers/store/types';
import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types';

View File

@@ -1,22 +1,18 @@
/* eslint-disable i18next/no-literal-string */
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
import { IPAdapter } from 'features/controlLayers/components/IPAdapter/IPAdapter';
import { mapId } from 'features/controlLayers/konva/util';
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { memo } from 'react';
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
return canvas.ipAdapters.entities.map(mapId).reverse();
});
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
return selectedEntityIdentifier?.type === 'ip_adapter';
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
return canvasV2.ipAdapters.entities.map(mapId).reverse();
});
export const IPAdapterList = memo(() => {
const isSelected = useAppSelector(selectIsSelected);
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'ip_adapter'));
const ipaIds = useAppSelector(selectEntityIds);
if (ipaIds.length === 0) {

View File

@@ -2,7 +2,6 @@ import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
import { selectBase } from 'features/controlLayers/store/paramsSlice';
import type { CLIPVisionModelV2 } from 'features/controlLayers/store/types';
import { isCLIPVisionModelV2 } from 'features/controlLayers/store/types';
import { memo, useCallback, useMemo } from 'react';
@@ -25,7 +24,7 @@ type Props = {
export const IPAdapterModel = memo(({ modelKey, onChangeModel, clipVisionModel, onChangeCLIPVisionModel }: Props) => {
const { t } = useTranslation();
const currentBaseModel = useAppSelector(selectBase);
const currentBaseModel = useAppSelector((s) => s.canvasV2.params.model?.base);
const [modelConfigs, { isLoading }] = useIPAdapterModels();
const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);

View File

@@ -1,5 +1,4 @@
import { Box, Flex } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginEndStepPct';
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
@@ -13,8 +12,8 @@ import {
ipaMethodChanged,
ipaModelChanged,
ipaWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
} from 'features/controlLayers/store/canvasV2Slice';
import { selectIPAdapterEntityOrThrow } from 'features/controlLayers/store/ipAdaptersReducers';
import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { IPAImageDropData } from 'features/dnd/types';
import { memo, useCallback, useMemo } from 'react';
@@ -25,63 +24,53 @@ import { IPAdapterModel } from './IPAdapterModel';
export const IPAdapterSettings = memo(() => {
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('ip_adapter');
const selectIPAdapter = useMemo(
() => createSelector(selectCanvasSlice, (s) => selectEntityOrThrow(s, entityIdentifier).ipAdapter),
[entityIdentifier]
);
const ipAdapter = useAppSelector(selectIPAdapter);
const { id } = useEntityIdentifierContext();
const ipAdapter = useAppSelector((s) => selectIPAdapterEntityOrThrow(s.canvasV2, id).ipAdapter);
const onChangeBeginEndStepPct = useCallback(
(beginEndStepPct: [number, number]) => {
dispatch(ipaBeginEndStepPctChanged({ entityIdentifier, beginEndStepPct }));
dispatch(ipaBeginEndStepPctChanged({ id, beginEndStepPct }));
},
[dispatch, entityIdentifier]
[dispatch, id]
);
const onChangeWeight = useCallback(
(weight: number) => {
dispatch(ipaWeightChanged({ entityIdentifier, weight }));
dispatch(ipaWeightChanged({ id, weight }));
},
[dispatch, entityIdentifier]
[dispatch, id]
);
const onChangeIPMethod = useCallback(
(method: IPMethodV2) => {
dispatch(ipaMethodChanged({ entityIdentifier, method }));
dispatch(ipaMethodChanged({ id, method }));
},
[dispatch, entityIdentifier]
[dispatch, id]
);
const onChangeModel = useCallback(
(modelConfig: IPAdapterModelConfig) => {
dispatch(ipaModelChanged({ entityIdentifier, modelConfig }));
dispatch(ipaModelChanged({ id, modelConfig }));
},
[dispatch, entityIdentifier]
[dispatch, id]
);
const onChangeCLIPVisionModel = useCallback(
(clipVisionModel: CLIPVisionModelV2) => {
dispatch(ipaCLIPVisionModelChanged({ entityIdentifier, clipVisionModel }));
dispatch(ipaCLIPVisionModelChanged({ id, clipVisionModel }));
},
[dispatch, entityIdentifier]
[dispatch, id]
);
const onChangeImage = useCallback(
(imageDTO: ImageDTO | null) => {
dispatch(ipaImageChanged({ entityIdentifier, imageDTO }));
dispatch(ipaImageChanged({ id, imageDTO }));
},
[dispatch, entityIdentifier]
[dispatch, id]
);
const droppableData = useMemo<IPAImageDropData>(
() => ({ actionType: 'SET_IPA_IMAGE', context: { id: entityIdentifier.id }, id: entityIdentifier.id }),
[entityIdentifier.id]
);
const postUploadAction = useMemo<IPALayerImagePostUploadAction>(
() => ({ type: 'SET_IPA_IMAGE', id: entityIdentifier.id }),
[entityIdentifier.id]
);
const droppableData = useMemo<IPAImageDropData>(() => ({ actionType: 'SET_IPA_IMAGE', context: { id }, id }), [id]);
const postUploadAction = useMemo<IPALayerImagePostUploadAction>(() => ({ type: 'SET_IPA_IMAGE', id }), [id]);
return (
<CanvasEntitySettingsWrapper>
@@ -106,7 +95,7 @@ export const IPAdapterSettings = memo(() => {
<IPAdapterImagePreview
image={ipAdapter.image ?? null}
onChangeImage={onChangeImage}
ipAdapterId={entityIdentifier.id}
ipAdapterId={id}
droppableData={droppableData}
postUploadAction={postUploadAction}
/>

View File

@@ -2,7 +2,6 @@ import { Spacer } from '@invoke-ai/ui-library';
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { EntityMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
@@ -10,6 +9,8 @@ import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityI
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';
import { InpaintMaskMaskFillColorPicker } from './InpaintMaskMaskFillColorPicker';
type Props = {
id: string;
};
@@ -25,7 +26,7 @@ export const InpaintMask = memo(({ id }: Props) => {
<CanvasEntityPreviewImage />
<CanvasEntityEditableTitle />
<Spacer />
<CanvasEntityIsLockedToggle />
<InpaintMaskMaskFillColorPicker />
<CanvasEntityEnabledToggle />
</CanvasEntityHeader>
</CanvasEntityContainer>

View File

@@ -1,22 +1,17 @@
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
import { InpaintMask } from 'features/controlLayers/components/InpaintMask/InpaintMask';
import { mapId } from 'features/controlLayers/konva/util';
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { memo } from 'react';
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
return canvas.inpaintMasks.entities.map(mapId).reverse();
});
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
return selectedEntityIdentifier?.type === 'inpaint_mask';
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
return canvasV2.inpaintMasks.entities.map(mapId).reverse();
});
export const InpaintMaskList = memo(() => {
const isSelected = useAppSelector(selectIsSelected);
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'inpaint_mask'));
const entityIds = useAppSelector(selectEntityIds);
if (entityIds.length === 0) {

View File

@@ -0,0 +1,57 @@
import { Flex, Popover, PopoverBody, PopoverContent, PopoverTrigger } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import RgbColorPicker from 'common/components/RgbColorPicker';
import { rgbColorToString } from 'common/util/colorCodeTransformers';
import { MaskFillStyle } from 'features/controlLayers/components/common/MaskFillStyle';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { inpaintMaskFillColorChanged, inpaintMaskFillStyleChanged } from 'features/controlLayers/store/canvasV2Slice';
import { selectInpaintMaskEntityOrThrow } from 'features/controlLayers/store/inpaintMaskReducers';
import type { FillStyle, RgbColor } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
export const InpaintMaskMaskFillColorPicker = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext();
const fill = useAppSelector((s) => selectInpaintMaskEntityOrThrow(s.canvasV2, entityIdentifier.id).fill);
const onChangeFillColor = useCallback(
(color: RgbColor) => {
dispatch(inpaintMaskFillColorChanged({ entityIdentifier, color }));
},
[dispatch, entityIdentifier]
);
const onChangeFillStyle = useCallback(
(style: FillStyle) => {
dispatch(inpaintMaskFillStyleChanged({ entityIdentifier, style }));
},
[dispatch, entityIdentifier]
);
return (
<Popover isLazy>
<PopoverTrigger>
<Flex
role="button"
aria-label={t('controlLayers.maskPreviewColor')}
borderRadius="full"
borderWidth={1}
bg={rgbColorToString(fill.color)}
w="22px"
h="22px"
tabIndex={-1}
/>
</PopoverTrigger>
<PopoverContent>
<PopoverBody minH={64}>
<Flex flexDir="column" gap={4}>
<RgbColorPicker color={fill.color} onChange={onChangeFillColor} withNumberInput />
<MaskFillStyle style={fill.style} onChange={onChangeFillStyle} />
</Flex>
</PopoverBody>
</PopoverContent>
</Popover>
);
});
InpaintMaskMaskFillColorPicker.displayName = 'InpaintMaskMaskFillColorPicker';
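
rgbColorToString, imported above from common/util/colorCodeTransformers, turns the stored { r, g, b } object into a CSS color for the swatch background. A hedged sketch of such a transformer (the real implementation may differ):

type RgbColor = { r: number; g: number; b: number };

const rgbColorToString = ({ r, g, b }: RgbColor): string => `rgb(${r}, ${g}, ${b})`;

console.log(rgbColorToString({ r: 255, g: 0, b: 0 })); // "rgb(255, 0, 0)"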

View File

@@ -2,7 +2,6 @@ import { Spacer } from '@invoke-ai/ui-library';
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { EntityLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
@@ -25,7 +24,6 @@ export const RasterLayer = memo(({ id }: Props) => {
<CanvasEntityPreviewImage />
<CanvasEntityEditableTitle />
<Spacer />
<CanvasEntityIsLockedToggle />
<CanvasEntityEnabledToggle />
</CanvasEntityHeader>
</CanvasEntityContainer>

View File

@@ -1,21 +1,17 @@
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
import { RasterLayer } from 'features/controlLayers/components/RasterLayer/RasterLayer';
import { mapId } from 'features/controlLayers/konva/util';
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { memo } from 'react';
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
return canvas.rasterLayers.entities.map(mapId).reverse();
});
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
return selectedEntityIdentifier?.type === 'raster_layer';
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
return canvasV2.rasterLayers.entities.map(mapId).reverse();
});
export const RasterLayerEntityList = memo(() => {
const isSelected = useAppSelector(selectIsSelected);
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'raster_layer'));
const layerIds = useAppSelector(selectEntityIds);
if (layerIds.length === 0) {

View File

@@ -1,7 +1,7 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { rasterLayerConvertedToControlLayer } from 'features/controlLayers/store/canvasSlice';
import { rasterLayerConvertedToControlLayer } from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiLightningBold } from 'react-icons/pi';
@@ -9,11 +9,11 @@ import { PiLightningBold } from 'react-icons/pi';
export const RasterLayerMenuItemsRasterToControl = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('raster_layer');
const entityIdentifier = useEntityIdentifierContext();
const convertRasterLayerToControlLayer = useCallback(() => {
dispatch(rasterLayerConvertedToControlLayer({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
dispatch(rasterLayerConvertedToControlLayer({ id: entityIdentifier.id }));
}, [dispatch, entityIdentifier.id]);
return (
<MenuItem onClick={convertRasterLayerToControlLayer} icon={<PiLightningBold />}>

View File

@@ -2,7 +2,6 @@ import { Spacer } from '@invoke-ai/ui-library';
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
import { RegionalGuidanceBadges } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceBadges';
@@ -12,6 +11,8 @@ import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityI
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
import { memo, useMemo } from 'react';
import { RegionalGuidanceMaskFillColorPicker } from './RegionalGuidanceMaskFillColorPicker';
type Props = {
id: string;
};
@@ -28,7 +29,7 @@ export const RegionalGuidance = memo(({ id }: Props) => {
<CanvasEntityEditableTitle />
<Spacer />
<RegionalGuidanceBadges />
<CanvasEntityIsLockedToggle />
<RegionalGuidanceMaskFillColorPicker />
<CanvasEntityEnabledToggle />
</CanvasEntityHeader>
<RegionalGuidanceSettings />

View File

@@ -1,42 +1,44 @@
import { Button, Flex } from '@invoke-ai/ui-library';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import {
rgIPAdapterAdded,
rgNegativePromptChanged,
rgPositivePromptChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
selectCanvasV2Slice,
} from 'features/controlLayers/store/canvasV2Slice';
import { useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
export const RegionalGuidanceAddPromptsIPAdapterButtons = () => {
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
type AddPromptButtonProps = {
id: string;
};
export const RegionalGuidanceAddPromptsIPAdapterButtons = ({ id }: AddPromptButtonProps) => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const selectValidActions = useMemo(
() =>
createMemoizedSelector(selectCanvasSlice, (canvas) => {
const entity = selectEntityOrThrow(canvas, entityIdentifier);
createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
const rg = canvasV2.regions.entities.find((rg) => rg.id === id);
return {
canAddPositivePrompt: entity?.positivePrompt === null,
canAddNegativePrompt: entity?.negativePrompt === null,
canAddPositivePrompt: rg?.positivePrompt === null,
canAddNegativePrompt: rg?.negativePrompt === null,
};
}),
[entityIdentifier]
[id]
);
const validActions = useAppSelector(selectValidActions);
const addPositivePrompt = useCallback(() => {
dispatch(rgPositivePromptChanged({ entityIdentifier, prompt: '' }));
}, [dispatch, entityIdentifier]);
dispatch(rgPositivePromptChanged({ id, prompt: '' }));
}, [dispatch, id]);
const addNegativePrompt = useCallback(() => {
dispatch(rgNegativePromptChanged({ entityIdentifier, prompt: '' }));
}, [dispatch, entityIdentifier]);
dispatch(rgNegativePromptChanged({ id, prompt: '' }));
}, [dispatch, id]);
const addIPAdapter = useCallback(() => {
dispatch(rgIPAdapterAdded({ entityIdentifier }));
}, [dispatch, entityIdentifier]);
dispatch(rgIPAdapterAdded({ id }));
}, [dispatch, id]);
return (
<Flex w="full" p={2} justifyContent="space-between">

View File

@@ -1,19 +1,14 @@
import { Badge } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { memo, useMemo } from 'react';
import { selectRegionalGuidanceEntityOrThrow } from 'features/controlLayers/store/regionsReducers';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
export const RegionalGuidanceBadges = memo(() => {
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
const { id } = useEntityIdentifierContext();
const { t } = useTranslation();
const selectAutoNegative = useMemo(
() => createSelector(selectCanvasSlice, (canvas) => selectEntityOrThrow(canvas, entityIdentifier).autoNegative),
[entityIdentifier]
);
const autoNegative = useAppSelector(selectAutoNegative);
const autoNegative = useAppSelector((s) => selectRegionalGuidanceEntityOrThrow(s.canvasV2, id).autoNegative);
return (
<>

View File

@@ -1,21 +1,17 @@
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
import { RegionalGuidance } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidance';
import { mapId } from 'features/controlLayers/konva/util';
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { memo } from 'react';
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
return canvas.regions.entities.map(mapId).reverse();
});
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
return selectedEntityIdentifier?.type === 'regional_guidance';
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
return canvasV2.regions.entities.map(mapId).reverse();
});
export const RegionalGuidanceEntityList = memo(() => {
const isSelected = useAppSelector(selectIsSelected);
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'regional_guidance'));
const rgIds = useAppSelector(selectEntityIds);
if (rgIds.length === 0) {

View File

@@ -1,12 +1,10 @@
import { Box, Flex, IconButton, Spacer, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginEndStepPct';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { IPAdapterImagePreview } from 'features/controlLayers/components/IPAdapter/IPAdapterImagePreview';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
import { IPAdapterModel } from 'features/controlLayers/components/IPAdapter/IPAdapterModel';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import {
rgIPAdapterBeginEndStepPctChanged,
rgIPAdapterCLIPVisionModelChanged,
@@ -15,8 +13,8 @@ import {
rgIPAdapterMethodChanged,
rgIPAdapterModelChanged,
rgIPAdapterWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectRegionalGuidanceIPAdapter } from 'features/controlLayers/store/selectors';
} from 'features/controlLayers/store/canvasV2Slice';
import { selectRegionalGuidanceEntityOrThrow } from 'features/controlLayers/store/regionsReducers';
import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { RGIPAdapterImageDropData } from 'features/dnd/types';
import { memo, useCallback, useMemo } from 'react';
@@ -25,80 +23,71 @@ import type { ImageDTO, IPAdapterModelConfig, RGIPAdapterImagePostUploadAction }
import { assert } from 'tsafe';
type Props = {
id: string;
ipAdapterId: string;
ipAdapterNumber: number;
};
export const RegionalGuidanceIPAdapterSettings = memo(({ ipAdapterId, ipAdapterNumber }: Props) => {
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
export const RegionalGuidanceIPAdapterSettings = memo(({ id, ipAdapterId, ipAdapterNumber }: Props) => {
const dispatch = useAppDispatch();
const onDeleteIPAdapter = useCallback(() => {
dispatch(rgIPAdapterDeleted({ entityIdentifier, ipAdapterId }));
}, [dispatch, entityIdentifier, ipAdapterId]);
const selectIPAdapter = useMemo(
() =>
createSelector(selectCanvasSlice, (canvas) => {
const ipAdapter = selectRegionalGuidanceIPAdapter(canvas, entityIdentifier, ipAdapterId);
assert(ipAdapter, `Regional Guidance IP Adapter with id ${ipAdapterId} not found`);
return ipAdapter;
}),
[entityIdentifier, ipAdapterId]
);
const ipAdapter = useAppSelector(selectIPAdapter);
dispatch(rgIPAdapterDeleted({ id, ipAdapterId }));
}, [dispatch, ipAdapterId, id]);
const ipAdapter = useAppSelector((s) => {
const ipa = selectRegionalGuidanceEntityOrThrow(s.canvasV2, id).ipAdapters.find((ipa) => ipa.id === ipAdapterId);
assert(ipa, `Regional Guidance IP Adapter with id ${ipAdapterId} not found`);
return ipa;
});
const onChangeBeginEndStepPct = useCallback(
(beginEndStepPct: [number, number]) => {
dispatch(rgIPAdapterBeginEndStepPctChanged({ entityIdentifier, ipAdapterId, beginEndStepPct }));
dispatch(rgIPAdapterBeginEndStepPctChanged({ id, ipAdapterId, beginEndStepPct }));
},
[dispatch, entityIdentifier, ipAdapterId]
[dispatch, ipAdapterId, id]
);
const onChangeWeight = useCallback(
(weight: number) => {
dispatch(rgIPAdapterWeightChanged({ entityIdentifier, ipAdapterId, weight }));
dispatch(rgIPAdapterWeightChanged({ id, ipAdapterId, weight }));
},
[dispatch, entityIdentifier, ipAdapterId]
[dispatch, ipAdapterId, id]
);
const onChangeIPMethod = useCallback(
(method: IPMethodV2) => {
dispatch(rgIPAdapterMethodChanged({ entityIdentifier, ipAdapterId, method }));
dispatch(rgIPAdapterMethodChanged({ id, ipAdapterId, method }));
},
[dispatch, entityIdentifier, ipAdapterId]
[dispatch, ipAdapterId, id]
);
const onChangeModel = useCallback(
(modelConfig: IPAdapterModelConfig) => {
dispatch(rgIPAdapterModelChanged({ entityIdentifier, ipAdapterId, modelConfig }));
dispatch(rgIPAdapterModelChanged({ id, ipAdapterId, modelConfig }));
},
[dispatch, entityIdentifier, ipAdapterId]
[dispatch, ipAdapterId, id]
);
const onChangeCLIPVisionModel = useCallback(
(clipVisionModel: CLIPVisionModelV2) => {
dispatch(rgIPAdapterCLIPVisionModelChanged({ entityIdentifier, ipAdapterId, clipVisionModel }));
dispatch(rgIPAdapterCLIPVisionModelChanged({ id, ipAdapterId, clipVisionModel }));
},
[dispatch, entityIdentifier, ipAdapterId]
[dispatch, ipAdapterId, id]
);
const onChangeImage = useCallback(
(imageDTO: ImageDTO | null) => {
dispatch(rgIPAdapterImageChanged({ entityIdentifier, ipAdapterId, imageDTO }));
dispatch(rgIPAdapterImageChanged({ id, ipAdapterId, imageDTO }));
},
[dispatch, entityIdentifier, ipAdapterId]
[dispatch, ipAdapterId, id]
);
const droppableData = useMemo<RGIPAdapterImageDropData>(
() => ({
actionType: 'SET_RG_IP_ADAPTER_IMAGE',
context: { id: entityIdentifier.id, ipAdapterId },
id: entityIdentifier.id,
}),
[entityIdentifier.id, ipAdapterId]
() => ({ actionType: 'SET_RG_IP_ADAPTER_IMAGE', context: { id, ipAdapterId }, id }),
[ipAdapterId, id]
);
const postUploadAction = useMemo<RGIPAdapterImagePostUploadAction>(
() => ({ type: 'SET_RG_IP_ADAPTER_IMAGE', id: entityIdentifier.id, ipAdapterId }),
[entityIdentifier.id, ipAdapterId]
() => ({ type: 'SET_RG_IP_ADAPTER_IMAGE', id, ipAdapterId }),
[ipAdapterId, id]
);
return (

View File

@@ -3,23 +3,25 @@ import { EMPTY_ARRAY } from 'app/store/constants';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { RegionalGuidanceIPAdapterSettings } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceIPAdapterSettings';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { selectRegionalGuidanceEntityOrThrow } from 'features/controlLayers/store/regionsReducers';
import { Fragment, memo, useMemo } from 'react';
export const RegionalGuidanceIPAdapters = memo(() => {
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
type Props = {
id: string;
};
export const RegionalGuidanceIPAdapters = memo(({ id }: Props) => {
const selectIPAdapterIds = useMemo(
() =>
createMemoizedSelector(selectCanvasSlice, (canvas) => {
const ipAdapterIds = selectEntityOrThrow(canvas, entityIdentifier).ipAdapters.map(({ id }) => id);
createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
const ipAdapterIds = selectRegionalGuidanceEntityOrThrow(canvasV2, id).ipAdapters.map(({ id }) => id);
if (ipAdapterIds.length === 0) {
return EMPTY_ARRAY;
}
return ipAdapterIds;
}),
[entityIdentifier]
[id]
);
const ipAdapterIds = useAppSelector(selectIPAdapterIds);
@@ -33,7 +35,7 @@ export const RegionalGuidanceIPAdapters = memo(() => {
{ipAdapterIds.map((ipAdapterId, index) => (
<Fragment key={ipAdapterId}>
{index > 0 && <Divider />}
<RegionalGuidanceIPAdapterSettings ipAdapterId={ipAdapterId} ipAdapterNumber={index + 1} />
<RegionalGuidanceIPAdapterSettings id={id} ipAdapterId={ipAdapterId} ipAdapterNumber={index + 1} />
</Fragment>
))}
</>
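
The EMPTY_ARRAY fallback above is a referential-stability trick: mapping entities to ids yields a new array on every call, but when the list is empty the selector can return one shared constant so memoized consumers see an unchanged reference and skip re-rendering. A sketch under assumed conventions (the constant's actual definition lives in app/store/constants):

// Plausible definition of the shared constant.
export const EMPTY_ARRAY: readonly never[] = Object.freeze([]);

const toIds = (entities: { id: string }[]): readonly string[] => {
  const ids = entities.map(({ id }) => id);
  return ids.length === 0 ? EMPTY_ARRAY : ids; // stable reference for the empty case
};

console.log(toIds([]) === toIds([])); // true: the same frozen array both times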

Some files were not shown because too many files have changed in this diff.