Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

Compare commits: v4.2.9.dev ... psyche/fea
465 commits
[Commit table: 465 commit SHAs, a2ad5f1a9a through 392e9b4882; the author, date, and message columns are empty in this capture.]
.github/workflows/python-tests.yml (vendored, 2 changed lines)

@@ -60,7 +60,7 @@ jobs:
         extra-index-url: 'https://download.pytorch.org/whl/cpu'
         github-env: $GITHUB_ENV
       - platform: macos-default
-        os: macOS-14
+        os: macOS-12
         github-env: $GITHUB_ENV
      - platform: windows-cpu
        os: windows-2022
@@ -40,7 +40,6 @@ class UIType(str, Enum, metaclass=MetaEnum):

     # region Model Field Types
     MainModel = "MainModelField"
-    FluxMainModel = "FluxMainModelField"
     SDXLMainModel = "SDXLMainModelField"
     SDXLRefinerModel = "SDXLRefinerModelField"
     ONNXModel = "ONNXModelField"
@@ -49,7 +48,6 @@ class UIType(str, Enum, metaclass=MetaEnum):
     ControlNetModel = "ControlNetModelField"
     IPAdapterModel = "IPAdapterModelField"
     T2IAdapterModel = "T2IAdapterModelField"
-    T5EncoderModel = "T5EncoderModelField"
     SpandrelImageToImageModel = "SpandrelImageToImageModelField"
     # endregion

@@ -127,16 +125,13 @@ class FieldDescriptions:
     negative_cond = "Negative conditioning tensor"
     noise = "Noise tensor"
     clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
-    t5_encoder = "T5 tokenizer and text encoder"
     unet = "UNet (scheduler, LoRAs)"
-    transformer = "Transformer"
     vae = "VAE"
     cond = "Conditioning tensor"
     controlnet_model = "ControlNet model to load"
     vae_model = "VAE model to load"
     lora_model = "LoRA model to load"
     main_model = "Main model (UNet, VAE, CLIP) to load"
-    flux_model = "Flux model (Transformer) to load"
     sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
     sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load"
     onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
@@ -236,12 +231,6 @@ class ColorField(BaseModel):
         return (self.r, self.g, self.b, self.a)


-class FluxConditioningField(BaseModel):
-    """A conditioning tensor primitive value"""
-
-    conditioning_name: str = Field(description="The name of conditioning tensor")
-
-
 class ConditioningField(BaseModel):
     """A conditioning tensor primitive value"""
@@ -1,86 +0,0 @@
from typing import Literal

import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
from invokeai.app.invocations.model import CLIPField, T5EncoderField
from invokeai.app.invocations.primitives import FluxConditioningOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.modules.conditioner import HFEncoder
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo


@invocation(
    "flux_text_encoder",
    title="FLUX Text Encoding",
    tags=["prompt", "conditioning", "flux"],
    category="conditioning",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxTextEncoderInvocation(BaseInvocation):
    """Encodes and preps a prompt for a flux image."""

    clip: CLIPField = InputField(
        title="CLIP",
        description=FieldDescriptions.clip,
        input=Input.Connection,
    )
    t5_encoder: T5EncoderField = InputField(
        title="T5Encoder",
        description=FieldDescriptions.t5_encoder,
        input=Input.Connection,
    )
    t5_max_seq_len: Literal[256, 512] = InputField(
        description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
    )
    prompt: str = InputField(description="Text prompt to encode.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
        t5_embeddings, clip_embeddings = self._encode_prompt(context)
        conditioning_data = ConditioningFieldData(
            conditionings=[FLUXConditioningInfo(clip_embeds=clip_embeddings, t5_embeds=t5_embeddings)]
        )

        conditioning_name = context.conditioning.save(conditioning_data)
        return FluxConditioningOutput.build(conditioning_name)

    def _encode_prompt(self, context: InvocationContext) -> tuple[torch.Tensor, torch.Tensor]:
        # Load CLIP.
        clip_tokenizer_info = context.models.load(self.clip.tokenizer)
        clip_text_encoder_info = context.models.load(self.clip.text_encoder)

        # Load T5.
        t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
        t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)

        prompt = [self.prompt]

        with (
            t5_text_encoder_info as t5_text_encoder,
            t5_tokenizer_info as t5_tokenizer,
        ):
            assert isinstance(t5_text_encoder, T5EncoderModel)
            assert isinstance(t5_tokenizer, T5Tokenizer)

            t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)

            prompt_embeds = t5_encoder(prompt)

        with (
            clip_text_encoder_info as clip_text_encoder,
            clip_tokenizer_info as clip_tokenizer,
        ):
            assert isinstance(clip_text_encoder, CLIPTextModel)
            assert isinstance(clip_tokenizer, CLIPTokenizer)

            clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)

            pooled_prompt_embeds = clip_encoder(prompt)

        assert isinstance(prompt_embeds, torch.Tensor)
        assert isinstance(pooled_prompt_embeds, torch.Tensor)
        return prompt_embeds, pooled_prompt_embeds
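As an aside, a minimal sketch of the embedding shapes this encoder pair produces (dimensions are assumptions based on CLIP-L and T5-XXL, not read from the code above):

import torch

# Hypothetical shapes for one prompt with a FLUX dev checkpoint (t5_max_seq_len=512).
t5_embeds = torch.zeros(1, 512, 4096)   # T5: one embedding per (padded) token
clip_embeds = torch.zeros(1, 768)       # CLIP-L: a single pooled vector per prompt

# The invocation saves both under one conditioning name; the text-to-image node
# later feeds t5_embeds as `txt` and clip_embeds as `vec` to the transformer.
assert t5_embeds.ndim == 3 and clip_embeds.ndim == 2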
@@ -1,172 +0,0 @@
import torch
from einops import rearrange
from PIL import Image

from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
from invokeai.app.invocations.fields import (
    FieldDescriptions,
    FluxConditioningField,
    Input,
    InputField,
    WithBoard,
    WithMetadata,
)
from invokeai.app.invocations.model import TransformerField, VAEField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.session_processor.session_processor_common import CanceledException
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.sampling import denoise, get_noise, get_schedule, prepare_latent_img_patches, unpack
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
from invokeai.backend.util.devices import TorchDevice


@invocation(
    "flux_text_to_image",
    title="FLUX Text to Image",
    tags=["image", "flux"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Text-to-image generation using a FLUX model."""

    transformer: TransformerField = InputField(
        description=FieldDescriptions.flux_model,
        input=Input.Connection,
        title="Transformer",
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )
    positive_text_conditioning: FluxConditioningField = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection
    )
    width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
    height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
    num_steps: int = InputField(
        default=4, description="Number of diffusion steps. Recommended values are schnell: 4, dev: 50."
    )
    guidance: float = InputField(
        default=4.0,
        description="The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell.",
    )
    seed: int = InputField(default=0, description="Randomness seed for reproducibility.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the conditioning data.
        cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
        assert len(cond_data.conditionings) == 1
        flux_conditioning = cond_data.conditionings[0]
        assert isinstance(flux_conditioning, FLUXConditioningInfo)

        latents = self._run_diffusion(context, flux_conditioning.clip_embeds, flux_conditioning.t5_embeds)
        image = self._run_vae_decoding(context, latents)
        image_dto = context.images.save(image=image)
        return ImageOutput.build(image_dto)

    def _run_diffusion(
        self,
        context: InvocationContext,
        clip_embeddings: torch.Tensor,
        t5_embeddings: torch.Tensor,
    ):
        transformer_info = context.models.load(self.transformer.transformer)
        inference_dtype = torch.bfloat16

        # Prepare input noise.
        x = get_noise(
            num_samples=1,
            height=self.height,
            width=self.width,
            device=TorchDevice.choose_torch_device(),
            dtype=inference_dtype,
            seed=self.seed,
        )

        img, img_ids = prepare_latent_img_patches(x)

        is_schnell = "schnell" in transformer_info.config.config_path

        timesteps = get_schedule(
            num_steps=self.num_steps,
            image_seq_len=img.shape[1],
            shift=not is_schnell,
        )

        bs, t5_seq_len, _ = t5_embeddings.shape
        txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())

        # HACK(ryand): Manually empty the cache. Currently we don't check the size of the model before loading it from
        # disk. Since the transformer model is large (24GB), there's a good chance that it will OOM on 32GB RAM systems
        # if the cache is not empty.
        context.models._services.model_manager.load.ram_cache.make_room(24 * 2**30)

        with transformer_info as transformer:
            assert isinstance(transformer, Flux)

            def step_callback() -> None:
                if context.util.is_canceled():
                    raise CanceledException

                # TODO: Make this look like the image before re-enabling
                # latent_image = unpack(img.float(), self.height, self.width)
                # latent_image = latent_image.squeeze()  # Remove unnecessary dimensions
                # flattened_tensor = latent_image.reshape(-1)  # Flatten to shape [48*128*128]

                # # Create a new tensor of the required shape [255, 255, 3]
                # latent_image = flattened_tensor[: 255 * 255 * 3].reshape(255, 255, 3)  # Reshape to RGB format

                # # Convert to a NumPy array and then to a PIL Image
                # image = Image.fromarray(latent_image.cpu().numpy().astype(np.uint8))

                # (width, height) = image.size
                # width *= 8
                # height *= 8

                # dataURL = image_to_dataURL(image, image_format="JPEG")

                # # TODO: move this whole function to invocation context to properly reference these variables
                # context._services.events.emit_invocation_denoise_progress(
                #     context._data.queue_item,
                #     context._data.invocation,
                #     state,
                #     ProgressImage(dataURL=dataURL, width=width, height=height),
                # )

            x = denoise(
                model=transformer,
                img=img,
                img_ids=img_ids,
                txt=t5_embeddings,
                txt_ids=txt_ids,
                vec=clip_embeddings,
                timesteps=timesteps,
                step_callback=step_callback,
                guidance=self.guidance,
            )

        x = unpack(x.float(), self.height, self.width)

        return x

    def _run_vae_decoding(
        self,
        context: InvocationContext,
        latents: torch.Tensor,
    ) -> Image.Image:
        vae_info = context.models.load(self.vae.vae)
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            latents = latents.to(dtype=TorchDevice.choose_torch_dtype())
            img = vae.decode(latents)

        img = img.clamp(-1, 1)
        img = rearrange(img[0], "c h w -> h w c")
        img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())

        return img_pil
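get_schedule() above receives image_seq_len=img.shape[1]. A standalone sketch of where that number comes from, assuming the reference FLUX pipeline's 8x VAE downsampling and 2x2 latent patching (both assumptions, not visible in this file):

def image_seq_len(height: int, width: int) -> int:
    # 8x VAE downsampling, then each 2x2 latent patch becomes one token.
    lat_h, lat_w = height // 8, width // 8
    return (lat_h // 2) * (lat_w // 2)

assert image_seq_len(1024, 1024) == 4096  # the default 1024x1024 run above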
@@ -1032,11 +1032,7 @@ class CanvasV2MaskAndCropOutput(ImageOutput):

 class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Handles Canvas V2 image output masking and cropping"""

-    source_image: ImageField | None = InputField(
-        default=None,
-        description="The source image onto which the masked generated image is pasted. If omitted, the masked generated image is returned with transparency.",
-    )
-    generated_image: ImageField = InputField(description="The image to apply the mask to")
+    image: ImageField = InputField(description="The image to apply the mask to")
     mask: ImageField = InputField(description="The mask to apply")
     mask_blur: int = InputField(default=0, ge=0, description="The amount to blur the mask by")

@@ -1050,25 +1046,33 @@ class CanvasV2MaskAndCropInvocation(BaseInvocation, WithMetadata, WithBoard):
         return ImageOps.invert(mask.convert("L"))

     def invoke(self, context: InvocationContext) -> CanvasV2MaskAndCropOutput:
+        image = context.images.get_pil(self.image.image_name)
         mask = self._prepare_mask(context.images.get_pil(self.mask.image_name))

-        if self.source_image:
-            generated_image = context.images.get_pil(self.generated_image.image_name)
-            source_image = context.images.get_pil(self.source_image.image_name)
-            source_image.paste(generated_image, (0, 0), mask)
-            image_dto = context.images.save(image=source_image)
-        else:
-            generated_image = context.images.get_pil(self.generated_image.image_name)
-            generated_image.putalpha(mask)
-            image_dto = context.images.save(image=generated_image)
+        image.putalpha(mask)
+        # bbox = image.getbbox()
+        # image = image.crop(bbox)
+        image_dto = context.images.save(image=image)

         return CanvasV2MaskAndCropOutput(
             image=ImageField(image_name=image_dto.image_name),
             offset_x=0,
             offset_y=0,
-            width=image_dto.width,
-            height=image_dto.height,
+            width=image.width,
+            height=image.height,
         )
+
+    # def invoke(self, context: InvocationContext) -> CanvasV2MaskAndCropOutput:
+    #     image = context.images.get_pil(self.image.image_name)
+    #     mask = self._prepare_mask(context.images.get_pil(self.mask.image_name))
+    #     image.putalpha(mask)
+    #     bbox = image.getbbox()
+    #     image = image.crop(bbox)
+    #     image_dto = context.images.save(image=image)

+    #     return CanvasV2MaskAndCropOutput(
+    #         image=ImageField(image_name=image_dto.image_name),
+    #         offset_x=bbox[0],
+    #         offset_y=bbox[1],
+    #         width=image.width,
+    #         height=image.height,
+    #     )
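The masking logic above leans on two standard Pillow operations. A self-contained sketch of their semantics (toy images, not the InvokeAI API):

from PIL import Image

mask = Image.new("L", (64, 64), 0)        # black = fully transparent
mask.paste(255, (16, 16, 48, 48))         # white square = fully opaque

generated = Image.new("RGB", (64, 64), "red")
source = Image.new("RGB", (64, 64), "blue")

# paste(im, box, mask): composite `generated` onto `source` where the mask is white.
pasted = source.copy()
pasted.paste(generated, (0, 0), mask)

# putalpha(mask): keep `generated` alone and attach the mask as its alpha channel.
cutout = generated.copy()
cutout.putalpha(mask)                     # now RGBA; transparent outside the square

assert pasted.getpixel((32, 32)) == (255, 0, 0)  # inside the mask: red
assert cutout.getpixel((0, 0))[3] == 0           # outside the mask: alpha 0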
@@ -1,5 +1,5 @@
 import copy
-from typing import List, Literal, Optional
+from typing import List, Optional

 from pydantic import BaseModel, Field

@@ -13,14 +13,7 @@ from invokeai.app.invocations.baseinvocation import (
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.shared.models import FreeUConfig
-from invokeai.backend.flux.util import max_seq_lengths
-from invokeai.backend.model_manager.config import (
-    AnyModelConfig,
-    BaseModelType,
-    CheckpointConfigBase,
-    ModelType,
-    SubModelType,
-)
+from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType


 class ModelIdentifierField(BaseModel):
@@ -67,15 +60,6 @@ class CLIPField(BaseModel):
     loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")


-class TransformerField(BaseModel):
-    transformer: ModelIdentifierField = Field(description="Info to load Transformer submodel")
-
-
-class T5EncoderField(BaseModel):
-    tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
-    text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
-
-
 class VAEField(BaseModel):
     vae: ModelIdentifierField = Field(description="Info to load vae submodel")
     seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')

@@ -138,112 +122,6 @@ class ModelIdentifierInvocation(BaseInvocation):
         return ModelIdentifierOutput(model=self.model)


-@invocation_output("flux_model_loader_output")
-class FluxModelLoaderOutput(BaseInvocationOutput):
-    """Flux base model loader output"""
-
-    transformer: TransformerField = OutputField(description=FieldDescriptions.transformer, title="Transformer")
-    clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")
-    t5_encoder: T5EncoderField = OutputField(description=FieldDescriptions.t5_encoder, title="T5 Encoder")
-    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")
-    max_seq_len: Literal[256, 512] = OutputField(
-        description="The max sequence length to use for the T5 encoder. (256 for schnell transformer, 512 for dev transformer)",
-        title="Max Seq Length",
-    )
-
-
-@invocation(
-    "flux_model_loader",
-    title="Flux Main Model",
-    tags=["model", "flux"],
-    category="model",
-    version="1.0.3",
-    classification=Classification.Prototype,
-)
-class FluxModelLoaderInvocation(BaseInvocation):
-    """Loads a flux base model, outputting its submodels."""
-
-    model: ModelIdentifierField = InputField(
-        description=FieldDescriptions.flux_model,
-        ui_type=UIType.FluxMainModel,
-        input=Input.Direct,
-    )
-
-    t5_encoder: ModelIdentifierField = InputField(
-        description=FieldDescriptions.t5_encoder,
-        ui_type=UIType.T5EncoderModel,
-        input=Input.Direct,
-    )
-
-    def invoke(self, context: InvocationContext) -> FluxModelLoaderOutput:
-        model_key = self.model.key
-
-        if not context.models.exists(model_key):
-            raise ValueError(f"Unknown model: {model_key}")
-        transformer = self._get_model(context, SubModelType.Transformer)
-        tokenizer = self._get_model(context, SubModelType.Tokenizer)
-        tokenizer2 = self._get_model(context, SubModelType.Tokenizer2)
-        clip_encoder = self._get_model(context, SubModelType.TextEncoder)
-        t5_encoder = self._get_model(context, SubModelType.TextEncoder2)
-        vae = self._get_model(context, SubModelType.VAE)
-        transformer_config = context.models.get_config(transformer)
-        assert isinstance(transformer_config, CheckpointConfigBase)
-
-        return FluxModelLoaderOutput(
-            transformer=TransformerField(transformer=transformer),
-            clip=CLIPField(tokenizer=tokenizer, text_encoder=clip_encoder, loras=[], skipped_layers=0),
-            t5_encoder=T5EncoderField(tokenizer=tokenizer2, text_encoder=t5_encoder),
-            vae=VAEField(vae=vae),
-            max_seq_len=max_seq_lengths[transformer_config.config_path],
-        )
-
-    def _get_model(self, context: InvocationContext, submodel: SubModelType) -> ModelIdentifierField:
-        match submodel:
-            case SubModelType.Transformer:
-                return self.model.model_copy(update={"submodel_type": SubModelType.Transformer})
-            case SubModelType.VAE:
-                return self._pull_model_from_mm(
-                    context,
-                    SubModelType.VAE,
-                    "FLUX.1-schnell_ae",
-                    ModelType.VAE,
-                    BaseModelType.Flux,
-                )
-            case submodel if submodel in [SubModelType.Tokenizer, SubModelType.TextEncoder]:
-                return self._pull_model_from_mm(
-                    context,
-                    submodel,
-                    "clip-vit-large-patch14",
-                    ModelType.CLIPEmbed,
-                    BaseModelType.Any,
-                )
-            case submodel if submodel in [SubModelType.Tokenizer2, SubModelType.TextEncoder2]:
-                return self._pull_model_from_mm(
-                    context,
-                    submodel,
-                    self.t5_encoder.name,
-                    ModelType.T5Encoder,
-                    BaseModelType.Any,
-                )
-            case _:
-                raise Exception(f"{submodel.value} is not a supported submodule for a flux model")
-
-    def _pull_model_from_mm(
-        self,
-        context: InvocationContext,
-        submodel: SubModelType,
-        name: str,
-        type: ModelType,
-        base: BaseModelType,
-    ):
-        if models := context.models.search_by_attrs(name=name, base=base, type=type):
-            if len(models) != 1:
-                raise Exception(f"Multiple models detected for selected model with name {name}")
-            return ModelIdentifierField.from_config(models[0]).model_copy(update={"submodel_type": submodel})
-        else:
-            raise ValueError(f"Please install the {base}:{type} model named {name} via starter models")


 @invocation(
     "main_model_loader",
     title="Main Model",
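The removed _get_model() re-targets a single model record at different submodels using pydantic v2's model_copy(update=...). A minimal standalone sketch; Identifier is a hypothetical stand-in for ModelIdentifierField:

from typing import Optional

from pydantic import BaseModel

class Identifier(BaseModel):
    key: str
    submodel_type: Optional[str] = None

base = Identifier(key="f04a7a2f")
transformer = base.model_copy(update={"submodel_type": "transformer"})

assert base.submodel_type is None                  # the original is untouched
assert transformer.submodel_type == "transformer"  # the copy carries the override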
@@ -12,7 +12,6 @@ from invokeai.app.invocations.fields import (
     ConditioningField,
     DenoiseMaskField,
     FieldDescriptions,
-    FluxConditioningField,
     ImageField,
     Input,
     InputField,
@@ -415,17 +414,6 @@ class MaskOutput(BaseInvocationOutput):
     height: int = OutputField(description="The height of the mask in pixels.")


-@invocation_output("flux_conditioning_output")
-class FluxConditioningOutput(BaseInvocationOutput):
-    """Base class for nodes that output a single conditioning tensor"""
-
-    conditioning: FluxConditioningField = OutputField(description=FieldDescriptions.cond)
-
-    @classmethod
-    def build(cls, conditioning_name: str) -> "FluxConditioningOutput":
-        return cls(conditioning=FluxConditioningField(conditioning_name=conditioning_name))
-
-
 @invocation_output("conditioning_output")
 class ConditioningOutput(BaseInvocationOutput):
     """Base class for nodes that output a single conditioning tensor"""
@@ -783,9 +783,8 @@ class ModelInstallService(ModelInstallServiceBase):
             # So what we do is to synthesize a folder named "sdxl-turbo_vae" here.
             if subfolder:
                 top = Path(remote_files[0].path.parts[0])  # e.g. "sdxl-turbo/"
-                path_to_remove = top / subfolder  # sdxl-turbo/vae/
-                subfolder_rename = subfolder.name.replace("/", "_").replace("\\", "_")
-                path_to_add = Path(f"{top}_{subfolder_rename}")
+                path_to_remove = top / subfolder.parts[-1]  # sdxl-turbo/vae/
+                path_to_add = Path(f"{top}_{subfolder}")
             else:
                 path_to_remove = Path(".")
                 path_to_add = Path(".")
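A standalone sketch of the folder-flattening above (paths hypothetical): a repository subfolder such as sdxl-turbo/vae is remapped to a sibling directory named sdxl-turbo_vae:

from pathlib import Path

remote_file = Path("sdxl-turbo/vae/config.json")
subfolder = Path("vae")

top = Path(remote_file.parts[0])               # "sdxl-turbo"
path_to_remove = top / subfolder.parts[-1]     # "sdxl-turbo/vae"
path_to_add = Path(f"{top}_{subfolder}")       # "sdxl-turbo_vae"

dest = path_to_add / remote_file.relative_to(path_to_remove)
assert dest == Path("sdxl-turbo_vae/config.json")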
@@ -77,7 +77,6 @@ class ModelRecordChanges(BaseModelExcludeNull):
     type: Optional[ModelType] = Field(description="Type of model", default=None)
     key: Optional[str] = Field(description="Database ID for this model", default=None)
     hash: Optional[str] = Field(description="hash of model file", default=None)
-    format: Optional[str] = Field(description="format of model file", default=None)
     trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
     default_settings: Optional[MainModelDefaultSettings | ControlAdapterDefaultSettings] = Field(
         description="Default settings for this model", default=None
@@ -1,266 +0,0 @@
{
  "name": "FLUX Text to Image",
  "author": "InvokeAI",
  "description": "A simple text-to-image workflow using FLUX dev or schnell models. Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
  "version": "1.0.0",
  "contact": "",
  "tags": "text2image, flux",
  "notes": "Prerequisite model downloads: T5 Encoder, CLIP-L Encoder, and FLUX VAE. Quantized and un-quantized versions can be found in the starter models tab within your Model Manager. We recommend 4 steps for FLUX schnell models and 30 steps for FLUX dev models.",
  "exposedFields": [
    { "nodeId": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a", "fieldName": "model" },
    { "nodeId": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c", "fieldName": "prompt" },
    { "nodeId": "159bdf1b-79e7-4174-b86e-d40e646964c8", "fieldName": "num_steps" },
    { "nodeId": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a", "fieldName": "t5_encoder" }
  ],
  "meta": { "version": "3.0.0", "category": "default" },
  "nodes": [
    {
      "id": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "type": "invocation",
      "data": {
        "id": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
        "type": "flux_model_loader",
        "version": "1.0.3",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": false,
        "inputs": {
          "model": {
            "name": "model",
            "label": "Model (Starter Models can be found in Model Manager)",
            "value": {
              "key": "f04a7a2f-c74d-4538-8d5e-879a53501662",
              "hash": "random:4875da7a9508444ffa706f61961c260d0c6729f6181a86b31fad06df1277b850",
              "name": "FLUX Dev (Quantized)",
              "base": "flux",
              "type": "main"
            }
          },
          "t5_encoder": {
            "name": "t5_encoder",
            "label": "T 5 Encoder (Starter Models can be found in Model Manager)",
            "value": {
              "key": "20dcd9ec-5fbb-4012-8401-049e707da5e5",
              "hash": "random:f986be43ff3502169e4adbdcee158afb0e0a65a1edc4cab16ae59963630cfd8f",
              "name": "t5_bnb_int8_quantized_encoder",
              "base": "any",
              "type": "t5_encoder"
            }
          }
        }
      },
      "position": { "x": 337.09365228062825, "y": 40.63469521079861 }
    },
    {
      "id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "type": "invocation",
      "data": {
        "id": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
        "type": "flux_text_encoder",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": true,
        "inputs": {
          "clip": { "name": "clip", "label": "" },
          "t5_encoder": { "name": "t5_encoder", "label": "" },
          "t5_max_seq_len": { "name": "t5_max_seq_len", "label": "T5 Max Seq Len", "value": 256 },
          "prompt": { "name": "prompt", "label": "", "value": "a cat" }
        }
      },
      "position": { "x": 824.1970602278849, "y": 146.98251001061735 }
    },
    {
      "id": "4754c534-a5f3-4ad0-9382-7887985e668c",
      "type": "invocation",
      "data": {
        "id": "4754c534-a5f3-4ad0-9382-7887985e668c",
        "type": "rand_int",
        "version": "1.0.1",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": true,
        "useCache": false,
        "inputs": {
          "low": { "name": "low", "label": "", "value": 0 },
          "high": { "name": "high", "label": "", "value": 2147483647 }
        }
      },
      "position": { "x": 822.9899179655476, "y": 360.9657214885052 }
    },
    {
      "id": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "type": "invocation",
      "data": {
        "id": "159bdf1b-79e7-4174-b86e-d40e646964c8",
        "type": "flux_text_to_image",
        "version": "1.0.0",
        "label": "",
        "notes": "",
        "isOpen": true,
        "isIntermediate": false,
        "useCache": true,
        "inputs": {
          "board": { "name": "board", "label": "" },
          "metadata": { "name": "metadata", "label": "" },
          "transformer": { "name": "transformer", "label": "" },
          "vae": { "name": "vae", "label": "" },
          "positive_text_conditioning": { "name": "positive_text_conditioning", "label": "" },
          "width": { "name": "width", "label": "", "value": 1024 },
          "height": { "name": "height", "label": "", "value": 1024 },
          "num_steps": { "name": "num_steps", "label": "Steps (Recommend 30 for Dev, 4 for Schnell)", "value": 30 },
          "guidance": { "name": "guidance", "label": "", "value": 4 },
          "seed": { "name": "seed", "label": "", "value": 0 }
        }
      },
      "position": { "x": 1216.3900791301849, "y": 5.500841807102248 }
    }
  ],
  "edges": [
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33amax_seq_len-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_max_seq_len",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "max_seq_len",
      "targetHandle": "t5_max_seq_len"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33avae-159bdf1b-79e7-4174-b86e-d40e646964c8vae",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "vae",
      "targetHandle": "vae"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33atransformer-159bdf1b-79e7-4174-b86e-d40e646964c8transformer",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "transformer",
      "targetHandle": "transformer"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33at5_encoder-01f674f8-b3d1-4df1-acac-6cb8e0bfb63ct5_encoder",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "t5_encoder",
      "targetHandle": "t5_encoder"
    },
    {
      "id": "reactflow__edge-4f0207c2-ff40-41fd-b047-ad33fbb1c33aclip-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cclip",
      "type": "default",
      "source": "4f0207c2-ff40-41fd-b047-ad33fbb1c33a",
      "target": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "sourceHandle": "clip",
      "targetHandle": "clip"
    },
    {
      "id": "reactflow__edge-01f674f8-b3d1-4df1-acac-6cb8e0bfb63cconditioning-159bdf1b-79e7-4174-b86e-d40e646964c8positive_text_conditioning",
      "type": "default",
      "source": "01f674f8-b3d1-4df1-acac-6cb8e0bfb63c",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "conditioning",
      "targetHandle": "positive_text_conditioning"
    },
    {
      "id": "reactflow__edge-4754c534-a5f3-4ad0-9382-7887985e668cvalue-159bdf1b-79e7-4174-b86e-d40e646964c8seed",
      "type": "default",
      "source": "4754c534-a5f3-4ad0-9382-7887985e668c",
      "target": "159bdf1b-79e7-4174-b86e-d40e646964c8",
      "sourceHandle": "value",
      "targetHandle": "seed"
    }
  ]
}
@@ -1,32 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

import torch
from einops import rearrange
from torch import Tensor


def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
    q, k = apply_rope(q, k, pe)

    x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
    x = rearrange(x, "B H L D -> B L (H D)")

    return x


def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    assert dim % 2 == 0
    scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
    omega = 1.0 / (theta**scale)
    out = torch.einsum("...n,d->...nd", pos, omega)
    out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
    out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
    return out.float()


def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
    xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
    xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
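A quick sanity sketch for the helpers above (toy shapes): the 2x2 blocks built by rope() are rotation matrices, so apply_rope() should leave query and key norms unchanged:

import torch

b, h, n, d = 1, 2, 8, 16
pos = torch.arange(n, dtype=torch.float64)[None, :]  # (b, n) token positions
pe = rope(pos, d, theta=10_000)                      # (b, n, d/2, 2, 2) rotations

q = torch.randn(b, h, n, d)
k = torch.randn(b, h, n, d)
q_rot, k_rot = apply_rope(q, k, pe[:, None])         # add a head axis, as EmbedND does

assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-4)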
@@ -1,117 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

from dataclasses import dataclass

import torch
from torch import Tensor, nn

from invokeai.backend.flux.modules.layers import (
    DoubleStreamBlock,
    EmbedND,
    LastLayer,
    MLPEmbedder,
    SingleStreamBlock,
    timestep_embedding,
)


@dataclass
class FluxParams:
    in_channels: int
    vec_in_dim: int
    context_in_dim: int
    hidden_size: int
    mlp_ratio: float
    num_heads: int
    depth: int
    depth_single_blocks: int
    axes_dim: list[int]
    theta: int
    qkv_bias: bool
    guidance_embed: bool


class Flux(nn.Module):
    """
    Transformer model for flow matching on sequences.
    """

    def __init__(self, params: FluxParams):
        super().__init__()

        self.params = params
        self.in_channels = params.in_channels
        self.out_channels = self.in_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads
        self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
        self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
        self.guidance_in = (
            MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
        )
        self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)

        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=params.mlp_ratio,
                    qkv_bias=params.qkv_bias,
                )
                for _ in range(params.depth)
            ]
        )

        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio)
                for _ in range(params.depth_single_blocks)
            ]
        )

        self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)

    def forward(
        self,
        img: Tensor,
        img_ids: Tensor,
        txt: Tensor,
        txt_ids: Tensor,
        timesteps: Tensor,
        y: Tensor,
        guidance: Tensor | None = None,
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

        # running on sequences img
        img = self.img_in(img)
        vec = self.time_in(timestep_embedding(timesteps, 256))
        if self.params.guidance_embed:
            if guidance is None:
                raise ValueError("Didn't get guidance strength for guidance distilled model.")
            vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
        vec = vec + self.vector_in(y)
        txt = self.txt_in(txt)

        ids = torch.cat((txt_ids, img_ids), dim=1)
        pe = self.pe_embedder(ids)

        for block in self.double_blocks:
            img, txt = block(img=img, txt=txt, vec=vec, pe=pe)

        img = torch.cat((txt, img), 1)
        for block in self.single_blocks:
            img = block(img, vec=vec, pe=pe)
        img = img[:, txt.shape[1] :, ...]

        img = self.final_layer(img, vec)  # (N, T, patch_size ** 2 * out_channels)
        return img
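The two ValueError checks in Flux.__init__ encode one invariant: the per-head dimension must equal the sum of the positional-embedding axes. A sketch with the configuration published in the reference FLUX repo (treat these numbers as assumptions):

hidden_size, num_heads = 3072, 24   # reference FLUX transformer config
axes_dim = [16, 56, 56]             # one axis per ID component (constant, y, x)

pe_dim = hidden_size // num_heads   # 128, the per-head dimension
assert hidden_size % num_heads == 0
assert sum(axes_dim) == pe_dim      # 16 + 56 + 56 == 128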
@@ -1,310 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

from dataclasses import dataclass

import torch
from einops import rearrange
from torch import Tensor, nn


@dataclass
class AutoEncoderParams:
    resolution: int
    in_channels: int
    ch: int
    out_ch: int
    ch_mult: list[int]
    num_res_blocks: int
    z_channels: int
    scale_factor: float
    shift_factor: float


class AttnBlock(nn.Module):
    def __init__(self, in_channels: int):
        super().__init__()
        self.in_channels = in_channels

        self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)

        self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)

    def attention(self, h_: Tensor) -> Tensor:
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        b, c, h, w = q.shape
        q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous()
        k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous()
        v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous()
        h_ = nn.functional.scaled_dot_product_attention(q, k, v)

        return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)

    def forward(self, x: Tensor) -> Tensor:
        return x + self.proj_out(self.attention(x))


class ResnetBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        h = x
        h = self.norm1(h)
        h = torch.nn.functional.silu(h)
        h = self.conv1(h)

        h = self.norm2(h)
        h = torch.nn.functional.silu(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            x = self.nin_shortcut(x)

        return x + h


class Downsample(nn.Module):
    def __init__(self, in_channels: int):
        super().__init__()
        # no asymmetric padding in torch conv, must do it ourselves
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x: Tensor):
        pad = (0, 1, 0, 1)
        x = nn.functional.pad(x, pad, mode="constant", value=0)
        x = self.conv(x)
        return x


class Upsample(nn.Module):
    def __init__(self, in_channels: int):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor):
        x = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        x = self.conv(x)
        return x


class Encoder(nn.Module):
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        ch: int,
        ch_mult: list[int],
        num_res_blocks: int,
        z_channels: int,
    ):
        super().__init__()
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # downsampling
        self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        block_in = self.ch
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # end
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor) -> Tensor:
        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1])
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        # end
        h = self.norm_out(h)
        h = torch.nn.functional.silu(h)
        h = self.conv_out(h)
        return h


class Decoder(nn.Module):
    def __init__(
        self,
        ch: int,
        out_ch: int,
        ch_mult: list[int],
        num_res_blocks: int,
        in_channels: int,
        resolution: int,
        z_channels: int,
    ):
        super().__init__()
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.ffactor = 2 ** (self.num_resolutions - 1)

        # compute in_ch_mult, block_in and curr_res at lowest res
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)

        # z to block_in
        self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks + 1):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)

    def forward(self, z: Tensor) -> Tensor:
        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = torch.nn.functional.silu(h)
        h = self.conv_out(h)
        return h


class DiagonalGaussian(nn.Module):
    def __init__(self, sample: bool = True, chunk_dim: int = 1):
        super().__init__()
        self.sample = sample
        self.chunk_dim = chunk_dim

    def forward(self, z: Tensor) -> Tensor:
        mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim)
        if self.sample:
            std = torch.exp(0.5 * logvar)
            return mean + std * torch.randn_like(mean)
        else:
            return mean


class AutoEncoder(nn.Module):
    def __init__(self, params: AutoEncoderParams):
        super().__init__()
        self.encoder = Encoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )
        self.decoder = Decoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            out_ch=params.out_ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )
        self.reg = DiagonalGaussian()

        self.scale_factor = params.scale_factor
        self.shift_factor = params.shift_factor

    def encode(self, x: Tensor) -> Tensor:
        z = self.reg(self.encoder(x))
        z = self.scale_factor * (z - self.shift_factor)
        return z

    def decode(self, z: Tensor) -> Tensor:
        z = z / self.scale_factor + self.shift_factor
        return self.decoder(z)

    def forward(self, x: Tensor) -> Tensor:
        return self.decode(self.encode(x))
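encode() and decode() wrap the learned networks in inverse affine maps, so the latent scaling round-trips exactly. A numeric sketch (the scale and shift constants are illustrative, not authoritative):

import torch

scale_factor, shift_factor = 0.3611, 0.1159

z_raw = torch.randn(1, 16, 4, 4)            # stand-in for the sampled posterior
z = scale_factor * (z_raw - shift_factor)   # as in AutoEncoder.encode()
z_back = z / scale_factor + shift_factor    # as in AutoEncoder.decode()

torch.testing.assert_close(z_back, z_raw)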
@@ -1,33 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

from torch import Tensor, nn
from transformers import PreTrainedModel, PreTrainedTokenizer


class HFEncoder(nn.Module):
    def __init__(self, encoder: PreTrainedModel, tokenizer: PreTrainedTokenizer, is_clip: bool, max_length: int):
        super().__init__()
        self.max_length = max_length
        self.is_clip = is_clip
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
        self.tokenizer = tokenizer
        self.hf_module = encoder
        self.hf_module = self.hf_module.eval().requires_grad_(False)

    def forward(self, text: list[str]) -> Tensor:
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )

        outputs = self.hf_module(
            input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )
        return outputs[self.output_key]
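
# Sketch of the two output modes (illustrative shapes, not from this file): HFEncoder
# returns "pooler_output" for CLIP (one pooled vector per prompt, used as the global
# conditioning vector) and "last_hidden_state" for T5 (one embedding per token, used
# as the sequence conditioning). Assuming hidden size H and max_length L:
#
#     clip_out = clip_encoder(["a photo of a cat"])  # shape (batch, H)
#     t5_out = t5_encoder(["a photo of a cat"])      # shape (batch, L, H)
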
@@ -1,253 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

import math
from dataclasses import dataclass

import torch
from einops import rearrange
from torch import Tensor, nn

from invokeai.backend.flux.math import attention, rope


class EmbedND(nn.Module):
    def __init__(self, dim: int, theta: int, axes_dim: list[int]):
        super().__init__()
        self.dim = dim
        self.theta = theta
        self.axes_dim = axes_dim

    def forward(self, ids: Tensor) -> Tensor:
        n_axes = ids.shape[-1]
        emb = torch.cat(
            [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
            dim=-3,
        )

        return emb.unsqueeze(1)


def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
    """
    Create sinusoidal timestep embeddings.
    :param t: a 1-D Tensor of N indices, one per batch element.
        These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an (N, D) Tensor of positional embeddings.
    """
    t = time_factor * t
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)

    args = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    if torch.is_floating_point(t):
        embedding = embedding.to(t)
    return embedding
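
# The embedding concatenates cosines and sines of t scaled by a geometric series of
# frequencies, so nearby timesteps get nearby embeddings at every frequency. A small
# runnable check of the output shape and range, using timestep_embedding defined
# above (dim chosen for illustration):
def _demo_timestep_embedding() -> None:
    import torch

    t = torch.tensor([0.0, 0.5, 1.0])
    emb = timestep_embedding(t, dim=8)
    assert emb.shape == (3, 8)
    assert emb.abs().max() <= 1.0  # cos/sin outputs are bounded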


class MLPEmbedder(nn.Module):
    def __init__(self, in_dim: int, hidden_dim: int):
        super().__init__()
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)

    def forward(self, x: Tensor) -> Tensor:
        return self.out_layer(self.silu(self.in_layer(x)))


class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
        x_dtype = x.dtype
        x = x.float()
        rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
        return (x * rrms).to(dtype=x_dtype) * self.scale
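
# RMSNorm rescales by the root-mean-square of the last dim without subtracting the
# mean (unlike LayerNorm), then applies a learned per-channel scale. A runnable check
# that the normalized activations have unit RMS (shapes chosen for illustration):
def _demo_rms_norm() -> None:
    import torch

    x = torch.randn(2, 5, 64)
    rrms = torch.rsqrt(torch.mean(x.float() ** 2, dim=-1, keepdim=True) + 1e-6)
    y = x * rrms
    rms = torch.sqrt(torch.mean(y**2, dim=-1))
    assert torch.allclose(rms, torch.ones_like(rms), atol=1e-3)
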
class QKNorm(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.query_norm = RMSNorm(dim)
        self.key_norm = RMSNorm(dim)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
        q = self.query_norm(q)
        k = self.key_norm(k)
        return q.to(v), k.to(v)


class SelfAttention(nn.Module):
    def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.norm = QKNorm(head_dim)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: Tensor, pe: Tensor) -> Tensor:
        qkv = self.qkv(x)
        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)
        x = attention(q, k, v, pe=pe)
        x = self.proj(x)
        return x


@dataclass
class ModulationOut:
    shift: Tensor
    scale: Tensor
    gate: Tensor


class Modulation(nn.Module):
    def __init__(self, dim: int, double: bool):
        super().__init__()
        self.is_double = double
        self.multiplier = 6 if double else 3
        self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)

    def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]:
        out = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)

        return (
            ModulationOut(*out[:3]),
            ModulationOut(*out[3:]) if self.is_double else None,
        )
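
# Modulation is adaptive layer norm (adaLN) conditioning: a conditioning vector is
# projected to per-block (shift, scale, gate) triples, applied around each sub-layer
# as y = x + gate * f((1 + scale) * norm(x) + shift). A runnable sketch of one such
# step with plain tensors (shapes illustrative):
def _demo_adaln_modulation() -> None:
    import torch

    x = torch.randn(2, 10, 64)  # (batch, tokens, hidden)
    shift, scale, gate = (torch.randn(2, 1, 64) for _ in range(3))
    norm = torch.nn.LayerNorm(64, elementwise_affine=False)
    f = torch.nn.Linear(64, 64)  # stands in for attention or the MLP
    y = x + gate * f((1 + scale) * norm(x) + shift)
    assert y.shape == x.shape
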
class DoubleStreamBlock(nn.Module):
    def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False):
        super().__init__()

        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.img_mod = Modulation(hidden_size, double=True)
        self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        self.txt_mod = Modulation(hidden_size, double=True)
        self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

    def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor) -> tuple[Tensor, Tensor]:
        img_mod1, img_mod2 = self.img_mod(vec)
        txt_mod1, txt_mod2 = self.txt_mod(vec)

        # prepare image for attention
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = self.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare txt for attention
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # run actual attention
        q = torch.cat((txt_q, img_q), dim=2)
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        attn = attention(q, k, v, pe=pe)
        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]

        # calculate the img blocks
        img = img + img_mod1.gate * self.img_attn.proj(img_attn)
        img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)

        # calculate the txt blocks
        txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
        txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
        return img, txt
class SingleStreamBlock(nn.Module):
    """
    A DiT block with parallel linear layers as described in
    https://arxiv.org/abs/2302.05442 and adapted modulation interface.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qk_scale: float | None = None,
    ):
        super().__init__()
        self.hidden_dim = hidden_size
        self.num_heads = num_heads
        head_dim = hidden_size // num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
        # qkv and mlp_in
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
        # proj and mlp_out
        self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)

        self.norm = QKNorm(head_dim)

        self.hidden_size = hidden_size
        self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)

        self.mlp_act = nn.GELU(approximate="tanh")
        self.modulation = Modulation(hidden_size, double=False)

    def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
        mod, _ = self.modulation(vec)
        x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)

        # compute attention
        attn = attention(q, k, v, pe=pe)
        # compute activation in mlp stream, cat again and run second linear layer
        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
        return x + mod.gate * output


class LastLayer(nn.Module):
    def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))

    def forward(self, x: Tensor, vec: Tensor) -> Tensor:
        shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
        x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
        x = self.linear(x)
        return x
@@ -1,176 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

import math
from typing import Callable

import torch
from einops import rearrange, repeat
from torch import Tensor
from tqdm import tqdm

from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.conditioner import HFEncoder


def get_noise(
    num_samples: int,
    height: int,
    width: int,
    device: torch.device,
    dtype: torch.dtype,
    seed: int,
):
    # We always generate noise on the same device and dtype, then cast, to ensure consistency across devices/dtypes.
    rand_device = "cpu"
    rand_dtype = torch.float16
    return torch.randn(
        num_samples,
        16,
        # allow for packing
        2 * math.ceil(height / 16),
        2 * math.ceil(width / 16),
        device=rand_device,
        dtype=rand_dtype,
        generator=torch.Generator(device=rand_device).manual_seed(seed),
    ).to(device=device, dtype=dtype)
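
# Generating on a fixed device/dtype before casting makes seeds reproducible no matter
# where inference runs: the RNG stream is always CPU float16. A runnable sketch of the
# same idea (shapes illustrative):
def _demo_seeded_noise() -> None:
    import torch

    def noise(seed: int) -> torch.Tensor:
        gen = torch.Generator(device="cpu").manual_seed(seed)
        return torch.randn(1, 16, 8, 8, device="cpu", dtype=torch.float16, generator=gen)

    assert torch.equal(noise(42), noise(42))  # same seed -> identical noise
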
def prepare(t5: HFEncoder, clip: HFEncoder, img: Tensor, prompt: str | list[str]) -> dict[str, Tensor]:
    bs, c, h, w = img.shape
    if bs == 1 and not isinstance(prompt, str):
        bs = len(prompt)

    img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
    if img.shape[0] == 1 and bs > 1:
        img = repeat(img, "1 ... -> bs ...", bs=bs)

    img_ids = torch.zeros(h // 2, w // 2, 3)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)

    if isinstance(prompt, str):
        prompt = [prompt]
    txt = t5(prompt)
    if txt.shape[0] == 1 and bs > 1:
        txt = repeat(txt, "1 ... -> bs ...", bs=bs)
    txt_ids = torch.zeros(bs, txt.shape[1], 3)

    vec = clip(prompt)
    if vec.shape[0] == 1 and bs > 1:
        vec = repeat(vec, "1 ... -> bs ...", bs=bs)

    return {
        "img": img,
        "img_ids": img_ids.to(img.device),
        "txt": txt.to(img.device),
        "txt_ids": txt_ids.to(img.device),
        "vec": vec.to(img.device),
    }
def time_shift(mu: float, sigma: float, t: Tensor):
    return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)


def get_lin_function(x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15) -> Callable[[float], float]:
    m = (y2 - y1) / (x2 - x1)
    b = y1 - m * x1
    return lambda x: m * x + b


def get_schedule(
    num_steps: int,
    image_seq_len: int,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
    shift: bool = True,
) -> list[float]:
    # extra step for zero
    timesteps = torch.linspace(1, 0, num_steps + 1)

    # shifting the schedule to favor high timesteps for higher signal images
    if shift:
        # estimate mu based on linear interpolation between two points
        mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
        timesteps = time_shift(mu, 1.0, timesteps)

    return timesteps.tolist()
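
# time_shift warps a uniform [1, 0] schedule so that more of the step budget is spent
# at high noise levels; mu grows linearly with the image token count, so larger images
# get a stronger shift. A runnable sketch using get_schedule defined above (the token
# count is illustrative):
def _demo_schedule_shift() -> None:
    uniform = get_schedule(num_steps=4, image_seq_len=4096, shift=False)
    shifted = get_schedule(num_steps=4, image_seq_len=4096, shift=True)
    # Endpoints are preserved; interior timesteps move toward 1 (more high-noise steps).
    assert uniform[0] == shifted[0] == 1.0 and uniform[-1] == shifted[-1] == 0.0
    assert all(s >= u for s, u in zip(shifted[1:-1], uniform[1:-1]))
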
def denoise(
    model: Flux,
    # model input
    img: Tensor,
    img_ids: Tensor,
    txt: Tensor,
    txt_ids: Tensor,
    vec: Tensor,
    # sampling parameters
    timesteps: list[float],
    step_callback: Callable[[], None],
    guidance: float = 4.0,
):
    dtype = model.txt_in.bias.dtype

    # TODO(ryand): This shouldn't be necessary if we manage the dtypes properly in the caller.
    img = img.to(dtype=dtype)
    img_ids = img_ids.to(dtype=dtype)
    txt = txt.to(dtype=dtype)
    txt_ids = txt_ids.to(dtype=dtype)
    vec = vec.to(dtype=dtype)

    # this is ignored for schnell
    guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
    for t_curr, t_prev in tqdm(list(zip(timesteps[:-1], timesteps[1:], strict=True))):
        t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
        pred = model(
            img=img,
            img_ids=img_ids,
            txt=txt,
            txt_ids=txt_ids,
            y=vec,
            timesteps=t_vec,
            guidance=guidance_vec,
        )

        img = img + (t_prev - t_curr) * pred
        step_callback()

    return img
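
# The update img = img + (t_prev - t_curr) * pred is one explicit Euler step of the
# flow ODE dx/dt = v(x, t), integrated from t=1 (noise) down to t=0 (image). A runnable
# toy sketch with the constant velocity of a straight noise-to-data path (the
# rectified-flow ideal), which Euler integration recovers exactly:
def _demo_euler_integration() -> None:
    import torch

    x0, x1 = torch.randn(4), torch.randn(4)  # x0: data, x1: noise (illustrative)
    v = x1 - x0  # ideal velocity of the straight path x_t = (1 - t) * x0 + t * x1
    x = x1.clone()
    ts = [1.0, 0.75, 0.5, 0.25, 0.0]
    for t_curr, t_prev in zip(ts[:-1], ts[1:]):
        x = x + (t_prev - t_curr) * v  # same update rule as denoise() above
    assert torch.allclose(x, x0, atol=1e-6)
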
def unpack(x: Tensor, height: int, width: int) -> Tensor:
    return rearrange(
        x,
        "b (h w) (c ph pw) -> b c (h ph) (w pw)",
        h=math.ceil(height / 16),
        w=math.ceil(width / 16),
        ph=2,
        pw=2,
    )


def prepare_latent_img_patches(latent_img: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Convert an input image in latent space to patches for diffusion.

    This implementation was extracted from:
    https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/sampling.py#L32

    Returns:
        tuple[Tensor, Tensor]: (img, img_ids), as defined in the original flux repo.
    """
    bs, c, h, w = latent_img.shape

    # Pixel unshuffle with a scale of 2, and flatten the height/width dimensions to get an array of patches.
    img = rearrange(latent_img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
    if img.shape[0] == 1 and bs > 1:
        img = repeat(img, "1 ... -> bs ...", bs=bs)

    # Generate patch position ids.
    img_ids = torch.zeros(h // 2, w // 2, 3, device=img.device)
    img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2, device=img.device)[:, None]
    img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2, device=img.device)[None, :]
    img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)

    return img, img_ids
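
# Packing (pixel-unshuffle into 2x2 patches, flattened to a token sequence) and
# unpack() are exact inverses, so a latent round-trips losslessly. A runnable sketch
# using einops directly (latent size chosen for illustration):
def _demo_patch_round_trip() -> None:
    import torch
    from einops import rearrange

    latent = torch.randn(1, 16, 8, 8)  # latent for a 64x64 image: 2 * ceil(64 / 16) = 8
    packed = rearrange(latent, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
    assert packed.shape == (1, 16, 64)  # 16 tokens of dim 16 * 2 * 2
    restored = rearrange(packed, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=4, w=4, ph=2, pw=2)
    assert torch.equal(latent, restored)
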
@@ -1,71 +0,0 @@
# Initially pulled from https://github.com/black-forest-labs/flux

from dataclasses import dataclass
from typing import Dict, Literal

from invokeai.backend.flux.model import FluxParams
from invokeai.backend.flux.modules.autoencoder import AutoEncoderParams


@dataclass
class ModelSpec:
    params: FluxParams
    ae_params: AutoEncoderParams
    ckpt_path: str | None
    ae_path: str | None
    repo_id: str | None
    repo_flow: str | None
    repo_ae: str | None


max_seq_lengths: Dict[str, Literal[256, 512]] = {
    "flux-dev": 512,
    "flux-schnell": 256,
}


ae_params = {
    "flux": AutoEncoderParams(
        resolution=256,
        in_channels=3,
        ch=128,
        out_ch=3,
        ch_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        z_channels=16,
        scale_factor=0.3611,
        shift_factor=0.1159,
    )
}


params = {
    "flux-dev": FluxParams(
        in_channels=64,
        vec_in_dim=768,
        context_in_dim=4096,
        hidden_size=3072,
        mlp_ratio=4.0,
        num_heads=24,
        depth=19,
        depth_single_blocks=38,
        axes_dim=[16, 56, 56],
        theta=10_000,
        qkv_bias=True,
        guidance_embed=True,
    ),
    "flux-schnell": FluxParams(
        in_channels=64,
        vec_in_dim=768,
        context_in_dim=4096,
        hidden_size=3072,
        mlp_ratio=4.0,
        num_heads=24,
        depth=19,
        depth_single_blocks=38,
        axes_dim=[16, 56, 56],
        theta=10_000,
        qkv_bias=True,
        guidance_embed=False,
    ),
}
@@ -52,7 +52,6 @@ class BaseModelType(str, Enum):
    StableDiffusion2 = "sd-2"
    StableDiffusionXL = "sdxl"
    StableDiffusionXLRefiner = "sdxl-refiner"
    Flux = "flux"
    # Kandinsky2_1 = "kandinsky-2.1"


@@ -67,9 +66,7 @@ class ModelType(str, Enum):
    TextualInversion = "embedding"
    IPAdapter = "ip_adapter"
    CLIPVision = "clip_vision"
    CLIPEmbed = "clip_embed"
    T2IAdapter = "t2i_adapter"
    T5Encoder = "t5_encoder"
    SpandrelImageToImage = "spandrel_image_to_image"


@@ -77,7 +74,6 @@ class SubModelType(str, Enum):
    """Submodel type."""

    UNet = "unet"
    Transformer = "transformer"
    TextEncoder = "text_encoder"
    TextEncoder2 = "text_encoder_2"
    Tokenizer = "tokenizer"
@@ -108,9 +104,6 @@ class ModelFormat(str, Enum):
    EmbeddingFile = "embedding_file"
    EmbeddingFolder = "embedding_folder"
    InvokeAI = "invokeai"
    T5Encoder = "t5_encoder"
    BnbQuantizedLlmInt8b = "bnb_quantized_int8b"
    BnbQuantizednf4b = "bnb_quantized_nf4b"


class SchedulerPredictionType(str, Enum):
@@ -193,9 +186,7 @@ class ModelConfigBase(BaseModel):
class CheckpointConfigBase(ModelConfigBase):
    """Model config for checkpoint-style models."""

    format: Literal[ModelFormat.Checkpoint, ModelFormat.BnbQuantizednf4b] = Field(
        description="Format of the provided checkpoint model", default=ModelFormat.Checkpoint
    )
    format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint
    config_path: str = Field(description="path to the checkpoint model config file")
    converted_at: Optional[float] = Field(
        description="When this model was last converted to diffusers", default_factory=time.time
@@ -214,26 +205,6 @@ class LoRAConfigBase(ModelConfigBase):
    trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)


class T5EncoderConfigBase(ModelConfigBase):
    type: Literal[ModelType.T5Encoder] = ModelType.T5Encoder


class T5EncoderConfig(T5EncoderConfigBase):
    format: Literal[ModelFormat.T5Encoder] = ModelFormat.T5Encoder

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.T5Encoder.value}.{ModelFormat.T5Encoder.value}")


class T5EncoderBnbQuantizedLlmInt8bConfig(T5EncoderConfigBase):
    format: Literal[ModelFormat.BnbQuantizedLlmInt8b] = ModelFormat.BnbQuantizedLlmInt8b

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.T5Encoder.value}.{ModelFormat.BnbQuantizedLlmInt8b.value}")


class LoRALyCORISConfig(LoRAConfigBase):
    """Model config for LoRA/Lycoris models."""

@@ -258,6 +229,7 @@ class VAECheckpointConfig(CheckpointConfigBase):
    """Model config for standalone VAE models."""

    type: Literal[ModelType.VAE] = ModelType.VAE
    format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint

    @staticmethod
    def get_tag() -> Tag:
@@ -296,6 +268,7 @@ class ControlNetCheckpointConfig(CheckpointConfigBase, ControlAdapterConfigBase)
    """Model config for ControlNet models (diffusers version)."""

    type: Literal[ModelType.ControlNet] = ModelType.ControlNet
    format: Literal[ModelFormat.Checkpoint] = ModelFormat.Checkpoint

    @staticmethod
    def get_tag() -> Tag:
@@ -344,21 +317,6 @@ class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase):
        return Tag(f"{ModelType.Main.value}.{ModelFormat.Checkpoint.value}")


class MainBnbQuantized4bCheckpointConfig(CheckpointConfigBase, MainConfigBase):
    """Model config for main checkpoint models."""

    prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
    upcast_attention: bool = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.format = ModelFormat.BnbQuantizednf4b

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.Main.value}.{ModelFormat.BnbQuantizednf4b.value}")


class MainDiffusersConfig(DiffusersConfigBase, MainConfigBase):
    """Model config for main diffusers models."""

@@ -392,17 +350,6 @@ class IPAdapterCheckpointConfig(IPAdapterBaseConfig):
        return Tag(f"{ModelType.IPAdapter.value}.{ModelFormat.Checkpoint.value}")


class CLIPEmbedDiffusersConfig(DiffusersConfigBase):
    """Model config for Clip Embeddings."""

    type: Literal[ModelType.CLIPEmbed] = ModelType.CLIPEmbed
    format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers

    @staticmethod
    def get_tag() -> Tag:
        return Tag(f"{ModelType.CLIPEmbed.value}.{ModelFormat.Diffusers.value}")


class CLIPVisionDiffusersConfig(DiffusersConfigBase):
    """Model config for CLIPVision."""

@@ -461,15 +408,12 @@ AnyModelConfig = Annotated[
    Union[
        Annotated[MainDiffusersConfig, MainDiffusersConfig.get_tag()],
        Annotated[MainCheckpointConfig, MainCheckpointConfig.get_tag()],
        Annotated[MainBnbQuantized4bCheckpointConfig, MainBnbQuantized4bCheckpointConfig.get_tag()],
        Annotated[VAEDiffusersConfig, VAEDiffusersConfig.get_tag()],
        Annotated[VAECheckpointConfig, VAECheckpointConfig.get_tag()],
        Annotated[ControlNetDiffusersConfig, ControlNetDiffusersConfig.get_tag()],
        Annotated[ControlNetCheckpointConfig, ControlNetCheckpointConfig.get_tag()],
        Annotated[LoRALyCORISConfig, LoRALyCORISConfig.get_tag()],
        Annotated[LoRADiffusersConfig, LoRADiffusersConfig.get_tag()],
        Annotated[T5EncoderConfig, T5EncoderConfig.get_tag()],
        Annotated[T5EncoderBnbQuantizedLlmInt8bConfig, T5EncoderBnbQuantizedLlmInt8bConfig.get_tag()],
        Annotated[TextualInversionFileConfig, TextualInversionFileConfig.get_tag()],
        Annotated[TextualInversionFolderConfig, TextualInversionFolderConfig.get_tag()],
        Annotated[IPAdapterInvokeAIConfig, IPAdapterInvokeAIConfig.get_tag()],
@@ -477,7 +421,6 @@ AnyModelConfig = Annotated[
        Annotated[T2IAdapterConfig, T2IAdapterConfig.get_tag()],
        Annotated[SpandrelImageToImageConfig, SpandrelImageToImageConfig.get_tag()],
        Annotated[CLIPVisionDiffusersConfig, CLIPVisionDiffusersConfig.get_tag()],
        Annotated[CLIPEmbedDiffusersConfig, CLIPEmbedDiffusersConfig.get_tag()],
    ],
    Discriminator(get_model_discriminator_value),
]
@@ -1,234 +0,0 @@
# Copyright (c) 2024, Brandon W. Rising and the InvokeAI Development Team
"""Class for Flux model loading in InvokeAI."""

from pathlib import Path
from typing import Optional

import accelerate
import torch
from safetensors.torch import load_file
from transformers import AutoConfig, AutoModelForTextEncoding, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

from invokeai.app.services.config.config_default import get_config
from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
from invokeai.backend.flux.util import ae_params, params
from invokeai.backend.model_manager import (
    AnyModel,
    AnyModelConfig,
    BaseModelType,
    ModelFormat,
    ModelType,
    SubModelType,
)
from invokeai.backend.model_manager.config import (
    CheckpointConfigBase,
    CLIPEmbedDiffusersConfig,
    MainBnbQuantized4bCheckpointConfig,
    MainCheckpointConfig,
    T5EncoderBnbQuantizedLlmInt8bConfig,
    T5EncoderConfig,
    VAECheckpointConfig,
)
from invokeai.backend.model_manager.load.load_default import ModelLoader
from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry
from invokeai.backend.util.silence_warnings import SilenceWarnings

try:
    from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
    from invokeai.backend.quantization.bnb_nf4 import quantize_model_nf4

    bnb_available = True
except ImportError:
    bnb_available = False

app_config = get_config()


@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.VAE, format=ModelFormat.Checkpoint)
class FluxVAELoader(ModelLoader):
    """Class to load VAE models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, VAECheckpointConfig):
            raise ValueError("Only VAECheckpointConfig models are currently supported here.")
        model_path = Path(config.path)

        with SilenceWarnings():
            model = AutoEncoder(ae_params[config.config_path])
            sd = load_file(model_path)
            model.load_state_dict(sd, assign=True)
            model.to(dtype=self._torch_dtype)

        return model


@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPEmbed, format=ModelFormat.Diffusers)
class ClipCheckpointModel(ModelLoader):
    """Class to load main models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, CLIPEmbedDiffusersConfig):
            raise ValueError("Only CLIPEmbedDiffusersConfig models are currently supported here.")

        match submodel_type:
            case SubModelType.Tokenizer:
                return CLIPTokenizer.from_pretrained(Path(config.path) / "tokenizer")
            case SubModelType.TextEncoder:
                return CLIPTextModel.from_pretrained(Path(config.path) / "text_encoder")

        raise ValueError(
            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
        )


@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.BnbQuantizedLlmInt8b)
class BnbQuantizedLlmInt8bCheckpointModel(ModelLoader):
    """Class to load main models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, T5EncoderBnbQuantizedLlmInt8bConfig):
            raise ValueError("Only T5EncoderBnbQuantizedLlmInt8bConfig models are currently supported here.")
        if not bnb_available:
            raise ImportError(
                "The bnb modules are not available. Please install bitsandbytes if it is available on your platform."
            )
        match submodel_type:
            case SubModelType.Tokenizer2:
                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
            case SubModelType.TextEncoder2:
                te2_model_path = Path(config.path) / "text_encoder_2"
                model_config = AutoConfig.from_pretrained(te2_model_path)
                with accelerate.init_empty_weights():
                    model = AutoModelForTextEncoding.from_config(model_config)
                    model = quantize_model_llm_int8(model, modules_to_not_convert=set())

                state_dict_path = te2_model_path / "bnb_llm_int8_model.safetensors"
                state_dict = load_file(state_dict_path)
                self._load_state_dict_into_t5(model, state_dict)

                return model

        raise ValueError(
            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
        )

    @classmethod
    def _load_state_dict_into_t5(cls, model: T5EncoderModel, state_dict: dict[str, torch.Tensor]):
        # There is a shared reference to a single weight tensor in the model.
        # Both "encoder.embed_tokens.weight" and "shared.weight" refer to the same tensor, so only the latter should
        # be present in the state_dict.
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False, assign=True)
        assert len(unexpected_keys) == 0
        assert set(missing_keys) == {"encoder.embed_tokens.weight"}
        # Assert that the layers we expect to be shared are actually shared.
        assert model.encoder.embed_tokens.weight is model.shared.weight
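
# Weight tying means two module attributes hold the *same* tensor object, so a
# state_dict needs only one copy; loading with strict=False then reports the alias as
# "missing" even though it is populated. A runnable sketch of the same pattern:
def _demo_weight_tying() -> None:
    import torch

    emb = torch.nn.Embedding(10, 4)
    head = torch.nn.Linear(4, 10, bias=False)
    head.weight = emb.weight  # tie: both names now reference one Parameter
    assert head.weight is emb.weight
    head.weight.data.fill_(1.0)
    assert emb.weight.data.sum() == 40.0  # mutation is visible through both names
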
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.T5Encoder)
class T5EncoderCheckpointModel(ModelLoader):
    """Class to load main models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, T5EncoderConfig):
            raise ValueError("Only T5EncoderConfig models are currently supported here.")

        match submodel_type:
            case SubModelType.Tokenizer2:
                return T5Tokenizer.from_pretrained(Path(config.path) / "tokenizer_2", max_length=512)
            case SubModelType.TextEncoder2:
                return T5EncoderModel.from_pretrained(Path(config.path) / "text_encoder_2")

        raise ValueError(
            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
        )


@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.Main, format=ModelFormat.Checkpoint)
class FluxCheckpointModel(ModelLoader):
    """Class to load main models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, CheckpointConfigBase):
            raise ValueError("Only CheckpointConfigBase models are currently supported here.")

        match submodel_type:
            case SubModelType.Transformer:
                return self._load_from_singlefile(config)

        raise ValueError(
            f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
        )

    def _load_from_singlefile(
        self,
        config: AnyModelConfig,
    ) -> AnyModel:
        assert isinstance(config, MainCheckpointConfig)
        model_path = Path(config.path)

        with SilenceWarnings():
            model = Flux(params[config.config_path])
            sd = load_file(model_path)
            model.load_state_dict(sd, assign=True)
        return model


@ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.Main, format=ModelFormat.BnbQuantizednf4b)
class FluxBnbQuantizednf4bCheckpointModel(ModelLoader):
    """Class to load main models."""

    def _load_model(
        self,
        config: AnyModelConfig,
        submodel_type: Optional[SubModelType] = None,
    ) -> AnyModel:
        if not isinstance(config, CheckpointConfigBase):
            raise ValueError("Only CheckpointConfigBase models are currently supported here.")

        match submodel_type:
            case SubModelType.Transformer:
                return self._load_from_singlefile(config)

        raise ValueError(
            f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
        )

    def _load_from_singlefile(
        self,
        config: AnyModelConfig,
    ) -> AnyModel:
        assert isinstance(config, MainBnbQuantized4bCheckpointConfig)
        if not bnb_available:
            raise ImportError(
                "The bnb modules are not available. Please install bitsandbytes if it is available on your platform."
            )
        model_path = Path(config.path)

        with SilenceWarnings():
            with accelerate.init_empty_weights():
                model = Flux(params[config.config_path])
                model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.bfloat16)
            sd = load_file(model_path)
            model.load_state_dict(sd, assign=True)
        return model
@@ -78,12 +78,7 @@ class GenericDiffusersLoader(ModelLoader):

    # TO DO: Add exception handling
    def _hf_definition_to_type(self, module: str, class_name: str) -> ModelMixin:  # fix with correct type
        if module in [
            "diffusers",
            "transformers",
            "invokeai.backend.quantization.fast_quantized_transformers_model",
            "invokeai.backend.quantization.fast_quantized_diffusion_model",
        ]:
        if module in ["diffusers", "transformers"]:
            res_type = sys.modules[module]
        else:
            res_type = sys.modules["diffusers"].pipelines
@@ -36,18 +36,8 @@ VARIANT_TO_IN_CHANNEL_MAP = {
}


@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusionXLRefiner, type=ModelType.Main, format=ModelFormat.Diffusers
)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusionXL, type=ModelType.Main, format=ModelFormat.Checkpoint)
@ModelLoaderRegistry.register(
    base=BaseModelType.StableDiffusionXLRefiner, type=ModelType.Main, format=ModelFormat.Checkpoint
)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Main, format=ModelFormat.Diffusers)
@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Main, format=ModelFormat.Checkpoint)
class StableDiffusionDiffusersModel(GenericDiffusersLoader):
    """Class to load main models."""

@@ -9,7 +9,7 @@ from typing import Optional
import torch
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from transformers import CLIPTokenizer, T5Tokenizer, T5TokenizerFast
from transformers import CLIPTokenizer

from invokeai.backend.image_util.depth_anything.depth_anything_pipeline import DepthAnythingPipeline
from invokeai.backend.image_util.grounding_dino.grounding_dino_pipeline import GroundingDinoPipeline
@@ -50,17 +50,6 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
        ),
    ):
        return model.calc_size()
    elif isinstance(
        model,
        (
            T5TokenizerFast,
            T5Tokenizer,
        ),
    ):
        # HACK(ryand): len(model) just returns the vocabulary size, so this is blatantly wrong. It should be small
        # relative to the text encoder that it's used with, so shouldn't matter too much, but we should fix this at
        # some point.
        return len(model)
    else:
        # TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the
        # supported model types.
@@ -95,7 +95,6 @@ class ModelProbe(object):
    }

    CLASS2TYPE = {
        "FluxPipeline": ModelType.Main,
        "StableDiffusionPipeline": ModelType.Main,
        "StableDiffusionInpaintPipeline": ModelType.Main,
        "StableDiffusionXLPipeline": ModelType.Main,
@@ -107,7 +106,6 @@ class ModelProbe(object):
        "ControlNetModel": ModelType.ControlNet,
        "CLIPVisionModelWithProjection": ModelType.CLIPVision,
        "T2IAdapter": ModelType.T2IAdapter,
        "CLIPModel": ModelType.CLIPEmbed,
    }

    @classmethod
@@ -163,7 +161,7 @@ class ModelProbe(object):
        fields["description"] = (
            fields.get("description") or f"{fields['base'].value} {model_type.value} model {fields['name']}"
        )
        fields["format"] = ModelFormat(fields.get("format")) if "format" in fields else probe.get_format()
        fields["format"] = fields.get("format") or probe.get_format()
        fields["hash"] = fields.get("hash") or ModelHash(algorithm=hash_algo).hash(model_path)

        fields["default_settings"] = fields.get("default_settings")
@@ -178,10 +176,10 @@ class ModelProbe(object):
        fields["repo_variant"] = fields.get("repo_variant") or probe.get_repo_variant()

        # additional fields needed for main and controlnet models
        if fields["type"] in [ModelType.Main, ModelType.ControlNet, ModelType.VAE] and fields["format"] in [
            ModelFormat.Checkpoint,
            ModelFormat.BnbQuantizednf4b,
        ]:
        if (
            fields["type"] in [ModelType.Main, ModelType.ControlNet, ModelType.VAE]
            and fields["format"] is ModelFormat.Checkpoint
        ):
            ckpt_config_path = cls._get_checkpoint_config_path(
                model_path,
                model_type=fields["type"],
@@ -224,8 +222,7 @@ class ModelProbe(object):
        ckpt = ckpt.get("state_dict", ckpt)

        for key in [str(k) for k in ckpt.keys()]:
            if key.startswith(("cond_stage_model.", "first_stage_model.", "model.diffusion_model.", "double_blocks.")):
                # Keys starting with double_blocks are associated with Flux models
            if key.startswith(("cond_stage_model.", "first_stage_model.", "model.diffusion_model.")):
                return ModelType.Main
            elif key.startswith(("encoder.conv_in", "decoder.conv_in")):
                return ModelType.VAE
@@ -324,27 +321,10 @@ class ModelProbe(object):
            return possible_conf.absolute()

        if model_type is ModelType.Main:
            if base_type == BaseModelType.Flux:
                # TODO: Decide between dev/schnell
                checkpoint = ModelProbe._scan_and_load_checkpoint(model_path)
                state_dict = checkpoint.get("state_dict") or checkpoint
                if "guidance_in.out_layer.weight" in state_dict:
                    # For flux, this is a key in invokeai.backend.flux.util.params
                    # Due to model type and format being the discriminator for model configs, this
                    # is used rather than attempting to support flux with separate model types and format
                    # If changed in the future, please fix me
                    config_file = "flux-dev"
                else:
                    # For flux, this is a key in invokeai.backend.flux.util.params
                    # Due to model type and format being the discriminator for model configs, this
                    # is used rather than attempting to support flux with separate model types and format
                    # If changed in the future, please fix me
                    config_file = "flux-schnell"
            else:
                config_file = LEGACY_CONFIGS[base_type][variant_type]
                if isinstance(config_file, dict):  # need another tier for sd-2.x models
                    config_file = config_file[prediction_type]
                config_file = f"stable-diffusion/{config_file}"
            config_file = LEGACY_CONFIGS[base_type][variant_type]
            if isinstance(config_file, dict):  # need another tier for sd-2.x models
                config_file = config_file[prediction_type]
            config_file = f"stable-diffusion/{config_file}"
        elif model_type is ModelType.ControlNet:
            config_file = (
                "controlnet/cldm_v15.yaml"
@@ -353,13 +333,7 @@ class ModelProbe(object):
            )
        elif model_type is ModelType.VAE:
            config_file = (
                # For flux, this is a key in invokeai.backend.flux.util.ae_params
                # Due to model type and format being the discriminator for model configs, this
                # is used rather than attempting to support flux with separate model types and format
                # If changed in the future, please fix me
                "flux"
                if base_type is BaseModelType.Flux
                else "stable-diffusion/v1-inference.yaml"
                "stable-diffusion/v1-inference.yaml"
                if base_type is BaseModelType.StableDiffusion1
                else "stable-diffusion/sd_xl_base.yaml"
                if base_type is BaseModelType.StableDiffusionXL
@@ -442,15 +416,11 @@ class CheckpointProbeBase(ProbeBase):
        self.checkpoint = ModelProbe._scan_and_load_checkpoint(model_path)

    def get_format(self) -> ModelFormat:
        state_dict = self.checkpoint.get("state_dict") or self.checkpoint
        if "double_blocks.0.img_attn.proj.weight.quant_state.bitsandbytes__nf4" in state_dict:
            return ModelFormat.BnbQuantizednf4b
        return ModelFormat("checkpoint")

    def get_variant_type(self) -> ModelVariantType:
        model_type = ModelProbe.get_model_type_from_checkpoint(self.model_path, self.checkpoint)
        base_type = self.get_base_type()
        if model_type != ModelType.Main or base_type == BaseModelType.Flux:
        if model_type != ModelType.Main:
            return ModelVariantType.Normal
        state_dict = self.checkpoint.get("state_dict") or self.checkpoint
        in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1]
@@ -470,8 +440,6 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
    def get_base_type(self) -> BaseModelType:
        checkpoint = self.checkpoint
        state_dict = self.checkpoint.get("state_dict") or checkpoint
        if "double_blocks.0.img_attn.norm.key_norm.scale" in state_dict:
            return BaseModelType.Flux
        key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
        if key_name in state_dict and state_dict[key_name].shape[-1] == 768:
            return BaseModelType.StableDiffusion1
@@ -514,7 +482,6 @@ class VaeCheckpointProbe(CheckpointProbeBase):
            (r"xl", BaseModelType.StableDiffusionXL),
            (r"sd2", BaseModelType.StableDiffusion2),
            (r"vae", BaseModelType.StableDiffusion1),
            (r"FLUX.1-schnell_ae", BaseModelType.Flux),
        ]:
            if re.search(regexp, self.model_path.name, re.IGNORECASE):
                return basetype
@@ -746,11 +713,6 @@ class TextualInversionFolderProbe(FolderProbeBase):
        return TextualInversionCheckpointProbe(path).get_base_type()


class T5EncoderFolderProbe(FolderProbeBase):
    def get_format(self) -> ModelFormat:
        return ModelFormat.T5Encoder


class ONNXFolderProbe(PipelineFolderProbe):
    def get_base_type(self) -> BaseModelType:
        # Due to the way the installer is set up, the configuration file for safetensors
@@ -843,11 +805,6 @@ class CLIPVisionFolderProbe(FolderProbeBase):
        return BaseModelType.Any


class CLIPEmbedFolderProbe(FolderProbeBase):
    def get_base_type(self) -> BaseModelType:
        return BaseModelType.Any


class SpandrelImageToImageFolderProbe(FolderProbeBase):
    def get_base_type(self) -> BaseModelType:
        raise NotImplementedError()
@@ -878,10 +835,8 @@ ModelProbe.register_probe("diffusers", ModelType.Main, PipelineFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.VAE, VaeFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.LoRA, LoRAFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.TextualInversion, TextualInversionFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.T5Encoder, T5EncoderFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.ControlNet, ControlNetFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.IPAdapter, IPAdapterFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.CLIPEmbed, CLIPEmbedFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.CLIPVision, CLIPVisionFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.T2IAdapter, T2IAdapterFolderProbe)
ModelProbe.register_probe("diffusers", ModelType.SpandrelImageToImage, SpandrelImageToImageFolderProbe)
@@ -2,7 +2,7 @@ from typing import Optional

from pydantic import BaseModel

from invokeai.backend.model_manager.config import BaseModelType, ModelFormat, ModelType
from invokeai.backend.model_manager.config import BaseModelType, ModelType


class StarterModelWithoutDependencies(BaseModel):
@@ -11,7 +11,6 @@ class StarterModelWithoutDependencies(BaseModel):
    name: str
    base: BaseModelType
    type: ModelType
    format: Optional[ModelFormat] = None
    is_installed: bool = False


@@ -52,76 +51,10 @@ cyberrealistic_negative = StarterModel(
    type=ModelType.TextualInversion,
)

t5_base_encoder = StarterModel(
    name="t5_base_encoder",
    base=BaseModelType.Any,
    source="InvokeAI/t5-v1_1-xxl::bfloat16",
    description="T5-XXL text encoder (used in FLUX pipelines). ~8GB",
    type=ModelType.T5Encoder,
)

t5_8b_quantized_encoder = StarterModel(
    name="t5_bnb_int8_quantized_encoder",
    base=BaseModelType.Any,
    source="InvokeAI/t5-v1_1-xxl::bnb_llm_int8",
    description="T5-XXL text encoder with bitsandbytes LLM.int8() quantization (used in FLUX pipelines). ~5GB",
    type=ModelType.T5Encoder,
    format=ModelFormat.BnbQuantizedLlmInt8b,
)

clip_l_encoder = StarterModel(
    name="clip-vit-large-patch14",
    base=BaseModelType.Any,
    source="InvokeAI/clip-vit-large-patch14-text-encoder::bfloat16",
    description="CLIP-L text encoder (used in FLUX pipelines). ~250MB",
    type=ModelType.CLIPEmbed,
)

flux_vae = StarterModel(
    name="FLUX.1-schnell_ae",
    base=BaseModelType.Flux,
    source="black-forest-labs/FLUX.1-schnell::ae.safetensors",
    description="FLUX VAE compatible with both schnell and dev variants.",
    type=ModelType.VAE,
)


# List of starter models, displayed on the frontend.
# The order/sort of this list is not changed by the frontend - set it how you want it here.
STARTER_MODELS: list[StarterModel] = [
    # region: Main
    StarterModel(
        name="FLUX Schnell (Quantized)",
        base=BaseModelType.Flux,
        source="InvokeAI/flux_schnell::transformer/bnb_nf4/flux1-schnell-bnb_nf4.safetensors",
        description="FLUX schnell transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
        type=ModelType.Main,
        dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
    ),
    StarterModel(
        name="FLUX Dev (Quantized)",
        base=BaseModelType.Flux,
        source="InvokeAI/flux_dev::transformer/bnb_nf4/flux1-dev-bnb_nf4.safetensors",
        description="FLUX dev transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
        type=ModelType.Main,
        dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
    ),
    StarterModel(
        name="FLUX Schnell",
        base=BaseModelType.Flux,
        source="InvokeAI/flux_schnell::transformer/base/flux1-schnell.safetensors",
        description="FLUX schnell transformer in bfloat16. Total size with dependencies: ~33GB",
        type=ModelType.Main,
        dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
    ),
    StarterModel(
        name="FLUX Dev",
        base=BaseModelType.Flux,
        source="InvokeAI/flux_dev::transformer/base/flux1-dev.safetensors",
        description="FLUX dev transformer in bfloat16. Total size with dependencies: ~33GB",
        type=ModelType.Main,
        dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
    ),
    StarterModel(
        name="CyberRealistic v4.1",
        base=BaseModelType.StableDiffusion1,
@@ -192,7 +125,6 @@ STARTER_MODELS: list[StarterModel] = [
    # endregion
    # region VAE
    sdxl_fp16_vae_fix,
    flux_vae,
    # endregion
    # region LoRA
    StarterModel(
@@ -518,11 +450,6 @@ STARTER_MODELS: list[StarterModel] = [
        type=ModelType.SpandrelImageToImage,
    ),
    # endregion
    # region TextEncoders
    t5_base_encoder,
    t5_8b_quantized_encoder,
    clip_l_encoder,
    # endregion
]

assert len(STARTER_MODELS) == len({m.source for m in STARTER_MODELS}), "Duplicate starter models"
@@ -54,7 +54,6 @@ def filter_files(
            "lora_weights.safetensors",
            "weights.pb",
            "onnx_data",
            "spiece.model",  # Added for `black-forest-labs/FLUX.1-schnell`.
        )
    ):
        paths.append(file)
@@ -63,13 +62,13 @@ def filter_files(
    # downloading random checkpoints that might also be in the repo. However there is no guarantee
    # that a checkpoint doesn't contain "model" in its name, and no guarantee that future diffusers models
    # will adhere to this naming convention, so this is an area to be careful of.
    elif re.search(r"model.*\.(safetensors|bin|onnx|xml|pth|pt|ckpt|msgpack)$", file.name):
    elif re.search(r"model(\.[^.]+)?\.(safetensors|bin|onnx|xml|pth|pt|ckpt|msgpack)$", file.name):
        paths.append(file)

    # limit search to subfolder if requested
    if subfolder:
        subfolder = root / subfolder
        paths = [x for x in paths if Path(subfolder) in x.parents]
        paths = [x for x in paths if x.parent == Path(subfolder)]

    # _filter_by_variant uniquifies the paths and returns a set
    return sorted(_filter_by_variant(paths, variant))
@@ -98,9 +97,7 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
        if variant == ModelRepoVariant.Flax:
            result.add(path)

        # Note: '.model' was added to support:
        # https://huggingface.co/black-forest-labs/FLUX.1-schnell/blob/768d12a373ed5cc9ef9a9dea7504dc09fcc14842/tokenizer_2/spiece.model
        elif path.suffix in [".json", ".txt", ".model"]:
        elif path.suffix in [".json", ".txt"]:
            result.add(path)

        elif variant in [
@@ -143,23 +140,6 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path
            continue

    for candidate_list in subfolder_weights.values():
        # Check if at least one of the files has the explicit fp16 variant.
        at_least_one_fp16 = False
        for candidate in candidate_list:
            if len(candidate.path.suffixes) == 2 and candidate.path.suffixes[0] == ".fp16":
                at_least_one_fp16 = True
                break

        if not at_least_one_fp16:
            # If none of the candidates in this candidate_list have the explicit fp16 variant label, then this
            # candidate_list probably doesn't adhere to the variant naming convention that we expected. In this case,
            # we'll simply keep all the candidates. An example of a model that hits this case is
            # `black-forest-labs/FLUX.1-schnell` (as of commit 012d2fd).
            for candidate in candidate_list:
                result.add(candidate.path)

        # The candidate_list seems to have the expected variant naming convention. We'll select the highest scoring
        # candidate.
        highest_score_candidate = max(candidate_list, key=lambda candidate: candidate.score)
        if highest_score_candidate:
            result.add(highest_score_candidate.path)
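
# The fp16-variant check relies on pathlib treating "model.fp16.safetensors" as having
# two suffixes, [".fp16", ".safetensors"]. A runnable sketch of that detection
# (file names illustrative):
def _demo_fp16_suffix_detection() -> None:
    from pathlib import Path

    def is_fp16_variant(p: Path) -> bool:
        return len(p.suffixes) == 2 and p.suffixes[0] == ".fp16"

    assert is_fp16_variant(Path("unet/diffusion_pytorch_model.fp16.safetensors"))
    assert not is_fp16_variant(Path("unet/diffusion_pytorch_model.safetensors"))
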
@@ -1,125 +0,0 @@
import bitsandbytes as bnb
import torch

# This file contains utils for working with models that use bitsandbytes LLM.int8() quantization.
# The utils in this file are partially inspired by:
# https://github.com/Lightning-AI/pytorch-lightning/blob/1551a16b94f5234a4a78801098f64d0732ef5cb5/src/lightning/fabric/plugins/precision/bitsandbytes.py

# NOTE(ryand): All of the custom state_dict manipulation logic in this file is pretty hacky. This could be made much
# cleaner by re-implementing bnb.nn.Linear8bitLt with proper use of buffers and less magic. But, for now, we try to
# stick close to the bitsandbytes classes to make interoperability easier with other models that might use bitsandbytes.


class InvokeInt8Params(bnb.nn.Int8Params):
    """We override cuda() to avoid re-quantizing the weights in the following cases:
    - We loaded quantized weights from a state_dict on the cpu, and then moved the model to the gpu.
    - We are moving the model back-and-forth between the cpu and gpu.
    """

    def cuda(self, device):
        if self.has_fp16_weights:
            return super().cuda(device)
        elif self.CB is not None and self.SCB is not None:
            self.data = self.data.cuda()
            self.CB = self.data
            self.SCB = self.SCB.cuda()
        else:
            # We store the 8-bit row-major weight.
            # We convert this weight to the turing/ampere weight during the first inference pass.
            B = self.data.contiguous().half().cuda(device)
            CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
            del CBt
            del SCBt
            self.data = CB
            self.CB = CB
            self.SCB = SCB

        return self


class InvokeLinear8bitLt(bnb.nn.Linear8bitLt):
    def _load_from_state_dict(
        self,
        state_dict: dict[str, torch.Tensor],
        prefix: str,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        weight = state_dict.pop(prefix + "weight")
        bias = state_dict.pop(prefix + "bias", None)

        # See `bnb.nn.Linear8bitLt._save_to_state_dict()` for the serialization logic of SCB and weight_format.
        scb = state_dict.pop(prefix + "SCB", None)
        # weight_format is unused, but we pop it so we can validate that there are no unexpected keys.
        _weight_format = state_dict.pop(prefix + "weight_format", None)

        # TODO(ryand): Technically, we should be using `strict`, `missing_keys`, `unexpected_keys`, and `error_msgs`
        # rather than raising an exception to correctly implement this API.
        assert len(state_dict) == 0

        if scb is not None:
            # We are loading a pre-quantized state dict.
            self.weight = InvokeInt8Params(
                data=weight,
                requires_grad=self.weight.requires_grad,
                has_fp16_weights=False,
                # Note: After quantization, CB is the same as weight.
                CB=weight,
                SCB=scb,
            )
            self.bias = bias if bias is None else torch.nn.Parameter(bias)
        else:
            # We are loading a non-quantized state dict.

            # We could simply call the `super()._load_from_state_dict()` method here, but then we wouldn't be able to
            # load from a state_dict into a model on the "meta" device. Attempting to load into a model on the "meta"
            # device requires setting `assign=True`, and doing this with the default `super()._load_from_state_dict()`
            # implementation causes `Params4Bit` to be replaced by a `torch.nn.Parameter`. By initializing a new
            # `Params4bit` object, we work around this issue. It's a bit hacky, but it gets the job done.
            self.weight = InvokeInt8Params(
                data=weight,
                requires_grad=self.weight.requires_grad,
                has_fp16_weights=False,
                CB=None,
                SCB=None,
            )
            self.bias = bias if bias is None else torch.nn.Parameter(bias)


def _convert_linear_layers_to_llm_8bit(
    module: torch.nn.Module, ignore_modules: set[str], outlier_threshold: float, prefix: str = ""
) -> None:
    """Convert all linear layers in the module to bnb.nn.Linear8bitLt layers."""
    for name, child in module.named_children():
        fullname = f"{prefix}.{name}" if prefix else name
        if isinstance(child, torch.nn.Linear) and not any(fullname.startswith(s) for s in ignore_modules):
            has_bias = child.bias is not None
            replacement = InvokeLinear8bitLt(
                child.in_features,
                child.out_features,
                bias=has_bias,
                has_fp16_weights=False,
                threshold=outlier_threshold,
            )
            replacement.weight.data = child.weight.data
            if has_bias:
                replacement.bias.data = child.bias.data
            replacement.requires_grad_(False)
            module.__setattr__(name, replacement)
        else:
            _convert_linear_layers_to_llm_8bit(
                child, ignore_modules, outlier_threshold=outlier_threshold, prefix=fullname
            )


def quantize_model_llm_int8(model: torch.nn.Module, modules_to_not_convert: set[str], outlier_threshold: float = 6.0):
    """Apply bitsandbytes LLM.int8() quantization to the model."""
    _convert_linear_layers_to_llm_8bit(
        module=model, ignore_modules=modules_to_not_convert, outlier_threshold=outlier_threshold
    )
    return model
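Editor's note: for orientation, a minimal sketch of the workflow these helpers are built for. The tiny module and fp16 state dict are illustrative stand-ins (not from this diff); bitsandbytes and a CUDA device are assumed to be available.

```python
import accelerate
import torch

from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8


# A toy stand-in for a real transformer; any module containing torch.nn.Linear layers works.
class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(16, 16)


# Build the skeleton on the "meta" device, then swap in InvokeLinear8bitLt layers.
with accelerate.init_empty_weights():
    model = TinyModel()
    model = quantize_model_llm_int8(model, modules_to_not_convert=set())

# Load a plain fp16 state dict. A pre-quantized dict (one containing "SCB" keys) would
# also work; see InvokeLinear8bitLt._load_from_state_dict() above.
sd = {
    "proj.weight": torch.randn(16, 16, dtype=torch.float16),
    "proj.bias": torch.randn(16, dtype=torch.float16),
}
model.load_state_dict(sd, strict=True, assign=True)

# For a non-quantized dict, InvokeInt8Params.cuda() performs the actual int8 quantization
# during this move.
model = model.to("cuda")
```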
@@ -1,156 +0,0 @@
import bitsandbytes as bnb
import torch

# This file contains utils for working with models that use bitsandbytes NF4 quantization.
# The utils in this file are partially inspired by:
# https://github.com/Lightning-AI/pytorch-lightning/blob/1551a16b94f5234a4a78801098f64d0732ef5cb5/src/lightning/fabric/plugins/precision/bitsandbytes.py

# NOTE(ryand): All of the custom state_dict manipulation logic in this file is pretty hacky. This could be made much
# cleaner by re-implementing bnb.nn.LinearNF4 with proper use of buffers and less magic. But, for now, we try to stick
# close to the bitsandbytes classes to make interoperability easier with other models that might use bitsandbytes.


class InvokeLinearNF4(bnb.nn.LinearNF4):
    """A class that extends `bnb.nn.LinearNF4` to add the following functionality:
    - Ability to load Linear NF4 layers from a pre-quantized state_dict.
    - Ability to load Linear NF4 layers from a state_dict when the model is on the "meta" device.
    """

    def _load_from_state_dict(
        self,
        state_dict: dict[str, torch.Tensor],
        prefix: str,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """This method is based on the logic in the bitsandbytes serialization unit tests for `Linear4bit`:
        https://github.com/bitsandbytes-foundation/bitsandbytes/blob/6d714a5cce3db5bd7f577bc447becc7a92d5ccc7/tests/test_linear4bit.py#L52-L71
        """
        weight = state_dict.pop(prefix + "weight")
        bias = state_dict.pop(prefix + "bias", None)
        # We expect the remaining keys to be quant_state keys.
        quant_state_sd = state_dict

        # During serialization, the quant_state is stored as subkeys of "weight." (See
        # `bnb.nn.LinearNF4._save_to_state_dict()`.) We validate that they at least have the correct prefix.
        # TODO(ryand): Technically, we should be using `strict`, `missing_keys`, `unexpected_keys`, and `error_msgs`
        # rather than raising an exception to correctly implement this API.
        assert all(k.startswith(prefix + "weight.") for k in quant_state_sd.keys())

        if len(quant_state_sd) > 0:
            # We are loading a pre-quantized state dict.
            self.weight = bnb.nn.Params4bit.from_prequantized(
                data=weight, quantized_stats=quant_state_sd, device=weight.device
            )
            self.bias = bias if bias is None else torch.nn.Parameter(bias, requires_grad=False)
        else:
            # We are loading a non-quantized state dict.

            # We could simply call the `super()._load_from_state_dict()` method here, but then we wouldn't be able to
            # load from a state_dict into a model on the "meta" device. Attempting to load into a model on the "meta"
            # device requires setting `assign=True`, and doing this with the default `super()._load_from_state_dict()`
            # implementation causes `Params4Bit` to be replaced by a `torch.nn.Parameter`. By initializing a new
            # `Params4bit` object, we work around this issue. It's a bit hacky, but it gets the job done.
            self.weight = bnb.nn.Params4bit(
                data=weight,
                requires_grad=self.weight.requires_grad,
                compress_statistics=self.weight.compress_statistics,
                quant_type=self.weight.quant_type,
                quant_storage=self.weight.quant_storage,
                module=self,
            )
            self.bias = bias if bias is None else torch.nn.Parameter(bias)


def _replace_param(
    param: torch.nn.Parameter | bnb.nn.Params4bit,
    data: torch.Tensor,
) -> torch.nn.Parameter:
    """A helper function to replace the data of a model parameter with new data in a way that allows replacing params
    on the "meta" device.

    Supports both `torch.nn.Parameter` and `bnb.nn.Params4bit` parameters.
    """
    if param.device.type == "meta":
        # Doing `param.data = data` raises a RuntimeError if param.data was on the "meta" device, so we need to
        # re-create the param instead of overwriting the data.
        if isinstance(param, bnb.nn.Params4bit):
            return bnb.nn.Params4bit(
                data,
                requires_grad=data.requires_grad,
                quant_state=param.quant_state,
                compress_statistics=param.compress_statistics,
                quant_type=param.quant_type,
            )
        return torch.nn.Parameter(data, requires_grad=data.requires_grad)

    param.data = data
    return param


def _convert_linear_layers_to_nf4(
    module: torch.nn.Module,
    ignore_modules: set[str],
    compute_dtype: torch.dtype,
    compress_statistics: bool = False,
    prefix: str = "",
) -> None:
    """Convert all linear layers in the model to NF4 quantized linear layers.

    Args:
        module: All linear layers in this module will be converted.
        ignore_modules: A set of module prefixes to ignore when converting linear layers.
        compute_dtype: The dtype to use for computation in the quantized linear layers.
        compress_statistics: Whether to enable nested quantization (aka double quantization) where the quantization
            constants from the first quantization are quantized again.
        prefix: The prefix of the current module in the model. Used to call this function recursively.
    """
    for name, child in module.named_children():
        fullname = f"{prefix}.{name}" if prefix else name
        if isinstance(child, torch.nn.Linear) and not any(fullname.startswith(s) for s in ignore_modules):
            has_bias = child.bias is not None
            replacement = InvokeLinearNF4(
                child.in_features,
                child.out_features,
                bias=has_bias,
                compute_dtype=compute_dtype,
                compress_statistics=compress_statistics,
            )
            if has_bias:
                replacement.bias = _replace_param(replacement.bias, child.bias.data)
            replacement.weight = _replace_param(replacement.weight, child.weight.data)
            replacement.requires_grad_(False)
            module.__setattr__(name, replacement)
        else:
            _convert_linear_layers_to_nf4(child, ignore_modules, compute_dtype=compute_dtype, prefix=fullname)


def quantize_model_nf4(model: torch.nn.Module, modules_to_not_convert: set[str], compute_dtype: torch.dtype):
    """Apply bitsandbytes nf4 quantization to the model.

    You likely want to call this function inside an `accelerate.init_empty_weights()` context.

    Example usage:
    ```
    # Initialize the model from a config on the meta device.
    with accelerate.init_empty_weights():
        model = ModelClass.from_config(...)

    # Add NF4 quantization linear layers to the model - still on the meta device.
    with accelerate.init_empty_weights():
        model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.float16)

    # Load a state_dict into the model. (Could be either a prequantized or non-quantized state_dict.)
    model.load_state_dict(state_dict, strict=True, assign=True)

    # Move the model to the "cuda" device. If the model was non-quantized, this is where the weight quantization takes
    # place.
    model.to("cuda")
    ```
    """
    _convert_linear_layers_to_nf4(module=model, ignore_modules=modules_to_not_convert, compute_dtype=compute_dtype)
    return model
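Editor's note: `_replace_param()` is the subtle piece here. Assigning to `.data` of a tensor on the "meta" device raises a `RuntimeError` (per the comment in the function), so the helper rebuilds the parameter instead of mutating it. A small sketch of both paths, using plain `torch.nn.Parameter` to stay self-contained:

```python
import torch

from invokeai.backend.quantization.bnb_nf4 import _replace_param

# Regular parameter: the data is swapped in place and the same object is returned.
p = torch.nn.Parameter(torch.zeros(4), requires_grad=False)
p = _replace_param(p, torch.ones(4))
assert p.data.sum().item() == 4.0

# Meta parameter: `p.data = ...` would raise, so _replace_param() builds a fresh
# Parameter holding the incoming (non-meta) data instead.
meta_p = torch.nn.Parameter(torch.empty(4, device="meta"), requires_grad=False)
meta_p = _replace_param(meta_p, torch.ones(4))
assert meta_p.device.type == "cpu"
```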
@@ -1,79 +0,0 @@
from pathlib import Path

import accelerate
from safetensors.torch import load_file, save_file

from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.util import params
from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time


def main():
    """A script for quantizing a FLUX transformer model using the bitsandbytes LLM.int8() quantization method.

    This script is primarily intended for reference. The script params (e.g. the model_path, modules_to_not_convert,
    etc.) are hardcoded and would need to be modified for other use cases.
    """
    # Load the FLUX transformer model onto the meta device.
    model_path = Path(
        "/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
    )

    with log_time("Initialize FLUX transformer on meta device"):
        # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
        p = params["flux-schnell"]

        # Initialize the model on the "meta" device.
        with accelerate.init_empty_weights():
            model = Flux(p)

    # TODO(ryand): We may want to add some modules to not quantize here (e.g. the proj_out layer). See the accelerate
    # `get_keys_to_not_convert(...)` function for a heuristic to determine which modules to not quantize.
    modules_to_not_convert: set[str] = set()

    model_int8_path = model_path.parent / "bnb_llm_int8.safetensors"
    if model_int8_path.exists():
        # The quantized model already exists: load it and return it.
        print(f"A pre-quantized model already exists at '{model_int8_path}'. Attempting to load it...")

        # Replace the linear layers with LLM.int8() quantized linear layers (still on the meta device).
        with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
            model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)

        with log_time("Load state dict into model"):
            sd = load_file(model_int8_path)
            model.load_state_dict(sd, strict=True, assign=True)

        with log_time("Move model to cuda"):
            model = model.to("cuda")

        print(f"Successfully loaded pre-quantized model from '{model_int8_path}'.")

    else:
        # The quantized model does not exist: quantize the model and save it.
        print(f"No pre-quantized model found at '{model_int8_path}'. Quantizing the model...")

        with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
            model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)

        with log_time("Load state dict into model"):
            state_dict = load_file(model_path)
            # TODO(ryand): Cast the state_dict to the appropriate dtype?
            model.load_state_dict(state_dict, strict=True, assign=True)

        with log_time("Move model to cuda and quantize"):
            model = model.to("cuda")

        with log_time("Save quantized model"):
            model_int8_path.parent.mkdir(parents=True, exist_ok=True)
            save_file(model.state_dict(), model_int8_path)

        print(f"Successfully quantized and saved model to '{model_int8_path}'.")

    assert isinstance(model, Flux)
    return model


if __name__ == "__main__":
    main()
@@ -1,96 +0,0 @@
import time
from contextlib import contextmanager
from pathlib import Path

import accelerate
import torch
from safetensors.torch import load_file, save_file

from invokeai.backend.flux.model import Flux
from invokeai.backend.flux.util import params
from invokeai.backend.quantization.bnb_nf4 import quantize_model_nf4


@contextmanager
def log_time(name: str):
    """Helper context manager to log the time taken by a block of code."""
    start = time.time()
    try:
        yield None
    finally:
        end = time.time()
        print(f"'{name}' took {end - start:.4f} secs")


def main():
    """A script for quantizing a FLUX transformer model using the bitsandbytes NF4 quantization method.

    This script is primarily intended for reference. The script params (e.g. the model_path, modules_to_not_convert,
    etc.) are hardcoded and would need to be modified for other use cases.
    """
    model_path = Path(
        "/data/invokeai/models/.download_cache/https__huggingface.co_black-forest-labs_flux.1-schnell_resolve_main_flux1-schnell.safetensors/flux1-schnell.safetensors"
    )

    # inference_dtype = torch.bfloat16
    with log_time("Initialize FLUX transformer on meta device"):
        # TODO(ryand): Determine if this is a schnell model or a dev model and load the appropriate config.
        p = params["flux-schnell"]

        # Initialize the model on the "meta" device.
        with accelerate.init_empty_weights():
            model = Flux(p)

    # TODO(ryand): We may want to add some modules to not quantize here (e.g. the proj_out layer). See the accelerate
    # `get_keys_to_not_convert(...)` function for a heuristic to determine which modules to not quantize.
    modules_to_not_convert: set[str] = set()

    model_nf4_path = model_path.parent / "bnb_nf4.safetensors"
    if model_nf4_path.exists():
        # The quantized model already exists: load it and return it.
        print(f"A pre-quantized model already exists at '{model_nf4_path}'. Attempting to load it...")

        # Replace the linear layers with NF4 quantized linear layers (still on the meta device).
        with log_time("Replace linear layers with NF4 layers"), accelerate.init_empty_weights():
            model = quantize_model_nf4(
                model, modules_to_not_convert=modules_to_not_convert, compute_dtype=torch.bfloat16
            )

        with log_time("Load state dict into model"):
            state_dict = load_file(model_nf4_path)
            model.load_state_dict(state_dict, strict=True, assign=True)

        with log_time("Move model to cuda"):
            model = model.to("cuda")

        print(f"Successfully loaded pre-quantized model from '{model_nf4_path}'.")

    else:
        # The quantized model does not exist: quantize the model and save it.
        print(f"No pre-quantized model found at '{model_nf4_path}'. Quantizing the model...")

        with log_time("Replace linear layers with NF4 layers"), accelerate.init_empty_weights():
            model = quantize_model_nf4(
                model, modules_to_not_convert=modules_to_not_convert, compute_dtype=torch.bfloat16
            )

        with log_time("Load state dict into model"):
            state_dict = load_file(model_path)
            # TODO(ryand): Cast the state_dict to the appropriate dtype?
            model.load_state_dict(state_dict, strict=True, assign=True)

        with log_time("Move model to cuda and quantize"):
            model = model.to("cuda")

        with log_time("Save quantized model"):
            model_nf4_path.parent.mkdir(parents=True, exist_ok=True)
            save_file(model.state_dict(), model_nf4_path)

        print(f"Successfully quantized and saved model to '{model_nf4_path}'.")

    assert isinstance(model, Flux)
    return model


if __name__ == "__main__":
    main()
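Editor's note: `log_time()` has no dependencies on the rest of the script, which is why the other quantization scripts import it from this module. It can wrap any block where a coarse timing printout is wanted, e.g.:

```python
import torch

from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time

with log_time("Example: allocate a large tensor"):
    x = torch.zeros(4096, 4096)
# Prints something like: 'Example: allocate a large tensor' took 0.0312 secs
```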
@@ -1,92 +0,0 @@
from pathlib import Path

import accelerate
from safetensors.torch import load_file, save_file
from transformers import AutoConfig, AutoModelForTextEncoding, T5EncoderModel

from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time


def load_state_dict_into_t5(model: T5EncoderModel, state_dict: dict):
    # There is a shared reference to a single weight tensor in the model.
    # Both "encoder.embed_tokens.weight" and "shared.weight" refer to the same tensor, so only the latter should
    # be present in the state_dict.
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False, assign=True)
    assert len(unexpected_keys) == 0
    assert set(missing_keys) == {"encoder.embed_tokens.weight"}
    # Assert that the layers we expect to be shared are actually shared.
    assert model.encoder.embed_tokens.weight is model.shared.weight


def main():
    """A script for quantizing a T5 text encoder model using the bitsandbytes LLM.int8() quantization method.

    This script is primarily intended for reference. The script params (e.g. the model_path, modules_to_not_convert,
    etc.) are hardcoded and would need to be modified for other use cases.
    """
    model_path = Path("/data/misc/text_encoder_2")

    with log_time("Initialize T5 on meta device"):
        model_config = AutoConfig.from_pretrained(model_path)
        with accelerate.init_empty_weights():
            model = AutoModelForTextEncoding.from_config(model_config)

    # TODO(ryand): We may want to add some modules to not quantize here (e.g. the proj_out layer). See the accelerate
    # `get_keys_to_not_convert(...)` function for a heuristic to determine which modules to not quantize.
    modules_to_not_convert: set[str] = set()

    model_int8_path = model_path / "bnb_llm_int8.safetensors"
    if model_int8_path.exists():
        # The quantized model already exists: load it and return it.
        print(f"A pre-quantized model already exists at '{model_int8_path}'. Attempting to load it...")

        # Replace the linear layers with LLM.int8() quantized linear layers (still on the meta device).
        with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
            model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)

        with log_time("Load state dict into model"):
            sd = load_file(model_int8_path)
            load_state_dict_into_t5(model, sd)

        with log_time("Move model to cuda"):
            model = model.to("cuda")

        print(f"Successfully loaded pre-quantized model from '{model_int8_path}'.")

    else:
        # The quantized model does not exist: quantize the model and save it.
        print(f"No pre-quantized model found at '{model_int8_path}'. Quantizing the model...")

        with log_time("Replace linear layers with LLM.int8() layers"), accelerate.init_empty_weights():
            model = quantize_model_llm_int8(model, modules_to_not_convert=modules_to_not_convert)

        with log_time("Load state dict into model"):
            # Load the sharded state dict.
            files = list(model_path.glob("*.safetensors"))
            state_dict = {}
            for file in files:
                sd = load_file(file)
                state_dict.update(sd)
            load_state_dict_into_t5(model, state_dict)

        with log_time("Move model to cuda and quantize"):
            model = model.to("cuda")

        with log_time("Save quantized model"):
            model_int8_path.parent.mkdir(parents=True, exist_ok=True)
            state_dict = model.state_dict()
            state_dict.pop("encoder.embed_tokens.weight")
            save_file(state_dict, model_int8_path)
            # This handling of shared weights could also be achieved with save_model(...), but then we'd lose control
            # over which keys are kept. And, the corresponding load_model(...) function does not support assign=True.
            # save_model(model, model_int8_path)

        print(f"Successfully quantized and saved model to '{model_int8_path}'.")

    assert isinstance(model, T5EncoderModel)
    return model


if __name__ == "__main__":
    main()
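Editor's note: the tied-embedding bookkeeping is the one T5-specific wrinkle in this script. `shared.weight` and `encoder.embed_tokens.weight` are a single tensor, and safetensors' `save_file()` refuses state dicts in which two entries share memory, hence the `pop()` before saving. A condensed sketch of the round trip, reusing `model` and the helpers from the script above (the file name is illustrative):

```python
from safetensors.torch import load_file, save_file

# Saving: keep only the canonical "shared.weight" key before calling save_file().
state_dict = model.state_dict()
state_dict.pop("encoder.embed_tokens.weight")
save_file(state_dict, "t5_bnb_llm_int8.safetensors")  # illustrative path

# Loading: the popped key comes back as "missing", and load_state_dict_into_t5()
# asserts that the model re-established the tie between the two attributes.
load_state_dict_into_t5(model, load_file("t5_bnb_llm_int8.safetensors"))
```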
@@ -25,6 +25,11 @@ class BasicConditioningInfo:
        return self


@dataclass
class ConditioningFieldData:
    conditionings: List[BasicConditioningInfo]


@dataclass
class SDXLConditioningInfo(BasicConditioningInfo):
    """SDXL text conditioning information produced by Compel."""
@@ -38,17 +43,6 @@ class SDXLConditioningInfo(BasicConditioningInfo):
        return super().to(device=device, dtype=dtype)


@dataclass
class FLUXConditioningInfo:
    clip_embeds: torch.Tensor
    t5_embeds: torch.Tensor


@dataclass
class ConditioningFieldData:
    conditionings: List[BasicConditioningInfo] | List[SDXLConditioningInfo] | List[FLUXConditioningInfo]


@dataclass
class IPAdapterConditioningInfo:
    cond_image_prompt_embeds: torch.Tensor
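Editor's note: with the union type in the hunk above, FLUX conditioning travels through the same `ConditioningFieldData` container as SD/SDXL conditioning. A sketch of constructing one; the embedding shapes are illustrative only, not taken from this diff:

```python
import torch

flux_cond = ConditioningFieldData(
    conditionings=[
        FLUXConditioningInfo(
            clip_embeds=torch.zeros(1, 768),  # illustrative pooled CLIP embedding shape
            t5_embeds=torch.zeros(1, 512, 4096),  # illustrative T5 sequence embedding shape
        )
    ]
)
```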
@@ -1,5 +1,5 @@
import { PropsWithChildren, memo, useEffect } from 'react';
import { modelChanged } from '../src/features/controlLayers/store/paramsSlice';
import { modelChanged } from '../src/features/controlLayers/store/canvasV2Slice';
import { useAppDispatch } from '../src/app/store/storeHooks';
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
/**
@@ -24,7 +24,7 @@
    "build": "pnpm run lint && vite build",
    "typegen": "node scripts/typegen.js",
    "preview": "vite preview",
    "lint:knip": "knip --tags=-knipignore",
    "lint:knip": "knip",
    "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx",
    "lint:eslint": "eslint --max-warnings=0 .",
    "lint:prettier": "prettier --check .",
@@ -58,7 +58,7 @@
    "@dnd-kit/sortable": "^8.0.0",
    "@dnd-kit/utilities": "^3.2.2",
    "@fontsource-variable/inter": "^5.0.20",
    "@invoke-ai/ui-library": "^0.0.32",
    "@invoke-ai/ui-library": "^0.0.31",
    "@nanostores/react": "^0.7.3",
    "@reduxjs/toolkit": "2.2.3",
    "@roarr/browser-log-writer": "^1.3.0",
203  invokeai/frontend/web/pnpm-lock.yaml (generated)
@@ -24,8 +24,8 @@ dependencies:
    specifier: ^5.0.20
    version: 5.0.20
  '@invoke-ai/ui-library':
    specifier: ^0.0.32
    version: 0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
    specifier: ^0.0.31
    version: 0.0.31(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1)
  '@nanostores/react':
    specifier: ^0.7.3
    version: 0.7.3(nanostores@0.11.2)(react@18.3.1)
@@ -40,7 +40,7 @@ dependencies:
    version: 0.5.0
  chakra-react-select:
    specifier: ^4.9.1
    version: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.3)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
    version: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.0)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
  compare-versions:
    specifier: ^6.1.1
    version: 6.1.1
@@ -1752,13 +1752,6 @@ packages:
    dependencies:
      regenerator-runtime: 0.14.1

  /@babel/runtime@7.25.4:
    resolution: {integrity: sha512-DSgLeL/FNcpXuzav5wfYvHCGvynXkJbn3Zvc3823AEe9nPwW9IK4UoCSS5yGymmQzN0pCPvivtgS6/8U2kkm1w==}
    engines: {node: '>=6.9.0'}
    dependencies:
      regenerator-runtime: 0.14.1
    dev: false

  /@babel/template@7.24.0:
    resolution: {integrity: sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==}
    engines: {node: '>=6.9.0'}
@@ -1845,7 +1838,7 @@ packages:
      '@chakra-ui/react-use-controllable-state': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
@@ -1861,7 +1854,7 @@ packages:
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -1879,7 +1872,7 @@ packages:
      '@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -1892,7 +1885,7 @@ packages:
      '@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -1912,7 +1905,7 @@ packages:
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -1923,7 +1916,7 @@ packages:
      react: '>=18'
    dependencies:
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -1942,7 +1935,7 @@ packages:
      '@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/visually-hidden': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@zag-js/focus-visible': 0.16.0
      react: 18.3.1
@@ -1965,7 +1958,7 @@ packages:
      react: '>=18'
    dependencies:
      '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -1984,7 +1977,7 @@ packages:
      '@chakra-ui/system': '>=2.0.0'
      react: '>=18'
    dependencies:
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -1999,13 +1992,13 @@ packages:
      react: 18.3.1
    dev: false

  /@chakra-ui/css-reset@2.3.0(@emotion/react@11.13.3)(react@18.3.1):
  /@chakra-ui/css-reset@2.3.0(@emotion/react@11.13.0)(react@18.3.1):
    resolution: {integrity: sha512-cQwwBy5O0jzvl0K7PLTLgp8ijqLPKyuEMiDXwYzl95seD3AoeuoCLyzZcJtVqaUZ573PiBdAbY/IlZcwDOItWg==}
    peerDependencies:
      '@emotion/react': '>=10.0.35'
      react: '>=18'
    dependencies:
      '@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
      '@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2038,7 +2031,7 @@ packages:
      '@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2069,7 +2062,7 @@ packages:
      '@chakra-ui/react-types': 2.0.7(react@18.3.1)
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2092,7 +2085,7 @@ packages:
      react: '>=18'
    dependencies:
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2103,7 +2096,7 @@ packages:
      react: '>=18'
    dependencies:
      '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2115,7 +2108,7 @@ packages:
    dependencies:
      '@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2130,7 +2123,7 @@ packages:
      '@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2146,7 +2139,7 @@ packages:
      '@chakra-ui/react-children-utils': 2.0.6(react@18.3.1)
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2171,7 +2164,7 @@ packages:
      '@chakra-ui/breakpoint-utils': 2.0.8
      '@chakra-ui/react-env': 3.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2196,7 +2189,7 @@ packages:
      '@chakra-ui/react-use-outside-click': 2.2.0(react@18.3.1)
      '@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
@@ -2223,7 +2216,7 @@ packages:
      '@chakra-ui/react-use-outside-click': 2.2.0(react@18.3.1)
      '@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/transition': 2.1.0(framer-motion@11.3.24)(react@18.3.1)
      framer-motion: 11.3.24(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
@@ -2244,7 +2237,7 @@ packages:
      '@chakra-ui/react-types': 2.0.7(react@18.3.1)
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
      aria-hidden: 1.2.4
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
@@ -2273,7 +2266,7 @@ packages:
      '@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2297,7 +2290,7 @@ packages:
      '@chakra-ui/react-use-controllable-state': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2319,7 +2312,7 @@ packages:
      '@chakra-ui/react-use-focus-on-pointer-down': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
    dev: false
@@ -2354,11 +2347,11 @@ packages:
      react: '>=18'
    dependencies:
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

  /@chakra-ui/provider@2.4.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1):
  /@chakra-ui/provider@2.4.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-w0Tef5ZCJK1mlJorcSjItCSbyvVuqpvyWdxZiVQmE6fvSJR83wZof42ux0+sfWD+I7rHSfj+f9nzhNaEWClysw==}
    peerDependencies:
      '@emotion/react': ^11.0.0
@@ -2366,13 +2359,13 @@ packages:
      react: '>=18'
      react-dom: '>=18'
    dependencies:
      '@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.3)(react@18.3.1)
      '@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.0)(react@18.3.1)
      '@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/react-env': 3.1.0(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/utils': 2.0.15
      '@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
      '@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
    dev: false
@@ -2388,7 +2381,7 @@ packages:
      '@chakra-ui/react-types': 2.0.7(react@18.3.1)
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@zag-js/focus-visible': 0.16.0
      react: 18.3.1
    dev: false
@@ -2588,7 +2581,7 @@ packages:
      react: 18.3.1
    dev: false

  /@chakra-ui/react@2.8.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1):
  /@chakra-ui/react@2.8.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-Hn0moyxxyCDKuR9ywYpqgX8dvjqwu9ArwpIb9wHNYjnODETjLwazgNIliCVBRcJvysGRiV51U2/JtJVrpeCjUQ==}
    peerDependencies:
      '@emotion/react': ^11.0.0
@@ -2607,7 +2600,7 @@ packages:
      '@chakra-ui/close-button': 2.1.1(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/control-box': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/counter': 2.1.0(react@18.3.1)
      '@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.3)(react@18.3.1)
      '@chakra-ui/css-reset': 2.3.0(@emotion/react@11.13.0)(react@18.3.1)
      '@chakra-ui/editable': 3.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/focus-lock': 2.1.0(@types/react@18.3.3)(react@18.3.1)
      '@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
@@ -2626,7 +2619,7 @@ packages:
      '@chakra-ui/popper': 3.1.0(react@18.3.1)
      '@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/progress': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/provider': 2.4.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/provider': 2.4.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/radio': 2.1.2(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/react-env': 3.1.0(react@18.3.1)
      '@chakra-ui/select': 2.1.2(@chakra-ui/system@2.6.2)(react@18.3.1)
@@ -2638,7 +2631,7 @@ packages:
      '@chakra-ui/stepper': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/styled-system': 2.9.2
      '@chakra-ui/switch': 2.1.2(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/table': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/tabs': 3.0.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/tag': 3.1.1(@chakra-ui/system@2.6.2)(react@18.3.1)
@@ -2650,8 +2643,8 @@ packages:
      '@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.3.1)
      '@chakra-ui/utils': 2.0.15
      '@chakra-ui/visually-hidden': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
      '@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
@@ -2667,7 +2660,7 @@ packages:
    dependencies:
      '@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2684,7 +2677,7 @@ packages:
      '@chakra-ui/media-query': 3.3.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/react-use-previous': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2694,7 +2687,7 @@ packages:
      '@chakra-ui/system': '>=2.0.0'
      react: '>=18'
    dependencies:
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2714,7 +2707,7 @@ packages:
      '@chakra-ui/react-use-pan-event': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-size': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2725,7 +2718,7 @@ packages:
      react: '>=18'
    dependencies:
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2738,7 +2731,7 @@ packages:
      '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2751,7 +2744,7 @@ packages:
      '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2772,12 +2765,12 @@ packages:
    dependencies:
      '@chakra-ui/checkbox': 2.3.2(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
    dev: false

  /@chakra-ui/system@2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1):
  /@chakra-ui/system@2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1):
    resolution: {integrity: sha512-EGtpoEjLrUu4W1fHD+a62XR+hzC5YfsWm+6lO0Kybcga3yYEij9beegO0jZgug27V+Rf7vns95VPVP6mFd/DEQ==}
    peerDependencies:
      '@emotion/react': ^11.0.0
@@ -2790,8 +2783,8 @@ packages:
      '@chakra-ui/styled-system': 2.9.2
      '@chakra-ui/theme-utils': 2.0.21
      '@chakra-ui/utils': 2.0.15
      '@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
      '@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
      react: 18.3.1
      react-fast-compare: 3.2.2
    dev: false
@@ -2804,7 +2797,7 @@ packages:
    dependencies:
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2823,7 +2816,7 @@ packages:
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-safe-layout-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2835,7 +2828,7 @@ packages:
    dependencies:
      '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/react-context': 2.1.0(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2847,7 +2840,7 @@ packages:
    dependencies:
      '@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -2898,7 +2891,7 @@ packages:
      '@chakra-ui/react-use-update-effect': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/styled-system': 2.9.2
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/theme': 3.3.1(@chakra-ui/styled-system@2.9.2)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
@@ -2921,7 +2914,7 @@ packages:
      '@chakra-ui/react-use-event-listener': 2.1.0(react@18.3.1)
      '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.3.1)
      '@chakra-ui/shared-utils': 2.0.5
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
@@ -2964,7 +2957,7 @@ packages:
      '@chakra-ui/system': '>=2.0.0'
      react: '>=18'
    dependencies:
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      react: 18.3.1
    dev: false

@@ -3047,10 +3040,10 @@ packages:
    resolution: {integrity: sha512-y2WQb+oP8Jqvvclh8Q55gLUyb7UFvgv7eJfsj7td5TToBrIUtPay2kMrZi4xjq9qw2vD0ZR5fSho0yqoFgX7Rw==}
    dependencies:
      '@babel/helper-module-imports': 7.24.7
      '@babel/runtime': 7.25.4
      '@babel/runtime': 7.25.0
      '@emotion/hash': 0.9.2
      '@emotion/memoize': 0.9.0
      '@emotion/serialize': 1.3.1
      '@emotion/serialize': 1.3.0
      babel-plugin-macros: 3.1.0
      convert-source-map: 1.9.0
      escape-string-regexp: 4.0.0
@@ -3138,8 +3131,8 @@ packages:
      react: 18.3.1
    dev: false

  /@emotion/react@11.13.3(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-lIsdU6JNrmYfJ5EbUCf4xW1ovy5wKQ2CkPRM4xogziOxH1nXxBSjpC9YqbFAP7circxMfYp+6x676BqWcEiixg==}
  /@emotion/react@11.13.0(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-WkL+bw1REC2VNV1goQyfxjx1GYJkcc23CRQkXX+vZNLINyfI7o+uUn/rTGPt/xJ3bJHd5GcljgnxHf4wRw5VWQ==}
    peerDependencies:
      '@types/react': '*'
      react: '>=16.8.0'
@@ -3147,10 +3140,10 @@ packages:
      '@types/react':
        optional: true
    dependencies:
      '@babel/runtime': 7.25.4
      '@babel/runtime': 7.25.0
      '@emotion/babel-plugin': 11.12.0
      '@emotion/cache': 11.13.1
      '@emotion/serialize': 1.3.1
      '@emotion/serialize': 1.3.0
      '@emotion/use-insertion-effect-with-fallbacks': 1.1.0(react@18.3.1)
      '@emotion/utils': 1.4.0
      '@emotion/weak-memoize': 0.4.0
@@ -3171,12 +3164,12 @@ packages:
      csstype: 3.1.3
    dev: false

  /@emotion/serialize@1.3.1:
    resolution: {integrity: sha512-dEPNKzBPU+vFPGa+z3axPRn8XVDetYORmDC0wAiej+TNcOZE70ZMJa0X7JdeoM6q/nWTMZeLpN/fTnD9o8MQBA==}
  /@emotion/serialize@1.3.0:
    resolution: {integrity: sha512-jACuBa9SlYajnpIVXB+XOXnfJHyckDfe6fOpORIM6yhBDlqGuExvDdZYHDQGoDf3bZXGv7tNr+LpLjJqiEQ6EA==}
    dependencies:
      '@emotion/hash': 0.9.2
      '@emotion/memoize': 0.9.0
      '@emotion/unitless': 0.10.0
      '@emotion/unitless': 0.9.0
      '@emotion/utils': 1.4.0
      csstype: 3.1.3
    dev: false
@@ -3189,7 +3182,7 @@ packages:
    resolution: {integrity: sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==}
    dev: false

  /@emotion/styled@11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1):
  /@emotion/styled@11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1):
    resolution: {integrity: sha512-tkzkY7nQhW/zC4hztlwucpT8QEZ6eUzpXDRhww/Eej4tFfO0FxQYWRyg/c5CCXa4d/f174kqeXYjuQRnhzf6dA==}
    peerDependencies:
      '@emotion/react': ^11.0.0-rc.0
@@ -3199,11 +3192,11 @@ packages:
      '@types/react':
        optional: true
    dependencies:
      '@babel/runtime': 7.25.4
      '@babel/runtime': 7.25.0
      '@emotion/babel-plugin': 11.12.0
      '@emotion/is-prop-valid': 1.3.0
      '@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
      '@emotion/serialize': 1.3.1
      '@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
      '@emotion/serialize': 1.3.0
      '@emotion/use-insertion-effect-with-fallbacks': 1.1.0(react@18.3.1)
      '@emotion/utils': 1.4.0
      '@types/react': 18.3.3
@@ -3212,14 +3205,14 @@ packages:
      - supports-color
    dev: false

  /@emotion/unitless@0.10.0:
    resolution: {integrity: sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==}
    dev: false

  /@emotion/unitless@0.8.1:
    resolution: {integrity: sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==}
    dev: false

  /@emotion/unitless@0.9.0:
    resolution: {integrity: sha512-TP6GgNZtmtFaFcsOgExdnfxLLpRDla4Q66tnenA9CktvVSdNKDvMVuUah4QvWPIpNjrWsGg3qeGo9a43QooGZQ==}
    dev: false

  /@emotion/use-insertion-effect-with-fallbacks@1.0.1(react@18.3.1):
    resolution: {integrity: sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw==}
    peerDependencies:
@@ -3571,8 +3564,8 @@ packages:
      prettier: 3.3.3
    dev: true

  /@invoke-ai/ui-library@0.0.32(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-JxAoblrDu/cZ4ha9KO4ry5OWvyLUE1Dj28i+ciMaDNUpC/cN+IyiTbUBoFoPaoN5JP8Zpd/MYCcmF2qsziHDzg==}
  /@invoke-ai/ui-library@0.0.31(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.20)(@types/react@18.3.3)(i18next@23.12.2)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-7LtOUN/bcGHc8jCRd2m22DvP2eeogqwM/shdXQpLH5RY2FzWJNXlWdVT4hIPGDu7znnk3xvXlZvo6tiGSjbnCQ==}
    peerDependencies:
      '@fontsource-variable/inter': ^5.0.16
      react: ^18.2.0
@@ -3582,14 +3575,14 @@ packages:
      '@chakra-ui/icons': 2.1.1(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/portal': 2.1.0(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/react': 2.8.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/react': 2.8.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(@types/react@18.3.3)(framer-motion@10.18.0)(react-dom@18.3.1)(react@18.3.1)
      '@chakra-ui/styled-system': 2.9.2
      '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2)
      '@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.3)(@types/react@18.3.3)(react@18.3.1)
      '@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
      '@emotion/styled': 11.13.0(@emotion/react@11.13.0)(@types/react@18.3.3)(react@18.3.1)
      '@fontsource-variable/inter': 5.0.20
      '@nanostores/react': 0.7.3(nanostores@0.11.2)(react@18.3.1)
      chakra-react-select: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.3)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
      chakra-react-select: 4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.0)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
      framer-motion: 10.18.0(react-dom@18.3.1)(react@18.3.1)
      lodash-es: 4.17.21
      nanostores: 0.11.2
@@ -5781,7 +5774,7 @@ packages:
    resolution: {integrity: sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==}
    engines: {node: '>=10'}
    dependencies:
      tslib: 2.7.0
      tslib: 2.6.3
    dev: false

  /aria-query@5.3.0:
@@ -6133,7 +6126,7 @@ packages:
      type-detect: 4.0.8
    dev: true

  /chakra-react-select@4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.3)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1):
  /chakra-react-select@4.9.1(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.13.0)(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1):
    resolution: {integrity: sha512-jmgfN+S/wnTaCp3pW30GYDIZ5J8jWcT1gIbhpw6RdKV+atm/U4/sT+gaHOHHhRL8xeaYip+iI/m8MPGREkve0w==}
    peerDependencies:
      '@chakra-ui/form-control': ^2.0.0
@@ -6153,8 +6146,8 @@ packages:
      '@chakra-ui/media-query': 3.3.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/menu': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.3.24)(react@18.3.1)
      '@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.3)(@emotion/styled@11.13.0)(react@18.3.1)
      '@emotion/react': 11.13.3(@types/react@18.3.3)(react@18.3.1)
      '@chakra-ui/system': 2.6.2(@emotion/react@11.13.0)(@emotion/styled@11.13.0)(react@18.3.1)
      '@emotion/react': 11.13.0(@types/react@18.3.3)(react@18.3.1)
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
      react-select: 5.8.0(@types/react@18.3.3)(react-dom@18.3.1)(react@18.3.1)
@@ -7579,7 +7572,7 @@ packages:
    resolution: {integrity: sha512-QFaHbhv9WPUeLYBDe/PAuLKJ4Dd9OPvKs9xZBr3yLXnUrDNaVXKu2baDBXe3naPY30hgHYSsf2JW4jzas2mDEQ==}
    engines: {node: '>=10'}
    dependencies:
      tslib: 2.7.0
      tslib: 2.6.3
    dev: false

  /for-each@0.3.3:
@@ -7618,7 +7611,7 @@ packages:
    dependencies:
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)
      tslib: 2.7.0
      tslib: 2.6.3
    optionalDependencies:
      '@emotion/is-prop-valid': 0.8.8
    dev: false
@@ -9626,7 +9619,7 @@ packages:
    peerDependencies:
      react: ^15.3.0 || ^16.0.0 || ^17.0.0 || ^18.0.0
    dependencies:
      '@babel/runtime': 7.25.4
      '@babel/runtime': 7.25.0
      react: 18.3.1
    dev: false

@@ -9721,7 +9714,7 @@ packages:
      '@types/react':
        optional: true
    dependencies:
      '@babel/runtime': 7.25.4
      '@babel/runtime': 7.25.0
      '@types/react': 18.3.3
      focus-lock: 1.3.5
      prop-types: 15.8.1
@@ -9783,7 +9776,7 @@ packages:
      react-native:
        optional: true
    dependencies:
      '@babel/runtime': 7.25.4
      '@babel/runtime': 7.25.0
|
||||
html-parse-stringify: 3.0.1
|
||||
i18next: 23.12.2
|
||||
react: 18.3.1
|
||||
@ -9845,7 +9838,7 @@ packages:
|
||||
'@types/react': 18.3.3
|
||||
react: 18.3.1
|
||||
react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
|
||||
tslib: 2.7.0
|
||||
tslib: 2.6.3
|
||||
dev: false
|
||||
|
||||
/react-remove-scroll@2.5.10(@types/react@18.3.3)(react@18.3.1):
|
||||
@ -9862,7 +9855,7 @@ packages:
|
||||
react: 18.3.1
|
||||
react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1)
|
||||
react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1)
|
||||
tslib: 2.7.0
|
||||
tslib: 2.6.3
|
||||
use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1)
|
||||
use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1)
|
||||
dev: false
|
||||
@ -9912,7 +9905,7 @@ packages:
|
||||
get-nonce: 1.0.1
|
||||
invariant: 2.2.4
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
tslib: 2.6.3
|
||||
dev: false
|
||||
|
||||
/react-transition-group@4.4.5(react-dom@18.3.1)(react@18.3.1):
|
||||
@ -11052,10 +11045,6 @@ packages:
|
||||
/tslib@2.6.3:
|
||||
resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==}
|
||||
|
||||
/tslib@2.7.0:
|
||||
resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==}
|
||||
dev: false
|
||||
|
||||
/tsutils@3.21.0(typescript@5.5.4):
|
||||
resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==}
|
||||
engines: {node: '>= 6'}
|
||||
@ -11303,7 +11292,7 @@ packages:
|
||||
dependencies:
|
||||
'@types/react': 18.3.3
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
tslib: 2.6.3
|
||||
dev: false
|
||||
|
||||
/use-debounce@10.0.2(react@18.3.1):
|
||||
@ -11349,7 +11338,7 @@ packages:
|
||||
'@types/react': 18.3.3
|
||||
detect-node-es: 1.1.0
|
||||
react: 18.3.1
|
||||
tslib: 2.7.0
|
||||
tslib: 2.6.3
|
||||
dev: false
|
||||
|
||||
/use-sync-external-store@1.2.0(react@18.3.1):
|
||||
|
@@ -790,7 +790,6 @@
"simpleModelPlaceholder": "URL or path to a local file or diffusers folder",
"source": "Source",
"starterModels": "Starter Models",
"starterModelsInModelManager": "Starter Models can be found in Model Manager",
"syncModels": "Sync Models",
"textualInversions": "Textual Inversions",
"triggerPhrases": "Trigger Phrases",

@@ -1646,7 +1645,6 @@
"storeNotInitialized": "Store is not initialized"
},
"controlLayers": {
"clearHistory": "Clear History",
"generateMode": "Generate",
"generateModeDesc": "Create individual images. Generated images are added directly to the gallery.",
"composeMode": "Compose",

@@ -1654,11 +1652,11 @@
"autoSave": "Auto-save to Gallery",
"resetCanvas": "Reset Canvas",
"resetAll": "Reset All",
"deleteAll": "Delete All",
"clearCaches": "Clear Caches",
"recalculateRects": "Recalculate Rects",
"clipToBbox": "Clip Strokes to Bbox",
"addLayer": "Add Layer",
"duplicate": "Duplicate",
"moveToFront": "Move to Front",
"moveToBack": "Move to Back",
"moveForward": "Move Forward",

@@ -1676,7 +1674,7 @@
"resetRegion": "Reset Region",
"debugLayers": "Debug Layers",
"rectangle": "Rectangle",
"maskFill": "Mask Fill",
"maskPreviewColor": "Mask Preview Color",
"addPositivePrompt": "Add $t(common.positivePrompt)",
"addNegativePrompt": "Add $t(common.negativePrompt)",
"addIPAdapter": "Add $t(common.ipAdapter)",

@@ -1730,10 +1728,6 @@
"showingType": "Showing {{type}}",
"dynamicGrid": "Dynamic Grid",
"logDebugInfo": "Log Debug Info",
"locked": "Locked",
"unlocked": "Unlocked",
"deleteSelected": "Delete Selected",
"deleteAll": "Delete All",
"fill": {
"fillStyle": "Fill Style",
"solid": "Solid",

@@ -17,7 +17,7 @@ import { useStarterModelsToast } from 'features/modelManagerV2/hooks/useStarterM
import { ClearQueueConfirmationsAlertDialog } from 'features/queue/components/ClearQueueConfirmationAlertDialog';
import { StylePresetModal } from 'features/stylePresets/components/StylePresetForm/StylePresetModal';
import { configChanged } from 'features/system/store/configSlice';
import { selectLanguage } from 'features/system/store/systemSelectors';
import { languageSelector } from 'features/system/store/systemSelectors';
import { AppContent } from 'features/ui/components/AppContent';
import { setActiveTab } from 'features/ui/store/uiSlice';
import type { TabName } from 'features/ui/store/uiTypes';

@@ -45,7 +45,7 @@ interface Props {
}

const App = ({ config = DEFAULT_CONFIG, selectedImage, selectedWorkflowId, destination }: Props) => {
const language = useAppSelector(selectLanguage);
const language = useAppSelector(languageSelector);
const logger = useLogger('system');
const dispatch = useAppDispatch();
const clearStorage = useClearStorage();

@@ -1,7 +1,5 @@
import { Button, Flex, Heading, Image, Link, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectConfigSlice } from 'features/system/store/configSlice';
import { toast } from 'features/toast/toast';
import newGithubIssueUrl from 'new-github-issue-url';
import InvokeLogoYellow from 'public/assets/images/invoke-symbol-ylw-lrg.svg';

@@ -15,11 +13,9 @@ type Props = {
resetErrorBoundary: () => void;
};

const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);

const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
const { t } = useTranslation();
const isLocal = useAppSelector(selectIsLocal);
const isLocal = useAppSelector((s) => s.config.isLocal);

const handleCopy = useCallback(() => {
const text = JSON.stringify(serializeError(error), null, 2);

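The hunk above trades a module-level memoized selector (`selectIsLocal`, built with `createSelector`) for an inline lambda passed straight to `useAppSelector`. A minimal sketch of the memoized variant, assuming Reselect's `createSelector` as re-exported by Redux Toolkit (`MyBadge` is a hypothetical component, not part of this codebase):

import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import { selectConfigSlice } from 'features/system/store/configSlice';

// Defined once at module scope: every component instance shares one memoized
// selector, and the derived value is recomputed only when the config slice changes.
const selectIsLocal = createSelector(selectConfigSlice, (config) => config.isLocal);

const MyBadge = () => {
  // Re-renders only when `isLocal` actually changes.
  const isLocal = useAppSelector(selectIsLocal);
  return isLocal ? 'local install' : 'hosted install';
};

For a boolean like this, the inline form is equally cheap; memoization starts to matter when the selector allocates new objects or arrays, which would otherwise defeat `useAppSelector`'s reference-equality check.
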
@@ -1,10 +1,5 @@
import { createLogWriter } from '@roarr/browser-log-writer';
import { useAppSelector } from 'app/store/storeHooks';
import {
selectSystemLogIsEnabled,
selectSystemLogLevel,
selectSystemLogNamespaces,
} from 'features/system/store/systemSlice';
import { useEffect, useMemo } from 'react';
import { ROARR, Roarr } from 'roarr';

@@ -12,9 +7,9 @@ import type { LogNamespace } from './logger';
import { $logger, BASE_CONTEXT, LOG_LEVEL_MAP, logger } from './logger';

export const useLogger = (namespace: LogNamespace) => {
const logLevel = useAppSelector(selectSystemLogLevel);
const logNamespaces = useAppSelector(selectSystemLogNamespaces);
const logIsEnabled = useAppSelector(selectSystemLogIsEnabled);
const logLevel = useAppSelector((s) => s.system.logLevel);
const logNamespaces = useAppSelector((s) => s.system.logNamespaces);
const logIsEnabled = useAppSelector((s) => s.system.logIsEnabled);

// The provided Roarr browser log writer uses localStorage to config logging to console
useEffect(() => {

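The comment at the end of this hunk notes that Roarr's browser log writer is configured through localStorage. A minimal sketch of that wiring, assuming Roarr's documented `ROARR_LOG` flag and the `@roarr/browser-log-writer` package seen in the imports above:

import { createLogWriter } from '@roarr/browser-log-writer';
import { ROARR, Roarr } from 'roarr';

// Route Roarr's output to the browser console.
ROARR.write = createLogWriter();
// The writer stays silent until this localStorage flag opts in.
localStorage.setItem('ROARR_LOG', 'true');

// Child loggers carry their context (here, a namespace) on every message.
const log = Roarr.child({ namespace: 'system' });
log.info('logging enabled');
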
@@ -1,3 +1,2 @@
export const STORAGE_PREFIX = '@@invokeai-';
export const EMPTY_ARRAY = [];
export const EMPTY_OBJECT = {};

@@ -1,11 +1,10 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import {
rasterLayerAdded,
sessionStagingAreaImageAccepted,
sessionStagingAreaReset,
} from 'features/controlLayers/store/canvasSessionSlice';
import { rasterLayerAdded } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
} from 'features/controlLayers/store/canvasV2Slice';
import type { CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
import { toast } from 'features/toast/toast';

@@ -56,10 +55,10 @@ export const addStagingListeners = (startAppListening: AppStartListening) => {
effect: (action, api) => {
const { index } = action.payload;
const state = api.getState();
const stagingAreaImage = state.canvasSession.stagedImages[index];
const stagingAreaImage = state.canvasV2.session.stagedImages[index];

assert(stagingAreaImage, 'No staged image found to accept');
const { x, y } = selectCanvasSlice(state).bbox.rect;
const { x, y } = state.canvasV2.bbox.rect;

const { imageDTO, offsetX, offsetY } = stagingAreaImage;
const imageObject = imageDTOToImageObject(imageDTO);

@@ -1,5 +1,5 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { setInfillMethod } from 'features/controlLayers/store/paramsSlice';
import { setInfillMethod } from 'features/controlLayers/store/canvasV2Slice';
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';

@@ -8,7 +8,7 @@ export const addAppConfigReceivedListener = (startAppListening: AppStartListenin
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
effect: (action, { getState, dispatch }) => {
const { infill_methods = [], nsfw_methods = [], watermarking_methods = [] } = action.payload;
const infillMethod = getState().params.infillMethod;
const infillMethod = getState().canvasV2.compositing.infillMethod;

if (!infill_methods.includes(infillMethod)) {
// if there is no infill method, set it to the first one

@@ -1,8 +1,6 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getImageUsage } from 'features/deleteImageModal/store/selectors';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { imagesApi } from 'services/api/endpoints/images';

export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppStartListening) => {

@@ -15,12 +13,10 @@ export const addDeleteBoardAndImagesFulfilledListener = (startAppListening: AppS

let wasNodeEditorReset = false;

const state = getState();
const nodes = selectNodesSlice(state);
const canvas = selectCanvasSlice(state);
const { nodes, canvasV2 } = getState();

deleted_images.forEach((image_name) => {
const imageUsage = getImageUsage(nodes, canvas, image_name);
const imageUsage = getImageUsage(nodes.present, canvasV2, image_name);

if (imageUsage.isNodesImage && !wasNodeEditorReset) {
dispatch(nodeEditorReset());

@@ -1,11 +1,8 @@
import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { SerializableObject } from 'common/types';
import type { Result } from 'common/util/result';
import { isErr, withResult, withResultAsync } from 'common/util/result';
import { $canvasManager } from 'features/controlLayers/konva/CanvasManager';
import { sessionStagingAreaReset, sessionStartedStaging } from 'features/controlLayers/store/canvasSessionSlice';
import { sessionStagingAreaReset, sessionStartedStaging } from 'features/controlLayers/store/canvasV2Slice';
import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig';
import { buildSD1Graph } from 'features/nodes/util/graph/generation/buildSD1Graph';
import { buildSDXLGraph } from 'features/nodes/util/graph/generation/buildSDXLGraph';

@@ -23,77 +20,55 @@ export const addEnqueueRequestedLinear = (startAppListening: AppStartListening)
enqueueRequested.match(action) && action.payload.tabName === 'generation',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const model = state.params.model;
const model = state.canvasV2.params.model;
const { prepend } = action.payload;

const manager = $canvasManager.get();
assert(manager, 'No model found in state');

let didStartStaging = false;

if (!state.canvasSession.isStaging && state.canvasSession.mode === 'compose') {
if (!state.canvasV2.session.isStaging && state.canvasV2.session.mode === 'compose') {
dispatch(sessionStartedStaging());
didStartStaging = true;
}

const abortStaging = () => {
if (didStartStaging && getState().canvasSession.isStaging) {
try {
let g: Graph;
let noise: Invocation<'noise'>;
let posCond: Invocation<'compel' | 'sdxl_compel_prompt'>;

assert(model, 'No model found in state');
const base = model.base;

if (base === 'sdxl') {
const result = await buildSDXLGraph(state, manager);
g = result.g;
noise = result.noise;
posCond = result.posCond;
} else if (base === 'sd-1' || base === 'sd-2') {
const result = await buildSD1Graph(state, manager);
g = result.g;
noise = result.noise;
posCond = result.posCond;
} else {
assert(false, `No graph builders for base ${base}`);
}

const batchConfig = prepareLinearUIBatch(state, g, prepend, noise, posCond);

const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(batchConfig, {
fixedCacheKey: 'enqueueBatch',
})
);
req.reset();
await req.unwrap();
} catch (error) {
log.error({ error: serializeError(error) }, 'Failed to enqueue batch');
if (didStartStaging && getState().canvasV2.session.isStaging) {
dispatch(sessionStagingAreaReset());
}
};

let buildGraphResult: Result<
{ g: Graph; noise: Invocation<'noise'>; posCond: Invocation<'compel' | 'sdxl_compel_prompt'> },
Error
>;

assert(model, 'No model found in state');
const base = model.base;

switch (base) {
case 'sdxl':
buildGraphResult = await withResultAsync(() => buildSDXLGraph(state, manager));
break;
case 'sd-1':
case `sd-2`:
buildGraphResult = await withResultAsync(() => buildSD1Graph(state, manager));
break;
default:
assert(false, `No graph builders for base ${base}`);
}

if (isErr(buildGraphResult)) {
log.error({ error: serializeError(buildGraphResult.error) }, 'Failed to build graph');
abortStaging();
return;
}

const { g, noise, posCond } = buildGraphResult.value;

const prepareBatchResult = withResult(() => prepareLinearUIBatch(state, g, prepend, noise, posCond));

if (isErr(prepareBatchResult)) {
log.error({ error: serializeError(prepareBatchResult.error) }, 'Failed to prepare batch');
abortStaging();
return;
}

const req = dispatch(
queueApi.endpoints.enqueueBatch.initiate(prepareBatchResult.value, {
fixedCacheKey: 'enqueueBatch',
})
);
req.reset();

const enqueueResult = await withResultAsync(() => req.unwrap());

if (isErr(enqueueResult)) {
log.error({ error: serializeError(enqueueResult.error) }, 'Failed to enqueue batch');
abortStaging();
return;
}

log.debug({ batchConfig: prepareBatchResult.value } as SerializableObject, 'Enqueued batch');
},
});
};

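One side of this hunk replaces a single try/catch around the whole enqueue flow with per-step `Result` checks, so each stage (build graph, prepare batch, enqueue) logs its own failure and aborts staging on its own. The control flow, distilled to a sketch (`doAsyncWork` is a stand-in helper, not a function from this codebase):

import { isErr, withResultAsync } from 'common/util/result';

// Stand-in for any fallible async step, e.g. building a graph.
const doAsyncWork = async (): Promise<string> => 'done';

const run = async (): Promise<void> => {
  // The step returns Ok(value) or Err(error) instead of throwing.
  const result = await withResultAsync(doAsyncWork);
  if (isErr(result)) {
    // Failure is handled right where it happened, then we bail out.
    console.error(result.error);
    return;
  }
  // On the happy path the value is already unwrapped and typed.
  console.log(result.value);
};
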
@@ -1,6 +1,5 @@
import { enqueueRequested } from 'app/store/actions';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph';
import { buildWorkflowWithValidation } from 'features/nodes/util/workflow/buildWorkflow';
import { queueApi } from 'services/api/endpoints/queue';

@@ -12,12 +11,12 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
enqueueRequested.match(action) && action.payload.tabName === 'workflows',
effect: async (action, { getState, dispatch }) => {
const state = getState();
const nodes = selectNodesSlice(state);
const { nodes, edges } = state.nodes.present;
const workflow = state.workflow;
const graph = buildNodesGraph(nodes);
const graph = buildNodesGraph(state.nodes.present);
const builtWorkflow = buildWorkflowWithValidation({
nodes: nodes.nodes,
edges: nodes.edges,
nodes,
edges,
workflow,
});

@@ -30,7 +29,7 @@ export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) =
batch: {
graph,
workflow: builtWorkflow,
runs: state.params.iterations,
runs: state.canvasV2.params.iterations,
origin: 'workflows',
},
prepend: action.payload.prepend,

@@ -1,9 +1,7 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import type { AppDispatch, RootState } from 'app/store/store';
import { entityDeleted, ipaImageChanged } from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
import { entityDeleted, ipaImageChanged } from 'features/controlLayers/store/canvasV2Slice';
import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions';
import { isModalOpenChanged } from 'features/deleteImageModal/store/slice';
import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors';

@@ -41,7 +39,7 @@ const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: Im
};

// const deleteControlAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
// state.canvas.present.controlAdapters.entities.forEach(({ id, imageObject, processedImageObject }) => {
// state.canvasV2.controlAdapters.entities.forEach(({ id, imageObject, processedImageObject }) => {
// if (
// imageObject?.image.image_name === imageDTO.image_name ||
// processedImageObject?.image.image_name === imageDTO.image_name

@@ -53,15 +51,15 @@
// };

const deleteIPAdapterImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).ipAdapters.entities.forEach((entity) => {
if (entity.ipAdapter.image?.image_name === imageDTO.image_name) {
dispatch(ipaImageChanged({ entityIdentifier: getEntityIdentifier(entity), imageDTO: null }));
state.canvasV2.ipAdapters.entities.forEach(({ id, ipAdapter }) => {
if (ipAdapter.image?.image_name === imageDTO.image_name) {
dispatch(ipaImageChanged({ id, imageDTO: null }));
}
});
};

const deleteLayerImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => {
selectCanvasSlice(state).rasterLayers.entities.forEach(({ id, objects }) => {
state.canvasV2.rasterLayers.entities.forEach(({ id, objects }) => {
let shouldDelete = false;
for (const obj of objects) {
if (obj.type === 'image' && obj.image.image_name === imageDTO.image_name) {

@@ -6,8 +6,7 @@ import {
ipaImageChanged,
rasterLayerAdded,
rgIPAdapterImageChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
} from 'features/controlLayers/store/canvasV2Slice';
import type { CanvasControlLayerState, CanvasRasterLayerState } from 'features/controlLayers/store/types';
import { imageDTOToImageObject } from 'features/controlLayers/store/types';
import type { TypesafeDraggableData, TypesafeDroppableData } from 'features/dnd/types';

@@ -52,9 +51,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const { id } = overData.context;
dispatch(
ipaImageChanged({ entityIdentifier: { id, type: 'ip_adapter' }, imageDTO: activeData.payload.imageDTO })
);
dispatch(ipaImageChanged({ id, imageDTO: activeData.payload.imageDTO }));
return;
}

@@ -67,13 +64,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const { id, ipAdapterId } = overData.context;
dispatch(
rgIPAdapterImageChanged({
entityIdentifier: { id, type: 'regional_guidance' },
ipAdapterId,
imageDTO: activeData.payload.imageDTO,
})
);
dispatch(rgIPAdapterImageChanged({ id, ipAdapterId, imageDTO: activeData.payload.imageDTO }));
return;
}

@@ -86,7 +77,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
const { x, y } = getState().canvasV2.bbox.rect;
const overrides: Partial<CanvasRasterLayerState> = {
objects: [imageObject],
position: { x, y },

@@ -104,7 +95,7 @@ export const addImageDroppedListener = (startAppListening: AppStartListening) =>
activeData.payload.imageDTO
) {
const imageObject = imageDTOToImageObject(activeData.payload.imageDTO);
const { x, y } = selectCanvasSlice(getState()).bbox.rect;
const { x, y } = getState().canvasV2.bbox.rect;
const overrides: Partial<CanvasControlLayerState> = {
objects: [imageObject],
position: { x, y },

@@ -1,6 +1,6 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { ipaImageChanged, rgIPAdapterImageChanged } from 'features/controlLayers/store/canvasSlice';
import { ipaImageChanged, rgIPAdapterImageChanged } from 'features/controlLayers/store/canvasV2Slice';
import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors';
import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice';
import { upscaleInitialImageChanged } from 'features/parameters/store/upscaleSlice';

@@ -89,16 +89,14 @@ export const addImageUploadedFulfilledListener = (startAppListening: AppStartLis

if (postUploadAction?.type === 'SET_IPA_IMAGE') {
const { id } = postUploadAction;
dispatch(ipaImageChanged({ entityIdentifier: { id, type: 'ip_adapter' }, imageDTO }));
dispatch(ipaImageChanged({ id, imageDTO }));
toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
return;
}

if (postUploadAction?.type === 'SET_RG_IP_ADAPTER_IMAGE') {
const { id, ipAdapterId } = postUploadAction;
dispatch(
rgIPAdapterImageChanged({ entityIdentifier: { id, type: 'regional_guidance' }, ipAdapterId, imageDTO })
);
dispatch(rgIPAdapterImageChanged({ id, ipAdapterId, imageDTO }));
toast({ ...DEFAULT_UPLOADED_TOAST, description: t('toast.setControlImage') });
return;
}

@@ -1,7 +1,6 @@
import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { loraDeleted, modelChanged, vaeSelected } from 'features/controlLayers/store/canvasV2Slice';
import { modelSelected } from 'features/parameters/store/actions';
import { zParameterModel } from 'features/parameters/types/parameterSchemas';
import { toast } from 'features/toast/toast';

@@ -24,14 +23,14 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
const newModel = result.data;

const newBaseModel = newModel.base;
const didBaseModelChange = state.params.model?.base !== newBaseModel;
const didBaseModelChange = state.canvasV2.params.model?.base !== newBaseModel;

if (didBaseModelChange) {
// we may need to reset some incompatible submodels
let modelsCleared = 0;

// handle incompatible loras
state.loras.loras.forEach((lora) => {
state.canvasV2.loras.forEach((lora) => {
if (lora.model.base !== newBaseModel) {
dispatch(loraDeleted({ id: lora.id }));
modelsCleared += 1;

@@ -39,14 +38,14 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
});

// handle incompatible vae
const { vae } = state.params;
const { vae } = state.canvasV2.params;
if (vae && vae.base !== newBaseModel) {
dispatch(vaeSelected(null));
modelsCleared += 1;
}

// handle incompatible controlnets
// state.canvas.present.controlAdapters.entities.forEach((ca) => {
// state.canvasV2.controlAdapters.entities.forEach((ca) => {
// if (ca.model?.base !== newBaseModel) {
// modelsCleared += 1;
// if (ca.isEnabled) {

@@ -67,7 +66,7 @@ export const addModelSelectedListener = (startAppListening: AppStartListening) =
}
}

dispatch(modelChanged({ model: newModel, previousModel: state.params.model }));
dispatch(modelChanged({ model: newModel, previousModel: state.canvasV2.params.model }));
},
});
};

@@ -7,12 +7,12 @@ import {
bboxWidthChanged,
controlLayerModelChanged,
ipaModelChanged,
loraDeleted,
modelChanged,
refinerModelChanged,
rgIPAdapterModelChanged,
} from 'features/controlLayers/store/canvasSlice';
import { loraDeleted } from 'features/controlLayers/store/lorasSlice';
import { modelChanged, refinerModelChanged, vaeSelected } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { getEntityIdentifier } from 'features/controlLayers/store/types';
vaeSelected,
} from 'features/controlLayers/store/canvasV2Slice';
import { calculateNewSize } from 'features/parameters/components/DocumentSize/calculateNewSize';
import { postProcessingModelChanged, upscaleModelChanged } from 'features/parameters/store/upscaleSlice';
import { zParameterModel, zParameterVAEModel } from 'features/parameters/types/parameterSchemas';

@@ -62,7 +62,7 @@ type ModelHandler = (
) => undefined;

const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
const currentModel = state.params.model;
const currentModel = state.canvasV2.params.model;
const mainModels = models.filter(isNonRefinerMainModelConfig);
if (mainModels.length === 0) {
// No models loaded at all

@@ -82,12 +82,15 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
const result = zParameterModel.safeParse(defaultModelInList);
if (result.success) {
dispatch(modelChanged({ model: defaultModelInList, previousModel: currentModel }));
const { bbox } = selectCanvasSlice(state);

const optimalDimension = getOptimalDimension(defaultModelInList);
if (getIsSizeOptimal(bbox.rect.width, bbox.rect.height, optimalDimension)) {
if (getIsSizeOptimal(state.canvasV2.bbox.rect.width, state.canvasV2.bbox.rect.height, optimalDimension)) {
return;
}
const { width, height } = calculateNewSize(bbox.aspectRatio.value, optimalDimension * optimalDimension);
const { width, height } = calculateNewSize(
state.canvasV2.bbox.aspectRatio.value,
optimalDimension * optimalDimension
);

dispatch(bboxWidthChanged({ width }));
dispatch(bboxHeightChanged({ height }));

@@ -106,7 +109,7 @@ const handleMainModels: ModelHandler = (models, state, dispatch, log) => {
};

const handleRefinerModels: ModelHandler = (models, state, dispatch, _log) => {
const currentRefinerModel = state.params.refinerModel;
const currentRefinerModel = state.canvasV2.params.refinerModel;
const refinerModels = models.filter(isRefinerMainModelModelConfig);
if (models.length === 0) {
// No models loaded at all

@@ -125,7 +128,7 @@ const handleRefinerModels: ModelHandler = (models, state, dispatch, _log) => {
};

const handleVAEModels: ModelHandler = (models, state, dispatch, log) => {
const currentVae = state.params.vae;
const currentVae = state.canvasV2.params.vae;

if (currentVae === null) {
// null is a valid VAE! it means "use the default with the main model"

@@ -159,7 +162,7 @@ const handleVAEModels: ModelHandler = (models, state, dispatch, log) => {

const handleLoRAModels: ModelHandler = (models, state, dispatch, _log) => {
const loraModels = models.filter(isLoRAModelConfig);
state.loras.loras.forEach((lora) => {
state.canvasV2.loras.forEach((lora) => {
const isLoRAAvailable = loraModels.some((m) => m.key === lora.model.key);
if (isLoRAAvailable) {
return;

@@ -170,34 +173,32 @@

const handleControlAdapterModels: ModelHandler = (models, state, dispatch, _log) => {
const caModels = models.filter(isControlNetOrT2IAdapterModelConfig);
selectCanvasSlice(state).controlLayers.entities.forEach((entity) => {
state.canvasV2.controlLayers.entities.forEach((entity) => {
const isModelAvailable = caModels.some((m) => m.key === entity.controlAdapter.model?.key);
if (isModelAvailable) {
return;
}
dispatch(controlLayerModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
dispatch(controlLayerModelChanged({ id: entity.id, modelConfig: null }));
});
};

const handleIPAdapterModels: ModelHandler = (models, state, dispatch, _log) => {
const ipaModels = models.filter(isIPAdapterModelConfig);
selectCanvasSlice(state).ipAdapters.entities.forEach((entity) => {
state.canvasV2.ipAdapters.entities.forEach((entity) => {
const isModelAvailable = ipaModels.some((m) => m.key === entity.ipAdapter.model?.key);
if (isModelAvailable) {
return;
}
dispatch(ipaModelChanged({ entityIdentifier: getEntityIdentifier(entity), modelConfig: null }));
dispatch(ipaModelChanged({ id: entity.id, modelConfig: null }));
});

selectCanvasSlice(state).regions.entities.forEach((entity) => {
entity.ipAdapters.forEach(({ id: ipAdapterId, model }) => {
state.canvasV2.regions.entities.forEach(({ id, ipAdapters }) => {
ipAdapters.forEach(({ id: ipAdapterId, model }) => {
const isModelAvailable = ipaModels.some((m) => m.key === model?.key);
if (isModelAvailable) {
return;
}
dispatch(
rgIPAdapterModelChanged({ entityIdentifier: getEntityIdentifier(entity), ipAdapterId, modelConfig: null })
);
dispatch(rgIPAdapterModelChanged({ id, ipAdapterId, modelConfig: null }));
});
});
};

@@ -1,6 +1,6 @@
import { isAnyOf } from '@reduxjs/toolkit';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { positivePromptChanged } from 'features/controlLayers/store/paramsSlice';
import { positivePromptChanged } from 'features/controlLayers/store/canvasV2Slice';
import {
combinatorialToggled,
isErrorChanged,

@@ -1,13 +1,14 @@
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
import {
bboxHeightChanged,
bboxWidthChanged,
setCfgRescaleMultiplier,
setCfgScale,
setScheduler,
setSteps,
vaePrecisionChanged,
vaeSelected,
} from 'features/controlLayers/store/paramsSlice';
} from 'features/controlLayers/store/canvasV2Slice';
import { setDefaultSettings } from 'features/parameters/store/actions';
import {
isParameterCFGRescaleMultiplier,

@@ -30,7 +31,7 @@ export const addSetDefaultSettingsListener = (startAppListening: AppStartListeni
effect: async (action, { dispatch, getState }) => {
const state = getState();

const currentModel = state.params.model;
const currentModel = state.canvasV2.params.model;

if (!currentModel) {
return;

@@ -2,7 +2,6 @@ import { logger } from 'app/logging/logger';
import type { AppStartListening } from 'app/store/middleware/listenerMiddleware';
import { updateAllNodesRequested } from 'features/nodes/store/actions';
import { $templates, nodesChanged } from 'features/nodes/store/nodesSlice';
import { selectNodes } from 'features/nodes/store/selectors';
import { NodeUpdateError } from 'features/nodes/types/error';
import { isInvocationNode } from 'features/nodes/types/invocation';
import { getNeedsUpdate, updateNode } from 'features/nodes/util/node/nodeUpdate';

@@ -15,7 +14,7 @@ export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartLi
startAppListening({
actionCreator: updateAllNodesRequested,
effect: (action, { dispatch, getState }) => {
const nodes = selectNodes(getState());
const { nodes } = getState().nodes.present;
const templates = $templates.get();

let unableToUpdateCount = 0;

@@ -6,12 +6,7 @@ import { errorHandler } from 'app/store/enhancers/reduxRemember/errors';
import type { SerializableObject } from 'common/types';
import { deepClone } from 'common/util/deepClone';
import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice';
import { canvasSessionPersistConfig, canvasSessionSlice } from 'features/controlLayers/store/canvasSessionSlice';
import { canvasSettingsPersistConfig, canvasSettingsSlice } from 'features/controlLayers/store/canvasSettingsSlice';
import { canvasPersistConfig, canvasSlice, canvasUndoableConfig } from 'features/controlLayers/store/canvasSlice';
import { lorasPersistConfig, lorasSlice } from 'features/controlLayers/store/lorasSlice';
import { paramsPersistConfig, paramsSlice } from 'features/controlLayers/store/paramsSlice';
import { toolPersistConfig, toolSlice } from 'features/controlLayers/store/toolSlice';
import { canvasV2PersistConfig, canvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { deleteImageModalSlice } from 'features/deleteImageModal/store/slice';
import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice';

@@ -58,15 +53,10 @@ const allReducers = {
[queueSlice.name]: queueSlice.reducer,
[workflowSlice.name]: workflowSlice.reducer,
[hrfSlice.name]: hrfSlice.reducer,
[canvasSlice.name]: undoable(canvasSlice.reducer, canvasUndoableConfig),
[canvasV2Slice.name]: canvasV2Slice.reducer,
[workflowSettingsSlice.name]: workflowSettingsSlice.reducer,
[upscaleSlice.name]: upscaleSlice.reducer,
[stylePresetSlice.name]: stylePresetSlice.reducer,
[paramsSlice.name]: paramsSlice.reducer,
[toolSlice.name]: toolSlice.reducer,
[canvasSettingsSlice.name]: canvasSettingsSlice.reducer,
[canvasSessionSlice.name]: canvasSessionSlice.reducer,
[lorasSlice.name]: lorasSlice.reducer,
};

const rootReducer = combineReducers(allReducers);

@@ -104,15 +94,10 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = {
[dynamicPromptsPersistConfig.name]: dynamicPromptsPersistConfig,
[modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig,
[hrfPersistConfig.name]: hrfPersistConfig,
[canvasPersistConfig.name]: canvasPersistConfig,
[canvasV2PersistConfig.name]: canvasV2PersistConfig,
[workflowSettingsPersistConfig.name]: workflowSettingsPersistConfig,
[upscalePersistConfig.name]: upscalePersistConfig,
[stylePresetPersistConfig.name]: stylePresetPersistConfig,
[paramsPersistConfig.name]: paramsPersistConfig,
[toolPersistConfig.name]: toolPersistConfig,
[canvasSettingsPersistConfig.name]: canvasSettingsPersistConfig,
[canvasSessionPersistConfig.name]: canvasSessionPersistConfig,
[lorasPersistConfig.name]: lorasPersistConfig,
};

const unserialize: UnserializeFunction = (data, key) => {

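In the reducer map above, one side wraps the split-out canvas reducer with `undoable(canvasSlice.reducer, canvasUndoableConfig)`, which is also why earlier hunks in this diff read `state.nodes.present` rather than `state.nodes`. A minimal sketch of that wrapper, assuming the `redux-undo` package (the slice shape and `limit` value are illustrative, not this codebase's config):

import type { PayloadAction } from '@reduxjs/toolkit';
import { combineReducers, createSlice } from '@reduxjs/toolkit';
import undoable from 'redux-undo';

const canvasSlice = createSlice({
  name: 'canvas',
  initialState: { strokes: [] as string[] },
  reducers: {
    strokeAdded: (state, action: PayloadAction<string>) => {
      state.strokes.push(action.payload);
    },
  },
});

const rootReducer = combineReducers({
  // redux-undo re-shapes the slice state into { past, present, future }.
  canvas: undoable(canvasSlice.reducer, { limit: 64 }),
});

// Reads must now go through .present:
// const strokes = store.getState().canvas.present.strokes;
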
@@ -13,9 +13,8 @@ import {
Spacer,
Text,
} from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectSystemSlice, setShouldEnableInformationalPopovers } from 'features/system/store/systemSlice';
import { setShouldEnableInformationalPopovers } from 'features/system/store/systemSlice';
import { toast } from 'features/toast/toast';
import { merge, omit } from 'lodash-es';
import type { ReactElement } from 'react';

@@ -32,13 +31,8 @@ type Props = {
children: ReactElement;
};

const selectShouldEnableInformationalPopovers = createSelector(
selectSystemSlice,
(system) => system.shouldEnableInformationalPopovers
);

export const InformationalPopover = memo(({ feature, children, inPortal = true, ...rest }: Props) => {
const shouldEnableInformationalPopovers = useAppSelector(selectShouldEnableInformationalPopovers);
const shouldEnableInformationalPopovers = useAppSelector((s) => s.system.shouldEnableInformationalPopovers);

const data = useMemo(() => POPOVER_DATA[feature], [feature]);

@@ -1,6 +1,5 @@
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { toast } from 'features/toast/toast';
import { selectActiveTab } from 'features/ui/store/uiSelectors';
import { useCallback, useEffect, useState } from 'react';

@@ -27,7 +26,7 @@ const selectPostUploadAction = createMemoizedSelector(selectActiveTab, (activeTa

export const useFullscreenDropzone = () => {
const { t } = useTranslation();
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
const [isHandlingUpload, setIsHandlingUpload] = useState<boolean>(false);
const postUploadAction = useAppSelector(selectPostUploadAction);
const [uploadImage] = useUploadImageMutation();

@@ -1,8 +1,6 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppSelector } from 'app/store/storeHooks';
import type { GroupBase } from 'chakra-react-select';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import type { ModelIdentifierField } from 'features/nodes/types/common';
import { groupBy, reduce } from 'lodash-es';
import { useCallback, useMemo } from 'react';

@@ -30,13 +28,11 @@ const groupByBaseFunc = <T extends AnyModelConfig>(model: T) => model.base.toUpp
const groupByBaseAndTypeFunc = <T extends AnyModelConfig>(model: T) =>
`${model.base.toUpperCase()} / ${model.type.replaceAll('_', ' ').toUpperCase()}`;

const selectBaseWithSDXLFallback = createSelector(selectParamsSlice, (params) => params.model?.base ?? 'sdxl');

export const useGroupedModelCombobox = <T extends AnyModelConfig>(
arg: UseGroupedModelComboboxArg<T>
): UseGroupedModelComboboxReturn => {
const { t } = useTranslation();
const base = useAppSelector(selectBaseWithSDXLFallback);
const base_model = useAppSelector((s) => s.canvasV2.params.model?.base ?? 'sdxl');
const { modelConfigs, selectedModel, getIsDisabled, onChange, isLoading, groupByType = false } = arg;
const options = useMemo<GroupBase<ComboboxOption>[]>(() => {
if (!modelConfigs) {

@@ -58,9 +54,9 @@ export const useGroupedModelCombobox = <T extends AnyModelConfig>(
},
[] as GroupBase<ComboboxOption>[]
);
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base) ? -1 : 1));
_options.sort((a) => (a.label?.split('/')[0]?.toLowerCase().includes(base_model) ? -1 : 1));
return _options;
}, [modelConfigs, groupByType, getIsDisabled, base]);
}, [modelConfigs, groupByType, getIsDisabled, base_model]);

const value = useMemo(
() =>

@@ -1,5 +1,4 @@
import { useAppSelector } from 'app/store/storeHooks';
import { selectAutoAddBoardId } from 'features/gallery/store/gallerySelectors';
import { useCallback } from 'react';
import { useDropzone } from 'react-dropzone';
import { useUploadImageMutation } from 'services/api/endpoints/images';

@@ -30,7 +29,7 @@ type UseImageUploadButtonArgs = {
 * <input {...getUploadInputProps()} /> // hidden, handles native upload functionality
 */
export const useImageUploadButton = ({ postUploadAction, isDisabled }: UseImageUploadButtonArgs) => {
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId);
const [uploadImage] = useUploadImageMutation();
const onDropAccepted = useCallback(
(files: File[]) => {

@@ -2,12 +2,10 @@ import { useStore } from '@nanostores/react';
import { $isConnected } from 'app/hooks/useSocketIO';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppSelector } from 'app/store/storeHooks';
import { selectParamsSlice } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice';
import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt';
import { $templates } from 'features/nodes/store/nodesSlice';
import { selectNodesSlice } from 'features/nodes/store/selectors';
import { $templates, selectNodesSlice } from 'features/nodes/store/nodesSlice';
import type { Templates } from 'features/nodes/store/types';
import { selectWorkflowSettingsSlice } from 'features/nodes/store/workflowSettingsSlice';
import { isInvocationNode } from 'features/nodes/types/invocation';

@@ -35,15 +33,14 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
selectNodesSlice,
selectWorkflowSettingsSlice,
selectDynamicPromptsSlice,
selectCanvasSlice,
selectParamsSlice,
selectCanvasV2Slice,
selectUpscalelice,
selectConfigSlice,
selectActiveTab,
],
(system, nodes, workflowSettings, dynamicPrompts, canvas, params, upscale, config, activeTabName) => {
const { bbox } = canvas;
const { model, positivePrompt } = params;
(system, nodes, workflowSettings, dynamicPrompts, canvasV2, upscale, config, activeTabName) => {
const { bbox } = canvasV2;
const { model, positivePrompt } = canvasV2.params;

const reasons: { prefix?: string; content: string }[] = [];

@@ -125,7 +122,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
reasons.push({ content: i18n.t('parameters.invoke.noModelSelected') });
}

canvas.controlLayers.entities
canvasV2.controlLayers.entities
.filter((controlLayer) => controlLayer.isEnabled)
.forEach((controlLayer, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');

@@ -155,7 +152,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
}
});

canvas.ipAdapters.entities
canvasV2.ipAdapters.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');

@@ -183,7 +180,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
}
});

canvas.regions.entities
canvasV2.regions.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');

@@ -220,7 +217,7 @@ const createSelector = (templates: Templates, isConnected: boolean) =>
}
});

canvas.rasterLayers.entities
canvasV2.rasterLayers.entities
.filter((entity) => entity.isEnabled)
.forEach((entity, i) => {
const layerLiteral = i18n.t('controlLayers.layers_one');

@@ -1,72 +0,0 @@
import type { Equals } from 'tsafe';
import { assert } from 'tsafe';
import { describe, expect, it } from 'vitest';

import type { ErrResult, OkResult } from './result';
import { Err, isErr, isOk, Ok, withResult, withResultAsync } from './result'; // Adjust import as needed

const promiseify = <T>(fn: () => T): (() => Promise<T>) => {
return () =>
new Promise((resolve) => {
resolve(fn());
});
};

describe('Result Utility Functions', () => {
it('Ok() should create an OkResult', () => {
const result = Ok(42);
expect(result).toEqual({ type: 'Ok', value: 42 });
expect(isOk(result)).toBe(true);
expect(isErr(result)).toBe(false);
assert<Equals<OkResult<number>, typeof result>>(result);
});

it('Err() should create an ErrResult', () => {
const error = new Error('Something went wrong');
const result = Err(error);
expect(result).toEqual({ type: 'Err', error });
expect(isOk(result)).toBe(false);
expect(isErr(result)).toBe(true);
assert<Equals<ErrResult<Error>, typeof result>>(result);
});

it('withResult() should return Ok on success', () => {
const fn = () => 42;
const result = withResult(fn);
expect(isOk(result)).toBe(true);
if (isOk(result)) {
expect(result.value).toBe(42);
}
});

it('withResult() should return Err on exception', () => {
const fn = () => {
throw new Error('Failure');
};
const result = withResult(fn);
expect(isErr(result)).toBe(true);
if (isErr(result)) {
expect(result.error.message).toBe('Failure');
}
});

it('withResultAsync() should return Ok on success', async () => {
const fn = promiseify(() => 42);
const result = await withResultAsync(fn);
expect(isOk(result)).toBe(true);
if (isOk(result)) {
expect(result.value).toBe(42);
}
});

it('withResultAsync() should return Err on exception', async () => {
const fn = promiseify(() => {
throw new Error('Async failure');
});
const result = await withResultAsync(fn);
expect(isErr(result)).toBe(true);
if (isErr(result)) {
expect(result.error.message).toBe('Async failure');
}
});
});

@ -1,89 +0,0 @@
|
||||
/**
|
||||
 * Represents a successful result.
 * @template T The type of the value.
 */
export type OkResult<T> = { type: 'Ok'; value: T };

/**
 * Represents a failed result.
 * @template E The type of the error.
 */
export type ErrResult<E> = { type: 'Err'; error: E };

/**
 * A union type that represents either a successful result (`Ok`) or a failed result (`Err`).
 * @template T The type of the value in the `Ok` case.
 * @template E The type of the error in the `Err` case.
 */
export type Result<T, E = Error> = OkResult<T> | ErrResult<E>;

/**
 * Creates a successful result.
 * @template T The type of the value.
 * @param {T} value The value to wrap in an `Ok` result.
 * @returns {OkResult<T>} The `Ok` result containing the value.
 */
export function Ok<T>(value: T): OkResult<T> {
  return { type: 'Ok', value };
}

/**
 * Creates a failed result.
 * @template E The type of the error.
 * @param {E} error The error to wrap in an `Err` result.
 * @returns {ErrResult<E>} The `Err` result containing the error.
 */
export function Err<E>(error: E): ErrResult<E> {
  return { type: 'Err', error };
}

/**
 * Wraps a synchronous function in a try-catch block, returning a `Result`.
 * @template T The type of the value returned by the function.
 * @param {() => T} fn The function to execute.
 * @returns {Result<T>} An `Ok` result if the function succeeds, or an `Err` result if it throws an error.
 */
export function withResult<T>(fn: () => T): Result<T> {
  try {
    return Ok(fn());
  } catch (error) {
    return Err(error instanceof Error ? error : new Error(String(error)));
  }
}

/**
 * Wraps an asynchronous function in a try-catch block, returning a `Promise` of a `Result`.
 * @template T The type of the value returned by the function.
 * @param {() => Promise<T>} fn The asynchronous function to execute.
 * @returns {Promise<Result<T>>} A `Promise` resolving to an `Ok` result if the function succeeds, or an `Err` result if it throws an error.
 */
export async function withResultAsync<T>(fn: () => Promise<T>): Promise<Result<T>> {
  try {
    const result = await fn();
    return Ok(result);
  } catch (error) {
    return Err(error instanceof Error ? error : new Error(String(error)));
  }
}

/**
 * Type guard to check if a `Result` is an `Ok` result.
 * @template T The type of the value in the `Ok` result.
 * @template E The type of the error in the `Err` result.
 * @param {Result<T, E>} result The result to check.
 * @returns {result is OkResult<T>} `true` if the result is an `Ok` result, otherwise `false`.
 */
export function isOk<T, E>(result: Result<T, E>): result is OkResult<T> {
  return result.type === 'Ok';
}

/**
 * Type guard to check if a `Result` is an `Err` result.
 * @template T The type of the value in the `Ok` result.
 * @template E The type of the error in the `Err` result.
 * @param {Result<T, E>} result The result to check.
 * @returns {result is ErrResult<E>} `true` if the result is an `Err` result, otherwise `false`.
 */
export function isErr<T, E>(result: Result<T, E>): result is ErrResult<E> {
  return result.type === 'Err';
}
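The new `Result` module above is easiest to read alongside a usage sketch. The following is illustrative only: the import path and the `/api/health` endpoint are assumptions, not part of this compare.

```ts
// Illustrative consumer of the Result helpers above; not part of the diff.
// The import path is an assumption about where this module lives.
import { isErr, isOk, withResult, withResultAsync } from 'common/util/result';

// Synchronous: a throwing JSON.parse becomes an Err value instead of an exception.
const parsed = withResult(() => JSON.parse('{"steps": 30}') as { steps: number });
if (isOk(parsed)) {
  console.log(parsed.value.steps); // the guard narrows to OkResult, exposing `value`
}

const main = async () => {
  // Asynchronous: a rejected promise (or thrown error) is captured as an Err.
  const fetched = await withResultAsync(async () => {
    const res = await fetch('/api/health'); // placeholder endpoint
    if (!res.ok) {
      throw new Error(`HTTP ${res.status}`); // surfaces as Err(new Error(...))
    }
    return res.text();
  });
  if (isErr(fetched)) {
    console.error(fetched.error.message); // narrows to ErrResult<Error>
  } else {
    console.log(fetched.value);
  }
};
void main();
```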
@@ -1,3 +1,7 @@
export const stopPropagation = (e: React.MouseEvent) => {
  e.stopPropagation();
};

export const preventDefault = (e: React.MouseEvent) => {
  e.preventDefault();
};
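These two helpers give components stable, shared handler references instead of fresh inline closures. A hypothetical consumer, purely for illustration (the component and import path are invented):

```tsx
// Hypothetical usage; the component and import path are not from this diff.
import { preventDefault, stopPropagation } from 'common/util/stopPropagation';

export const ThumbnailOverlay = () => (
  // Suppress the native context menu and keep button clicks from
  // bubbling up to the surrounding gallery item.
  <div onContextMenu={preventDefault}>
    <button type="button" onClick={stopPropagation}>
      Select
    </button>
  </div>
);
```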
@@ -1,6 +1,5 @@
import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
import { Combobox, ConfirmationAlertDialog, Flex, FormControl, Text } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
@@ -19,17 +18,12 @@ const selectImagesToChange = createMemoizedSelector(
  (changeBoardModal) => changeBoardModal.imagesToChange
);

const selectIsModalOpen = createSelector(
  selectChangeBoardModalSlice,
  (changeBoardModal) => changeBoardModal.isModalOpen
);

const ChangeBoardModal = () => {
  const dispatch = useAppDispatch();
  const [selectedBoard, setSelectedBoard] = useState<string | null>();
  const queryArgs = useAppSelector(selectListBoardsQueryArgs);
  const { data: boards, isFetching } = useListAllBoardsQuery(queryArgs);
  const isModalOpen = useAppSelector(selectIsModalOpen);
  const isModalOpen = useAppSelector((s) => s.changeBoardModal.isModalOpen);
  const imagesToChange = useAppSelector(selectImagesToChange);
  const [addImagesToBoard] = useAddImagesToBoardMutation();
  const [removeImagesFromBoard] = useRemoveImagesFromBoardMutation();
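This hunk shows a refactor pattern that recurs throughout this compare: inline `useAppSelector((s) => ...)` lambdas are replaced by named selectors such as `selectIsModalOpen` above, defined once next to the slice. A minimal sketch of the pattern (the `RootState` shape is simplified for illustration):

```ts
// Sketch of the named-selector pattern; RootState is simplified here.
import { createSelector } from '@reduxjs/toolkit';

type RootState = { changeBoardModal: { isModalOpen: boolean } };

const selectChangeBoardModalSlice = (state: RootState) => state.changeBoardModal;
// Named, reusable derivation instead of an inline lambda at each call site.
const selectIsModalOpen = createSelector(
  selectChangeBoardModalSlice,
  (changeBoardModal) => changeBoardModal.isModalOpen
);
// In a component: const isModalOpen = useAppSelector(selectIsModalOpen);
```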
@@ -1,12 +1,13 @@
import { Button, ButtonGroup, Flex } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useDefaultControlAdapter, useDefaultIPAdapter } from 'features/controlLayers/hooks/useLayerControlAdapter';
import {
  controlLayerAdded,
  inpaintMaskAdded,
  ipaAdded,
  rasterLayerAdded,
  rgAdded,
} from 'features/controlLayers/store/canvasSlice';
} from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';
@@ -14,21 +15,23 @@ import { PiPlusBold } from 'react-icons/pi';
export const CanvasAddEntityButtons = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const defaultControlAdapter = useDefaultControlAdapter();
  const defaultIPAdapter = useDefaultIPAdapter();
  const addInpaintMask = useCallback(() => {
    dispatch(inpaintMaskAdded({ isSelected: true }));
    dispatch(inpaintMaskAdded());
  }, [dispatch]);
  const addRegionalGuidance = useCallback(() => {
    dispatch(rgAdded({ isSelected: true }));
    dispatch(rgAdded());
  }, [dispatch]);
  const addRasterLayer = useCallback(() => {
    dispatch(rasterLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addControlLayer = useCallback(() => {
    dispatch(controlLayerAdded({ isSelected: true }));
  }, [dispatch]);
    dispatch(controlLayerAdded({ isSelected: true, overrides: { controlAdapter: defaultControlAdapter } }));
  }, [defaultControlAdapter, dispatch]);
  const addIPAdapter = useCallback(() => {
    dispatch(ipaAdded({ isSelected: true }));
  }, [dispatch]);
    dispatch(ipaAdded({ ipAdapter: defaultIPAdapter }));
  }, [defaultIPAdapter, dispatch]);

  return (
    <Flex flexDir="column" w="full" h="full" alignItems="center" justifyContent="center">
@@ -1,5 +1,6 @@
import { Flex } from '@invoke-ai/ui-library';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { CanvasEntityOpacity } from 'features/controlLayers/components/common/CanvasEntityOpacity';
import { ControlLayerEntityList } from 'features/controlLayers/components/ControlLayer/ControlLayerEntityList';
import { InpaintMaskList } from 'features/controlLayers/components/InpaintMask/InpaintMaskList';
import { IPAdapterList } from 'features/controlLayers/components/IPAdapter/IPAdapterList';
@@ -10,7 +11,8 @@ import { memo } from 'react';
export const CanvasEntityList = memo(() => {
  return (
    <ScrollableContent>
      <Flex flexDir="column" gap={2} data-testid="control-layers-layer-list" w="full" h="full">
      <Flex flexDir="column" gap={4} pt={2} data-testid="control-layers-layer-list">
        <CanvasEntityOpacity />
        <InpaintMaskList />
        <RegionalGuidanceEntityList />
        <IPAdapterList />
@@ -1,20 +0,0 @@
import { Flex, Spacer } from '@invoke-ai/ui-library';
import { EntityListActionBarAddLayerButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuButton';
import { EntityListActionBarDeleteButton } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarDeleteButton';
import { EntityListActionBarSelectedEntityFill } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityFill';
import { SelectedEntityOpacity } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarSelectedEntityOpacity';
import { memo } from 'react';

export const EntityListActionBar = memo(() => {
  return (
    <Flex w="full" py={1} px={1} gap={2} alignItems="center">
      <SelectedEntityOpacity />
      <Spacer />
      <EntityListActionBarSelectedEntityFill />
      <EntityListActionBarAddLayerButton />
      <EntityListActionBarDeleteButton />
    </Flex>
  );
});

EntityListActionBar.displayName = 'EntityListActionBar';
@@ -1,28 +0,0 @@
import { IconButton, Menu, MenuButton, MenuList } from '@invoke-ai/ui-library';
import { CanvasEntityListMenuItems } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBarAddLayerMenuItems';
import { memo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const EntityListActionBarAddLayerButton = memo(() => {
  const { t } = useTranslation();

  return (
    <Menu>
      <MenuButton
        as={IconButton}
        size="sm"
        tooltip={t('controlLayers.addLayer')}
        aria-label={t('controlLayers.addLayer')}
        icon={<PiPlusBold />}
        variant="ghost"
        data-testid="control-layers-add-layer-menu-button"
      />
      <MenuList>
        <CanvasEntityListMenuItems />
      </MenuList>
    </Menu>
  );
});

EntityListActionBarAddLayerButton.displayName = 'EntityListActionBarAddLayerButton';
@@ -1,54 +0,0 @@
import { MenuItem } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import {
  controlLayerAdded,
  inpaintMaskAdded,
  ipaAdded,
  rasterLayerAdded,
  rgAdded,
} from 'features/controlLayers/store/canvasSlice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiPlusBold } from 'react-icons/pi';

export const CanvasEntityListMenuItems = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const addInpaintMask = useCallback(() => {
    dispatch(inpaintMaskAdded({ isSelected: true }));
  }, [dispatch]);
  const addRegionalGuidance = useCallback(() => {
    dispatch(rgAdded({ isSelected: true }));
  }, [dispatch]);
  const addRasterLayer = useCallback(() => {
    dispatch(rasterLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addControlLayer = useCallback(() => {
    dispatch(controlLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addIPAdapter = useCallback(() => {
    dispatch(ipaAdded({ isSelected: true }));
  }, [dispatch]);

  return (
    <>
      <MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
        {t('controlLayers.inpaintMask', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
        {t('controlLayers.regionalGuidance', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
        {t('controlLayers.rasterLayer', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
        {t('controlLayers.controlLayer', { count: 1 })}
      </MenuItem>
      <MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
        {t('controlLayers.ipAdapter', { count: 1 })}
      </MenuItem>
    </>
  );
});

CanvasEntityListMenuItems.displayName = 'CanvasEntityListMenu';
@@ -1,39 +0,0 @@
import { IconButton, useShiftModifier } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { allEntitiesDeleted, entityDeleted } from 'features/controlLayers/store/canvasSlice';
import { selectEntityCount, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiTrashSimpleFill } from 'react-icons/pi';

export const EntityListActionBarDeleteButton = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const entityCount = useAppSelector(selectEntityCount);
  const shift = useShiftModifier();
  const onClick = useCallback(() => {
    if (shift) {
      dispatch(allEntitiesDeleted());
      return;
    }
    if (!selectedEntityIdentifier) {
      return;
    }
    dispatch(entityDeleted({ entityIdentifier: selectedEntityIdentifier }));
  }, [dispatch, selectedEntityIdentifier, shift]);

  return (
    <IconButton
      onClick={onClick}
      isDisabled={shift ? entityCount === 0 : !selectedEntityIdentifier}
      size="sm"
      variant="ghost"
      aria-label={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
      tooltip={shift ? t('controlLayers.deleteAll') : t('controlLayers.deleteSelected')}
      icon={<PiTrashSimpleFill />}
    />
  );
});

EntityListActionBarDeleteButton.displayName = 'EntityListActionBarDeleteButton';
@@ -1,70 +0,0 @@
import { Box, Flex, Popover, PopoverBody, PopoverContent, PopoverTrigger, Tooltip } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import RgbColorPicker from 'common/components/RgbColorPicker';
import { rgbColorToString } from 'common/util/colorCodeTransformers';
import { MaskFillStyle } from 'features/controlLayers/components/common/MaskFillStyle';
import { entityFillColorChanged, entityFillStyleChanged } from 'features/controlLayers/store/canvasSlice';
import { selectSelectedEntityFill, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import { type FillStyle, isMaskEntityIdentifier, type RgbColor } from 'features/controlLayers/store/types';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

export const EntityListActionBarSelectedEntityFill = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const selectedEntityIdentifier = useAppSelector(selectSelectedEntityIdentifier);
  const fill = useAppSelector(selectSelectedEntityFill);

  const onChangeFillColor = useCallback(
    (color: RgbColor) => {
      if (!selectedEntityIdentifier) {
        return;
      }
      if (!isMaskEntityIdentifier(selectedEntityIdentifier)) {
        return;
      }
      dispatch(entityFillColorChanged({ entityIdentifier: selectedEntityIdentifier, color }));
    },
    [dispatch, selectedEntityIdentifier]
  );
  const onChangeFillStyle = useCallback(
    (style: FillStyle) => {
      if (!selectedEntityIdentifier) {
        return;
      }
      if (!isMaskEntityIdentifier(selectedEntityIdentifier)) {
        return;
      }
      dispatch(entityFillStyleChanged({ entityIdentifier: selectedEntityIdentifier, style }));
    },
    [dispatch, selectedEntityIdentifier]
  );

  if (!selectedEntityIdentifier || !fill) {
    return null;
  }

  return (
    <Popover isLazy>
      <PopoverTrigger>
        <Flex role="button" aria-label={t('controlLayers.maskFill')} tabIndex={-1} w={8} h={8}>
          <Tooltip label={t('controlLayers.maskFill')}>
            <Flex w="full" h="full" alignItems="center" justifyContent="center">
              <Box borderRadius="full" w={6} h={6} borderWidth={1} bg={rgbColorToString(fill.color)} />
            </Flex>
          </Tooltip>
        </Flex>
      </PopoverTrigger>
      <PopoverContent>
        <PopoverBody minH={64}>
          <Flex flexDir="column" gap={4}>
            <RgbColorPicker color={fill.color} onChange={onChangeFillColor} withNumberInput />
            <MaskFillStyle style={fill.style} onChange={onChangeFillStyle} />
          </Flex>
        </PopoverBody>
      </PopoverContent>
    </Popover>
  );
});

EntityListActionBarSelectedEntityFill.displayName = 'EntityListActionBarSelectedEntityFill';
@@ -0,0 +1,80 @@
import { IconButton, Menu, MenuButton, MenuDivider, MenuItem, MenuList } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { useDefaultControlAdapter, useDefaultIPAdapter } from 'features/controlLayers/hooks/useLayerControlAdapter';
import {
  allEntitiesDeleted,
  controlLayerAdded,
  inpaintMaskAdded,
  ipaAdded,
  rasterLayerAdded,
  rgAdded,
} from 'features/controlLayers/store/canvasV2Slice';
import { selectEntityCount } from 'features/controlLayers/store/selectors';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiDotsThreeOutlineFill, PiPlusBold, PiTrashSimpleBold } from 'react-icons/pi';

export const CanvasEntityListMenu = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const hasEntities = useAppSelector((s) => {
    const count = selectEntityCount(s);
    return count > 0;
  });
  const defaultControlAdapter = useDefaultControlAdapter();
  const defaultIPAdapter = useDefaultIPAdapter();
  const addInpaintMask = useCallback(() => {
    dispatch(inpaintMaskAdded());
  }, [dispatch]);
  const addRegionalGuidance = useCallback(() => {
    dispatch(rgAdded());
  }, [dispatch]);
  const addRasterLayer = useCallback(() => {
    dispatch(rasterLayerAdded({ isSelected: true }));
  }, [dispatch]);
  const addControlLayer = useCallback(() => {
    dispatch(controlLayerAdded({ isSelected: true, overrides: { controlAdapter: defaultControlAdapter } }));
  }, [defaultControlAdapter, dispatch]);
  const addIPAdapter = useCallback(() => {
    dispatch(ipaAdded({ ipAdapter: defaultIPAdapter }));
  }, [defaultIPAdapter, dispatch]);
  const deleteAll = useCallback(() => {
    dispatch(allEntitiesDeleted());
  }, [dispatch]);

  return (
    <Menu>
      <MenuButton
        as={IconButton}
        aria-label={t('accessibility.menu')}
        icon={<PiDotsThreeOutlineFill />}
        variant="link"
        data-testid="control-layers-add-layer-menu-button"
        alignSelf="stretch"
      />
      <MenuList>
        <MenuItem icon={<PiPlusBold />} onClick={addInpaintMask}>
          {t('controlLayers.inpaintMask', { count: 1 })}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addRegionalGuidance}>
          {t('controlLayers.regionalGuidance', { count: 1 })}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addRasterLayer}>
          {t('controlLayers.rasterLayer', { count: 1 })}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addControlLayer}>
          {t('controlLayers.controlLayer', { count: 1 })}
        </MenuItem>
        <MenuItem icon={<PiPlusBold />} onClick={addIPAdapter}>
          {t('controlLayers.ipAdapter', { count: 1 })}
        </MenuItem>
        <MenuDivider />
        <MenuItem onClick={deleteAll} icon={<PiTrashSimpleBold />} color="error.300" isDisabled={!hasEntities}>
          {t('controlLayers.deleteAll', { count: 1 })}
        </MenuItem>
      </MenuList>
    </Menu>
  );
});

CanvasEntityListMenu.displayName = 'CanvasEntityListMenu';
@@ -1,16 +1,13 @@
import { Button, ButtonGroup } from '@invoke-ai/ui-library';
import { createSelector } from '@reduxjs/toolkit';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { selectCanvasSessionSlice, sessionModeChanged } from 'features/controlLayers/store/canvasSessionSlice';
import { sessionModeChanged } from 'features/controlLayers/store/canvasV2Slice';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';

const selectCanvasMode = createSelector(selectCanvasSessionSlice, (canvasSession) => canvasSession.mode);

export const CanvasModeSwitcher = memo(() => {
  const { t } = useTranslation();
  const dispatch = useAppDispatch();
  const mode = useAppSelector(selectCanvasMode);
  const mode = useAppSelector((s) => s.canvasV2.session.mode);
  const onClickGenerate = useCallback(() => dispatch(sessionModeChanged({ mode: 'generate' })), [dispatch]);
  const onClickCompose = useCallback(() => dispatch(sessionModeChanged({ mode: 'compose' })), [dispatch]);
@@ -1,22 +1,16 @@
import { Divider, Flex } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { CanvasAddEntityButtons } from 'features/controlLayers/components/CanvasAddEntityButtons';
import { CanvasEntityList } from 'features/controlLayers/components/CanvasEntityList/CanvasEntityList';
import { EntityListActionBar } from 'features/controlLayers/components/CanvasEntityList/EntityListActionBar';
import { CanvasEntityList } from 'features/controlLayers/components/CanvasEntityList';
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { selectHasEntities } from 'features/controlLayers/store/selectors';
import { selectEntityCount } from 'features/controlLayers/store/selectors';
import { memo } from 'react';

export const CanvasPanelContent = memo(() => {
  const hasEntities = useAppSelector(selectHasEntities);
  const hasEntities = useAppSelector((s) => selectEntityCount(s) > 0);
  return (
    <CanvasManagerProviderGate>
      <Flex flexDir="column" gap={2} w="full" h="full">
        <EntityListActionBar />
        <Divider py={0} />
        {!hasEntities && <CanvasAddEntityButtons />}
        {hasEntities && <CanvasEntityList />}
      </Flex>
      {!hasEntities && <CanvasAddEntityButtons />}
      {hasEntities && <CanvasEntityList />}
    </CanvasManagerProviderGate>
  );
});
@@ -14,9 +14,10 @@ import {
  PopoverTrigger,
} from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
import { $canvasManager } from 'features/controlLayers/konva/CanvasManager';
import { MAX_CANVAS_SCALE, MIN_CANVAS_SCALE } from 'features/controlLayers/konva/constants';
import { snapToNearest } from 'features/controlLayers/konva/util';
import { $stageAttrs } from 'features/controlLayers/store/canvasV2Slice';
import { clamp, round } from 'lodash-es';
import { computed } from 'nanostores';
import type { KeyboardEvent } from 'react';
@@ -71,10 +72,12 @@ const sliderDefaultValue = mapScaleToSliderValue(100);

const snapCandidates = marks.slice(1, marks.length - 1);

const $scale = computed($stageAttrs, (attrs) => attrs.scale);
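Hoisting `$scale` to module scope is the point of this hunk: the old component body below called `computed(...)` during render, creating a fresh derived store each time, while a module-level `computed` is created once and shared by every subscriber. A self-contained sketch of the same nanostores pattern, with illustrative store names:

```ts
// Self-contained nanostores sketch; $attrs and $zoomPct are illustrative
// names, not the stores from this diff.
import { atom, computed } from 'nanostores';

const $attrs = atom({ x: 0, y: 0, scale: 1 });
// Created once at module scope, so all consumers share one derived store.
const $zoomPct = computed($attrs, (attrs) => attrs.scale * 100);

$zoomPct.listen((pct) => console.log(`zoom: ${pct}%`));
$attrs.set({ x: 0, y: 0, scale: 2 }); // logs "zoom: 200%"
```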
|
||||
|
||||
export const CanvasScale = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const canvasManager = useCanvasManager();
|
||||
const scale = useStore(computed(canvasManager.stateApi.$stageAttrs, (attrs) => attrs.scale));
|
||||
const canvasManager = useStore($canvasManager);
|
||||
const scale = useStore($scale);
|
||||
const [localScale, setLocalScale] = useState(scale * 100);
|
||||
|
||||
const onChangeSlider = useCallback(
|
||||
|
@ -2,7 +2,6 @@ import { Spacer } from '@invoke-ai/ui-library';
|
||||
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
|
||||
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
|
||||
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
|
||||
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
|
||||
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
|
||||
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
|
||||
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
|
||||
@ -29,7 +28,6 @@ export const ControlLayer = memo(({ id }: Props) => {
|
||||
<CanvasEntityEditableTitle />
|
||||
<Spacer />
|
||||
<ControlLayerBadges />
|
||||
<CanvasEntityIsLockedToggle />
|
||||
<CanvasEntityEnabledToggle />
|
||||
</CanvasEntityHeader>
|
||||
<CanvasEntitySettingsWrapper>
|
||||
|
@ -1,15 +1,15 @@
|
||||
import { Badge } from '@invoke-ai/ui-library';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
|
||||
import { selectControlLayerEntityOrThrow } from 'features/controlLayers/store/controlLayersReducers';
|
||||
import { memo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const ControlLayerBadges = memo(() => {
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const { id } = useEntityIdentifierContext();
|
||||
const { t } = useTranslation();
|
||||
const withTransparencyEffect = useAppSelector(
|
||||
(s) => selectEntityOrThrow(selectCanvasSlice(s), entityIdentifier).withTransparencyEffect
|
||||
(s) => selectControlLayerEntityOrThrow(s.canvasV2, id).withTransparencyEffect
|
||||
);
|
||||
|
||||
return (
|
||||
|
@ -11,42 +11,42 @@ import {
|
||||
controlLayerControlModeChanged,
|
||||
controlLayerModelChanged,
|
||||
controlLayerWeightChanged,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
} from 'features/controlLayers/store/canvasV2Slice';
|
||||
import type { ControlModeV2 } from 'features/controlLayers/store/types';
|
||||
import { memo, useCallback } from 'react';
|
||||
import type { ControlNetModelConfig, T2IAdapterModelConfig } from 'services/api/types';
|
||||
|
||||
export const ControlLayerControlAdapter = memo(() => {
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
const controlAdapter = useControlLayerControlAdapter(entityIdentifier);
|
||||
|
||||
const onChangeBeginEndStepPct = useCallback(
|
||||
(beginEndStepPct: [number, number]) => {
|
||||
dispatch(controlLayerBeginEndStepPctChanged({ entityIdentifier, beginEndStepPct }));
|
||||
dispatch(controlLayerBeginEndStepPctChanged({ id: entityIdentifier.id, beginEndStepPct }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, entityIdentifier.id]
|
||||
);
|
||||
|
||||
const onChangeControlMode = useCallback(
|
||||
(controlMode: ControlModeV2) => {
|
||||
dispatch(controlLayerControlModeChanged({ entityIdentifier, controlMode }));
|
||||
dispatch(controlLayerControlModeChanged({ id: entityIdentifier.id, controlMode }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, entityIdentifier.id]
|
||||
);
|
||||
|
||||
const onChangeWeight = useCallback(
|
||||
(weight: number) => {
|
||||
dispatch(controlLayerWeightChanged({ entityIdentifier, weight }));
|
||||
dispatch(controlLayerWeightChanged({ id: entityIdentifier.id, weight }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, entityIdentifier.id]
|
||||
);
|
||||
|
||||
const onChangeModel = useCallback(
|
||||
(modelConfig: ControlNetModelConfig | T2IAdapterModelConfig) => {
|
||||
dispatch(controlLayerModelChanged({ entityIdentifier, modelConfig }));
|
||||
dispatch(controlLayerModelChanged({ id: entityIdentifier.id, modelConfig }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, entityIdentifier.id]
|
||||
);
|
||||
|
||||
return (
|
||||
|
@ -3,7 +3,6 @@ import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { selectBase } from 'features/controlLayers/store/paramsSlice';
|
||||
import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/types';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
@ -19,7 +18,7 @@ export const ControlLayerControlAdapterModel = memo(({ modelKey, onChange: onCha
|
||||
const { t } = useTranslation();
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
const canvasManager = useCanvasManager();
|
||||
const currentBaseModel = useAppSelector(selectBase);
|
||||
const currentBaseModel = useAppSelector((s) => s.canvasV2.params.model?.base);
|
||||
const [modelConfigs, { isLoading }] = useControlNetAndT2IAdapterModels();
|
||||
const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);
|
||||
|
||||
|
@ -1,22 +1,17 @@
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
|
||||
import { ControlLayer } from 'features/controlLayers/components/ControlLayer/ControlLayer';
|
||||
import { mapId } from 'features/controlLayers/konva/util';
|
||||
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
|
||||
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { memo } from 'react';
|
||||
|
||||
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
|
||||
return canvas.controlLayers.entities.map(mapId).reverse();
|
||||
});
|
||||
|
||||
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
|
||||
return selectedEntityIdentifier?.type === 'control_layer';
|
||||
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
|
||||
return canvasV2.controlLayers.entities.map(mapId).reverse();
|
||||
});
|
||||
|
||||
export const ControlLayerEntityList = memo(() => {
|
||||
const isSelected = useAppSelector(selectIsSelected);
|
||||
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'control_layer'));
|
||||
const layerIds = useAppSelector(selectEntityIds);
|
||||
|
||||
if (layerIds.length === 0) {
|
||||
|
@ -1,7 +1,6 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
import { CanvasEntityMenuItemsFilter } from 'features/controlLayers/components/common/CanvasEntityMenuItemsFilter';
|
||||
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
|
||||
import { ControlLayerMenuItemsControlToRaster } from 'features/controlLayers/components/ControlLayer/ControlLayerMenuItemsControlToRaster';
|
||||
@ -18,7 +17,6 @@ export const ControlLayerMenuItems = memo(() => {
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
);
|
||||
|
@ -1,7 +1,7 @@
|
||||
import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch } from 'app/store/storeHooks';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { controlLayerConvertedToRasterLayer } from 'features/controlLayers/store/canvasSlice';
|
||||
import { controlLayerConvertedToRasterLayer } from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiLightningBold } from 'react-icons/pi';
|
||||
@ -9,11 +9,11 @@ import { PiLightningBold } from 'react-icons/pi';
|
||||
export const ControlLayerMenuItemsControlToRaster = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
|
||||
const convertControlLayerToRasterLayer = useCallback(() => {
|
||||
dispatch(controlLayerConvertedToRasterLayer({ entityIdentifier }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
dispatch(controlLayerConvertedToRasterLayer({ id: entityIdentifier.id }));
|
||||
}, [dispatch, entityIdentifier.id]);
|
||||
|
||||
return (
|
||||
<MenuItem onClick={convertControlLayerToRasterLayer} icon={<PiLightningBold />}>
|
||||
|
@ -2,8 +2,11 @@ import { MenuItem } from '@invoke-ai/ui-library';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { controlLayerWithTransparencyEffectToggled } from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
|
||||
import {
|
||||
controlLayerWithTransparencyEffectToggled,
|
||||
selectCanvasV2Slice,
|
||||
} from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { selectControlLayerEntityOrThrow } from 'features/controlLayers/store/controlLayersReducers';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { PiDropHalfBold } from 'react-icons/pi';
|
||||
@ -11,18 +14,18 @@ import { PiDropHalfBold } from 'react-icons/pi';
|
||||
export const ControlLayerMenuItemsTransparencyEffect = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('control_layer');
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
const selectWithTransparencyEffect = useMemo(
|
||||
() =>
|
||||
createSelector(selectCanvasSlice, (canvas) => {
|
||||
const entity = selectEntityOrThrow(canvas, entityIdentifier);
|
||||
createSelector(selectCanvasV2Slice, (canvasV2) => {
|
||||
const entity = selectControlLayerEntityOrThrow(canvasV2, entityIdentifier.id);
|
||||
return entity.withTransparencyEffect;
|
||||
}),
|
||||
[entityIdentifier]
|
||||
[entityIdentifier.id]
|
||||
);
|
||||
const withTransparencyEffect = useAppSelector(selectWithTransparencyEffect);
|
||||
const onToggle = useCallback(() => {
|
||||
dispatch(controlLayerWithTransparencyEffectToggled({ entityIdentifier }));
|
||||
dispatch(controlLayerWithTransparencyEffectToggled({ id: entityIdentifier.id }));
|
||||
}, [dispatch, entityIdentifier]);
|
||||
|
||||
return (
|
||||
|
@ -33,11 +33,9 @@ export const CanvasEditor = memo(() => {
|
||||
<ControlLayersToolbar />
|
||||
<StageComponent />
|
||||
<Flex position="absolute" bottom={16} gap={2} align="center" justify="center">
|
||||
<CanvasManagerProviderGate>
|
||||
<StagingAreaIsStagingGate>
|
||||
<StagingAreaToolbar />
|
||||
</StagingAreaIsStagingGate>
|
||||
</CanvasManagerProviderGate>
|
||||
<StagingAreaIsStagingGate>
|
||||
<StagingAreaToolbar />
|
||||
</StagingAreaIsStagingGate>
|
||||
</Flex>
|
||||
<Flex position="absolute" bottom={16}>
|
||||
<CanvasManagerProviderGate>
|
||||
|
@ -1,12 +1,14 @@
|
||||
/* eslint-disable i18next/no-literal-string */
|
||||
import { Flex, Spacer } from '@invoke-ai/ui-library';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { CanvasModeSwitcher } from 'features/controlLayers/components/CanvasModeSwitcher';
|
||||
import { CanvasResetViewButton } from 'features/controlLayers/components/CanvasResetViewButton';
|
||||
import { CanvasScale } from 'features/controlLayers/components/CanvasScale';
|
||||
import { CanvasSettingsPopover } from 'features/controlLayers/components/Settings/CanvasSettingsPopover';
|
||||
import { ToolBrushWidth } from 'features/controlLayers/components/Tool/ToolBrushWidth';
|
||||
import { ToolChooser } from 'features/controlLayers/components/Tool/ToolChooser';
|
||||
import { ToolEraserWidth } from 'features/controlLayers/components/Tool/ToolEraserWidth';
|
||||
import { ToolFillColorPicker } from 'features/controlLayers/components/Tool/ToolFillColorPicker';
|
||||
import { ToolSettings } from 'features/controlLayers/components/Tool/ToolSettings';
|
||||
import { UndoRedoButtonGroup } from 'features/controlLayers/components/UndoRedoButtonGroup';
|
||||
import { CanvasManagerProviderGate } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { ToggleProgressButton } from 'features/gallery/components/ImageViewer/ToggleProgressButton';
|
||||
@ -14,13 +16,15 @@ import { ViewerToggleMenu } from 'features/gallery/components/ImageViewer/Viewer
|
||||
import { memo } from 'react';
|
||||
|
||||
export const ControlLayersToolbar = memo(() => {
|
||||
const tool = useAppSelector((s) => s.canvasV2.tool.selected);
|
||||
return (
|
||||
<CanvasManagerProviderGate>
|
||||
<Flex w="full" gap={2} alignItems="center">
|
||||
<ToggleProgressButton />
|
||||
<ToolChooser />
|
||||
<Spacer />
|
||||
<ToolSettings />
|
||||
{tool === 'brush' && <ToolBrushWidth />}
|
||||
{tool === 'eraser' && <ToolEraserWidth />}
|
||||
<Spacer />
|
||||
<CanvasScale />
|
||||
<CanvasResetViewButton />
|
||||
|
@ -11,8 +11,8 @@ import { PiCheckBold, PiShootingStarBold, PiXBold } from 'react-icons/pi';
|
||||
export const Filter = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const canvasManager = useCanvasManager();
|
||||
const adapter = useStore(canvasManager.filter.$adapter);
|
||||
const config = useStore(canvasManager.filter.$config);
|
||||
const isFiltering = useStore(canvasManager.filter.$isFiltering);
|
||||
const isProcessing = useStore(canvasManager.filter.$isProcessing);
|
||||
|
||||
const previewFilter = useCallback(() => {
|
||||
@ -41,7 +41,7 @@ export const Filter = memo(() => {
|
||||
[canvasManager.filter.$config]
|
||||
);
|
||||
|
||||
if (!isFiltering) {
|
||||
if (!adapter) {
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -1,17 +1,20 @@
|
||||
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
|
||||
import { Combobox, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
|
||||
import type { FilterConfig } from 'features/controlLayers/store/types';
|
||||
import { IMAGE_FILTERS, isFilterType } from 'features/controlLayers/store/types';
|
||||
import { selectConfigSlice } from 'features/system/store/configSlice';
|
||||
import { configSelector } from 'features/system/store/configSelectors';
|
||||
import { includes, map } from 'lodash-es';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { assert } from 'tsafe';
|
||||
|
||||
const selectDisabledProcessors = createSelector(selectConfigSlice, (config) => config.sd.disabledControlNetProcessors);
|
||||
const selectDisabledProcessors = createMemoizedSelector(
|
||||
configSelector,
|
||||
(config) => config.sd.disabledControlNetProcessors
|
||||
);
|
||||
|
||||
type Props = {
|
||||
filterType: FilterConfig['type'];
|
||||
|
@ -1,23 +1,25 @@
|
||||
import { Box, Flex, Text } from '@invoke-ai/ui-library';
|
||||
import { useStore } from '@nanostores/react';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useCanvasManager } from 'features/controlLayers/contexts/CanvasManagerProviderGate';
|
||||
import { selectCanvasSlice } from 'features/controlLayers/store/selectors';
|
||||
import {
|
||||
$isDrawing,
|
||||
$isMouseDown,
|
||||
$lastAddedPoint,
|
||||
$lastCursorPos,
|
||||
$lastMouseDownPos,
|
||||
$stageAttrs,
|
||||
} from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { round } from 'lodash-es';
|
||||
import { memo } from 'react';
|
||||
|
||||
const selectBbox = createSelector(selectCanvasSlice, (canvas) => canvas.bbox);
|
||||
|
||||
export const HeadsUpDisplay = memo(() => {
|
||||
const canvasManager = useCanvasManager();
|
||||
const stageAttrs = useStore(canvasManager.stateApi.$stageAttrs);
|
||||
const cursorPos = useStore(canvasManager.stateApi.$lastCursorPos);
|
||||
const isDrawing = useStore(canvasManager.stateApi.$isDrawing);
|
||||
const isMouseDown = useStore(canvasManager.stateApi.$isMouseDown);
|
||||
const lastMouseDownPos = useStore(canvasManager.stateApi.$lastMouseDownPos);
|
||||
const lastAddedPoint = useStore(canvasManager.stateApi.$lastAddedPoint);
|
||||
const bbox = useAppSelector(selectBbox);
|
||||
const stageAttrs = useStore($stageAttrs);
|
||||
const cursorPos = useStore($lastCursorPos);
|
||||
const isDrawing = useStore($isDrawing);
|
||||
const isMouseDown = useStore($isMouseDown);
|
||||
const lastMouseDownPos = useStore($lastMouseDownPos);
|
||||
const lastAddedPoint = useStore($lastAddedPoint);
|
||||
const bbox = useAppSelector((s) => s.canvasV2.bbox);
|
||||
|
||||
return (
|
||||
<Flex flexDir="column" bg="blackAlpha.400" borderBottomEndRadius="base" p={2} minW={64} gap={2}>
|
||||
|
@ -5,7 +5,7 @@ import { $isConnected } from 'app/hooks/useSocketIO';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import IAIDndImage from 'common/components/IAIDndImage';
|
||||
import IAIDndImageIcon from 'common/components/IAIDndImageIcon';
|
||||
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasSlice';
|
||||
import { bboxHeightChanged, bboxWidthChanged } from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { selectOptimalDimension } from 'features/controlLayers/store/selectors';
|
||||
import type { ImageWithDims } from 'features/controlLayers/store/types';
|
||||
import type { ImageDraggableData, TypesafeDroppableData } from 'features/dnd/types';
|
||||
|
@ -1,22 +1,18 @@
|
||||
/* eslint-disable i18next/no-literal-string */
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
|
||||
import { IPAdapter } from 'features/controlLayers/components/IPAdapter/IPAdapter';
|
||||
import { mapId } from 'features/controlLayers/konva/util';
|
||||
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
|
||||
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { memo } from 'react';
|
||||
|
||||
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
|
||||
return canvas.ipAdapters.entities.map(mapId).reverse();
|
||||
});
|
||||
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
|
||||
return selectedEntityIdentifier?.type === 'ip_adapter';
|
||||
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
|
||||
return canvasV2.ipAdapters.entities.map(mapId).reverse();
|
||||
});
|
||||
|
||||
export const IPAdapterList = memo(() => {
|
||||
const isSelected = useAppSelector(selectIsSelected);
|
||||
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'ip_adapter'));
|
||||
const ipaIds = useAppSelector(selectEntityIds);
|
||||
|
||||
if (ipaIds.length === 0) {
|
||||
|
@ -1,7 +1,6 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
import { memo } from 'react';
|
||||
|
||||
export const IPAdapterMenuItems = memo(() => {
|
||||
@ -9,7 +8,6 @@ export const IPAdapterMenuItems = memo(() => {
|
||||
<>
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
);
|
||||
|
@ -2,7 +2,6 @@ import type { ComboboxOnChange } from '@invoke-ai/ui-library';
|
||||
import { Combobox, Flex, FormControl, Tooltip } from '@invoke-ai/ui-library';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox';
|
||||
import { selectBase } from 'features/controlLayers/store/paramsSlice';
|
||||
import type { CLIPVisionModelV2 } from 'features/controlLayers/store/types';
|
||||
import { isCLIPVisionModelV2 } from 'features/controlLayers/store/types';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
@ -25,7 +24,7 @@ type Props = {
|
||||
|
||||
export const IPAdapterModel = memo(({ modelKey, onChangeModel, clipVisionModel, onChangeCLIPVisionModel }: Props) => {
|
||||
const { t } = useTranslation();
|
||||
const currentBaseModel = useAppSelector(selectBase);
|
||||
const currentBaseModel = useAppSelector((s) => s.canvasV2.params.model?.base);
|
||||
const [modelConfigs, { isLoading }] = useIPAdapterModels();
|
||||
const selectedModel = useMemo(() => modelConfigs.find((m) => m.key === modelKey), [modelConfigs, modelKey]);
|
||||
|
||||
|
@ -1,5 +1,4 @@
|
||||
import { Box, Flex } from '@invoke-ai/ui-library';
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginEndStepPct';
|
||||
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
|
||||
@ -13,8 +12,8 @@ import {
|
||||
ipaMethodChanged,
|
||||
ipaModelChanged,
|
||||
ipaWeightChanged,
|
||||
} from 'features/controlLayers/store/canvasSlice';
|
||||
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
|
||||
} from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { selectIPAdapterEntityOrThrow } from 'features/controlLayers/store/ipAdaptersReducers';
|
||||
import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
|
||||
import type { IPAImageDropData } from 'features/dnd/types';
|
||||
import { memo, useCallback, useMemo } from 'react';
|
||||
@ -25,63 +24,53 @@ import { IPAdapterModel } from './IPAdapterModel';
|
||||
|
||||
export const IPAdapterSettings = memo(() => {
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext('ip_adapter');
|
||||
const selectIPAdapter = useMemo(
|
||||
() => createSelector(selectCanvasSlice, (s) => selectEntityOrThrow(s, entityIdentifier).ipAdapter),
|
||||
[entityIdentifier]
|
||||
);
|
||||
const ipAdapter = useAppSelector(selectIPAdapter);
|
||||
const { id } = useEntityIdentifierContext();
|
||||
const ipAdapter = useAppSelector((s) => selectIPAdapterEntityOrThrow(s.canvasV2, id).ipAdapter);
|
||||
|
||||
const onChangeBeginEndStepPct = useCallback(
|
||||
(beginEndStepPct: [number, number]) => {
|
||||
dispatch(ipaBeginEndStepPctChanged({ entityIdentifier, beginEndStepPct }));
|
||||
dispatch(ipaBeginEndStepPctChanged({ id, beginEndStepPct }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const onChangeWeight = useCallback(
|
||||
(weight: number) => {
|
||||
dispatch(ipaWeightChanged({ entityIdentifier, weight }));
|
||||
dispatch(ipaWeightChanged({ id, weight }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const onChangeIPMethod = useCallback(
|
||||
(method: IPMethodV2) => {
|
||||
dispatch(ipaMethodChanged({ entityIdentifier, method }));
|
||||
dispatch(ipaMethodChanged({ id, method }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const onChangeModel = useCallback(
|
||||
(modelConfig: IPAdapterModelConfig) => {
|
||||
dispatch(ipaModelChanged({ entityIdentifier, modelConfig }));
|
||||
dispatch(ipaModelChanged({ id, modelConfig }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const onChangeCLIPVisionModel = useCallback(
|
||||
(clipVisionModel: CLIPVisionModelV2) => {
|
||||
dispatch(ipaCLIPVisionModelChanged({ entityIdentifier, clipVisionModel }));
|
||||
dispatch(ipaCLIPVisionModelChanged({ id, clipVisionModel }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const onChangeImage = useCallback(
|
||||
(imageDTO: ImageDTO | null) => {
|
||||
dispatch(ipaImageChanged({ entityIdentifier, imageDTO }));
|
||||
dispatch(ipaImageChanged({ id, imageDTO }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
[dispatch, id]
|
||||
);
|
||||
|
||||
const droppableData = useMemo<IPAImageDropData>(
|
||||
() => ({ actionType: 'SET_IPA_IMAGE', context: { id: entityIdentifier.id }, id: entityIdentifier.id }),
|
||||
[entityIdentifier.id]
|
||||
);
|
||||
const postUploadAction = useMemo<IPALayerImagePostUploadAction>(
|
||||
() => ({ type: 'SET_IPA_IMAGE', id: entityIdentifier.id }),
|
||||
[entityIdentifier.id]
|
||||
);
|
||||
const droppableData = useMemo<IPAImageDropData>(() => ({ actionType: 'SET_IPA_IMAGE', context: { id }, id }), [id]);
|
||||
const postUploadAction = useMemo<IPALayerImagePostUploadAction>(() => ({ type: 'SET_IPA_IMAGE', id }), [id]);
|
||||
|
||||
return (
|
||||
<CanvasEntitySettingsWrapper>
|
||||
@ -106,7 +95,7 @@ export const IPAdapterSettings = memo(() => {
|
||||
<IPAdapterImagePreview
|
||||
image={ipAdapter.image ?? null}
|
||||
onChangeImage={onChangeImage}
|
||||
ipAdapterId={entityIdentifier.id}
|
||||
ipAdapterId={id}
|
||||
droppableData={droppableData}
|
||||
postUploadAction={postUploadAction}
|
||||
/>
|
||||
|
@ -2,7 +2,6 @@ import { Spacer } from '@invoke-ai/ui-library';
|
||||
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
|
||||
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
|
||||
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
|
||||
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
|
||||
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
|
||||
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
|
||||
import { EntityMaskAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
@ -10,6 +9,8 @@ import { EntityIdentifierContext } from 'features/controlLayers/contexts/EntityI
|
||||
import type { CanvasEntityIdentifier } from 'features/controlLayers/store/types';
|
||||
import { memo, useMemo } from 'react';
|
||||
|
||||
import { InpaintMaskMaskFillColorPicker } from './InpaintMaskMaskFillColorPicker';
|
||||
|
||||
type Props = {
|
||||
id: string;
|
||||
};
|
||||
@ -25,7 +26,7 @@ export const InpaintMask = memo(({ id }: Props) => {
|
||||
<CanvasEntityPreviewImage />
|
||||
<CanvasEntityEditableTitle />
|
||||
<Spacer />
|
||||
<CanvasEntityIsLockedToggle />
|
||||
<InpaintMaskMaskFillColorPicker />
|
||||
<CanvasEntityEnabledToggle />
|
||||
</CanvasEntityHeader>
|
||||
</CanvasEntityContainer>
|
||||
|
@ -1,22 +1,17 @@
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
|
||||
import { useAppSelector } from 'app/store/storeHooks';
|
||||
import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
|
||||
import { InpaintMask } from 'features/controlLayers/components/InpaintMask/InpaintMask';
|
||||
import { mapId } from 'features/controlLayers/konva/util';
|
||||
import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
|
||||
import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { memo } from 'react';
|
||||
|
||||
const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
|
||||
return canvas.inpaintMasks.entities.map(mapId).reverse();
|
||||
});
|
||||
|
||||
const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
|
||||
return selectedEntityIdentifier?.type === 'inpaint_mask';
|
||||
const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
|
||||
return canvasV2.inpaintMasks.entities.map(mapId).reverse();
|
||||
});
|
||||
|
||||
export const InpaintMaskList = memo(() => {
|
||||
const isSelected = useAppSelector(selectIsSelected);
|
||||
const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'inpaint_mask'));
|
||||
const entityIds = useAppSelector(selectEntityIds);
|
||||
|
||||
if (entityIds.length === 0) {
|
||||
|
@ -0,0 +1,57 @@
|
||||
import { Flex, Popover, PopoverBody, PopoverContent, PopoverTrigger } from '@invoke-ai/ui-library';
|
||||
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
|
||||
import RgbColorPicker from 'common/components/RgbColorPicker';
|
||||
import { rgbColorToString } from 'common/util/colorCodeTransformers';
|
||||
import { MaskFillStyle } from 'features/controlLayers/components/common/MaskFillStyle';
|
||||
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
|
||||
import { inpaintMaskFillColorChanged, inpaintMaskFillStyleChanged } from 'features/controlLayers/store/canvasV2Slice';
|
||||
import { selectInpaintMaskEntityOrThrow } from 'features/controlLayers/store/inpaintMaskReducers';
|
||||
import type { FillStyle, RgbColor } from 'features/controlLayers/store/types';
|
||||
import { memo, useCallback } from 'react';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export const InpaintMaskMaskFillColorPicker = memo(() => {
|
||||
const { t } = useTranslation();
|
||||
const dispatch = useAppDispatch();
|
||||
const entityIdentifier = useEntityIdentifierContext();
|
||||
const fill = useAppSelector((s) => selectInpaintMaskEntityOrThrow(s.canvasV2, entityIdentifier.id).fill);
|
||||
|
||||
const onChangeFillColor = useCallback(
|
||||
(color: RgbColor) => {
|
||||
dispatch(inpaintMaskFillColorChanged({ entityIdentifier, color }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
);
|
||||
const onChangeFillStyle = useCallback(
|
||||
(style: FillStyle) => {
|
||||
dispatch(inpaintMaskFillStyleChanged({ entityIdentifier, style }));
|
||||
},
|
||||
[dispatch, entityIdentifier]
|
||||
);
|
||||
return (
|
||||
<Popover isLazy>
|
||||
<PopoverTrigger>
|
||||
<Flex
|
||||
role="button"
|
||||
aria-label={t('controlLayers.maskPreviewColor')}
|
||||
borderRadius="full"
|
||||
borderWidth={1}
|
||||
bg={rgbColorToString(fill.color)}
|
||||
w="22px"
|
||||
h="22px"
|
||||
tabIndex={-1}
|
||||
/>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent>
|
||||
<PopoverBody minH={64}>
|
||||
<Flex flexDir="column" gap={4}>
|
||||
<RgbColorPicker color={fill.color} onChange={onChangeFillColor} withNumberInput />
|
||||
<MaskFillStyle style={fill.style} onChange={onChangeFillStyle} />
|
||||
</Flex>
|
||||
</PopoverBody>
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
);
|
||||
});
|
||||
|
||||
InpaintMaskMaskFillColorPicker.displayName = 'InpaintMaskMaskFillColorPicker';
|
@ -1,7 +1,6 @@
|
||||
import { MenuDivider } from '@invoke-ai/ui-library';
|
||||
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
|
||||
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
|
||||
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
|
||||
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
|
||||
import { memo } from 'react';
|
||||
|
||||
@ -12,7 +11,6 @@ export const InpaintMaskMenuItems = memo(() => {
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsArrange />
|
||||
<MenuDivider />
|
||||
<CanvasEntityMenuItemsDuplicate />
|
||||
<CanvasEntityMenuItemsDelete />
|
||||
</>
|
||||
);
|
||||
|
@ -2,7 +2,6 @@ import { Spacer } from '@invoke-ai/ui-library';
|
||||
import { CanvasEntityContainer } from 'features/controlLayers/components/common/CanvasEntityContainer';
|
||||
import { CanvasEntityEnabledToggle } from 'features/controlLayers/components/common/CanvasEntityEnabledToggle';
|
||||
import { CanvasEntityHeader } from 'features/controlLayers/components/common/CanvasEntityHeader';
|
||||
import { CanvasEntityIsLockedToggle } from 'features/controlLayers/components/common/CanvasEntityIsLockedToggle';
|
||||
import { CanvasEntityPreviewImage } from 'features/controlLayers/components/common/CanvasEntityPreviewImage';
|
||||
import { CanvasEntityEditableTitle } from 'features/controlLayers/components/common/CanvasEntityTitleEdit';
|
||||
import { EntityLayerAdapterGate } from 'features/controlLayers/contexts/EntityAdapterContext';
|
||||
@ -25,7 +24,6 @@ export const RasterLayer = memo(({ id }: Props) => {
|
||||
<CanvasEntityPreviewImage />
|
||||
<CanvasEntityEditableTitle />
|
||||
<Spacer />
|
||||
<CanvasEntityIsLockedToggle />
|
||||
<CanvasEntityEnabledToggle />
|
||||
</CanvasEntityHeader>
|
||||
</CanvasEntityContainer>
|
||||
|
@@ -1,21 +1,17 @@
-import { createSelector } from '@reduxjs/toolkit';
 import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import { useAppSelector } from 'app/store/storeHooks';
 import { CanvasEntityGroupList } from 'features/controlLayers/components/common/CanvasEntityGroupList';
 import { RasterLayer } from 'features/controlLayers/components/RasterLayer/RasterLayer';
 import { mapId } from 'features/controlLayers/konva/util';
-import { selectCanvasSlice, selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
+import { selectCanvasV2Slice } from 'features/controlLayers/store/canvasV2Slice';
 import { memo } from 'react';

-const selectEntityIds = createMemoizedSelector(selectCanvasSlice, (canvas) => {
-  return canvas.rasterLayers.entities.map(mapId).reverse();
-});
-const selectIsSelected = createSelector(selectSelectedEntityIdentifier, (selectedEntityIdentifier) => {
-  return selectedEntityIdentifier?.type === 'raster_layer';
+const selectEntityIds = createMemoizedSelector(selectCanvasV2Slice, (canvasV2) => {
+  return canvasV2.rasterLayers.entities.map(mapId).reverse();
 });

 export const RasterLayerEntityList = memo(() => {
-  const isSelected = useAppSelector(selectIsSelected);
+  const isSelected = useAppSelector((s) => Boolean(s.canvasV2.selectedEntityIdentifier?.type === 'raster_layer'));
   const layerIds = useAppSelector(selectEntityIds);

   if (layerIds.length === 0) {
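The hunk above swaps a shared `createSelector`-based `selectIsSelected` for an inline `useAppSelector` that returns a primitive, while keeping `selectEntityIds` memoized. A minimal sketch of why that split is sound, using plain reselect as a stand-in for `createMemoizedSelector` (the state shape here is an assumption trimmed to what this diff shows):

```ts
// Why selectEntityIds must stay memoized: map() + reverse() build a fresh array
// on every call, so an unmemoized selector would fail useAppSelector's
// reference-equality check and re-render the list on every store update.
import { createSelector } from 'reselect';

type Entity = { id: string };
type State = { canvasV2: { rasterLayers: { entities: Entity[] } } };

// Unmemoized: new array identity each call, even when entities are unchanged.
const selectIdsNaive = (s: State) => s.canvasV2.rasterLayers.entities.map((e) => e.id).reverse();

// Memoized: recomputes (and changes identity) only when the entities input changes.
const selectIdsMemo = createSelector(
  (s: State) => s.canvasV2.rasterLayers.entities,
  (entities) => entities.map((e) => e.id).reverse()
);
```

The inline boolean selector needs no such treatment: primitives compare by value, so returning the same `true`/`false` twice never triggers a re-render.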
@@ -1,7 +1,6 @@
import { MenuDivider } from '@invoke-ai/ui-library';
import { CanvasEntityMenuItemsArrange } from 'features/controlLayers/components/common/CanvasEntityMenuItemsArrange';
import { CanvasEntityMenuItemsDelete } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDelete';
import { CanvasEntityMenuItemsDuplicate } from 'features/controlLayers/components/common/CanvasEntityMenuItemsDuplicate';
import { CanvasEntityMenuItemsFilter } from 'features/controlLayers/components/common/CanvasEntityMenuItemsFilter';
import { CanvasEntityMenuItemsTransform } from 'features/controlLayers/components/common/CanvasEntityMenuItemsTransform';
import { RasterLayerMenuItemsRasterToControl } from 'features/controlLayers/components/RasterLayer/RasterLayerMenuItemsRasterToControl';
@@ -16,7 +15,6 @@ export const RasterLayerMenuItems = memo(() => {
      <MenuDivider />
      <CanvasEntityMenuItemsArrange />
      <MenuDivider />
      <CanvasEntityMenuItemsDuplicate />
      <CanvasEntityMenuItemsDelete />
    </>
  );
@@ -1,7 +1,7 @@
 import { MenuItem } from '@invoke-ai/ui-library';
 import { useAppDispatch } from 'app/store/storeHooks';
 import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
-import { rasterLayerConvertedToControlLayer } from 'features/controlLayers/store/canvasSlice';
+import { rasterLayerConvertedToControlLayer } from 'features/controlLayers/store/canvasV2Slice';
 import { memo, useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 import { PiLightningBold } from 'react-icons/pi';
@@ -9,11 +9,11 @@ import { PiLightningBold } from 'react-icons/pi';
 export const RasterLayerMenuItemsRasterToControl = memo(() => {
   const { t } = useTranslation();
   const dispatch = useAppDispatch();
-  const entityIdentifier = useEntityIdentifierContext('raster_layer');
+  const entityIdentifier = useEntityIdentifierContext();

   const convertRasterLayerToControlLayer = useCallback(() => {
-    dispatch(rasterLayerConvertedToControlLayer({ entityIdentifier }));
-  }, [dispatch, entityIdentifier]);
+    dispatch(rasterLayerConvertedToControlLayer({ id: entityIdentifier.id }));
+  }, [dispatch, entityIdentifier.id]);

   return (
     <MenuItem onClick={convertRasterLayerToControlLayer} icon={<PiLightningBold />}>
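Note that when the action payload changes from the `entityIdentifier` object to the primitive `{ id: entityIdentifier.id }`, the `useCallback` dependency list is narrowed to match. A sketch of that dependency discipline follows; `useAppDispatch` and the action creator below are hypothetical stand-ins for the app's real store wiring, not the project's implementations.

```ts
import { useCallback } from 'react';

// Stand-ins: a stable dispatch identity, as react-redux's dispatch provides.
type EntityIdentifier = { id: string; type: string };
const appDispatch = (action: unknown) => console.log('dispatched', action);
const useAppDispatch = () => appDispatch;
const rasterLayerConvertedToControlLayer = (payload: { id: string }) => ({
  type: 'canvasV2/rasterLayerConvertedToControlLayer',
  payload,
});

// The callback reads only entityIdentifier.id, so the dependency list names the
// primitive id: the memoized callback stays stable even if a parent recreates
// the identifier object (same id, new reference) on every render.
export const useConvertToControlLayer = (entityIdentifier: EntityIdentifier) => {
  const dispatch = useAppDispatch();
  return useCallback(() => {
    dispatch(rasterLayerConvertedToControlLayer({ id: entityIdentifier.id }));
  }, [dispatch, entityIdentifier.id]);
};
```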