Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00.
# fix: SDXL Lora Loader not showing weight input (#4430)
## What type of PR is this? (check all applicable)

- [ ] Refactor
- [ ] Feature
- [ ] Bug Fix
- [ ] Optimization
- [ ] Documentation Update
- [ ] Community Node Submission

## Have you discussed this change with the InvokeAI team?

- [ ] Yes
- [ ] No, because:

## Have you updated all relevant documentation?

- [ ] Yes
- [ ] No

## Description

## Related Tickets & Documents

<!--
For pull requests that relate or close an issue, please include them below.
For example having the text: "closes #1234" would connect the current pull
request to issue 1234. And when we merge the pull request, Github will
automatically close the issue.
-->

- Related Issue #
- Closes #

## QA Instructions, Screenshots, Recordings

<!--
Please provide steps on how to test changes, any hardware or software
specifications as well as any other pertinent information.
-->

## Added/updated tests?

- [ ] Yes
- [ ] No : _please replace this line with details on why tests have not been included_

## [optional] Are there any post deployment tasks we need to perform?
Commit: `26f7adeaa3`
```diff
@@ -249,7 +249,7 @@ class SDXLLoraLoaderInvocation(BaseInvocation):
     """Apply selected lora to unet and text_encoder."""
 
     lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA")
-    weight: float = Field(default=0.75, description=FieldDescriptions.lora_weight)
+    weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
     unet: Optional[UNetField] = Field(
         default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNET"
     )
```
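The hunk above is the entire functional fix: the node's `weight` was declared with pydantic's plain `Field`, while the inputs the workflow editor renders are declared with InvokeAI's `InputField` wrapper, so the SDXL LoRA loader node came up without a weight control. Below is a minimal sketch of why that distinction matters, assuming pydantic v2; the `input_field` helper and the `is_node_input` schema key are hypothetical stand-ins for InvokeAI's real `InputField` machinery, which attaches richer metadata (input kind, title, UI ordering) but filters the same way.

```python
# Hedged sketch, assuming pydantic v2. `input_field` and "is_node_input"
# are hypothetical stand-ins for InvokeAI's InputField internals.
from typing import Any

from pydantic import BaseModel, Field


def input_field(default: Any = ..., description: str = "") -> Any:
    """A pydantic Field that additionally marks itself as a node input."""
    return Field(
        default=default,
        description=description,
        json_schema_extra={"is_node_input": True},
    )


class DemoLoraLoader(BaseModel):
    # Declared through the helper: tagged in the JSON schema, so a UI that
    # filters on the tag will render it.
    weight: float = input_field(default=0.75, description="LoRA weight")
    # Declared with bare Field: valid pydantic, but carries no tag, so a UI
    # that looks for node-input metadata silently skips it. This is the
    # shape of the original bug.
    hidden: float = Field(default=0.75, description="LoRA weight")


props = DemoLoraLoader.model_json_schema()["properties"]
visible = [name for name, p in props.items() if p.get("is_node_input")]
print(visible)  # ['weight'] -- `hidden` never shows up
```

Because the fix is a one-token change at the declaration site, the regenerated node schema should be all the frontend needs to start showing the input. The remaining hunks in the commit are mechanical formatting cleanup.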
```diff
@@ -215,7 +215,10 @@ class InvokeAIDiffuserComponent:
                 dim=0,
             ),
         }
-        (encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch(
+        (
+            encoder_hidden_states,
+            encoder_attention_mask,
+        ) = self._concat_conditionings_for_batch(
             conditioning_data.unconditioned_embeddings.embeds,
             conditioning_data.text_embeddings.embeds,
         )
@@ -277,7 +280,10 @@ class InvokeAIDiffuserComponent:
         wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0
 
         if wants_cross_attention_control:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_cross_attention_controlled_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
@@ -285,7 +291,10 @@ class InvokeAIDiffuserComponent:
                 **kwargs,
             )
         elif self.sequential_guidance:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_standard_conditioning_sequentially(
                 sample,
                 timestep,
                 conditioning_data,
@@ -293,7 +302,10 @@ class InvokeAIDiffuserComponent:
             )
 
         else:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_standard_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
```
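The four `InvokeAIDiffuserComponent` hunks above (and the `TextualInversionDataset` hunk further down) change no behavior. They look like black's "magic trailing comma" at work: a trailing comma left inside brackets tells black to keep the collection exploded one element per line, so `(a, b,) = f(...)` becomes the multi-line form. A runnable before/after sketch, assuming black's default settings:

```python
# Behavior-preserving: both layouts unpack identically. With a trailing
# comma inside the parentheses, black keeps the target list exploded one
# name per line; without it, the assignment collapses onto a single line
# when it fits the length limit.
def some_call():  # hypothetical stand-in for the methods in the diff
    return 1, 2

# exploded form (what the diff converts to):
(
    unconditioned_next_x,
    conditioned_next_x,
) = some_call()

# collapsed form, equivalent at runtime:
unconditioned_next_x, conditioned_next_x = some_call()

assert (unconditioned_next_x, conditioned_next_x) == (1, 2)
```

The next three hunks touch RGB/YCbCr color-space helpers.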
```diff
@@ -562,18 +562,14 @@ def rgb2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
     else:
-        rlt = (
-            np.matmul(
-                img,
-                [
-                    [65.481, -37.797, 112.0],
-                    [128.553, -74.203, -93.786],
-                    [24.966, 112.0, -18.214],
-                ],
-            )
-            / 255.0
-            + [16, 128, 128]
-        )
+        rlt = np.matmul(
+            img,
+            [
+                [65.481, -37.797, 112.0],
+                [128.553, -74.203, -93.786],
+                [24.966, 112.0, -18.214],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -592,18 +588,14 @@ def ycbcr2rgb(img):
     if in_img_type != np.uint8:
         img *= 255.0
     # convert
-    rlt = (
-        np.matmul(
-            img,
-            [
-                [0.00456621, 0.00456621, 0.00456621],
-                [0, -0.00153632, 0.00791071],
-                [0.00625893, -0.00318811, 0],
-            ],
-        )
-        * 255.0
-        + [-222.921, 135.576, -276.836]
-    )
+    rlt = np.matmul(
+        img,
+        [
+            [0.00456621, 0.00456621, 0.00456621],
+            [0, -0.00153632, 0.00791071],
+            [0.00625893, -0.00318811, 0],
+        ],
+    ) * 255.0 + [-222.921, 135.576, -276.836]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -626,18 +618,14 @@ def bgr2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
     else:
-        rlt = (
-            np.matmul(
-                img,
-                [
-                    [24.966, 112.0, -18.214],
-                    [128.553, -74.203, -93.786],
-                    [65.481, -37.797, 112.0],
-                ],
-            )
-            / 255.0
-            + [16, 128, 128]
-        )
+        rlt = np.matmul(
+            img,
+            [
+                [24.966, 112.0, -18.214],
+                [128.553, -74.203, -93.786],
+                [65.481, -37.797, 112.0],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
```
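These three hunks are likewise cosmetic: black collapses a parenthesized expression it can now format inline, and the arithmetic is untouched. For reference, the constants appear to be the standard ITU-R BT.601 conversion for images scaled to [0, 255] (I am reading this off the coefficients; the diff itself does not say so). As a math sketch:

```latex
% rgb2ycbcr (bgr2ycbcr is the same transform with the matrix rows
% reordered for BGR channel order), inputs scaled to [0, 255]:
\begin{pmatrix} Y & C_b & C_r \end{pmatrix}
  = \frac{1}{255}
    \begin{pmatrix} R & G & B \end{pmatrix}
    \begin{pmatrix}
       65.481 & -37.797 &  112.0   \\
      128.553 & -74.203 &  -93.786 \\
       24.966 & 112.0   &  -18.214
    \end{pmatrix}
  + \begin{pmatrix} 16 & 128 & 128 \end{pmatrix}

% ycbcr2rgb applies the inverse; its small coefficients are the familiar
% BT.601 ones divided by 255, e.g.
R = 1.164\,(Y - 16) + 1.596\,(C_r - 128),
\qquad 1.164/255 \approx 0.00456621,\quad 1.596/255 \approx 0.00625893
```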
```diff
@@ -475,7 +475,10 @@ class TextualInversionDataset(Dataset):
 
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (h, w,) = (
+            (
+                h,
+                w,
+            ) = (
                 img.shape[0],
                 img.shape[1],
             )
```
```diff
@@ -1,7 +1,7 @@
 import math
-import torch
-import diffusers
+
+import diffusers
+import torch
 
-
 if torch.backends.mps.is_available():
     torch.empty = torch.zeros
```
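The last hunk is also mechanical: isort regroups the imports, stdlib `math` first, then the third-party `diffusers` and `torch` alphabetized after a blank line. The interesting pre-existing line is the monkeypatch `torch.empty = torch.zeros` on Apple MPS. A hedged reading of why it exists (this rationale is my assumption, not stated in the diff): `torch.empty` returns uninitialized memory, which some MPS kernels historically mishandled, and zero-filling is a conservative substitute. A minimal demonstration of what the patch changes:

```python
# Demonstration of the monkeypatch's observable effect. The MPS rationale
# above is an assumption; what is certain is that after the patch,
# torch.empty returns zero-filled tensors instead of uninitialized memory.
import torch

if torch.backends.mps.is_available():
    torch.empty = torch.zeros  # the line from the diff

    t = torch.empty(2, 3)  # now always zero-initialized
    assert torch.count_nonzero(t) == 0
```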