From ba2048dbc60a2b30648d870296721b048a3f32ea Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sat, 2 Sep 2023 10:47:55 +1200
Subject: [PATCH 1/4] fix: SDXL Lora Loader not showing weight input

---
 invokeai/app/invocations/model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py
index c3dde5e725..548ff3cb4b 100644
--- a/invokeai/app/invocations/model.py
+++ b/invokeai/app/invocations/model.py
@@ -249,7 +249,7 @@ class SDXLLoraLoaderInvocation(BaseInvocation):
     """Apply selected lora to unet and text_encoder."""
 
     lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA")
-    weight: float = Field(default=0.75, description=FieldDescriptions.lora_weight)
+    weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
     unet: Optional[UNetField] = Field(
         default=None, description=FieldDescriptions.unet, input=Input.Connection, title="UNET"
     )

From a12fbc740653525f9b56fc1a7bc76ad8be05ff5f Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sat, 2 Sep 2023 10:51:53 +1200
Subject: [PATCH 2/4] chore: black fix

---
 .../diffusion/cross_attention_control.py    |  2 +-
 .../diffusion/shared_invokeai_diffusion.py  | 20 ++++--
 .../image_degradation/bsrgan.py             |  4 +-
 .../image_degradation/bsrgan_light.py       |  4 +-
 .../image_degradation/utils_image.py        | 72 ++++++++-----
 .../training/textual_inversion_training.py  |  5 +-
 invokeai/backend/util/mps_fixes.py          |  6 +-
 invokeai/backend/util/util.py               |  2 +-
 8 files changed, 59 insertions(+), 56 deletions(-)

diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
index 334837a273..35d4800859 100644
--- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
+++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
@@ -265,7 +265,7 @@ class InvokeAICrossAttentionMixin:
         if q.shape[1] <= 4096:  # (512x512) max q.shape[1]: 4096
             return self.einsum_lowest_level(q, k, v, None, None, None)
         else:
-            slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1]))
+            slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
             return self.einsum_op_slice_dim1(q, k, v, slice_size)
 
     def einsum_op_mps_v2(self, q, k, v):
diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
index 331b42c047..f05adafca2 100644
--- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
+++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
@@ -215,7 +215,10 @@ class InvokeAIDiffuserComponent:
                     dim=0,
                 ),
             }
-            (encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch(
+            (
+                encoder_hidden_states,
+                encoder_attention_mask,
+            ) = self._concat_conditionings_for_batch(
                 conditioning_data.unconditioned_embeddings.embeds,
                 conditioning_data.text_embeddings.embeds,
             )
@@ -277,7 +280,10 @@ class InvokeAIDiffuserComponent:
         wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0
 
         if wants_cross_attention_control:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_cross_attention_controlled_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
@@ -285,7 +291,10 @@ class InvokeAIDiffuserComponent:
                 **kwargs,
             )
         elif self.sequential_guidance:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_standard_conditioning_sequentially(
                 sample,
                 timestep,
                 conditioning_data,
@@ -293,7 +302,10 @@ class InvokeAIDiffuserComponent:
             )
 
         else:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_standard_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
diff --git a/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py
index 2a2edc92bd..e4d614207b 100644
--- a/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py
+++ b/invokeai/backend/stable_diffusion/image_degradation/bsrgan.py
@@ -395,7 +395,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
@@ -413,7 +413,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
diff --git a/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py b/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py
index 6c516c8101..cd74adc519 100644
--- a/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py
+++ b/invokeai/backend/stable_diffusion/image_degradation/bsrgan_light.py
@@ -399,7 +399,7 @@ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
@@ -417,7 +417,7 @@ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
         D = np.diag(np.random.rand(3))
         U = orth(np.random.rand(3, 3))
         conv = np.dot(np.dot(np.transpose(U), D), U)
-        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
+        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L**2 * conv), img.shape[:2]).astype(np.float32)
     img = np.clip(img, 0.0, 1.0)
     return img
 
diff --git a/invokeai/backend/stable_diffusion/image_degradation/utils_image.py b/invokeai/backend/stable_diffusion/image_degradation/utils_image.py
index 11df5c5710..2a0773c3ed 100644
--- a/invokeai/backend/stable_diffusion/image_degradation/utils_image.py
+++ b/invokeai/backend/stable_diffusion/image_degradation/utils_image.py
@@ -562,18 +562,14 @@ def rgb2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
     else:
-        rlt = (
-            np.matmul(
-                img,
-                [
-                    [65.481, -37.797, 112.0],
-                    [128.553, -74.203, -93.786],
-                    [24.966, 112.0, -18.214],
-                ],
-            )
-            / 255.0
-            + [16, 128, 128]
-        )
+        rlt = np.matmul(
+            img,
+            [
+                [65.481, -37.797, 112.0],
+                [128.553, -74.203, -93.786],
+                [24.966, 112.0, -18.214],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -592,18 +588,14 @@ def ycbcr2rgb(img):
     if in_img_type != np.uint8:
         img *= 255.0
     # convert
-    rlt = (
-        np.matmul(
-            img,
-            [
-                [0.00456621, 0.00456621, 0.00456621],
-                [0, -0.00153632, 0.00791071],
-                [0.00625893, -0.00318811, 0],
-            ],
-        )
-        * 255.0
-        + [-222.921, 135.576, -276.836]
-    )
+    rlt = np.matmul(
+        img,
+        [
+            [0.00456621, 0.00456621, 0.00456621],
+            [0, -0.00153632, 0.00791071],
+            [0.00625893, -0.00318811, 0],
+        ],
+    ) * 255.0 + [-222.921, 135.576, -276.836]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -626,18 +618,14 @@ def bgr2ycbcr(img, only_y=True):
     if only_y:
         rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
     else:
-        rlt = (
-            np.matmul(
-                img,
-                [
-                    [24.966, 112.0, -18.214],
-                    [128.553, -74.203, -93.786],
-                    [65.481, -37.797, 112.0],
-                ],
-            )
-            / 255.0
-            + [16, 128, 128]
-        )
+        rlt = np.matmul(
+            img,
+            [
+                [24.966, 112.0, -18.214],
+                [128.553, -74.203, -93.786],
+                [65.481, -37.797, 112.0],
+            ],
+        ) / 255.0 + [16, 128, 128]
     if in_img_type == np.uint8:
         rlt = rlt.round()
     else:
@@ -728,11 +716,11 @@ def ssim(img1, img2):
 
     mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
     mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
-    mu1_sq = mu1 ** 2
-    mu2_sq = mu2 ** 2
+    mu1_sq = mu1**2
+    mu2_sq = mu2**2
     mu1_mu2 = mu1 * mu2
-    sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
-    sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
+    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
+    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
     sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
 
     ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
@@ -749,8 +737,8 @@ def ssim(img1, img2):
 # matlab 'imresize' function, now only support 'bicubic'
 def cubic(x):
     absx = torch.abs(x)
-    absx2 = absx ** 2
-    absx3 = absx ** 3
+    absx2 = absx**2
+    absx3 = absx**3
     return (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx)) + (
         -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
     ) * (((absx > 1) * (absx <= 2)).type_as(absx))
diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py
index 658db5e1d5..d92aa80b38 100644
--- a/invokeai/backend/training/textual_inversion_training.py
+++ b/invokeai/backend/training/textual_inversion_training.py
@@ -475,7 +475,10 @@ class TextualInversionDataset(Dataset):
 
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (h, w,) = (
+            (
+                h,
+                w,
+            ) = (
                 img.shape[0],
                 img.shape[1],
             )
diff --git a/invokeai/backend/util/mps_fixes.py b/invokeai/backend/util/mps_fixes.py
index be465d7d9a..ce21d33b88 100644
--- a/invokeai/backend/util/mps_fixes.py
+++ b/invokeai/backend/util/mps_fixes.py
@@ -1,7 +1,7 @@
 import math
-import torch
-import diffusers
 
+import diffusers
+import torch
 
 if torch.backends.mps.is_available():
     torch.empty = torch.zeros
@@ -203,7 +203,7 @@ class ChunkedSlicedAttnProcessor:
         if attn.upcast_attention:
             out_item_size = 4
 
-        chunk_size = 2 ** 29
+        chunk_size = 2**29
 
         out_size = query.shape[1] * key.shape[1] * out_item_size
         chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size))
diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py
index da0b9f6834..7ef9c72fb0 100644
--- a/invokeai/backend/util/util.py
+++ b/invokeai/backend/util/util.py
@@ -207,7 +207,7 @@ def parallel_data_prefetch(
     return gather_res
 
 
-def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3):
+def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
     delta = (res[0] / shape[0], res[1] / shape[1])
     d = (shape[0] // res[0], shape[1] // res[1])
 

From 46bc6968b84d2017646656b38eee5b53e56da50d Mon Sep 17 00:00:00 2001
From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com>
Date: Sat, 2 Sep 2023 11:11:34 +1200
Subject: [PATCH 3/4] fix: ControlImage Dimension retrieval not working as intended

---
 .../controlNet/components/ControlNetImagePreview.tsx | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/invokeai/frontend/web/src/features/controlNet/components/ControlNetImagePreview.tsx b/invokeai/frontend/web/src/features/controlNet/components/ControlNetImagePreview.tsx
index d3ab3c3817..d2c37445a3 100644
--- a/invokeai/frontend/web/src/features/controlNet/components/ControlNetImagePreview.tsx
+++ b/invokeai/frontend/web/src/features/controlNet/components/ControlNetImagePreview.tsx
@@ -104,22 +104,22 @@ const ControlNetImagePreview = ({ isSmall, controlNet }: Props) => {
   ]);
 
   const handleSetControlImageToDimensions = useCallback(() => {
-    if (!processedControlImage) {
+    if (!controlImage) {
       return;
     }
 
     if (activeTabName === 'unifiedCanvas') {
       dispatch(
         setBoundingBoxDimensions({
-          width: processedControlImage.width,
-          height: processedControlImage.height,
+          width: controlImage.width,
+          height: controlImage.height,
         })
       );
     } else {
-      dispatch(setWidth(processedControlImage.width));
-      dispatch(setHeight(processedControlImage.height));
+      dispatch(setWidth(controlImage.width));
+      dispatch(setHeight(controlImage.height));
     }
-  }, [processedControlImage, activeTabName, dispatch]);
+  }, [controlImage, activeTabName, dispatch]);
 
   const handleMouseEnter = useCallback(() => {
     setIsMouseOverImage(true);

From 0a7d06f8c614b42c8c70617d36c4340171c09f24 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Sat, 2 Sep 2023 11:26:48 +1000
Subject: [PATCH 4/4] fix(ui): fix circular imports

The logic that introduced a circular import was actually extraneous. I have
entirely removed it.
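
For context, the cycle being removed looked roughly like the sketch below. The
module names here are hypothetical stand-ins rather than the real files, and the
store side of the loop is an assumption (the store's slices are presumed to import
from this types module, which is what closed the cycle):

    // workflowTypes.ts -- hypothetical stand-in for features/nodes/types/types.ts
    // Module-scope dependency on the store: the pattern this patch removes.
    import { registeredNames } from './appStore';

    export const isRegistered = (name: string): boolean =>
      registeredNames.includes(name);

    // appStore.ts -- hypothetical stand-in for app/store/store.ts
    // Imports back from the types module, completing types -> store -> types.
    import { isRegistered } from './workflowTypes';

    export const registeredNames: string[] = [];

    export const addName = (name: string): void => {
      if (!isRegistered(name)) {
        registeredNames.push(name);
      }
    };

Since the workflow validation transform no longer reads node templates from the
store, the store import can simply be dropped, which breaks the loop.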
---
 .../web/src/features/nodes/types/types.ts | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

diff --git a/invokeai/frontend/web/src/features/nodes/types/types.ts b/invokeai/frontend/web/src/features/nodes/types/types.ts
index 7783393d6f..a2de44cb38 100644
--- a/invokeai/frontend/web/src/features/nodes/types/types.ts
+++ b/invokeai/frontend/web/src/features/nodes/types/types.ts
@@ -1,4 +1,3 @@
-import { store } from 'app/store/store';
 import {
   SchedulerParam,
   zBaseModel,
@@ -10,7 +9,6 @@ import { keyBy } from 'lodash-es';
 import { OpenAPIV3 } from 'openapi-types';
 import { RgbaColor } from 'react-colorful';
 import { Node } from 'reactflow';
-import { JsonObject } from 'type-fest';
 import { Graph, ImageDTO, _InputField, _OutputField } from 'services/api/types';
 import {
   AnyInvocationType,
@@ -18,6 +16,7 @@ import {
   ProgressImage,
 } from 'services/events/types';
 import { O } from 'ts-toolbelt';
+import { JsonObject } from 'type-fest';
 import { z } from 'zod';
 
 export type NonNullableGraph = O.Required<Graph, 'nodes' | 'edges'>;
@@ -936,22 +935,10 @@ export const zWorkflow = z.object({
 });
 
 export const zValidatedWorkflow = zWorkflow.transform((workflow) => {
-  const nodeTemplates = store.getState().nodes.nodeTemplates;
   const { nodes, edges } = workflow;
   const warnings: WorkflowWarning[] = [];
   const invocationNodes = nodes.filter(isWorkflowInvocationNode);
   const keyedNodes = keyBy(invocationNodes, 'id');
-  invocationNodes.forEach((node, i) => {
-    const nodeTemplate = nodeTemplates[node.data.type];
-    if (!nodeTemplate) {
-      warnings.push({
-        message: `Node "${node.data.label || node.data.id}" skipped`,
-        issues: [`Unable to find template for type "${node.data.type}"`],
-        data: node,
-      });
-      delete nodes[i];
-    }
-  });
   edges.forEach((edge, i) => {
     const sourceNode = keyedNodes[edge.source];
     const targetNode = keyedNodes[edge.target];