From 540d506ec9e4d96a55ed3760858f5f2cee7b972d Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 6 Apr 2024 00:05:27 +0530 Subject: [PATCH 01/19] fix: Incorrect default clip vision opt in the node --- invokeai/app/invocations/ip_adapter.py | 2 +- invokeai/frontend/web/src/services/api/schema.ts | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 0ac40e97fb..485414d263 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -67,7 +67,7 @@ class IPAdapterInvocation(BaseInvocation): ) clip_vision_model: Literal["ViT-H", "ViT-G"] = InputField( description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.", - default="auto", + default="ViT-H", ui_order=2, ) weight: Union[float, List[float]] = InputField( diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 72da0f1f8c..cb222bd497 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -4112,7 +4112,7 @@ export type components = { * @description The nodes in this graph */ nodes: { - [key: string]: components["schemas"]["ColorInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | 
components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["RandomRangeInvocation"] | 
components["schemas"]["ESRGANInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["IntegerMathInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["ImagePasteInvocation"]; + [key: string]: components["schemas"]["IntegerMathInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["CoreMetadataInvocation"] | components["schemas"]["ImageWatermarkInvocation"] | components["schemas"]["ColorInvocation"] | components["schemas"]["ImageChannelOffsetInvocation"] | components["schemas"]["PromptsFromFileInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ColorCorrectInvocation"] | components["schemas"]["FloatInvocation"] | components["schemas"]["ImageChannelMultiplyInvocation"] | components["schemas"]["IntegerCollectionInvocation"] | components["schemas"]["DepthAnythingImageProcessorInvocation"] | components["schemas"]["FreeUInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["StringSplitNegInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["RandomFloatInvocation"] | components["schemas"]["MetadataInvocation"] | components["schemas"]["SDXLModelLoaderInvocation"] | components["schemas"]["MergeTilesToImageInvocation"] | components["schemas"]["DenoiseLatentsInvocation"] | components["schemas"]["MaskEdgeInvocation"] | components["schemas"]["LoRALoaderInvocation"] | components["schemas"]["ImageHueAdjustmentInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["BooleanCollectionInvocation"] | components["schemas"]["FaceMaskInvocation"] | components["schemas"]["FloatToIntegerInvocation"] | components["schemas"]["BlankImageInvocation"] | components["schemas"]["MaskFromIDInvocation"] | components["schemas"]["CalculateImageTilesMinimumOverlapInvocation"] | components["schemas"]["FloatCollectionInvocation"] | components["schemas"]["MetadataItemInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["IPAdapterInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["StringSplitInvocation"] | components["schemas"]["CalculateImageTilesInvocation"] | components["schemas"]["TileToPropertiesInvocation"] | components["schemas"]["CreateGradientMaskInvocation"] | components["schemas"]["CropLatentsCoreInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["UnsharpMaskInvocation"] | components["schemas"]["StringJoinThreeInvocation"] | components["schemas"]["ESRGANInvocation"] | 
components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["FaceIdentifierInvocation"] | components["schemas"]["ImageNSFWBlurInvocation"] | components["schemas"]["StringJoinInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["StringInvocation"] | components["schemas"]["FloatMathInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["SDXLCompelPromptInvocation"] | components["schemas"]["LatentsInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["SegmentAnythingProcessorInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["MainModelLoaderInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["LeresImageProcessorInvocation"] | components["schemas"]["CanvasPasteBackInvocation"] | components["schemas"]["CreateDenoiseMaskInvocation"] | components["schemas"]["StringCollectionInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["T2IAdapterInvocation"] | components["schemas"]["DWOpenposeImageProcessorInvocation"] | components["schemas"]["SaveImageInvocation"] | components["schemas"]["StringReplaceInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["SchedulerInvocation"] | components["schemas"]["RoundInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["PairTileImageInvocation"] | components["schemas"]["ColorMapImageProcessorInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["SeamlessModeInvocation"] | components["schemas"]["SDXLRefinerCompelPromptInvocation"] | components["schemas"]["IntegerInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["MergeMetadataInvocation"] | components["schemas"]["MaskCombineInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["CLIPSkipInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["VAELoaderInvocation"] | components["schemas"]["ConditioningInvocation"] | components["schemas"]["LatentsCollectionInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["BooleanInvocation"] | components["schemas"]["BlendLatentsInvocation"] | components["schemas"]["ImageCollectionInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["SDXLRefinerModelLoaderInvocation"] | components["schemas"]["CalculateImageTilesEvenSplitInvocation"] | components["schemas"]["CV2InfillInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["SDXLLoRALoaderInvocation"] | components["schemas"]["LaMaInfillInvocation"] | components["schemas"]["CenterPadCropInvocation"] | components["schemas"]["ConditioningCollectionInvocation"] | 
components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["TileResamplerProcessorInvocation"] | components["schemas"]["IdealSizeInvocation"] | components["schemas"]["FaceOffInvocation"] | components["schemas"]["ImageInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["LineartImageProcessorInvocation"]; }; /** * Edges @@ -4149,7 +4149,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["MetadataItemOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["String2Output"] | components["schemas"]["StringOutput"]; + [key: string]: components["schemas"]["FloatCollectionOutput"] | components["schemas"]["VAEOutput"] | components["schemas"]["SDXLModelLoaderOutput"] | components["schemas"]["ConditioningCollectionOutput"] | components["schemas"]["FaceOffOutput"] | components["schemas"]["FaceMaskOutput"] | components["schemas"]["ImageOutput"] | components["schemas"]["ColorOutput"] | components["schemas"]["SDXLLoRALoaderOutput"] | components["schemas"]["T2IAdapterOutput"] | components["schemas"]["String2Output"] | components["schemas"]["CLIPSkipInvocationOutput"] | components["schemas"]["ConditioningOutput"] | components["schemas"]["StringOutput"] | components["schemas"]["ImageCollectionOutput"] | components["schemas"]["TileToPropertiesOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["NoiseOutput"] | 
components["schemas"]["MetadataItemOutput"] | components["schemas"]["LatentsCollectionOutput"] | components["schemas"]["IntegerOutput"] | components["schemas"]["ColorCollectionOutput"] | components["schemas"]["SchedulerOutput"] | components["schemas"]["CalculateImageTilesOutput"] | components["schemas"]["IdealSizeOutput"] | components["schemas"]["BooleanCollectionOutput"] | components["schemas"]["CollectInvocationOutput"] | components["schemas"]["DenoiseMaskOutput"] | components["schemas"]["MetadataOutput"] | components["schemas"]["StringCollectionOutput"] | components["schemas"]["PairTileImageOutput"] | components["schemas"]["SeamlessModeOutput"] | components["schemas"]["UNetOutput"] | components["schemas"]["CLIPOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["GradientMaskOutput"] | components["schemas"]["BooleanOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["StringPosNegOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["LoRALoaderOutput"] | components["schemas"]["IntegerCollectionOutput"] | components["schemas"]["IPAdapterOutput"] | components["schemas"]["SDXLRefinerModelLoaderOutput"]; }; /** * Errors @@ -4435,7 +4435,7 @@ export type components = { /** * Clip Vision Model * @description CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models. - * @default auto + * @default ViT-H * @enum {string} */ clip_vision_model?: "ViT-H" | "ViT-G"; From a09d705e4cfce4b45bc9bd603f89298968cd62c0 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 5 Apr 2024 18:40:57 +1100 Subject: [PATCH 02/19] fix(mm): remove vram check This check prematurely reports insufficient VRAM on Windows. See #6106 for details. 
--- .../load/model_cache/model_cache_default.py | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py index 4d5d09864e..49b48f20ef 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py @@ -269,9 +269,6 @@ class ModelCache(ModelCacheBase[AnyModel]): if torch.device(source_device).type == torch.device(target_device).type: return - # may raise an exception here if insufficient GPU VRAM - self._check_free_vram(target_device, cache_entry.size) - start_model_to_time = time.time() snapshot_before = self._capture_memory_snapshot() cache_entry.model.to(target_device) @@ -420,24 +417,3 @@ class ModelCache(ModelCacheBase[AnyModel]): mps.empty_cache() self.logger.debug(f"After making room: cached_models={len(self._cached_models)}") - - def _free_vram(self, device: torch.device) -> int: - vram_device = ( # mem_get_info() needs an indexed device - device if device.index is not None else torch.device(str(device), index=0) - ) - free_mem, _ = torch.cuda.mem_get_info(vram_device) - for _, cache_entry in self._cached_models.items(): - if cache_entry.loaded and not cache_entry.locked: - free_mem += cache_entry.size - return free_mem - - def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None: - if target_device.type != "cuda": - return - free_mem = self._free_vram(target_device) - if needed_size > free_mem: - needed_gb = round(needed_size / GIG, 2) - free_gb = round(free_mem / GIG, 2) - raise torch.cuda.OutOfMemoryError( - f"Insufficient VRAM to load model, requested {needed_gb}GB but only had {free_gb}GB free" - ) From 4068e817d6eae4689362b0e74c79f3b1b05a5fd5 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 5 Apr 2024 18:41:10 +1100 Subject: [PATCH 03/19] fix(mm): typing issues in model cache --- .../model_manager/load/model_cache/model_cache_base.py | 2 +- .../model_manager/load/model_cache/model_cache_default.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_base.py b/invokeai/backend/model_manager/load/model_cache/model_cache_base.py index eb82f87cb2..a8c2dd3e92 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_cache_base.py +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_base.py @@ -117,7 +117,7 @@ class ModelCacheBase(ABC, Generic[T]): @property @abstractmethod - def stats(self) -> CacheStats: + def stats(self) -> Optional[CacheStats]: """Return collected CacheStats object.""" pass diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py index 49b48f20ef..f2e0c01a94 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py @@ -326,11 +326,11 @@ class ModelCache(ModelCacheBase[AnyModel]): f" {in_ram_models}/{in_vram_models}({locked_in_vram_models})" ) - def make_room(self, model_size: int) -> None: + def make_room(self, size: int) -> None: """Make enough room in the cache to accommodate a new model of indicated size.""" # calculate how much memory this model will require # multiplier = 2 if self.precision==torch.float32 else 1 - 
bytes_needed = model_size + bytes_needed = size maximum_size = self.max_cache_size * GIG # stored in GB, convert to bytes current_size = self.cache_size() @@ -385,7 +385,7 @@ class ModelCache(ModelCacheBase[AnyModel]): # 1 from onnx runtime object if not cache_entry.locked and refs <= (3 if "onnx" in model_key else 2): self.logger.debug( - f"Removing {model_key} from RAM cache to free at least {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)" + f"Removing {model_key} from RAM cache to free at least {(size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)" ) current_size -= cache_entry.size models_cleared += 1 From a95756f3edd97b03d54a4cc7b6c150ce1f2e8c47 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 5 Apr 2024 18:58:08 +1100 Subject: [PATCH 04/19] docs: update FAQ.md (shared GPU memory) --- docs/help/FAQ.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/help/FAQ.md b/docs/help/FAQ.md index 458770e41a..4c297f442a 100644 --- a/docs/help/FAQ.md +++ b/docs/help/FAQ.md @@ -40,6 +40,25 @@ Follow the same steps to scan and import the missing models. - Check the `ram` setting in `invokeai.yaml`. This setting tells Invoke how much of your system RAM can be used to cache models. Having this too high or too low can slow things down. That said, it's generally safest to not set this at all and instead let Invoke manage it. - Check the `vram` setting in `invokeai.yaml`. This setting tells Invoke how much of your GPU VRAM can be used to cache models. Counter-intuitively, if this setting is too high, Invoke will need to do a lot of shuffling of models as it juggles the VRAM cache and the currently-loaded model. The default value of 0.25 generally works well for GPUs with less than 16GB of VRAM. Even on a 24GB card, the default works well. - Check that your generations are happening on your GPU (if you have one). InvokeAI will log what is being used for generation upon startup. If your GPU isn't used, re-install to ensure the correct versions of torch get installed. +- If you are on Windows, you may have exceeded your GPU's VRAM capacity and are using slower [shared GPU memory](#shared-gpu-memory-windows). There's a guide to opt out of this behavior in the linked FAQ entry. + +## Shared GPU Memory (Windows) + +!!! tip "Nvidia GPUs with driver 536.40" + + This only applies to current Nvidia cards with driver 536.40 or later, released in June 2023. + +When the GPU doesn't have enough VRAM for a task, Windows is able to allocate some of its CPU RAM to the GPU. This is much slower than VRAM, but it does allow the system to generate when it otherwise might not have enough VRAM. + +When shared GPU memory is used, generation slows down dramatically - but at least it doesn't crash. + +If you'd like to opt out of this behavior and instead get an error when you exceed your GPU's VRAM, follow [this guide from Nvidia](https://nvidia.custhelp.com/app/answers/detail/a_id/5490). + +Here's how to get the python path required in the linked guide: + +- Run `invoke.bat`. +- Select option 2 for developer console. +- At least one python path will be printed. Copy the path that includes your invoke installation directory (typically the first). 
## Installer cannot find python (Windows) From f56b9537cde6e4b50c54b3fbc08f38befafa207c Mon Sep 17 00:00:00 2001 From: Jennifer Player Date: Thu, 4 Apr 2024 10:54:02 -0400 Subject: [PATCH 05/19] added initial image to metadata viewer --- .../components/ImageMetadataViewer/ImageMetadataActions.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx index 5b9f15c21a..ce75ea62e0 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx @@ -33,6 +33,7 @@ const ImageMetadataActions = (props: Props) => { + From 8a17616bf456853d3194fe5578b4a515ba104b8d Mon Sep 17 00:00:00 2001 From: Jennifer Player Date: Thu, 4 Apr 2024 10:56:25 -0400 Subject: [PATCH 06/19] recall initial image from metadata and set to image2image --- .../web/src/features/metadata/util/handlers.ts | 5 +++++ .../web/src/features/metadata/util/parsers.ts | 5 +++++ .../web/src/features/metadata/util/recallers.ts | 11 +++++++++++ .../src/features/parameters/types/parameterSchemas.ts | 7 +++++++ 4 files changed, 28 insertions(+) diff --git a/invokeai/frontend/web/src/features/metadata/util/handlers.ts b/invokeai/frontend/web/src/features/metadata/util/handlers.ts index b64426b422..4bf717f638 100644 --- a/invokeai/frontend/web/src/features/metadata/util/handlers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/handlers.ts @@ -189,6 +189,11 @@ export const handlers = { recaller: recallers.cfgScale, }), height: buildHandlers({ getLabel: () => t('metadata.height'), parser: parsers.height, recaller: recallers.height }), + initialImage: buildHandlers({ + getLabel: () => t('metadata.initImage'), + parser: parsers.initialImage, + recaller: recallers.initialImage, + }), negativePrompt: buildHandlers({ getLabel: () => t('metadata.negativePrompt'), parser: parsers.negativePrompt, diff --git a/invokeai/frontend/web/src/features/metadata/util/parsers.ts b/invokeai/frontend/web/src/features/metadata/util/parsers.ts index 635a63a8de..26a0c3c5b1 100644 --- a/invokeai/frontend/web/src/features/metadata/util/parsers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/parsers.ts @@ -20,6 +20,7 @@ import type { ParameterHeight, ParameterHRFEnabled, ParameterHRFMethod, + ParameterInitialImage, ParameterModel, ParameterNegativePrompt, ParameterNegativeStylePromptSDXL, @@ -135,6 +136,9 @@ const parseCFGRescaleMultiplier: MetadataParseFunc = (metadata) => getProperty(metadata, 'scheduler', isParameterScheduler); +const parseInitialImage: MetadataParseFunc = (metadata) => + getProperty(metadata, 'init_image', isString); + const parseWidth: MetadataParseFunc = (metadata) => getProperty(metadata, 'width', isParameterWidth); const parseHeight: MetadataParseFunc = (metadata) => @@ -402,6 +406,7 @@ export const parsers = { cfgScale: parseCFGScale, cfgRescaleMultiplier: parseCFGRescaleMultiplier, scheduler: parseScheduler, + initialImage: parseInitialImage, width: parseWidth, height: parseHeight, steps: parseSteps, diff --git a/invokeai/frontend/web/src/features/metadata/util/recallers.ts b/invokeai/frontend/web/src/features/metadata/util/recallers.ts index f35399c139..617e5b2377 100644 --- a/invokeai/frontend/web/src/features/metadata/util/recallers.ts +++ 
b/invokeai/frontend/web/src/features/metadata/util/recallers.ts @@ -17,6 +17,7 @@ import type { import { modelSelected } from 'features/parameters/store/actions'; import { heightRecalled, + initialImageChanged, setCfgRescaleMultiplier, setCfgScale, setImg2imgStrength, @@ -34,6 +35,7 @@ import type { ParameterHeight, ParameterHRFEnabled, ParameterHRFMethod, + ParameterInitialImage, ParameterModel, ParameterNegativePrompt, ParameterNegativeStylePromptSDXL, @@ -61,6 +63,7 @@ import { setRefinerStart, setRefinerSteps, } from 'features/sdxl/store/sdxlSlice'; +import type { ImageDTO } from 'services/api/types'; const recallPositivePrompt: MetadataRecallFunc = (positivePrompt) => { getStore().dispatch(setPositivePrompt(positivePrompt)); @@ -94,6 +97,13 @@ const recallScheduler: MetadataRecallFunc = (scheduler) => { getStore().dispatch(setScheduler(scheduler)); }; +const recallInitialImage: MetadataRecallFunc = (initialImage) => { + const image = { + image_name: initialImage, + }; + getStore().dispatch(initialImageChanged(image as ImageDTO)); +}; + const recallWidth: MetadataRecallFunc = (width) => { getStore().dispatch(widthRecalled(width)); }; @@ -235,6 +245,7 @@ export const recallers = { cfgScale: recallCFGScale, cfgRescaleMultiplier: recallCFGRescaleMultiplier, scheduler: recallScheduler, + initialImage: recallInitialImage, width: recallWidth, height: recallHeight, steps: recallSteps, diff --git a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts index 75693cd47f..b3c403488a 100644 --- a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts +++ b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts @@ -126,6 +126,13 @@ const zParameterT2IAdapterModel = zModelIdentifierField; export type ParameterT2IAdapterModel = z.infer; // #endregion +// #region I2I Initial Image +const zParameterInitialImage = z.string(); +export type ParameterInitialImage = z.infer; +export const isParameterInitialImage = (val: unknown): val is ParameterInitialImage => + zParameterInitialImage.safeParse(val).success; +// #endregion + // #region Strength (l2l strength) const zParameterStrength = z.number().min(0).max(1); export type ParameterStrength = z.infer; From 9ba5ec4b67d16451e49e6080c93f5d3069fe53a4 Mon Sep 17 00:00:00 2001 From: Jennifer Player Date: Thu, 4 Apr 2024 10:56:54 -0400 Subject: [PATCH 07/19] fix typo Params set set --- invokeai/frontend/web/public/locales/en.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 623cea64ee..e2ef66b9de 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1044,7 +1044,7 @@ "parameterNotSet": "{{parameter}} not set", "parameterSet": "{{parameter}} set", "parametersNotSet": "Parameters Not Set", - "parametersSet": "Parameters Set", + "parametersSet": "Parameters", "problemCopyingCanvas": "Problem Copying Canvas", "problemCopyingCanvasDesc": "Unable to export base layer", "problemCopyingImage": "Unable to Copy Image", From fdd0e5797684f2e105770517508bc97097eb4fee Mon Sep 17 00:00:00 2001 From: Jennifer Player Date: Thu, 4 Apr 2024 11:06:30 -0400 Subject: [PATCH 08/19] actually use the schema --- invokeai/frontend/web/src/features/metadata/util/parsers.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/invokeai/frontend/web/src/features/metadata/util/parsers.ts b/invokeai/frontend/web/src/features/metadata/util/parsers.ts index 26a0c3c5b1..5d844c4663 100644 --- a/invokeai/frontend/web/src/features/metadata/util/parsers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/parsers.ts @@ -43,6 +43,7 @@ import { isParameterHeight, isParameterHRFEnabled, isParameterHRFMethod, + isParameterInitialImage, isParameterLoRAWeight, isParameterNegativePrompt, isParameterNegativeStylePromptSDXL, @@ -137,7 +138,7 @@ const parseScheduler: MetadataParseFunc = (metadata) => getProperty(metadata, 'scheduler', isParameterScheduler); const parseInitialImage: MetadataParseFunc = (metadata) => - getProperty(metadata, 'init_image', isString); + getProperty(metadata, 'init_image', isParameterInitialImage); const parseWidth: MetadataParseFunc = (metadata) => getProperty(metadata, 'width', isParameterWidth); From adc30045a6ec268afcce3ba2fd76db3e02159273 Mon Sep 17 00:00:00 2001 From: Jennifer Player Date: Fri, 5 Apr 2024 13:30:31 -0400 Subject: [PATCH 09/19] addressed pr feedback --- invokeai/frontend/web/public/locales/en.json | 2 +- .../web/src/features/metadata/util/handlers.ts | 2 +- .../web/src/features/metadata/util/recallers.ts | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index e2ef66b9de..9686f8a02a 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -1041,10 +1041,10 @@ "metadataLoadFailed": "Failed to load metadata", "modelAddedSimple": "Model Added to Queue", "modelImportCanceled": "Model Import Canceled", + "parameters": "Parameters", "parameterNotSet": "{{parameter}} not set", "parameterSet": "{{parameter}} set", "parametersNotSet": "Parameters Not Set", - "parametersSet": "Parameters", "problemCopyingCanvas": "Problem Copying Canvas", "problemCopyingCanvasDesc": "Unable to export base layer", "problemCopyingImage": "Unable to Copy Image", diff --git a/invokeai/frontend/web/src/features/metadata/util/handlers.ts b/invokeai/frontend/web/src/features/metadata/util/handlers.ts index 4bf717f638..af089a3177 100644 --- a/invokeai/frontend/web/src/features/metadata/util/handlers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/handlers.ts @@ -410,6 +410,6 @@ export const parseAndRecallAllMetadata = async (metadata: unknown, skip: (keyof }) ); if (results.some((result) => result.status === 'fulfilled')) { - parameterSetToast(t('toast.parametersSet')); + parameterSetToast(t('toast.parameters')); } }; diff --git a/invokeai/frontend/web/src/features/metadata/util/recallers.ts b/invokeai/frontend/web/src/features/metadata/util/recallers.ts index 617e5b2377..50d814b859 100644 --- a/invokeai/frontend/web/src/features/metadata/util/recallers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/recallers.ts @@ -63,7 +63,7 @@ import { setRefinerStart, setRefinerSteps, } from 'features/sdxl/store/sdxlSlice'; -import type { ImageDTO } from 'services/api/types'; +import { imagesApi } from 'services/api/endpoints/images'; const recallPositivePrompt: MetadataRecallFunc = (positivePrompt) => { getStore().dispatch(setPositivePrompt(positivePrompt)); @@ -97,11 +97,11 @@ const recallScheduler: MetadataRecallFunc = (scheduler) => { getStore().dispatch(setScheduler(scheduler)); }; -const recallInitialImage: MetadataRecallFunc = (initialImage) => { - const image = { - image_name: initialImage, - }; - 
getStore().dispatch(initialImageChanged(image as ImageDTO)); +const recallInitialImage: MetadataRecallFunc = async (initialImage) => { + const imageDTORequest = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(initialImage)); + const imageDTO = await imageDTORequest.unwrap(); + imageDTORequest.unsubscribe(); + getStore().dispatch(initialImageChanged(imageDTO)); }; const recallWidth: MetadataRecallFunc = (width) => { From 55f7a7737a271dba902d2dba6cf523c074c177ed Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 6 Apr 2024 14:47:24 +1100 Subject: [PATCH 10/19] feat(ui): shift around init image recall logic Retrieving the DTO happens as part of the metadata parsing, not recall. This way, we don't show the option to recall a nonexistent image. This matches the flow for other metadata entities like models - we don't show the model recall button if the model isn't available. --- .../web/src/features/metadata/util/handlers.ts | 1 + .../web/src/features/metadata/util/parsers.ts | 13 ++++++++++--- .../web/src/features/metadata/util/recallers.ts | 8 ++------ 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/invokeai/frontend/web/src/features/metadata/util/handlers.ts b/invokeai/frontend/web/src/features/metadata/util/handlers.ts index af089a3177..2fb840afcb 100644 --- a/invokeai/frontend/web/src/features/metadata/util/handlers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/handlers.ts @@ -193,6 +193,7 @@ export const handlers = { getLabel: () => t('metadata.initImage'), parser: parsers.initialImage, recaller: recallers.initialImage, + renderValue: async (imageDTO) => imageDTO.image_name, }), negativePrompt: buildHandlers({ getLabel: () => t('metadata.negativePrompt'), diff --git a/invokeai/frontend/web/src/features/metadata/util/parsers.ts b/invokeai/frontend/web/src/features/metadata/util/parsers.ts index 5d844c4663..55a170745d 100644 --- a/invokeai/frontend/web/src/features/metadata/util/parsers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/parsers.ts @@ -1,3 +1,4 @@ +import { getStore } from 'app/store/nanostores/store'; import { initialControlNet, initialIPAdapter, @@ -20,7 +21,6 @@ import type { ParameterHeight, ParameterHRFEnabled, ParameterHRFMethod, - ParameterInitialImage, ParameterModel, ParameterNegativePrompt, ParameterNegativeStylePromptSDXL, @@ -59,6 +59,8 @@ import { isParameterWidth, } from 'features/parameters/types/parameterSchemas'; import { get, isArray, isString } from 'lodash-es'; +import { imagesApi } from 'services/api/endpoints/images'; +import type { ImageDTO } from 'services/api/types'; import { isControlNetModelConfig, isIPAdapterModelConfig, @@ -137,8 +139,13 @@ const parseCFGRescaleMultiplier: MetadataParseFunc = (metadata) => getProperty(metadata, 'scheduler', isParameterScheduler); -const parseInitialImage: MetadataParseFunc = (metadata) => - getProperty(metadata, 'init_image', isParameterInitialImage); +const parseInitialImage: MetadataParseFunc = async (metadata) => { + const imageName = await getProperty(metadata, 'init_image', isParameterInitialImage); + const imageDTORequest = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName)); + const imageDTO = await imageDTORequest.unwrap(); + imageDTORequest.unsubscribe(); + return imageDTO; +}; const parseWidth: MetadataParseFunc = (metadata) => getProperty(metadata, 'width', isParameterWidth); diff --git a/invokeai/frontend/web/src/features/metadata/util/recallers.ts 
b/invokeai/frontend/web/src/features/metadata/util/recallers.ts index 50d814b859..88af390a20 100644 --- a/invokeai/frontend/web/src/features/metadata/util/recallers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/recallers.ts @@ -35,7 +35,6 @@ import type { ParameterHeight, ParameterHRFEnabled, ParameterHRFMethod, - ParameterInitialImage, ParameterModel, ParameterNegativePrompt, ParameterNegativeStylePromptSDXL, @@ -63,7 +62,7 @@ import { setRefinerStart, setRefinerSteps, } from 'features/sdxl/store/sdxlSlice'; -import { imagesApi } from 'services/api/endpoints/images'; +import type { ImageDTO } from 'services/api/types'; const recallPositivePrompt: MetadataRecallFunc = (positivePrompt) => { getStore().dispatch(setPositivePrompt(positivePrompt)); @@ -97,10 +96,7 @@ const recallScheduler: MetadataRecallFunc = (scheduler) => { getStore().dispatch(setScheduler(scheduler)); }; -const recallInitialImage: MetadataRecallFunc = async (initialImage) => { - const imageDTORequest = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(initialImage)); - const imageDTO = await imageDTORequest.unwrap(); - imageDTORequest.unsubscribe(); +const recallInitialImage: MetadataRecallFunc = async (imageDTO) => { getStore().dispatch(initialImageChanged(imageDTO)); }; From 4d0a49298cbf813dddb27b852ba7fc83d5af7079 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 6 Apr 2024 14:48:51 +1100 Subject: [PATCH 11/19] tidy(ui): remove extraneous zod schema --- .../frontend/web/src/features/metadata/util/parsers.ts | 3 +-- .../web/src/features/parameters/types/parameterSchemas.ts | 7 ------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/invokeai/frontend/web/src/features/metadata/util/parsers.ts b/invokeai/frontend/web/src/features/metadata/util/parsers.ts index 55a170745d..9f5c14d94e 100644 --- a/invokeai/frontend/web/src/features/metadata/util/parsers.ts +++ b/invokeai/frontend/web/src/features/metadata/util/parsers.ts @@ -43,7 +43,6 @@ import { isParameterHeight, isParameterHRFEnabled, isParameterHRFMethod, - isParameterInitialImage, isParameterLoRAWeight, isParameterNegativePrompt, isParameterNegativeStylePromptSDXL, @@ -140,7 +139,7 @@ const parseScheduler: MetadataParseFunc = (metadata) => getProperty(metadata, 'scheduler', isParameterScheduler); const parseInitialImage: MetadataParseFunc = async (metadata) => { - const imageName = await getProperty(metadata, 'init_image', isParameterInitialImage); + const imageName = await getProperty(metadata, 'init_image', isString); const imageDTORequest = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName)); const imageDTO = await imageDTORequest.unwrap(); imageDTORequest.unsubscribe(); diff --git a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts index b3c403488a..75693cd47f 100644 --- a/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts +++ b/invokeai/frontend/web/src/features/parameters/types/parameterSchemas.ts @@ -126,13 +126,6 @@ const zParameterT2IAdapterModel = zModelIdentifierField; export type ParameterT2IAdapterModel = z.infer; // #endregion -// #region I2I Initial Image -const zParameterInitialImage = z.string(); -export type ParameterInitialImage = z.infer; -export const isParameterInitialImage = (val: unknown): val is ParameterInitialImage => - zParameterInitialImage.safeParse(val).success; -// #endregion - // #region Strength (l2l 
strength) const zParameterStrength = z.number().min(0).max(1); export type ParameterStrength = z.infer; From 69f17da1a21e5cc2768de36f18664242d2fe97bc Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 6 Apr 2024 08:50:52 +1100 Subject: [PATCH 12/19] fix(nodes): add WithBoard to public API --- invokeai/invocation_api/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/invokeai/invocation_api/__init__.py b/invokeai/invocation_api/__init__.py index 300ecd751b..11f334e24e 100644 --- a/invokeai/invocation_api/__init__.py +++ b/invokeai/invocation_api/__init__.py @@ -27,6 +27,7 @@ from invokeai.app.invocations.fields import ( OutputField, UIComponent, UIType, + WithBoard, WithMetadata, WithWorkflow, ) @@ -105,6 +106,7 @@ __all__ = [ "OutputField", "UIComponent", "UIType", + "WithBoard", "WithMetadata", "WithWorkflow", # invokeai.app.invocations.latent From 9a0a90e2a293b58d2a7b0714505f6f192165ffc5 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 6 Apr 2024 14:25:44 +1100 Subject: [PATCH 13/19] chore: v4.0.4 --- invokeai/version/invokeai_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py index c7a18d13e8..4b56dfc53e 100644 --- a/invokeai/version/invokeai_version.py +++ b/invokeai/version/invokeai_version.py @@ -1 +1 @@ -__version__ = "4.0.3" +__version__ = "4.0.4" From 2c45697f3d50fefbdd8c4a8b68694a2043d5d2ad Mon Sep 17 00:00:00 2001 From: Hosted Weblate Date: Sat, 6 Apr 2024 05:54:19 +0200 Subject: [PATCH 14/19] translationBot(ui): update translation files Updated by "Cleanup translation files" hook in Weblate. Co-authored-by: Hosted Weblate Translate-URL: https://hosted.weblate.org/projects/invokeai/web-ui/ Translation: InvokeAI/Web UI --- invokeai/frontend/web/public/locales/ar.json | 1 - invokeai/frontend/web/public/locales/de.json | 1 - invokeai/frontend/web/public/locales/es.json | 1 - invokeai/frontend/web/public/locales/fr.json | 1 - invokeai/frontend/web/public/locales/he.json | 1 - invokeai/frontend/web/public/locales/it.json | 1 - invokeai/frontend/web/public/locales/nl.json | 1 - invokeai/frontend/web/public/locales/pl.json | 1 - invokeai/frontend/web/public/locales/pt.json | 1 - invokeai/frontend/web/public/locales/pt_BR.json | 1 - invokeai/frontend/web/public/locales/ru.json | 1 - invokeai/frontend/web/public/locales/uk.json | 1 - invokeai/frontend/web/public/locales/zh_CN.json | 1 - 13 files changed, 13 deletions(-) diff --git a/invokeai/frontend/web/public/locales/ar.json b/invokeai/frontend/web/public/locales/ar.json index d5be1b1fce..ee370d1e42 100644 --- a/invokeai/frontend/web/public/locales/ar.json +++ b/invokeai/frontend/web/public/locales/ar.json @@ -291,7 +291,6 @@ "canvasMerged": "تم دمج الخط", "sentToImageToImage": "تم إرسال إلى صورة إلى صورة", "sentToUnifiedCanvas": "تم إرسال إلى لوحة موحدة", - "parametersSet": "تم تعيين المعلمات", "parametersNotSet": "لم يتم تعيين المعلمات", "metadataLoadFailed": "فشل تحميل البيانات الوصفية" }, diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 4f18cd0050..033dffdc44 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -480,7 +480,6 @@ "canvasMerged": "Leinwand zusammengeführt", "sentToImageToImage": "Gesendet an Bild zu Bild", "sentToUnifiedCanvas": "Gesendet an Leinwand", - "parametersSet": 
"Parameter festlegen", "parametersNotSet": "Parameter nicht festgelegt", "metadataLoadFailed": "Metadaten konnten nicht geladen werden", "setCanvasInitialImage": "Ausgangsbild setzen", diff --git a/invokeai/frontend/web/public/locales/es.json b/invokeai/frontend/web/public/locales/es.json index c7af596556..3037045db5 100644 --- a/invokeai/frontend/web/public/locales/es.json +++ b/invokeai/frontend/web/public/locales/es.json @@ -363,7 +363,6 @@ "canvasMerged": "Lienzo consolidado", "sentToImageToImage": "Enviar hacia Imagen a Imagen", "sentToUnifiedCanvas": "Enviar hacia Lienzo Consolidado", - "parametersSet": "Parámetros establecidos", "parametersNotSet": "Parámetros no establecidos", "metadataLoadFailed": "Error al cargar metadatos", "serverError": "Error en el servidor", diff --git a/invokeai/frontend/web/public/locales/fr.json b/invokeai/frontend/web/public/locales/fr.json index 095ee5d0d5..b8f560e265 100644 --- a/invokeai/frontend/web/public/locales/fr.json +++ b/invokeai/frontend/web/public/locales/fr.json @@ -298,7 +298,6 @@ "canvasMerged": "Canvas fusionné", "sentToImageToImage": "Envoyé à Image à Image", "sentToUnifiedCanvas": "Envoyé à Canvas unifié", - "parametersSet": "Paramètres définis", "parametersNotSet": "Paramètres non définis", "metadataLoadFailed": "Échec du chargement des métadonnées" }, diff --git a/invokeai/frontend/web/public/locales/he.json b/invokeai/frontend/web/public/locales/he.json index efb90f61c7..dbbb3cbec4 100644 --- a/invokeai/frontend/web/public/locales/he.json +++ b/invokeai/frontend/web/public/locales/he.json @@ -306,7 +306,6 @@ "canvasMerged": "קנבס מוזג", "sentToImageToImage": "נשלח לתמונה לתמונה", "sentToUnifiedCanvas": "נשלח אל קנבס מאוחד", - "parametersSet": "הגדרת פרמטרים", "parametersNotSet": "פרמטרים לא הוגדרו", "metadataLoadFailed": "טעינת מטא-נתונים נכשלה" }, diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 2b211484aa..ff4e44c487 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -569,7 +569,6 @@ "canvasMerged": "Tela unita", "sentToImageToImage": "Inviato a Immagine a Immagine", "sentToUnifiedCanvas": "Inviato a Tela Unificata", - "parametersSet": "Parametri impostati", "parametersNotSet": "Parametri non impostati", "metadataLoadFailed": "Impossibile caricare i metadati", "serverError": "Errore del Server", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index 8fd8c96ee4..70adbb371d 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -420,7 +420,6 @@ "canvasMerged": "Canvas samengevoegd", "sentToImageToImage": "Gestuurd naar Afbeelding naar afbeelding", "sentToUnifiedCanvas": "Gestuurd naar Centraal canvas", - "parametersSet": "Parameters ingesteld", "parametersNotSet": "Parameters niet ingesteld", "metadataLoadFailed": "Fout bij laden metagegevens", "serverError": "Serverfout", diff --git a/invokeai/frontend/web/public/locales/pl.json b/invokeai/frontend/web/public/locales/pl.json index 399417db58..b7592c3fae 100644 --- a/invokeai/frontend/web/public/locales/pl.json +++ b/invokeai/frontend/web/public/locales/pl.json @@ -267,7 +267,6 @@ "canvasMerged": "Scalono widoczne warstwy", "sentToImageToImage": "Wysłano do Obraz na obraz", "sentToUnifiedCanvas": "Wysłano do trybu uniwersalnego", - "parametersSet": "Ustawiono parametry", "parametersNotSet": "Nie ustawiono parametrów", "metadataLoadFailed": "Błąd 
wczytywania metadanych" }, diff --git a/invokeai/frontend/web/public/locales/pt.json b/invokeai/frontend/web/public/locales/pt.json index 34f99b7075..3003a1732b 100644 --- a/invokeai/frontend/web/public/locales/pt.json +++ b/invokeai/frontend/web/public/locales/pt.json @@ -310,7 +310,6 @@ "canvasMerged": "Tela Fundida", "sentToImageToImage": "Mandar Para Imagem Para Imagem", "sentToUnifiedCanvas": "Enviada para a Tela Unificada", - "parametersSet": "Parâmetros Definidos", "parametersNotSet": "Parâmetros Não Definidos", "metadataLoadFailed": "Falha ao tentar carregar metadados" }, diff --git a/invokeai/frontend/web/public/locales/pt_BR.json b/invokeai/frontend/web/public/locales/pt_BR.json index 2859eb31db..c966c6db50 100644 --- a/invokeai/frontend/web/public/locales/pt_BR.json +++ b/invokeai/frontend/web/public/locales/pt_BR.json @@ -307,7 +307,6 @@ "canvasMerged": "Tela Fundida", "sentToImageToImage": "Mandar Para Imagem Para Imagem", "sentToUnifiedCanvas": "Enviada para a Tela Unificada", - "parametersSet": "Parâmetros Definidos", "parametersNotSet": "Parâmetros Não Definidos", "metadataLoadFailed": "Falha ao tentar carregar metadados" }, diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 258bceeb05..4dd2ad895a 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -575,7 +575,6 @@ "canvasMerged": "Холст объединен", "sentToImageToImage": "Отправить в img2img", "sentToUnifiedCanvas": "Отправлено на Единый холст", - "parametersSet": "Параметры заданы", "parametersNotSet": "Параметры не заданы", "metadataLoadFailed": "Не удалось загрузить метаданные", "serverError": "Ошибка сервера", diff --git a/invokeai/frontend/web/public/locales/uk.json b/invokeai/frontend/web/public/locales/uk.json index f97909525c..9bb38c21b3 100644 --- a/invokeai/frontend/web/public/locales/uk.json +++ b/invokeai/frontend/web/public/locales/uk.json @@ -315,7 +315,6 @@ "canvasMerged": "Полотно об'єднане", "sentToImageToImage": "Надіслати до img2img", "sentToUnifiedCanvas": "Надіслати на полотно", - "parametersSet": "Параметри задані", "parametersNotSet": "Параметри не задані", "metadataLoadFailed": "Не вдалося завантажити метадані", "serverError": "Помилка сервера", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 77a06ea77b..a88f540990 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -487,7 +487,6 @@ "canvasMerged": "画布已合并", "sentToImageToImage": "已发送到图生图", "sentToUnifiedCanvas": "已发送到统一画布", - "parametersSet": "参数已设定", "parametersNotSet": "参数未设定", "metadataLoadFailed": "加载元数据失败", "uploadFailedInvalidUploadDesc": "必须是单张的 PNG 或 JPEG 图片", From 29cfe5a2745c233a6557256271688360995905ca Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 6 Apr 2024 15:30:30 +1100 Subject: [PATCH 15/19] fix(ui): handle multipleOf on number fields This data is already in the template but it wasn't ever used. One big place where this improves UX is the noise node. Previously, the UI let you change width and height in increments of 1, despite the template requiring a multiple of 8. It now works in multiples of 8. 
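In short, the step values are now derived from the template's `multipleOf` when it is present, falling back to the old defaults otherwise. A condensed sketch of the derivation (the actual change is in the diff below):

```ts
// Condensed sketch of the new step derivation - see the diff for the real code.
const getSteps = (multipleOf: number | undefined, isIntegerField: boolean) => ({
  // Coarse step used by the number input.
  step: multipleOf ?? (isIntegerField ? 1 : 0.1),
  // Fine-grained step for small adjustments.
  fineStep: multipleOf ?? (isIntegerField ? 1 : 0.01),
});

getSteps(8, true); // noise node width/height -> { step: 8, fineStep: 8 }
getSteps(undefined, false); // plain float field -> { step: 0.1, fineStep: 0.01 }
```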
--- .../inputs/NumberFieldInputComponent.tsx | 36 +++++++++++++------ 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx index 0cb250bb22..e3f33d8a45 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/fields/inputs/NumberFieldInputComponent.tsx @@ -37,34 +37,50 @@ const NumberFieldInputComponent = ( ); const min = useMemo(() => { + let min = -NUMPY_RAND_MAX; if (!isNil(fieldTemplate.minimum)) { - return fieldTemplate.minimum; + min = fieldTemplate.minimum; } if (!isNil(fieldTemplate.exclusiveMinimum)) { - return fieldTemplate.exclusiveMinimum + 0.01; + min = fieldTemplate.exclusiveMinimum + 0.01; } - return; + return min; }, [fieldTemplate.exclusiveMinimum, fieldTemplate.minimum]); const max = useMemo(() => { + let max = NUMPY_RAND_MAX; if (!isNil(fieldTemplate.maximum)) { - return fieldTemplate.maximum; + max = fieldTemplate.maximum; } if (!isNil(fieldTemplate.exclusiveMaximum)) { - return fieldTemplate.exclusiveMaximum - 0.01; + max = fieldTemplate.exclusiveMaximum - 0.01; } - return; + return max; }, [fieldTemplate.exclusiveMaximum, fieldTemplate.maximum]); + const step = useMemo(() => { + if (isNil(fieldTemplate.multipleOf)) { + return isIntegerField ? 1 : 0.1; + } + return fieldTemplate.multipleOf; + }, [fieldTemplate.multipleOf, isIntegerField]); + + const fineStep = useMemo(() => { + if (isNil(fieldTemplate.multipleOf)) { + return isIntegerField ? 1 : 0.01; + } + return fieldTemplate.multipleOf; + }, [fieldTemplate.multipleOf, isIntegerField]); + return ( ); From 9ab66554914cea5c9cc5879d5252c3e95724ae44 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sun, 7 Apr 2024 14:28:29 +1000 Subject: [PATCH 16/19] feat(backend): clean up choose_precision - Allow user-defined precision on MPS. - Use more explicit logic to handle all possible cases. - Add comments. 
- Remove the app_config args (they were effectively unused, just get the config using the singleton getter util) --- .../model_manager/load/load_default.py | 2 +- invokeai/backend/util/devices.py | 46 +++++++++---------- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/invokeai/backend/model_manager/load/load_default.py b/invokeai/backend/model_manager/load/load_default.py index 60cc1f5e6c..6774fc2989 100644 --- a/invokeai/backend/model_manager/load/load_default.py +++ b/invokeai/backend/model_manager/load/load_default.py @@ -37,7 +37,7 @@ class ModelLoader(ModelLoaderBase): self._logger = logger self._ram_cache = ram_cache self._convert_cache = convert_cache - self._torch_dtype = torch_dtype(choose_torch_device(), app_config) + self._torch_dtype = torch_dtype(choose_torch_device()) def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel: """ diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py index 0be53c842a..cb6b93eaac 100644 --- a/invokeai/backend/util/devices.py +++ b/invokeai/backend/util/devices.py @@ -6,8 +6,7 @@ from typing import Literal, Optional, Union import torch from torch import autocast -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.app.services.config.config_default import get_config +from invokeai.app.services.config.config_default import PRECISION, get_config CPU_DEVICE = torch.device("cpu") CUDA_DEVICE = torch.device("cuda") @@ -33,35 +32,34 @@ def get_torch_device_name() -> str: return torch.cuda.get_device_name(device) if device.type == "cuda" else device.type.upper() -# We are in transition here from using a single global AppConfig to allowing multiple -# configurations. It is strongly recommended to pass the app_config to this function. -def choose_precision( - device: torch.device, app_config: Optional[InvokeAIAppConfig] = None -) -> Literal["float32", "float16", "bfloat16"]: +def choose_precision(device: torch.device) -> Literal["float32", "float16", "bfloat16"]: """Return an appropriate precision for the given torch device.""" - app_config = app_config or get_config() + app_config = get_config() if device.type == "cuda": device_name = torch.cuda.get_device_name(device) - if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name): - if app_config.precision == "float32": - return "float32" - elif app_config.precision == "bfloat16": - return "bfloat16" - else: - return "float16" + if "GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name: + # These GPUs have limited support for float16 + return "float32" + elif app_config.precision == "auto" or app_config.precision == "autocast": + # Default to float16 for CUDA devices + return "float16" + else: + # Use the user-defined precision + return app_config.precision elif device.type == "mps": - return "float16" + if app_config.precision == "auto" or app_config.precision == "autocast": + # Default to float16 for MPS devices + return "float16" + else: + # Use the user-defined precision + return app_config.precision + # CPU / safe fallback return "float32" -# We are in transition here from using a single global AppConfig to allowing multiple -# configurations. It is strongly recommended to pass the app_config to this function. 
-def torch_dtype(
-    device: Optional[torch.device] = None,
-    app_config: Optional[InvokeAIAppConfig] = None,
-) -> torch.dtype:
+def torch_dtype(device: Optional[torch.device] = None) -> torch.dtype:
     device = device or choose_torch_device()
-    precision = choose_precision(device, app_config)
+    precision = choose_precision(device)
     if precision == "float16":
         return torch.float16
     if precision == "bfloat16":
         return torch.bfloat16
@@ -71,7 +69,7 @@
     return torch.float32

-def choose_autocast(precision):
+def choose_autocast(precision: PRECISION):
     """Returns an autocast context or nullcontext for the given precision string"""
     # float16 currently requires autocast to avoid errors like:
     # 'expected scalar type Half but found Float'

From dca30d54629cc739b39c894ecfea5452eae44735 Mon Sep 17 00:00:00 2001
From: fieldOfView
Date: Mon, 8 Apr 2024 09:01:07 +0200
Subject: [PATCH 17/19] (feat) add a method to get the path of an image from the invocation context

Fixes #6175
---
 invokeai/app/services/shared/invocation_context.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py
index e533baf3bc..9994d663e5 100644
--- a/invokeai/app/services/shared/invocation_context.py
+++ b/invokeai/app/services/shared/invocation_context.py
@@ -245,6 +245,18 @@ class ImagesInterface(InvocationContextInterface):
         """
         return self._services.images.get_dto(image_name)

+    def get_path(self, image_name: str, thumbnail: bool = False) -> Path:
+        """Gets the internal path to an image or thumbnail.
+
+        Args:
+            image_name: The name of the image to get the path of.
+            thumbnail: Get the path of the thumbnail instead of the full image
+
+        Returns:
+            The local path of the image or thumbnail.
+        """
+        return self._services.images.get_path(image_name, thumbnail)
+

 class TensorsInterface(InvocationContextInterface):
     def save(self, tensor: Tensor) -> str:

From b58494c42071dfd026d13699bc38b1950d87474a Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Tue, 9 Apr 2024 10:17:03 +1000
Subject: [PATCH 18/19] feat(ui): add graph-to-workflow debug helper

This is intended for debug usage, so it's hidden away in the workflow library `...` menu. Hold shift to see the button for it.

- Paste a graph (from a network request, for example) and then click the convert button to convert it to a workflow.
- Disable auto layout to stack the nodes with an offset (try it out). If you change this, you must re-convert to get the changes.
- Edit the workflow JSON if you need to tweak something before loading it.
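A rough sketch of the resulting flow (hypothetical snippet; it assumes a pasted `graphJson` string and a Redux `dispatch` in scope, while `graphToWorkflow` and `workflowLoadRequested` are the helpers wired up below):

    // Parse a graph captured from a network request, convert it to a
    // workflow, then load the result into the editor as a copy.
    const graph = JSON.parse(graphJson);
    const workflow = graphToWorkflow(graph, true); // true = auto-layout via dagre
    dispatch(workflowLoadRequested({ workflow, asCopy: true }));

This mirrors what the modal's convert and load buttons do in sequence.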
---
 invokeai/frontend/web/package.json            |   1 +
 invokeai/frontend/web/pnpm-lock.yaml          |   9 ++
 invokeai/frontend/web/public/locales/en.json  |   7 +-
 .../features/nodes/components/NodeEditor.tsx  |   2 +
 .../nodes/util/workflow/graphToWorkflow.ts    | 148 ++++++++++++++++++
 .../LoadWorkflowFromGraphModal.tsx            | 111 +++++++++++++
 .../LoadWorkflowFromGraphMenuItem.tsx         |  18 +++
 .../WorkflowLibraryMenu.tsx                   |   5 +
 8 files changed, 300 insertions(+), 1 deletion(-)
 create mode 100644 invokeai/frontend/web/src/features/nodes/util/workflow/graphToWorkflow.ts
 create mode 100644 invokeai/frontend/web/src/features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal.tsx
 create mode 100644 invokeai/frontend/web/src/features/workflowLibrary/components/WorkflowLibraryMenu/LoadWorkflowFromGraphMenuItem.tsx

diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json
index 81fc9c4dd3..a99910c549 100644
--- a/invokeai/frontend/web/package.json
+++ b/invokeai/frontend/web/package.json
@@ -52,6 +52,7 @@
   },
   "dependencies": {
     "@chakra-ui/react-use-size": "^2.1.0",
+    "@dagrejs/dagre": "^1.1.1",
     "@dagrejs/graphlib": "^2.2.1",
     "@dnd-kit/core": "^6.1.0",
     "@dnd-kit/sortable": "^8.0.0",

diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml
index 4be16619ec..bf423c3d46 100644
--- a/invokeai/frontend/web/pnpm-lock.yaml
+++ b/invokeai/frontend/web/pnpm-lock.yaml
@@ -11,6 +11,9 @@ dependencies:
   '@chakra-ui/react-use-size':
     specifier: ^2.1.0
     version: 2.1.0(react@18.2.0)
+  '@dagrejs/dagre':
+    specifier: ^1.1.1
+    version: 1.1.1
   '@dagrejs/graphlib':
     specifier: ^2.2.1
     version: 2.2.1
@@ -3092,6 +3095,12 @@ packages:
     dev: true
     optional: true

+  /@dagrejs/dagre@1.1.1:
+    resolution: {integrity: sha512-AQfT6pffEuPE32weFzhS/u3UpX+bRXUARIXL7UqLaxz497cN8pjuBlX6axO4IIECE2gBV8eLFQkGCtKX5sDaUA==}
+    dependencies:
+      '@dagrejs/graphlib': 2.2.1
+    dev: false
+
   /@dagrejs/graphlib@2.2.1:
     resolution: {integrity: sha512-xJsN1v6OAxXk6jmNdM+OS/bBE8nDCwM0yDNprXR18ZNatL6to9ggod9+l2XtiLhXfLm0NkE7+Er/cpdlM+SkUA==}
     engines: {node: '>17.0.0'}

diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index 9686f8a02a..5454c72e68 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -849,6 +849,7 @@
     "version": "Version",
     "versionUnknown": " Version Unknown",
     "workflow": "Workflow",
+    "graph": "Graph",
     "workflowAuthor": "Author",
     "workflowContact": "Contact",
     "workflowDescription": "Short Description",
@@ -1482,7 +1483,11 @@
     "workflowName": "Workflow Name",
     "newWorkflowCreated": "New Workflow Created",
     "workflowCleared": "Workflow Cleared",
-    "workflowEditorMenu": "Workflow Editor Menu"
+    "workflowEditorMenu": "Workflow Editor Menu",
+    "loadFromGraph": "Load Workflow from Graph",
+    "convertGraph": "Convert Graph",
+    "loadWorkflow": "$t(common.load) Workflow",
+    "autoLayout": "Auto Layout"
   },
   "app": {
     "storeNotInitialized": "Store is not initialized"

diff --git a/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx b/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
index 8307997ff9..737adb52e7 100644
--- a/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
+++ b/invokeai/frontend/web/src/features/nodes/components/NodeEditor.tsx
@@ -3,6 +3,7 @@ import 'reactflow/dist/style.css';
 import { Flex } from '@invoke-ai/ui-library';
 import { IAINoContentFallback } from 'common/components/IAIImageFallback';
 import TopPanel from 'features/nodes/components/flow/panels/TopPanel/TopPanel';
+import { LoadWorkflowFromGraphModal } from 'features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal';
 import { SaveWorkflowAsDialog } from 'features/workflowLibrary/components/SaveWorkflowAsDialog/SaveWorkflowAsDialog';
 import type { AnimationProps } from 'framer-motion';
 import { AnimatePresence, motion } from 'framer-motion';
@@ -61,6 +62,7 @@ const NodeEditor = () => {
+ )}

diff --git a/invokeai/frontend/web/src/features/nodes/util/workflow/graphToWorkflow.ts b/invokeai/frontend/web/src/features/nodes/util/workflow/graphToWorkflow.ts
new file mode 100644
index 0000000000..eec9c6cf4b
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/util/workflow/graphToWorkflow.ts
@@ -0,0 +1,148 @@
+import * as dagre from '@dagrejs/dagre';
+import { logger } from 'app/logging/logger';
+import { getStore } from 'app/store/nanostores/store';
+import { NODE_WIDTH } from 'features/nodes/types/constants';
+import type { FieldInputInstance } from 'features/nodes/types/field';
+import type { WorkflowV3 } from 'features/nodes/types/workflow';
+import { buildFieldInputInstance } from 'features/nodes/util/schema/buildFieldInputInstance';
+import { t } from 'i18next';
+import { forEach } from 'lodash-es';
+import type { NonNullableGraph } from 'services/api/types';
+import { v4 as uuidv4 } from 'uuid';
+
+/**
+ * Converts a graph to a workflow. This is a best-effort conversion and may not be perfect.
+ * For example, if a graph references an unknown node type, that node will be skipped.
+ * @param graph The graph to convert to a workflow
+ * @param autoLayout Whether to auto-layout the nodes using `dagre`. If false, nodes will be simply stacked on top of one another with an offset.
+ * @returns The workflow.
+ */
+export const graphToWorkflow = (graph: NonNullableGraph, autoLayout = true): WorkflowV3 => {
+  const invocationTemplates = getStore().getState().nodes.templates;
+
+  if (!invocationTemplates) {
+    throw new Error(t('app.storeNotInitialized'));
+  }
+
+  // Initialize the workflow
+  const workflow: WorkflowV3 = {
+    name: '',
+    author: '',
+    contact: '',
+    description: '',
+    meta: {
+      category: 'user',
+      version: '3.0.0',
+    },
+    notes: '',
+    tags: '',
+    version: '',
+    exposedFields: [],
+    edges: [],
+    nodes: [],
+  };
+
+  // Convert nodes
+  forEach(graph.nodes, (node) => {
+    const template = invocationTemplates[node.type];
+
+    // Skip missing node templates - this is a best-effort
+    if (!template) {
+      logger('nodes').warn(`Node type ${node.type} not found in invocationTemplates`);
+      return;
+    }
+
+    // Build field input instances for each attr
+    const inputs: Record<string, FieldInputInstance> = {};
+
+    forEach(node, (value, key) => {
+      // Ignore the non-input keys - I think this is all of them?
+      if (key === 'id' || key === 'type' || key === 'is_intermediate' || key === 'use_cache') {
+        return;
+      }
+
+      const inputTemplate = template.inputs[key];
+
+      // Skip missing input templates
+      if (!inputTemplate) {
+        logger('nodes').warn(`Input ${key} not found in template for node type ${node.type}`);
+        return;
+      }
+
+      // This _should_ be all we need to do!
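+      // buildFieldInputInstance returns an instance seeded with the template's
+      // default value; the graph node's raw value then replaces that default.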
+      const inputInstance = buildFieldInputInstance(node.id, inputTemplate);
+      inputInstance.value = value;
+      inputs[key] = inputInstance;
+    });
+
+    workflow.nodes.push({
+      id: node.id,
+      type: 'invocation',
+      position: { x: 0, y: 0 }, // we'll do layout later, just need something here
+      data: {
+        id: node.id,
+        type: node.type,
+        version: template.version,
+        label: '',
+        notes: '',
+        isOpen: true,
+        isIntermediate: node.is_intermediate ?? false,
+        useCache: node.use_cache ?? true,
+        inputs,
+      },
+    });
+  });
+
+  forEach(graph.edges, (edge) => {
+    workflow.edges.push({
+      id: uuidv4(), // we don't have edge IDs in the graph
+      type: 'default',
+      source: edge.source.node_id,
+      sourceHandle: edge.source.field,
+      target: edge.destination.node_id,
+      targetHandle: edge.destination.field,
+    });
+  });
+
+  if (autoLayout) {
+    // Best-effort auto layout via dagre - not perfect but better than nothing
+    const dagreGraph = new dagre.graphlib.Graph();
+    // `rankdir` and `align` could be tweaked, but it's gonna be janky no matter what we choose
+    dagreGraph.setGraph({ rankdir: 'TB', align: 'UL' });
+    dagreGraph.setDefaultEdgeLabel(() => ({}));
+
+    // We don't know the dimensions of the nodes until we load the graph into `reactflow` - use a reasonable value
+    forEach(graph.nodes, (node) => {
+      const width = NODE_WIDTH;
+      const height = NODE_WIDTH * 1.5;
+      dagreGraph.setNode(node.id, { width, height });
+    });
+
+    graph.edges.forEach((edge) => {
+      dagreGraph.setEdge(edge.source.node_id, edge.destination.node_id);
+    });
+
+    // This does the magic
+    dagre.layout(dagreGraph);
+
+    // Update the workflow now that we've got the positions
+    workflow.nodes.forEach((node) => {
+      const nodeWithPosition = dagreGraph.node(node.id);
+      node.position = {
+        x: nodeWithPosition.x - nodeWithPosition.width / 2,
+        y: nodeWithPosition.y - nodeWithPosition.height / 2,
+      };
+    });
+  } else {
+    // Stack nodes with a 50px, 50px offset from the previous node
+    let x = 0;
+    let y = 0;
+    workflow.nodes.forEach((node) => {
+      node.position = { x, y };
+      x = x + 50;
+      y = y + 50;
+    });
+  }
+
+  return workflow;
+};

diff --git a/invokeai/frontend/web/src/features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal.tsx b/invokeai/frontend/web/src/features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal.tsx
new file mode 100644
index 0000000000..ecb4aa7dd4
--- /dev/null
+++ b/invokeai/frontend/web/src/features/workflowLibrary/components/LoadWorkflowFromGraphModal/LoadWorkflowFromGraphModal.tsx
@@ -0,0 +1,111 @@
+import {
+  Button,
+  Checkbox,
+  Flex,
+  FormControl,
+  FormLabel,
+  Modal,
+  ModalBody,
+  ModalCloseButton,
+  ModalContent,
+  ModalHeader,
+  ModalOverlay,
+  Spacer,
+  Textarea,
+} from '@invoke-ai/ui-library';
+import { useStore } from '@nanostores/react';
+import { useAppDispatch } from 'app/store/storeHooks';
+import { workflowLoadRequested } from 'features/nodes/store/actions';
+import { graphToWorkflow } from 'features/nodes/util/workflow/graphToWorkflow';
+import { atom } from 'nanostores';
+import type { ChangeEvent } from 'react';
+import { useCallback, useState } from 'react';
+import { useTranslation } from 'react-i18next';
+
+const $isOpen = atom(false);
+
+export const useLoadWorkflowFromGraphModal = () => {
+  const isOpen = useStore($isOpen);
+  const onOpen = useCallback(() => {
+    $isOpen.set(true);
+  }, []);
+  const onClose = useCallback(() => {
+    $isOpen.set(false);
+  }, []);
+
+  return { isOpen, onOpen, onClose };
+};
+
+export const LoadWorkflowFromGraphModal = () => {
+  const { t } = useTranslation();
+  const dispatch = useAppDispatch();
+  const { isOpen, onClose } = useLoadWorkflowFromGraphModal();
+  const [graphRaw, setGraphRaw] = useState('');
+  const [workflowRaw, setWorkflowRaw] = useState('');
+  const [shouldAutoLayout, setShouldAutoLayout] = useState(true);
+  const onChangeGraphRaw = useCallback((e: ChangeEvent<HTMLTextAreaElement>) => {
+    setGraphRaw(e.target.value);
+  }, []);
+  const onChangeWorkflowRaw = useCallback((e: ChangeEvent<HTMLTextAreaElement>) => {
+    setWorkflowRaw(e.target.value);
+  }, []);
+  const onChangeShouldAutoLayout = useCallback((e: ChangeEvent<HTMLInputElement>) => {
+    setShouldAutoLayout(e.target.checked);
+  }, []);
+  const parse = useCallback(() => {
+    const graph = JSON.parse(graphRaw);
+    const workflow = graphToWorkflow(graph, shouldAutoLayout);
+    setWorkflowRaw(JSON.stringify(workflow, null, 2));
+  }, [graphRaw, shouldAutoLayout]);
+  const loadWorkflow = useCallback(() => {
+    const workflow = JSON.parse(workflowRaw);
+    dispatch(workflowLoadRequested({ workflow, asCopy: true }));
+    onClose();
+  }, [dispatch, onClose, workflowRaw]);
+  return ( + + + {t('workflows.loadFromGraph')} + + + + + + {t('workflows.autoLayout')} + + + + + + + {t('nodes.graph')}