From 634e5652ef3a2016b5790eb56d4978fbbedd87b9 Mon Sep 17 00:00:00 2001 From: Mary Hipp Date: Mon, 25 Sep 2023 14:40:12 -0400 Subject: [PATCH 1/7] add skeleton loading state for queue lit --- .../QueueList/QueueItemSkeleton.tsx | 50 ++++++++++++++ .../queue/components/QueueList/QueueList.tsx | 69 ++++++++++++------- 2 files changed, 94 insertions(+), 25 deletions(-) create mode 100644 invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx diff --git a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx new file mode 100644 index 0000000000..72a5fcdc96 --- /dev/null +++ b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx @@ -0,0 +1,50 @@ +import { Flex, Skeleton, Text } from '@chakra-ui/react'; +import { memo } from 'react'; +import { COLUMN_WIDTHS } from './constants'; + +const QueueItemSkeleton = () => { + return ( + + + +   + + + + +   + + + + +   + + + + +   + + + + +   + + + + ); +}; + +export default memo(QueueItemSkeleton); diff --git a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx index dea1443489..cae8669efc 100644 --- a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx +++ b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx @@ -1,4 +1,4 @@ -import { Flex, Heading } from '@chakra-ui/react'; +import { Flex, Heading, Skeleton, Text } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { stateSelector } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; @@ -23,6 +23,7 @@ import QueueItemComponent from './QueueItemComponent'; import QueueListComponent from './QueueListComponent'; import QueueListHeader from './QueueListHeader'; import { ListContext } from './types'; +import QueueItemSkeleton from './QueueItemSkeleton'; // eslint-disable-next-line @typescript-eslint/no-explicit-any type TableVirtuosoScrollerRef = (ref: HTMLElement | Window | null) => any; @@ -85,7 +86,7 @@ const QueueList = () => { return () => osInstance()?.destroy(); }, [scroller, initialize, osInstance]); - const { data: listQueueItemsData } = useListQueueItemsQuery({ + const { data: listQueueItemsData, isLoading } = useListQueueItemsQuery({ cursor: listCursor, priority: listPriority, }); @@ -127,33 +128,51 @@ const QueueList = () => { return ( - {queueItems.length ? ( + {isLoading ? ( <> - - - data={queueItems} - endReached={handleLoadMore} - scrollerRef={setScroller as TableVirtuosoScrollerRef} - itemContent={itemContent} - computeItemKey={computeItemKey} - components={components} - context={context} - /> - + + + + + + + + + + ) : ( - - - {t('queue.queueEmpty')} - - + <> + {queueItems.length ? 
( + <> + + + + data={queueItems} + endReached={handleLoadMore} + scrollerRef={setScroller as TableVirtuosoScrollerRef} + itemContent={itemContent} + computeItemKey={computeItemKey} + components={components} + context={context} + /> + + + ) : ( + + + {t('queue.queueEmpty')} + + + )} + )} ); From 13919ff30020a0258a4ba0de5df96ab61c46f0d0 Mon Sep 17 00:00:00 2001 From: Mary Hipp Date: Mon, 25 Sep 2023 14:41:29 -0400 Subject: [PATCH 2/7] remove unused vars --- .../web/src/features/queue/components/QueueList/QueueList.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx index cae8669efc..19c61b4379 100644 --- a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx +++ b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx @@ -1,4 +1,4 @@ -import { Flex, Heading, Skeleton, Text } from '@chakra-ui/react'; +import { Flex, Heading } from '@chakra-ui/react'; import { createSelector } from '@reduxjs/toolkit'; import { stateSelector } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; From 869b4a8d4900d1ffa14554402c8dcc91bcb70b20 Mon Sep 17 00:00:00 2001 From: Darren Ringer Date: Mon, 25 Sep 2023 18:49:04 -0400 Subject: [PATCH 3/7] Add image enhance node to composition pack in communitynods, 9 more nodes Adds 9 more of my nodes to the Image & Mask Composition Pack in the community nodes page, and integrates the Enhance Image node into that pack as well (formerly it was its own entry). --- docs/nodes/communityNodes.md | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md index 2b30b9f0af..151b9ea262 100644 --- a/docs/nodes/communityNodes.md +++ b/docs/nodes/communityNodes.md @@ -121,18 +121,6 @@ To be imported, an .obj must use triangulated meshes, so make sure to enable tha **Example Usage:** ![depth from obj usage graph](https://raw.githubusercontent.com/dwringer/depth-from-obj-node/main/depth_from_obj_usage.jpg) --------------------------------- -### Enhance Image (simple adjustments) - -**Description:** Boost or reduce color saturation, contrast, brightness, sharpness, or invert colors of any image at any stage with this simple wrapper for pillow [PIL]'s ImageEnhance module. - -Color inversion is toggled with a simple switch, while each of the four enhancer modes are activated by entering a value other than 1 in each corresponding input field. Values less than 1 will reduce the corresponding property, while values greater than 1 will enhance it. - -**Node Link:** https://github.com/dwringer/image-enhance-node - -**Example Usage:** -![enhance image usage graph](https://raw.githubusercontent.com/dwringer/image-enhance-node/main/image_enhance_usage.jpg) - -------------------------------- ### Generative Grammar-Based Prompt Nodes @@ -153,16 +141,26 @@ This includes 3 Nodes: **Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling. -This includes 4 Nodes: -- *Text Mask (simple 2D)* - create and position a white on black (or black on white) line of text using any font locally available to Invoke. 
+This includes 14 Nodes:
+- *Adjust Image Hue Plus* - Rotate the hue of an image in one of several different color spaces.
+- *Blend Latents/Noise (Masked)* - Use a mask to blend part of one latents tensor [including Noise outputs] into another. Can be used to "renoise" sections during a multi-stage [masked] denoising process.
+- *Enhance Image* - Boost or reduce color saturation, contrast, brightness, sharpness, or invert colors of any image at any stage with this simple wrapper for pillow [PIL]'s ImageEnhance module.
+- *Equivalent Achromatic Lightness* - Calculates image lightness accounting for Helmholtz-Kohlrausch effect based on a method described by High, Green, and Nussbaum (2023).
+- *Text to Mask (Clipseg)* - Input a prompt and an image to generate a mask representing areas of the image matched by the prompt.
+- *Text to Mask Advanced (Clipseg)* - Output up to four prompt masks combined with logical "and", logical "or", or as separate channels of an RGBA image.
+- *Image Layer Blend* - Perform a layered blend of two images using alpha compositing. Opacity of top layer is selectable, with optional mask and several different blend modes/color spaces.
+- *Image Compositor* - Take a subject from an image with a flat backdrop and layer it on another image using a chroma key or flood select background removal.
+- *Image Dilate or Erode* - Dilate or expand a mask (or any image!). This is equivalent to an expand/contract operation.
+- *Image Value Thresholds* - Clip an image to pure black/white beyond specified thresholds.
+- *Offset Latents* - Offset a latents tensor in the vertical and/or horizontal dimensions, wrapping it around.
+- *Offset Image* - Offset an image in the vertical and/or horizontal dimensions, wrapping it around.
+- *Shadows/Highlights/Midtones* - Extract three masks (with adjustable hard or soft thresholds) representing shadows, midtones, and highlights regions of an image.
+- *Text Mask (simple 2D)* - create and position a white on black (or black on white) line of text using any font locally available to Invoke.
**Node Link:** https://github.com/dwringer/composition-nodes -**Example Usage:** -![composition nodes usage graph](https://raw.githubusercontent.com/dwringer/composition-nodes/main/composition_nodes_usage.jpg) +**Nodes and Output Examples:** +![composition nodes usage graph](https://raw.githubusercontent.com/dwringer/composition-nodes/main/composition_pack_overview.jpg) -------------------------------- ### Size Stepper Nodes From 066e09b5175b735d00358f325ead6f0194e565aa Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 25 Sep 2023 19:30:41 -0400 Subject: [PATCH 4/7] remove dangling debug statement --- invokeai/app/api/routers/models.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index cb5d2d79f0..ebc40f5ce5 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -156,8 +156,6 @@ async def import_model( prediction_types = {x.value: x for x in SchedulerPredictionType} logger = ApiDependencies.invoker.services.logger - print(f"DEBUG: prediction_type = {prediction_type}") - try: installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import( items_to_import=items_to_import, prediction_type_helper=lambda x: prediction_types.get(prediction_type) From 358116bc225e6f886c29c69da63517a1ffa48f8f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 26 Sep 2023 10:21:11 +1000 Subject: [PATCH 5/7] feat(ui): use spinner for queue loading state Skeletons are for when we know the number of specific content items that are loading. When the queue is loading, we don't know how many items there are, or how many will load, so the whole list should be replaced with loading state. The previous behaviour rendered a static number of skeletons. That number would rarely be the right number - the app shouldn't say "I'm loading 7 queue items", then load none, or load 50. A future enhancement could use the queue item skeleton component and go by the total number of queue items, as reported by the queue status. I tried this but had some layout jankiness, not worth the effort right now. The queue item skeleton component's styling was updated to support this future enhancement, making it exactly the same size as a queue item (it was a bit smaller before). 
--- invokeai/frontend/web/public/locales/en.json | 2 +- .../common/components/IAIImageFallback.tsx | 35 ++++++++ .../QueueList/QueueItemSkeleton.tsx | 41 ++++------ .../queue/components/QueueList/QueueList.tsx | 80 ++++++++----------- 4 files changed, 85 insertions(+), 73 deletions(-) diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index a28ef8d490..285da58e3c 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -79,7 +79,7 @@ "lightMode": "Light Mode", "linear": "Linear", "load": "Load", - "loading": "Loading", + "loading": "Loading $t({{noun}})...", "loadingInvokeAI": "Loading Invoke AI", "learnMore": "Learn More", "modelManager": "Model Manager", diff --git a/invokeai/frontend/web/src/common/components/IAIImageFallback.tsx b/invokeai/frontend/web/src/common/components/IAIImageFallback.tsx index ca61ea847f..3c1a05d527 100644 --- a/invokeai/frontend/web/src/common/components/IAIImageFallback.tsx +++ b/invokeai/frontend/web/src/common/components/IAIImageFallback.tsx @@ -81,3 +81,38 @@ export const IAINoContentFallback = (props: IAINoImageFallbackProps) => { ); }; + +type IAINoImageFallbackWithSpinnerProps = FlexProps & { + label?: string; +}; + +export const IAINoContentFallbackWithSpinner = ( + props: IAINoImageFallbackWithSpinnerProps +) => { + const { sx, ...rest } = props; + + return ( + + + {props.label && {props.label}} + + ); +}; diff --git a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx index 72a5fcdc96..529c46af74 100644 --- a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx +++ b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueItemSkeleton.tsx @@ -1,46 +1,37 @@ -import { Flex, Skeleton, Text } from '@chakra-ui/react'; +import { Flex, Skeleton } from '@chakra-ui/react'; import { memo } from 'react'; import { COLUMN_WIDTHS } from './constants'; const QueueItemSkeleton = () => { return ( - + - -   + +   - - -   + + +   - - -   + + +   - - -   + + +   - - -   + + +   diff --git a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx index 19c61b4379..e136e6df6c 100644 --- a/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx +++ b/invokeai/frontend/web/src/features/queue/components/QueueList/QueueList.tsx @@ -3,6 +3,7 @@ import { createSelector } from '@reduxjs/toolkit'; import { stateSelector } from 'app/store/store'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; +import { IAINoContentFallbackWithSpinner } from 'common/components/IAIImageFallback'; import { listCursorChanged, listPriorityChanged, @@ -23,7 +24,6 @@ import QueueItemComponent from './QueueItemComponent'; import QueueListComponent from './QueueListComponent'; import QueueListHeader from './QueueListHeader'; import { ListContext } from './types'; -import QueueItemSkeleton from './QueueItemSkeleton'; // eslint-disable-next-line @typescript-eslint/no-explicit-any type TableVirtuosoScrollerRef = (ref: HTMLElement | Window | null) => any; @@ -126,54 +126,40 @@ const QueueList = () => { [openQueueItems, toggleQueueItem] ); + if (isLoading) { + return ; + } + + if (!queueItems.length) { + return ( + + + 
{t('queue.queueEmpty')} + + + ); + } + return ( - {isLoading ? ( - <> - - - - - - - - - - - - - ) : ( - <> - {queueItems.length ? ( - <> - - - - data={queueItems} - endReached={handleLoadMore} - scrollerRef={setScroller as TableVirtuosoScrollerRef} - itemContent={itemContent} - computeItemKey={computeItemKey} - components={components} - context={context} - /> - - - ) : ( - - - {t('queue.queueEmpty')} - - - )} - - )} + + + + data={queueItems} + endReached={handleLoadMore} + scrollerRef={setScroller as TableVirtuosoScrollerRef} + itemContent={itemContent} + computeItemKey={computeItemKey} + components={components} + context={context} + /> + ); }; From f8392b2f78d60054be521b3714ee86a6531544e7 Mon Sep 17 00:00:00 2001 From: Mary Hipp Rogers Date: Mon, 25 Sep 2023 23:26:15 -0400 Subject: [PATCH 6/7] Maryhipp/hide use cache checkbox if disabled (#4691) * add skeleton loading state for queue lit * hide use cache checkbox if cache is disabled * undo accidental add * feat(ui): hide node footer entirely if nothing to show there --------- Co-authored-by: Mary Hipp Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com> --- .../flow/nodes/Invocation/InvocationNode.tsx | 6 ++-- .../nodes/Invocation/InvocationNodeFooter.tsx | 4 ++- .../src/features/nodes/hooks/useWithFooter.ts | 35 +++++-------------- 3 files changed, 16 insertions(+), 29 deletions(-) diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNode.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNode.tsx index d2e0667ab2..a33a854c3b 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNode.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNode.tsx @@ -8,6 +8,7 @@ import InvocationNodeFooter from './InvocationNodeFooter'; import InvocationNodeHeader from './InvocationNodeHeader'; import InputField from './fields/InputField'; import OutputField from './fields/OutputField'; +import { useWithFooter } from 'features/nodes/hooks/useWithFooter'; type Props = { nodeId: string; @@ -20,6 +21,7 @@ type Props = { const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => { const inputConnectionFieldNames = useConnectionInputFieldNames(nodeId); const inputAnyOrDirectFieldNames = useAnyOrDirectInputFieldNames(nodeId); + const withFooter = useWithFooter(nodeId); const outputFieldNames = useOutputFieldNames(nodeId); return ( @@ -41,7 +43,7 @@ const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => { h: 'full', py: 2, gap: 1, - borderBottomRadius: 0, + borderBottomRadius: withFooter ? 
0 : 'base', }} > @@ -74,7 +76,7 @@ const InvocationNode = ({ nodeId, isOpen, label, type, selected }: Props) => { ))} - + {withFooter && } )} diff --git a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx index ba1f7977ab..ec5085221e 100644 --- a/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/flow/nodes/Invocation/InvocationNodeFooter.tsx @@ -5,6 +5,7 @@ import EmbedWorkflowCheckbox from './EmbedWorkflowCheckbox'; import SaveToGalleryCheckbox from './SaveToGalleryCheckbox'; import UseCacheCheckbox from './UseCacheCheckbox'; import { useHasImageOutput } from 'features/nodes/hooks/useHasImageOutput'; +import { useFeatureStatus } from '../../../../../system/hooks/useFeatureStatus'; type Props = { nodeId: string; @@ -12,6 +13,7 @@ type Props = { const InvocationNodeFooter = ({ nodeId }: Props) => { const hasImageOutput = useHasImageOutput(nodeId); + const isCacheEnabled = useFeatureStatus('invocationCache').isFeatureEnabled; return ( { justifyContent: 'space-between', }} > - + {isCacheEnabled && } {hasImageOutput && } {hasImageOutput && } diff --git a/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts b/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts index 57941eaec8..4d2a58cc35 100644 --- a/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts +++ b/invokeai/frontend/web/src/features/nodes/hooks/useWithFooter.ts @@ -1,31 +1,14 @@ -import { createSelector } from '@reduxjs/toolkit'; -import { stateSelector } from 'app/store/store'; -import { useAppSelector } from 'app/store/storeHooks'; -import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions'; -import { some } from 'lodash-es'; +import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; import { useMemo } from 'react'; -import { FOOTER_FIELDS } from '../types/constants'; -import { isInvocationNode } from '../types/types'; +import { useHasImageOutput } from './useHasImageOutput'; -export const useHasImageOutputs = (nodeId: string) => { - const selector = useMemo( - () => - createSelector( - stateSelector, - ({ nodes }) => { - const node = nodes.nodes.find((node) => node.id === nodeId); - if (!isInvocationNode(node)) { - return false; - } - return some(node.data.outputs, (output) => - FOOTER_FIELDS.includes(output.type) - ); - }, - defaultSelectorOptions - ), - [nodeId] +export const useWithFooter = (nodeId: string) => { + const hasImageOutput = useHasImageOutput(nodeId); + const isCacheEnabled = useFeatureStatus('invocationCache').isFeatureEnabled; + + const withFooter = useMemo( + () => hasImageOutput || isCacheEnabled, + [hasImageOutput, isCacheEnabled] ); - - const withFooter = useAppSelector(selector); return withFooter; }; From a2613948d8328957b1bf9570e18159bf46f2c4e3 Mon Sep 17 00:00:00 2001 From: Martin Kristiansen Date: Mon, 25 Sep 2023 23:42:09 -0400 Subject: [PATCH 7/7] Feature/lru caching 2 (#4657) * fix(nodes): do not disable invocation cache delete methods When the runtime disabled flag is on, do not skip the delete methods. This could lead to a hit on a missing resource. Do skip them when the cache size is 0, because the user cannot change this (must restart app to change it). 
* fix(nodes): do not use double-underscores in cache service

* Thread lock for cache

* Making cache LRU

* Bug fixes

* bugfix

* Switching to one Lock and OrderedDict cache

* Removing unused imports

* Move lock cache instance

* Addressing PR comments

---------

Co-authored-by: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Co-authored-by: Martin Kristiansen
---
 .../invocation_cache_memory.py | 140 ++++++++++--------
 1 file changed, 78 insertions(+), 62 deletions(-)

diff --git a/invokeai/app/services/invocation_cache/invocation_cache_memory.py b/invokeai/app/services/invocation_cache/invocation_cache_memory.py
index be07029f4d..b40243f285 100644
--- a/invokeai/app/services/invocation_cache/invocation_cache_memory.py
+++ b/invokeai/app/services/invocation_cache/invocation_cache_memory.py
@@ -1,4 +1,7 @@
-from queue import Queue
+from collections import OrderedDict
+from dataclasses import dataclass, field
+from threading import Lock
+from time import time
 from typing import Optional, Union
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
@@ -7,22 +10,28 @@ from invokeai.app.services.invocation_cache.invocation_cache_common import Invoc
 from invokeai.app.services.invoker import Invoker
 
 
+@dataclass(order=True)
+class CachedItem:
+    invocation_output: BaseInvocationOutput = field(compare=False)
+    invocation_output_json: str = field(compare=False)
+
+
 class MemoryInvocationCache(InvocationCacheBase):
-    _cache: dict[Union[int, str], tuple[BaseInvocationOutput, str]]
+    _cache: OrderedDict[Union[int, str], CachedItem]
     _max_cache_size: int
     _disabled: bool
     _hits: int
    _misses: int
-    _cache_ids: Queue
     _invoker: Invoker
+    _lock: Lock
 
     def __init__(self, max_cache_size: int = 0) -> None:
-        self._cache = dict()
+        self._cache = OrderedDict()
         self._max_cache_size = max_cache_size
         self._disabled = False
         self._hits = 0
         self._misses = 0
-        self._cache_ids = Queue()
+        self._lock = Lock()
 
     def start(self, invoker: Invoker) -> None:
         self._invoker = invoker
@@ -32,80 +41,87 @@ class MemoryInvocationCache(InvocationCacheBase):
         self._invoker.services.latents.on_deleted(self._delete_by_match)
 
     def get(self, key: Union[int, str]) -> Optional[BaseInvocationOutput]:
-        if self._max_cache_size == 0 or self._disabled:
-            return
-
-        item = self._cache.get(key, None)
-        if item is not None:
-            self._hits += 1
-            return item[0]
-        self._misses += 1
+        with self._lock:
+            if self._max_cache_size == 0 or self._disabled:
+                return None
+            item = self._cache.get(key, None)
+            if item is not None:
+                self._hits += 1
+                self._cache.move_to_end(key)
+                return item.invocation_output
+            self._misses += 1
+            return None
 
     def save(self, key: Union[int, str], invocation_output: BaseInvocationOutput) -> None:
-        if self._max_cache_size == 0 or self._disabled:
-            return
+        with self._lock:
+            if self._max_cache_size == 0 or self._disabled or key in self._cache:
+                return
+            # If the cache is full, we need to remove the least used
+            number_to_delete = len(self._cache) + 1 - self._max_cache_size
+            self._delete_oldest_access(number_to_delete)
+            self._cache[key] = CachedItem(time(), invocation_output, invocation_output.json())
 
-        if key not in self._cache:
-            self._cache[key] = (invocation_output, invocation_output.json())
-            self._cache_ids.put(key)
-            if self._cache_ids.qsize() > self._max_cache_size:
-                try:
-                    self._cache.pop(self._cache_ids.get())
-                except KeyError:
-                    # this means the cache_ids are somehow out of sync w/ the cache
-                    pass
+    def _delete_oldest_access(self, number_to_delete: int) -> None:
+        number_to_delete = min(number_to_delete, len(self._cache))
+        for _ in range(number_to_delete):
+            self._cache.popitem(last=False)
 
-    def delete(self, key: Union[int, str]) -> None:
+    def _delete(self, key: Union[int, str]) -> None:
         if self._max_cache_size == 0:
             return
-
         if key in self._cache:
             del self._cache[key]
 
+    def delete(self, key: Union[int, str]) -> None:
+        with self._lock:
+            return self._delete(key)
+
     def clear(self, *args, **kwargs) -> None:
-        if self._max_cache_size == 0:
-            return
+        with self._lock:
+            if self._max_cache_size == 0:
+                return
+            self._cache.clear()
+            self._misses = 0
+            self._hits = 0
 
-        self._cache.clear()
-        self._cache_ids = Queue()
-        self._misses = 0
-        self._hits = 0
-
-    def create_key(self, invocation: BaseInvocation) -> int:
+    @staticmethod
+    def create_key(invocation: BaseInvocation) -> int:
         return hash(invocation.json(exclude={"id"}))
 
     def disable(self) -> None:
-        if self._max_cache_size == 0:
-            return
-        self._disabled = True
+        with self._lock:
+            if self._max_cache_size == 0:
+                return
+            self._disabled = True
 
     def enable(self) -> None:
-        if self._max_cache_size == 0:
-            return
-        self._disabled = False
+        with self._lock:
+            if self._max_cache_size == 0:
+                return
+            self._disabled = False
 
     def get_status(self) -> InvocationCacheStatus:
-        return InvocationCacheStatus(
-            hits=self._hits,
-            misses=self._misses,
-            enabled=not self._disabled and self._max_cache_size > 0,
-            size=len(self._cache),
-            max_size=self._max_cache_size,
-        )
+        with self._lock:
+            return InvocationCacheStatus(
+                hits=self._hits,
+                misses=self._misses,
+                enabled=not self._disabled and self._max_cache_size > 0,
+                size=len(self._cache),
+                max_size=self._max_cache_size,
+            )
 
     def _delete_by_match(self, to_match: str) -> None:
-        if self._max_cache_size == 0:
-            return
-
-        keys_to_delete = set()
-        for key, value_tuple in self._cache.items():
-            if to_match in value_tuple[1]:
-                keys_to_delete.add(key)
-
-        if not keys_to_delete:
-            return
-
-        for key in keys_to_delete:
-            self.delete(key)
-
-        self._invoker.services.logger.debug(f"Deleted {len(keys_to_delete)} cached invocation outputs for {to_match}")
+        with self._lock:
+            if self._max_cache_size == 0:
+                return
+            keys_to_delete = set()
+            for key, cached_item in self._cache.items():
+                if to_match in cached_item.invocation_output_json:
+                    keys_to_delete.add(key)
+            if not keys_to_delete:
+                return
+            for key in keys_to_delete:
+                self._delete(key)
+            self._invoker.services.logger.debug(
                f"Deleted {len(keys_to_delete)} cached invocation outputs for {to_match}"
+            )
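
The cache rewrite in PATCH 7/7 above comes down to the standard `OrderedDict` LRU idiom guarded by a single `threading.Lock`: `move_to_end()` marks an entry as most recently used on a cache hit, and `popitem(last=False)` evicts from the least recently used end when the cache is full. The sketch below is not part of the patch series; it is a minimal, self-contained illustration of that idiom, and the `LRUCache` class name, the string key/value types, and the `max_size` default are invented for the example.

```python
from collections import OrderedDict
from threading import Lock
from typing import Optional


class LRUCache:
    """Minimal sketch of the OrderedDict + Lock pattern used in the diff above."""

    def __init__(self, max_size: int = 4) -> None:
        self._cache = OrderedDict()  # keys ordered oldest -> most recently used
        self._max_size = max_size
        self._lock = Lock()

    def get(self, key: str) -> Optional[str]:
        with self._lock:
            value = self._cache.get(key)
            if value is not None:
                # A hit marks the entry as most recently used.
                self._cache.move_to_end(key)
            return value

    def put(self, key: str, value: str) -> None:
        with self._lock:
            if key in self._cache:
                return  # mirror the patch: existing keys are left untouched
            # Evict from the least recently used end before inserting.
            while len(self._cache) >= self._max_size:
                self._cache.popitem(last=False)
            self._cache[key] = value


if __name__ == "__main__":
    cache = LRUCache(max_size=2)
    cache.put("a", "1")
    cache.put("b", "2")
    cache.get("a")       # "a" is now the most recently used entry
    cache.put("c", "3")  # evicts "b", the least recently used
    print(list(cache._cache.keys()))  # ['a', 'c']
```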