Merge branch 'main' into feat/taesd

Kent Keirsey 2023-09-15 12:19:19 -04:00 committed by GitHub
commit afe9756667
17 changed files with 138 additions and 44 deletions


@ -34,12 +34,9 @@ body:
id: whatisexpected
attributes:
label: What should this feature add?
description: Please try to explain the functionality this feature should add
description: Explain the functionality this feature should add. Feature requests should be for single features. Please create multiple requests if you want to request multiple features.
placeholder: |
Instead of one huge text field, it would be nice to have forms for bug-reports, feature-requests, ...
Great benefits with automatic labeling, assigning and other functionalitys not available in that form
via old-fashioned markdown-templates. I would also love to see the use of a moderator bot 🤖 like
https://github.com/marketplace/actions/issue-moderator-with-commands to auto close old issues and other things
I'd like a button that creates an image of banana sushi every time I press it. Each image should be different. There should be a toggle next to the button that enables strawberry mode, in which the images are of strawberry sushi instead.
validations:
required: true


@ -335,8 +335,8 @@ class ImageResizeInvocation(BaseInvocation):
"""Resizes an image to specific dimensions"""
image: ImageField = InputField(description="The image to resize")
width: int = InputField(default=512, ge=64, multiple_of=8, description="The width to resize to (px)")
height: int = InputField(default=512, ge=64, multiple_of=8, description="The height to resize to (px)")
width: int = InputField(default=512, gt=0, description="The width to resize to (px)")
height: int = InputField(default=512, gt=0, description="The height to resize to (px)")
resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode")
metadata: Optional[CoreMetadata] = InputField(
default=None, description=FieldDescriptions.core_metadata, ui_hidden=True
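
The resize hunk relaxes the size validation: the old width/height fields required at least 64 px and a multiple of 8, while the new ones only require a positive value. The same before/after constraint, expressed with zod purely as an illustration (the backend itself uses pydantic-style InputField keyword constraints such as ge, gt and multiple_of, not zod):

import { z } from 'zod';

// Old constraint: at least 64 px and a multiple of 8.
const oldSize = z.number().int().gte(64).multipleOf(8);
// New constraint: any positive integer.
const newSize = z.number().int().gt(0);

console.log(oldSize.safeParse(100).success); // false -- not a multiple of 8
console.log(oldSize.safeParse(40).success);  // false -- below 64
console.log(newSize.safeParse(100).success); // true
console.log(newSize.safeParse(40).success);  // true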


@ -65,6 +65,9 @@ from .compel import ConditioningField
from .controlnet_image_processors import ControlField
from .model import ModelInfo, UNetField, VaeField
if choose_torch_device() == torch.device("mps"):
from torch import mps
DEFAULT_PRECISION = choose_precision(choose_torch_device())
@ -543,6 +546,8 @@ class DenoiseLatentsInvocation(BaseInvocation):
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
result_latents = result_latents.to("cpu")
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
context.services.latents.save(name, result_latents)
@ -614,6 +619,8 @@ class LatentsToImageInvocation(BaseInvocation):
# clear memory as vae decode can request a lot
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
with torch.inference_mode():
# copied from diffusers pipeline
@ -626,6 +633,8 @@ class LatentsToImageInvocation(BaseInvocation):
image = VaeImageProcessor.numpy_to_pil(np_image)[0]
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
image_dto = context.services.images.create(
image=image,
@ -685,6 +694,8 @@ class ResizeLatentsInvocation(BaseInvocation):
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
resized_latents = resized_latents.to("cpu")
torch.cuda.empty_cache()
if device == torch.device("mps"):
mps.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
# context.services.latents.set(name, resized_latents)
@ -721,6 +732,8 @@ class ScaleLatentsInvocation(BaseInvocation):
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
resized_latents = resized_latents.to("cpu")
torch.cuda.empty_cache()
if device == torch.device("mps"):
mps.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
# context.services.latents.set(name, resized_latents)
@ -888,6 +901,8 @@ class BlendLatentsInvocation(BaseInvocation):
# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
blended_latents = blended_latents.to("cpu")
torch.cuda.empty_cache()
if device == torch.device("mps"):
mps.empty_cache()
name = f"{context.graph_execution_state_id}__{self.id}"
# context.services.latents.set(name, resized_latents)


@ -29,8 +29,12 @@ import torch
import invokeai.backend.util.logging as logger
from ..util.devices import choose_torch_device
from .models import BaseModelType, ModelBase, ModelType, SubModelType
if choose_torch_device() == torch.device("mps"):
from torch import mps
# Maximum size of the cache, in gigs
# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
DEFAULT_MAX_CACHE_SIZE = 6.0
@ -406,6 +410,8 @@ class ModelCache(object):
gc.collect()
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}")
@ -426,6 +432,8 @@ class ModelCache(object):
gc.collect()
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
def _local_model_hash(self, model_path: Union[str, Path]) -> str:
sha = hashlib.sha256()


@ -772,11 +772,13 @@ diffusers.models.controlnet.ControlNetModel = ControlNetModel
# NOTE: with this patch, torch.compile crashes on 2.0 torch(already fixed in nightly)
# https://github.com/huggingface/diffusers/pull/4315
# https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/lora.py#L96C18-L96C18
def new_LoRACompatibleConv_forward(self, x):
def new_LoRACompatibleConv_forward(self, hidden_states, scale: float = 1.0):
if self.lora_layer is None:
return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x)
return super(diffusers.models.lora.LoRACompatibleConv, self).forward(hidden_states)
else:
return super(diffusers.models.lora.LoRACompatibleConv, self).forward(x) + self.lora_layer(x)
return super(diffusers.models.lora.LoRACompatibleConv, self).forward(hidden_states) + (
scale * self.lora_layer(hidden_states)
)
diffusers.models.lora.LoRACompatibleConv.forward = new_LoRACompatibleConv_forward


@ -12,29 +12,26 @@ import { languageSelector } from 'features/system/store/systemSelectors';
import InvokeTabs from 'features/ui/components/InvokeTabs';
import i18n from 'i18n';
import { size } from 'lodash-es';
import { ReactNode, memo, useCallback, useEffect } from 'react';
import { memo, useCallback, useEffect } from 'react';
import { ErrorBoundary } from 'react-error-boundary';
import { usePreselectedImage } from '../../features/parameters/hooks/usePreselectedImage';
import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import GlobalHotkeys from './GlobalHotkeys';
import Toaster from './Toaster';
import { useStore } from '@nanostores/react';
import { $headerComponent } from 'app/store/nanostores/headerComponent';
const DEFAULT_CONFIG = {};
interface Props {
config?: PartialAppConfig;
headerComponent?: ReactNode;
selectedImage?: {
imageName: string;
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
};
}
const App = ({
config = DEFAULT_CONFIG,
headerComponent,
selectedImage,
}: Props) => {
const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
const language = useAppSelector(languageSelector);
const logger = useLogger('system');
@ -65,6 +62,8 @@ const App = ({
handlePreselectedImage(selectedImage);
}, [handlePreselectedImage, selectedImage]);
const headerComponent = useStore($headerComponent);
return (
<ErrorBoundary
onReset={handleReset}


@ -15,6 +15,8 @@ import { socketMiddleware } from 'services/events/middleware';
import Loading from '../../common/components/Loading/Loading';
import '../../i18n';
import AppDndContext from '../../features/dnd/components/AppDndContext';
import { $customStarUI, CustomStarUi } from 'app/store/nanostores/customStarUI';
import { $headerComponent } from 'app/store/nanostores/headerComponent';
const App = lazy(() => import('./App'));
const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider'));
@ -30,6 +32,7 @@ interface Props extends PropsWithChildren {
imageName: string;
action: 'sendToImg2Img' | 'sendToCanvas' | 'useAllParameters';
};
customStarUi?: CustomStarUi;
}
const InvokeAIUI = ({
@ -40,6 +43,7 @@ const InvokeAIUI = ({
middleware,
projectId,
selectedImage,
customStarUi,
}: Props) => {
useEffect(() => {
// configure API client token
@ -80,17 +84,33 @@ const InvokeAIUI = ({
};
}, [apiUrl, token, middleware, projectId]);
useEffect(() => {
if (customStarUi) {
$customStarUI.set(customStarUi);
}
return () => {
$customStarUI.set(undefined);
};
}, [customStarUi]);
useEffect(() => {
if (headerComponent) {
$headerComponent.set(headerComponent);
}
return () => {
$headerComponent.set(undefined);
};
}, [headerComponent]);
return (
<React.StrictMode>
<Provider store={store}>
<React.Suspense fallback={<Loading />}>
<ThemeLocaleProvider>
<AppDndContext>
<App
config={config}
headerComponent={headerComponent}
selectedImage={selectedImage}
/>
<App config={config} selectedImage={selectedImage} />
</AppDndContext>
</ThemeLocaleProvider>
</React.Suspense>


@ -0,0 +1,14 @@
import { MenuItemProps } from '@chakra-ui/react';
import { atom } from 'nanostores';
export type CustomStarUi = {
on: {
icon: MenuItemProps['icon'];
text: string;
};
off: {
icon: MenuItemProps['icon'];
text: string;
};
};
export const $customStarUI = atom<CustomStarUi | undefined>(undefined);
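
$customStarUI deliberately defaults to undefined: the gallery components further down read it with useStore and fall back to the stock MdStar/MdStarBorder icons and labels whenever the host app has not installed an override. A minimal sketch of that read-with-fallback pattern (the StarMenuItem component and onStar prop are illustrative, not part of the codebase):

import { MenuItem } from '@chakra-ui/react';
import { useStore } from '@nanostores/react';
import { MdStar } from 'react-icons/md';
import { $customStarUI } from 'app/store/nanostores/customStarUI';

// Uses the host app's custom icon/text when provided, else the defaults.
const StarMenuItem = ({ onStar }: { onStar: () => void }) => {
  const customStarUi = useStore($customStarUI);
  return (
    <MenuItem
      icon={customStarUi ? customStarUi.on.icon : <MdStar />}
      onClickCapture={onStar}
    >
      {customStarUi ? customStarUi.on.text : 'Star Image'}
    </MenuItem>
  );
};

export default StarMenuItem;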


@ -0,0 +1,4 @@
import { atom } from 'nanostores';
import { ReactNode } from 'react';
export const $headerComponent = atom<ReactNode | undefined>(undefined);


@ -0,0 +1,3 @@
/**
* For non-serializable data that needs to be available throughout the app, or when redux is not appropriate, use nanostores.
*/
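
This note is the rationale for the two new atoms above: a header ReactNode and custom menu icons are not serializable, so rather than threading them through props or redux they are published to a nanostores atom and read with useStore wherever needed. A minimal sketch of the pattern used in InvokeAIUI and App -- mirror the prop into the atom in an effect, clear it on unmount, subscribe with useStore on the read side (the $header, Page and Root names are illustrative):

import { atom } from 'nanostores';
import { useStore } from '@nanostores/react';
import { ReactNode, useEffect } from 'react';

// Module-level atom holding a non-serializable value outside redux.
export const $header = atom<ReactNode | undefined>(undefined);

// Read side: any component subscribes with useStore and re-renders
// whenever the atom changes -- no prop drilling required.
const Page = () => {
  const header = useStore($header);
  return <header>{header ?? 'default header'}</header>;
};

// Write side: the outer component mirrors its prop into the atom and
// resets it on unmount so a stale value never survives a remount.
export const Root = ({ header }: { header?: ReactNode }) => {
  useEffect(() => {
    if (header) {
      $header.set(header);
    }
    return () => {
      $header.set(undefined);
    };
  }, [header]);
  return <Page />;
};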


@ -86,10 +86,7 @@ export const store = configureStore({
.concat(autoBatchEnhancer());
},
middleware: (getDefaultMiddleware) =>
getDefaultMiddleware({
immutableCheck: false,
serializableCheck: false,
})
getDefaultMiddleware({ immutableCheck: false })
.concat(api.middleware)
.concat(dynamicMiddlewares)
.prepend(listenerMiddleware.middleware),
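
The store change drops serializableCheck: false, so redux-toolkit's default serializability check is active again in development; only the immutability check stays disabled. That is consistent with the nanostores move above: with React elements kept out of redux state, the check no longer has to be silenced. A hedged sketch of the resulting configuration, with a placeholder reducer standing in for the app's real slices:

import { combineReducers, configureStore } from '@reduxjs/toolkit';

// Placeholder reducer -- stands in for the app's real slices.
const rootReducer = combineReducers({
  counter: (state: number = 0) => state,
});

export const store = configureStore({
  reducer: rootReducer,
  middleware: (getDefaultMiddleware) =>
    // Skip the expensive immutability walk over the whole state tree,
    // but keep the default serializability check so non-serializable
    // values accidentally placed in actions or state warn in dev.
    getDefaultMiddleware({ immutableCheck: false }),
});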


@ -154,6 +154,8 @@ const IAICanvas = () => {
resizeObserver.observe(containerRef.current);
dispatch(canvasResized(containerRef.current.getBoundingClientRect()));
return () => {
resizeObserver.disconnect();
};
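
The canvas hunk adds the previously missing cleanup: the ResizeObserver created in the effect is now disconnected on unmount instead of outliving the component. A minimal sketch of the observe/disconnect pairing, assuming a ref on the element being measured (component and state names are illustrative):

import { useEffect, useRef, useState } from 'react';

const MeasuredBox = () => {
  const containerRef = useRef<HTMLDivElement>(null);
  const [width, setWidth] = useState(0);

  useEffect(() => {
    if (!containerRef.current) {
      return;
    }
    // Report the element's size whenever it changes.
    const resizeObserver = new ResizeObserver((entries) => {
      setWidth(entries[0].contentRect.width);
    });
    resizeObserver.observe(containerRef.current);
    // Without this cleanup the observer and its closure keep running
    // after unmount -- the leak this hunk fixes.
    return () => {
      resizeObserver.disconnect();
    };
  }, []);

  return <div ref={containerRef}>{width}px wide</div>;
};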


@ -8,7 +8,7 @@ const calculateScale = (
const scaleX = (containerWidth * padding) / contentWidth;
const scaleY = (containerHeight * padding) / contentHeight;
const scaleFit = Math.min(1, Math.min(scaleX, scaleY));
return scaleFit;
return scaleFit ? scaleFit : 1;
};
export default calculateScale;
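
The scaleFit ? scaleFit : 1 guard handles falsy results: a zero-sized container makes one of the ratios 0, and zero-sized container and content together produce NaN from 0 / 0; previously either value was returned as the scale. Falling back to 1 keeps the stage at its natural size. A small worked example with an assumed signature matching the ratios above (the 0.95 padding value is illustrative):

// Assumed signature, matching how the ratios are computed above.
const calculateScale = (
  containerWidth: number,
  containerHeight: number,
  contentWidth: number,
  contentHeight: number,
  padding: number
): number => {
  const scaleX = (containerWidth * padding) / contentWidth;
  const scaleY = (containerHeight * padding) / contentHeight;
  const scaleFit = Math.min(1, Math.min(scaleX, scaleY));
  // 0 (empty container) and NaN (0 / 0) are both falsy -> fall back to 1.
  return scaleFit ? scaleFit : 1;
};

console.log(calculateScale(800, 600, 512, 512, 0.95)); // 1 -- clamped, content already fits
console.log(calculateScale(0, 0, 512, 512, 0.95));     // 1 instead of 0
console.log(calculateScale(0, 0, 0, 0, 0.95));         // 1 instead of NaN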


@ -1,4 +1,6 @@
import { MenuItem } from '@chakra-ui/react';
import { useStore } from '@nanostores/react';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import {
imagesToChangeSelected,
@ -16,6 +18,7 @@ import {
const MultipleSelectionMenuItems = () => {
const dispatch = useAppDispatch();
const selection = useAppSelector((state) => state.gallery.selection);
const customStarUi = useStore($customStarUI);
const [starImages] = useStarImagesMutation();
const [unstarImages] = useUnstarImagesMutation();
@ -49,15 +52,18 @@ const MultipleSelectionMenuItems = () => {
<>
{areAllStarred && (
<MenuItem
icon={<MdStarBorder />}
icon={customStarUi ? customStarUi.on.icon : <MdStarBorder />}
onClickCapture={handleUnstarSelection}
>
Unstar All
{customStarUi ? customStarUi.off.text : `Unstar All`}
</MenuItem>
)}
{(areAllUnstarred || (!areAllStarred && !areAllUnstarred)) && (
<MenuItem icon={<MdStar />} onClickCapture={handleStarSelection}>
Star All
<MenuItem
icon={customStarUi ? customStarUi.on.icon : <MdStar />}
onClickCapture={handleStarSelection}
>
{customStarUi ? customStarUi.on.text : `Star All`}
</MenuItem>
)}
<MenuItem icon={<FaFolder />} onClickCapture={handleChangeBoard}>


@ -1,5 +1,7 @@
import { Flex, MenuItem, Spinner } from '@chakra-ui/react';
import { useStore } from '@nanostores/react';
import { useAppToaster } from 'app/components/Toaster';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import {
@ -7,6 +9,7 @@ import {
isModalOpenChanged,
} from 'features/changeBoardModal/store/slice';
import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice';
import { workflowLoadRequested } from 'features/nodes/store/actions';
import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters';
import { initialImageSelected } from 'features/parameters/store/actions';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
@ -32,9 +35,9 @@ import {
useUnstarImagesMutation,
} from 'services/api/endpoints/images';
import { ImageDTO } from 'services/api/types';
import { sentImageToCanvas, sentImageToImg2Img } from '../../store/actions';
import { workflowLoadRequested } from 'features/nodes/store/actions';
import { configSelector } from '../../../system/store/configSelectors';
import { sentImageToCanvas, sentImageToImg2Img } from '../../store/actions';
import { flushSync } from 'react-dom';
type SingleSelectionMenuItemsProps = {
imageDTO: ImageDTO;
@ -50,6 +53,7 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled;
const { shouldFetchMetadataFromApi } = useAppSelector(configSelector);
const customStarUi = useStore($customStarUI);
const { metadata, workflow, isLoading } = useGetImageMetadataFromFileQuery(
{ image: imageDTO, shouldFetchMetadataFromApi },
@ -112,8 +116,10 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
const handleSendToCanvas = useCallback(() => {
dispatch(sentImageToCanvas());
flushSync(() => {
dispatch(setActiveTab('unifiedCanvas'));
});
dispatch(setInitialCanvasImage(imageDTO));
dispatch(setActiveTab('unifiedCanvas'));
toaster({
title: t('toast.sentToUnifiedCanvas'),
@ -225,12 +231,18 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => {
Change Board
</MenuItem>
{imageDTO.starred ? (
<MenuItem icon={<MdStar />} onClickCapture={handleUnstarImage}>
Unstar Image
<MenuItem
icon={customStarUi ? customStarUi.off.icon : <MdStar />}
onClickCapture={handleUnstarImage}
>
{customStarUi ? customStarUi.off.text : `Unstar Image`}
</MenuItem>
) : (
<MenuItem icon={<MdStarBorder />} onClickCapture={handleStarImage}>
Star Image
<MenuItem
icon={customStarUi ? customStarUi.on.icon : <MdStarBorder />}
onClickCapture={handleStarImage}
>
{customStarUi ? customStarUi.on.text : `Star Image`}
</MenuItem>
)}
<MenuItem
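
Besides the customStarUi fallbacks, the notable change in this file is handleSendToCanvas: the tab switch is now wrapped in flushSync so React commits the unified-canvas tab synchronously before setInitialCanvasImage is dispatched, instead of dispatching the image into a tab that has not mounted yet. A hedged sketch of that ordering trick, with placeholder action creators standing in for the app's real ones:

import { useCallback } from 'react';
import { flushSync } from 'react-dom';
import { useDispatch } from 'react-redux';

// Placeholder action creators -- stand-ins for the app's real ones.
const setActiveTab = (tab: string) => ({ type: 'ui/setActiveTab', payload: tab });
const setInitialCanvasImage = (imageName: string) => ({
  type: 'canvas/setInitialCanvasImage',
  payload: imageName,
});

export const useSendToCanvas = (imageName: string) => {
  const dispatch = useDispatch();
  return useCallback(() => {
    // flushSync forces React to flush the updates triggered inside the
    // callback synchronously, so the canvas tab is mounted before the
    // image is handed to it on the next dispatch.
    flushSync(() => {
      dispatch(setActiveTab('unifiedCanvas'));
    });
    dispatch(setInitialCanvasImage(imageName));
  }, [dispatch, imageName]);
};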


@ -1,4 +1,6 @@
import { Box, Flex } from '@chakra-ui/react';
import { useStore } from '@nanostores/react';
import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIDndImage from 'common/components/IAIDndImage';
import IAIFillSkeleton from 'common/components/IAIFillSkeleton';
@ -10,6 +12,7 @@ import {
} from 'features/dnd/types';
import { useMultiselect } from 'features/gallery/hooks/useMultiselect';
import { MouseEvent, memo, useCallback, useMemo, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { FaTrash } from 'react-icons/fa';
import { MdStar, MdStarBorder } from 'react-icons/md';
import {
@ -18,7 +21,6 @@ import {
useUnstarImagesMutation,
} from 'services/api/endpoints/images';
import IAIDndImageIcon from '../../../../common/components/IAIDndImageIcon';
import { useTranslation } from 'react-i18next';
interface HoverableImageProps {
imageName: string;
@ -34,6 +36,8 @@ const GalleryImage = (props: HoverableImageProps) => {
const { handleClick, isSelected, selection, selectionCount } =
useMultiselect(imageDTO);
const customStarUi = useStore($customStarUI);
const handleDelete = useCallback(
(e: MouseEvent<HTMLButtonElement>) => {
e.stopPropagation();
@ -91,12 +95,22 @@ const GalleryImage = (props: HoverableImageProps) => {
const starIcon = useMemo(() => {
if (imageDTO?.starred) {
return <MdStar size="20" />;
return customStarUi ? customStarUi.on.icon : <MdStar size="20" />;
}
if (!imageDTO?.starred && isHovered) {
return <MdStarBorder size="20" />;
return customStarUi ? customStarUi.off.icon : <MdStarBorder size="20" />;
}
}, [imageDTO?.starred, isHovered]);
}, [imageDTO?.starred, isHovered, customStarUi]);
const starTooltip = useMemo(() => {
if (imageDTO?.starred) {
return customStarUi ? customStarUi.off.text : 'Unstar';
}
if (!imageDTO?.starred) {
return customStarUi ? customStarUi.on.text : 'Star';
}
return '';
}, [imageDTO?.starred, customStarUi]);
if (!imageDTO) {
return <IAIFillSkeleton />;
@ -131,7 +145,7 @@ const GalleryImage = (props: HoverableImageProps) => {
<IAIDndImageIcon
onClick={toggleStarredState}
icon={starIcon}
tooltip={imageDTO.starred ? 'Unstar' : 'Star'}
tooltip={starTooltip}
/>
{isHovered && shift && (


@ -26,7 +26,8 @@ export const receivedOpenAPISchema = createAsyncThunk(
'nodes/receivedOpenAPISchema',
async (_, { rejectWithValue }) => {
try {
const response = await fetch(`openapi.json`);
const url = [window.location.origin, 'openapi.json'].join('/');
const response = await fetch(url);
const openAPISchema = await response.json();
const schemaJSON = JSON.parse(