Merge branch 'main' into install/refactor-configure-and-model-select

Lincoln Stein 2023-02-16 21:51:15 -05:00
commit f3351a5e47
86 changed files with 707 additions and 318 deletions


@@ -5,8 +5,17 @@ on:
       - 'main'
       - 'update/ci/docker/*'
       - 'update/docker/*'
+    paths:
+      - '/pyproject.toml'
+      - '/ldm/**'
+      - '/invokeai/backend/**'
+      - '/invokeai/configs/**'
+      - '/invokeai/frontend/dist/**'
+      - '/docker/Dockerfile'
     tags:
       - 'v*.*.*'
+  workflow_dispatch:
 jobs:
   docker:


@ -0,0 +1,67 @@
name: Test invoke.py pip
on:
pull_request:
paths-ignore:
- '/pyproject.toml'
- '/ldm/**'
- '/invokeai/backend/**'
- '/invokeai/configs/**'
- '/invokeai/frontend/dist/**'
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
matrix:
if: github.event.pull_request.draft == false
strategy:
matrix:
python-version:
# - '3.9'
- '3.10'
pytorch:
# - linux-cuda-11_6
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
# - windows-cuda-11_6
# - windows-cuda-11_7
include:
# - pytorch: linux-cuda-11_6
# os: ubuntu-22.04
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $GITHUB_ENV
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- pytorch: linux-cpu
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- pytorch: macos-default
os: macOS-12
github-env: $GITHUB_ENV
- pytorch: windows-cpu
os: windows-2022
github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_6
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
# github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_7
# os: windows-2022
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
# github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
steps:
- run: 'echo "No build required"'


@@ -3,7 +3,19 @@ on:
   push:
     branches:
       - 'main'
+    paths:
+      - '/pyproject.toml'
+      - '/ldm/**'
+      - '/invokeai/backend/**'
+      - '/invokeai/configs/**'
+      - '/invokeai/frontend/dist/**'
   pull_request:
+    paths:
+      - '/pyproject.toml'
+      - '/ldm/**'
+      - '/invokeai/backend/**'
+      - '/invokeai/configs/**'
+      - '/invokeai/frontend/dist/**'
     types:
       - 'ready_for_review'
       - 'opened'


@ -13,7 +13,7 @@
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github [CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml [CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord [discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy [discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github [github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github


@ -3,3 +3,4 @@ dist/
node_modules/ node_modules/
patches/ patches/
public/ public/
stats.html


@ -3,3 +3,4 @@ dist/
node_modules/ node_modules/
patches/ patches/
public/ public/
stats.html

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -5,8 +5,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title> <title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" /> <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-6b9f1e33.js"></script> <script type="module" crossorigin src="./assets/index-12bd70ca.js"></script>
<link rel="stylesheet" href="./assets/index-fecb6dd4.css"> <link rel="stylesheet" href="./assets/index-c1af841f.css">
</head> </head>
<body> <body>


@ -3,8 +3,8 @@ import { Flex, Spinner } from '@chakra-ui/react';
const Loading = () => { const Loading = () => {
return ( return (
<Flex <Flex
width={'100vw'} width="100vw"
height={'100vh'} height="100vh"
alignItems="center" alignItems="center"
justifyContent="center" justifyContent="center"
> >


@ -13,7 +13,7 @@ const GuideIcon = forwardRef(
({ feature, icon = MdHelp }: GuideIconProps, ref) => ( ({ feature, icon = MdHelp }: GuideIconProps, ref) => (
<GuidePopover feature={feature}> <GuidePopover feature={feature}>
<Box ref={ref}> <Box ref={ref}>
<Icon marginBottom={'-.15rem'} as={icon} /> <Icon marginBottom="-.15rem" as={icon} />
</Box> </Box>
</GuidePopover> </GuidePopover>
) )


@ -29,15 +29,15 @@ const GuidePopover = ({ children, feature }: GuideProps) => {
if (!shouldDisplayGuides) return null; if (!shouldDisplayGuides) return null;
return ( return (
<Popover trigger={'hover'}> <Popover trigger="hover">
<PopoverTrigger> <PopoverTrigger>
<Box>{children}</Box> <Box>{children}</Box>
</PopoverTrigger> </PopoverTrigger>
<PopoverContent <PopoverContent
className={`guide-popover-content`} className="guide-popover-content"
maxWidth="400px" maxWidth="400px"
onClick={(e) => e.preventDefault()} onClick={(e) => e.preventDefault()}
cursor={'initial'} cursor="initial"
> >
<PopoverArrow className="guide-popover-arrow" /> <PopoverArrow className="guide-popover-arrow" />
<div className="guide-popover-guide-content">{text}</div> <div className="guide-popover-guide-content">{text}</div>


@ -169,7 +169,7 @@ export default function IAISlider(props: IAIFullSliderProps) {
{label} {label}
</FormLabel> </FormLabel>
<HStack w={'100%'} gap={2} alignItems="center"> <HStack w="100%" gap={2} alignItems="center">
<Slider <Slider
aria-label={label} aria-label={label}
value={value} value={value}
@ -259,9 +259,9 @@ export default function IAISlider(props: IAIFullSliderProps) {
{withReset && ( {withReset && (
<IAIIconButton <IAIIconButton
size={'sm'} size="sm"
aria-label={'Reset'} aria-label="Reset"
tooltip={'Reset'} tooltip="Reset"
icon={<BiReset />} icon={<BiReset />}
onClick={handleResetDisable} onClick={handleResetDisable}
isDisabled={isResetDisabled} isDisabled={isResetDisabled}


@ -24,13 +24,13 @@ const ImageUploadOverlay = (props: ImageUploadOverlayProps) => {
<div className="dropzone-container"> <div className="dropzone-container">
{isDragAccept && ( {isDragAccept && (
<div className="dropzone-overlay is-drag-accept"> <div className="dropzone-overlay is-drag-accept">
<Heading size={'lg'}>Upload Image{overlaySecondaryText}</Heading> <Heading size="lg">Upload Image{overlaySecondaryText}</Heading>
</div> </div>
)} )}
{isDragReject && ( {isDragReject && (
<div className="dropzone-overlay is-drag-reject"> <div className="dropzone-overlay is-drag-reject">
<Heading size={'lg'}>Invalid Upload</Heading> <Heading size="lg">Invalid Upload</Heading>
<Heading size={'md'}>Must be single JPEG or PNG image</Heading> <Heading size="md">Must be single JPEG or PNG image</Heading>
</div> </div>
)} )}
</div> </div>


@ -22,7 +22,7 @@ const ImageUploaderButton = (props: ImageUploaderButtonProps) => {
> >
<div className="image-upload-button"> <div className="image-upload-button">
<FaUpload /> <FaUpload />
<Heading size={'lg'}>Click or Drag and Drop</Heading> <Heading size="lg">Click or Drag and Drop</Heading>
</div> </div>
</div> </div>
); );


@ -17,7 +17,7 @@ const ClearCanvasHistoryButtonModal = () => {
acceptCallback={() => dispatch(clearCanvasHistory())} acceptCallback={() => dispatch(clearCanvasHistory())}
acceptButtonText={t('unifiedcanvas:clearHistory')} acceptButtonText={t('unifiedcanvas:clearHistory')}
triggerComponent={ triggerComponent={
<IAIButton size={'sm'} leftIcon={<FaTrash />} isDisabled={isStaging}> <IAIButton size="sm" leftIcon={<FaTrash />} isDisabled={isStaging}>
{t('unifiedcanvas:clearCanvasHistory')} {t('unifiedcanvas:clearCanvasHistory')}
</IAIButton> </IAIButton>
} }


@ -140,7 +140,7 @@ const IAICanvas = () => {
<Stage <Stage
tabIndex={-1} tabIndex={-1}
ref={canvasStageRefCallback} ref={canvasStageRefCallback}
className={'inpainting-canvas-stage'} className="inpainting-canvas-stage"
style={{ style={{
...(stageCursor ? { cursor: stageCursor } : {}), ...(stageCursor ? { cursor: stageCursor } : {}),
}} }}
@ -165,19 +165,19 @@ const IAICanvas = () => {
onWheel={handleWheel} onWheel={handleWheel}
draggable={(tool === 'move' || isStaging) && !isModifyingBoundingBox} draggable={(tool === 'move' || isStaging) && !isModifyingBoundingBox}
> >
<Layer id={'grid'} visible={shouldShowGrid}> <Layer id="grid" visible={shouldShowGrid}>
<IAICanvasGrid /> <IAICanvasGrid />
</Layer> </Layer>
<Layer <Layer
id={'base'} id="base"
ref={canvasBaseLayerRefCallback} ref={canvasBaseLayerRefCallback}
listening={false} listening={false}
imageSmoothingEnabled={false} imageSmoothingEnabled={false}
> >
<IAICanvasObjectRenderer /> <IAICanvasObjectRenderer />
</Layer> </Layer>
<Layer id={'mask'} visible={isMaskEnabled} listening={false}> <Layer id="mask" visible={isMaskEnabled} listening={false}>
<IAICanvasMaskLines visible={true} listening={false} /> <IAICanvasMaskLines visible={true} listening={false} />
<IAICanvasMaskCompositer listening={false} /> <IAICanvasMaskCompositer listening={false} />
</Layer> </Layer>


@ -49,7 +49,7 @@ const IAICanvasBoundingBoxOverlay = () => {
offsetY={stageCoordinates.y / stageScale} offsetY={stageCoordinates.y / stageScale}
height={stageDimensions.height / stageScale} height={stageDimensions.height / stageScale}
width={stageDimensions.width / stageScale} width={stageDimensions.width / stageScale}
fill={'rgba(0,0,0,0.4)'} fill="rgba(0,0,0,0.4)"
listening={false} listening={false}
visible={shouldDarkenOutsideBoundingBox} visible={shouldDarkenOutsideBoundingBox}
/> />
@ -58,10 +58,10 @@ const IAICanvasBoundingBoxOverlay = () => {
y={boundingBoxCoordinates.y} y={boundingBoxCoordinates.y}
width={boundingBoxDimensions.width} width={boundingBoxDimensions.width}
height={boundingBoxDimensions.height} height={boundingBoxDimensions.height}
fill={'rgb(255,255,255)'} fill="rgb(255,255,255)"
listening={false} listening={false}
visible={shouldDarkenOutsideBoundingBox} visible={shouldDarkenOutsideBoundingBox}
globalCompositeOperation={'destination-out'} globalCompositeOperation="destination-out"
/> />
</Group> </Group>
); );


@ -163,10 +163,10 @@ const IAICanvasMaskCompositer = (props: IAICanvasMaskCompositerProps) => {
width={stageDimensions.width / stageScale} width={stageDimensions.width / stageScale}
fillPatternImage={fillPatternImage} fillPatternImage={fillPatternImage}
fillPatternOffsetY={!isNumber(offset) ? 0 : offset} fillPatternOffsetY={!isNumber(offset) ? 0 : offset}
fillPatternRepeat={'repeat'} fillPatternRepeat="repeat"
fillPatternScale={{ x: 1 / stageScale, y: 1 / stageScale }} fillPatternScale={{ x: 1 / stageScale, y: 1 / stageScale }}
listening={true} listening={true}
globalCompositeOperation={'source-in'} globalCompositeOperation="source-in"
{...rest} {...rest}
/> />
); );


@ -36,7 +36,7 @@ const IAICanvasLines = (props: InpaintingCanvasLinesProps) => {
<Line <Line
key={i} key={i}
points={line.points} points={line.points}
stroke={'rgb(0,0,0)'} // The lines can be any color, just need alpha > 0 stroke="rgb(0,0,0)" // The lines can be any color, just need alpha > 0
strokeWidth={line.strokeWidth * 2} strokeWidth={line.strokeWidth * 2}
tension={0} tension={0}
lineCap="round" lineCap="round"


@ -93,8 +93,8 @@ const IAICanvasObjectRenderer = () => {
y={obj.y} y={obj.y}
width={obj.width} width={obj.width}
height={obj.height} height={obj.height}
fill={'rgb(255, 255, 255)'} fill="rgb(255, 255, 255)"
globalCompositeOperation={'destination-out'} globalCompositeOperation="destination-out"
/> />
); );
} }


@ -67,7 +67,7 @@ const IAICanvasStagingArea = (props: Props) => {
width={width} width={width}
height={height} height={height}
strokeWidth={1} strokeWidth={1}
stroke={'white'} stroke="white"
strokeScaleEnabled={false} strokeScaleEnabled={false}
/> />
<Rect <Rect
@ -77,7 +77,7 @@ const IAICanvasStagingArea = (props: Props) => {
height={height} height={height}
dash={[4, 4]} dash={[4, 4]}
strokeWidth={1} strokeWidth={1}
stroke={'black'} stroke="black"
strokeScaleEnabled={false} strokeScaleEnabled={false}
/> />
</Group> </Group>


@ -114,11 +114,11 @@ const IAICanvasStagingAreaToolbar = () => {
return ( return (
<Flex <Flex
pos={'absolute'} pos="absolute"
bottom={'1rem'} bottom="1rem"
w={'100%'} w="100%"
align={'center'} align="center"
justify={'center'} justify="center"
filter="drop-shadow(0 0.5rem 1rem rgba(0,0,0))" filter="drop-shadow(0 0.5rem 1rem rgba(0,0,0))"
onMouseOver={handleMouseOver} onMouseOver={handleMouseOver}
onMouseOut={handleMouseOut} onMouseOut={handleMouseOut}


@ -172,7 +172,7 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
x={brushX} x={brushX}
y={brushY} y={brushY}
radius={radius} radius={radius}
stroke={'rgba(255,255,255,0.4)'} stroke="rgba(255,255,255,0.4)"
strokeWidth={strokeWidth * 2} strokeWidth={strokeWidth * 2}
strokeEnabled={true} strokeEnabled={true}
listening={false} listening={false}
@ -181,7 +181,7 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
x={brushX} x={brushX}
y={brushY} y={brushY}
radius={radius} radius={radius}
stroke={'rgba(0,0,0,1)'} stroke="rgba(0,0,0,1)"
strokeWidth={strokeWidth} strokeWidth={strokeWidth}
strokeEnabled={true} strokeEnabled={true}
listening={false} listening={false}
@ -192,14 +192,14 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
x={brushX} x={brushX}
y={brushY} y={brushY}
radius={dotRadius * 2} radius={dotRadius * 2}
fill={'rgba(255,255,255,0.4)'} fill="rgba(255,255,255,0.4)"
listening={false} listening={false}
/> />
<Circle <Circle
x={brushX} x={brushX}
y={brushY} y={brushY}
radius={dotRadius} radius={dotRadius}
fill={'rgba(0,0,0,1)'} fill="rgba(0,0,0,1)"
listening={false} listening={false}
/> />
</Group> </Group>


@ -269,12 +269,12 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => {
<Transformer <Transformer
anchorCornerRadius={3} anchorCornerRadius={3}
anchorDragBoundFunc={anchorDragBoundFunc} anchorDragBoundFunc={anchorDragBoundFunc}
anchorFill={'rgba(212,216,234,1)'} anchorFill="rgba(212,216,234,1)"
anchorSize={15} anchorSize={15}
anchorStroke={'rgb(42,42,42)'} anchorStroke="rgb(42,42,42)"
borderDash={[4, 4]} borderDash={[4, 4]}
borderEnabled={true} borderEnabled={true}
borderStroke={'black'} borderStroke="black"
draggable={false} draggable={false}
enabledAnchors={tool === 'move' ? undefined : []} enabledAnchors={tool === 'move' ? undefined : []}
flipEnabled={false} flipEnabled={false}


@ -121,7 +121,7 @@ const IAICanvasMaskOptions = () => {
</ButtonGroup> </ButtonGroup>
} }
> >
<Flex direction={'column'} gap={'0.5rem'}> <Flex direction="column" gap="0.5rem">
<IAICheckbox <IAICheckbox
label={`${t('unifiedcanvas:enableMask')} (H)`} label={`${t('unifiedcanvas:enableMask')} (H)`}
isChecked={isMaskEnabled} isChecked={isMaskEnabled}
@ -139,7 +139,7 @@ const IAICanvasMaskOptions = () => {
color={maskColor} color={maskColor}
onChange={(newColor) => dispatch(setMaskColor(newColor))} onChange={(newColor) => dispatch(setMaskColor(newColor))}
/> />
<IAIButton size={'sm'} leftIcon={<FaTrash />} onClick={handleClearMask}> <IAIButton size="sm" leftIcon={<FaTrash />} onClick={handleClearMask}>
{t('unifiedcanvas:clearMask')} (Shift+C) {t('unifiedcanvas:clearMask')} (Shift+C)
</IAIButton> </IAIButton>
</Flex> </Flex>


@ -97,7 +97,7 @@ const IAICanvasSettingsButtonPopover = () => {
/> />
} }
> >
<Flex direction={'column'} gap={'0.5rem'}> <Flex direction="column" gap="0.5rem">
<IAICheckbox <IAICheckbox
label={t('unifiedcanvas:showIntermediates')} label={t('unifiedcanvas:showIntermediates')}
isChecked={shouldShowIntermediates} isChecked={shouldShowIntermediates}


@@ -228,13 +228,8 @@ const IAICanvasToolChooserOptions = () => {
       />
     }
   >
-    <Flex
-      minWidth={'15rem'}
-      direction={'column'}
-      gap={'1rem'}
-      width={'100%'}
-    >
-      <Flex gap={'1rem'} justifyContent="space-between">
+    <Flex minWidth="15rem" direction="column" gap="1rem" width="100%">
+      <Flex gap="1rem" justifyContent="space-between">
       <IAISlider
         label={t('unifiedcanvas:brushSize')}
         value={brushSize}


@ -415,14 +415,14 @@ const CurrentImageButtons = () => {
> >
<div className="current-image-send-to-popover"> <div className="current-image-send-to-popover">
<IAIButton <IAIButton
size={'sm'} size="sm"
onClick={handleClickUseAsInitialImage} onClick={handleClickUseAsInitialImage}
leftIcon={<FaShare />} leftIcon={<FaShare />}
> >
{t('parameters:sendToImg2Img')} {t('parameters:sendToImg2Img')}
</IAIButton> </IAIButton>
<IAIButton <IAIButton
size={'sm'} size="sm"
onClick={handleSendToCanvas} onClick={handleSendToCanvas}
leftIcon={<FaShare />} leftIcon={<FaShare />}
> >
@ -430,14 +430,14 @@ const CurrentImageButtons = () => {
</IAIButton> </IAIButton>
<IAIButton <IAIButton
size={'sm'} size="sm"
onClick={handleCopyImage} onClick={handleCopyImage}
leftIcon={<FaCopy />} leftIcon={<FaCopy />}
> >
{t('parameters:copyImage')} {t('parameters:copyImage')}
</IAIButton> </IAIButton>
<IAIButton <IAIButton
size={'sm'} size="sm"
onClick={handleCopyImageLink} onClick={handleCopyImageLink}
leftIcon={<FaCopy />} leftIcon={<FaCopy />}
> >
@ -445,7 +445,7 @@ const CurrentImageButtons = () => {
</IAIButton> </IAIButton>
<Link download={true} href={currentImage?.url}> <Link download={true} href={currentImage?.url}>
<IAIButton leftIcon={<FaDownload />} size={'sm'} w="100%"> <IAIButton leftIcon={<FaDownload />} size="sm" w="100%">
{t('parameters:downloadImage')} {t('parameters:downloadImage')}
</IAIButton> </IAIButton>
</Link> </Link>


@ -82,7 +82,7 @@ export default function CurrentImagePreview() {
}; };
return ( return (
<div className={'current-image-preview'}> <div className="current-image-preview">
{imageToDisplay && ( {imageToDisplay && (
<Image <Image
src={imageToDisplay.url} src={imageToDisplay.url}


@ -116,13 +116,13 @@ const DeleteImageModal = forwardRef(
</AlertDialogHeader> </AlertDialogHeader>
<AlertDialogBody> <AlertDialogBody>
<Flex direction={'column'} gap={5}> <Flex direction="column" gap={5}>
<Text> <Text>
Are you sure? Deleted images will be sent to the Bin. You Are you sure? Deleted images will be sent to the Bin. You
can restore from there if you wish to. can restore from there if you wish to.
</Text> </Text>
<FormControl> <FormControl>
<Flex alignItems={'center'}> <Flex alignItems="center">
<FormLabel mb={0}>Don&apos;t ask me again</FormLabel> <FormLabel mb={0}>Don&apos;t ask me again</FormLabel>
<Switch <Switch
checked={!shouldConfirmOnDelete} checked={!shouldConfirmOnDelete}


@ -175,12 +175,12 @@ const HoverableImage = memo((props: HoverableImageProps) => {
> >
<ContextMenu.Trigger> <ContextMenu.Trigger>
<Box <Box
position={'relative'} position="relative"
key={uuid} key={uuid}
className="hoverable-image" className="hoverable-image"
onMouseOver={handleMouseOver} onMouseOver={handleMouseOver}
onMouseOut={handleMouseOut} onMouseOut={handleMouseOut}
userSelect={'none'} userSelect="none"
draggable={true} draggable={true}
onDragStart={handleDragStart} onDragStart={handleDragStart}
> >
@ -189,15 +189,15 @@ const HoverableImage = memo((props: HoverableImageProps) => {
objectFit={ objectFit={
shouldUseSingleGalleryColumn ? 'contain' : galleryImageObjectFit shouldUseSingleGalleryColumn ? 'contain' : galleryImageObjectFit
} }
rounded={'md'} rounded="md"
src={thumbnail || url} src={thumbnail || url}
loading={'lazy'} loading="lazy"
/> />
<div className="hoverable-image-content" onClick={handleSelectImage}> <div className="hoverable-image-content" onClick={handleSelectImage}>
{isSelected && ( {isSelected && (
<Icon <Icon
width={'50%'} width="50%"
height={'50%'} height="50%"
as={FaCheck} as={FaCheck}
className="hoverable-image-check" className="hoverable-image-check"
/> />
@ -210,7 +210,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
aria-label={t('parameters:deleteImage')} aria-label={t('parameters:deleteImage')}
icon={<FaTrashAlt />} icon={<FaTrashAlt />}
size="xs" size="xs"
variant={'imageHoverIconButton'} variant="imageHoverIconButton"
fontSize={14} fontSize={14}
isDisabled={!mayDeleteImage} isDisabled={!mayDeleteImage}
/> />
@ -221,7 +221,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
</ContextMenu.Trigger> </ContextMenu.Trigger>
<ContextMenu.Content <ContextMenu.Content
className="hoverable-image-context-menu" className="hoverable-image-context-menu"
sticky={'always'} sticky="always"
onInteractOutside={(e) => { onInteractOutside={(e) => {
e.detail.originalEvent.preventDefault(); e.detail.originalEvent.preventDefault();
}} }}


@ -281,7 +281,7 @@ export default function ImageGallery() {
<Resizable <Resizable
minWidth={galleryMinWidth} minWidth={galleryMinWidth}
maxWidth={shouldPinGallery ? galleryMaxWidth : window.innerWidth} maxWidth={shouldPinGallery ? galleryMaxWidth : window.innerWidth}
className={'image-gallery-popup'} className="image-gallery-popup"
handleStyles={{ handleStyles={{
left: { left: {
width: '15px', width: '15px',
@ -395,14 +395,14 @@ export default function ImageGallery() {
{shouldShowButtons ? ( {shouldShowButtons ? (
<> <>
<IAIButton <IAIButton
size={'sm'} size="sm"
data-selected={currentCategory === 'result'} data-selected={currentCategory === 'result'}
onClick={() => dispatch(setCurrentCategory('result'))} onClick={() => dispatch(setCurrentCategory('result'))}
> >
{t('gallery:generations')} {t('gallery:generations')}
</IAIButton> </IAIButton>
<IAIButton <IAIButton
size={'sm'} size="sm"
data-selected={currentCategory === 'user'} data-selected={currentCategory === 'user'}
onClick={() => dispatch(setCurrentCategory('user'))} onClick={() => dispatch(setCurrentCategory('user'))}
> >
@ -433,14 +433,14 @@ export default function ImageGallery() {
<IAIPopover <IAIPopover
isLazy isLazy
trigger="hover" trigger="hover"
placement={'left'} placement="left"
triggerComponent={ triggerComponent={
<IAIIconButton <IAIIconButton
size={'sm'} size="sm"
aria-label={t('gallery:gallerySettings')} aria-label={t('gallery:gallerySettings')}
icon={<FaWrench />} icon={<FaWrench />}
className="image-gallery-icon-btn" className="image-gallery-icon-btn"
cursor={'pointer'} cursor="pointer"
/> />
} }
> >
@ -455,7 +455,7 @@ export default function ImageGallery() {
label={t('gallery:galleryImageSize')} label={t('gallery:galleryImageSize')}
/> />
<IAIIconButton <IAIIconButton
size={'sm'} size="sm"
aria-label={t('gallery:galleryImageResetSize')} aria-label={t('gallery:galleryImageResetSize')}
tooltip={t('gallery:galleryImageResetSize')} tooltip={t('gallery:galleryImageResetSize')}
onClick={() => dispatch(setGalleryImageMinimumWidth(64))} onClick={() => dispatch(setGalleryImageMinimumWidth(64))}
@ -505,8 +505,8 @@ export default function ImageGallery() {
</IAIPopover> </IAIPopover>
<IAIIconButton <IAIIconButton
size={'sm'} size="sm"
className={'image-gallery-icon-btn'} className="image-gallery-icon-btn"
aria-label={t('gallery:pinGallery')} aria-label={t('gallery:pinGallery')}
tooltip={`${t('gallery:pinGallery')} (Shift+G)`} tooltip={`${t('gallery:pinGallery')} (Shift+G)`}
onClick={handleSetShouldPinGallery} onClick={handleSetShouldPinGallery}


@ -71,8 +71,8 @@ const MetadataItem = ({
<IconButton <IconButton
aria-label="Use this parameter" aria-label="Use this parameter"
icon={<IoArrowUndoCircleOutline />} icon={<IoArrowUndoCircleOutline />}
size={'xs'} size="xs"
variant={'ghost'} variant="ghost"
fontSize={20} fontSize={20}
onClick={onClick} onClick={onClick}
/> />
@ -83,23 +83,23 @@ const MetadataItem = ({
<IconButton <IconButton
aria-label={`Copy ${label}`} aria-label={`Copy ${label}`}
icon={<FaCopy />} icon={<FaCopy />}
size={'xs'} size="xs"
variant={'ghost'} variant="ghost"
fontSize={14} fontSize={14}
onClick={() => navigator.clipboard.writeText(value.toString())} onClick={() => navigator.clipboard.writeText(value.toString())}
/> />
</Tooltip> </Tooltip>
)} )}
<Flex direction={labelPosition ? 'column' : 'row'}> <Flex direction={labelPosition ? 'column' : 'row'}>
<Text fontWeight={'semibold'} whiteSpace={'pre-wrap'} pr={2}> <Text fontWeight="semibold" whiteSpace="pre-wrap" pr={2}>
{label}: {label}:
</Text> </Text>
{isLink ? ( {isLink ? (
<Link href={value.toString()} isExternal wordBreak={'break-all'}> <Link href={value.toString()} isExternal wordBreak="break-all">
{value.toString()} <ExternalLinkIcon mx="2px" /> {value.toString()} <ExternalLinkIcon mx="2px" />
</Link> </Link>
) : ( ) : (
<Text overflowY={'scroll'} wordBreak={'break-all'}> <Text overflowY="scroll" wordBreak="break-all">
{value.toString()} {value.toString()}
</Text> </Text>
)} )}
@ -163,10 +163,10 @@ const ImageMetadataViewer = memo(
return ( return (
<div className={`image-metadata-viewer ${styleClass}`}> <div className={`image-metadata-viewer ${styleClass}`}>
<Flex gap={1} direction={'column'} width={'100%'}> <Flex gap={1} direction="column" width="100%">
<Flex gap={2}> <Flex gap={2}>
<Text fontWeight={'semibold'}>File:</Text> <Text fontWeight="semibold">File:</Text>
<Link href={image.url} isExternal maxW={'calc(100% - 3rem)'}> <Link href={image.url} isExternal maxW="calc(100% - 3rem)">
{image.url.length > 64 {image.url.length > 64
? image.url.substring(0, 64).concat('...') ? image.url.substring(0, 64).concat('...')
: image.url} : image.url}
@ -304,7 +304,7 @@ const ImageMetadataViewer = memo(
)} )}
{postprocessing && postprocessing.length > 0 && ( {postprocessing && postprocessing.length > 0 && (
<> <>
<Heading size={'sm'}>Postprocessing</Heading> <Heading size="sm">Postprocessing</Heading>
{postprocessing.map( {postprocessing.map(
( (
postprocess: InvokeAI.PostProcessedImageMetadata, postprocess: InvokeAI.PostProcessedImageMetadata,
@@ -313,13 +313,8 @@ const ImageMetadataViewer = memo(
 if (postprocess.type === 'esrgan') {
   const { scale, strength, denoise_str } = postprocess;
   return (
-    <Flex
-      key={i}
-      pl={'2rem'}
-      gap={1}
-      direction={'column'}
-    >
-      <Text size={'md'}>{`${
+    <Flex key={i} pl="2rem" gap={1} direction="column">
+      <Text size="md">{`${
       i + 1
     }: Upscale (ESRGAN)`}</Text>
       <MetadataItem
@@ -348,13 +343,8 @@ const ImageMetadataViewer = memo(
 } else if (postprocess.type === 'gfpgan') {
   const { strength } = postprocess;
   return (
-    <Flex
-      key={i}
-      pl={'2rem'}
-      gap={1}
-      direction={'column'}
-    >
-      <Text size={'md'}>{`${
+    <Flex key={i} pl="2rem" gap={1} direction="column">
+      <Text size="md">{`${
       i + 1
     }: Face restoration (GFPGAN)`}</Text>
@@ -371,13 +361,8 @@ const ImageMetadataViewer = memo(
 } else if (postprocess.type === 'codeformer') {
   const { strength, fidelity } = postprocess;
   return (
-    <Flex
-      key={i}
-      pl={'2rem'}
-      gap={1}
-      direction={'column'}
-    >
-      <Text size={'md'}>{`${
+    <Flex key={i} pl="2rem" gap={1} direction="column">
+      <Text size="md">{`${
       i + 1
     }: Face restoration (Codeformer)`}</Text>
@ -413,30 +398,30 @@ const ImageMetadataViewer = memo(
value={dreamPrompt} value={dreamPrompt}
/> />
)} )}
<Flex gap={2} direction={'column'}> <Flex gap={2} direction="column">
<Flex gap={2}> <Flex gap={2}>
<Tooltip label={`Copy metadata JSON`}> <Tooltip label="Copy metadata JSON">
<IconButton <IconButton
aria-label="Copy metadata JSON" aria-label="Copy metadata JSON"
icon={<FaCopy />} icon={<FaCopy />}
size={'xs'} size="xs"
variant={'ghost'} variant="ghost"
fontSize={14} fontSize={14}
onClick={() => onClick={() =>
navigator.clipboard.writeText(metadataJSON) navigator.clipboard.writeText(metadataJSON)
} }
/> />
</Tooltip> </Tooltip>
<Text fontWeight={'semibold'}>Metadata JSON:</Text> <Text fontWeight="semibold">Metadata JSON:</Text>
</Flex> </Flex>
<div className={'image-json-viewer'}> <div className="image-json-viewer">
<pre>{metadataJSON}</pre> <pre>{metadataJSON}</pre>
</div> </div>
</Flex> </Flex>
</> </>
) : ( ) : (
<Center width={'100%'} pt={10}> <Center width="100%" pt={10}>
<Text fontSize={'lg'} fontWeight="semibold"> <Text fontSize="lg" fontWeight="semibold">
No metadata available No metadata available
</Text> </Text>
</Center> </Center>


@ -23,8 +23,8 @@ export default function InvokeAccordionItem(props: InvokeAccordionItemProps) {
return ( return (
<AccordionItem className="advanced-parameters-item"> <AccordionItem className="advanced-parameters-item">
<AccordionButton className="advanced-parameters-header"> <AccordionButton className="advanced-parameters-header">
<Flex width={'100%'} gap={'0.5rem'} align={'center'}> <Flex width="100%" gap="0.5rem" align="center">
<Box flexGrow={1} textAlign={'left'}> <Box flexGrow={1} textAlign="left">
{header} {header}
</Box> </Box>
{additionalHeaderComponents} {additionalHeaderComponents}


@ -63,7 +63,7 @@ const FaceRestoreSettings = () => {
const { t } = useTranslation(); const { t } = useTranslation();
return ( return (
<Flex direction={'column'} gap={2}> <Flex direction="column" gap={2}>
<IAISelect <IAISelect
label={t('parameters:type')} label={t('parameters:type')}
validValues={FACETOOL_TYPES.concat()} validValues={FACETOOL_TYPES.concat()}


@ -36,7 +36,7 @@ export default function ImageToImageStrength(props: ImageToImageStrengthProps) {
styleClass={styleClass} styleClass={styleClass}
withInput withInput
withSliderMarks withSliderMarks
inputWidth={'5.5rem'} inputWidth="5.5rem"
withReset withReset
handleReset={handleImg2ImgStrengthReset} handleReset={handleImg2ImgStrengthReset}
/> />


@ -49,7 +49,7 @@ const HiresStrength = () => {
isInteger={false} isInteger={false}
withInput withInput
withSliderMarks withSliderMarks
inputWidth={'5.5rem'} inputWidth="5.5rem"
withReset withReset
handleReset={handleHiResStrengthReset} handleReset={handleHiResStrengthReset}
isSliderDisabled={!hiresFix} isSliderDisabled={!hiresFix}
@ -75,10 +75,10 @@ const HiresSettings = () => {
dispatch(setHiresFix(e.target.checked)); dispatch(setHiresFix(e.target.checked));
return ( return (
<Flex gap={2} direction={'column'}> <Flex gap={2} direction="column">
<IAISwitch <IAISwitch
label={t('parameters:hiresOptim')} label={t('parameters:hiresOptim')}
fontSize={'md'} fontSize="md"
isChecked={hiresFix} isChecked={hiresFix}
onChange={handleChangeHiresFix} onChange={handleChangeHiresFix}
/> />


@ -3,7 +3,7 @@ import SeamlessSettings from './SeamlessSettings';
const ImageToImageOutputSettings = () => { const ImageToImageOutputSettings = () => {
return ( return (
<Flex gap={2} direction={'column'}> <Flex gap={2} direction="column">
<SeamlessSettings /> <SeamlessSettings />
</Flex> </Flex>
); );


@ -4,7 +4,7 @@ import SeamlessSettings from './SeamlessSettings';
const OutputSettings = () => { const OutputSettings = () => {
return ( return (
<Flex gap={2} direction={'column'}> <Flex gap={2} direction="column">
<SeamlessSettings /> <SeamlessSettings />
<HiresSettings /> <HiresSettings />
</Flex> </Flex>


@ -22,10 +22,10 @@ const SeamlessSettings = () => {
const { t } = useTranslation(); const { t } = useTranslation();
return ( return (
<Flex gap={2} direction={'column'}> <Flex gap={2} direction="column">
<IAISwitch <IAISwitch
label={t('parameters:seamlessTiling')} label={t('parameters:seamlessTiling')}
fontSize={'md'} fontSize="md"
isChecked={seamless} isChecked={seamless}
onChange={handleChangeSeamless} onChange={handleChangeSeamless}
/> />


@ -10,7 +10,7 @@ import Threshold from './Threshold';
*/ */
const SeedSettings = () => { const SeedSettings = () => {
return ( return (
<Flex gap={2} direction={'column'}> <Flex gap={2} direction="column">
<RandomizeSeed /> <RandomizeSeed />
<Flex gap={2}> <Flex gap={2}>
<Seed /> <Seed />


@ -18,7 +18,7 @@ export default function ShuffleSeed() {
return ( return (
<Button <Button
size={'sm'} size="sm"
isDisabled={shouldRandomizeSeed} isDisabled={shouldRandomizeSeed}
onClick={handleClickRandomizeSeed} onClick={handleClickRandomizeSeed}
padding="0 1.5rem" padding="0 1.5rem"


@ -18,7 +18,7 @@ export default function GenerateVariationsToggle() {
return ( return (
<IAISwitch <IAISwitch
isChecked={shouldGenerateVariations} isChecked={shouldGenerateVariations}
width={'auto'} width="auto"
onChange={handleChangeShouldGenerateVariations} onChange={handleChangeShouldGenerateVariations}
/> />
); );


@ -7,7 +7,7 @@ import VariationAmount from './VariationAmount';
*/ */
const VariationsSettings = () => { const VariationsSettings = () => {
return ( return (
<Flex gap={2} direction={'column'}> <Flex gap={2} direction="column">
<VariationAmount /> <VariationAmount />
<SeedWeights /> <SeedWeights />
</Flex> </Flex>


@ -71,7 +71,7 @@ const PromptInput = () => {
id="prompt" id="prompt"
name="prompt" name="prompt"
placeholder={t('parameters:promptPlaceholder')} placeholder={t('parameters:promptPlaceholder')}
size={'lg'} size="lg"
value={prompt} value={prompt}
onChange={handleChangePrompt} onChange={handleChangePrompt}
onKeyDown={handleKeyDown} onKeyDown={handleKeyDown}


@ -27,7 +27,7 @@ const EmptyTempFolderButtonModal = () => {
acceptCallback={acceptCallback} acceptCallback={acceptCallback}
acceptButtonText={t('unifiedcanvas:emptyFolder')} acceptButtonText={t('unifiedcanvas:emptyFolder')}
triggerComponent={ triggerComponent={
<IAIButton leftIcon={<FaTrash />} size={'sm'} isDisabled={isStaging}> <IAIButton leftIcon={<FaTrash />} size="sm" isDisabled={isStaging}>
{t('unifiedcanvas:emptyTempImageFolder')} {t('unifiedcanvas:emptyTempImageFolder')}
</IAIButton> </IAIButton>
} }


@ -109,7 +109,7 @@ const Console = () => {
bottom: 0, bottom: 0,
zIndex: 9999, zIndex: 9999,
}} }}
maxHeight={'90vh'} maxHeight="90vh"
> >
<div className="console" ref={viewerRef} onScroll={handleOnScroll}> <div className="console" ref={viewerRef} onScroll={handleOnScroll}>
{log.map((entry, i) => { {log.map((entry, i) => {
@ -130,11 +130,11 @@ const Console = () => {
label={shouldAutoscroll ? 'Autoscroll On' : 'Autoscroll Off'} label={shouldAutoscroll ? 'Autoscroll On' : 'Autoscroll Off'}
> >
<IconButton <IconButton
className={'console-autoscroll-icon-button'} className="console-autoscroll-icon-button"
data-autoscroll-enabled={shouldAutoscroll} data-autoscroll-enabled={shouldAutoscroll}
size="sm" size="sm"
aria-label="Toggle autoscroll" aria-label="Toggle autoscroll"
variant={'solid'} variant="solid"
icon={<FaAngleDoubleDown />} icon={<FaAngleDoubleDown />}
onClick={() => setShouldAutoscroll(!shouldAutoscroll)} onClick={() => setShouldAutoscroll(!shouldAutoscroll)}
/> />
@ -145,11 +145,11 @@ const Console = () => {
label={shouldShowLogViewer ? 'Hide Console' : 'Show Console'} label={shouldShowLogViewer ? 'Hide Console' : 'Show Console'}
> >
<IconButton <IconButton
className={'console-toggle-icon-button'} className="console-toggle-icon-button"
data-error-seen={hasError || !wasErrorSeen} data-error-seen={hasError || !wasErrorSeen}
size="sm" size="sm"
position={'fixed'} position="fixed"
variant={'solid'} variant="solid"
aria-label="Toggle Log Viewer" aria-label="Toggle Log Viewer"
icon={shouldShowLogViewer ? <FaMinus /> : <FaCode />} icon={shouldShowLogViewer ? <FaMinus /> : <FaCode />}
onClick={handleClickLogViewerToggle} onClick={handleClickLogViewerToggle}


@ -54,7 +54,7 @@ export default function LanguagePicker() {
aria-label={t('common:languagePickerLabel')} aria-label={t('common:languagePickerLabel')}
tooltip={t('common:languagePickerLabel')} tooltip={t('common:languagePickerLabel')}
icon={<FaLanguage />} icon={<FaLanguage />}
size={'sm'} size="sm"
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={26} fontSize={26}


@ -99,8 +99,8 @@ export default function AddCheckpointModel() {
> >
{({ handleSubmit, errors, touched }) => ( {({ handleSubmit, errors, touched }) => (
<form onSubmit={handleSubmit}> <form onSubmit={handleSubmit}>
<VStack rowGap={'0.5rem'}> <VStack rowGap="0.5rem">
<Text fontSize={20} fontWeight="bold" alignSelf={'start'}> <Text fontSize={20} fontWeight="bold" alignSelf="start">
{t('modelmanager:manual')} {t('modelmanager:manual')}
</Text> </Text>
{/* Name */} {/* Name */}
@ -111,7 +111,7 @@ export default function AddCheckpointModel() {
<FormLabel htmlFor="name" fontSize="sm"> <FormLabel htmlFor="name" fontSize="sm">
{t('modelmanager:name')} {t('modelmanager:name')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="name" id="name"
@ -138,7 +138,7 @@ export default function AddCheckpointModel() {
<FormLabel htmlFor="description" fontSize="sm"> <FormLabel htmlFor="description" fontSize="sm">
{t('modelmanager:description')} {t('modelmanager:description')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="description" id="description"
@ -164,7 +164,7 @@ export default function AddCheckpointModel() {
<FormLabel htmlFor="config" fontSize="sm"> <FormLabel htmlFor="config" fontSize="sm">
{t('modelmanager:config')} {t('modelmanager:config')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="config" id="config"
@ -190,7 +190,7 @@ export default function AddCheckpointModel() {
<FormLabel htmlFor="config" fontSize="sm"> <FormLabel htmlFor="config" fontSize="sm">
{t('modelmanager:modelLocation')} {t('modelmanager:modelLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="weights" id="weights"
@ -213,7 +213,7 @@ export default function AddCheckpointModel() {
<FormLabel htmlFor="vae" fontSize="sm"> <FormLabel htmlFor="vae" fontSize="sm">
{t('modelmanager:vaeLocation')} {t('modelmanager:vaeLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="vae" id="vae"
@ -231,13 +231,13 @@ export default function AddCheckpointModel() {
</VStack> </VStack>
</FormControl> </FormControl>
<HStack width={'100%'}> <HStack width="100%">
{/* Width */} {/* Width */}
<FormControl isInvalid={!!errors.width && touched.width}> <FormControl isInvalid={!!errors.width && touched.width}>
<FormLabel htmlFor="width" fontSize="sm"> <FormLabel htmlFor="width" fontSize="sm">
{t('modelmanager:width')} {t('modelmanager:width')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field id="width" name="width"> <Field id="width" name="width">
{({ {({
field, field,
@ -276,7 +276,7 @@ export default function AddCheckpointModel() {
<FormLabel htmlFor="height" fontSize="sm"> <FormLabel htmlFor="height" fontSize="sm">
{t('modelmanager:height')} {t('modelmanager:height')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field id="height" name="height"> <Field id="height" name="height">
{({ {({
field, field,


@ -105,7 +105,7 @@ export default function AddDiffusersModel() {
> >
{({ handleSubmit, errors, touched }) => ( {({ handleSubmit, errors, touched }) => (
<form onSubmit={handleSubmit}> <form onSubmit={handleSubmit}>
<VStack rowGap={'0.5rem'}> <VStack rowGap="0.5rem">
<FormItemWrapper> <FormItemWrapper>
{/* Name */} {/* Name */}
<FormControl <FormControl
@ -115,7 +115,7 @@ export default function AddDiffusersModel() {
<FormLabel htmlFor="name" fontSize="sm"> <FormLabel htmlFor="name" fontSize="sm">
{t('modelmanager:name')} {t('modelmanager:name')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="name" id="name"
@ -145,7 +145,7 @@ export default function AddDiffusersModel() {
<FormLabel htmlFor="description" fontSize="sm"> <FormLabel htmlFor="description" fontSize="sm">
{t('modelmanager:description')} {t('modelmanager:description')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="description" id="description"
@ -182,7 +182,7 @@ export default function AddDiffusersModel() {
<FormLabel htmlFor="path" fontSize="sm"> <FormLabel htmlFor="path" fontSize="sm">
{t('modelmanager:modelLocation')} {t('modelmanager:modelLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="path" id="path"
@ -205,7 +205,7 @@ export default function AddDiffusersModel() {
<FormLabel htmlFor="repo_id" fontSize="sm"> <FormLabel htmlFor="repo_id" fontSize="sm">
{t('modelmanager:repo_id')} {t('modelmanager:repo_id')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="repo_id" id="repo_id"
@ -242,7 +242,7 @@ export default function AddDiffusersModel() {
<FormLabel htmlFor="vae.path" fontSize="sm"> <FormLabel htmlFor="vae.path" fontSize="sm">
{t('modelmanager:vaeLocation')} {t('modelmanager:vaeLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="vae.path" id="vae.path"
@ -267,7 +267,7 @@ export default function AddDiffusersModel() {
<FormLabel htmlFor="vae.repo_id" fontSize="sm"> <FormLabel htmlFor="vae.repo_id" fontSize="sm">
{t('modelmanager:vaeRepoID')} {t('modelmanager:vaeRepoID')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="vae.repo_id" id="vae.repo_id"


@ -72,9 +72,9 @@ export default function AddModel() {
tooltip={t('modelmanager:addNewModel')} tooltip={t('modelmanager:addNewModel')}
onClick={onOpen} onClick={onOpen}
className="modal-close-btn" className="modal-close-btn"
size={'sm'} size="sm"
> >
<Flex columnGap={'0.5rem'} alignItems="center"> <Flex columnGap="0.5rem" alignItems="center">
<FaPlus /> <FaPlus />
{t('modelmanager:addNew')} {t('modelmanager:addNew')}
</Flex> </Flex>


@ -121,7 +121,7 @@ export default function CheckpointModelEdit() {
> >
{({ handleSubmit, errors, touched }) => ( {({ handleSubmit, errors, touched }) => (
<form onSubmit={handleSubmit}> <form onSubmit={handleSubmit}>
<VStack rowGap={'0.5rem'} alignItems="start"> <VStack rowGap="0.5rem" alignItems="start">
{/* Description */} {/* Description */}
<FormControl <FormControl
isInvalid={!!errors.description && touched.description} isInvalid={!!errors.description && touched.description}
@ -130,7 +130,7 @@ export default function CheckpointModelEdit() {
<FormLabel htmlFor="description" fontSize="sm"> <FormLabel htmlFor="description" fontSize="sm">
{t('modelmanager:description')} {t('modelmanager:description')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="description" id="description"
@ -156,7 +156,7 @@ export default function CheckpointModelEdit() {
<FormLabel htmlFor="config" fontSize="sm"> <FormLabel htmlFor="config" fontSize="sm">
{t('modelmanager:config')} {t('modelmanager:config')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="config" id="config"
@ -182,7 +182,7 @@ export default function CheckpointModelEdit() {
<FormLabel htmlFor="config" fontSize="sm"> <FormLabel htmlFor="config" fontSize="sm">
{t('modelmanager:modelLocation')} {t('modelmanager:modelLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="weights" id="weights"
@ -205,7 +205,7 @@ export default function CheckpointModelEdit() {
<FormLabel htmlFor="vae" fontSize="sm"> <FormLabel htmlFor="vae" fontSize="sm">
{t('modelmanager:vaeLocation')} {t('modelmanager:vaeLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="vae" id="vae"
@ -223,13 +223,13 @@ export default function CheckpointModelEdit() {
</VStack> </VStack>
</FormControl> </FormControl>
<HStack width={'100%'}> <HStack width="100%">
{/* Width */} {/* Width */}
<FormControl isInvalid={!!errors.width && touched.width}> <FormControl isInvalid={!!errors.width && touched.width}>
<FormLabel htmlFor="width" fontSize="sm"> <FormLabel htmlFor="width" fontSize="sm">
{t('modelmanager:width')} {t('modelmanager:width')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field id="width" name="width"> <Field id="width" name="width">
{({ {({
field, field,
@ -267,7 +267,7 @@ export default function CheckpointModelEdit() {
<FormLabel htmlFor="height" fontSize="sm"> <FormLabel htmlFor="height" fontSize="sm">
{t('modelmanager:height')} {t('modelmanager:height')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field id="height" name="height"> <Field id="height" name="height">
{({ {({
field, field,


@ -128,7 +128,7 @@ export default function DiffusersModelEdit() {
> >
{({ handleSubmit, errors, touched }) => ( {({ handleSubmit, errors, touched }) => (
<form onSubmit={handleSubmit}> <form onSubmit={handleSubmit}>
<VStack rowGap={'0.5rem'} alignItems="start"> <VStack rowGap="0.5rem" alignItems="start">
{/* Description */} {/* Description */}
<FormControl <FormControl
isInvalid={!!errors.description && touched.description} isInvalid={!!errors.description && touched.description}
@ -137,7 +137,7 @@ export default function DiffusersModelEdit() {
<FormLabel htmlFor="description" fontSize="sm"> <FormLabel htmlFor="description" fontSize="sm">
{t('modelmanager:description')} {t('modelmanager:description')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="description" id="description"
@ -163,7 +163,7 @@ export default function DiffusersModelEdit() {
<FormLabel htmlFor="path" fontSize="sm"> <FormLabel htmlFor="path" fontSize="sm">
{t('modelmanager:modelLocation')} {t('modelmanager:modelLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="path" id="path"
@ -186,7 +186,7 @@ export default function DiffusersModelEdit() {
<FormLabel htmlFor="repo_id" fontSize="sm"> <FormLabel htmlFor="repo_id" fontSize="sm">
{t('modelmanager:repo_id')} {t('modelmanager:repo_id')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="repo_id" id="repo_id"
@ -211,7 +211,7 @@ export default function DiffusersModelEdit() {
<FormLabel htmlFor="vae.path" fontSize="sm"> <FormLabel htmlFor="vae.path" fontSize="sm">
{t('modelmanager:vaeLocation')} {t('modelmanager:vaeLocation')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="vae.path" id="vae.path"
@ -236,7 +236,7 @@ export default function DiffusersModelEdit() {
<FormLabel htmlFor="vae.repo_id" fontSize="sm"> <FormLabel htmlFor="vae.repo_id" fontSize="sm">
{t('modelmanager:vaeRepoID')} {t('modelmanager:vaeRepoID')}
</FormLabel> </FormLabel>
<VStack alignItems={'start'}> <VStack alignItems="start">
<Field <Field
as={IAIInput} as={IAIInput}
id="vae.repo_id" id="vae.repo_id"


@ -176,9 +176,9 @@ const ModelList = () => {
}, [models, searchText, t, isSelectedFilter]); }, [models, searchText, t, isSelectedFilter]);
return ( return (
<Flex flexDirection={'column'} rowGap="2rem" width="50%" minWidth="50%"> <Flex flexDirection="column" rowGap="2rem" width="50%" minWidth="50%">
<Flex justifyContent={'space-between'}> <Flex justifyContent="space-between">
<Text fontSize={'1.4rem'} fontWeight="bold"> <Text fontSize="1.4rem" fontWeight="bold">
{t('modelmanager:availableModels')} {t('modelmanager:availableModels')}
</Text> </Text>
<AddModel /> <AddModel />
@ -190,10 +190,10 @@ const ModelList = () => {
/> />
<Flex <Flex
flexDirection={'column'} flexDirection="column"
gap={1} gap={1}
maxHeight={window.innerHeight - 360} maxHeight={window.innerHeight - 360}
overflow={'scroll'} overflow="scroll"
paddingRight="1rem" paddingRight="1rem"
> >
<Flex columnGap="0.5rem"> <Flex columnGap="0.5rem">


@ -56,7 +56,7 @@ export default function ModelListItem(props: ModelListItemProps) {
return ( return (
<Flex <Flex
alignItems={'center'} alignItems="center"
padding="0.5rem 0.5rem" padding="0.5rem 0.5rem"
borderRadius="0.2rem" borderRadius="0.2rem"
backgroundColor={name === openModel ? 'var(--accent-color)' : ''} backgroundColor={name === openModel ? 'var(--accent-color)' : ''}
@ -69,14 +69,14 @@ export default function ModelListItem(props: ModelListItemProps) {
> >
<Box onClick={openModelHandler} cursor="pointer"> <Box onClick={openModelHandler} cursor="pointer">
<Tooltip label={description} hasArrow placement="bottom"> <Tooltip label={description} hasArrow placement="bottom">
<Text fontWeight={'bold'}>{name}</Text> <Text fontWeight="bold">{name}</Text>
</Tooltip> </Tooltip>
</Box> </Box>
<Spacer onClick={openModelHandler} cursor="pointer" /> <Spacer onClick={openModelHandler} cursor="pointer" />
<Flex gap={2} alignItems="center"> <Flex gap={2} alignItems="center">
<Text color={statusTextColor()}>{status}</Text> <Text color={statusTextColor()}>{status}</Text>
<Button <Button
size={'sm'} size="sm"
onClick={handleChangeModel} onClick={handleChangeModel}
isDisabled={status === 'active' || isProcessing || !isConnected} isDisabled={status === 'active' || isProcessing || !isConnected}
className="modal-close-btn" className="modal-close-btn"
@ -86,7 +86,7 @@ export default function ModelListItem(props: ModelListItemProps) {
<IAIIconButton <IAIIconButton
icon={<EditIcon />} icon={<EditIcon />}
size={'sm'} size="sm"
onClick={openModelHandler} onClick={openModelHandler}
aria-label="Modify Config" aria-label="Modify Config"
isDisabled={status === 'active' || isProcessing || !isConnected} isDisabled={status === 'active' || isProcessing || !isConnected}
@ -99,7 +99,7 @@ export default function ModelListItem(props: ModelListItemProps) {
triggerComponent={ triggerComponent={
<IAIIconButton <IAIIconButton
icon={<DeleteIcon />} icon={<DeleteIcon />}
size={'sm'} size="sm"
aria-label={t('modelmanager:deleteConfig')} aria-label={t('modelmanager:deleteConfig')}
isDisabled={status === 'active' || isProcessing || !isConnected} isDisabled={status === 'active' || isProcessing || !isConnected}
className=" modal-close-btn" className=" modal-close-btn"
@ -107,7 +107,7 @@ export default function ModelListItem(props: ModelListItemProps) {
/> />
} }
> >
<Flex rowGap={'1rem'} flexDirection="column"> <Flex rowGap="1rem" flexDirection="column">
<p style={{ fontWeight: 'bold' }}>{t('modelmanager:deleteMsg1')}</p> <p style={{ fontWeight: 'bold' }}>{t('modelmanager:deleteMsg1')}</p>
<p style={{ color: 'var(--text-color-secondary' }}> <p style={{ color: 'var(--text-color-secondary' }}>
{t('modelmanager:deleteMsg2')} {t('modelmanager:deleteMsg2')}


@@ -58,11 +58,7 @@ export default function ModelManagerModal({
       <ModalHeader fontWeight="bold">
         {t('modelmanager:modelManager')}
       </ModalHeader>
-      <Flex
-        padding={'0 1.5rem 1.5rem 1.5rem'}
-        width="100%"
-        columnGap={'2rem'}
-      >
+      <Flex padding="0 1.5rem 1.5rem 1.5rem" width="100%" columnGap="2rem">
         <ModelList />
         {openModel && model_list[openModel]['format'] === 'diffusers' ? (
           <DiffusersModelEdit />


@ -52,16 +52,16 @@ function ModelExistsTag() {
const { t } = useTranslation(); const { t } = useTranslation();
return ( return (
<Box <Box
position={'absolute'} position="absolute"
zIndex={2} zIndex={2}
right={4} right={4}
top={4} top={4}
fontSize="0.7rem" fontSize="0.7rem"
fontWeight={'bold'} fontWeight="bold"
backgroundColor={'var(--accent-color)'} backgroundColor="var(--accent-color)"
padding={'0.2rem 0.5rem'} padding="0.2rem 0.5rem"
borderRadius="0.2rem" borderRadius="0.2rem"
alignItems={'center'} alignItems="center"
> >
{t('modelmanager:modelExists')} {t('modelmanager:modelExists')}
</Box> </Box>
@ -96,7 +96,7 @@ function SearchModelEntry({
value={model.name} value={model.name}
label={ label={
<> <>
<VStack alignItems={'start'}> <VStack alignItems="start">
<p style={{ fontWeight: 'bold' }}>{model.name}</p> <p style={{ fontWeight: 'bold' }}>{model.name}</p>
<p style={{ fontStyle: 'italic' }}>{model.location}</p> <p style={{ fontStyle: 'italic' }}>{model.location}</p>
</VStack> </VStack>
@ -105,9 +105,9 @@ function SearchModelEntry({
isChecked={modelsToAdd.includes(model.name)} isChecked={modelsToAdd.includes(model.name)}
isDisabled={existingModels.includes(model.location)} isDisabled={existingModels.includes(model.location)}
onChange={foundModelsChangeHandler} onChange={foundModelsChangeHandler}
padding={'1rem'} padding="1rem"
backgroundColor={'var(--background-color)'} backgroundColor="var(--background-color)"
borderRadius={'0.5rem'} borderRadius="0.5rem"
_checked={{ _checked={{
backgroundColor: 'var(--accent-color)', backgroundColor: 'var(--accent-color)',
color: 'var(--text-color)', color: 'var(--text-color)',
@ -243,12 +243,12 @@ export default function SearchModels() {
<> <>
{searchFolder ? ( {searchFolder ? (
<Flex <Flex
flexDirection={'column'} flexDirection="column"
padding={'1rem'} padding="1rem"
backgroundColor={'var(--background-color)'} backgroundColor="var(--background-color)"
borderRadius="0.5rem" borderRadius="0.5rem"
rowGap={'0.5rem'} rowGap="0.5rem"
position={'relative'} position="relative"
> >
<p <p
style={{ style={{
@ -271,7 +271,7 @@ export default function SearchModels() {
aria-label={t('modelmanager:scanAgain')} aria-label={t('modelmanager:scanAgain')}
tooltip={t('modelmanager:scanAgain')} tooltip={t('modelmanager:scanAgain')}
icon={<BiReset />} icon={<BiReset />}
position={'absolute'} position="absolute"
right={16} right={16}
fontSize={18} fontSize={18}
disabled={isProcessing} disabled={isProcessing}
@ -280,7 +280,7 @@ export default function SearchModels() {
<IAIIconButton <IAIIconButton
aria-label={t('modelmanager:clearCheckpointFolder')} aria-label={t('modelmanager:clearCheckpointFolder')}
icon={<FaPlus style={{ transform: 'rotate(45deg)' }} />} icon={<FaPlus style={{ transform: 'rotate(45deg)' }} />}
position={'absolute'} position="absolute"
right={5} right={5}
onClick={resetSearchModelHandler} onClick={resetSearchModelHandler}
/> />
@ -319,8 +319,8 @@ export default function SearchModels() {
</Formik> </Formik>
)} )}
{foundModels && ( {foundModels && (
<Flex flexDirection={'column'} rowGap={'1rem'}> <Flex flexDirection="column" rowGap="1rem">
<Flex justifyContent={'space-between'} alignItems="center"> <Flex justifyContent="space-between" alignItems="center">
<p> <p>
{t('modelmanager:modelsFound')}: {foundModels.length} {t('modelmanager:modelsFound')}: {foundModels.length}
</p> </p>
@ -328,8 +328,8 @@ export default function SearchModels() {
{t('modelmanager:selected')}: {modelsToAdd.length} {t('modelmanager:selected')}: {modelsToAdd.length}
</p> </p>
</Flex> </Flex>
<Flex columnGap={'0.5rem'} justifyContent={'space-between'}> <Flex columnGap="0.5rem" justifyContent="space-between">
<Flex columnGap={'0.5rem'}> <Flex columnGap="0.5rem">
<IAIButton <IAIButton
isDisabled={modelsToAdd.length === foundModels.length} isDisabled={modelsToAdd.length === foundModels.length}
onClick={addAllToSelected} onClick={addAllToSelected}


@ -7,6 +7,8 @@
div { div {
background-color: var(--progress-bar-color); background-color: var(--progress-bar-color);
transition: width 0.2s ease-in-out;
&[data-indeterminate] { &[data-indeterminate] {
background-color: unset; background-color: unset;
background-image: linear-gradient( background-image: linear-gradient(


@ -206,7 +206,7 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
</div> </div>
<div className="settings-modal-reset"> <div className="settings-modal-reset">
<Heading size={'md'}>{t('settings:resetWebUI')}</Heading> <Heading size="md">{t('settings:resetWebUI')}</Heading>
<Button colorScheme="red" onClick={handleClickResetWebUI}> <Button colorScheme="red" onClick={handleClickResetWebUI}>
{t('settings:resetWebUI')} {t('settings:resetWebUI')}
</Button> </Button>
@ -232,8 +232,8 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
<ModalOverlay bg="blackAlpha.300" backdropFilter="blur(40px)" /> <ModalOverlay bg="blackAlpha.300" backdropFilter="blur(40px)" />
<ModalContent> <ModalContent>
<ModalBody pb={6} pt={6}> <ModalBody pb={6} pt={6}>
<Flex justifyContent={'center'}> <Flex justifyContent="center">
<Text fontSize={'lg'}> <Text fontSize="lg">
<Text>{t('settings:resetComplete')}</Text> <Text>{t('settings:resetComplete')}</Text>
</Text> </Text>
</Flex> </Flex>


@ -56,7 +56,7 @@ const SiteHeader = () => {
<IAIIconButton <IAIIconButton
aria-label={t('modelmanager:modelManager')} aria-label={t('modelmanager:modelManager')}
tooltip={t('modelmanager:modelManager')} tooltip={t('modelmanager:modelManager')}
size={'sm'} size="sm"
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={20} fontSize={20}
@ -68,7 +68,7 @@ const SiteHeader = () => {
<IAIIconButton <IAIIconButton
aria-label={t('common:hotkeysLabel')} aria-label={t('common:hotkeysLabel')}
tooltip={t('common:hotkeysLabel')} tooltip={t('common:hotkeysLabel')}
size={'sm'} size="sm"
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={20} fontSize={20}
@ -86,7 +86,7 @@ const SiteHeader = () => {
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={20} fontSize={20}
size={'sm'} size="sm"
icon={ icon={
<Link isExternal href="http://github.com/invoke-ai/InvokeAI/issues"> <Link isExternal href="http://github.com/invoke-ai/InvokeAI/issues">
<FaBug /> <FaBug />
@ -100,7 +100,7 @@ const SiteHeader = () => {
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={20} fontSize={20}
size={'sm'} size="sm"
icon={ icon={
<Link isExternal href="http://github.com/invoke-ai/InvokeAI"> <Link isExternal href="http://github.com/invoke-ai/InvokeAI">
<FaGithub /> <FaGithub />
@ -114,7 +114,7 @@ const SiteHeader = () => {
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={20} fontSize={20}
size={'sm'} size="sm"
icon={ icon={
<Link isExternal href="https://discord.gg/ZmtBAhwWhy"> <Link isExternal href="https://discord.gg/ZmtBAhwWhy">
<FaDiscord /> <FaDiscord />
@ -129,7 +129,7 @@ const SiteHeader = () => {
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={22} fontSize={22}
size={'sm'} size="sm"
icon={<MdSettings />} icon={<MdSettings />}
/> />
</SettingsModal> </SettingsModal>


@ -46,7 +46,7 @@ export default function ThemeChanger() {
width: '6rem', width: '6rem',
}} }}
leftIcon={currentTheme === theme ? <FaCheck /> : undefined} leftIcon={currentTheme === theme ? <FaCheck /> : undefined}
size={'sm'} size="sm"
onClick={() => handleChangeTheme(theme)} onClick={() => handleChangeTheme(theme)}
key={theme} key={theme}
> >
@ -64,7 +64,7 @@ export default function ThemeChanger() {
triggerComponent={ triggerComponent={
<IAIIconButton <IAIIconButton
aria-label={t('common:themeLabel')} aria-label={t('common:themeLabel')}
size={'sm'} size="sm"
variant="link" variant="link"
data-variant="link" data-variant="link"
fontSize={20} fontSize={20}
@ -72,7 +72,7 @@ export default function ThemeChanger() {
/> />
} }
> >
<VStack align={'stretch'}>{renderThemeOptions()}</VStack> <VStack align="stretch">{renderThemeOptions()}</VStack>
</IAIPopover> </IAIPopover>
); );
} }


@ -35,9 +35,9 @@ export default function InitImagePreview() {
{initialImage && ( {initialImage && (
<div className="init-image-preview"> <div className="init-image-preview">
<Image <Image
fit={'contain'} fit="contain"
maxWidth={'100%'} maxWidth="100%"
maxHeight={'100%'} maxHeight="100%"
src={ src={
typeof initialImage === 'string' ? initialImage : initialImage.url typeof initialImage === 'string' ? initialImage : initialImage.url
} }


@ -9,10 +9,10 @@ export default function InitialImageOverlay() {
return initialImage ? ( return initialImage ? (
<Image <Image
fit={'contain'} fit="contain"
src={typeof initialImage === 'string' ? initialImage : initialImage.url} src={typeof initialImage === 'string' ? initialImage : initialImage.url}
rounded={'md'} rounded="md"
className={'checkerboard'} className="checkerboard"
/> />
) : null; ) : null;
} }


@ -31,32 +31,32 @@ export interface InvokeTabInfo {
export const tabDict: Record<InvokeTabName, InvokeTabInfo> = { export const tabDict: Record<InvokeTabName, InvokeTabInfo> = {
txt2img: { txt2img: {
title: <TextToImageIcon fill={'black'} boxSize={'2.5rem'} />, title: <TextToImageIcon fill="black" boxSize="2.5rem" />,
workarea: <TextToImageWorkarea />, workarea: <TextToImageWorkarea />,
tooltip: 'Text To Image', tooltip: 'Text To Image',
}, },
img2img: { img2img: {
title: <ImageToImageIcon fill={'black'} boxSize={'2.5rem'} />, title: <ImageToImageIcon fill="black" boxSize="2.5rem" />,
workarea: <ImageToImageWorkarea />, workarea: <ImageToImageWorkarea />,
tooltip: 'Image To Image', tooltip: 'Image To Image',
}, },
unifiedCanvas: { unifiedCanvas: {
title: <UnifiedCanvasIcon fill={'black'} boxSize={'2.5rem'} />, title: <UnifiedCanvasIcon fill="black" boxSize="2.5rem" />,
workarea: <UnifiedCanvasWorkarea />, workarea: <UnifiedCanvasWorkarea />,
tooltip: 'Unified Canvas', tooltip: 'Unified Canvas',
}, },
nodes: { nodes: {
title: <NodesIcon fill={'black'} boxSize={'2.5rem'} />, title: <NodesIcon fill="black" boxSize="2.5rem" />,
workarea: <NodesWIP />, workarea: <NodesWIP />,
tooltip: 'Nodes', tooltip: 'Nodes',
}, },
postprocess: { postprocess: {
title: <PostprocessingIcon fill={'black'} boxSize={'2.5rem'} />, title: <PostprocessingIcon fill="black" boxSize="2.5rem" />,
workarea: <PostProcessingWIP />, workarea: <PostProcessingWIP />,
tooltip: 'Post Processing', tooltip: 'Post Processing',
}, },
training: { training: {
title: <TrainingIcon fill={'black'} boxSize={'2.5rem'} />, title: <TrainingIcon fill="black" boxSize="2.5rem" />,
workarea: <TrainingWIP />, workarea: <TrainingWIP />,
tooltip: 'Training', tooltip: 'Training',
}, },
@ -122,7 +122,7 @@ export default function InvokeTabs() {
key={key} key={key}
hasArrow hasArrow
label={tabDict[key as keyof typeof tabDict].tooltip} label={tabDict[key as keyof typeof tabDict].tooltip}
placement={'right'} placement="right"
> >
<Tab>{tabDict[key as keyof typeof tabDict].title}</Tab> <Tab>{tabDict[key as keyof typeof tabDict].title}</Tab>
</Tooltip> </Tooltip>
@ -147,7 +147,7 @@ export default function InvokeTabs() {
<Tabs <Tabs
isLazy isLazy
className="app-tabs" className="app-tabs"
variant={'unstyled'} variant="unstyled"
defaultIndex={activeTab} defaultIndex={activeTab}
index={activeTab} index={activeTab}
onChange={(index: number) => { onChange={(index: number) => {


@ -45,21 +45,16 @@ const UnifiedCanvasDisplayBeta = () => {
}, [dispatch]); }, [dispatch]);
return ( return (
<div className={'workarea-single-view'}> <div className="workarea-single-view">
<Flex <Flex
flexDirection={'row'} flexDirection="row"
width="100%" width="100%"
height="100%" height="100%"
columnGap={'1rem'} columnGap="1rem"
padding="1rem" padding="1rem"
> >
<UnifiedCanvasToolbarBeta /> <UnifiedCanvasToolbarBeta />
<Flex <Flex width="100%" height="100%" flexDirection="column" rowGap="1rem">
width="100%"
height="100%"
flexDirection={'column'}
rowGap={'1rem'}
>
<UnifiedCanvasToolSettingsBeta /> <UnifiedCanvasToolSettingsBeta />
{doesCanvasNeedScaling ? <IAICanvasResizer /> : <IAICanvas />} {doesCanvasNeedScaling ? <IAICanvasResizer /> : <IAICanvas />}
</Flex> </Flex>


@ -4,7 +4,7 @@ import UnifiedCanvasLimitStrokesToBox from './UnifiedCanvasLimitStrokesToBox';
export default function UnifiedCanvasBaseBrushSettings() { export default function UnifiedCanvasBaseBrushSettings() {
return ( return (
<Flex gap={'1rem'} alignItems="center"> <Flex gap="1rem" alignItems="center">
<UnifiedCanvasBrushSettings /> <UnifiedCanvasBrushSettings />
<UnifiedCanvasLimitStrokesToBox /> <UnifiedCanvasLimitStrokesToBox />
</Flex> </Flex>


@ -4,7 +4,7 @@ import UnifiedCanvasColorPicker from './UnifiedCanvasColorPicker';
export default function UnifiedCanvasBrushSettings() { export default function UnifiedCanvasBrushSettings() {
return ( return (
<Flex columnGap={'1rem'} alignItems="center"> <Flex columnGap="1rem" alignItems="center">
<UnifiedCanvasBrushSize /> <UnifiedCanvasBrushSize />
<UnifiedCanvasColorPicker /> <UnifiedCanvasColorPicker />
</Flex> </Flex>


@ -49,7 +49,7 @@ export default function UnifiedCanvasBrushSize() {
onChange={(newSize) => dispatch(setBrushSize(newSize))} onChange={(newSize) => dispatch(setBrushSize(newSize))}
sliderNumberInputProps={{ max: 500 }} sliderNumberInputProps={{ max: 500 }}
inputReadOnly={false} inputReadOnly={false}
width={'100px'} width="100px"
isCompact isCompact
/> />
); );


@ -14,7 +14,7 @@ export default function UnifiedCanvasClearMask() {
return ( return (
<IAIButton <IAIButton
size={'sm'} size="sm"
leftIcon={<FaTrash />} leftIcon={<FaTrash />}
onClick={handleClearMask} onClick={handleClearMask}
tooltip={`${t('unifiedcanvas:clearMask')} (Shift+C)`} tooltip={`${t('unifiedcanvas:clearMask')} (Shift+C)`}


@ -92,7 +92,7 @@ export default function UnifiedCanvasColorPicker() {
/> />
} }
> >
<Flex minWidth={'15rem'} direction={'column'} gap={'1rem'} width={'100%'}> <Flex minWidth="15rem" direction="column" gap="1rem" width="100%">
{layer === 'base' && ( {layer === 'base' && (
<IAIColorPicker <IAIColorPicker
style={{ style={{


@ -6,7 +6,7 @@ import UnifiedCanvasPreserveMask from './UnifiedCanvasPreserveMask';
export default function UnifiedCanvasMaskBrushSettings() { export default function UnifiedCanvasMaskBrushSettings() {
return ( return (
<Flex gap={'1rem'} alignItems="center"> <Flex gap="1rem" alignItems="center">
<UnifiedCanvasBrushSettings /> <UnifiedCanvasBrushSettings />
<UnifiedCanvasEnableMask /> <UnifiedCanvasEnableMask />
<UnifiedCanvasPreserveMask /> <UnifiedCanvasPreserveMask />


@ -5,7 +5,7 @@ import UnifiedCanvasSnapToGrid from './UnifiedCanvasSnapToGrid';
export default function UnifiedCanvasMoveSettings() { export default function UnifiedCanvasMoveSettings() {
return ( return (
<Flex alignItems={'center'} gap="1rem"> <Flex alignItems="center" gap="1rem">
<UnifiedCanvasShowGrid /> <UnifiedCanvasShowGrid />
<UnifiedCanvasSnapToGrid /> <UnifiedCanvasSnapToGrid />
<UnifiedCanvasDarkenOutsideSelection /> <UnifiedCanvasDarkenOutsideSelection />


@ -68,7 +68,7 @@ const UnifiedCanvasSettings = () => {
/> />
} }
> >
<Flex direction={'column'} gap={'0.5rem'}> <Flex direction="column" gap="0.5rem">
<IAICheckbox <IAICheckbox
label={t('unifiedcanvas:showIntermediates')} label={t('unifiedcanvas:showIntermediates')}
isChecked={shouldShowIntermediates} isChecked={shouldShowIntermediates}


@ -28,7 +28,7 @@ export default function UnifiedCanvasToolSettingsBeta() {
const { tool, layer } = useAppSelector(selector); const { tool, layer } = useAppSelector(selector);
return ( return (
<Flex height="2rem" minHeight="2rem" maxHeight="2rem" alignItems={'center'}> <Flex height="2rem" minHeight="2rem" maxHeight="2rem" alignItems="center">
{layer == 'base' && ['brush', 'eraser', 'colorPicker'].includes(tool) && ( {layer == 'base' && ['brush', 'eraser', 'colorPicker'].includes(tool) && (
<UnifiedCanvasBaseBrushSettings /> <UnifiedCanvasBaseBrushSettings />
)} )}


@ -25,7 +25,7 @@ export default function UnifiedCanvasProcessingButtons() {
}; };
return ( return (
<Flex flexDirection={'column'} gap="0.5rem"> <Flex flexDirection="column" gap="0.5rem">
<IAIIconButton <IAIIconButton
tooltip={`${t('parameters:showOptionsPanel')} (O)`} tooltip={`${t('parameters:showOptionsPanel')} (O)`}
tooltipProps={{ placement: 'top' }} tooltipProps={{ placement: 'top' }}
@ -38,7 +38,7 @@ export default function UnifiedCanvasProcessingButtons() {
<InvokeButton iconButton /> <InvokeButton iconButton />
</Flex> </Flex>
<Flex> <Flex>
<CancelButton width={'100%'} height={'40px'} /> <CancelButton width="100%" height="40px" />
</Flex> </Flex>
</Flex> </Flex>
); );


@ -113,7 +113,7 @@ const UnifiedCanvasToolSelect = () => {
const handleEraseBoundingBox = () => dispatch(addEraseRect()); const handleEraseBoundingBox = () => dispatch(addEraseRect());
return ( return (
<Flex flexDirection={'column'} gap={'0.5rem'}> <Flex flexDirection="column" gap="0.5rem">
<ButtonGroup> <ButtonGroup>
<IAIIconButton <IAIIconButton
aria-label={`${t('unifiedcanvas:brush')} (B)`} aria-label={`${t('unifiedcanvas:brush')} (B)`}
@ -155,7 +155,7 @@ const UnifiedCanvasToolSelect = () => {
data-selected={tool === 'colorPicker' && !isStaging} data-selected={tool === 'colorPicker' && !isStaging}
isDisabled={isStaging} isDisabled={isStaging}
onClick={handleSelectColorPickerTool} onClick={handleSelectColorPickerTool}
width={'max-content'} width="max-content"
/> />
</Flex> </Flex>
); );


@ -23,30 +23,30 @@ const UnifiedCanvasToolbarBeta = () => {
); );
return ( return (
<Flex flexDirection={'column'} rowGap="0.5rem" width="6rem"> <Flex flexDirection="column" rowGap="0.5rem" width="6rem">
<UnifiedCanvasLayerSelect /> <UnifiedCanvasLayerSelect />
<UnifiedCanvasToolSelect /> <UnifiedCanvasToolSelect />
<Flex gap={'0.5rem'}> <Flex gap="0.5rem">
<UnifiedCanvasMoveTool /> <UnifiedCanvasMoveTool />
<UnifiedCanvasResetView /> <UnifiedCanvasResetView />
</Flex> </Flex>
<Flex columnGap={'0.5rem'}> <Flex columnGap="0.5rem">
<UnifiedCanvasMergeVisible /> <UnifiedCanvasMergeVisible />
<UnifiedCanvasSaveToGallery /> <UnifiedCanvasSaveToGallery />
</Flex> </Flex>
<Flex columnGap={'0.5rem'}> <Flex columnGap="0.5rem">
<UnifiedCanvasCopyToClipboard /> <UnifiedCanvasCopyToClipboard />
<UnifiedCanvasDownloadImage /> <UnifiedCanvasDownloadImage />
</Flex> </Flex>
<Flex gap={'0.5rem'}> <Flex gap="0.5rem">
<IAICanvasUndoButton /> <IAICanvasUndoButton />
<IAICanvasRedoButton /> <IAICanvasRedoButton />
</Flex> </Flex>
<Flex gap={'0.5rem'}> <Flex gap="0.5rem">
<UnifiedCanvasFileUploader /> <UnifiedCanvasFileUploader />
<UnifiedCanvasResetCanvas /> <UnifiedCanvasResetCanvas />
</Flex> </Flex>


@ -43,7 +43,7 @@ const UnifiedCanvasDisplay = () => {
}, [dispatch]); }, [dispatch]);
return ( return (
<div className={'workarea-single-view'}> <div className="workarea-single-view">
<div className="workarea-split-view-left"> <div className="workarea-split-view-left">
<div className="inpainting-main-area"> <div className="inpainting-main-area">
<IAICanvasOutpaintingControls /> <IAICanvasOutpaintingControls />

File diff suppressed because one or more lines are too long


@ -213,7 +213,9 @@ class Generate:
print('>> xformers not installed') print('>> xformers not installed')
# model caching system for fast switching # model caching system for fast switching
self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models) self.model_manager = ModelManager(mconfig, self.device, self.precision,
max_loaded_models=max_loaded_models,
sequential_offload=self.free_gpu_mem)
# don't accept invalid models # don't accept invalid models
fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME
model = model or fallback model = model or fallback
@ -480,7 +482,6 @@ class Generate:
self.model.cond_stage_model.device = self.model.device self.model.cond_stage_model.device = self.model.device
self.model.cond_stage_model.to(self.model.device) self.model.cond_stage_model.to(self.model.device)
except AttributeError: except AttributeError:
print(">> Warning: '--free_gpu_mem' is not yet supported when generating image using model based on HuggingFace Diffuser.")
pass pass
try: try:


@ -4,39 +4,34 @@ import dataclasses
import inspect import inspect
import psutil import psutil
import secrets import secrets
import sys from collections.abc import Sequence
from dataclasses import dataclass, field from dataclasses import dataclass, field
from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any
if sys.version_info < (3, 10):
from typing_extensions import ParamSpec
else:
from typing import ParamSpec
import PIL.Image import PIL.Image
import einops import einops
import psutil
import torch import torch
import torchvision.transforms as T import torchvision.transforms as T
from diffusers.utils.import_utils import is_xformers_available
from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ...modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter
from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.outputs import BaseOutput from diffusers.utils.outputs import BaseOutput
from torchvision.transforms.functional import resize as tv_resize from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from typing_extensions import ParamSpec
from ldm.invoke.globals import Globals from ldm.invoke.globals import Globals
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
from ldm.modules.textual_inversion_manager import TextualInversionManager from ldm.modules.textual_inversion_manager import TextualInversionManager
from ..offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ...modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter
@dataclass @dataclass
@ -265,6 +260,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
feature_extractor ([`CLIPFeatureExtractor`]): feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`. Model that extracts features from generated images to be used as inputs for the `safety_checker`.
""" """
_model_group: ModelGroup
ID_LENGTH = 8 ID_LENGTH = 8
@ -274,7 +270,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
text_encoder: CLIPTextModel, text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer, tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel, unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], scheduler: KarrasDiffusionSchedulers,
safety_checker: Optional[StableDiffusionSafetyChecker], safety_checker: Optional[StableDiffusionSafetyChecker],
feature_extractor: Optional[CLIPFeatureExtractor], feature_extractor: Optional[CLIPFeatureExtractor],
requires_safety_checker: bool = False, requires_safety_checker: bool = False,
@ -304,8 +300,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
textual_inversion_manager=self.textual_inversion_manager textual_inversion_manager=self.textual_inversion_manager
) )
self._model_group = FullyLoadedModelGroup(self.unet.device)
self._model_group.install(*self._submodels)
def _adjust_memory_efficient_attention(self, latents: Torch.tensor):
def _adjust_memory_efficient_attention(self, latents: torch.Tensor):
""" """
if xformers is available, use it, otherwise use sliced attention. if xformers is available, use it, otherwise use sliced attention.
""" """
@ -323,7 +322,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
elif self.device.type == 'cuda': elif self.device.type == 'cuda':
mem_free, _ = torch.cuda.mem_get_info(self.device) mem_free, _ = torch.cuda.mem_get_info(self.device)
else: else:
raise ValueError(f"unrecognized device {device}") raise ValueError(f"unrecognized device {self.device}")
# input tensor of [1, 4, h/8, w/8] # input tensor of [1, 4, h/8, w/8]
# output tensor of [16, (h/8 * w/8), (h/8 * w/8)] # output tensor of [16, (h/8 * w/8), (h/8 * w/8)]
bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4 bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4
@ -337,6 +336,66 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
self.disable_attention_slicing() self.disable_attention_slicing()
def enable_offload_submodels(self, device: torch.device):
"""
Offload each submodel when it's not in use.
Useful for low-vRAM situations where the size of the model in memory is a big chunk of
the total available resource, and you want to free up as much for inference as possible.
This requires more moving parts and may add some delay as the U-Net is swapped out for the
VAE and vice-versa.
"""
models = self._submodels
if self._model_group is not None:
self._model_group.uninstall(*models)
group = LazilyLoadedModelGroup(device)
group.install(*models)
self._model_group = group
def disable_offload_submodels(self):
"""
Leave all submodels loaded.
Appropriate for cases where the size of the model in memory is small compared to the memory
required for inference. Avoids the delay and complexity of shuffling the submodels to and
from the GPU.
"""
models = self._submodels
if self._model_group is not None:
self._model_group.uninstall(*models)
group = FullyLoadedModelGroup(self._model_group.execution_device)
group.install(*models)
self._model_group = group
def offload_all(self):
"""Offload all this pipeline's models to CPU."""
self._model_group.offload_current()
def ready(self):
"""
Ready this pipeline's models.
i.e. pre-load them to the GPU if appropriate.
"""
self._model_group.ready()
def to(self, torch_device: Optional[Union[str, torch.device]] = None):
if torch_device is None:
return self
self._model_group.set_device(torch_device)
self._model_group.ready()
@property
def device(self) -> torch.device:
return self._model_group.execution_device
@property
def _submodels(self) -> Sequence[torch.nn.Module]:
module_names, _, _ = self.extract_init_dict(dict(self.config))
values = [getattr(self, name) for name in module_names.keys()]
return [m for m in values if isinstance(m, torch.nn.Module)]
def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int, def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
conditioning_data: ConditioningData, conditioning_data: ConditioningData,
*, *,
@ -378,7 +437,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
callback: Callable[[PipelineIntermediateState], None] = None callback: Callable[[PipelineIntermediateState], None] = None
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]: ) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
if timesteps is None: if timesteps is None:
self.scheduler.set_timesteps(num_inference_steps, device=self.unet.device) self.scheduler.set_timesteps(num_inference_steps, device=self._model_group.device_for(self.unet))
timesteps = self.scheduler.timesteps timesteps = self.scheduler.timesteps
infer_latents_from_embeddings = GeneratorToCallbackinator(self.generate_latents_from_embeddings, PipelineIntermediateState) infer_latents_from_embeddings = GeneratorToCallbackinator(self.generate_latents_from_embeddings, PipelineIntermediateState)
result: PipelineIntermediateState = infer_latents_from_embeddings( result: PipelineIntermediateState = infer_latents_from_embeddings(
@ -410,7 +469,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
batch_size = latents.shape[0] batch_size = latents.shape[0]
batched_t = torch.full((batch_size,), timesteps[0], batched_t = torch.full((batch_size,), timesteps[0],
dtype=timesteps.dtype, device=self.unet.device) dtype=timesteps.dtype, device=self._model_group.device_for(self.unet))
latents = self.scheduler.add_noise(latents, noise, batched_t) latents = self.scheduler.add_noise(latents, noise, batched_t)
attention_map_saver: Optional[AttentionMapSaver] = None attention_map_saver: Optional[AttentionMapSaver] = None
@ -494,9 +553,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
initial_image_latents=torch.zeros_like(latents[:1], device=latents.device, dtype=latents.dtype) initial_image_latents=torch.zeros_like(latents[:1], device=latents.device, dtype=latents.dtype)
).add_mask_channels(latents) ).add_mask_channels(latents)
return self.unet(sample=latents, # First three args should be positional, not keywords, so torch hooks can see them.
timestep=t, return self.unet(latents, t, text_embeddings,
encoder_hidden_states=text_embeddings,
cross_attention_kwargs=cross_attention_kwargs).sample cross_attention_kwargs=cross_attention_kwargs).sample
def img2img_from_embeddings(self, def img2img_from_embeddings(self,
@ -515,9 +573,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
init_image = einops.rearrange(init_image, 'c h w -> 1 c h w') init_image = einops.rearrange(init_image, 'c h w -> 1 c h w')
# 6. Prepare latent variables # 6. Prepare latent variables
device = self.unet.device initial_latents = self.non_noised_latents_from_image(
latents_dtype = self.unet.dtype init_image, device=self._model_group.device_for(self.unet),
initial_latents = self.non_noised_latents_from_image(init_image, device=device, dtype=latents_dtype) dtype=self.unet.dtype)
noise = noise_func(initial_latents) noise = noise_func(initial_latents)
return self.img2img_from_latents_and_embeddings(initial_latents, num_inference_steps, return self.img2img_from_latents_and_embeddings(initial_latents, num_inference_steps,
@ -530,7 +588,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
strength, strength,
noise: torch.Tensor, run_id=None, callback=None noise: torch.Tensor, run_id=None, callback=None
) -> InvokeAIStableDiffusionPipelineOutput: ) -> InvokeAIStableDiffusionPipelineOutput:
timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, self.unet.device) timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength,
device=self._model_group.device_for(self.unet))
result_latents, result_attention_maps = self.latents_from_embeddings( result_latents, result_attention_maps = self.latents_from_embeddings(
initial_latents, num_inference_steps, conditioning_data, initial_latents, num_inference_steps, conditioning_data,
timesteps=timesteps, timesteps=timesteps,
@ -569,7 +628,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
run_id=None, run_id=None,
noise_func=None, noise_func=None,
) -> InvokeAIStableDiffusionPipelineOutput: ) -> InvokeAIStableDiffusionPipelineOutput:
device = self.unet.device device = self._model_group.device_for(self.unet)
latents_dtype = self.unet.dtype latents_dtype = self.unet.dtype
if isinstance(init_image, PIL.Image.Image): if isinstance(init_image, PIL.Image.Image):
@ -633,6 +692,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# TODO remove this workaround once kulinseth#222 is merged to pytorch mainline # TODO remove this workaround once kulinseth#222 is merged to pytorch mainline
self.vae.to('cpu') self.vae.to('cpu')
init_image = init_image.to('cpu') init_image = init_image.to('cpu')
else:
self._model_group.load(self.vae)
init_latent_dist = self.vae.encode(init_image).latent_dist init_latent_dist = self.vae.encode(init_image).latent_dist
init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible! init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible!
if device.type == 'mps': if device.type == 'mps':
@ -644,8 +705,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
def check_for_safety(self, output, dtype): def check_for_safety(self, output, dtype):
with torch.inference_mode(): with torch.inference_mode():
screened_images, has_nsfw_concept = self.run_safety_checker( screened_images, has_nsfw_concept = self.run_safety_checker(output.images, dtype=dtype)
output.images, device=self._execution_device, dtype=dtype)
screened_attention_map_saver = None screened_attention_map_saver = None
if has_nsfw_concept is None or not has_nsfw_concept: if has_nsfw_concept is None or not has_nsfw_concept:
screened_attention_map_saver = output.attention_map_saver screened_attention_map_saver = output.attention_map_saver
@ -654,6 +714,12 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# block the attention maps if NSFW content is detected # block the attention maps if NSFW content is detected
attention_map_saver=screened_attention_map_saver) attention_map_saver=screened_attention_map_saver)
def run_safety_checker(self, image, device=None, dtype=None):
# overriding to use the model group for device info instead of requiring the caller to know.
if self.safety_checker is not None:
device = self._model_group.device_for(self.safety_checker)
return super().run_safety_checker(image, device, dtype)
@torch.inference_mode() @torch.inference_mode()
def get_learned_conditioning(self, c: List[List[str]], *, return_tokens=True, fragment_weights=None): def get_learned_conditioning(self, c: List[List[str]], *, return_tokens=True, fragment_weights=None):
""" """
@ -663,7 +729,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
text=c, text=c,
fragment_weights=fragment_weights, fragment_weights=fragment_weights,
should_return_tokens=return_tokens, should_return_tokens=return_tokens,
device=self.device) device=self._model_group.device_for(self.unet))
@property @property
def cond_stage_model(self): def cond_stage_model(self):
@ -684,6 +750,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
"""Compatible with DiffusionWrapper""" """Compatible with DiffusionWrapper"""
return self.unet.in_channels return self.unet.in_channels
def decode_latents(self, latents):
# Explicit call to get the vae loaded, since `decode` isn't the forward method.
self._model_group.load(self.vae)
return super().decode_latents(latents)
def debug_latents(self, latents, msg): def debug_latents(self, latents, msg):
with torch.inference_mode(): with torch.inference_mode():
from ldm.util import debug_image from ldm.util import debug_image
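Editor's note: the pipeline hunks above introduce a small offload API (enable_offload_submodels, disable_offload_submodels, offload_all, ready, to). Below is a minimal sketch, not part of the diff, of how a caller might drive it on an already-constructed StableDiffusionGeneratorPipeline; the helper function name and the low_vram flag are illustrative.

import torch

def place_pipeline(pipeline, low_vram: bool):
    """Choose between sequential submodel offload and fully resident submodels."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if low_vram:
        # Each submodel (text encoder, U-Net, VAE, ...) is moved to `device` only
        # while its forward pass runs; the others wait on the CPU.
        pipeline.enable_offload_submodels(device)
    else:
        # Keep every submodel resident on `device` for the pipeline's lifetime.
        pipeline.to(device)
    pipeline.ready()  # pre-load whatever the current strategy keeps resident
    return pipeline

# When generation is finished, everything can be pushed back to the CPU at once:
# pipeline.offload_all()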


@ -26,7 +26,6 @@ import torch
import transformers import transformers
from diffusers import AutoencoderKL from diffusers import AutoencoderKL
from diffusers import logging as dlogging from diffusers import logging as dlogging
from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
from huggingface_hub import scan_cache_dir from huggingface_hub import scan_cache_dir
from omegaconf import OmegaConf from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig from omegaconf.dictconfig import DictConfig
@ -49,9 +48,10 @@ class ModelManager(object):
def __init__( def __init__(
self, self,
config: OmegaConf, config: OmegaConf,
device_type: str = "cpu", device_type: str | torch.device = "cpu",
precision: str = "float16", precision: str = "float16",
max_loaded_models=DEFAULT_MAX_MODELS, max_loaded_models=DEFAULT_MAX_MODELS,
sequential_offload = False
): ):
""" """
Initialize with the path to the models.yaml config file, Initialize with the path to the models.yaml config file,
@ -69,6 +69,7 @@ class ModelManager(object):
self.models = {} self.models = {}
self.stack = [] # this is an LRU FIFO self.stack = [] # this is an LRU FIFO
self.current_model = None self.current_model = None
self.sequential_offload = sequential_offload
def valid_model(self, model_name: str) -> bool: def valid_model(self, model_name: str) -> bool:
""" """
@ -530,6 +531,9 @@ class ModelManager(object):
dlogging.set_verbosity(verbosity) dlogging.set_verbosity(verbosity)
assert pipeline is not None, OSError(f'"{name_or_path}" could not be loaded') assert pipeline is not None, OSError(f'"{name_or_path}" could not be loaded')
if self.sequential_offload:
pipeline.enable_offload_submodels(self.device)
else:
pipeline.to(self.device) pipeline.to(self.device)
model_hash = self._diffuser_sha256(name_or_path) model_hash = self._diffuser_sha256(name_or_path)
@ -746,7 +750,7 @@ class ModelManager(object):
return return
if model_path.stem in self.config: #already imported if model_path.stem in self.config: #already imported
print(f' > Already imported. Skipping') print(' > Already imported. Skipping')
return return
# another round of heuristics to guess the correct config file. # another round of heuristics to guess the correct config file.
@ -755,7 +759,7 @@ class ModelManager(object):
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
print(f' > SD-v2 model detected; model will be converted to diffusers format') print(' > SD-v2 model detected; model will be converted to diffusers format')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml') model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml')
convert = True convert = True
@ -765,10 +769,10 @@ class ModelManager(object):
state_dict = checkpoint.get('state_dict') or checkpoint state_dict = checkpoint.get('state_dict') or checkpoint
in_channels = state_dict['model.diffusion_model.input_blocks.0.0.weight'].shape[1] in_channels = state_dict['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
if in_channels == 9: if in_channels == 9:
print(f' > SD-v1 inpainting model detected') print(' > SD-v1 inpainting model detected')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml')
elif in_channels == 4: elif in_channels == 4:
print(f' > SD-v1 model detected') print(' > SD-v1 model detected')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml') model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
else: else:
print(f'** {thing} does not have an expected number of in_channels ({in_channels}). It will probably break when loaded.') print(f'** {thing} does not have an expected number of in_channels ({in_channels}). It will probably break when loaded.')
@ -1094,12 +1098,12 @@ class ModelManager(object):
if self.device == "cpu": if self.device == "cpu":
return model return model
# diffusers really really doesn't like us moving a float16 model onto CPU if isinstance(model, StableDiffusionGeneratorPipeline):
verbosity = get_verbosity() model.offload_all()
set_verbosity_error() return model
model.cond_stage_model.device = "cpu" model.cond_stage_model.device = "cpu"
model.to("cpu") model.to("cpu")
set_verbosity(verbosity)
for submodel in ("first_stage_model", "cond_stage_model", "model"): for submodel in ("first_stage_model", "cond_stage_model", "model"):
try: try:
@ -1112,6 +1116,10 @@ class ModelManager(object):
if self.device == "cpu": if self.device == "cpu":
return model return model
if isinstance(model, StableDiffusionGeneratorPipeline):
model.ready()
return model
model.to(self.device) model.to(self.device)
model.cond_stage_model.device = self.device model.cond_stage_model.device = self.device
@ -1267,7 +1275,7 @@ class ModelManager(object):
strategy.execute() strategy.execute()
@staticmethod @staticmethod
def _abs_path(path: Union(str, Path)) -> Path: def _abs_path(path: str | Path) -> Path:
if path is None or Path(path).is_absolute(): if path is None or Path(path).is_absolute():
return path return path
return Path(Globals.root, path).resolve() return Path(Globals.root, path).resolve()
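Editor's note: taken together with the Generate change earlier in this diff, the new sequential_offload flag flows from --free_gpu_mem into ModelManager and from there into how each diffusers pipeline is placed on the device. A rough sketch under stated assumptions: the ModelManager import path and the models.yaml location are assumed, and the loader body is condensed because its method name is not visible in the hunk above.

import torch
from omegaconf import OmegaConf
from ldm.invoke.model_manager import ModelManager  # module path assumed, not shown in this diff

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mconfig = OmegaConf.load("configs/models.yaml")  # illustrative config path

# Mirrors Generate.__init__: free_gpu_mem=True becomes sequential_offload=True.
manager = ModelManager(
    mconfig,
    device,
    "float16",
    max_loaded_models=2,
    sequential_offload=True,
)

def place(manager, pipeline):
    # Condensed from the loader hunk above: the flag decides whether the pipeline
    # offloads submodels sequentially or moves everything to the device at once.
    if manager.sequential_offload:
        pipeline.enable_offload_submodels(manager.device)
    else:
        pipeline.to(manager.device)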

ldm/invoke/offloading.py (new file, 247 lines)

@ -0,0 +1,247 @@
from __future__ import annotations
import warnings
import weakref
from abc import ABCMeta, abstractmethod
from collections.abc import MutableMapping
from typing import Callable
import torch
from accelerate.utils import send_to_device
from torch.utils.hooks import RemovableHandle
OFFLOAD_DEVICE = torch.device("cpu")
class _NoModel:
"""Symbol that indicates no model is loaded.
(We can't weakref.ref(None), so this was my best idea at the time to come up with something
type-checkable.)
"""
def __bool__(self):
return False
def to(self, device: torch.device):
pass
def __repr__(self):
return "<NO MODEL>"
NO_MODEL = _NoModel()
class ModelGroup(metaclass=ABCMeta):
"""
A group of models.
The use case I had in mind when writing this is the sub-models used by a DiffusionPipeline,
e.g. its text encoder, U-net, VAE, etc.
Those models are :py:class:`diffusers.ModelMixin`, but "model" is interchangeable with
:py:class:`torch.nn.Module` here.
"""
def __init__(self, execution_device: torch.device):
self.execution_device = execution_device
@abstractmethod
def install(self, *models: torch.nn.Module):
"""Add models to this group."""
pass
@abstractmethod
def uninstall(self, models: torch.nn.Module):
"""Remove models from this group."""
pass
@abstractmethod
def uninstall_all(self):
"""Remove all models from this group."""
@abstractmethod
def load(self, model: torch.nn.Module):
"""Load this model to the execution device."""
pass
@abstractmethod
def offload_current(self):
"""Offload the current model(s) from the execution device."""
pass
@abstractmethod
def ready(self):
"""Ready this group for use."""
pass
@abstractmethod
def set_device(self, device: torch.device):
"""Change which device models from this group will execute on."""
pass
@abstractmethod
def device_for(self, model) -> torch.device:
"""Get the device the given model will execute on.
The model should already be a member of this group.
"""
pass
@abstractmethod
def __contains__(self, model):
"""Check if the model is a member of this group."""
pass
def __repr__(self) -> str:
return f"<{self.__class__.__name__} object at {id(self):x}: " \
f"device={self.execution_device} >"
class LazilyLoadedModelGroup(ModelGroup):
"""
Only one model from this group is loaded on the GPU at a time.
Running the forward method of a model will displace the previously-loaded model,
offloading it to CPU.
If you call other methods on the model, e.g. ``model.encode(x)`` instead of ``model(x)``,
you will need to explicitly load it with :py:method:`.load(model)`.
This implementation relies on pytorch forward-pre-hooks, and it will copy forward arguments
to the appropriate execution device, as long as they are positional arguments and not keyword
arguments. (I didn't make the rules; that's the way the pytorch 1.13 API works for hooks.)
"""
_hooks: MutableMapping[torch.nn.Module, RemovableHandle]
_current_model_ref: Callable[[], torch.nn.Module | _NoModel]
def __init__(self, execution_device: torch.device):
super().__init__(execution_device)
self._hooks = weakref.WeakKeyDictionary()
self._current_model_ref = weakref.ref(NO_MODEL)
def install(self, *models: torch.nn.Module):
for model in models:
self._hooks[model] = model.register_forward_pre_hook(self._pre_hook)
def uninstall(self, *models: torch.nn.Module):
for model in models:
hook = self._hooks.pop(model)
hook.remove()
if self.is_current_model(model):
# no longer hooked by this object, so don't claim to manage it
self.clear_current_model()
def uninstall_all(self):
self.uninstall(*self._hooks.keys())
def _pre_hook(self, module: torch.nn.Module, forward_input):
self.load(module)
if len(forward_input) == 0:
warnings.warn(f"Hook for {module.__class__.__name__} got no input. "
f"Inputs must be positional, not keywords.", stacklevel=3)
return send_to_device(forward_input, self.execution_device)
def load(self, module):
if not self.is_current_model(module):
self.offload_current()
self._load(module)
def offload_current(self):
module = self._current_model_ref()
if module is not NO_MODEL:
module.to(device=OFFLOAD_DEVICE)
self.clear_current_model()
def _load(self, module: torch.nn.Module) -> torch.nn.Module:
assert self.is_empty(), f"A model is already loaded: {self._current_model_ref()}"
module = module.to(self.execution_device)
self.set_current_model(module)
return module
def is_current_model(self, model: torch.nn.Module) -> bool:
"""Is the given model the one currently loaded on the execution device?"""
return self._current_model_ref() is model
def is_empty(self):
"""Are none of this group's models loaded on the execution device?"""
return self._current_model_ref() is NO_MODEL
def set_current_model(self, value):
self._current_model_ref = weakref.ref(value)
def clear_current_model(self):
self._current_model_ref = weakref.ref(NO_MODEL)
def set_device(self, device: torch.device):
if device == self.execution_device:
return
self.execution_device = device
current = self._current_model_ref()
if current is not NO_MODEL:
current.to(device)
def device_for(self, model):
if model not in self:
raise KeyError(f"This does not manage this model {type(model).__name__}", model)
return self.execution_device # this implementation only dispatches to one device
def ready(self):
pass # always ready to load on-demand
def __contains__(self, model):
return model in self._hooks
def __repr__(self) -> str:
return f"<{self.__class__.__name__} object at {id(self):x}: " \
f"current_model={type(self._current_model_ref()).__name__} >"
class FullyLoadedModelGroup(ModelGroup):
"""
A group of models without any implicit loading or unloading.
:py:meth:`.ready` loads _all_ the models to the execution device at once.
"""
_models: weakref.WeakSet
def __init__(self, execution_device: torch.device):
super().__init__(execution_device)
self._models = weakref.WeakSet()
def install(self, *models: torch.nn.Module):
for model in models:
self._models.add(model)
model.to(device=self.execution_device)
def uninstall(self, *models: torch.nn.Module):
for model in models:
self._models.remove(model)
def uninstall_all(self):
self.uninstall(*self._models)
def load(self, model):
model.to(device=self.execution_device)
def offload_current(self):
for model in self._models:
model.to(device=OFFLOAD_DEVICE)
def ready(self):
for model in self._models:
self.load(model)
def set_device(self, device: torch.device):
self.execution_device = device
for model in self._models:
if model.device != OFFLOAD_DEVICE:
model.to(device=device)
def device_for(self, model):
if model not in self:
raise KeyError("This does not manage this model f{type(model).__name__}", model)
return self.execution_device # this implementation only dispatches to one device
def __contains__(self, model):
return model in self._models
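Editor's note: a short usage sketch for the two strategies defined above, using toy modules rather than real pipeline submodels. It assumes the classes are importable from the new ldm/invoke/offloading.py and falls back to CPU when no GPU is present.

import torch
from ldm.invoke.offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
unet_like = torch.nn.Linear(4, 4)
vae_like = torch.nn.Linear(4, 4)

# Lazy strategy: only the module whose forward() is running occupies `device`.
lazy = LazilyLoadedModelGroup(device)
lazy.install(unet_like, vae_like)
_ = unet_like(torch.zeros(1, 4))     # pre-hook loads unet_like and moves the input
_ = vae_like(torch.zeros(1, 4))      # unet_like is offloaded, vae_like takes its place
lazy.uninstall(unet_like, vae_like)  # drop the hooks before switching strategies

# Full strategy: install() and ready() keep every module resident on `device`.
full = FullyLoadedModelGroup(device)
full.install(unet_like, vae_like)
full.ready()
assert full.device_for(vae_like) == device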


@ -214,7 +214,7 @@ class WeightedPromptFragmentsToEmbeddingsConverter():
def build_weighted_embedding_tensor(self, token_ids: torch.Tensor, per_token_weights: torch.Tensor) -> torch.Tensor: def build_weighted_embedding_tensor(self, token_ids: torch.Tensor, per_token_weights: torch.Tensor) -> torch.Tensor:
''' '''
Build a tensor that embeds the passed-in token IDs and applyies the given per_token weights Build a tensor that embeds the passed-in token IDs and applies the given per_token weights
:param token_ids: A tensor of shape `[self.max_length]` containing token IDs (ints) :param token_ids: A tensor of shape `[self.max_length]` containing token IDs (ints)
:param per_token_weights: A tensor of shape `[self.max_length]` containing weights (floats) :param per_token_weights: A tensor of shape `[self.max_length]` containing weights (floats)
:return: A tensor of shape `[1, self.max_length, token_dim]` representing the requested weighted embeddings :return: A tensor of shape `[1, self.max_length, token_dim]` representing the requested weighted embeddings
@ -224,13 +224,12 @@ class WeightedPromptFragmentsToEmbeddingsConverter():
if token_ids.shape != torch.Size([self.max_length]): if token_ids.shape != torch.Size([self.max_length]):
raise ValueError(f"token_ids has shape {token_ids.shape} - expected [{self.max_length}]") raise ValueError(f"token_ids has shape {token_ids.shape} - expected [{self.max_length}]")
z = self.text_encoder.forward(input_ids=token_ids.unsqueeze(0), z = self.text_encoder(token_ids.unsqueeze(0), return_dict=False)[0]
return_dict=False)[0]
empty_token_ids = torch.tensor([self.tokenizer.bos_token_id] + empty_token_ids = torch.tensor([self.tokenizer.bos_token_id] +
[self.tokenizer.pad_token_id] * (self.max_length-2) + [self.tokenizer.pad_token_id] * (self.max_length-2) +
[self.tokenizer.eos_token_id], dtype=torch.int, device=token_ids.device).unsqueeze(0) [self.tokenizer.eos_token_id], dtype=torch.int, device=z.device).unsqueeze(0)
empty_z = self.text_encoder(input_ids=empty_token_ids).last_hidden_state empty_z = self.text_encoder(empty_token_ids).last_hidden_state
batch_weights_expanded = per_token_weights.reshape(per_token_weights.shape + (1,)).expand(z.shape) batch_weights_expanded = per_token_weights.reshape(per_token_weights.shape + (1,)).expand(z.shape).to(z)
z_delta_from_empty = z - empty_z z_delta_from_empty = z - empty_z
weighted_z = empty_z + (z_delta_from_empty * batch_weights_expanded) weighted_z = empty_z + (z_delta_from_empty * batch_weights_expanded)
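Editor's note: the converter change above calls the text encoder positionally (so the offloading forward-pre-hooks see the input) and moves the per-token weights onto z's device and dtype; the weighting math itself is unchanged. A toy example of that math with made-up numbers:

import torch

z = torch.tensor([[[1.0, 1.0], [3.0, 3.0]]])   # embeddings for 2 tokens, dim 2
empty_z = torch.zeros_like(z)                  # stand-in for the empty-prompt embedding
per_token_weights = torch.tensor([1.0, 0.5])

batch_weights_expanded = per_token_weights.reshape(
    per_token_weights.shape + (1,)).expand(z.shape).to(z)
weighted_z = empty_z + ((z - empty_z) * batch_weights_expanded)
# Token 0 keeps its embedding; token 1 is pulled halfway toward the empty prompt:
# tensor([[[1.0, 1.0], [1.5, 1.5]]])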