Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

Merge branch 'main' into install/refactor-configure-and-model-select
This commit is contained in commit f3351a5e47.

.github/workflows/build-container.yml (vendored): 9 changed lines
@@ -5,8 +5,17 @@ on:
- 'main'
- 'update/ci/docker/*'
- 'update/docker/*'
paths:
- '/pyproject.toml'
- '/ldm/**'
- '/invokeai/backend/**'
- '/invokeai/configs/**'
- '/invokeai/frontend/dist/**'
- '/docker/Dockerfile'
tags:
- 'v*.*.*'
workflow_dispatch:

jobs:
docker:
.github/workflows/test-invoke-pip-skip.yml (vendored, new file): 67 changed lines
@@ -0,0 +1,67 @@
name: Test invoke.py pip
on:
pull_request:
paths-ignore:
- '/pyproject.toml'
- '/ldm/**'
- '/invokeai/backend/**'
- '/invokeai/configs/**'
- '/invokeai/frontend/dist/**'
merge_group:
workflow_dispatch:

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
matrix:
if: github.event.pull_request.draft == false
strategy:
matrix:
python-version:
# - '3.9'
- '3.10'
pytorch:
# - linux-cuda-11_6
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
# - windows-cuda-11_6
# - windows-cuda-11_7
include:
# - pytorch: linux-cuda-11_6
#   os: ubuntu-22.04
#   extra-index-url: 'https://download.pytorch.org/whl/cu116'
#   github-env: $GITHUB_ENV
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- pytorch: linux-cpu
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- pytorch: macos-default
os: macOS-12
github-env: $GITHUB_ENV
- pytorch: windows-cpu
os: windows-2022
github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_6
#   os: windows-2022
#   extra-index-url: 'https://download.pytorch.org/whl/cu116'
#   github-env: $env:GITHUB_ENV
# - pytorch: windows-cuda-11_7
#   os: windows-2022
#   extra-index-url: 'https://download.pytorch.org/whl/cu117'
#   github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
steps:
- run: 'echo "No build required"'
.github/workflows/test-invoke-pip.yml (vendored): 12 changed lines
@@ -3,7 +3,19 @@ on:
push:
branches:
- 'main'
paths:
- '/pyproject.toml'
- '/ldm/**'
- '/invokeai/backend/**'
- '/invokeai/configs/**'
- '/invokeai/frontend/dist/**'
pull_request:
paths:
- '/pyproject.toml'
- '/ldm/**'
- '/invokeai/backend/**'
- '/invokeai/configs/**'
- '/invokeai/frontend/dist/**'
types:
- 'ready_for_review'
- 'opened'
@@ -13,7 +13,7 @@
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]

[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
@@ -3,3 +3,4 @@ dist/
node_modules/
patches/
public/
stats.html

@@ -3,3 +3,4 @@ dist/
node_modules/
patches/
public/
stats.html
invokeai/frontend/dist/assets/index-c1af841f.css (vendored, new file): 1 changed line
File diff suppressed because one or more lines are too long
invokeai/frontend/dist/index.html (vendored): 4 changed lines
@@ -5,8 +5,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-6b9f1e33.js"></script>
<link rel="stylesheet" href="./assets/index-fecb6dd4.css">
<script type="module" crossorigin src="./assets/index-12bd70ca.js"></script>
<link rel="stylesheet" href="./assets/index-c1af841f.css">
</head>

<body>
@@ -3,8 +3,8 @@ import { Flex, Spinner } from '@chakra-ui/react';
const Loading = () => {
return (
<Flex
width={'100vw'}
height={'100vh'}
width="100vw"
height="100vh"
alignItems="center"
justifyContent="center"
>
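Most of the frontend hunks in this commit apply the same mechanical change: JSX props whose values are plain string literals are rewritten from the expression form prop={'value'} to the attribute form prop="value", which behaves identically at runtime. A minimal sketch of the pattern, modeled loosely on the Loading component above (the Spinner child and its props are illustrative assumptions, not taken from the diff):

import { Flex, Spinner } from '@chakra-ui/react';

const Loading = () => {
  return (
    // Before: width={'100vw'} height={'100vh'} (string literals wrapped in JSX expression braces).
    // After: plain string attributes, which render the same but read more cleanly.
    <Flex width="100vw" height="100vh" alignItems="center" justifyContent="center">
      {/* Spinner props here are assumed for illustration only. */}
      <Spinner size="xl" speed="0.65s" />
    </Flex>
  );
};

export default Loading;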
@@ -13,7 +13,7 @@ const GuideIcon = forwardRef(
({ feature, icon = MdHelp }: GuideIconProps, ref) => (
<GuidePopover feature={feature}>
<Box ref={ref}>
<Icon marginBottom={'-.15rem'} as={icon} />
<Icon marginBottom="-.15rem" as={icon} />
</Box>
</GuidePopover>
)
@@ -29,15 +29,15 @@ const GuidePopover = ({ children, feature }: GuideProps) => {
if (!shouldDisplayGuides) return null;

return (
<Popover trigger={'hover'}>
<Popover trigger="hover">
<PopoverTrigger>
<Box>{children}</Box>
</PopoverTrigger>
<PopoverContent
className={`guide-popover-content`}
className="guide-popover-content"
maxWidth="400px"
onClick={(e) => e.preventDefault()}
cursor={'initial'}
cursor="initial"
>
<PopoverArrow className="guide-popover-arrow" />
<div className="guide-popover-guide-content">{text}</div>
@@ -169,7 +169,7 @@ export default function IAISlider(props: IAIFullSliderProps) {
{label}
</FormLabel>

<HStack w={'100%'} gap={2} alignItems="center">
<HStack w="100%" gap={2} alignItems="center">
<Slider
aria-label={label}
value={value}
@@ -259,9 +259,9 @@ export default function IAISlider(props: IAIFullSliderProps) {

{withReset && (
<IAIIconButton
size={'sm'}
aria-label={'Reset'}
tooltip={'Reset'}
size="sm"
aria-label="Reset"
tooltip="Reset"
icon={<BiReset />}
onClick={handleResetDisable}
isDisabled={isResetDisabled}
@@ -24,13 +24,13 @@ const ImageUploadOverlay = (props: ImageUploadOverlayProps) => {
<div className="dropzone-container">
{isDragAccept && (
<div className="dropzone-overlay is-drag-accept">
<Heading size={'lg'}>Upload Image{overlaySecondaryText}</Heading>
<Heading size="lg">Upload Image{overlaySecondaryText}</Heading>
</div>
)}
{isDragReject && (
<div className="dropzone-overlay is-drag-reject">
<Heading size={'lg'}>Invalid Upload</Heading>
<Heading size={'md'}>Must be single JPEG or PNG image</Heading>
<Heading size="lg">Invalid Upload</Heading>
<Heading size="md">Must be single JPEG or PNG image</Heading>
</div>
)}
</div>
@@ -22,7 +22,7 @@ const ImageUploaderButton = (props: ImageUploaderButtonProps) => {
>
<div className="image-upload-button">
<FaUpload />
<Heading size={'lg'}>Click or Drag and Drop</Heading>
<Heading size="lg">Click or Drag and Drop</Heading>
</div>
</div>
);
@@ -17,7 +17,7 @@ const ClearCanvasHistoryButtonModal = () => {
acceptCallback={() => dispatch(clearCanvasHistory())}
acceptButtonText={t('unifiedcanvas:clearHistory')}
triggerComponent={
<IAIButton size={'sm'} leftIcon={<FaTrash />} isDisabled={isStaging}>
<IAIButton size="sm" leftIcon={<FaTrash />} isDisabled={isStaging}>
{t('unifiedcanvas:clearCanvasHistory')}
</IAIButton>
}
@@ -140,7 +140,7 @@ const IAICanvas = () => {
<Stage
tabIndex={-1}
ref={canvasStageRefCallback}
className={'inpainting-canvas-stage'}
className="inpainting-canvas-stage"
style={{
...(stageCursor ? { cursor: stageCursor } : {}),
}}
@@ -165,19 +165,19 @@ const IAICanvas = () => {
onWheel={handleWheel}
draggable={(tool === 'move' || isStaging) && !isModifyingBoundingBox}
>
<Layer id={'grid'} visible={shouldShowGrid}>
<Layer id="grid" visible={shouldShowGrid}>
<IAICanvasGrid />
</Layer>

<Layer
id={'base'}
id="base"
ref={canvasBaseLayerRefCallback}
listening={false}
imageSmoothingEnabled={false}
>
<IAICanvasObjectRenderer />
</Layer>
<Layer id={'mask'} visible={isMaskEnabled} listening={false}>
<Layer id="mask" visible={isMaskEnabled} listening={false}>
<IAICanvasMaskLines visible={true} listening={false} />
<IAICanvasMaskCompositer listening={false} />
</Layer>
@ -49,7 +49,7 @@ const IAICanvasBoundingBoxOverlay = () => {
|
||||
offsetY={stageCoordinates.y / stageScale}
|
||||
height={stageDimensions.height / stageScale}
|
||||
width={stageDimensions.width / stageScale}
|
||||
fill={'rgba(0,0,0,0.4)'}
|
||||
fill="rgba(0,0,0,0.4)"
|
||||
listening={false}
|
||||
visible={shouldDarkenOutsideBoundingBox}
|
||||
/>
|
||||
@ -58,10 +58,10 @@ const IAICanvasBoundingBoxOverlay = () => {
|
||||
y={boundingBoxCoordinates.y}
|
||||
width={boundingBoxDimensions.width}
|
||||
height={boundingBoxDimensions.height}
|
||||
fill={'rgb(255,255,255)'}
|
||||
fill="rgb(255,255,255)"
|
||||
listening={false}
|
||||
visible={shouldDarkenOutsideBoundingBox}
|
||||
globalCompositeOperation={'destination-out'}
|
||||
globalCompositeOperation="destination-out"
|
||||
/>
|
||||
</Group>
|
||||
);
|
||||
|
@ -163,10 +163,10 @@ const IAICanvasMaskCompositer = (props: IAICanvasMaskCompositerProps) => {
|
||||
width={stageDimensions.width / stageScale}
|
||||
fillPatternImage={fillPatternImage}
|
||||
fillPatternOffsetY={!isNumber(offset) ? 0 : offset}
|
||||
fillPatternRepeat={'repeat'}
|
||||
fillPatternRepeat="repeat"
|
||||
fillPatternScale={{ x: 1 / stageScale, y: 1 / stageScale }}
|
||||
listening={true}
|
||||
globalCompositeOperation={'source-in'}
|
||||
globalCompositeOperation="source-in"
|
||||
{...rest}
|
||||
/>
|
||||
);
|
||||
|
@ -36,7 +36,7 @@ const IAICanvasLines = (props: InpaintingCanvasLinesProps) => {
|
||||
<Line
|
||||
key={i}
|
||||
points={line.points}
|
||||
stroke={'rgb(0,0,0)'} // The lines can be any color, just need alpha > 0
|
||||
stroke="rgb(0,0,0)" // The lines can be any color, just need alpha > 0
|
||||
strokeWidth={line.strokeWidth * 2}
|
||||
tension={0}
|
||||
lineCap="round"
|
||||
|
@ -93,8 +93,8 @@ const IAICanvasObjectRenderer = () => {
|
||||
y={obj.y}
|
||||
width={obj.width}
|
||||
height={obj.height}
|
||||
fill={'rgb(255, 255, 255)'}
|
||||
globalCompositeOperation={'destination-out'}
|
||||
fill="rgb(255, 255, 255)"
|
||||
globalCompositeOperation="destination-out"
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ const IAICanvasStagingArea = (props: Props) => {
|
||||
width={width}
|
||||
height={height}
|
||||
strokeWidth={1}
|
||||
stroke={'white'}
|
||||
stroke="white"
|
||||
strokeScaleEnabled={false}
|
||||
/>
|
||||
<Rect
|
||||
@ -77,7 +77,7 @@ const IAICanvasStagingArea = (props: Props) => {
|
||||
height={height}
|
||||
dash={[4, 4]}
|
||||
strokeWidth={1}
|
||||
stroke={'black'}
|
||||
stroke="black"
|
||||
strokeScaleEnabled={false}
|
||||
/>
|
||||
</Group>
|
||||
|
@ -114,11 +114,11 @@ const IAICanvasStagingAreaToolbar = () => {
|
||||
|
||||
return (
|
||||
<Flex
|
||||
pos={'absolute'}
|
||||
bottom={'1rem'}
|
||||
w={'100%'}
|
||||
align={'center'}
|
||||
justify={'center'}
|
||||
pos="absolute"
|
||||
bottom="1rem"
|
||||
w="100%"
|
||||
align="center"
|
||||
justify="center"
|
||||
filter="drop-shadow(0 0.5rem 1rem rgba(0,0,0))"
|
||||
onMouseOver={handleMouseOver}
|
||||
onMouseOut={handleMouseOut}
|
||||
|
@ -172,7 +172,7 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
|
||||
x={brushX}
|
||||
y={brushY}
|
||||
radius={radius}
|
||||
stroke={'rgba(255,255,255,0.4)'}
|
||||
stroke="rgba(255,255,255,0.4)"
|
||||
strokeWidth={strokeWidth * 2}
|
||||
strokeEnabled={true}
|
||||
listening={false}
|
||||
@ -181,7 +181,7 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
|
||||
x={brushX}
|
||||
y={brushY}
|
||||
radius={radius}
|
||||
stroke={'rgba(0,0,0,1)'}
|
||||
stroke="rgba(0,0,0,1)"
|
||||
strokeWidth={strokeWidth}
|
||||
strokeEnabled={true}
|
||||
listening={false}
|
||||
@ -192,14 +192,14 @@ const IAICanvasToolPreview = (props: GroupConfig) => {
|
||||
x={brushX}
|
||||
y={brushY}
|
||||
radius={dotRadius * 2}
|
||||
fill={'rgba(255,255,255,0.4)'}
|
||||
fill="rgba(255,255,255,0.4)"
|
||||
listening={false}
|
||||
/>
|
||||
<Circle
|
||||
x={brushX}
|
||||
y={brushY}
|
||||
radius={dotRadius}
|
||||
fill={'rgba(0,0,0,1)'}
|
||||
fill="rgba(0,0,0,1)"
|
||||
listening={false}
|
||||
/>
|
||||
</Group>
|
||||
|
@ -269,12 +269,12 @@ const IAICanvasBoundingBox = (props: IAICanvasBoundingBoxPreviewProps) => {
|
||||
<Transformer
|
||||
anchorCornerRadius={3}
|
||||
anchorDragBoundFunc={anchorDragBoundFunc}
|
||||
anchorFill={'rgba(212,216,234,1)'}
|
||||
anchorFill="rgba(212,216,234,1)"
|
||||
anchorSize={15}
|
||||
anchorStroke={'rgb(42,42,42)'}
|
||||
anchorStroke="rgb(42,42,42)"
|
||||
borderDash={[4, 4]}
|
||||
borderEnabled={true}
|
||||
borderStroke={'black'}
|
||||
borderStroke="black"
|
||||
draggable={false}
|
||||
enabledAnchors={tool === 'move' ? undefined : []}
|
||||
flipEnabled={false}
|
||||
|
@ -121,7 +121,7 @@ const IAICanvasMaskOptions = () => {
|
||||
</ButtonGroup>
|
||||
}
|
||||
>
|
||||
<Flex direction={'column'} gap={'0.5rem'}>
|
||||
<Flex direction="column" gap="0.5rem">
|
||||
<IAICheckbox
|
||||
label={`${t('unifiedcanvas:enableMask')} (H)`}
|
||||
isChecked={isMaskEnabled}
|
||||
@ -139,7 +139,7 @@ const IAICanvasMaskOptions = () => {
|
||||
color={maskColor}
|
||||
onChange={(newColor) => dispatch(setMaskColor(newColor))}
|
||||
/>
|
||||
<IAIButton size={'sm'} leftIcon={<FaTrash />} onClick={handleClearMask}>
|
||||
<IAIButton size="sm" leftIcon={<FaTrash />} onClick={handleClearMask}>
|
||||
{t('unifiedcanvas:clearMask')} (Shift+C)
|
||||
</IAIButton>
|
||||
</Flex>
|
||||
|
@ -97,7 +97,7 @@ const IAICanvasSettingsButtonPopover = () => {
|
||||
/>
|
||||
}
|
||||
>
|
||||
<Flex direction={'column'} gap={'0.5rem'}>
|
||||
<Flex direction="column" gap="0.5rem">
|
||||
<IAICheckbox
|
||||
label={t('unifiedcanvas:showIntermediates')}
|
||||
isChecked={shouldShowIntermediates}
|
||||
|
@ -228,13 +228,8 @@ const IAICanvasToolChooserOptions = () => {
|
||||
/>
|
||||
}
|
||||
>
|
||||
<Flex
|
||||
minWidth={'15rem'}
|
||||
direction={'column'}
|
||||
gap={'1rem'}
|
||||
width={'100%'}
|
||||
>
|
||||
<Flex gap={'1rem'} justifyContent="space-between">
|
||||
<Flex minWidth="15rem" direction="column" gap="1rem" width="100%">
|
||||
<Flex gap="1rem" justifyContent="space-between">
|
||||
<IAISlider
|
||||
label={t('unifiedcanvas:brushSize')}
|
||||
value={brushSize}
|
||||
|
@ -415,14 +415,14 @@ const CurrentImageButtons = () => {
|
||||
>
|
||||
<div className="current-image-send-to-popover">
|
||||
<IAIButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
onClick={handleClickUseAsInitialImage}
|
||||
leftIcon={<FaShare />}
|
||||
>
|
||||
{t('parameters:sendToImg2Img')}
|
||||
</IAIButton>
|
||||
<IAIButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
onClick={handleSendToCanvas}
|
||||
leftIcon={<FaShare />}
|
||||
>
|
||||
@ -430,14 +430,14 @@ const CurrentImageButtons = () => {
|
||||
</IAIButton>
|
||||
|
||||
<IAIButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
onClick={handleCopyImage}
|
||||
leftIcon={<FaCopy />}
|
||||
>
|
||||
{t('parameters:copyImage')}
|
||||
</IAIButton>
|
||||
<IAIButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
onClick={handleCopyImageLink}
|
||||
leftIcon={<FaCopy />}
|
||||
>
|
||||
@ -445,7 +445,7 @@ const CurrentImageButtons = () => {
|
||||
</IAIButton>
|
||||
|
||||
<Link download={true} href={currentImage?.url}>
|
||||
<IAIButton leftIcon={<FaDownload />} size={'sm'} w="100%">
|
||||
<IAIButton leftIcon={<FaDownload />} size="sm" w="100%">
|
||||
{t('parameters:downloadImage')}
|
||||
</IAIButton>
|
||||
</Link>
|
||||
|
@ -82,7 +82,7 @@ export default function CurrentImagePreview() {
|
||||
};
|
||||
|
||||
return (
|
||||
<div className={'current-image-preview'}>
|
||||
<div className="current-image-preview">
|
||||
{imageToDisplay && (
|
||||
<Image
|
||||
src={imageToDisplay.url}
|
||||
|
@ -116,13 +116,13 @@ const DeleteImageModal = forwardRef(
|
||||
</AlertDialogHeader>
|
||||
|
||||
<AlertDialogBody>
|
||||
<Flex direction={'column'} gap={5}>
|
||||
<Flex direction="column" gap={5}>
|
||||
<Text>
|
||||
Are you sure? Deleted images will be sent to the Bin. You
|
||||
can restore from there if you wish to.
|
||||
</Text>
|
||||
<FormControl>
|
||||
<Flex alignItems={'center'}>
|
||||
<Flex alignItems="center">
|
||||
<FormLabel mb={0}>Don't ask me again</FormLabel>
|
||||
<Switch
|
||||
checked={!shouldConfirmOnDelete}
|
||||
|
@ -175,12 +175,12 @@ const HoverableImage = memo((props: HoverableImageProps) => {
|
||||
>
|
||||
<ContextMenu.Trigger>
|
||||
<Box
|
||||
position={'relative'}
|
||||
position="relative"
|
||||
key={uuid}
|
||||
className="hoverable-image"
|
||||
onMouseOver={handleMouseOver}
|
||||
onMouseOut={handleMouseOut}
|
||||
userSelect={'none'}
|
||||
userSelect="none"
|
||||
draggable={true}
|
||||
onDragStart={handleDragStart}
|
||||
>
|
||||
@ -189,15 +189,15 @@ const HoverableImage = memo((props: HoverableImageProps) => {
|
||||
objectFit={
|
||||
shouldUseSingleGalleryColumn ? 'contain' : galleryImageObjectFit
|
||||
}
|
||||
rounded={'md'}
|
||||
rounded="md"
|
||||
src={thumbnail || url}
|
||||
loading={'lazy'}
|
||||
loading="lazy"
|
||||
/>
|
||||
<div className="hoverable-image-content" onClick={handleSelectImage}>
|
||||
{isSelected && (
|
||||
<Icon
|
||||
width={'50%'}
|
||||
height={'50%'}
|
||||
width="50%"
|
||||
height="50%"
|
||||
as={FaCheck}
|
||||
className="hoverable-image-check"
|
||||
/>
|
||||
@ -210,7 +210,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
|
||||
aria-label={t('parameters:deleteImage')}
|
||||
icon={<FaTrashAlt />}
|
||||
size="xs"
|
||||
variant={'imageHoverIconButton'}
|
||||
variant="imageHoverIconButton"
|
||||
fontSize={14}
|
||||
isDisabled={!mayDeleteImage}
|
||||
/>
|
||||
@ -221,7 +221,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
|
||||
</ContextMenu.Trigger>
|
||||
<ContextMenu.Content
|
||||
className="hoverable-image-context-menu"
|
||||
sticky={'always'}
|
||||
sticky="always"
|
||||
onInteractOutside={(e) => {
|
||||
e.detail.originalEvent.preventDefault();
|
||||
}}
|
||||
|
@ -281,7 +281,7 @@ export default function ImageGallery() {
|
||||
<Resizable
|
||||
minWidth={galleryMinWidth}
|
||||
maxWidth={shouldPinGallery ? galleryMaxWidth : window.innerWidth}
|
||||
className={'image-gallery-popup'}
|
||||
className="image-gallery-popup"
|
||||
handleStyles={{
|
||||
left: {
|
||||
width: '15px',
|
||||
@ -395,14 +395,14 @@ export default function ImageGallery() {
|
||||
{shouldShowButtons ? (
|
||||
<>
|
||||
<IAIButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
data-selected={currentCategory === 'result'}
|
||||
onClick={() => dispatch(setCurrentCategory('result'))}
|
||||
>
|
||||
{t('gallery:generations')}
|
||||
</IAIButton>
|
||||
<IAIButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
data-selected={currentCategory === 'user'}
|
||||
onClick={() => dispatch(setCurrentCategory('user'))}
|
||||
>
|
||||
@ -433,14 +433,14 @@ export default function ImageGallery() {
|
||||
<IAIPopover
|
||||
isLazy
|
||||
trigger="hover"
|
||||
placement={'left'}
|
||||
placement="left"
|
||||
triggerComponent={
|
||||
<IAIIconButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
aria-label={t('gallery:gallerySettings')}
|
||||
icon={<FaWrench />}
|
||||
className="image-gallery-icon-btn"
|
||||
cursor={'pointer'}
|
||||
cursor="pointer"
|
||||
/>
|
||||
}
|
||||
>
|
||||
@ -455,7 +455,7 @@ export default function ImageGallery() {
|
||||
label={t('gallery:galleryImageSize')}
|
||||
/>
|
||||
<IAIIconButton
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
aria-label={t('gallery:galleryImageResetSize')}
|
||||
tooltip={t('gallery:galleryImageResetSize')}
|
||||
onClick={() => dispatch(setGalleryImageMinimumWidth(64))}
|
||||
@ -505,8 +505,8 @@ export default function ImageGallery() {
|
||||
</IAIPopover>
|
||||
|
||||
<IAIIconButton
|
||||
size={'sm'}
|
||||
className={'image-gallery-icon-btn'}
|
||||
size="sm"
|
||||
className="image-gallery-icon-btn"
|
||||
aria-label={t('gallery:pinGallery')}
|
||||
tooltip={`${t('gallery:pinGallery')} (Shift+G)`}
|
||||
onClick={handleSetShouldPinGallery}
|
||||
|
@ -71,8 +71,8 @@ const MetadataItem = ({
|
||||
<IconButton
|
||||
aria-label="Use this parameter"
|
||||
icon={<IoArrowUndoCircleOutline />}
|
||||
size={'xs'}
|
||||
variant={'ghost'}
|
||||
size="xs"
|
||||
variant="ghost"
|
||||
fontSize={20}
|
||||
onClick={onClick}
|
||||
/>
|
||||
@ -83,23 +83,23 @@ const MetadataItem = ({
|
||||
<IconButton
|
||||
aria-label={`Copy ${label}`}
|
||||
icon={<FaCopy />}
|
||||
size={'xs'}
|
||||
variant={'ghost'}
|
||||
size="xs"
|
||||
variant="ghost"
|
||||
fontSize={14}
|
||||
onClick={() => navigator.clipboard.writeText(value.toString())}
|
||||
/>
|
||||
</Tooltip>
|
||||
)}
|
||||
<Flex direction={labelPosition ? 'column' : 'row'}>
|
||||
<Text fontWeight={'semibold'} whiteSpace={'pre-wrap'} pr={2}>
|
||||
<Text fontWeight="semibold" whiteSpace="pre-wrap" pr={2}>
|
||||
{label}:
|
||||
</Text>
|
||||
{isLink ? (
|
||||
<Link href={value.toString()} isExternal wordBreak={'break-all'}>
|
||||
<Link href={value.toString()} isExternal wordBreak="break-all">
|
||||
{value.toString()} <ExternalLinkIcon mx="2px" />
|
||||
</Link>
|
||||
) : (
|
||||
<Text overflowY={'scroll'} wordBreak={'break-all'}>
|
||||
<Text overflowY="scroll" wordBreak="break-all">
|
||||
{value.toString()}
|
||||
</Text>
|
||||
)}
|
||||
@ -163,10 +163,10 @@ const ImageMetadataViewer = memo(
|
||||
|
||||
return (
|
||||
<div className={`image-metadata-viewer ${styleClass}`}>
|
||||
<Flex gap={1} direction={'column'} width={'100%'}>
|
||||
<Flex gap={1} direction="column" width="100%">
|
||||
<Flex gap={2}>
|
||||
<Text fontWeight={'semibold'}>File:</Text>
|
||||
<Link href={image.url} isExternal maxW={'calc(100% - 3rem)'}>
|
||||
<Text fontWeight="semibold">File:</Text>
|
||||
<Link href={image.url} isExternal maxW="calc(100% - 3rem)">
|
||||
{image.url.length > 64
|
||||
? image.url.substring(0, 64).concat('...')
|
||||
: image.url}
|
||||
@ -304,7 +304,7 @@ const ImageMetadataViewer = memo(
|
||||
)}
|
||||
{postprocessing && postprocessing.length > 0 && (
|
||||
<>
|
||||
<Heading size={'sm'}>Postprocessing</Heading>
|
||||
<Heading size="sm">Postprocessing</Heading>
|
||||
{postprocessing.map(
|
||||
(
|
||||
postprocess: InvokeAI.PostProcessedImageMetadata,
|
||||
@ -313,13 +313,8 @@ const ImageMetadataViewer = memo(
|
||||
if (postprocess.type === 'esrgan') {
|
||||
const { scale, strength, denoise_str } = postprocess;
|
||||
return (
|
||||
<Flex
|
||||
key={i}
|
||||
pl={'2rem'}
|
||||
gap={1}
|
||||
direction={'column'}
|
||||
>
|
||||
<Text size={'md'}>{`${
|
||||
<Flex key={i} pl="2rem" gap={1} direction="column">
|
||||
<Text size="md">{`${
|
||||
i + 1
|
||||
}: Upscale (ESRGAN)`}</Text>
|
||||
<MetadataItem
|
||||
@ -348,13 +343,8 @@ const ImageMetadataViewer = memo(
|
||||
} else if (postprocess.type === 'gfpgan') {
|
||||
const { strength } = postprocess;
|
||||
return (
|
||||
<Flex
|
||||
key={i}
|
||||
pl={'2rem'}
|
||||
gap={1}
|
||||
direction={'column'}
|
||||
>
|
||||
<Text size={'md'}>{`${
|
||||
<Flex key={i} pl="2rem" gap={1} direction="column">
|
||||
<Text size="md">{`${
|
||||
i + 1
|
||||
}: Face restoration (GFPGAN)`}</Text>
|
||||
|
||||
@ -371,13 +361,8 @@ const ImageMetadataViewer = memo(
|
||||
} else if (postprocess.type === 'codeformer') {
|
||||
const { strength, fidelity } = postprocess;
|
||||
return (
|
||||
<Flex
|
||||
key={i}
|
||||
pl={'2rem'}
|
||||
gap={1}
|
||||
direction={'column'}
|
||||
>
|
||||
<Text size={'md'}>{`${
|
||||
<Flex key={i} pl="2rem" gap={1} direction="column">
|
||||
<Text size="md">{`${
|
||||
i + 1
|
||||
}: Face restoration (Codeformer)`}</Text>
|
||||
|
||||
@ -413,30 +398,30 @@ const ImageMetadataViewer = memo(
|
||||
value={dreamPrompt}
|
||||
/>
|
||||
)}
|
||||
<Flex gap={2} direction={'column'}>
|
||||
<Flex gap={2} direction="column">
|
||||
<Flex gap={2}>
|
||||
<Tooltip label={`Copy metadata JSON`}>
|
||||
<Tooltip label="Copy metadata JSON">
|
||||
<IconButton
|
||||
aria-label="Copy metadata JSON"
|
||||
icon={<FaCopy />}
|
||||
size={'xs'}
|
||||
variant={'ghost'}
|
||||
size="xs"
|
||||
variant="ghost"
|
||||
fontSize={14}
|
||||
onClick={() =>
|
||||
navigator.clipboard.writeText(metadataJSON)
|
||||
}
|
||||
/>
|
||||
</Tooltip>
|
||||
<Text fontWeight={'semibold'}>Metadata JSON:</Text>
|
||||
<Text fontWeight="semibold">Metadata JSON:</Text>
|
||||
</Flex>
|
||||
<div className={'image-json-viewer'}>
|
||||
<div className="image-json-viewer">
|
||||
<pre>{metadataJSON}</pre>
|
||||
</div>
|
||||
</Flex>
|
||||
</>
|
||||
) : (
|
||||
<Center width={'100%'} pt={10}>
|
||||
<Text fontSize={'lg'} fontWeight="semibold">
|
||||
<Center width="100%" pt={10}>
|
||||
<Text fontSize="lg" fontWeight="semibold">
|
||||
No metadata available
|
||||
</Text>
|
||||
</Center>
|
||||
|
@ -23,8 +23,8 @@ export default function InvokeAccordionItem(props: InvokeAccordionItemProps) {
|
||||
return (
|
||||
<AccordionItem className="advanced-parameters-item">
|
||||
<AccordionButton className="advanced-parameters-header">
|
||||
<Flex width={'100%'} gap={'0.5rem'} align={'center'}>
|
||||
<Box flexGrow={1} textAlign={'left'}>
|
||||
<Flex width="100%" gap="0.5rem" align="center">
|
||||
<Box flexGrow={1} textAlign="left">
|
||||
{header}
|
||||
</Box>
|
||||
{additionalHeaderComponents}
|
||||
|
@ -63,7 +63,7 @@ const FaceRestoreSettings = () => {
|
||||
const { t } = useTranslation();
|
||||
|
||||
return (
|
||||
<Flex direction={'column'} gap={2}>
|
||||
<Flex direction="column" gap={2}>
|
||||
<IAISelect
|
||||
label={t('parameters:type')}
|
||||
validValues={FACETOOL_TYPES.concat()}
|
||||
|
@ -36,7 +36,7 @@ export default function ImageToImageStrength(props: ImageToImageStrengthProps) {
|
||||
styleClass={styleClass}
|
||||
withInput
|
||||
withSliderMarks
|
||||
inputWidth={'5.5rem'}
|
||||
inputWidth="5.5rem"
|
||||
withReset
|
||||
handleReset={handleImg2ImgStrengthReset}
|
||||
/>
|
||||
|
@ -49,7 +49,7 @@ const HiresStrength = () => {
|
||||
isInteger={false}
|
||||
withInput
|
||||
withSliderMarks
|
||||
inputWidth={'5.5rem'}
|
||||
inputWidth="5.5rem"
|
||||
withReset
|
||||
handleReset={handleHiResStrengthReset}
|
||||
isSliderDisabled={!hiresFix}
|
||||
@ -75,10 +75,10 @@ const HiresSettings = () => {
|
||||
dispatch(setHiresFix(e.target.checked));
|
||||
|
||||
return (
|
||||
<Flex gap={2} direction={'column'}>
|
||||
<Flex gap={2} direction="column">
|
||||
<IAISwitch
|
||||
label={t('parameters:hiresOptim')}
|
||||
fontSize={'md'}
|
||||
fontSize="md"
|
||||
isChecked={hiresFix}
|
||||
onChange={handleChangeHiresFix}
|
||||
/>
|
||||
|
@ -3,7 +3,7 @@ import SeamlessSettings from './SeamlessSettings';
|
||||
|
||||
const ImageToImageOutputSettings = () => {
|
||||
return (
|
||||
<Flex gap={2} direction={'column'}>
|
||||
<Flex gap={2} direction="column">
|
||||
<SeamlessSettings />
|
||||
</Flex>
|
||||
);
|
||||
|
@ -4,7 +4,7 @@ import SeamlessSettings from './SeamlessSettings';
|
||||
|
||||
const OutputSettings = () => {
|
||||
return (
|
||||
<Flex gap={2} direction={'column'}>
|
||||
<Flex gap={2} direction="column">
|
||||
<SeamlessSettings />
|
||||
<HiresSettings />
|
||||
</Flex>
|
||||
|
@ -22,10 +22,10 @@ const SeamlessSettings = () => {
|
||||
const { t } = useTranslation();
|
||||
|
||||
return (
|
||||
<Flex gap={2} direction={'column'}>
|
||||
<Flex gap={2} direction="column">
|
||||
<IAISwitch
|
||||
label={t('parameters:seamlessTiling')}
|
||||
fontSize={'md'}
|
||||
fontSize="md"
|
||||
isChecked={seamless}
|
||||
onChange={handleChangeSeamless}
|
||||
/>
|
||||
|
@ -10,7 +10,7 @@ import Threshold from './Threshold';
|
||||
*/
|
||||
const SeedSettings = () => {
|
||||
return (
|
||||
<Flex gap={2} direction={'column'}>
|
||||
<Flex gap={2} direction="column">
|
||||
<RandomizeSeed />
|
||||
<Flex gap={2}>
|
||||
<Seed />
|
||||
|
@ -18,7 +18,7 @@ export default function ShuffleSeed() {
|
||||
|
||||
return (
|
||||
<Button
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
isDisabled={shouldRandomizeSeed}
|
||||
onClick={handleClickRandomizeSeed}
|
||||
padding="0 1.5rem"
|
||||
|
@ -18,7 +18,7 @@ export default function GenerateVariationsToggle() {
|
||||
return (
|
||||
<IAISwitch
|
||||
isChecked={shouldGenerateVariations}
|
||||
width={'auto'}
|
||||
width="auto"
|
||||
onChange={handleChangeShouldGenerateVariations}
|
||||
/>
|
||||
);
|
||||
|
@ -7,7 +7,7 @@ import VariationAmount from './VariationAmount';
|
||||
*/
|
||||
const VariationsSettings = () => {
|
||||
return (
|
||||
<Flex gap={2} direction={'column'}>
|
||||
<Flex gap={2} direction="column">
|
||||
<VariationAmount />
|
||||
<SeedWeights />
|
||||
</Flex>
|
||||
|
@ -71,7 +71,7 @@ const PromptInput = () => {
|
||||
id="prompt"
|
||||
name="prompt"
|
||||
placeholder={t('parameters:promptPlaceholder')}
|
||||
size={'lg'}
|
||||
size="lg"
|
||||
value={prompt}
|
||||
onChange={handleChangePrompt}
|
||||
onKeyDown={handleKeyDown}
|
||||
|
@ -27,7 +27,7 @@ const EmptyTempFolderButtonModal = () => {
|
||||
acceptCallback={acceptCallback}
|
||||
acceptButtonText={t('unifiedcanvas:emptyFolder')}
|
||||
triggerComponent={
|
||||
<IAIButton leftIcon={<FaTrash />} size={'sm'} isDisabled={isStaging}>
|
||||
<IAIButton leftIcon={<FaTrash />} size="sm" isDisabled={isStaging}>
|
||||
{t('unifiedcanvas:emptyTempImageFolder')}
|
||||
</IAIButton>
|
||||
}
|
||||
|
@ -109,7 +109,7 @@ const Console = () => {
|
||||
bottom: 0,
|
||||
zIndex: 9999,
|
||||
}}
|
||||
maxHeight={'90vh'}
|
||||
maxHeight="90vh"
|
||||
>
|
||||
<div className="console" ref={viewerRef} onScroll={handleOnScroll}>
|
||||
{log.map((entry, i) => {
|
||||
@ -130,11 +130,11 @@ const Console = () => {
|
||||
label={shouldAutoscroll ? 'Autoscroll On' : 'Autoscroll Off'}
|
||||
>
|
||||
<IconButton
|
||||
className={'console-autoscroll-icon-button'}
|
||||
className="console-autoscroll-icon-button"
|
||||
data-autoscroll-enabled={shouldAutoscroll}
|
||||
size="sm"
|
||||
aria-label="Toggle autoscroll"
|
||||
variant={'solid'}
|
||||
variant="solid"
|
||||
icon={<FaAngleDoubleDown />}
|
||||
onClick={() => setShouldAutoscroll(!shouldAutoscroll)}
|
||||
/>
|
||||
@ -145,11 +145,11 @@ const Console = () => {
|
||||
label={shouldShowLogViewer ? 'Hide Console' : 'Show Console'}
|
||||
>
|
||||
<IconButton
|
||||
className={'console-toggle-icon-button'}
|
||||
className="console-toggle-icon-button"
|
||||
data-error-seen={hasError || !wasErrorSeen}
|
||||
size="sm"
|
||||
position={'fixed'}
|
||||
variant={'solid'}
|
||||
position="fixed"
|
||||
variant="solid"
|
||||
aria-label="Toggle Log Viewer"
|
||||
icon={shouldShowLogViewer ? <FaMinus /> : <FaCode />}
|
||||
onClick={handleClickLogViewerToggle}
|
||||
|
@ -54,7 +54,7 @@ export default function LanguagePicker() {
|
||||
aria-label={t('common:languagePickerLabel')}
|
||||
tooltip={t('common:languagePickerLabel')}
|
||||
icon={<FaLanguage />}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={26}
|
||||
|
@ -99,8 +99,8 @@ export default function AddCheckpointModel() {
|
||||
>
|
||||
{({ handleSubmit, errors, touched }) => (
|
||||
<form onSubmit={handleSubmit}>
|
||||
<VStack rowGap={'0.5rem'}>
|
||||
<Text fontSize={20} fontWeight="bold" alignSelf={'start'}>
|
||||
<VStack rowGap="0.5rem">
|
||||
<Text fontSize={20} fontWeight="bold" alignSelf="start">
|
||||
{t('modelmanager:manual')}
|
||||
</Text>
|
||||
{/* Name */}
|
||||
@ -111,7 +111,7 @@ export default function AddCheckpointModel() {
|
||||
<FormLabel htmlFor="name" fontSize="sm">
|
||||
{t('modelmanager:name')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="name"
|
||||
@ -138,7 +138,7 @@ export default function AddCheckpointModel() {
|
||||
<FormLabel htmlFor="description" fontSize="sm">
|
||||
{t('modelmanager:description')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="description"
|
||||
@ -164,7 +164,7 @@ export default function AddCheckpointModel() {
|
||||
<FormLabel htmlFor="config" fontSize="sm">
|
||||
{t('modelmanager:config')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="config"
|
||||
@ -190,7 +190,7 @@ export default function AddCheckpointModel() {
|
||||
<FormLabel htmlFor="config" fontSize="sm">
|
||||
{t('modelmanager:modelLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="weights"
|
||||
@ -213,7 +213,7 @@ export default function AddCheckpointModel() {
|
||||
<FormLabel htmlFor="vae" fontSize="sm">
|
||||
{t('modelmanager:vaeLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="vae"
|
||||
@ -231,13 +231,13 @@ export default function AddCheckpointModel() {
|
||||
</VStack>
|
||||
</FormControl>
|
||||
|
||||
<HStack width={'100%'}>
|
||||
<HStack width="100%">
|
||||
{/* Width */}
|
||||
<FormControl isInvalid={!!errors.width && touched.width}>
|
||||
<FormLabel htmlFor="width" fontSize="sm">
|
||||
{t('modelmanager:width')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field id="width" name="width">
|
||||
{({
|
||||
field,
|
||||
@ -276,7 +276,7 @@ export default function AddCheckpointModel() {
|
||||
<FormLabel htmlFor="height" fontSize="sm">
|
||||
{t('modelmanager:height')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field id="height" name="height">
|
||||
{({
|
||||
field,
|
||||
|
@ -105,7 +105,7 @@ export default function AddDiffusersModel() {
|
||||
>
|
||||
{({ handleSubmit, errors, touched }) => (
|
||||
<form onSubmit={handleSubmit}>
|
||||
<VStack rowGap={'0.5rem'}>
|
||||
<VStack rowGap="0.5rem">
|
||||
<FormItemWrapper>
|
||||
{/* Name */}
|
||||
<FormControl
|
||||
@ -115,7 +115,7 @@ export default function AddDiffusersModel() {
|
||||
<FormLabel htmlFor="name" fontSize="sm">
|
||||
{t('modelmanager:name')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="name"
|
||||
@ -145,7 +145,7 @@ export default function AddDiffusersModel() {
|
||||
<FormLabel htmlFor="description" fontSize="sm">
|
||||
{t('modelmanager:description')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="description"
|
||||
@ -182,7 +182,7 @@ export default function AddDiffusersModel() {
|
||||
<FormLabel htmlFor="path" fontSize="sm">
|
||||
{t('modelmanager:modelLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="path"
|
||||
@ -205,7 +205,7 @@ export default function AddDiffusersModel() {
|
||||
<FormLabel htmlFor="repo_id" fontSize="sm">
|
||||
{t('modelmanager:repo_id')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="repo_id"
|
||||
@ -242,7 +242,7 @@ export default function AddDiffusersModel() {
|
||||
<FormLabel htmlFor="vae.path" fontSize="sm">
|
||||
{t('modelmanager:vaeLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="vae.path"
|
||||
@ -267,7 +267,7 @@ export default function AddDiffusersModel() {
|
||||
<FormLabel htmlFor="vae.repo_id" fontSize="sm">
|
||||
{t('modelmanager:vaeRepoID')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="vae.repo_id"
|
||||
|
@ -72,9 +72,9 @@ export default function AddModel() {
|
||||
tooltip={t('modelmanager:addNewModel')}
|
||||
onClick={onOpen}
|
||||
className="modal-close-btn"
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
>
|
||||
<Flex columnGap={'0.5rem'} alignItems="center">
|
||||
<Flex columnGap="0.5rem" alignItems="center">
|
||||
<FaPlus />
|
||||
{t('modelmanager:addNew')}
|
||||
</Flex>
|
||||
|
@ -121,7 +121,7 @@ export default function CheckpointModelEdit() {
|
||||
>
|
||||
{({ handleSubmit, errors, touched }) => (
|
||||
<form onSubmit={handleSubmit}>
|
||||
<VStack rowGap={'0.5rem'} alignItems="start">
|
||||
<VStack rowGap="0.5rem" alignItems="start">
|
||||
{/* Description */}
|
||||
<FormControl
|
||||
isInvalid={!!errors.description && touched.description}
|
||||
@ -130,7 +130,7 @@ export default function CheckpointModelEdit() {
|
||||
<FormLabel htmlFor="description" fontSize="sm">
|
||||
{t('modelmanager:description')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="description"
|
||||
@ -156,7 +156,7 @@ export default function CheckpointModelEdit() {
|
||||
<FormLabel htmlFor="config" fontSize="sm">
|
||||
{t('modelmanager:config')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="config"
|
||||
@ -182,7 +182,7 @@ export default function CheckpointModelEdit() {
|
||||
<FormLabel htmlFor="config" fontSize="sm">
|
||||
{t('modelmanager:modelLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="weights"
|
||||
@ -205,7 +205,7 @@ export default function CheckpointModelEdit() {
|
||||
<FormLabel htmlFor="vae" fontSize="sm">
|
||||
{t('modelmanager:vaeLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="vae"
|
||||
@ -223,13 +223,13 @@ export default function CheckpointModelEdit() {
|
||||
</VStack>
|
||||
</FormControl>
|
||||
|
||||
<HStack width={'100%'}>
|
||||
<HStack width="100%">
|
||||
{/* Width */}
|
||||
<FormControl isInvalid={!!errors.width && touched.width}>
|
||||
<FormLabel htmlFor="width" fontSize="sm">
|
||||
{t('modelmanager:width')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field id="width" name="width">
|
||||
{({
|
||||
field,
|
||||
@ -267,7 +267,7 @@ export default function CheckpointModelEdit() {
|
||||
<FormLabel htmlFor="height" fontSize="sm">
|
||||
{t('modelmanager:height')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field id="height" name="height">
|
||||
{({
|
||||
field,
|
||||
|
@ -128,7 +128,7 @@ export default function DiffusersModelEdit() {
|
||||
>
|
||||
{({ handleSubmit, errors, touched }) => (
|
||||
<form onSubmit={handleSubmit}>
|
||||
<VStack rowGap={'0.5rem'} alignItems="start">
|
||||
<VStack rowGap="0.5rem" alignItems="start">
|
||||
{/* Description */}
|
||||
<FormControl
|
||||
isInvalid={!!errors.description && touched.description}
|
||||
@ -137,7 +137,7 @@ export default function DiffusersModelEdit() {
|
||||
<FormLabel htmlFor="description" fontSize="sm">
|
||||
{t('modelmanager:description')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="description"
|
||||
@ -163,7 +163,7 @@ export default function DiffusersModelEdit() {
|
||||
<FormLabel htmlFor="path" fontSize="sm">
|
||||
{t('modelmanager:modelLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="path"
|
||||
@ -186,7 +186,7 @@ export default function DiffusersModelEdit() {
|
||||
<FormLabel htmlFor="repo_id" fontSize="sm">
|
||||
{t('modelmanager:repo_id')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="repo_id"
|
||||
@ -211,7 +211,7 @@ export default function DiffusersModelEdit() {
|
||||
<FormLabel htmlFor="vae.path" fontSize="sm">
|
||||
{t('modelmanager:vaeLocation')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="vae.path"
|
||||
@ -236,7 +236,7 @@ export default function DiffusersModelEdit() {
|
||||
<FormLabel htmlFor="vae.repo_id" fontSize="sm">
|
||||
{t('modelmanager:vaeRepoID')}
|
||||
</FormLabel>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<Field
|
||||
as={IAIInput}
|
||||
id="vae.repo_id"
|
||||
|
@ -176,9 +176,9 @@ const ModelList = () => {
|
||||
}, [models, searchText, t, isSelectedFilter]);
|
||||
|
||||
return (
|
||||
<Flex flexDirection={'column'} rowGap="2rem" width="50%" minWidth="50%">
|
||||
<Flex justifyContent={'space-between'}>
|
||||
<Text fontSize={'1.4rem'} fontWeight="bold">
|
||||
<Flex flexDirection="column" rowGap="2rem" width="50%" minWidth="50%">
|
||||
<Flex justifyContent="space-between">
|
||||
<Text fontSize="1.4rem" fontWeight="bold">
|
||||
{t('modelmanager:availableModels')}
|
||||
</Text>
|
||||
<AddModel />
|
||||
@ -190,10 +190,10 @@ const ModelList = () => {
|
||||
/>
|
||||
|
||||
<Flex
|
||||
flexDirection={'column'}
|
||||
flexDirection="column"
|
||||
gap={1}
|
||||
maxHeight={window.innerHeight - 360}
|
||||
overflow={'scroll'}
|
||||
overflow="scroll"
|
||||
paddingRight="1rem"
|
||||
>
|
||||
<Flex columnGap="0.5rem">
|
||||
|
@ -56,7 +56,7 @@ export default function ModelListItem(props: ModelListItemProps) {
|
||||
|
||||
return (
|
||||
<Flex
|
||||
alignItems={'center'}
|
||||
alignItems="center"
|
||||
padding="0.5rem 0.5rem"
|
||||
borderRadius="0.2rem"
|
||||
backgroundColor={name === openModel ? 'var(--accent-color)' : ''}
|
||||
@ -69,14 +69,14 @@ export default function ModelListItem(props: ModelListItemProps) {
|
||||
>
|
||||
<Box onClick={openModelHandler} cursor="pointer">
|
||||
<Tooltip label={description} hasArrow placement="bottom">
|
||||
<Text fontWeight={'bold'}>{name}</Text>
|
||||
<Text fontWeight="bold">{name}</Text>
|
||||
</Tooltip>
|
||||
</Box>
|
||||
<Spacer onClick={openModelHandler} cursor="pointer" />
|
||||
<Flex gap={2} alignItems="center">
|
||||
<Text color={statusTextColor()}>{status}</Text>
|
||||
<Button
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
onClick={handleChangeModel}
|
||||
isDisabled={status === 'active' || isProcessing || !isConnected}
|
||||
className="modal-close-btn"
|
||||
@ -86,7 +86,7 @@ export default function ModelListItem(props: ModelListItemProps) {
|
||||
|
||||
<IAIIconButton
|
||||
icon={<EditIcon />}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
onClick={openModelHandler}
|
||||
aria-label="Modify Config"
|
||||
isDisabled={status === 'active' || isProcessing || !isConnected}
|
||||
@ -99,7 +99,7 @@ export default function ModelListItem(props: ModelListItemProps) {
|
||||
triggerComponent={
|
||||
<IAIIconButton
|
||||
icon={<DeleteIcon />}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
aria-label={t('modelmanager:deleteConfig')}
|
||||
isDisabled={status === 'active' || isProcessing || !isConnected}
|
||||
className=" modal-close-btn"
|
||||
@ -107,7 +107,7 @@ export default function ModelListItem(props: ModelListItemProps) {
|
||||
/>
|
||||
}
|
||||
>
|
||||
<Flex rowGap={'1rem'} flexDirection="column">
|
||||
<Flex rowGap="1rem" flexDirection="column">
|
||||
<p style={{ fontWeight: 'bold' }}>{t('modelmanager:deleteMsg1')}</p>
|
||||
<p style={{ color: 'var(--text-color-secondary' }}>
|
||||
{t('modelmanager:deleteMsg2')}
|
||||
|
@ -58,11 +58,7 @@ export default function ModelManagerModal({
|
||||
<ModalHeader fontWeight="bold">
|
||||
{t('modelmanager:modelManager')}
|
||||
</ModalHeader>
|
||||
<Flex
|
||||
padding={'0 1.5rem 1.5rem 1.5rem'}
|
||||
width="100%"
|
||||
columnGap={'2rem'}
|
||||
>
|
||||
<Flex padding="0 1.5rem 1.5rem 1.5rem" width="100%" columnGap="2rem">
|
||||
<ModelList />
|
||||
{openModel && model_list[openModel]['format'] === 'diffusers' ? (
|
||||
<DiffusersModelEdit />
|
||||
|
@ -52,16 +52,16 @@ function ModelExistsTag() {
|
||||
const { t } = useTranslation();
|
||||
return (
|
||||
<Box
|
||||
position={'absolute'}
|
||||
position="absolute"
|
||||
zIndex={2}
|
||||
right={4}
|
||||
top={4}
|
||||
fontSize="0.7rem"
|
||||
fontWeight={'bold'}
|
||||
backgroundColor={'var(--accent-color)'}
|
||||
padding={'0.2rem 0.5rem'}
|
||||
fontWeight="bold"
|
||||
backgroundColor="var(--accent-color)"
|
||||
padding="0.2rem 0.5rem"
|
||||
borderRadius="0.2rem"
|
||||
alignItems={'center'}
|
||||
alignItems="center"
|
||||
>
|
||||
{t('modelmanager:modelExists')}
|
||||
</Box>
|
||||
@ -96,7 +96,7 @@ function SearchModelEntry({
|
||||
value={model.name}
|
||||
label={
|
||||
<>
|
||||
<VStack alignItems={'start'}>
|
||||
<VStack alignItems="start">
|
||||
<p style={{ fontWeight: 'bold' }}>{model.name}</p>
|
||||
<p style={{ fontStyle: 'italic' }}>{model.location}</p>
|
||||
</VStack>
|
||||
@ -105,9 +105,9 @@ function SearchModelEntry({
|
||||
isChecked={modelsToAdd.includes(model.name)}
|
||||
isDisabled={existingModels.includes(model.location)}
|
||||
onChange={foundModelsChangeHandler}
|
||||
padding={'1rem'}
|
||||
backgroundColor={'var(--background-color)'}
|
||||
borderRadius={'0.5rem'}
|
||||
padding="1rem"
|
||||
backgroundColor="var(--background-color)"
|
||||
borderRadius="0.5rem"
|
||||
_checked={{
|
||||
backgroundColor: 'var(--accent-color)',
|
||||
color: 'var(--text-color)',
|
||||
@ -243,12 +243,12 @@ export default function SearchModels() {
|
||||
<>
|
||||
{searchFolder ? (
|
||||
<Flex
|
||||
flexDirection={'column'}
|
||||
padding={'1rem'}
|
||||
backgroundColor={'var(--background-color)'}
|
||||
flexDirection="column"
|
||||
padding="1rem"
|
||||
backgroundColor="var(--background-color)"
|
||||
borderRadius="0.5rem"
|
||||
rowGap={'0.5rem'}
|
||||
position={'relative'}
|
||||
rowGap="0.5rem"
|
||||
position="relative"
|
||||
>
|
||||
<p
|
||||
style={{
|
||||
@ -271,7 +271,7 @@ export default function SearchModels() {
|
||||
aria-label={t('modelmanager:scanAgain')}
|
||||
tooltip={t('modelmanager:scanAgain')}
|
||||
icon={<BiReset />}
|
||||
position={'absolute'}
|
||||
position="absolute"
|
||||
right={16}
|
||||
fontSize={18}
|
||||
disabled={isProcessing}
|
||||
@ -280,7 +280,7 @@ export default function SearchModels() {
|
||||
<IAIIconButton
|
||||
aria-label={t('modelmanager:clearCheckpointFolder')}
|
||||
icon={<FaPlus style={{ transform: 'rotate(45deg)' }} />}
|
||||
position={'absolute'}
|
||||
position="absolute"
|
||||
right={5}
|
||||
onClick={resetSearchModelHandler}
|
||||
/>
|
||||
@ -319,8 +319,8 @@ export default function SearchModels() {
|
||||
</Formik>
|
||||
)}
|
||||
{foundModels && (
|
||||
<Flex flexDirection={'column'} rowGap={'1rem'}>
|
||||
<Flex justifyContent={'space-between'} alignItems="center">
|
||||
<Flex flexDirection="column" rowGap="1rem">
|
||||
<Flex justifyContent="space-between" alignItems="center">
|
||||
<p>
|
||||
{t('modelmanager:modelsFound')}: {foundModels.length}
|
||||
</p>
|
||||
@ -328,8 +328,8 @@ export default function SearchModels() {
|
||||
{t('modelmanager:selected')}: {modelsToAdd.length}
|
||||
</p>
|
||||
</Flex>
|
||||
<Flex columnGap={'0.5rem'} justifyContent={'space-between'}>
|
||||
<Flex columnGap={'0.5rem'}>
|
||||
<Flex columnGap="0.5rem" justifyContent="space-between">
|
||||
<Flex columnGap="0.5rem">
|
||||
<IAIButton
|
||||
isDisabled={modelsToAdd.length === foundModels.length}
|
||||
onClick={addAllToSelected}
|
||||
|
@ -7,6 +7,8 @@
|
||||
|
||||
div {
|
||||
background-color: var(--progress-bar-color);
|
||||
transition: width 0.2s ease-in-out;
|
||||
|
||||
&[data-indeterminate] {
|
||||
background-color: unset;
|
||||
background-image: linear-gradient(
|
||||
|
@ -206,7 +206,7 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
|
||||
</div>
|
||||
|
||||
<div className="settings-modal-reset">
|
||||
<Heading size={'md'}>{t('settings:resetWebUI')}</Heading>
|
||||
<Heading size="md">{t('settings:resetWebUI')}</Heading>
|
||||
<Button colorScheme="red" onClick={handleClickResetWebUI}>
|
||||
{t('settings:resetWebUI')}
|
||||
</Button>
|
||||
@ -232,8 +232,8 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
|
||||
<ModalOverlay bg="blackAlpha.300" backdropFilter="blur(40px)" />
|
||||
<ModalContent>
|
||||
<ModalBody pb={6} pt={6}>
|
||||
<Flex justifyContent={'center'}>
|
||||
<Text fontSize={'lg'}>
|
||||
<Flex justifyContent="center">
|
||||
<Text fontSize="lg">
|
||||
<Text>{t('settings:resetComplete')}</Text>
|
||||
</Text>
|
||||
</Flex>
|
||||
|
@ -56,7 +56,7 @@ const SiteHeader = () => {
|
||||
<IAIIconButton
|
||||
aria-label={t('modelmanager:modelManager')}
|
||||
tooltip={t('modelmanager:modelManager')}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={20}
|
||||
@ -68,7 +68,7 @@ const SiteHeader = () => {
|
||||
<IAIIconButton
|
||||
aria-label={t('common:hotkeysLabel')}
|
||||
tooltip={t('common:hotkeysLabel')}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={20}
|
||||
@ -86,7 +86,7 @@ const SiteHeader = () => {
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={20}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
icon={
|
||||
<Link isExternal href="http://github.com/invoke-ai/InvokeAI/issues">
|
||||
<FaBug />
|
||||
@ -100,7 +100,7 @@ const SiteHeader = () => {
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={20}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
icon={
|
||||
<Link isExternal href="http://github.com/invoke-ai/InvokeAI">
|
||||
<FaGithub />
|
||||
@ -114,7 +114,7 @@ const SiteHeader = () => {
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={20}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
icon={
|
||||
<Link isExternal href="https://discord.gg/ZmtBAhwWhy">
|
||||
<FaDiscord />
|
||||
@ -129,7 +129,7 @@ const SiteHeader = () => {
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={22}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
icon={<MdSettings />}
|
||||
/>
|
||||
</SettingsModal>
|
||||
|
@ -46,7 +46,7 @@ export default function ThemeChanger() {
|
||||
width: '6rem',
|
||||
}}
|
||||
leftIcon={currentTheme === theme ? <FaCheck /> : undefined}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
onClick={() => handleChangeTheme(theme)}
|
||||
key={theme}
|
||||
>
|
||||
@ -64,7 +64,7 @@ export default function ThemeChanger() {
|
||||
triggerComponent={
|
||||
<IAIIconButton
|
||||
aria-label={t('common:themeLabel')}
|
||||
size={'sm'}
|
||||
size="sm"
|
||||
variant="link"
|
||||
data-variant="link"
|
||||
fontSize={20}
|
||||
@ -72,7 +72,7 @@ export default function ThemeChanger() {
|
||||
/>
|
||||
}
|
||||
>
|
||||
<VStack align={'stretch'}>{renderThemeOptions()}</VStack>
|
||||
<VStack align="stretch">{renderThemeOptions()}</VStack>
|
||||
</IAIPopover>
|
||||
);
|
||||
}
|
||||
|
@ -35,9 +35,9 @@ export default function InitImagePreview() {
|
||||
{initialImage && (
|
||||
<div className="init-image-preview">
|
||||
<Image
|
||||
fit={'contain'}
|
||||
maxWidth={'100%'}
|
||||
maxHeight={'100%'}
|
||||
fit="contain"
|
||||
maxWidth="100%"
|
||||
maxHeight="100%"
|
||||
src={
|
||||
typeof initialImage === 'string' ? initialImage : initialImage.url
|
||||
}
|
||||
|
@ -9,10 +9,10 @@ export default function InitialImageOverlay() {
|
||||
|
||||
return initialImage ? (
|
||||
<Image
|
||||
fit={'contain'}
|
||||
fit="contain"
|
||||
src={typeof initialImage === 'string' ? initialImage : initialImage.url}
|
||||
rounded={'md'}
|
||||
className={'checkerboard'}
|
||||
rounded="md"
|
||||
className="checkerboard"
|
||||
/>
|
||||
) : null;
|
||||
}
|
||||
|
@ -31,32 +31,32 @@ export interface InvokeTabInfo {
|
||||
|
||||
export const tabDict: Record<InvokeTabName, InvokeTabInfo> = {
|
||||
txt2img: {
|
||||
title: <TextToImageIcon fill={'black'} boxSize={'2.5rem'} />,
|
||||
title: <TextToImageIcon fill="black" boxSize="2.5rem" />,
|
||||
workarea: <TextToImageWorkarea />,
|
||||
tooltip: 'Text To Image',
|
||||
},
|
||||
img2img: {
|
||||
title: <ImageToImageIcon fill={'black'} boxSize={'2.5rem'} />,
|
||||
title: <ImageToImageIcon fill="black" boxSize="2.5rem" />,
|
||||
workarea: <ImageToImageWorkarea />,
|
||||
tooltip: 'Image To Image',
|
||||
},
|
||||
unifiedCanvas: {
|
||||
title: <UnifiedCanvasIcon fill={'black'} boxSize={'2.5rem'} />,
|
||||
title: <UnifiedCanvasIcon fill="black" boxSize="2.5rem" />,
|
||||
workarea: <UnifiedCanvasWorkarea />,
|
||||
tooltip: 'Unified Canvas',
|
||||
},
|
||||
nodes: {
|
||||
title: <NodesIcon fill={'black'} boxSize={'2.5rem'} />,
|
||||
title: <NodesIcon fill="black" boxSize="2.5rem" />,
|
||||
workarea: <NodesWIP />,
|
||||
tooltip: 'Nodes',
|
||||
},
|
||||
postprocess: {
|
||||
title: <PostprocessingIcon fill={'black'} boxSize={'2.5rem'} />,
|
||||
title: <PostprocessingIcon fill="black" boxSize="2.5rem" />,
|
||||
workarea: <PostProcessingWIP />,
|
||||
tooltip: 'Post Processing',
|
||||
},
|
||||
training: {
|
||||
title: <TrainingIcon fill={'black'} boxSize={'2.5rem'} />,
|
||||
title: <TrainingIcon fill="black" boxSize="2.5rem" />,
|
||||
workarea: <TrainingWIP />,
|
||||
tooltip: 'Training',
|
||||
},
|
||||
@ -122,7 +122,7 @@ export default function InvokeTabs() {
|
||||
key={key}
|
||||
hasArrow
|
||||
label={tabDict[key as keyof typeof tabDict].tooltip}
|
||||
placement={'right'}
|
||||
placement="right"
|
||||
>
|
||||
<Tab>{tabDict[key as keyof typeof tabDict].title}</Tab>
|
||||
</Tooltip>
|
||||
@ -147,7 +147,7 @@ export default function InvokeTabs() {
|
||||
<Tabs
|
||||
isLazy
|
||||
className="app-tabs"
|
||||
variant={'unstyled'}
|
||||
variant="unstyled"
|
||||
defaultIndex={activeTab}
|
||||
index={activeTab}
|
||||
onChange={(index: number) => {
|
||||
|
@ -45,21 +45,16 @@ const UnifiedCanvasDisplayBeta = () => {
|
||||
}, [dispatch]);
|
||||
|
||||
return (
|
||||
<div className={'workarea-single-view'}>
|
||||
<div className="workarea-single-view">
|
||||
<Flex
|
||||
flexDirection={'row'}
|
||||
flexDirection="row"
|
||||
width="100%"
|
||||
height="100%"
|
||||
columnGap={'1rem'}
|
||||
columnGap="1rem"
|
||||
padding="1rem"
|
||||
>
|
||||
<UnifiedCanvasToolbarBeta />
|
||||
<Flex
|
||||
width="100%"
|
||||
height="100%"
|
||||
flexDirection={'column'}
|
||||
rowGap={'1rem'}
|
||||
>
|
||||
<Flex width="100%" height="100%" flexDirection="column" rowGap="1rem">
|
||||
<UnifiedCanvasToolSettingsBeta />
|
||||
{doesCanvasNeedScaling ? <IAICanvasResizer /> : <IAICanvas />}
|
||||
</Flex>
|
||||
|
@ -4,7 +4,7 @@ import UnifiedCanvasLimitStrokesToBox from './UnifiedCanvasLimitStrokesToBox';

export default function UnifiedCanvasBaseBrushSettings() {
return (
<Flex gap={'1rem'} alignItems="center">
<Flex gap="1rem" alignItems="center">
<UnifiedCanvasBrushSettings />
<UnifiedCanvasLimitStrokesToBox />
</Flex>

@ -4,7 +4,7 @@ import UnifiedCanvasColorPicker from './UnifiedCanvasColorPicker';

export default function UnifiedCanvasBrushSettings() {
return (
<Flex columnGap={'1rem'} alignItems="center">
<Flex columnGap="1rem" alignItems="center">
<UnifiedCanvasBrushSize />
<UnifiedCanvasColorPicker />
</Flex>

@ -49,7 +49,7 @@ export default function UnifiedCanvasBrushSize() {
onChange={(newSize) => dispatch(setBrushSize(newSize))}
sliderNumberInputProps={{ max: 500 }}
inputReadOnly={false}
width={'100px'}
width="100px"
isCompact
/>
);
@ -14,7 +14,7 @@ export default function UnifiedCanvasClearMask() {

return (
<IAIButton
size={'sm'}
size="sm"
leftIcon={<FaTrash />}
onClick={handleClearMask}
tooltip={`${t('unifiedcanvas:clearMask')} (Shift+C)`}

@ -92,7 +92,7 @@ export default function UnifiedCanvasColorPicker() {
/>
}
>
<Flex minWidth={'15rem'} direction={'column'} gap={'1rem'} width={'100%'}>
<Flex minWidth="15rem" direction="column" gap="1rem" width="100%">
{layer === 'base' && (
<IAIColorPicker
style={{

@ -6,7 +6,7 @@ import UnifiedCanvasPreserveMask from './UnifiedCanvasPreserveMask';

export default function UnifiedCanvasMaskBrushSettings() {
return (
<Flex gap={'1rem'} alignItems="center">
<Flex gap="1rem" alignItems="center">
<UnifiedCanvasBrushSettings />
<UnifiedCanvasEnableMask />
<UnifiedCanvasPreserveMask />

@ -5,7 +5,7 @@ import UnifiedCanvasSnapToGrid from './UnifiedCanvasSnapToGrid';

export default function UnifiedCanvasMoveSettings() {
return (
<Flex alignItems={'center'} gap="1rem">
<Flex alignItems="center" gap="1rem">
<UnifiedCanvasShowGrid />
<UnifiedCanvasSnapToGrid />
<UnifiedCanvasDarkenOutsideSelection />
@ -68,7 +68,7 @@ const UnifiedCanvasSettings = () => {
/>
}
>
<Flex direction={'column'} gap={'0.5rem'}>
<Flex direction="column" gap="0.5rem">
<IAICheckbox
label={t('unifiedcanvas:showIntermediates')}
isChecked={shouldShowIntermediates}

@ -28,7 +28,7 @@ export default function UnifiedCanvasToolSettingsBeta() {
const { tool, layer } = useAppSelector(selector);

return (
<Flex height="2rem" minHeight="2rem" maxHeight="2rem" alignItems={'center'}>
<Flex height="2rem" minHeight="2rem" maxHeight="2rem" alignItems="center">
{layer == 'base' && ['brush', 'eraser', 'colorPicker'].includes(tool) && (
<UnifiedCanvasBaseBrushSettings />
)}

@ -25,7 +25,7 @@ export default function UnifiedCanvasProcessingButtons() {
};

return (
<Flex flexDirection={'column'} gap="0.5rem">
<Flex flexDirection="column" gap="0.5rem">
<IAIIconButton
tooltip={`${t('parameters:showOptionsPanel')} (O)`}
tooltipProps={{ placement: 'top' }}

@ -38,7 +38,7 @@ export default function UnifiedCanvasProcessingButtons() {
<InvokeButton iconButton />
</Flex>
<Flex>
<CancelButton width={'100%'} height={'40px'} />
<CancelButton width="100%" height="40px" />
</Flex>
</Flex>
);
@ -113,7 +113,7 @@ const UnifiedCanvasToolSelect = () => {
const handleEraseBoundingBox = () => dispatch(addEraseRect());

return (
<Flex flexDirection={'column'} gap={'0.5rem'}>
<Flex flexDirection="column" gap="0.5rem">
<ButtonGroup>
<IAIIconButton
aria-label={`${t('unifiedcanvas:brush')} (B)`}

@ -155,7 +155,7 @@ const UnifiedCanvasToolSelect = () => {
data-selected={tool === 'colorPicker' && !isStaging}
isDisabled={isStaging}
onClick={handleSelectColorPickerTool}
width={'max-content'}
width="max-content"
/>
</Flex>
);

@ -23,30 +23,30 @@ const UnifiedCanvasToolbarBeta = () => {
);

return (
<Flex flexDirection={'column'} rowGap="0.5rem" width="6rem">
<Flex flexDirection="column" rowGap="0.5rem" width="6rem">
<UnifiedCanvasLayerSelect />
<UnifiedCanvasToolSelect />

<Flex gap={'0.5rem'}>
<Flex gap="0.5rem">
<UnifiedCanvasMoveTool />
<UnifiedCanvasResetView />
</Flex>

<Flex columnGap={'0.5rem'}>
<Flex columnGap="0.5rem">
<UnifiedCanvasMergeVisible />
<UnifiedCanvasSaveToGallery />
</Flex>
<Flex columnGap={'0.5rem'}>
<Flex columnGap="0.5rem">
<UnifiedCanvasCopyToClipboard />
<UnifiedCanvasDownloadImage />
</Flex>

<Flex gap={'0.5rem'}>
<Flex gap="0.5rem">
<IAICanvasUndoButton />
<IAICanvasRedoButton />
</Flex>

<Flex gap={'0.5rem'}>
<Flex gap="0.5rem">
<UnifiedCanvasFileUploader />
<UnifiedCanvasResetCanvas />
</Flex>

@ -43,7 +43,7 @@ const UnifiedCanvasDisplay = () => {
}, [dispatch]);

return (
<div className={'workarea-single-view'}>
<div className="workarea-single-view">
<div className="workarea-split-view-left">
<div className="inpainting-main-area">
<IAICanvasOutpaintingControls />
File diff suppressed because one or more lines are too long
@ -213,7 +213,9 @@ class Generate:
print('>> xformers not installed')

# model caching system for fast switching
self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models)
self.model_manager = ModelManager(mconfig, self.device, self.precision,
max_loaded_models=max_loaded_models,
sequential_offload=self.free_gpu_mem)
# don't accept invalid models
fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME
model = model or fallback

@ -480,7 +482,6 @@ class Generate:
self.model.cond_stage_model.device = self.model.device
self.model.cond_stage_model.to(self.model.device)
except AttributeError:
print(">> Warning: '--free_gpu_mem' is not yet supported when generating image using model based on HuggingFace Diffuser.")
pass

try:
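A rough sketch of how the new keyword is expected to be used when building the model cache directly; the module path and config filename are assumptions for illustration, while the parameter names come from the ModelManager signature shown further down in this diff.

from omegaconf import OmegaConf
from ldm.invoke.model_manager import ModelManager   # module path assumed for illustration

# Mirrors Generate(free_gpu_mem=True): the flag is forwarded as sequential_offload.
mconfig = OmegaConf.load('configs/models.yaml')      # config path assumed for illustration
manager = ModelManager(mconfig,
                       device_type='cuda',
                       precision='float16',
                       max_loaded_models=2,
                       sequential_offload=True)
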
@ -4,39 +4,34 @@ import dataclasses
import inspect
import psutil
import secrets
import sys
from collections.abc import Sequence
from dataclasses import dataclass, field
from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any

if sys.version_info < (3, 10):
from typing_extensions import ParamSpec
else:
from typing import ParamSpec

import PIL.Image
import einops
import psutil
import torch
import torchvision.transforms as T
from diffusers.utils.import_utils import is_xformers_available

from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ...modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.outputs import BaseOutput
from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from typing_extensions import ParamSpec

from ldm.invoke.globals import Globals
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
from ldm.modules.textual_inversion_manager import TextualInversionManager
from ..offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver
from ...modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter

@dataclass
@ -265,6 +260,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
_model_group: ModelGroup

ID_LENGTH = 8

@ -274,7 +270,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
scheduler: KarrasDiffusionSchedulers,
safety_checker: Optional[StableDiffusionSafetyChecker],
feature_extractor: Optional[CLIPFeatureExtractor],
requires_safety_checker: bool = False,

@ -304,8 +300,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
textual_inversion_manager=self.textual_inversion_manager
)

self._model_group = FullyLoadedModelGroup(self.unet.device)
self._model_group.install(*self._submodels)

def _adjust_memory_efficient_attention(self, latents: Torch.tensor):
def _adjust_memory_efficient_attention(self, latents: torch.Tensor):
"""
if xformers is available, use it, otherwise use sliced attention.
"""
@ -323,7 +322,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
elif self.device.type == 'cuda':
mem_free, _ = torch.cuda.mem_get_info(self.device)
else:
raise ValueError(f"unrecognized device {device}")
raise ValueError(f"unrecognized device {self.device}")
# input tensor of [1, 4, h/8, w/8]
# output tensor of [16, (h/8 * w/8), (h/8 * w/8)]
bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4
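To make the size comment concrete, a back-of-the-envelope sketch (illustration only; the exact threshold compared against mem_free lies outside this hunk):

# For a 512x512 image the latents are [1, 4, 64, 64], so h/8 = w/8 = 64.
h_lat = w_lat = 512 // 8
element_size = 2                          # float16 latents -> latents.element_size() == 2
bytes_per_element = element_size + 4      # as in bytes_per_element_needed_for_baddbmm_duplication
attn_bytes = 16 * (h_lat * w_lat) ** 2 * bytes_per_element
print(f"{attn_bytes / 2**30:.2f} GiB")    # ~1.5 GiB for the [16, 4096, 4096] baddbmm output
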
@ -337,6 +336,66 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
self.disable_attention_slicing()

def enable_offload_submodels(self, device: torch.device):
"""
Offload each submodel when it's not in use.

Useful for low-vRAM situations where the size of the model in memory is a big chunk of
the total available resource, and you want to free up as much for inference as possible.

This requires more moving parts and may add some delay as the U-Net is swapped out for the
VAE and vice-versa.
"""
models = self._submodels
if self._model_group is not None:
self._model_group.uninstall(*models)
group = LazilyLoadedModelGroup(device)
group.install(*models)
self._model_group = group

def disable_offload_submodels(self):
"""
Leave all submodels loaded.

Appropriate for cases where the size of the model in memory is small compared to the memory
required for inference. Avoids the delay and complexity of shuffling the submodels to and
from the GPU.
"""
models = self._submodels
if self._model_group is not None:
self._model_group.uninstall(*models)
group = FullyLoadedModelGroup(self._model_group.execution_device)
group.install(*models)
self._model_group = group

def offload_all(self):
"""Offload all this pipeline's models to CPU."""
self._model_group.offload_current()

def ready(self):
"""
Ready this pipeline's models.

i.e. pre-load them to the GPU if appropriate.
"""
self._model_group.ready()

def to(self, torch_device: Optional[Union[str, torch.device]] = None):
if torch_device is None:
return self
self._model_group.set_device(torch_device)
self._model_group.ready()

@property
def device(self) -> torch.device:
return self._model_group.execution_device

@property
def _submodels(self) -> Sequence[torch.nn.Module]:
module_names, _, _ = self.extract_init_dict(dict(self.config))
values = [getattr(self, name) for name in module_names.keys()]
return [m for m in values if isinstance(m, torch.nn.Module)]

def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
conditioning_data: ConditioningData,
*,
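A minimal sketch of how these new methods are intended to be driven; the helper function and its arguments are illustrative, not part of the diff:

import torch

def configure_memory(pipeline, low_vram: bool, device=torch.device('cuda')):
    """Illustrative helper (not from the diff) showing the intended call pattern."""
    if low_vram:
        # submodels (text encoder, U-Net, VAE, ...) are swapped onto the GPU on demand
        pipeline.enable_offload_submodels(device)
    else:
        # keep every submodel resident on the execution device
        pipeline.disable_offload_submodels()
    pipeline.ready()       # pre-load (fully-loaded group) or arm lazy loading (lazy group)

# pipeline.offload_all() would then park the currently loaded submodel(s) back on the CPU,
# e.g. before switching to another model.
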
@ -378,7 +437,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
callback: Callable[[PipelineIntermediateState], None] = None
) -> tuple[torch.Tensor, Optional[AttentionMapSaver]]:
if timesteps is None:
self.scheduler.set_timesteps(num_inference_steps, device=self.unet.device)
self.scheduler.set_timesteps(num_inference_steps, device=self._model_group.device_for(self.unet))
timesteps = self.scheduler.timesteps
infer_latents_from_embeddings = GeneratorToCallbackinator(self.generate_latents_from_embeddings, PipelineIntermediateState)
result: PipelineIntermediateState = infer_latents_from_embeddings(

@ -410,7 +469,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):

batch_size = latents.shape[0]
batched_t = torch.full((batch_size,), timesteps[0],
dtype=timesteps.dtype, device=self.unet.device)
dtype=timesteps.dtype, device=self._model_group.device_for(self.unet))
latents = self.scheduler.add_noise(latents, noise, batched_t)

attention_map_saver: Optional[AttentionMapSaver] = None

@ -494,9 +553,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
initial_image_latents=torch.zeros_like(latents[:1], device=latents.device, dtype=latents.dtype)
).add_mask_channels(latents)

return self.unet(sample=latents,
timestep=t,
encoder_hidden_states=text_embeddings,
# First three args should be positional, not keywords, so torch hooks can see them.
return self.unet(latents, t, text_embeddings,
cross_attention_kwargs=cross_attention_kwargs).sample

def img2img_from_embeddings(self,

@ -515,9 +573,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
init_image = einops.rearrange(init_image, 'c h w -> 1 c h w')

# 6. Prepare latent variables
device = self.unet.device
latents_dtype = self.unet.dtype
initial_latents = self.non_noised_latents_from_image(init_image, device=device, dtype=latents_dtype)
initial_latents = self.non_noised_latents_from_image(
init_image, device=self._model_group.device_for(self.unet),
dtype=self.unet.dtype)
noise = noise_func(initial_latents)

return self.img2img_from_latents_and_embeddings(initial_latents, num_inference_steps,
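The "positional, not keywords" comment above reflects how torch forward-pre-hooks behave; a small standalone demonstration with a plain Linear layer (nothing here comes from the pipeline classes):

import torch

moved = []

def pre_hook(module, args):
    # A forward-pre-hook only receives the *positional* inputs, so only those could be
    # relocated by something like LazilyLoadedModelGroup's send_to_device call.
    moved.append(len(args))
    return args

lin = torch.nn.Linear(4, 4)
lin.register_forward_pre_hook(pre_hook)
x = torch.randn(1, 4)
lin(x)            # hook sees one positional input
lin(input=x)      # hook sees an empty tuple -> nothing to move
print(moved)      # [1, 0]
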
@ -530,7 +588,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
strength,
noise: torch.Tensor, run_id=None, callback=None
) -> InvokeAIStableDiffusionPipelineOutput:
timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength, self.unet.device)
timesteps, _ = self.get_img2img_timesteps(num_inference_steps, strength,
device=self._model_group.device_for(self.unet))
result_latents, result_attention_maps = self.latents_from_embeddings(
initial_latents, num_inference_steps, conditioning_data,
timesteps=timesteps,

@ -569,7 +628,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
run_id=None,
noise_func=None,
) -> InvokeAIStableDiffusionPipelineOutput:
device = self.unet.device
device = self._model_group.device_for(self.unet)
latents_dtype = self.unet.dtype

if isinstance(init_image, PIL.Image.Image):

@ -633,6 +692,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# TODO remove this workaround once kulinseth#222 is merged to pytorch mainline
self.vae.to('cpu')
init_image = init_image.to('cpu')
else:
self._model_group.load(self.vae)
init_latent_dist = self.vae.encode(init_image).latent_dist
init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible!
if device.type == 'mps':

@ -644,8 +705,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):

def check_for_safety(self, output, dtype):
with torch.inference_mode():
screened_images, has_nsfw_concept = self.run_safety_checker(
output.images, device=self._execution_device, dtype=dtype)
screened_images, has_nsfw_concept = self.run_safety_checker(output.images, dtype=dtype)
screened_attention_map_saver = None
if has_nsfw_concept is None or not has_nsfw_concept:
screened_attention_map_saver = output.attention_map_saver
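The recurring device_for substitution in these hunks exists because, once submodels can be offloaded, self.unet.device may point at the CPU even though execution happens elsewhere; a compressed sketch of the pattern, using the new offloading module with a stand-in module so it runs anywhere:

import torch
from ldm.invoke.offloading import FullyLoadedModelGroup

unet = torch.nn.Linear(8, 8)                  # stand-in for the real U-Net
group = FullyLoadedModelGroup(torch.device('cpu'))
group.install(unet)

# Allocate new tensors where the model will *execute*, not wherever it happens to sit now.
device = group.device_for(unet)
batched_t = torch.full((1,), 999, device=device)
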
@ -654,6 +714,12 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
# block the attention maps if NSFW content is detected
attention_map_saver=screened_attention_map_saver)

def run_safety_checker(self, image, device=None, dtype=None):
# overriding to use the model group for device info instead of requiring the caller to know.
if self.safety_checker is not None:
device = self._model_group.device_for(self.safety_checker)
return super().run_safety_checker(image, device, dtype)

@torch.inference_mode()
def get_learned_conditioning(self, c: List[List[str]], *, return_tokens=True, fragment_weights=None):
"""

@ -663,7 +729,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
text=c,
fragment_weights=fragment_weights,
should_return_tokens=return_tokens,
device=self.device)
device=self._model_group.device_for(self.unet))

@property
def cond_stage_model(self):

@ -684,6 +750,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
"""Compatible with DiffusionWrapper"""
return self.unet.in_channels

def decode_latents(self, latents):
# Explicit call to get the vae loaded, since `decode` isn't the forward method.
self._model_group.load(self.vae)
return super().decode_latents(latents)

def debug_latents(self, latents, msg):
with torch.inference_mode():
from ldm.util import debug_image
@ -26,7 +26,6 @@ import torch
import transformers
from diffusers import AutoencoderKL
from diffusers import logging as dlogging
from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error
from huggingface_hub import scan_cache_dir
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig

@ -49,9 +48,10 @@ class ModelManager(object):
def __init__(
self,
config: OmegaConf,
device_type: str = "cpu",
device_type: str | torch.device = "cpu",
precision: str = "float16",
max_loaded_models=DEFAULT_MAX_MODELS,
sequential_offload = False
):
"""
Initialize with the path to the models.yaml config file,

@ -69,6 +69,7 @@ class ModelManager(object):
self.models = {}
self.stack = [] # this is an LRU FIFO
self.current_model = None
self.sequential_offload = sequential_offload

def valid_model(self, model_name: str) -> bool:
"""

@ -530,7 +531,10 @@ class ModelManager(object):
dlogging.set_verbosity(verbosity)
assert pipeline is not None, OSError(f'"{name_or_path}" could not be loaded')

pipeline.to(self.device)
if self.sequential_offload:
pipeline.enable_offload_submodels(self.device)
else:
pipeline.to(self.device)

model_hash = self._diffuser_sha256(name_or_path)
@ -746,7 +750,7 @@ class ModelManager(object):
return

if model_path.stem in self.config: #already imported
print(f' > Already imported. Skipping')
print(' > Already imported. Skipping')
return

# another round of heuristics to guess the correct config file.

@ -755,7 +759,7 @@ class ModelManager(object):

key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
print(f' > SD-v2 model detected; model will be converted to diffusers format')
print(' > SD-v2 model detected; model will be converted to diffusers format')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml')
convert = True

@ -765,10 +769,10 @@ class ModelManager(object):
state_dict = checkpoint.get('state_dict') or checkpoint
in_channels = state_dict['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
if in_channels == 9:
print(f' > SD-v1 inpainting model detected')
print(' > SD-v1 inpainting model detected')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml')
elif in_channels == 4:
print(f' > SD-v1 model detected')
print(' > SD-v1 model detected')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
else:
print(f'** {thing} does not have an expected number of in_channels ({in_channels}). It will probably break when loaded.')
@ -1094,12 +1098,12 @@ class ModelManager(object):
if self.device == "cpu":
return model

# diffusers really really doesn't like us moving a float16 model onto CPU
verbosity = get_verbosity()
set_verbosity_error()
if isinstance(model, StableDiffusionGeneratorPipeline):
model.offload_all()
return model

model.cond_stage_model.device = "cpu"
model.to("cpu")
set_verbosity(verbosity)

for submodel in ("first_stage_model", "cond_stage_model", "model"):
try:

@ -1112,6 +1116,10 @@ class ModelManager(object):
if self.device == "cpu":
return model

if isinstance(model, StableDiffusionGeneratorPipeline):
model.ready()
return model

model.to(self.device)
model.cond_stage_model.device = self.device
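These two hunks give cached diffusers pipelines a park/wake cycle in place of whole-pipeline .to() moves; a hypothetical helper sketching the sequence the manager performs when switching models (old_pipeline and new_pipeline are assumed handles to cached StableDiffusionGeneratorPipeline instances, not names from the diff):

def switch(old_pipeline, new_pipeline):
    # Illustrative only: the same pair of calls the ModelManager now makes when
    # evicting one diffusers pipeline and activating another.
    old_pipeline.offload_all()   # push the evicted pipeline's submodels to the CPU
    new_pipeline.ready()         # pre-load (or arm lazy loading for) the incoming pipeline
    return new_pipeline
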
@ -1267,7 +1275,7 @@ class ModelManager(object):
strategy.execute()

@staticmethod
def _abs_path(path: Union(str, Path)) -> Path:
def _abs_path(path: str | Path) -> Path:
if path is None or Path(path).is_absolute():
return path
return Path(Globals.root, path).resolve()
247
ldm/invoke/offloading.py
Normal file

@ -0,0 +1,247 @@
from __future__ import annotations

import warnings
import weakref
from abc import ABCMeta, abstractmethod
from collections.abc import MutableMapping
from typing import Callable

import torch
from accelerate.utils import send_to_device
from torch.utils.hooks import RemovableHandle

OFFLOAD_DEVICE = torch.device("cpu")

class _NoModel:
"""Symbol that indicates no model is loaded.

(We can't weakref.ref(None), so this was my best idea at the time to come up with something
type-checkable.)
"""

def __bool__(self):
return False

def to(self, device: torch.device):
pass

def __repr__(self):
return "<NO MODEL>"

NO_MODEL = _NoModel()


class ModelGroup(metaclass=ABCMeta):
"""
A group of models.

The use case I had in mind when writing this is the sub-models used by a DiffusionPipeline,
e.g. its text encoder, U-net, VAE, etc.

Those models are :py:class:`diffusers.ModelMixin`, but "model" is interchangeable with
:py:class:`torch.nn.Module` here.
"""

def __init__(self, execution_device: torch.device):
self.execution_device = execution_device

@abstractmethod
def install(self, *models: torch.nn.Module):
"""Add models to this group."""
pass

@abstractmethod
def uninstall(self, models: torch.nn.Module):
"""Remove models from this group."""
pass

@abstractmethod
def uninstall_all(self):
"""Remove all models from this group."""

@abstractmethod
def load(self, model: torch.nn.Module):
"""Load this model to the execution device."""
pass

@abstractmethod
def offload_current(self):
"""Offload the current model(s) from the execution device."""
pass

@abstractmethod
def ready(self):
"""Ready this group for use."""
pass

@abstractmethod
def set_device(self, device: torch.device):
"""Change which device models from this group will execute on."""
pass

@abstractmethod
def device_for(self, model) -> torch.device:
"""Get the device the given model will execute on.

The model should already be a member of this group.
"""
pass

@abstractmethod
def __contains__(self, model):
"""Check if the model is a member of this group."""
pass

def __repr__(self) -> str:
return f"<{self.__class__.__name__} object at {id(self):x}: " \
f"device={self.execution_device} >"

class LazilyLoadedModelGroup(ModelGroup):
"""
Only one model from this group is loaded on the GPU at a time.

Running the forward method of a model will displace the previously-loaded model,
offloading it to CPU.

If you call other methods on the model, e.g. ``model.encode(x)`` instead of ``model(x)``,
you will need to explicitly load it with :py:method:`.load(model)`.

This implementation relies on pytorch forward-pre-hooks, and it will copy forward arguments
to the appropriate execution device, as long as they are positional arguments and not keyword
arguments. (I didn't make the rules; that's the way the pytorch 1.13 API works for hooks.)
"""

_hooks: MutableMapping[torch.nn.Module, RemovableHandle]
_current_model_ref: Callable[[], torch.nn.Module | _NoModel]

def __init__(self, execution_device: torch.device):
super().__init__(execution_device)
self._hooks = weakref.WeakKeyDictionary()
self._current_model_ref = weakref.ref(NO_MODEL)

def install(self, *models: torch.nn.Module):
for model in models:
self._hooks[model] = model.register_forward_pre_hook(self._pre_hook)

def uninstall(self, *models: torch.nn.Module):
for model in models:
hook = self._hooks.pop(model)
hook.remove()
if self.is_current_model(model):
# no longer hooked by this object, so don't claim to manage it
self.clear_current_model()

def uninstall_all(self):
self.uninstall(*self._hooks.keys())

def _pre_hook(self, module: torch.nn.Module, forward_input):
self.load(module)
if len(forward_input) == 0:
warnings.warn(f"Hook for {module.__class__.__name__} got no input. "
f"Inputs must be positional, not keywords.", stacklevel=3)
return send_to_device(forward_input, self.execution_device)

def load(self, module):
if not self.is_current_model(module):
self.offload_current()
self._load(module)

def offload_current(self):
module = self._current_model_ref()
if module is not NO_MODEL:
module.to(device=OFFLOAD_DEVICE)
self.clear_current_model()

def _load(self, module: torch.nn.Module) -> torch.nn.Module:
assert self.is_empty(), f"A model is already loaded: {self._current_model_ref()}"
module = module.to(self.execution_device)
self.set_current_model(module)
return module

def is_current_model(self, model: torch.nn.Module) -> bool:
"""Is the given model the one currently loaded on the execution device?"""
return self._current_model_ref() is model

def is_empty(self):
"""Are none of this group's models loaded on the execution device?"""
return self._current_model_ref() is NO_MODEL

def set_current_model(self, value):
self._current_model_ref = weakref.ref(value)

def clear_current_model(self):
self._current_model_ref = weakref.ref(NO_MODEL)

def set_device(self, device: torch.device):
if device == self.execution_device:
return
self.execution_device = device
current = self._current_model_ref()
if current is not NO_MODEL:
current.to(device)

def device_for(self, model):
if model not in self:
raise KeyError(f"This does not manage this model {type(model).__name__}", model)
return self.execution_device # this implementation only dispatches to one device

def ready(self):
pass # always ready to load on-demand

def __contains__(self, model):
return model in self._hooks

def __repr__(self) -> str:
return f"<{self.__class__.__name__} object at {id(self):x}: " \
f"current_model={type(self._current_model_ref()).__name__} >"

class FullyLoadedModelGroup(ModelGroup):
"""
A group of models without any implicit loading or unloading.

:py:meth:`.ready` loads _all_ the models to the execution device at once.
"""
_models: weakref.WeakSet

def __init__(self, execution_device: torch.device):
super().__init__(execution_device)
self._models = weakref.WeakSet()

def install(self, *models: torch.nn.Module):
for model in models:
self._models.add(model)
model.to(device=self.execution_device)

def uninstall(self, *models: torch.nn.Module):
for model in models:
self._models.remove(model)

def uninstall_all(self):
self.uninstall(*self._models)

def load(self, model):
model.to(device=self.execution_device)

def offload_current(self):
for model in self._models:
model.to(device=OFFLOAD_DEVICE)

def ready(self):
for model in self._models:
self.load(model)

def set_device(self, device: torch.device):
self.execution_device = device
for model in self._models:
if model.device != OFFLOAD_DEVICE:
model.to(device=device)

def device_for(self, model):
if model not in self:
raise KeyError(f"This does not manage this model {type(model).__name__}", model)
return self.execution_device # this implementation only dispatches to one device

def __contains__(self, model):
return model in self._models
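A usage illustration for the new module, with small nn.Linear stand-ins in place of real submodels and the CPU doubling as the execution device so the snippet runs anywhere:

import torch
from ldm.invoke.offloading import LazilyLoadedModelGroup, OFFLOAD_DEVICE

encoder = torch.nn.Linear(4, 8)
decoder = torch.nn.Linear(8, 4)

group = LazilyLoadedModelGroup(execution_device=torch.device('cpu'))
group.install(encoder, decoder)

x = torch.randn(1, 4)
h = encoder(x)        # forward-pre-hook loads `encoder`, making it the current model
y = decoder(h)        # `encoder` is moved to OFFLOAD_DEVICE, `decoder` takes its place

assert group.is_current_model(decoder)
group.offload_current()            # nothing left on the execution device
group.uninstall(encoder, decoder)  # remove the hooks again
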
@ -214,7 +214,7 @@ class WeightedPromptFragmentsToEmbeddingsConverter():

def build_weighted_embedding_tensor(self, token_ids: torch.Tensor, per_token_weights: torch.Tensor) -> torch.Tensor:
'''
Build a tensor that embeds the passed-in token IDs and applyies the given per_token weights
Build a tensor that embeds the passed-in token IDs and applies the given per_token weights
:param token_ids: A tensor of shape `[self.max_length]` containing token IDs (ints)
:param per_token_weights: A tensor of shape `[self.max_length]` containing weights (floats)
:return: A tensor of shape `[1, self.max_length, token_dim]` representing the requested weighted embeddings

@ -224,13 +224,12 @@ class WeightedPromptFragmentsToEmbeddingsConverter():
if token_ids.shape != torch.Size([self.max_length]):
raise ValueError(f"token_ids has shape {token_ids.shape} - expected [{self.max_length}]")

z = self.text_encoder.forward(input_ids=token_ids.unsqueeze(0),
return_dict=False)[0]
z = self.text_encoder(token_ids.unsqueeze(0), return_dict=False)[0]
empty_token_ids = torch.tensor([self.tokenizer.bos_token_id] +
[self.tokenizer.pad_token_id] * (self.max_length-2) +
[self.tokenizer.eos_token_id], dtype=torch.int, device=token_ids.device).unsqueeze(0)
empty_z = self.text_encoder(input_ids=empty_token_ids).last_hidden_state
batch_weights_expanded = per_token_weights.reshape(per_token_weights.shape + (1,)).expand(z.shape)
[self.tokenizer.eos_token_id], dtype=torch.int, device=z.device).unsqueeze(0)
empty_z = self.text_encoder(empty_token_ids).last_hidden_state
batch_weights_expanded = per_token_weights.reshape(per_token_weights.shape + (1,)).expand(z.shape).to(z)
z_delta_from_empty = z - empty_z
weighted_z = empty_z + (z_delta_from_empty * batch_weights_expanded)
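The weighting rule in this hunk interpolates between the embedding of an empty prompt and the embedding of the real tokens; a tiny standalone sketch of that formula with dummy tensors (shapes shrunk for illustration):

import torch

max_length, token_dim = 5, 3
z = torch.randn(1, max_length, token_dim)          # embedding of the real token IDs
empty_z = torch.randn(1, max_length, token_dim)    # embedding of BOS + padding + EOS
per_token_weights = torch.tensor([1.0, 1.5, 0.5, 1.0, 0.0])

weights = per_token_weights.reshape(-1, 1).expand(max_length, token_dim).unsqueeze(0).to(z)
weighted_z = empty_z + (z - empty_z) * weights

# weight 1.0 keeps the token's embedding, 0.0 collapses it to the empty-prompt embedding,
# values above 1.0 push it further away from the empty-prompt embedding.
assert torch.allclose(weighted_z[0, 0], z[0, 0])
assert torch.allclose(weighted_z[0, 4], empty_z[0, 4])
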