Merge branch 'main' into enhance/convert-inpaint-models

commit bdf683ec41
Lincoln Stein, 2023-02-07 06:59:35 -05:00 (committed by GitHub)
39 changed files with 957 additions and 705 deletions


@ -3,6 +3,7 @@ on:
push:
branches:
- 'main'
- 'update/ci/*'
tags:
- 'v*.*.*'
@ -47,11 +48,10 @@ jobs:
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha,enable=true,prefix=sha-,suffix=${{ matrix.flavor}},format=short
type=raw,value={{branch}}-${{ matrix.flavor }}
type=sha,enable=true,prefix=sha-,format=short
flavor: |
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2


@ -1,7 +1,4 @@
# syntax=docker/dockerfile:1
# Maintained by Matthias Wild <mauwii@outlook.de>
ARG PYTHON_VERSION=3.9
##################
## base image ##
@ -85,3 +82,5 @@ ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]
LABEL org.opencontainers.image.authors="mauwii@outlook.de"


@ -54,8 +54,7 @@ Please enter 1, 2, 3, or 4: [1] 3
```
From the command line, with the InvokeAI virtual environment active,
you can launch the front end with the command `textual_inversion
--gui`.
you can launch the front end with the command `invokeai-ti --gui`.
This will launch a text-based front end that will look like this:
@ -227,12 +226,12 @@ It accepts a large number of arguments, which can be summarized by
passing the `--help` argument:
```sh
textual_inversion --help
invokeai-ti --help
```
Typical usage is shown here:
```sh
textual_inversion \
invokeai-ti \
--model=stable-diffusion-1.5 \
--resolution=512 \
--learnable_property=style \
@ -267,4 +266,4 @@ resources:
---
copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team
copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team

installer/create_installer.sh  Normal file → Executable file (0 lines changed)

@ -249,6 +249,7 @@ class InvokeAiInstance:
"--require-virtualenv",
"torch",
"torchvision",
"--force-reinstall",
"--find-links" if find_links is not None else None,
find_links,
"--extra-index-url" if extra_index_url is not None else None,
@ -325,6 +326,7 @@ class InvokeAiInstance:
Configure the InvokeAI runtime directory
"""
# set sys.argv to a consistent state
new_argv = [sys.argv[0]]
for i in range(1,len(sys.argv)):
el = sys.argv[i]
@ -344,9 +346,6 @@ class InvokeAiInstance:
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
# from the installer will also automatically propagate down to the config script.
# this may change in the future with config refactoring!
# set sys.argv to a consistent state
invokeai_configure.main()
def install_user_scripts(self):
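For reference, the argument list above works out to roughly the following pip invocation; the helper name is hypothetical and the `None` placeholders are filtered out before the command runs. `--force-reinstall` presumably ensures an already-installed torch/torchvision is replaced by the wheels resolved from the configured index.

```python
# Illustrative reconstruction of the pip argument list assembled above;
# None placeholders are dropped before the command runs. Helper name is
# hypothetical, not part of the installer.
def build_pip_args(find_links: str | None, extra_index_url: str | None) -> list[str]:
    args = [
        "install",
        "--require-virtualenv",
        "torch",
        "torchvision",
        "--force-reinstall",
        "--find-links" if find_links is not None else None,
        find_links,
        "--extra-index-url" if extra_index_url is not None else None,
        extra_index_url,
    ]
    return [a for a in args if a is not None]
```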


@ -1208,12 +1208,18 @@ class InvokeAIWebServer:
)
except KeyboardInterrupt:
# Clear the CUDA cache on an exception
self.empty_cuda_cache()
self.socketio.emit("processingCanceled")
raise
except CanceledException:
# Clear the CUDA cache on an exception
self.empty_cuda_cache()
self.socketio.emit("processingCanceled")
pass
except Exception as e:
# Clear the CUDA cache on an exception
self.empty_cuda_cache()
print(e)
self.socketio.emit("error", {"message": (str(e))})
print("\n")
@ -1221,6 +1227,12 @@ class InvokeAIWebServer:
traceback.print_exc()
print("\n")
def empty_cuda_cache(self):
if self.generate.device.type == "cuda":
import torch.cuda
torch.cuda.empty_cache()
def parameters_to_generated_image_metadata(self, parameters):
try:
# top-level metadata minus `image` or `images`

File diff suppressed because one or more lines are too long (3 files)


@ -7,8 +7,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.dd4ad8a1.js"></script>
<link rel="stylesheet" href="./assets/index.8badc8b4.css">
<script type="module" crossorigin src="./assets/index.f3fa9388.js"></script>
<link rel="stylesheet" href="./assets/index.1536494e.css">
<script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
<script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
</head>
@ -18,6 +18,6 @@
<script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
<script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-8219c08f.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-4add591a.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
</body>
</html>


@ -24,6 +24,7 @@
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"hiresStrength": "High Res Strength",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",


@ -24,6 +24,7 @@
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"hiresStrength": "High Res Strength",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",
@ -43,6 +44,7 @@
"invoke": "Invoke",
"cancel": "Cancel",
"promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
"negativePrompts": "Negative Prompts",
"sendTo": "Send to",
"sendToImg2Img": "Send to Image to Image",
"sendToUnifiedCanvas": "Send To Unified Canvas",


@ -24,6 +24,7 @@
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"hiresStrength": "High Res Strength",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",


@ -24,6 +24,7 @@
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"hiresStrength": "High Res Strength",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",
@ -43,6 +44,7 @@
"invoke": "Invoke",
"cancel": "Cancel",
"promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
"negativePrompts": "Negative Prompts",
"sendTo": "Send to",
"sendToImg2Img": "Send to Image to Image",
"sendToUnifiedCanvas": "Send To Unified Canvas",


@ -16,6 +16,20 @@ export const SAMPLERS: Array<string> = [
'k_heun',
];
// Valid Diffusers Samplers
export const DIFFUSERS_SAMPLERS: Array<string> = [
'ddim',
'plms',
'k_lms',
'dpmpp_2',
'k_dpm_2',
'k_dpm_2_a',
'k_dpmpp_2',
'k_euler',
'k_euler_a',
'k_heun',
];
// Valid image widths
export const WIDTHS: Array<number> = [
64, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960,


@ -11,7 +11,6 @@ const useClickOutsideWatcher = () => {
function handleClickOutside(e: MouseEvent) {
watchers.forEach(({ ref, enable, callback }) => {
if (enable && ref.current && !ref.current.contains(e.target as Node)) {
console.log('callback');
callback();
}
});


@ -0,0 +1,20 @@
import * as InvokeAI from 'app/invokeai';
import promptToString from './promptToString';
export function getPromptAndNegative(input_prompt: InvokeAI.Prompt) {
let prompt: string = promptToString(input_prompt);
let negativePrompt: string | null = null;
const negativePromptRegExp = new RegExp(/(?<=\[)[^\][]*(?=])/, 'gi');
const negativePromptMatches = [...prompt.matchAll(negativePromptRegExp)];
if (negativePromptMatches && negativePromptMatches.length > 0) {
negativePrompt = negativePromptMatches.join(', ');
prompt = prompt
.replaceAll(negativePromptRegExp, '')
.replaceAll('[]', '')
.trim();
}
return [prompt, negativePrompt];
}
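The same bracketed-negative convention, sketched in Python for reference; the regex and cleanup steps mirror `getPromptAndNegative` above, and the function name is illustrative only.

```python
import re

# Sketch of the same idea: "[...]" spans are collected as the negative prompt
# and stripped from the positive prompt. The regex mirrors the
# lookbehind/lookahead pattern used in getPromptAndNegative.
NEGATIVE_RE = re.compile(r"(?<=\[)[^\][]*(?=\])")

def split_prompt(prompt: str) -> tuple[str, str | None]:
    matches = NEGATIVE_RE.findall(prompt)
    if not matches:
        return prompt, None
    negative = ", ".join(matches)
    positive = NEGATIVE_RE.sub("", prompt).replace("[]", "").strip()
    return positive, negative

# split_prompt("a castle on a hill [blurry, watermark]")
# -> ("a castle on a hill", "blurry, watermark")
```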


@ -100,12 +100,14 @@ export const frontendToBackendParameters = (
facetoolType,
height,
hiresFix,
hiresStrength,
img2imgStrength,
infillMethod,
initialImage,
iterations,
perlin,
prompt,
negativePrompt,
sampler,
seamBlur,
seamless,
@ -155,6 +157,10 @@ export const frontendToBackendParameters = (
let esrganParameters: false | BackendEsrGanParameters = false;
let facetoolParameters: false | BackendFacetoolParameters = false;
if (negativePrompt !== '') {
generationParameters.prompt = `${prompt} [${negativePrompt}]`;
}
generationParameters.seed = shouldRandomizeSeed
? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
: seed;
@ -164,6 +170,8 @@ export const frontendToBackendParameters = (
generationParameters.seamless = seamless;
generationParameters.hires_fix = hiresFix;
if (hiresFix) generationParameters.strength = hiresStrength;
if (shouldRunESRGAN) {
esrganParameters = {
level: upscalingLevel,
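A minimal sketch of the resulting payload assembly, matching the branches above: the negative prompt is folded back into the single prompt string the backend already accepts, and hires strength only overrides `strength` when hires fix is enabled. Helper name is illustrative; field names follow the diff.

```python
# Minimal sketch of the generation-parameter assembly described above.
def build_generation_parameters(prompt: str, negative_prompt: str,
                                hires_fix: bool, hires_strength: float) -> dict:
    params: dict = {"prompt": prompt, "hires_fix": hires_fix}
    if negative_prompt != "":
        params["prompt"] = f"{prompt} [{negative_prompt}]"
    if hires_fix:
        params["strength"] = hires_strength
    return params
```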


@ -9,6 +9,7 @@ import {
setAllParameters,
setInitialImage,
setIsLightBoxOpen,
setNegativePrompt,
setPrompt,
setSeed,
setShouldShowImageDetails,
@ -44,6 +45,7 @@ import { GalleryState } from 'features/gallery/store/gallerySlice';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import IAIPopover from 'common/components/IAIPopover';
import { useTranslation } from 'react-i18next';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
const systemSelector = createSelector(
[
@ -241,9 +243,18 @@ const CurrentImageButtons = () => {
[currentImage]
);
const handleClickUsePrompt = () =>
currentImage?.metadata?.image?.prompt &&
dispatch(setPrompt(currentImage.metadata.image.prompt));
const handleClickUsePrompt = () => {
if (currentImage?.metadata?.image?.prompt) {
const [prompt, negativePrompt] = getPromptAndNegative(
currentImage?.metadata?.image?.prompt
);
prompt && dispatch(setPrompt(prompt));
negativePrompt
? dispatch(setNegativePrompt(negativePrompt))
: dispatch(setNegativePrompt(''));
}
};
useHotkeys(
'p',


@ -10,9 +10,10 @@ import { DragEvent, memo, useState } from 'react';
import {
setActiveTab,
setAllImageToImageParameters,
setAllTextToImageParameters,
setAllParameters,
setInitialImage,
setIsLightBoxOpen,
setNegativePrompt,
setPrompt,
setSeed,
} from 'features/options/store/optionsSlice';
@ -24,6 +25,7 @@ import {
} from 'features/canvas/store/canvasSlice';
import { hoverableImageSelector } from 'features/gallery/store/gallerySliceSelectors';
import { useTranslation } from 'react-i18next';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
interface HoverableImageProps {
image: InvokeAI.Image;
@ -62,7 +64,17 @@ const HoverableImage = memo((props: HoverableImageProps) => {
const handleMouseOut = () => setIsHovered(false);
const handleUsePrompt = () => {
image.metadata && dispatch(setPrompt(image.metadata.image.prompt));
if (image.metadata) {
const [prompt, negativePrompt] = getPromptAndNegative(
image.metadata?.image?.prompt
);
prompt && dispatch(setPrompt(prompt));
negativePrompt
? dispatch(setNegativePrompt(negativePrompt))
: dispatch(setNegativePrompt(''));
}
toast({
title: t('toast:promptSet'),
status: 'success',
@ -115,7 +127,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
};
const handleUseAllParameters = () => {
metadata && dispatch(setAllTextToImageParameters(metadata));
metadata && dispatch(setAllParameters(metadata));
toast({
title: t('toast:parametersSet'),
status: 'success',


@ -38,7 +38,6 @@ export const uploadImage =
});
const image = (await response.json()) as InvokeAI.ImageUploadResponse;
console.log(image);
const newImage: InvokeAI.Image = {
uuid: uuidv4(),
category: 'user',


@ -1,10 +1,53 @@
import { Flex } from '@chakra-ui/react';
import { ChangeEvent } from 'react';
import { RootState } from 'app/store';
import type { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISwitch from 'common/components/IAISwitch';
import { setHiresFix } from 'features/options/store/optionsSlice';
import {
setHiresFix,
setHiresStrength,
} from 'features/options/store/optionsSlice';
import { useTranslation } from 'react-i18next';
import IAISlider from 'common/components/IAISlider';
function HighResStrength() {
const hiresFix = useAppSelector((state: RootState) => state.options.hiresFix);
const hiresStrength = useAppSelector(
(state: RootState) => state.options.hiresStrength
);
const dispatch = useAppDispatch();
const { t } = useTranslation();
const handleHiresStrength = (v: number) => {
dispatch(setHiresStrength(v));
};
const handleHiResStrengthReset = () => {
dispatch(setHiresStrength(0.75));
};
return (
<IAISlider
label={t('options:hiresStrength')}
step={0.01}
min={0.01}
max={0.99}
onChange={handleHiresStrength}
value={hiresStrength}
isInteger={false}
withInput
withSliderMarks
inputWidth={'5.5rem'}
withReset
handleReset={handleHiResStrengthReset}
isSliderDisabled={!hiresFix}
isInputDisabled={!hiresFix}
isResetDisabled={!hiresFix}
/>
);
}
/**
* Hires Fix Toggle
@ -27,6 +70,7 @@ const HiresOptions = () => {
isChecked={hiresFix}
onChange={handleChangeHiresFix}
/>
<HighResStrength />
</Flex>
);
};


@ -1,13 +1,16 @@
import React, { ChangeEvent } from 'react';
import { SAMPLERS } from 'app/constants';
import { DIFFUSERS_SAMPLERS, SAMPLERS } from 'app/constants';
import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISelect from 'common/components/IAISelect';
import { setSampler } from 'features/options/store/optionsSlice';
import { useTranslation } from 'react-i18next';
import _ from 'lodash';
import { activeModelSelector } from 'features/system/store/systemSelectors';
export default function MainSampler() {
const sampler = useAppSelector((state: RootState) => state.options.sampler);
const activeModel = useAppSelector(activeModelSelector);
const dispatch = useAppDispatch();
const { t } = useTranslation();
@ -19,7 +22,9 @@ export default function MainSampler() {
label={t('options:sampler')}
value={sampler}
onChange={handleChangeSampler}
validValues={SAMPLERS}
validValues={
activeModel.format === 'diffusers' ? DIFFUSERS_SAMPLERS : SAMPLERS
}
styleClass="main-option-block"
/>
);


@ -0,0 +1,38 @@
import { FormControl, Textarea } from '@chakra-ui/react';
import type { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { setNegativePrompt } from 'features/options/store/optionsSlice';
import { useTranslation } from 'react-i18next';
export function NegativePromptInput() {
const negativePrompt = useAppSelector(
(state: RootState) => state.options.negativePrompt
);
const dispatch = useAppDispatch();
const { t } = useTranslation();
return (
<FormControl>
<Textarea
id="negativePrompt"
name="negativePrompt"
value={negativePrompt}
onChange={(e) => dispatch(setNegativePrompt(e.target.value))}
background="var(--prompt-bg-color)"
placeholder={t('options:negativePrompts')}
_placeholder={{ fontSize: '0.8rem' }}
borderColor="var(--border-color)"
_hover={{
borderColor: 'var(--border-color-light)',
}}
_focusVisible={{
borderColor: 'var(--border-color-invalid)',
boxShadow: '0 0 10px var(--box-shadow-color-invalid)',
}}
fontSize="0.9rem"
color="var(--text-color-secondary)"
/>
</FormControl>
);
}


@ -5,6 +5,7 @@ import promptToString from 'common/util/promptToString';
import { seedWeightsToString } from 'common/util/seedWeightPairs';
import { FACETOOL_TYPES } from 'app/constants';
import { InvokeTabName, tabMap } from 'features/tabs/tabMap';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
export type UpscalingLevel = 2 | 4;
@ -19,6 +20,7 @@ export interface OptionsState {
facetoolType: FacetoolType;
height: number;
hiresFix: boolean;
hiresStrength: number;
img2imgStrength: number;
infillMethod: string;
initialImage?: InvokeAI.Image | string; // can be an Image or url
@ -28,6 +30,7 @@ export interface OptionsState {
optionsPanelScrollPosition: number;
perlin: number;
prompt: string;
negativePrompt: string;
sampler: string;
seamBlur: number;
seamless: boolean;
@ -69,6 +72,7 @@ const initialOptionsState: OptionsState = {
facetoolType: 'gfpgan',
height: 512,
hiresFix: false,
hiresStrength: 0.75,
img2imgStrength: 0.75,
infillMethod: 'patchmatch',
isLightBoxOpen: false,
@ -77,6 +81,7 @@ const initialOptionsState: OptionsState = {
optionsPanelScrollPosition: 0,
perlin: 0,
prompt: '',
negativePrompt: '',
sampler: 'k_lms',
seamBlur: 16,
seamless: false,
@ -123,6 +128,17 @@ export const optionsSlice = createSlice({
state.prompt = promptToString(newPrompt);
}
},
setNegativePrompt: (
state,
action: PayloadAction<string | InvokeAI.Prompt>
) => {
const newPrompt = action.payload;
if (typeof newPrompt === 'string') {
state.negativePrompt = newPrompt;
} else {
state.negativePrompt = promptToString(newPrompt);
}
},
setIterations: (state, action: PayloadAction<number>) => {
state.iterations = action.payload;
},
@ -175,6 +191,9 @@ export const optionsSlice = createSlice({
setHiresFix: (state, action: PayloadAction<boolean>) => {
state.hiresFix = action.payload;
},
setHiresStrength: (state, action: PayloadAction<number>) => {
state.hiresStrength = action.payload;
},
setShouldFitToWidthHeight: (state, action: PayloadAction<boolean>) => {
state.shouldFitToWidthHeight = action.payload;
},
@ -307,7 +326,14 @@ export const optionsSlice = createSlice({
state.shouldRandomizeSeed = false;
}
if (prompt) state.prompt = promptToString(prompt);
if (prompt) {
const [promptOnly, negativePrompt] = getPromptAndNegative(prompt);
if (promptOnly) state.prompt = promptOnly;
negativePrompt
? (state.negativePrompt = negativePrompt)
: (state.negativePrompt = '');
}
if (sampler) state.sampler = sampler;
if (steps) state.steps = steps;
if (cfg_scale) state.cfgScale = cfg_scale;
@ -438,6 +464,7 @@ export const {
setFacetoolType,
setHeight,
setHiresFix,
setHiresStrength,
setImg2imgStrength,
setInfillMethod,
setInitialImage,
@ -448,6 +475,7 @@ export const {
setParameter,
setPerlin,
setPrompt,
setNegativePrompt,
setSampler,
setSeamBlur,
setSeamless,


@ -13,16 +13,16 @@ export default function LanguagePicker() {
const LANGUAGES = {
en: t('common:langEnglish'),
ru: t('common:langRussian'),
it: t('common:langItalian'),
pt_br: t('common:langBrPortuguese'),
de: t('common:langGerman'),
pl: t('common:langPolish'),
zh_cn: t('common:langSimplifiedChinese'),
es: t('common:langSpanish'),
ja: t('common:langJapanese'),
nl: t('common:langDutch'),
fr: t('common:langFrench'),
de: t('common:langGerman'),
it: t('common:langItalian'),
ja: t('common:langJapanese'),
pl: t('common:langPolish'),
pt_br: t('common:langBrPortuguese'),
ru: t('common:langRussian'),
zh_cn: t('common:langSimplifiedChinese'),
es: t('common:langSpanish'),
ua: t('common:langUkranian'),
};


@ -316,7 +316,6 @@ export default function CheckpointModelEdit() {
) : (
<Flex
width="100%"
height="250px"
justifyContent="center"
alignItems="center"
backgroundColor="var(--background-color)"


@ -271,7 +271,6 @@ export default function DiffusersModelEdit() {
) : (
<Flex
width="100%"
height="250px"
justifyContent="center"
alignItems="center"
backgroundColor="var(--background-color)"


@ -5,27 +5,14 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISelect from 'common/components/IAISelect';
import _ from 'lodash';
import { ChangeEvent } from 'react';
import { systemSelector } from '../store/systemSelectors';
import { activeModelSelector, systemSelector } from '../store/systemSelectors';
const selector = createSelector(
[systemSelector],
(system) => {
const { isProcessing, model_list } = system;
const models = _.map(model_list, (model, key) => key);
const activeModel = _.reduce(
model_list,
(acc, model, key) => {
if (model.status === 'active') {
acc = key;
}
return acc;
},
''
);
const activeDesc = model_list[activeModel].description;
return { models, activeModel, isProcessing, activeDesc };
return { models, isProcessing };
},
{
memoizeOptions: {
@ -36,8 +23,8 @@ const selector = createSelector(
const ModelSelect = () => {
const dispatch = useAppDispatch();
const { models, activeModel, isProcessing, activeDesc } =
useAppSelector(selector);
const { models, isProcessing } = useAppSelector(selector);
const activeModel = useAppSelector(activeModelSelector);
const handleChangeModel = (e: ChangeEvent<HTMLSelectElement>) => {
dispatch(requestModelChange(e.target.value));
};
@ -50,9 +37,9 @@ const ModelSelect = () => {
>
<IAISelect
style={{ fontSize: '0.8rem' }}
tooltip={activeDesc}
tooltip={activeModel.description}
isDisabled={isProcessing}
value={activeModel}
value={activeModel.name}
validValues={models}
onChange={handleChangeModel}
/>


@ -1,6 +1,31 @@
import { createSelector } from '@reduxjs/toolkit';
import { RootState } from 'app/store';
import { SystemState } from './systemSlice';
import _ from 'lodash';
export const systemSelector = (state: RootState): SystemState => state.system;
export const toastQueueSelector = (state: RootState) => state.system.toastQueue;
export const activeModelSelector = createSelector(
systemSelector,
(system) => {
const { model_list } = system;
const activeModel = _.reduce(
model_list,
(acc, model, key) => {
if (model.status === 'active') {
acc = key;
}
return acc;
},
''
);
return { ...model_list[activeModel], name: activeModel };
},
{
memoizeOptions: {
resultEqualityCheck: _.isEqual,
},
}
);
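A rough Python analogue of `activeModelSelector`, for reference: scan `model_list` for the entry whose status is "active" and return its record together with its key as `name`. The function name is illustrative; `model_list` mirrors the model listing the web API already returns.

```python
# Illustrative analogue of the new activeModelSelector.
def active_model(model_list: dict[str, dict]) -> dict:
    name = next(
        (key for key, model in model_list.items() if model.get("status") == "active"),
        "",
    )
    return {**model_list.get(name, {}), "name": name}
```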


@ -19,6 +19,8 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import { useTranslation } from 'react-i18next';
import { Flex } from '@chakra-ui/react';
import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';
export default function ImageToImagePanel() {
const { t } = useTranslation();
@ -67,7 +69,10 @@ export default function ImageToImagePanel() {
return (
<InvokeOptionsPanel>
<PromptInput />
<Flex flexDir="column" rowGap="0.5rem">
<PromptInput />
<NegativePromptInput />
</Flex>
<ProcessButtons />
<MainOptions />
<ImageToImageStrength


@ -24,8 +24,8 @@
}
svg {
width: 26px;
height: 26px;
width: 24px;
height: 24px;
}
&[aria-selected='true'] {


@ -1,3 +1,4 @@
import { Flex } from '@chakra-ui/react';
import { Feature } from 'app/features';
import FaceRestoreOptions from 'features/options/components/AdvancedOptions/FaceRestore/FaceRestoreOptions';
import FaceRestoreToggle from 'features/options/components/AdvancedOptions/FaceRestore/FaceRestoreToggle';
@ -10,6 +11,7 @@ import VariationsOptions from 'features/options/components/AdvancedOptions/Varia
import MainOptions from 'features/options/components/MainOptions/MainOptions';
import OptionsAccordion from 'features/options/components/OptionsAccordion';
import ProcessButtons from 'features/options/components/ProcessButtons/ProcessButtons';
import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';
import PromptInput from 'features/options/components/PromptInput/PromptInput';
import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
import { useTranslation } from 'react-i18next';
@ -50,7 +52,10 @@ export default function TextToImagePanel() {
return (
<InvokeOptionsPanel>
<PromptInput />
<Flex flexDir="column" rowGap="0.5rem">
<PromptInput />
<NegativePromptInput />
</Flex>
<ProcessButtons />
<MainOptions />
<OptionsAccordion accordionInfo={textToImageAccordions} />


@ -13,6 +13,8 @@ import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
import BoundingBoxSettings from 'features/options/components/AdvancedOptions/Canvas/BoundingBoxSettings/BoundingBoxSettings';
import InfillAndScalingOptions from 'features/options/components/AdvancedOptions/Canvas/InfillAndScalingOptions';
import { useTranslation } from 'react-i18next';
import { Flex } from '@chakra-ui/react';
import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';
export default function UnifiedCanvasPanel() {
const { t } = useTranslation();
@ -48,7 +50,10 @@ export default function UnifiedCanvasPanel() {
return (
<InvokeOptionsPanel>
<PromptInput />
<Flex flexDir="column" rowGap="0.5rem">
<PromptInput />
<NegativePromptInput />
</Flex>
<ProcessButtons />
<MainOptions />
<ImageToImageStrength


@ -211,7 +211,7 @@ class Generate:
print('>> xformers memory-efficient attention is available but disabled')
else:
print('>> xformers not installed')
# model caching system for fast switching
self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models)
# don't accept invalid models
@ -344,6 +344,7 @@ class Generate:
**args,
): # eat up additional cruft
self.clear_cuda_stats()
"""
ldm.generate.prompt2image() is the common entry point for txt2img() and img2img()
It takes the following arguments:
@ -548,6 +549,7 @@ class Generate:
inpaint_width = inpaint_width,
enable_image_debugging = enable_image_debugging,
free_gpu_mem=self.free_gpu_mem,
clear_cuda_cache=self.clear_cuda_cache
)
if init_color:
@ -565,11 +567,17 @@ class Generate:
image_callback = image_callback)
except KeyboardInterrupt:
# Clear the CUDA cache on an exception
self.clear_cuda_cache()
if catch_interrupts:
print('**Interrupted** Partial results will be returned.')
else:
raise KeyboardInterrupt
except RuntimeError:
# Clear the CUDA cache on an exception
self.clear_cuda_cache()
print(traceback.format_exc(), file=sys.stderr)
print('>> Could not generate image.')
@ -579,22 +587,42 @@ class Generate:
f'>> {len(results)} image(s) generated in', '%4.2fs' % (
toc - tic)
)
self.print_cuda_stats()
return results
def clear_cuda_cache(self):
if self._has_cuda():
self.max_memory_allocated = max(
self.max_memory_allocated,
torch.cuda.max_memory_allocated()
)
self.memory_allocated = max(
self.memory_allocated,
torch.cuda.memory_allocated()
)
self.session_peakmem = max(
self.session_peakmem,
torch.cuda.max_memory_allocated()
)
torch.cuda.empty_cache()
def clear_cuda_stats(self):
self.max_memory_allocated = 0
self.memory_allocated = 0
def print_cuda_stats(self):
if self._has_cuda():
print(
'>> Max VRAM used for this generation:',
'%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
'%4.2fG.' % (self.max_memory_allocated / 1e9),
'Current VRAM utilization:',
'%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
'%4.2fG' % (self.memory_allocated / 1e9),
)
self.session_peakmem = max(
self.session_peakmem, torch.cuda.max_memory_allocated()
)
print(
'>> Max VRAM used since script start: ',
'%4.2fG' % (self.session_peakmem / 1e9),
)
return results
# this needs to be generalized to all sorts of postprocessors, which should be wrapped
# in a nice harmonized call signature. For now we have a bunch of if/elses!
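The intent of the new bookkeeping, sketched minimally: peak readings are folded into running maxima before each cache clear, so a single per-generation figure can still be reported even when the cache is emptied mid-run (on cancel, runtime error, or between txt2img2img passes). The class and method names below are illustrative; the real counters live on `Generate`.

```python
import torch

# Minimal sketch of the VRAM accounting described above.
class CudaStats:
    def __init__(self) -> None:
        self.max_memory_allocated = 0
        self.memory_allocated = 0
        self.session_peakmem = 0

    def clear_stats(self) -> None:  # reset at the start of each prompt2image call
        self.max_memory_allocated = 0
        self.memory_allocated = 0

    def clear_cache(self) -> None:  # fold peaks into running maxima, then free the cache
        if torch.cuda.is_available():
            self.max_memory_allocated = max(self.max_memory_allocated,
                                            torch.cuda.max_memory_allocated())
            self.memory_allocated = max(self.memory_allocated,
                                        torch.cuda.memory_allocated())
            self.session_peakmem = max(self.session_peakmem,
                                       torch.cuda.max_memory_allocated())
            torch.cuda.empty_cache()

    def report(self) -> None:
        print(f">> Max VRAM used for this generation: {self.max_memory_allocated / 1e9:4.2f}G")
        print(f">> Max VRAM used since script start:  {self.session_peakmem / 1e9:4.2f}G")
```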


@ -1 +1 @@
__version__='2.3.0-rc4'
__version__='2.3.0-rc5'


@ -123,8 +123,9 @@ class Generator:
seed = self.new_seed()
# Free up memory from the last generation.
if self.model.device.type == 'cuda':
torch.cuda.empty_cache()
clear_cuda_cache = kwargs['clear_cuda_cache'] or None
if clear_cuda_cache is not None:
clear_cuda_cache()
return results
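A minimal sketch of the hand-off shown above: `Generate` passes its `clear_cuda_cache` method down through kwargs so generators free memory without calling torch directly, keeping the VRAM accounting in one place. The sketch uses `.get()` to tolerate callers that omit the hook; the committed code indexes `kwargs['clear_cuda_cache']` directly, so callers are expected to supply it.

```python
# Illustrative hand-off of the cache-clearing callback via kwargs.
def free_last_generation_memory(**kwargs) -> None:
    clear_cuda_cache = kwargs.get("clear_cuda_cache")
    if clear_cuda_cache is not None:
        clear_cuda_cache()  # delegates to Generate.clear_cuda_cache()
```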


@ -65,6 +65,11 @@ class Txt2Img2Img(Generator):
mode="bilinear"
)
# Free up memory from the last generation.
clear_cuda_cache = kwargs['clear_cuda_cache'] or None
if clear_cuda_cache is not None:
clear_cuda_cache()
second_pass_noise = self.get_noise_like(resized_latents)
verbosity = get_verbosity()