Merge branch 'main' into ti-doc-update

Commit ac6e9238f1, authored by Lincoln Stein on 2023-02-06 20:06:33 -05:00 and committed via GitHub.
39 changed files with 502 additions and 272 deletions

========================================

@@ -3,6 +3,7 @@ on:
   push:
     branches:
       - 'main'
+      - 'update/ci/*'
   tags:
     - 'v*.*.*'
@@ -47,11 +48,10 @@ jobs:
           type=semver,pattern={{version}}
           type=semver,pattern={{major}}.{{minor}}
           type=semver,pattern={{major}}
-          type=raw,value='sha'-{{sha}}-${{ matrix.flavor}}
-          type=raw,value={{branch}}-${{ matrix.flavor }}
+          type=sha,enable=true,prefix=sha-,format=short
         flavor: |
           latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
+          suffix=-${{ matrix.flavor }},onlatest=false
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2

========================================

@@ -8,10 +8,11 @@ on:
       - 'ready_for_review'
       - 'opened'
       - 'synchronize'
+  workflow_dispatch:

 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
   cancel-in-progress: true

 jobs:
   matrix:
@@ -62,28 +63,13 @@ jobs:
       # github-env: $env:GITHUB_ENV
     name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
     runs-on: ${{ matrix.os }}
-    env:
-      PIP_USE_PEP517: '1'
     steps:
       - name: Checkout sources
         id: checkout-sources
         uses: actions/checkout@v3
-      - name: setup python
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Set Cache-Directory Windows
-        if: runner.os == 'Windows'
-        id: set-cache-dir-windows
-        run: |
-          echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
-          echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}
-      - name: Set Cache-Directory others
-        if: runner.os != 'Windows'
-        id: set-cache-dir-others
-        run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}
       - name: set test prompt to main branch validation
         if: ${{ github.ref == 'refs/heads/main' }}
         run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
@@ -92,26 +78,29 @@ jobs:
         if: ${{ github.ref != 'refs/heads/main' }}
         run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
+      - name: setup python
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: pip
+          cache-dependency-path: pyproject.toml
       - name: install invokeai
         env:
           PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
         run: >
           pip3 install
+          --use-pep517
           --editable=".[test]"
       - name: run pytest
+        id: run-pytest
         run: pytest
-      - name: Use Cached models
-        id: cache-sd-model
-        uses: actions/cache@v3
-        env:
-          cache-name: huggingface-models
-        with:
-          path: ${{ env.CACHE_DIR }}
-          key: ${{ env.cache-name }}
-          enableCrossOsArchive: true
+      - name: set INVOKEAI_OUTDIR
+        run: >
+          python -c
+          "import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
+          >> ${{ matrix.github-env }}
       - name: run invokeai-configure
         id: run-preload-models
@@ -124,9 +113,8 @@ jobs:
           --full-precision
         # can't use fp16 weights without a GPU
-      - name: Run the tests
-        if: runner.os != 'Windows'
-        id: run-tests
+      - name: run invokeai
+        id: run-invokeai
         env:
           # Set offline mode to make sure configure preloaded successfully.
           HF_HUB_OFFLINE: 1
@@ -137,10 +125,11 @@ jobs:
           --no-patchmatch
           --no-nsfw_checker
           --from_file ${{ env.TEST_PROMPTS }}
+          --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
       - name: Archive results
         id: archive-results
         uses: actions/upload-artifact@v3
         with:
-          name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
-          path: ${{ env.INVOKEAI_ROOT }}/outputs
+          name: results
+          path: ${{ env.INVOKEAI_OUTDIR }}
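The `set INVOKEAI_OUTDIR` step above folds its logic into a `python -c` one-liner. Unrolled, it is equivalent to the following sketch (`Globals.root` is the InvokeAI runtime root from `ldm.invoke.globals`):

    import os

    from ldm.invoke.globals import Globals

    # Resolve the outputs directory under the InvokeAI runtime root ...
    OUTDIR = os.path.join(Globals.root, str('outputs'))

    # ... and emit it as KEY=VALUE so the workflow can append it to
    # ${{ matrix.github-env }} (GITHUB_ENV on Linux/macOS), making
    # INVOKEAI_OUTDIR visible to the later run and archive steps.
    print(f'INVOKEAI_OUTDIR={OUTDIR}')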

========================================

@@ -1,7 +1,4 @@
 # syntax=docker/dockerfile:1
-# Maintained by Matthias Wild <mauwii@outlook.de>
 ARG PYTHON_VERSION=3.9
 ##################
 ##  base image  ##
@@ -85,3 +82,5 @@ ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
 ENTRYPOINT [ "invokeai" ]
 CMD [ "--web", "--host=0.0.0.0" ]
 VOLUME [ "/data" ]
+
+LABEL org.opencontainers.image.authors="mauwii@outlook.de"

========================================

@@ -249,6 +249,7 @@ class InvokeAiInstance:
             "--require-virtualenv",
             "torch",
             "torchvision",
+            "--force-reinstall",
             "--find-links" if find_links is not None else None,
             find_links,
             "--extra-index-url" if extra_index_url is not None else None,
@@ -325,6 +326,7 @@ class InvokeAiInstance:
         Configure the InvokeAI runtime directory
         """

+        # set sys.argv to a consistent state
         new_argv = [sys.argv[0]]
         for i in range(1,len(sys.argv)):
             el = sys.argv[i]
@@ -344,9 +346,6 @@ class InvokeAiInstance:
         # NOTE: currently the config script does its own arg parsing! this means the command-line switches
         # from the installer will also automatically propagate down to the config script.
         # this may change in the future with config refactoring!
-        # set sys.argv to a consistent state

         invokeai_configure.main()

     def install_user_scripts(self):
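A note on the pip argument list above: entries like `"--find-links" if find_links is not None else None` deliberately leave `None` placeholders for options that were not supplied, which only works if the list is filtered before pip is invoked. A minimal sketch of that pattern, with hypothetical values for the two optional sources:

    import subprocess
    import sys

    find_links = None  # hypothetical: no local wheel directory supplied
    extra_index_url = "https://download.pytorch.org/whl/cu117"  # hypothetical

    pip_args = [
        "install",
        "--require-virtualenv",
        "torch",
        "torchvision",
        "--force-reinstall",
        "--find-links" if find_links is not None else None,
        find_links,
        "--extra-index-url" if extra_index_url is not None else None,
        extra_index_url,
    ]

    # Drop the None placeholders left by absent options, then invoke pip.
    cmd = [sys.executable, "-m", "pip", *(a for a in pip_args if a is not None)]
    subprocess.run(cmd, check=True)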

========================================

@@ -1208,12 +1208,18 @@ class InvokeAIWebServer:
                 )
         except KeyboardInterrupt:
+            # Clear the CUDA cache on an exception
+            self.empty_cuda_cache()
             self.socketio.emit("processingCanceled")
             raise
         except CanceledException:
+            # Clear the CUDA cache on an exception
+            self.empty_cuda_cache()
             self.socketio.emit("processingCanceled")
             pass
         except Exception as e:
+            # Clear the CUDA cache on an exception
+            self.empty_cuda_cache()
             print(e)
             self.socketio.emit("error", {"message": (str(e))})
             print("\n")
@@ -1221,6 +1227,12 @@ class InvokeAIWebServer:
             traceback.print_exc()
             print("\n")

+    def empty_cuda_cache(self):
+        if self.generate.device.type == "cuda":
+            import torch.cuda
+
+            torch.cuda.empty_cache()
+
     def parameters_to_generated_image_metadata(self, parameters):
         try:
             # top-level metadata minus `image` or `images`

========================================

@@ -1,28 +1,20 @@
-# Stable Diffusion Web UI
+# InvokeAI UI dev setup

-## Run
+The UI is in `invokeai/frontend`.

-- `python scripts/dream.py --web` serves both frontend and backend at
-  http://localhost:9090
+## Environment set up

-## Evironment
-
-Install [node](https://nodejs.org/en/download/) (includes npm) and optionally
+Install [node](https://nodejs.org/en/download/) (includes npm) and
 [yarn](https://yarnpkg.com/getting-started/install).

-From `frontend/` run `npm install` / `yarn install` to install the frontend
-packages.
+From `invokeai/frontend/` run `yarn install` to get everything set up.

 ## Dev

-1. From `frontend/`, run `npm dev` / `yarn dev` to start the dev server.
-2. Run `python scripts/dream.py --web`.
-3. Navigate to the dev server address e.g. `http://localhost:5173/`.
+1. Start the dev server: `yarn dev`
+2. Start the InvokeAI UI per usual: `invokeai --web`
+3. Point your browser to the dev server address e.g. `http://localhost:5173/`

-To build for dev: `npm build-dev` / `yarn build-dev`
+To build for dev: `yarn build-dev`

-To build for production: `npm build` / `yarn build`
+To build for production: `yarn build`
-
-## TODO
-
-- Search repo for "TODO"

========================================

File diff suppressed because one or more lines are too long

========================================

File diff suppressed because one or more lines are too long

========================================

@@ -7,8 +7,8 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index.dd4ad8a1.js"></script>
-    <link rel="stylesheet" href="./assets/index.8badc8b4.css">
+    <script type="module" crossorigin src="./assets/index.b7daf15c.js"></script>
+    <link rel="stylesheet" href="./assets/index.1536494e.css">
     <script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
     <script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
   </head>
@@ -18,6 +18,6 @@
     <script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
     <script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
-    <script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-8219c08f.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
+    <script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-7649c4ae.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
   </body>
 </html>

========================================

@@ -24,6 +24,7 @@
     "otherOptions": "Other Options",
     "seamlessTiling": "Seamless Tiling",
     "hiresOptim": "High Res Optimization",
+    "hiresStrength": "High Res Strength",
     "imageFit": "Fit Initial Image To Output Size",
     "codeformerFidelity": "Fidelity",
     "seamSize": "Seam Size",

========================================

@@ -24,6 +24,7 @@
     "otherOptions": "Other Options",
     "seamlessTiling": "Seamless Tiling",
     "hiresOptim": "High Res Optimization",
+    "hiresStrength": "High Res Strength",
     "imageFit": "Fit Initial Image To Output Size",
     "codeformerFidelity": "Fidelity",
     "seamSize": "Seam Size",
@@ -43,6 +44,7 @@
     "invoke": "Invoke",
     "cancel": "Cancel",
     "promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
+    "negativePrompts": "Negative Prompts",
     "sendTo": "Send to",
     "sendToImg2Img": "Send to Image to Image",
     "sendToUnifiedCanvas": "Send To Unified Canvas",

========================================

@@ -24,6 +24,7 @@
     "otherOptions": "Other Options",
     "seamlessTiling": "Seamless Tiling",
     "hiresOptim": "High Res Optimization",
+    "hiresStrength": "High Res Strength",
     "imageFit": "Fit Initial Image To Output Size",
     "codeformerFidelity": "Fidelity",
     "seamSize": "Seam Size",

========================================

@@ -24,6 +24,7 @@
     "otherOptions": "Other Options",
     "seamlessTiling": "Seamless Tiling",
     "hiresOptim": "High Res Optimization",
+    "hiresStrength": "High Res Strength",
     "imageFit": "Fit Initial Image To Output Size",
     "codeformerFidelity": "Fidelity",
     "seamSize": "Seam Size",
@@ -43,6 +44,7 @@
     "invoke": "Invoke",
     "cancel": "Cancel",
     "promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
+    "negativePrompts": "Negative Prompts",
     "sendTo": "Send to",
     "sendToImg2Img": "Send to Image to Image",
     "sendToUnifiedCanvas": "Send To Unified Canvas",

========================================

@@ -11,7 +11,6 @@ const useClickOutsideWatcher = () => {
     function handleClickOutside(e: MouseEvent) {
       watchers.forEach(({ ref, enable, callback }) => {
         if (enable && ref.current && !ref.current.contains(e.target as Node)) {
-          console.log('callback');
           callback();
         }
       });

========================================

@@ -0,0 +1,20 @@
+import * as InvokeAI from 'app/invokeai';
+import promptToString from './promptToString';
+
+export function getPromptAndNegative(input_prompt: InvokeAI.Prompt) {
+  let prompt: string = promptToString(input_prompt);
+  let negativePrompt: string | null = null;
+
+  const negativePromptRegExp = new RegExp(/(?<=\[)[^\][]*(?=])/, 'gi');
+  const negativePromptMatches = [...prompt.matchAll(negativePromptRegExp)];
+
+  if (negativePromptMatches && negativePromptMatches.length > 0) {
+    negativePrompt = negativePromptMatches.join(', ');
+    prompt = prompt
+      .replaceAll(negativePromptRegExp, '')
+      .replaceAll('[]', '')
+      .trim();
+  }
+
+  return [prompt, negativePrompt];
+}
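The regex in this new helper, `(?<=\[)[^\][]*(?=])`, uses a lookbehind/lookahead pair so that only the text between square brackets is captured; multiple bracketed spans are joined with commas and stripped from the positive prompt. The same behavior, sketched in Python for reference (the function name is ours):

    import re
    from typing import Optional, Tuple

    # Same pattern as the TypeScript helper: text between '[' and ']'.
    NEGATIVE_RE = re.compile(r"(?<=\[)[^\][]*(?=\])")

    def get_prompt_and_negative(prompt: str) -> Tuple[str, Optional[str]]:
        matches = NEGATIVE_RE.findall(prompt)
        if not matches:
            return prompt, None
        negative = ", ".join(matches)
        # Blank out the bracketed spans, then drop the leftover '[]' pairs.
        positive = NEGATIVE_RE.sub("", prompt).replace("[]", "").strip()
        return positive, negative

    print(get_prompt_and_negative("a cat [blurry] on a mat [low quality]"))
    # -> ('a cat  on a mat', 'blurry, low quality')

The other half of the round trip appears in `frontendToBackendParameters` below, which re-embeds the value as `${prompt} [${negativePrompt}]`.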

========================================

@@ -100,12 +100,14 @@ export const frontendToBackendParameters = (
     facetoolType,
     height,
     hiresFix,
+    hiresStrength,
     img2imgStrength,
     infillMethod,
     initialImage,
     iterations,
     perlin,
     prompt,
+    negativePrompt,
     sampler,
     seamBlur,
     seamless,
@@ -155,6 +157,10 @@ export const frontendToBackendParameters = (
   let esrganParameters: false | BackendEsrGanParameters = false;
   let facetoolParameters: false | BackendFacetoolParameters = false;

+  if (negativePrompt !== '') {
+    generationParameters.prompt = `${prompt} [${negativePrompt}]`;
+  }
+
   generationParameters.seed = shouldRandomizeSeed
     ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
     : seed;
@@ -164,6 +170,8 @@ export const frontendToBackendParameters = (
   generationParameters.seamless = seamless;
   generationParameters.hires_fix = hiresFix;

+  if (hiresFix) generationParameters.strength = hiresStrength;
+
   if (shouldRunESRGAN) {
     esrganParameters = {
       level: upscalingLevel,

========================================

@@ -9,6 +9,7 @@ import {
   setAllParameters,
   setInitialImage,
   setIsLightBoxOpen,
+  setNegativePrompt,
   setPrompt,
   setSeed,
   setShouldShowImageDetails,
@@ -44,6 +45,7 @@ import { GalleryState } from 'features/gallery/store/gallerySlice';
 import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
 import IAIPopover from 'common/components/IAIPopover';
 import { useTranslation } from 'react-i18next';
+import { getPromptAndNegative } from 'common/util/getPromptAndNegative';

 const systemSelector = createSelector(
   [
@@ -241,9 +243,18 @@ const CurrentImageButtons = () => {
     [currentImage]
   );

-  const handleClickUsePrompt = () =>
-    currentImage?.metadata?.image?.prompt &&
-    dispatch(setPrompt(currentImage.metadata.image.prompt));
+  const handleClickUsePrompt = () => {
+    if (currentImage?.metadata?.image?.prompt) {
+      const [prompt, negativePrompt] = getPromptAndNegative(
+        currentImage?.metadata?.image?.prompt
+      );
+
+      prompt && dispatch(setPrompt(prompt));
+      negativePrompt
+        ? dispatch(setNegativePrompt(negativePrompt))
+        : dispatch(setNegativePrompt(''));
+    }
+  };

   useHotkeys(
     'p',

========================================

@@ -10,9 +10,10 @@ import { DragEvent, memo, useState } from 'react';
 import {
   setActiveTab,
   setAllImageToImageParameters,
-  setAllTextToImageParameters,
+  setAllParameters,
   setInitialImage,
   setIsLightBoxOpen,
+  setNegativePrompt,
   setPrompt,
   setSeed,
 } from 'features/options/store/optionsSlice';
@@ -24,6 +25,7 @@ import {
 } from 'features/canvas/store/canvasSlice';
 import { hoverableImageSelector } from 'features/gallery/store/gallerySliceSelectors';
 import { useTranslation } from 'react-i18next';
+import { getPromptAndNegative } from 'common/util/getPromptAndNegative';

 interface HoverableImageProps {
   image: InvokeAI.Image;
@@ -62,7 +64,17 @@ const HoverableImage = memo((props: HoverableImageProps) => {
   const handleMouseOut = () => setIsHovered(false);

   const handleUsePrompt = () => {
-    image.metadata && dispatch(setPrompt(image.metadata.image.prompt));
+    if (image.metadata) {
+      const [prompt, negativePrompt] = getPromptAndNegative(
+        image.metadata?.image?.prompt
+      );
+
+      prompt && dispatch(setPrompt(prompt));
+      negativePrompt
+        ? dispatch(setNegativePrompt(negativePrompt))
+        : dispatch(setNegativePrompt(''));
+    }
+
     toast({
       title: t('toast:promptSet'),
       status: 'success',
@@ -115,7 +127,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
   };

   const handleUseAllParameters = () => {
-    metadata && dispatch(setAllTextToImageParameters(metadata));
+    metadata && dispatch(setAllParameters(metadata));
     toast({
       title: t('toast:parametersSet'),
       status: 'success',

========================================

@@ -38,7 +38,6 @@ export const uploadImage =
     });

     const image = (await response.json()) as InvokeAI.ImageUploadResponse;
-    console.log(image);

     const newImage: InvokeAI.Image = {
       uuid: uuidv4(),
       category: 'user',

========================================

@@ -1,10 +1,53 @@
 import { Flex } from '@chakra-ui/react';
 import { ChangeEvent } from 'react';
-import { RootState } from 'app/store';
+import type { RootState } from 'app/store';
 import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAISwitch from 'common/components/IAISwitch';
-import { setHiresFix } from 'features/options/store/optionsSlice';
+import {
+  setHiresFix,
+  setHiresStrength,
+} from 'features/options/store/optionsSlice';
 import { useTranslation } from 'react-i18next';
+import IAISlider from 'common/components/IAISlider';
+
+function HighResStrength() {
+  const hiresFix = useAppSelector((state: RootState) => state.options.hiresFix);
+  const hiresStrength = useAppSelector(
+    (state: RootState) => state.options.hiresStrength
+  );
+
+  const dispatch = useAppDispatch();
+  const { t } = useTranslation();
+
+  const handleHiresStrength = (v: number) => {
+    dispatch(setHiresStrength(v));
+  };
+
+  const handleHiResStrengthReset = () => {
+    dispatch(setHiresStrength(0.75));
+  };
+
+  return (
+    <IAISlider
+      label={t('options:hiresStrength')}
+      step={0.01}
+      min={0.01}
+      max={0.99}
+      onChange={handleHiresStrength}
+      value={hiresStrength}
+      isInteger={false}
+      withInput
+      withSliderMarks
+      inputWidth={'5.5rem'}
+      withReset
+      handleReset={handleHiResStrengthReset}
+      isSliderDisabled={!hiresFix}
+      isInputDisabled={!hiresFix}
+      isResetDisabled={!hiresFix}
+    />
+  );
+}

 /**
  * Hires Fix Toggle
@@ -27,6 +70,7 @@ const HiresOptions = () => {
       isChecked={hiresFix}
       onChange={handleChangeHiresFix}
     />
+    <HighResStrength />
   </Flex>
 );

========================================

@@ -0,0 +1,38 @@
+import { FormControl, Textarea } from '@chakra-ui/react';
+import type { RootState } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
+import { setNegativePrompt } from 'features/options/store/optionsSlice';
+import { useTranslation } from 'react-i18next';
+
+export function NegativePromptInput() {
+  const negativePrompt = useAppSelector(
+    (state: RootState) => state.options.negativePrompt
+  );
+
+  const dispatch = useAppDispatch();
+  const { t } = useTranslation();
+
+  return (
+    <FormControl>
+      <Textarea
+        id="negativePrompt"
+        name="negativePrompt"
+        value={negativePrompt}
+        onChange={(e) => dispatch(setNegativePrompt(e.target.value))}
+        background="var(--prompt-bg-color)"
+        placeholder={t('options:negativePrompts')}
+        _placeholder={{ fontSize: '0.8rem' }}
+        borderColor="var(--border-color)"
+        _hover={{
+          borderColor: 'var(--border-color-light)',
+        }}
+        _focusVisible={{
+          borderColor: 'var(--border-color-invalid)',
+          boxShadow: '0 0 10px var(--box-shadow-color-invalid)',
+        }}
+        fontSize="0.9rem"
+        color="var(--text-color-secondary)"
+      />
+    </FormControl>
+  );
+}

========================================

@@ -5,6 +5,7 @@ import promptToString from 'common/util/promptToString';
 import { seedWeightsToString } from 'common/util/seedWeightPairs';
 import { FACETOOL_TYPES } from 'app/constants';
 import { InvokeTabName, tabMap } from 'features/tabs/tabMap';
+import { getPromptAndNegative } from 'common/util/getPromptAndNegative';

 export type UpscalingLevel = 2 | 4;
@@ -19,6 +20,7 @@ export interface OptionsState {
   facetoolType: FacetoolType;
   height: number;
   hiresFix: boolean;
+  hiresStrength: number;
   img2imgStrength: number;
   infillMethod: string;
   initialImage?: InvokeAI.Image | string; // can be an Image or url
@@ -28,6 +30,7 @@ export interface OptionsState {
   optionsPanelScrollPosition: number;
   perlin: number;
   prompt: string;
+  negativePrompt: string;
   sampler: string;
   seamBlur: number;
   seamless: boolean;
@@ -69,6 +72,7 @@ const initialOptionsState: OptionsState = {
   facetoolType: 'gfpgan',
   height: 512,
   hiresFix: false,
+  hiresStrength: 0.75,
   img2imgStrength: 0.75,
   infillMethod: 'patchmatch',
   isLightBoxOpen: false,
@@ -77,6 +81,7 @@ const initialOptionsState: OptionsState = {
   optionsPanelScrollPosition: 0,
   perlin: 0,
   prompt: '',
+  negativePrompt: '',
   sampler: 'k_lms',
   seamBlur: 16,
   seamless: false,
@@ -123,6 +128,17 @@ export const optionsSlice = createSlice({
         state.prompt = promptToString(newPrompt);
       }
     },
+    setNegativePrompt: (
+      state,
+      action: PayloadAction<string | InvokeAI.Prompt>
+    ) => {
+      const newPrompt = action.payload;
+      if (typeof newPrompt === 'string') {
+        state.negativePrompt = newPrompt;
+      } else {
+        state.negativePrompt = promptToString(newPrompt);
+      }
+    },
     setIterations: (state, action: PayloadAction<number>) => {
       state.iterations = action.payload;
     },
@@ -175,6 +191,9 @@ export const optionsSlice = createSlice({
     setHiresFix: (state, action: PayloadAction<boolean>) => {
       state.hiresFix = action.payload;
     },
+    setHiresStrength: (state, action: PayloadAction<number>) => {
+      state.hiresStrength = action.payload;
+    },
     setShouldFitToWidthHeight: (state, action: PayloadAction<boolean>) => {
       state.shouldFitToWidthHeight = action.payload;
     },
@@ -307,7 +326,14 @@ export const optionsSlice = createSlice({
         state.shouldRandomizeSeed = false;
       }

-      if (prompt) state.prompt = promptToString(prompt);
+      if (prompt) {
+        const [promptOnly, negativePrompt] = getPromptAndNegative(prompt);
+        if (promptOnly) state.prompt = promptOnly;
+        negativePrompt
+          ? (state.negativePrompt = negativePrompt)
+          : (state.negativePrompt = '');
+      }

       if (sampler) state.sampler = sampler;
       if (steps) state.steps = steps;
       if (cfg_scale) state.cfgScale = cfg_scale;
@@ -438,6 +464,7 @@ export const {
   setFacetoolType,
   setHeight,
   setHiresFix,
+  setHiresStrength,
   setImg2imgStrength,
   setInfillMethod,
   setInitialImage,
@@ -448,6 +475,7 @@ export const {
   setParameter,
   setPerlin,
   setPrompt,
+  setNegativePrompt,
   setSampler,
   setSeamBlur,
   setSeamless,

========================================

@@ -13,16 +13,16 @@ export default function LanguagePicker() {
   const LANGUAGES = {
     en: t('common:langEnglish'),
-    ru: t('common:langRussian'),
-    it: t('common:langItalian'),
-    pt_br: t('common:langBrPortuguese'),
-    de: t('common:langGerman'),
-    pl: t('common:langPolish'),
-    zh_cn: t('common:langSimplifiedChinese'),
-    es: t('common:langSpanish'),
-    ja: t('common:langJapanese'),
     nl: t('common:langDutch'),
     fr: t('common:langFrench'),
+    de: t('common:langGerman'),
+    it: t('common:langItalian'),
+    ja: t('common:langJapanese'),
+    pl: t('common:langPolish'),
+    pt_br: t('common:langBrPortuguese'),
+    ru: t('common:langRussian'),
+    zh_cn: t('common:langSimplifiedChinese'),
+    es: t('common:langSpanish'),
     ua: t('common:langUkranian'),
   };

========================================

@@ -316,7 +316,6 @@ export default function CheckpointModelEdit() {
   ) : (
     <Flex
       width="100%"
-      height="250px"
       justifyContent="center"
       alignItems="center"
       backgroundColor="var(--background-color)"

========================================

@@ -271,7 +271,6 @@ export default function DiffusersModelEdit() {
   ) : (
     <Flex
       width="100%"
-      height="250px"
       justifyContent="center"
       alignItems="center"
       backgroundColor="var(--background-color)"

========================================

@@ -19,6 +19,8 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
 import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
 import { useTranslation } from 'react-i18next';
+import { Flex } from '@chakra-ui/react';
+import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';

 export default function ImageToImagePanel() {
   const { t } = useTranslation();
@@ -67,7 +69,10 @@ export default function ImageToImagePanel() {
   return (
     <InvokeOptionsPanel>
-      <PromptInput />
+      <Flex flexDir="column" rowGap="0.5rem">
+        <PromptInput />
+        <NegativePromptInput />
+      </Flex>
       <ProcessButtons />
       <MainOptions />
       <ImageToImageStrength

========================================

@@ -24,8 +24,8 @@
   }

   svg {
-    width: 26px;
-    height: 26px;
+    width: 24px;
+    height: 24px;
   }

   &[aria-selected='true'] {

========================================

@@ -1,3 +1,4 @@
+import { Flex } from '@chakra-ui/react';
 import { Feature } from 'app/features';
 import FaceRestoreOptions from 'features/options/components/AdvancedOptions/FaceRestore/FaceRestoreOptions';
 import FaceRestoreToggle from 'features/options/components/AdvancedOptions/FaceRestore/FaceRestoreToggle';
@@ -10,6 +11,7 @@ import VariationsOptions from 'features/options/components/AdvancedOptions/Varia
 import MainOptions from 'features/options/components/MainOptions/MainOptions';
 import OptionsAccordion from 'features/options/components/OptionsAccordion';
 import ProcessButtons from 'features/options/components/ProcessButtons/ProcessButtons';
+import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';
 import PromptInput from 'features/options/components/PromptInput/PromptInput';
 import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
 import { useTranslation } from 'react-i18next';
@@ -50,7 +52,10 @@ export default function TextToImagePanel() {
   return (
     <InvokeOptionsPanel>
-      <PromptInput />
+      <Flex flexDir="column" rowGap="0.5rem">
+        <PromptInput />
+        <NegativePromptInput />
+      </Flex>
       <ProcessButtons />
       <MainOptions />
       <OptionsAccordion accordionInfo={textToImageAccordions} />

========================================

@@ -13,6 +13,8 @@ import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
 import BoundingBoxSettings from 'features/options/components/AdvancedOptions/Canvas/BoundingBoxSettings/BoundingBoxSettings';
 import InfillAndScalingOptions from 'features/options/components/AdvancedOptions/Canvas/InfillAndScalingOptions';
 import { useTranslation } from 'react-i18next';
+import { Flex } from '@chakra-ui/react';
+import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';

 export default function UnifiedCanvasPanel() {
   const { t } = useTranslation();
@@ -48,7 +50,10 @@ export default function UnifiedCanvasPanel() {
   return (
     <InvokeOptionsPanel>
-      <PromptInput />
+      <Flex flexDir="column" rowGap="0.5rem">
+        <PromptInput />
+        <NegativePromptInput />
+      </Flex>
       <ProcessButtons />
       <MainOptions />
       <ImageToImageStrength

========================================

@@ -211,7 +211,7 @@ class Generate:
                 print('>> xformers memory-efficient attention is available but disabled')
         else:
             print('>> xformers not installed')
        # model caching system for fast switching
        self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models)
        # don't accept invalid models
@@ -344,6 +344,7 @@ class Generate:
             **args,
     ):   # eat up additional cruft
+        self.clear_cuda_stats()
         """
         ldm.generate.prompt2image() is the common entry point for txt2img() and img2img()
         It takes the following arguments:
@@ -548,6 +549,7 @@ class Generate:
                 inpaint_width = inpaint_width,
                 enable_image_debugging = enable_image_debugging,
                 free_gpu_mem=self.free_gpu_mem,
+                clear_cuda_cache=self.clear_cuda_cache
             )

             if init_color:
@@ -565,11 +567,17 @@ class Generate:
                                      image_callback = image_callback)

         except KeyboardInterrupt:
+            # Clear the CUDA cache on an exception
+            self.clear_cuda_cache()
+
             if catch_interrupts:
                 print('**Interrupted** Partial results will be returned.')
             else:
                 raise KeyboardInterrupt
         except RuntimeError:
+            # Clear the CUDA cache on an exception
+            self.clear_cuda_cache()
+
             print(traceback.format_exc(), file=sys.stderr)
             print('>> Could not generate image.')
@@ -579,22 +587,42 @@ class Generate:
             f'>> {len(results)} image(s) generated in', '%4.2fs' % (
                 toc - tic)
         )
+        self.print_cuda_stats()
+        return results

+    def clear_cuda_cache(self):
+        if self._has_cuda():
+            self.max_memory_allocated = max(
+                self.max_memory_allocated,
+                torch.cuda.max_memory_allocated()
+            )
+            self.memory_allocated = max(
+                self.memory_allocated,
+                torch.cuda.memory_allocated()
+            )
+            self.session_peakmem = max(
+                self.session_peakmem,
+                torch.cuda.max_memory_allocated()
+            )
+            torch.cuda.empty_cache()
+
+    def clear_cuda_stats(self):
+        self.max_memory_allocated = 0
+        self.memory_allocated = 0
+
+    def print_cuda_stats(self):
         if self._has_cuda():
             print(
                 '>> Max VRAM used for this generation:',
-                '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
+                '%4.2fG.' % (self.max_memory_allocated / 1e9),
                 'Current VRAM utilization:',
-                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+                '%4.2fG' % (self.memory_allocated / 1e9),
             )

-            self.session_peakmem = max(
-                self.session_peakmem, torch.cuda.max_memory_allocated()
-            )
             print(
                 '>> Max VRAM used since script start: ',
                 '%4.2fG' % (self.session_peakmem / 1e9),
             )
-        return results

     # this needs to be generalized to all sorts of postprocessors, which should be wrapped
     # in a nice harmonized call signature. For now we have a bunch of if/elses!
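Why the new bookkeeping: `torch.cuda.memory_allocated()` reports the *current* allocation, so once `clear_cuda_cache()` frees tensors mid-run, a print-time readout would under-report the generation. The hunk therefore snapshots the counters at each cache clear and prints the retained maxima afterwards. A condensed sketch of the idea, outside the `Generate` class:

    import torch

    max_alloc = 0  # reset per generation, as clear_cuda_stats() does

    def clear_cuda_cache():
        global max_alloc
        # Snapshot before freeing, so the reading survives the cache clear.
        max_alloc = max(max_alloc, torch.cuda.max_memory_allocated())
        torch.cuda.empty_cache()

    # ... generate, calling clear_cuda_cache() between passes ...
    print('>> Max VRAM used for this generation: %4.2fG' % (max_alloc / 1e9))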

========================================

@@ -122,6 +122,11 @@ class Generator:
                     seed = self.new_seed()

+        # Free up memory from the last generation.
+        clear_cuda_cache = kwargs['clear_cuda_cache'] or None
+        if clear_cuda_cache is not None:
+            clear_cuda_cache()
+
         return results

     def sample_to_image(self,samples)->Image.Image:
@@ -240,7 +245,12 @@ class Generator:
     def get_perlin_noise(self,width,height):
         fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
-        noise = torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
+        # limit noise to only the diffusion image channels, not the mask channels
+        input_channels = min(self.latent_channels, 4)
+        noise = torch.stack([
+            rand_perlin_2d((height, width),
+                           (8, 8),
+                           device = self.model.device).to(fixdevice) for _ in range(input_channels)], dim=0).to(self.model.device)
         return noise

     def new_seed(self):
@@ -341,3 +351,27 @@ class Generator:
     def torch_dtype(self)->torch.dtype:
         return torch.float16 if self.precision == 'float16' else torch.float32

+    # returns a tensor filled with random numbers from a normal distribution
+    def get_noise(self,width,height):
+        device = self.model.device
+        # limit noise to only the diffusion image channels, not the mask channels
+        input_channels = min(self.latent_channels, 4)
+        if self.use_mps_noise or device.type == 'mps':
+            x = torch.randn([1,
+                             input_channels,
+                             height // self.downsampling_factor,
+                             width // self.downsampling_factor],
+                            dtype=self.torch_dtype(),
+                            device='cpu').to(device)
+        else:
+            x = torch.randn([1,
+                             input_channels,
+                             height // self.downsampling_factor,
+                             width // self.downsampling_factor],
+                            dtype=self.torch_dtype(),
+                            device=device)
+        if self.perlin > 0.0:
+            perlin_noise = self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
+            x = (1-self.perlin)*x + self.perlin*perlin_noise
+        return x
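Two details in the promoted `get_noise` are easy to miss: noise is only generated for the first `min(latent_channels, 4)` channels (inpainting models carry extra mask channels that must not be noised), and perlin noise is mixed in as a convex blend. A self-contained numeric sketch of both, with `rand_perlin_2d` stood in for by plain gaussian noise:

    import torch

    latent_channels = 9   # e.g. an inpainting UNet: 4 image + extra mask channels
    input_channels = min(latent_channels, 4)  # noise only the image channels
    height, width, factor = 512, 512, 8       # pixel dims, latent downsampling
    perlin = 0.2                               # blend weight p in [0, 1]

    x = torch.randn(1, input_channels, height // factor, width // factor)
    # Stand-in for rand_perlin_2d(...); any same-shaped tensor works here.
    perlin_noise = torch.randn_like(x)

    # Convex blend: p = 0 keeps pure gaussian noise, p = 1 pure perlin.
    x = (1 - perlin) * x + perlin * perlin_noise
    print(x.shape)  # torch.Size([1, 4, 64, 64])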

========================================

@@ -63,22 +63,3 @@ class Img2Img(Generator):
         shape = like.shape
         x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2])
         return x
-
-    def get_noise(self,width,height):
-        # copy of the Txt2Img.get_noise
-        device = self.model.device
-        if self.use_mps_noise or device.type == 'mps':
-            x = torch.randn([1,
-                             self.latent_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            device='cpu').to(device)
-        else:
-            x = torch.randn([1,
-                             self.latent_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            device=device)
-        if self.perlin > 0.0:
-            x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
-        return x

========================================

@@ -51,26 +51,4 @@ class Txt2Img(Generator):

         return make_image
-
-    # returns a tensor filled with random numbers from a normal distribution
-    def get_noise(self,width,height):
-        device = self.model.device
-        # limit noise to only the diffusion image channels, not the mask channels
-        input_channels = min(self.latent_channels, 4)
-        if self.use_mps_noise or device.type == 'mps':
-            x = torch.randn([1,
-                             input_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            dtype=self.torch_dtype(),
-                            device='cpu').to(device)
-        else:
-            x = torch.randn([1,
-                             input_channels,
-                             height // self.downsampling_factor,
-                             width // self.downsampling_factor],
-                            dtype=self.torch_dtype(),
-                            device=device)
-        if self.perlin > 0.0:
-            x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
-        return x

========================================

@@ -65,6 +65,11 @@ class Txt2Img2Img(Generator):
             mode="bilinear"
         )

+        # Free up memory from the last generation.
+        clear_cuda_cache = kwargs['clear_cuda_cache'] or None
+        if clear_cuda_cache is not None:
+            clear_cuda_cache()
+
         second_pass_noise = self.get_noise_like(resized_latents)

         verbosity = get_verbosity()

========================================

@@ -15,20 +15,18 @@ from pathlib import Path
 from typing import List, Union

 import npyscreen
-from diffusers import DiffusionPipeline, logging as dlogging
+from diffusers import DiffusionPipeline
+from diffusers import logging as dlogging
+from npyscreen import widget
 from omegaconf import OmegaConf

-from ldm.invoke.globals import (
-    Globals,
-    global_cache_dir,
-    global_config_file,
-    global_models_dir,
-    global_set_root,
-)
+from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
+                                global_models_dir, global_set_root)
 from ldm.invoke.model_manager import ModelManager

 DEST_MERGED_MODEL_DIR = "merged_models"

 def merge_diffusion_models(
     model_ids_or_paths: List[Union[str, Path]],
     alpha: float = 0.5,
@@ -48,10 +46,10 @@ def merge_diffusion_models(
     cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
     """
     with warnings.catch_warnings():
-        warnings.simplefilter('ignore')
+        warnings.simplefilter("ignore")
         verbosity = dlogging.get_verbosity()
         dlogging.set_verbosity_error()

         pipe = DiffusionPipeline.from_pretrained(
             model_ids_or_paths[0],
             cache_dir=kwargs.get("cache_dir", global_cache_dir()),
@@ -188,13 +186,12 @@ class FloatTitleSlider(npyscreen.TitleText):

 class mergeModelsForm(npyscreen.FormMultiPageAction):
     interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"]

     def __init__(self, parentApp, name):
         self.parentApp = parentApp
-        self.ALLOW_RESIZE=True
-        self.FIX_MINIMUM_SIZE_WHEN_CREATED=False
+        self.ALLOW_RESIZE = True
+        self.FIX_MINIMUM_SIZE_WHEN_CREATED = False
         super().__init__(parentApp, name)

     @property
@@ -205,29 +202,29 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         self.parentApp.setNextForm(None)

     def create(self):
-        window_height,window_width=curses.initscr().getmaxyx()
+        window_height, window_width = curses.initscr().getmaxyx()
         self.model_names = self.get_model_names()
         max_width = max([len(x) for x in self.model_names])
         max_width += 6
-        horizontal_layout = max_width*3 < window_width
+        horizontal_layout = max_width * 3 < window_width

         self.add_widget_intelligent(
             npyscreen.FixedText,
-            color='CONTROL',
+            color="CONTROL",
             value=f"Select two models to merge and optionally a third.",
             editable=False,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            color='CONTROL',
+            color="CONTROL",
             value=f"Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
             editable=False,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='MODEL 1',
-            color='GOOD',
+            value="MODEL 1",
+            color="GOOD",
             editable=False,
             rely=4 if horizontal_layout else None,
         )
@@ -242,57 +239,57 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='MODEL 2',
-            color='GOOD',
+            value="MODEL 2",
+            color="GOOD",
             editable=False,
-            relx=max_width+3 if horizontal_layout else None,
+            relx=max_width + 3 if horizontal_layout else None,
             rely=4 if horizontal_layout else None,
         )
         self.model2 = self.add_widget_intelligent(
             npyscreen.SelectOne,
-            name='(2)',
+            name="(2)",
             values=self.model_names,
             value=1,
             max_height=len(self.model_names),
             max_width=max_width,
-            relx=max_width+3 if horizontal_layout else None,
+            relx=max_width + 3 if horizontal_layout else None,
             rely=5 if horizontal_layout else None,
             scroll_exit=True,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='MODEL 3',
-            color='GOOD',
+            value="MODEL 3",
+            color="GOOD",
             editable=False,
-            relx=max_width*2+3 if horizontal_layout else None,
+            relx=max_width * 2 + 3 if horizontal_layout else None,
             rely=4 if horizontal_layout else None,
         )
         models_plus_none = self.model_names.copy()
-        models_plus_none.insert(0,'None')
+        models_plus_none.insert(0, "None")
         self.model3 = self.add_widget_intelligent(
             npyscreen.SelectOne,
-            name='(3)',
+            name="(3)",
             values=models_plus_none,
             value=0,
-            max_height=len(self.model_names)+1,
+            max_height=len(self.model_names) + 1,
             max_width=max_width,
             scroll_exit=True,
-            relx=max_width*2+3 if horizontal_layout else None,
+            relx=max_width * 2 + 3 if horizontal_layout else None,
             rely=5 if horizontal_layout else None,
         )
-        for m in [self.model1,self.model2,self.model3]:
+        for m in [self.model1, self.model2, self.model3]:
             m.when_value_edited = self.models_changed
         self.merged_model_name = self.add_widget_intelligent(
             npyscreen.TitleText,
             name="Name for merged model:",
-            labelColor='CONTROL',
+            labelColor="CONTROL",
             value="",
             scroll_exit=True,
         )
         self.force = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Force merge of incompatible models",
-            labelColor='CONTROL',
+            labelColor="CONTROL",
             value=False,
             scroll_exit=True,
         )
@@ -301,7 +298,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
             name="Merge Method:",
             values=self.interpolations,
             value=0,
-            labelColor='CONTROL',
+            labelColor="CONTROL",
             max_height=len(self.interpolations) + 1,
             scroll_exit=True,
         )
@@ -312,7 +309,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
             step=0.05,
             lowest=0,
             value=0.5,
-            labelColor='CONTROL',
+            labelColor="CONTROL",
             scroll_exit=True,
         )
         self.model1.editing = True
@@ -322,43 +319,43 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         selected_model1 = self.model1.value[0]
         selected_model2 = self.model2.value[0]
         selected_model3 = self.model3.value[0]
-        merged_model_name = f'{models[selected_model1]}+{models[selected_model2]}'
+        merged_model_name = f"{models[selected_model1]}+{models[selected_model2]}"
         self.merged_model_name.value = merged_model_name

         if selected_model3 > 0:
-            self.merge_method.values=['add_difference'],
-            self.merged_model_name.value += f'+{models[selected_model3]}'
+            self.merge_method.values = (["add_difference"],)
+            self.merged_model_name.value += f"+{models[selected_model3]}"
         else:
-            self.merge_method.values=self.interpolations
-        self.merge_method.value=0
+            self.merge_method.values = self.interpolations
+        self.merge_method.value = 0

     def on_ok(self):
         if self.validate_field_values() and self.check_for_overwrite():
             self.parentApp.setNextForm(None)
             self.editing = False
             self.parentApp.merge_arguments = self.marshall_arguments()
-            npyscreen.notify('Starting the merge...')
+            npyscreen.notify("Starting the merge...")
         else:
             self.editing = True

     def on_cancel(self):
         sys.exit(0)

-    def marshall_arguments(self)->dict:
+    def marshall_arguments(self) -> dict:
         model_names = self.model_names
         models = [
             model_names[self.model1.value[0]],
             model_names[self.model2.value[0]],
         ]
         if self.model3.value[0] > 0:
-            models.append(model_names[self.model3.value[0]-1])
+            models.append(model_names[self.model3.value[0] - 1])

         args = dict(
             models=models,
-            alpha = self.alpha.value,
-            interp = self.interpolations[self.merge_method.value[0]],
-            force = self.force.value,
-            merged_model_name = self.merged_model_name.value,
+            alpha=self.alpha.value,
+            interp=self.interpolations[self.merge_method.value[0]],
+            force=self.force.value,
+            merged_model_name=self.merged_model_name.value,
         )
         return args
@@ -371,18 +368,22 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
             f"The chosen merged model destination, {model_out}, is already in use. Overwrite?"
         )

-    def validate_field_values(self)->bool:
+    def validate_field_values(self) -> bool:
         bad_fields = []
         model_names = self.model_names
-        selected_models = set((model_names[self.model1.value[0]],model_names[self.model2.value[0]]))
+        selected_models = set(
+            (model_names[self.model1.value[0]], model_names[self.model2.value[0]])
+        )
         if self.model3.value[0] > 0:
-            selected_models.add(model_names[self.model3.value[0]-1])
+            selected_models.add(model_names[self.model3.value[0] - 1])
         if len(selected_models) < 2:
-            bad_fields.append(f'Please select two or three DIFFERENT models to compare. You selected {selected_models}')
+            bad_fields.append(
+                f"Please select two or three DIFFERENT models to compare. You selected {selected_models}"
+            )
         if len(bad_fields) > 0:
-            message = 'The following problems were detected and must be corrected:'
+            message = "The following problems were detected and must be corrected:"
             for problem in bad_fields:
-                message += f'\n* {problem}'
+                message += f"\n* {problem}"
             npyscreen.notify_confirm(message)
             return False
         else:
@@ -410,6 +411,7 @@ class Mergeapp(npyscreen.NPSAppManaged):
         npyscreen.setTheme(npyscreen.Themes.ElegantTheme)
         self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings")

 def run_gui(args: Namespace):
     mergeapp = Mergeapp()
     mergeapp.run()
@@ -450,5 +452,27 @@ def main():
     ] = cache_dir  # because not clear the merge pipeline is honoring cache_dir
     args.cache_dir = cache_dir

+    try:
+        if args.front_end:
+            run_gui(args)
+        else:
+            run_cli(args)
+        print(f">> Conversion successful. New model is named {args.merged_model_name}")
+    except widget.NotEnoughSpaceForWidget as e:
+        if str(e).startswith("Height of 1 allocated"):
+            print(
+                "** You need to have at least two diffusers models defined in models.yaml in order to merge"
+            )
+        else:
+            print(f"** A layout error has occurred: {str(e)}")
+        sys.exit(-1)
+    except Exception as e:
+        print(">> An error occurred:")
+        traceback.print_exc()
+        sys.exit(-1)
+    except KeyboardInterrupt:
+        sys.exit(-1)

 if __name__ == "__main__":
     main()

========================================

@@ -753,7 +753,7 @@ class ModelManager(object):
         return search_folder, found_models

     def _choose_diffusers_vae(self, model_name:str, vae:str=None)->Union[dict,str]:
         # In the event that the original entry is using a custom ckpt VAE, we try to
         # map that VAE onto a diffuser VAE using a hard-coded dictionary.
         # I would prefer to do this differently: We load the ckpt model into memory, swap the
@@ -954,7 +954,7 @@ class ModelManager(object):
     def _has_cuda(self) -> bool:
         return self.device.type == 'cuda'

-    def _diffuser_sha256(self,name_or_path:Union[str, Path])->Union[str,bytes]:
+    def _diffuser_sha256(self,name_or_path:Union[str, Path],chunksize=4096)->Union[str,bytes]:
         path = None
         if isinstance(name_or_path,Path):
             path = name_or_path
@@ -976,7 +976,8 @@ class ModelManager(object):
             for name in files:
                 count += 1
                 with open(os.path.join(root,name),'rb') as f:
-                    sha.update(f.read())
+                    while chunk := f.read(chunksize):
+                        sha.update(chunk)
         hash = sha.hexdigest()
         toc = time.time()
         print(f'  | sha256 = {hash} ({count} files hashed in','%4.2fs)' % (toc - tic))
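The change from `sha.update(f.read())` to a walrus loop bounds memory use: multi-gigabyte model files are now hashed `chunksize` bytes at a time instead of being read into RAM whole. The pattern in isolation (the path is a hypothetical placeholder):

    import hashlib

    def sha256_of_file(path: str, chunksize: int = 4096) -> str:
        sha = hashlib.sha256()
        with open(path, "rb") as f:
            # f.read() returns b'' at EOF, which is falsy, ending the loop.
            while chunk := f.read(chunksize):
                sha.update(chunk)
        return sha.hexdigest()

    print(sha256_of_file("/path/to/model.safetensors"))  # hypothetical path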

========================================

@@ -17,6 +17,7 @@ from pathlib import Path
 from typing import List, Tuple

 import npyscreen
+from npyscreen import widget
 from omegaconf import OmegaConf

 from ldm.invoke.globals import Globals, global_set_root
@@ -295,7 +296,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             for idx in range(len(model_names))
             if "default" in conf[model_names[idx]]
         ]
-        default = defaults[0] if len(defaults)>0 else 0
+        default = defaults[0] if len(defaults) > 0 else 0
         return (model_names, default)

     def marshall_arguments(self) -> dict:
@@ -438,11 +439,20 @@ def main():
             do_front_end(args)
         else:
             do_textual_inversion_training(**vars(args))
+    except widget.NotEnoughSpaceForWidget as e:
+        if str(e).startswith("Height of 1 allocated"):
+            print(
+                "** You need to have at least one diffusers models defined in models.yaml in order to train"
+            )
+        else:
+            print(f"** A layout error has occurred: {str(e)}")
+        sys.exit(-1)
     except AssertionError as e:
         print(str(e))
         sys.exit(-1)
     except KeyboardInterrupt:
         pass

 if __name__ == "__main__":
     main()

========================================

@@ -1,26 +1,26 @@
-import requests as request
+import requests

-import ldm.invoke._version as version
+from ldm.invoke import __app_name__, __version__

-local_version = str(version.__version__)
+local_version = str(__version__).replace("-", "")
+package_name = str(__app_name__)

-def get_pypi_versions(package_name="InvokeAI") -> list[str]:
+def get_pypi_versions(package_name=package_name) -> list[str]:
     """Get the versions of the package from PyPI"""
     url = f"https://pypi.org/pypi/{package_name}/json"
-    response = request.get(url).json()
+    response = requests.get(url).json()
     versions: list[str] = list(response["releases"].keys())
     return versions

-def local_on_pypi(package_name="InvokeAI", local_version=local_version) -> bool:
+def local_on_pypi(package_name=package_name, local_version=local_version) -> bool:
     """Compare the versions of the package from PyPI and the local package"""
     pypi_versions = get_pypi_versions(package_name)
     return local_version in pypi_versions

 if __name__ == "__main__":
-    package_name = "InvokeAI"
     if local_on_pypi():
         print(f"Package {package_name} is up to date")
     else: