Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Commit ac6e9238f1: Merge branch 'main' into ti-doc-update

.github/workflows/build-container.yml (vendored, 6 lines changed)
@@ -3,6 +3,7 @@ on:
  push:
    branches:
      - 'main'
      - 'update/ci/*'
    tags:
      - 'v*.*.*'
@@ -47,11 +48,10 @@ jobs:
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value='sha'-{{sha}}-${{ matrix.flavor}}
            type=raw,value={{branch}}-${{ matrix.flavor }}
            type=sha,enable=true,prefix=sha-,format=short
          flavor: |
            latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
            suffix=-${{ matrix.flavor }},onlatest=false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
.github/workflows/test-invoke-pip.yml (vendored, 57 lines changed)
@@ -8,10 +8,11 @@ on:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  matrix:
@@ -62,28 +63,13 @@ jobs:
      # github-env: $env:GITHUB_ENV
    name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    env:
      PIP_USE_PEP517: '1'
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: setup python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set Cache-Directory Windows
        if: runner.os == 'Windows'
        id: set-cache-dir-windows
        run: |
          echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
          echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}

      - name: Set Cache-Directory others
        if: runner.os != 'Windows'
        id: set-cache-dir-others
        run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
@@ -92,26 +78,29 @@ jobs:
        if: ${{ github.ref != 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

      - name: setup python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: pip
          cache-dependency-path: pyproject.toml

      - name: install invokeai
        env:
          PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
        run: >
          pip3 install
          --use-pep517
          --editable=".[test]"

      - name: run pytest
        id: run-pytest
        run: pytest

      - name: Use Cached models
        id: cache-sd-model
        uses: actions/cache@v3
        env:
          cache-name: huggingface-models
        with:
          path: ${{ env.CACHE_DIR }}
          key: ${{ env.cache-name }}
          enableCrossOsArchive: true

      - name: set INVOKEAI_OUTDIR
        run: >
          python -c
          "import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
          >> ${{ matrix.github-env }}

      - name: run invokeai-configure
        id: run-preload-models
@@ -124,9 +113,8 @@ jobs:
          --full-precision
        # can't use fp16 weights without a GPU

      - name: Run the tests
        if: runner.os != 'Windows'
        id: run-tests
      - name: run invokeai
        id: run-invokeai
        env:
          # Set offline mode to make sure configure preloaded successfully.
          HF_HUB_OFFLINE: 1
@@ -137,10 +125,11 @@ jobs:
          --no-patchmatch
          --no-nsfw_checker
          --from_file ${{ env.TEST_PROMPTS }}
          --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}

      - name: Archive results
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
          path: ${{ env.INVOKEAI_ROOT }}/outputs
          name: results
          path: ${{ env.INVOKEAI_OUTDIR }}
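The `set INVOKEAI_OUTDIR` step above embeds a Python one-liner in the workflow. Unpacked, it does roughly the following (a sketch; `Globals.root` is InvokeAI's runtime root directory, imported exactly as the workflow does):

    import os
    from ldm.invoke.globals import Globals  # same import the workflow uses

    # Compose the output directory under the runtime root and emit it in the
    # KEY=value form that $GITHUB_ENV expects.
    OUTDIR = os.path.join(Globals.root, "outputs")
    print(f"INVOKEAI_OUTDIR={OUTDIR}")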
@@ -1,7 +1,4 @@
# syntax=docker/dockerfile:1

# Maintained by Matthias Wild <mauwii@outlook.de>

ARG PYTHON_VERSION=3.9
##################
## base image ##
@@ -85,3 +82,5 @@ ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]

LABEL org.opencontainers.image.authors="mauwii@outlook.de"
@@ -249,6 +249,7 @@ class InvokeAiInstance:
            "--require-virtualenv",
            "torch",
            "torchvision",
            "--force-reinstall",
            "--find-links" if find_links is not None else None,
            find_links,
            "--extra-index-url" if extra_index_url is not None else None,
@@ -325,6 +326,7 @@ class InvokeAiInstance:
        Configure the InvokeAI runtime directory
        """

        # set sys.argv to a consistent state
        new_argv = [sys.argv[0]]
        for i in range(1,len(sys.argv)):
            el = sys.argv[i]
@@ -344,9 +346,6 @@ class InvokeAiInstance:
        # NOTE: currently the config script does its own arg parsing! this means the command-line switches
        # from the installer will also automatically propagate down to the config script.
        # this may change in the future with config refactoring!

        # set sys.argv to a consistent state
        invokeai_configure.main()

    def install_user_scripts(self):
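The installer hunk above builds the pip argument list with inline conditionals that evaluate to None when an option is absent. A minimal sketch of the pattern, assuming the surrounding code filters out the None entries before invoking pip (that filter sits outside the quoted hunk):

    import sys

    def pip_install_args(find_links=None, extra_index_url=None) -> list[str]:
        # Optional flags are emitted as None placeholders...
        args = [
            sys.executable, "-m", "pip", "install",
            "--require-virtualenv",
            "torch", "torchvision",
            "--force-reinstall",
            "--find-links" if find_links is not None else None,
            find_links,
            "--extra-index-url" if extra_index_url is not None else None,
            extra_index_url,
        ]
        # ...and stripped here, so pip never sees empty flags.
        return [a for a in args if a is not None]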
@@ -1208,12 +1208,18 @@ class InvokeAIWebServer:
                )

            except KeyboardInterrupt:
                # Clear the CUDA cache on an exception
                self.empty_cuda_cache()
                self.socketio.emit("processingCanceled")
                raise
            except CanceledException:
                # Clear the CUDA cache on an exception
                self.empty_cuda_cache()
                self.socketio.emit("processingCanceled")
                pass
            except Exception as e:
                # Clear the CUDA cache on an exception
                self.empty_cuda_cache()
                print(e)
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")
@@ -1221,6 +1227,12 @@ class InvokeAIWebServer:
                traceback.print_exc()
                print("\n")

    def empty_cuda_cache(self):
        if self.generate.device.type == "cuda":
            import torch.cuda

            torch.cuda.empty_cache()

    def parameters_to_generated_image_metadata(self, parameters):
        try:
            # top-level metadata minus `image` or `images`
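The idea behind `empty_cuda_cache()` is that torch's caching allocator holds on to freed blocks; after a cancelled or failed generation, releasing them keeps VRAM available for the next request. A minimal sketch of the pattern (function names are illustrative; `torch.cuda.empty_cache()` is the real API):

    import torch

    def empty_cuda_cache(device: torch.device) -> None:
        # Return cached, unused GPU memory to the driver. No-op off CUDA.
        if device.type == "cuda":
            torch.cuda.empty_cache()

    def run_job(device: torch.device, generate) -> None:
        try:
            generate()
        except KeyboardInterrupt:
            empty_cuda_cache(device)  # clear before propagating the interrupt
            raise
        except Exception:
            empty_cuda_cache(device)  # clear, then report the error
            raise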
@@ -1,28 +1,20 @@
# Stable Diffusion Web UI
# InvokeAI UI dev setup

## Run
The UI is in `invokeai/frontend`.

- `python scripts/dream.py --web` serves both frontend and backend at
  http://localhost:9090
## Environment set up

## Evironment

Install [node](https://nodejs.org/en/download/) (includes npm) and optionally
Install [node](https://nodejs.org/en/download/) (includes npm) and
[yarn](https://yarnpkg.com/getting-started/install).

From `frontend/` run `npm install` / `yarn install` to install the frontend
packages.
From `invokeai/frontend/` run `yarn install` to get everything set up.

## Dev

1. From `frontend/`, run `npm dev` / `yarn dev` to start the dev server.
2. Run `python scripts/dream.py --web`.
3. Navigate to the dev server address e.g. `http://localhost:5173/`.
1. Start the dev server: `yarn dev`
2. Start the InvokeAI UI per usual: `invokeai --web`
3. Point your browser to the dev server address e.g. `http://localhost:5173/`

To build for dev: `npm build-dev` / `yarn build-dev`
To build for dev: `yarn build-dev`

To build for production: `npm build` / `yarn build`

## TODO

- Search repo for "TODO"
To build for production: `yarn build`
3 file diffs suppressed because one or more lines are too long
invokeai/frontend/dist/index.html (vendored, 6 lines changed)
@@ -7,8 +7,8 @@
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>InvokeAI - A Stable Diffusion Toolkit</title>
    <link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
    <script type="module" crossorigin src="./assets/index.dd4ad8a1.js"></script>
    <link rel="stylesheet" href="./assets/index.8badc8b4.css">
    <script type="module" crossorigin src="./assets/index.b7daf15c.js"></script>
    <link rel="stylesheet" href="./assets/index.1536494e.css">
    <script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
    <script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
  </head>
@@ -18,6 +18,6 @@
    <script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
    <script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
    <script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-8219c08f.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
    <script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-7649c4ae.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
  </body>
</html>
@@ -24,6 +24,7 @@
    "otherOptions": "Other Options",
    "seamlessTiling": "Seamless Tiling",
    "hiresOptim": "High Res Optimization",
    "hiresStrength": "High Res Strength",
    "imageFit": "Fit Initial Image To Output Size",
    "codeformerFidelity": "Fidelity",
    "seamSize": "Seam Size",
@@ -43,6 +44,7 @@
    "invoke": "Invoke",
    "cancel": "Cancel",
    "promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
    "negativePrompts": "Negative Prompts",
    "sendTo": "Send to",
    "sendToImg2Img": "Send to Image to Image",
    "sendToUnifiedCanvas": "Send To Unified Canvas",
@@ -11,7 +11,6 @@ const useClickOutsideWatcher = () => {
    function handleClickOutside(e: MouseEvent) {
      watchers.forEach(({ ref, enable, callback }) => {
        if (enable && ref.current && !ref.current.contains(e.target as Node)) {
          console.log('callback');
          callback();
        }
      });
invokeai/frontend/src/common/util/getPromptAndNegative.ts (new file, 20 lines)

@@ -0,0 +1,20 @@
import * as InvokeAI from 'app/invokeai';
import promptToString from './promptToString';

export function getPromptAndNegative(input_prompt: InvokeAI.Prompt) {
  let prompt: string = promptToString(input_prompt);
  let negativePrompt: string | null = null;

  const negativePromptRegExp = new RegExp(/(?<=\[)[^\][]*(?=])/, 'gi');
  const negativePromptMatches = [...prompt.matchAll(negativePromptRegExp)];

  if (negativePromptMatches && negativePromptMatches.length > 0) {
    negativePrompt = negativePromptMatches.join(', ');
    prompt = prompt
      .replaceAll(negativePromptRegExp, '')
      .replaceAll('[]', '')
      .trim();
  }

  return [prompt, negativePrompt];
}
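In Python terms, the new helper splits a combined prompt into its positive and negative parts: bracketed segments become the negative prompt and everything else stays positive. A rough equivalent for illustration (the real implementation is the TypeScript above):

    import re

    def get_prompt_and_negative(prompt: str) -> tuple[str, str | None]:
        # Same lookaround idea as the TS regex /(?<=\[)[^\][]*(?=])/gi
        matches = re.findall(r"(?<=\[)[^\][]*(?=\])", prompt)
        if not matches:
            return prompt, None
        negative = ", ".join(matches)
        positive = re.sub(r"\[[^\][]*\]", "", prompt).strip()
        return positive, negative

    # frontendToBackendParameters (below) sends `${prompt} [${negativePrompt}]`,
    # so the split round-trips the combined form:
    assert get_prompt_and_negative("a photo of a cat [ugly, blurry]") == (
        "a photo of a cat",
        "ugly, blurry",
    )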
@@ -100,12 +100,14 @@ export const frontendToBackendParameters = (
    facetoolType,
    height,
    hiresFix,
    hiresStrength,
    img2imgStrength,
    infillMethod,
    initialImage,
    iterations,
    perlin,
    prompt,
    negativePrompt,
    sampler,
    seamBlur,
    seamless,
@@ -155,6 +157,10 @@
  let esrganParameters: false | BackendEsrGanParameters = false;
  let facetoolParameters: false | BackendFacetoolParameters = false;

  if (negativePrompt !== '') {
    generationParameters.prompt = `${prompt} [${negativePrompt}]`;
  }

  generationParameters.seed = shouldRandomizeSeed
    ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
    : seed;
@@ -164,6 +170,8 @@
  generationParameters.seamless = seamless;
  generationParameters.hires_fix = hiresFix;

  if (hiresFix) generationParameters.strength = hiresStrength;

  if (shouldRunESRGAN) {
    esrganParameters = {
      level: upscalingLevel,
@@ -9,6 +9,7 @@ import {
  setAllParameters,
  setInitialImage,
  setIsLightBoxOpen,
  setNegativePrompt,
  setPrompt,
  setSeed,
  setShouldShowImageDetails,
@@ -44,6 +45,7 @@ import { GalleryState } from 'features/gallery/store/gallerySlice';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import IAIPopover from 'common/components/IAIPopover';
import { useTranslation } from 'react-i18next';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';

const systemSelector = createSelector(
  [
@@ -241,9 +243,18 @@ const CurrentImageButtons = () => {
    [currentImage]
  );

  const handleClickUsePrompt = () =>
    currentImage?.metadata?.image?.prompt &&
    dispatch(setPrompt(currentImage.metadata.image.prompt));
  const handleClickUsePrompt = () => {
    if (currentImage?.metadata?.image?.prompt) {
      const [prompt, negativePrompt] = getPromptAndNegative(
        currentImage?.metadata?.image?.prompt
      );

      prompt && dispatch(setPrompt(prompt));
      negativePrompt
        ? dispatch(setNegativePrompt(negativePrompt))
        : dispatch(setNegativePrompt(''));
    }
  };

  useHotkeys(
    'p',
@@ -10,9 +10,10 @@ import { DragEvent, memo, useState } from 'react';
import {
  setActiveTab,
  setAllImageToImageParameters,
  setAllTextToImageParameters,
  setAllParameters,
  setInitialImage,
  setIsLightBoxOpen,
  setNegativePrompt,
  setPrompt,
  setSeed,
} from 'features/options/store/optionsSlice';
@@ -24,6 +25,7 @@ import {
} from 'features/canvas/store/canvasSlice';
import { hoverableImageSelector } from 'features/gallery/store/gallerySliceSelectors';
import { useTranslation } from 'react-i18next';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';

interface HoverableImageProps {
  image: InvokeAI.Image;
@@ -62,7 +64,17 @@ const HoverableImage = memo((props: HoverableImageProps) => {
  const handleMouseOut = () => setIsHovered(false);

  const handleUsePrompt = () => {
    image.metadata && dispatch(setPrompt(image.metadata.image.prompt));
    if (image.metadata) {
      const [prompt, negativePrompt] = getPromptAndNegative(
        image.metadata?.image?.prompt
      );

      prompt && dispatch(setPrompt(prompt));
      negativePrompt
        ? dispatch(setNegativePrompt(negativePrompt))
        : dispatch(setNegativePrompt(''));
    }

    toast({
      title: t('toast:promptSet'),
      status: 'success',
@@ -115,7 +127,7 @@ const HoverableImage = memo((props: HoverableImageProps) => {
  };

  const handleUseAllParameters = () => {
    metadata && dispatch(setAllTextToImageParameters(metadata));
    metadata && dispatch(setAllParameters(metadata));
    toast({
      title: t('toast:parametersSet'),
      status: 'success',
@@ -38,7 +38,6 @@ export const uploadImage =
    });

    const image = (await response.json()) as InvokeAI.ImageUploadResponse;
    console.log(image);
    const newImage: InvokeAI.Image = {
      uuid: uuidv4(),
      category: 'user',
@@ -1,10 +1,53 @@
import { Flex } from '@chakra-ui/react';
import { ChangeEvent } from 'react';
import { RootState } from 'app/store';
import type { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISwitch from 'common/components/IAISwitch';
import { setHiresFix } from 'features/options/store/optionsSlice';
import {
  setHiresFix,
  setHiresStrength,
} from 'features/options/store/optionsSlice';
import { useTranslation } from 'react-i18next';
import IAISlider from 'common/components/IAISlider';

function HighResStrength() {
  const hiresFix = useAppSelector((state: RootState) => state.options.hiresFix);
  const hiresStrength = useAppSelector(
    (state: RootState) => state.options.hiresStrength
  );

  const dispatch = useAppDispatch();

  const { t } = useTranslation();

  const handleHiresStrength = (v: number) => {
    dispatch(setHiresStrength(v));
  };

  const handleHiResStrengthReset = () => {
    dispatch(setHiresStrength(0.75));
  };

  return (
    <IAISlider
      label={t('options:hiresStrength')}
      step={0.01}
      min={0.01}
      max={0.99}
      onChange={handleHiresStrength}
      value={hiresStrength}
      isInteger={false}
      withInput
      withSliderMarks
      inputWidth={'5.5rem'}
      withReset
      handleReset={handleHiResStrengthReset}
      isSliderDisabled={!hiresFix}
      isInputDisabled={!hiresFix}
      isResetDisabled={!hiresFix}
    />
  );
}

/**
 * Hires Fix Toggle
@@ -27,6 +70,7 @@ const HiresOptions = () => {
        isChecked={hiresFix}
        onChange={handleChangeHiresFix}
      />
      <HighResStrength />
    </Flex>
  );
};
|
||||
import { FormControl, Textarea } from '@chakra-ui/react';
|
||||
import type { RootState } from 'app/store';
|
||||
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
|
||||
import { setNegativePrompt } from 'features/options/store/optionsSlice';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
|
||||
export function NegativePromptInput() {
|
||||
const negativePrompt = useAppSelector(
|
||||
(state: RootState) => state.options.negativePrompt
|
||||
);
|
||||
|
||||
const dispatch = useAppDispatch();
|
||||
const { t } = useTranslation();
|
||||
|
||||
return (
|
||||
<FormControl>
|
||||
<Textarea
|
||||
id="negativePrompt"
|
||||
name="negativePrompt"
|
||||
value={negativePrompt}
|
||||
onChange={(e) => dispatch(setNegativePrompt(e.target.value))}
|
||||
background="var(--prompt-bg-color)"
|
||||
placeholder={t('options:negativePrompts')}
|
||||
_placeholder={{ fontSize: '0.8rem' }}
|
||||
borderColor="var(--border-color)"
|
||||
_hover={{
|
||||
borderColor: 'var(--border-color-light)',
|
||||
}}
|
||||
_focusVisible={{
|
||||
borderColor: 'var(--border-color-invalid)',
|
||||
boxShadow: '0 0 10px var(--box-shadow-color-invalid)',
|
||||
}}
|
||||
fontSize="0.9rem"
|
||||
color="var(--text-color-secondary)"
|
||||
/>
|
||||
</FormControl>
|
||||
);
|
||||
}
|
@@ -5,6 +5,7 @@ import promptToString from 'common/util/promptToString';
import { seedWeightsToString } from 'common/util/seedWeightPairs';
import { FACETOOL_TYPES } from 'app/constants';
import { InvokeTabName, tabMap } from 'features/tabs/tabMap';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative';

export type UpscalingLevel = 2 | 4;

@@ -19,6 +20,7 @@ export interface OptionsState {
  facetoolType: FacetoolType;
  height: number;
  hiresFix: boolean;
  hiresStrength: number;
  img2imgStrength: number;
  infillMethod: string;
  initialImage?: InvokeAI.Image | string; // can be an Image or url
@@ -28,6 +30,7 @@
  optionsPanelScrollPosition: number;
  perlin: number;
  prompt: string;
  negativePrompt: string;
  sampler: string;
  seamBlur: number;
  seamless: boolean;
@@ -69,6 +72,7 @@ const initialOptionsState: OptionsState = {
  facetoolType: 'gfpgan',
  height: 512,
  hiresFix: false,
  hiresStrength: 0.75,
  img2imgStrength: 0.75,
  infillMethod: 'patchmatch',
  isLightBoxOpen: false,
@@ -77,6 +81,7 @@
  optionsPanelScrollPosition: 0,
  perlin: 0,
  prompt: '',
  negativePrompt: '',
  sampler: 'k_lms',
  seamBlur: 16,
  seamless: false,
@@ -123,6 +128,17 @@ export const optionsSlice = createSlice({
        state.prompt = promptToString(newPrompt);
      }
    },
    setNegativePrompt: (
      state,
      action: PayloadAction<string | InvokeAI.Prompt>
    ) => {
      const newPrompt = action.payload;
      if (typeof newPrompt === 'string') {
        state.negativePrompt = newPrompt;
      } else {
        state.negativePrompt = promptToString(newPrompt);
      }
    },
    setIterations: (state, action: PayloadAction<number>) => {
      state.iterations = action.payload;
    },
@@ -175,6 +191,9 @@ export const optionsSlice = createSlice({
    setHiresFix: (state, action: PayloadAction<boolean>) => {
      state.hiresFix = action.payload;
    },
    setHiresStrength: (state, action: PayloadAction<number>) => {
      state.hiresStrength = action.payload;
    },
    setShouldFitToWidthHeight: (state, action: PayloadAction<boolean>) => {
      state.shouldFitToWidthHeight = action.payload;
    },
@@ -307,7 +326,14 @@ export const optionsSlice = createSlice({
        state.shouldRandomizeSeed = false;
      }

      if (prompt) state.prompt = promptToString(prompt);
      if (prompt) {
        const [promptOnly, negativePrompt] = getPromptAndNegative(prompt);
        if (promptOnly) state.prompt = promptOnly;
        negativePrompt
          ? (state.negativePrompt = negativePrompt)
          : (state.negativePrompt = '');
      }

      if (sampler) state.sampler = sampler;
      if (steps) state.steps = steps;
      if (cfg_scale) state.cfgScale = cfg_scale;
@@ -438,6 +464,7 @@ export const {
  setFacetoolType,
  setHeight,
  setHiresFix,
  setHiresStrength,
  setImg2imgStrength,
  setInfillMethod,
  setInitialImage,
@@ -448,6 +475,7 @@
  setParameter,
  setPerlin,
  setPrompt,
  setNegativePrompt,
  setSampler,
  setSeamBlur,
  setSeamless,
@@ -13,16 +13,16 @@ export default function LanguagePicker() {

  const LANGUAGES = {
    en: t('common:langEnglish'),
    ru: t('common:langRussian'),
    it: t('common:langItalian'),
    pt_br: t('common:langBrPortuguese'),
    de: t('common:langGerman'),
    pl: t('common:langPolish'),
    zh_cn: t('common:langSimplifiedChinese'),
    es: t('common:langSpanish'),
    ja: t('common:langJapanese'),
    nl: t('common:langDutch'),
    fr: t('common:langFrench'),
    de: t('common:langGerman'),
    it: t('common:langItalian'),
    ja: t('common:langJapanese'),
    pl: t('common:langPolish'),
    pt_br: t('common:langBrPortuguese'),
    ru: t('common:langRussian'),
    zh_cn: t('common:langSimplifiedChinese'),
    es: t('common:langSpanish'),
    ua: t('common:langUkranian'),
  };
@@ -316,7 +316,6 @@ export default function CheckpointModelEdit() {
      ) : (
        <Flex
          width="100%"
          height="250px"
          justifyContent="center"
          alignItems="center"
          backgroundColor="var(--background-color)"

@@ -271,7 +271,6 @@ export default function DiffusersModelEdit() {
      ) : (
        <Flex
          width="100%"
          height="250px"
          justifyContent="center"
          alignItems="center"
          backgroundColor="var(--background-color)"
@@ -19,6 +19,8 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import { useTranslation } from 'react-i18next';
import { Flex } from '@chakra-ui/react';
import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';

export default function ImageToImagePanel() {
  const { t } = useTranslation();
@@ -67,7 +69,10 @@ export default function ImageToImagePanel() {

  return (
    <InvokeOptionsPanel>
      <PromptInput />
      <Flex flexDir="column" rowGap="0.5rem">
        <PromptInput />
        <NegativePromptInput />
      </Flex>
      <ProcessButtons />
      <MainOptions />
      <ImageToImageStrength
@@ -24,8 +24,8 @@
  }

  svg {
    width: 26px;
    height: 26px;
    width: 24px;
    height: 24px;
  }

  &[aria-selected='true'] {
@@ -1,3 +1,4 @@
import { Flex } from '@chakra-ui/react';
import { Feature } from 'app/features';
import FaceRestoreOptions from 'features/options/components/AdvancedOptions/FaceRestore/FaceRestoreOptions';
import FaceRestoreToggle from 'features/options/components/AdvancedOptions/FaceRestore/FaceRestoreToggle';
@@ -10,6 +11,7 @@ import VariationsOptions from 'features/options/components/AdvancedOptions/Varia
import MainOptions from 'features/options/components/MainOptions/MainOptions';
import OptionsAccordion from 'features/options/components/OptionsAccordion';
import ProcessButtons from 'features/options/components/ProcessButtons/ProcessButtons';
import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';
import PromptInput from 'features/options/components/PromptInput/PromptInput';
import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
import { useTranslation } from 'react-i18next';
@@ -50,7 +52,10 @@ export default function TextToImagePanel() {

  return (
    <InvokeOptionsPanel>
      <PromptInput />
      <Flex flexDir="column" rowGap="0.5rem">
        <PromptInput />
        <NegativePromptInput />
      </Flex>
      <ProcessButtons />
      <MainOptions />
      <OptionsAccordion accordionInfo={textToImageAccordions} />
@@ -13,6 +13,8 @@ import InvokeOptionsPanel from 'features/tabs/components/InvokeOptionsPanel';
import BoundingBoxSettings from 'features/options/components/AdvancedOptions/Canvas/BoundingBoxSettings/BoundingBoxSettings';
import InfillAndScalingOptions from 'features/options/components/AdvancedOptions/Canvas/InfillAndScalingOptions';
import { useTranslation } from 'react-i18next';
import { Flex } from '@chakra-ui/react';
import { NegativePromptInput } from 'features/options/components/PromptInput/NegativePromptInput';

export default function UnifiedCanvasPanel() {
  const { t } = useTranslation();
@@ -48,7 +50,10 @@ export default function UnifiedCanvasPanel() {

  return (
    <InvokeOptionsPanel>
      <PromptInput />
      <Flex flexDir="column" rowGap="0.5rem">
        <PromptInput />
        <NegativePromptInput />
      </Flex>
      <ProcessButtons />
      <MainOptions />
      <ImageToImageStrength
@@ -211,7 +211,7 @@ class Generate:
                print('>> xformers memory-efficient attention is available but disabled')
        else:
            print('>> xformers not installed')

        # model caching system for fast switching
        self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models)
        # don't accept invalid models
@@ -344,6 +344,7 @@ class Generate:
        **args,
    ):  # eat up additional cruft
        self.clear_cuda_stats()
        """
        ldm.generate.prompt2image() is the common entry point for txt2img() and img2img()
        It takes the following arguments:
@@ -548,6 +549,7 @@ class Generate:
                inpaint_width = inpaint_width,
                enable_image_debugging = enable_image_debugging,
                free_gpu_mem=self.free_gpu_mem,
                clear_cuda_cache=self.clear_cuda_cache
            )

            if init_color:
@@ -565,11 +567,17 @@ class Generate:
                image_callback = image_callback)

        except KeyboardInterrupt:
            # Clear the CUDA cache on an exception
            self.clear_cuda_cache()

            if catch_interrupts:
                print('**Interrupted** Partial results will be returned.')
            else:
                raise KeyboardInterrupt
        except RuntimeError:
            # Clear the CUDA cache on an exception
            self.clear_cuda_cache()

            print(traceback.format_exc(), file=sys.stderr)
            print('>> Could not generate image.')

@@ -579,22 +587,42 @@ class Generate:
            f'>> {len(results)} image(s) generated in', '%4.2fs' % (
                toc - tic)
        )
        self.print_cuda_stats()
        return results

    def clear_cuda_cache(self):
        if self._has_cuda():
            self.max_memory_allocated = max(
                self.max_memory_allocated,
                torch.cuda.max_memory_allocated()
            )
            self.memory_allocated = max(
                self.memory_allocated,
                torch.cuda.memory_allocated()
            )
            self.session_peakmem = max(
                self.session_peakmem,
                torch.cuda.max_memory_allocated()
            )
            torch.cuda.empty_cache()

    def clear_cuda_stats(self):
        self.max_memory_allocated = 0
        self.memory_allocated = 0

    def print_cuda_stats(self):
        if self._has_cuda():
            print(
                '>> Max VRAM used for this generation:',
                '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
                '%4.2fG.' % (self.max_memory_allocated / 1e9),
                'Current VRAM utilization:',
                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
                '%4.2fG' % (self.memory_allocated / 1e9),
            )

            self.session_peakmem = max(
                self.session_peakmem, torch.cuda.max_memory_allocated()
            )
            print(
                '>> Max VRAM used since script start: ',
                '%4.2fG' % (self.session_peakmem / 1e9),
            )
        return results

    # this needs to be generalized to all sorts of postprocessors, which should be wrapped
    # in a nice harmonized call signature. For now we have a bunch of if/elses!
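The bookkeeping above exists because `torch.cuda.memory_allocated()` falls as soon as tensors are freed, so a reading taken after cleanup under-reports the generation's real footprint. Snapshotting the allocator counters into running maxima before `empty_cache()` preserves the true peaks. A condensed sketch (the torch calls are real APIs; the standalone class is illustrative):

    import torch

    class CudaStats:
        def __init__(self):
            self.max_memory_allocated = 0   # peak for the current generation
            self.memory_allocated = 0       # high-water of live allocations
            self.session_peakmem = 0        # peak across the whole session

        def clear_cuda_stats(self):
            # called at the start of each generation
            self.max_memory_allocated = 0
            self.memory_allocated = 0

        def clear_cuda_cache(self):
            if torch.cuda.is_available():
                # snapshot the counters first, so freeing tensors and clearing
                # the cache cannot erase the per-generation peak
                self.max_memory_allocated = max(
                    self.max_memory_allocated, torch.cuda.max_memory_allocated())
                self.memory_allocated = max(
                    self.memory_allocated, torch.cuda.memory_allocated())
                self.session_peakmem = max(
                    self.session_peakmem, torch.cuda.max_memory_allocated())
                torch.cuda.empty_cache()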
@@ -122,6 +122,11 @@ class Generator:

                seed = self.new_seed()

                # Free up memory from the last generation.
                clear_cuda_cache = kwargs['clear_cuda_cache'] or None
                if clear_cuda_cache is not None:
                    clear_cuda_cache()

        return results

    def sample_to_image(self,samples)->Image.Image:
@@ -240,7 +245,12 @@ class Generator:

    def get_perlin_noise(self,width,height):
        fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
        noise = torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
        # limit noise to only the diffusion image channels, not the mask channels
        input_channels = min(self.latent_channels, 4)
        noise = torch.stack([
            rand_perlin_2d((height, width),
                           (8, 8),
                           device = self.model.device).to(fixdevice) for _ in range(input_channels)], dim=0).to(self.model.device)
        return noise

    def new_seed(self):
@@ -341,3 +351,27 @@ class Generator:

    def torch_dtype(self)->torch.dtype:
        return torch.float16 if self.precision == 'float16' else torch.float32

    # returns a tensor filled with random numbers from a normal distribution
    def get_noise(self,width,height):
        device = self.model.device
        # limit noise to only the diffusion image channels, not the mask channels
        input_channels = min(self.latent_channels, 4)
        if self.use_mps_noise or device.type == 'mps':
            x = torch.randn([1,
                             input_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
                            dtype=self.torch_dtype(),
                            device='cpu').to(device)
        else:
            x = torch.randn([1,
                             input_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
                            dtype=self.torch_dtype(),
                            device=device)
        if self.perlin > 0.0:
            perlin_noise = self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
            x = (1-self.perlin)*x + self.perlin*perlin_noise
        return x
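Two details in the consolidated `get_noise()` are worth spelling out: noise is generated only for the first four latent channels (`min(self.latent_channels, 4)`), because inpainting models carry extra mask channels that must not receive noise, and perlin noise is mixed in by linear interpolation. A small sketch of the blend (real torch calls; the shapes are illustrative):

    import torch

    def blend_noise(gaussian: torch.Tensor, perlin: torch.Tensor, p: float) -> torch.Tensor:
        # p corresponds to self.perlin in the generator; p=0.0 leaves pure
        # gaussian noise, p=1.0 would be pure perlin noise
        return (1 - p) * gaussian + p * perlin

    input_channels = 4  # noise is capped at the 4 diffusion image channels
    x = blend_noise(torch.randn(1, input_channels, 64, 64),
                    torch.randn(1, input_channels, 64, 64),
                    0.2)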
@@ -63,22 +63,3 @@ class Img2Img(Generator):
        shape = like.shape
        x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2])
        return x

    def get_noise(self,width,height):
        # copy of the Txt2Img.get_noise
        device = self.model.device
        if self.use_mps_noise or device.type == 'mps':
            x = torch.randn([1,
                             self.latent_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
                            device='cpu').to(device)
        else:
            x = torch.randn([1,
                             self.latent_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
                            device=device)
        if self.perlin > 0.0:
            x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
        return x
@@ -51,26 +51,4 @@ class Txt2Img(Generator):
        return make_image


    # returns a tensor filled with random numbers from a normal distribution
    def get_noise(self,width,height):
        device = self.model.device
        # limit noise to only the diffusion image channels, not the mask channels
        input_channels = min(self.latent_channels, 4)
        if self.use_mps_noise or device.type == 'mps':
            x = torch.randn([1,
                             input_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
                            dtype=self.torch_dtype(),
                            device='cpu').to(device)
        else:
            x = torch.randn([1,
                             input_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
                            dtype=self.torch_dtype(),
                            device=device)
        if self.perlin > 0.0:
            x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
        return x
@@ -65,6 +65,11 @@ class Txt2Img2Img(Generator):
                mode="bilinear"
            )

            # Free up memory from the last generation.
            clear_cuda_cache = kwargs['clear_cuda_cache'] or None
            if clear_cuda_cache is not None:
                clear_cuda_cache()

            second_pass_noise = self.get_noise_like(resized_latents)

            verbosity = get_verbosity()
@@ -15,20 +15,18 @@ from pathlib import Path
from typing import List, Union

import npyscreen
from diffusers import DiffusionPipeline, logging as dlogging
from diffusers import DiffusionPipeline
from diffusers import logging as dlogging
from npyscreen import widget
from omegaconf import OmegaConf

from ldm.invoke.globals import (
    Globals,
    global_cache_dir,
    global_config_file,
    global_models_dir,
    global_set_root,
)
from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
                                global_models_dir, global_set_root)
from ldm.invoke.model_manager import ModelManager

DEST_MERGED_MODEL_DIR = "merged_models"


def merge_diffusion_models(
    model_ids_or_paths: List[Union[str, Path]],
    alpha: float = 0.5,
@@ -48,10 +46,10 @@ def merge_diffusion_models(
    cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        warnings.simplefilter("ignore")
        verbosity = dlogging.get_verbosity()
        dlogging.set_verbosity_error()

        pipe = DiffusionPipeline.from_pretrained(
            model_ids_or_paths[0],
            cache_dir=kwargs.get("cache_dir", global_cache_dir()),
@@ -188,13 +186,12 @@ class FloatTitleSlider(npyscreen.TitleText):


class mergeModelsForm(npyscreen.FormMultiPageAction):
    interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"]

    def __init__(self, parentApp, name):
        self.parentApp = parentApp
        self.ALLOW_RESIZE=True
        self.FIX_MINIMUM_SIZE_WHEN_CREATED=False
        self.ALLOW_RESIZE = True
        self.FIX_MINIMUM_SIZE_WHEN_CREATED = False
        super().__init__(parentApp, name)

    @property
@@ -205,29 +202,29 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
        self.parentApp.setNextForm(None)

    def create(self):
        window_height,window_width=curses.initscr().getmaxyx()

        window_height, window_width = curses.initscr().getmaxyx()

        self.model_names = self.get_model_names()
        max_width = max([len(x) for x in self.model_names])
        max_width += 6
        horizontal_layout = max_width*3 < window_width

        horizontal_layout = max_width * 3 < window_width

        self.add_widget_intelligent(
            npyscreen.FixedText,
            color='CONTROL',
            color="CONTROL",
            value=f"Select two models to merge and optionally a third.",
            editable=False,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            color='CONTROL',
            color="CONTROL",
            value=f"Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
            editable=False,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value='MODEL 1',
            color='GOOD',
            value="MODEL 1",
            color="GOOD",
            editable=False,
            rely=4 if horizontal_layout else None,
        )
@@ -242,57 +239,57 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value='MODEL 2',
            color='GOOD',
            value="MODEL 2",
            color="GOOD",
            editable=False,
            relx=max_width+3 if horizontal_layout else None,
            relx=max_width + 3 if horizontal_layout else None,
            rely=4 if horizontal_layout else None,
        )
        self.model2 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            name='(2)',
            name="(2)",
            values=self.model_names,
            value=1,
            max_height=len(self.model_names),
            max_width=max_width,
            relx=max_width+3 if horizontal_layout else None,
            relx=max_width + 3 if horizontal_layout else None,
            rely=5 if horizontal_layout else None,
            scroll_exit=True,
        )
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value='MODEL 3',
            color='GOOD',
            value="MODEL 3",
            color="GOOD",
            editable=False,
            relx=max_width*2+3 if horizontal_layout else None,
            relx=max_width * 2 + 3 if horizontal_layout else None,
            rely=4 if horizontal_layout else None,
        )
        models_plus_none = self.model_names.copy()
        models_plus_none.insert(0,'None')
        models_plus_none.insert(0, "None")
        self.model3 = self.add_widget_intelligent(
            npyscreen.SelectOne,
            name='(3)',
            name="(3)",
            values=models_plus_none,
            value=0,
            max_height=len(self.model_names)+1,
            max_height=len(self.model_names) + 1,
            max_width=max_width,
            scroll_exit=True,
            relx=max_width*2+3 if horizontal_layout else None,
            relx=max_width * 2 + 3 if horizontal_layout else None,
            rely=5 if horizontal_layout else None,
        )
        for m in [self.model1,self.model2,self.model3]:
        for m in [self.model1, self.model2, self.model3]:
            m.when_value_edited = self.models_changed
        self.merged_model_name = self.add_widget_intelligent(
            npyscreen.TitleText,
            name="Name for merged model:",
            labelColor='CONTROL',
            labelColor="CONTROL",
            value="",
            scroll_exit=True,
        )
        self.force = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Force merge of incompatible models",
            labelColor='CONTROL',
            labelColor="CONTROL",
            value=False,
            scroll_exit=True,
        )
@@ -301,7 +298,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
            name="Merge Method:",
            values=self.interpolations,
            value=0,
            labelColor='CONTROL',
            labelColor="CONTROL",
            max_height=len(self.interpolations) + 1,
            scroll_exit=True,
        )
@@ -312,7 +309,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
            step=0.05,
            lowest=0,
            value=0.5,
            labelColor='CONTROL',
            labelColor="CONTROL",
            scroll_exit=True,
        )
        self.model1.editing = True
@@ -322,43 +319,43 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
        selected_model1 = self.model1.value[0]
        selected_model2 = self.model2.value[0]
        selected_model3 = self.model3.value[0]
        merged_model_name = f'{models[selected_model1]}+{models[selected_model2]}'
        merged_model_name = f"{models[selected_model1]}+{models[selected_model2]}"
        self.merged_model_name.value = merged_model_name

        if selected_model3 > 0:
            self.merge_method.values=['add_difference'],
            self.merged_model_name.value += f'+{models[selected_model3]}'
            self.merge_method.values = (["add_difference"],)
            self.merged_model_name.value += f"+{models[selected_model3]}"
        else:
            self.merge_method.values=self.interpolations
            self.merge_method.value=0
            self.merge_method.values = self.interpolations
            self.merge_method.value = 0

    def on_ok(self):
        if self.validate_field_values() and self.check_for_overwrite():
            self.parentApp.setNextForm(None)
            self.editing = False
            self.parentApp.merge_arguments = self.marshall_arguments()
            npyscreen.notify('Starting the merge...')
            npyscreen.notify("Starting the merge...")
        else:
            self.editing = True

    def on_cancel(self):
        sys.exit(0)

    def marshall_arguments(self)->dict:
    def marshall_arguments(self) -> dict:
        model_names = self.model_names
        models = [
            model_names[self.model1.value[0]],
            model_names[self.model2.value[0]],
        ]
        if self.model3.value[0] > 0:
            models.append(model_names[self.model3.value[0]-1])
            models.append(model_names[self.model3.value[0] - 1])

        args = dict(
            models=models,
            alpha = self.alpha.value,
            interp = self.interpolations[self.merge_method.value[0]],
            force = self.force.value,
            merged_model_name = self.merged_model_name.value,
            alpha=self.alpha.value,
            interp=self.interpolations[self.merge_method.value[0]],
            force=self.force.value,
            merged_model_name=self.merged_model_name.value,
        )
        return args

@@ -371,18 +368,22 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
            f"The chosen merged model destination, {model_out}, is already in use. Overwrite?"
        )

    def validate_field_values(self)->bool:
    def validate_field_values(self) -> bool:
        bad_fields = []
        model_names = self.model_names
        selected_models = set((model_names[self.model1.value[0]],model_names[self.model2.value[0]]))
        selected_models = set(
            (model_names[self.model1.value[0]], model_names[self.model2.value[0]])
        )
        if self.model3.value[0] > 0:
            selected_models.add(model_names[self.model3.value[0]-1])
            selected_models.add(model_names[self.model3.value[0] - 1])
        if len(selected_models) < 2:
            bad_fields.append(f'Please select two or three DIFFERENT models to compare. You selected {selected_models}')
            bad_fields.append(
                f"Please select two or three DIFFERENT models to compare. You selected {selected_models}"
            )
        if len(bad_fields) > 0:
            message = 'The following problems were detected and must be corrected:'
            message = "The following problems were detected and must be corrected:"
            for problem in bad_fields:
                message += f'\n* {problem}'
                message += f"\n* {problem}"
            npyscreen.notify_confirm(message)
            return False
        else:
@@ -410,6 +411,7 @@ class Mergeapp(npyscreen.NPSAppManaged):
        npyscreen.setTheme(npyscreen.Themes.ElegantTheme)
        self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings")


def run_gui(args: Namespace):
    mergeapp = Mergeapp()
    mergeapp.run()
@@ -450,5 +452,27 @@ def main():
    ] = cache_dir  # because not clear the merge pipeline is honoring cache_dir
    args.cache_dir = cache_dir

    try:
        if args.front_end:
            run_gui(args)
        else:
            run_cli(args)
        print(f">> Conversion successful. New model is named {args.merged_model_name}")
    except widget.NotEnoughSpaceForWidget as e:
        if str(e).startswith("Height of 1 allocated"):
            print(
                "** You need to have at least two diffusers models defined in models.yaml in order to merge"
            )
        else:
            print(f"** A layout error has occurred: {str(e)}")
        sys.exit(-1)
    except Exception as e:
        print(">> An error occurred:")
        traceback.print_exc()
        sys.exit(-1)
    except KeyboardInterrupt:
        sys.exit(-1)


if __name__ == "__main__":
    main()
@@ -753,7 +753,7 @@ class ModelManager(object):
        return search_folder, found_models

    def _choose_diffusers_vae(self, model_name:str, vae:str=None)->Union[dict,str]:

        # In the event that the original entry is using a custom ckpt VAE, we try to
        # map that VAE onto a diffuser VAE using a hard-coded dictionary.
        # I would prefer to do this differently: We load the ckpt model into memory, swap the
@@ -954,7 +954,7 @@ class ModelManager(object):
    def _has_cuda(self) -> bool:
        return self.device.type == 'cuda'

    def _diffuser_sha256(self,name_or_path:Union[str, Path])->Union[str,bytes]:
    def _diffuser_sha256(self,name_or_path:Union[str, Path],chunksize=4096)->Union[str,bytes]:
        path = None
        if isinstance(name_or_path,Path):
            path = name_or_path
@@ -976,7 +976,8 @@ class ModelManager(object):
        for name in files:
            count += 1
            with open(os.path.join(root,name),'rb') as f:
                sha.update(f.read())
                while chunk := f.read(chunksize):
                    sha.update(chunk)
        hash = sha.hexdigest()
        toc = time.time()
        print(f' | sha256 = {hash} ({count} files hashed in','%4.2fs)' % (toc - tic))
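The hashing change above swaps a whole-file `f.read()` for a chunked read with the walrus operator, which keeps memory flat while hashing multi-gigabyte model folders. A standalone sketch of the technique (standard library only; the 4096-byte default mirrors the new `chunksize` parameter):

    import hashlib
    from pathlib import Path

    def sha256_of_file(path: Path, chunksize: int = 4096) -> str:
        sha = hashlib.sha256()
        with open(path, "rb") as f:
            # read() returns b'' at EOF, which ends the loop
            while chunk := f.read(chunksize):
                sha.update(chunk)
        return sha.hexdigest()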
@@ -17,6 +17,7 @@ from pathlib import Path
from typing import List, Tuple

import npyscreen
from npyscreen import widget
from omegaconf import OmegaConf

from ldm.invoke.globals import Globals, global_set_root
@@ -295,7 +296,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
            for idx in range(len(model_names))
            if "default" in conf[model_names[idx]]
        ]
        default = defaults[0] if len(defaults)>0 else 0
        default = defaults[0] if len(defaults) > 0 else 0
        return (model_names, default)

    def marshall_arguments(self) -> dict:
@@ -438,11 +439,20 @@ def main():
            do_front_end(args)
        else:
            do_textual_inversion_training(**vars(args))
    except widget.NotEnoughSpaceForWidget as e:
        if str(e).startswith("Height of 1 allocated"):
            print(
                "** You need to have at least one diffusers models defined in models.yaml in order to train"
            )
        else:
            print(f"** A layout error has occurred: {str(e)}")
        sys.exit(-1)
    except AssertionError as e:
        print(str(e))
        sys.exit(-1)
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    main()
@@ -1,26 +1,26 @@
import requests as request
import requests

import ldm.invoke._version as version
from ldm.invoke import __app_name__, __version__

local_version = str(version.__version__)
local_version = str(__version__).replace("-", "")
package_name = str(__app_name__)


def get_pypi_versions(package_name="InvokeAI") -> list[str]:
def get_pypi_versions(package_name=package_name) -> list[str]:
    """Get the versions of the package from PyPI"""
    url = f"https://pypi.org/pypi/{package_name}/json"
    response = request.get(url).json()
    response = requests.get(url).json()
    versions: list[str] = list(response["releases"].keys())
    return versions


def local_on_pypi(package_name="InvokeAI", local_version=local_version) -> bool:
def local_on_pypi(package_name=package_name, local_version=local_version) -> bool:
    """Compare the versions of the package from PyPI and the local package"""
    pypi_versions = get_pypi_versions(package_name)
    return local_version in pypi_versions


if __name__ == "__main__":
    package_name = "InvokeAI"
    if local_on_pypi():
        print(f"Package {package_name} is up to date")
    else:
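The rewritten check derives both the package name and the local version from the package itself instead of hard-coding them, and imports the `requests` module under its real name. A self-contained sketch of the underlying query (pypi.org's public JSON API; the version string is only an example):

    import requests

    def local_on_pypi(package_name: str, local_version: str) -> bool:
        # PyPI's JSON API lists every published release under "releases"
        url = f"https://pypi.org/pypi/{package_name}/json"
        releases = requests.get(url, timeout=10).json()["releases"]
        return local_version in releases

    print(local_on_pypi("InvokeAI", "2.3.0"))  # True once 2.3.0 is on PyPI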