Merge branch 'development' into model-switching

Lincoln Stein 2022-10-14 13:18:59 -04:00 committed by GitHub
commit fe2a2cfc8b
32 changed files with 727 additions and 575 deletions

View File

@@ -319,7 +319,7 @@ class InvokeAIWebServer:
            elif postprocessing_parameters['type'] == 'gfpgan':
                image = self.gfpgan.process(
                    image=image,
-                   strength=postprocessing_parameters['gfpgan_strength'],
+                   strength=postprocessing_parameters['facetool_strength'],
                    seed=seed,
                )
            else:
@@ -625,7 +625,7 @@ class InvokeAIWebServer:
                    seed=seed,
                )
                postprocessing = True
-               all_parameters['gfpgan_strength'] = gfpgan_parameters[
+               all_parameters['facetool_strength'] = gfpgan_parameters[
                    'strength'
                ]
@@ -723,6 +723,7 @@ class InvokeAIWebServer:
            'height',
            'extra',
            'seamless',
+           'hires_fix',
        ]

        rfc_dict = {}
@@ -735,12 +736,12 @@ class InvokeAIWebServer:
        postprocessing = []

        # 'postprocessing' is either null or an
-       if 'gfpgan_strength' in parameters:
+       if 'facetool_strength' in parameters:
            postprocessing.append(
                {
                    'type': 'gfpgan',
-                   'strength': float(parameters['gfpgan_strength']),
+                   'strength': float(parameters['facetool_strength']),
                }
            )
@@ -837,7 +838,7 @@ class InvokeAIWebServer:
            elif parameters['type'] == 'gfpgan':
                postprocessing_metadata['type'] = 'gfpgan'
                postprocessing_metadata['strength'] = parameters[
-                   'gfpgan_strength'
+                   'facetool_strength'
                ]
            else:
                raise TypeError(f"Invalid type: {parameters['type']}")

View File

@@ -36,6 +36,8 @@ def parameters_to_command(params):
        switches.append(f'-A {params["sampler_name"]}')
    if "seamless" in params and params["seamless"] == True:
        switches.append(f"--seamless")
+   if "hires_fix" in params and params["hires_fix"] == True:
+       switches.append(f"--hires")
    if "init_img" in params and len(params["init_img"]) > 0:
        switches.append(f'-I {params["init_img"]}')
    if "init_mask" in params and len(params["init_mask"]) > 0:
@@ -46,8 +48,14 @@ def parameters_to_command(params):
        switches.append(f'-f {params["strength"]}')
    if "fit" in params and params["fit"] == True:
        switches.append(f"--fit")
-   if "gfpgan_strength" in params and params["gfpgan_strength"]:
+   if "facetool" in params:
+       switches.append(f'-ft {params["facetool"]}')
+   if "facetool_strength" in params and params["facetool_strength"]:
+       switches.append(f'-G {params["facetool_strength"]}')
+   elif "gfpgan_strength" in params and params["gfpgan_strength"]:
        switches.append(f'-G {params["gfpgan_strength"]}')
+   if "codeformer_fidelity" in params:
+       switches.append(f'-cf {params["codeformer_fidelity"]}')
    if "upscale" in params and params["upscale"]:
        switches.append(f'-U {params["upscale"][0]} {params["upscale"][1]}')
    if "variation_amount" in params and params["variation_amount"] > 0:

View File

@@ -349,7 +349,7 @@ def handle_run_gfpgan_event(original_image, gfpgan_parameters):
    eventlet.sleep(0)
    image = gfpgan.process(
-       image=image, strength=gfpgan_parameters["gfpgan_strength"], seed=seed
+       image=image, strength=gfpgan_parameters["facetool_strength"], seed=seed
    )
    progress["currentStatus"] = "Saving image"
@@ -464,7 +464,7 @@ def parameters_to_post_processed_image_metadata(parameters, original_image_path,
        image["strength"] = parameters["upscale"][1]
    elif type == "gfpgan":
        image["type"] = "gfpgan"
-       image["strength"] = parameters["gfpgan_strength"]
+       image["strength"] = parameters["facetool_strength"]
    else:
        raise TypeError(f"Invalid type: {type}")
@@ -493,6 +493,7 @@ def parameters_to_generated_image_metadata(parameters):
        "height",
        "extra",
        "seamless",
+       "hires_fix",
    ]

    rfc_dict = {}
@@ -505,10 +506,10 @@ def parameters_to_generated_image_metadata(parameters):
    postprocessing = []

    # 'postprocessing' is either null or an
-   if "gfpgan_strength" in parameters:
+   if "facetool_strength" in parameters:
        postprocessing.append(
-           {"type": "gfpgan", "strength": float(parameters["gfpgan_strength"])}
+           {"type": "gfpgan", "strength": float(parameters["facetool_strength"])}
        )

    if "upscale" in parameters:
@@ -751,7 +752,7 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)
            image=image, strength=gfpgan_parameters["strength"], seed=seed
        )
        postprocessing = True
-       all_parameters["gfpgan_strength"] = gfpgan_parameters["strength"]
+       all_parameters["facetool_strength"] = gfpgan_parameters["strength"]

    progress["currentStatus"] = "Saving image"
    socketio.emit("progressUpdate", progress)

View File

@@ -154,7 +154,9 @@ Here are the invoke> command that apply to txt2img:
| --log_tokenization | -t | False | Display a color-coded list of the parsed tokens derived from the prompt |
| --skip_normalization| -x | False | Weighted subprompts will not be normalized. See [Weighted Prompts](./OTHER.md#weighted-prompts) |
| --upscale <int> <float> | -U <int> <float> | -U 1 0.75| Upscale image by magnification factor (2, 4), and set strength of upscaling (0.0-1.0). If strength not set, will default to 0.75. |
-| --gfpgan_strength <float> | -G <float> | -G0 | Fix faces using the GFPGAN algorithm; argument indicates how hard the algorithm should try (0.0-1.0) |
+| --facetool_strength <float> | -G <float> | -G0 | Fix faces (defaults to using the GFPGAN algorithm); argument indicates how hard the algorithm should try (0.0-1.0) |
+| --facetool <name> | -ft <name> | -ft gfpgan | Select face restoration algorithm to use: gfpgan, codeformer |
+| --codeformer_fidelity | -cf <float> | 0.75 | Used along with CodeFormer. Takes values between 0 and 1. 0 produces high quality but low accuracy. 1 produces high accuracy but low quality |
| --save_original | -save_orig| False | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. |
| --variation <float> |-v<float>| 0.0 | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with -S<seed> and -n<int> to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). |
| --with_variations <pattern> | | None | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
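For example, a hypothetical `invoke>` line combining the face-restoration switches documented above (the prompt and values are purely illustrative; only the flag names come from this table):

```
invoke> "portrait of a lighthouse keeper" -s 50 -ft codeformer -G 0.8 -cf 0.75 -U 2 0.75
```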

View File

@@ -69,7 +69,7 @@ If you do not explicitly specify an upscaling_strength, it will default to 0.75.
### Face Restoration

-`-G : <gfpgan_strength>`
+`-G : <facetool_strength>`

This prompt argument controls the strength of the face restoration that is being
applied. Similar to upscaling, values between `0.5 to 0.8` are recommended.

File diff suppressed because one or more lines are too long

frontend/dist/assets/index.ea68b5f5.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -6,7 +6,7 @@
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>InvokeAI - A Stable Diffusion Toolkit</title>
    <link rel="shortcut icon" type="icon" href="/assets/favicon.0d253ced.ico" />
-   <script type="module" crossorigin src="/assets/index.989a0ca2.js"></script>
+   <script type="module" crossorigin src="/assets/index.ea68b5f5.js"></script>
    <link rel="stylesheet" href="/assets/index.58175ea1.css">
  </head>

View File

@@ -50,6 +50,7 @@ export const PARAMETERS: { [key: string]: string } = {
  maskPath: 'Initial Image Mask',
  shouldFitToWidthHeight: 'Fit Initial Image',
  seamless: 'Seamless Tiling',
+ hiresFix: 'High Resolution Optimizations',
};

export const NUMPY_RAND_MIN = 0;

View File

@@ -14,10 +14,13 @@ export enum Feature {
  FACE_CORRECTION,
  IMAGE_TO_IMAGE,
}
+/** For each tooltip in the UI, the below feature definitions & props will pull relevant information into the tooltip.
+ *
+ * To-do: href & GuideImages are placeholders, and are not currently utilized, but will be updated (along with the tooltip UI) as feature and UI development and we get a better idea on where things "forever homes" will be .
+ */
export const FEATURES: Record<Feature, FeatureHelpInfo> = {
  [Feature.PROMPT]: {
-   text: 'This field will take all prompt text, including both content and stylistic terms. CLI Commands will not work in the prompt.',
+   text: 'This field will take all prompt text, including both content and stylistic terms. While weights can be included in the prompt, standard CLI Commands/parameters will not work.',
    href: 'link/to/docs/feature3.html',
    guideImage: 'asset/path.gif',
  },
@@ -27,17 +30,16 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
    guideImage: 'asset/path.gif',
  },
  [Feature.OTHER]: {
-   text: 'Additional Options',
-   href: 'link/to/docs/feature3.html',
+   text: 'These options will enable alternative processing modes for Invoke. Seamless tiling will work to generate repeating patterns in the output. High Resolution Optimization performs a two-step generation cycle, and should be used at higher resolutions when you desire a more coherent image/composition. ', href: 'link/to/docs/feature3.html',
    guideImage: 'asset/path.gif',
  },
  [Feature.SEED]: {
-   text: 'Seed values provide an initial set of noise which guide the denoising process.',
+   text: 'Seed values provide an initial set of noise which guide the denoising process, and can be randomized or populated with a seed from a previous invocation. The Threshold feature can be used to mitigate undesirable outcomes at higher CFG values (try between 0-10), and Perlin can be used to add Perlin noise into the denoising process - Both serve to add variation to your outputs. ',
    href: 'link/to/docs/feature3.html',
    guideImage: 'asset/path.gif',
  },
  [Feature.VARIATIONS]: {
-   text: 'Try a variation with an amount of between 0 and 1 to change the output image for the set seed.',
+   text: 'Try a variation with an amount of between 0 and 1 to change the output image for the set seed - Interesting variations on the seed are found between 0.1 and 0.3.',
    href: 'link/to/docs/feature3.html',
    guideImage: 'asset/path.gif',
  },
@@ -47,8 +49,8 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
    guideImage: 'asset/path.gif',
  },
  [Feature.FACE_CORRECTION]: {
-   text: 'Using GFPGAN or CodeFormer, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher values will apply a stronger corrective pressure on outputs.',
-   href: 'link/to/docs/feature2.html',
+   text: 'Using GFPGAN, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher values will apply a stronger corrective pressure on outputs, resulting in more appealing faces (with less respect for accuracy of the original subject).',
+   href: 'link/to/docs/feature3.html',
    guideImage: 'asset/path.gif',
  },
  [Feature.IMAGE_TO_IMAGE]: {

View File

@@ -55,6 +55,7 @@ export declare type CommonGeneratedImageMetadata = {
  width: number;
  height: number;
  seamless: boolean;
+ hires_fix: boolean;
  extra: null | Record<string, never>; // Pending development of RFC #266
};

View File

@@ -76,7 +76,7 @@ const makeSocketIOEmitters = (
      const { gfpganStrength } = getState().options;

      const gfpganParameters = {
-       gfpgan_strength: gfpganStrength,
+       facetool_strength: gfpganStrength,
      };
      socketio.emit('runPostprocessing', imageToProcess, {
        type: 'gfpgan',

View File

@@ -29,6 +29,7 @@ export const frontendToBackendParameters = (
    sampler,
    seed,
    seamless,
+   hiresFix,
    shouldUseInitImage,
    img2imgStrength,
    initialImagePath,
@@ -59,6 +60,7 @@
    sampler_name: sampler,
    seed,
    seamless,
+   hires_fix: hiresFix,
    progress_images: shouldDisplayInProgress,
  };
@@ -123,10 +125,11 @@ export const backendToFrontendParameters = (parameters: {
    sampler_name,
    seed,
    seamless,
+   hires_fix,
    progress_images,
    variation_amount,
    with_variations,
-   gfpgan_strength,
+   facetool_strength,
    upscale,
    init_img,
    init_mask,
@@ -151,9 +154,9 @@
      }
    }

-   if (gfpgan_strength > 0) {
+   if (facetool_strength > 0) {
      options.shouldRunGFPGAN = true;
-     options.gfpganStrength = gfpgan_strength;
+     options.gfpganStrength = facetool_strength;
    }

    if (upscale) {
@@ -185,6 +188,7 @@ export const backendToFrontendParameters = (parameters: {
    options.sampler = sampler_name;
    options.seed = seed;
    options.seamless = seamless;
+   options.hiresFix = hires_fix;
  }

  return options;

View File

@@ -16,11 +16,13 @@ import {
  setCfgScale,
  setGfpganStrength,
  setHeight,
+ setHiresFix,
  setImg2imgStrength,
  setInitialImagePath,
  setMaskPath,
  setPrompt,
  setSampler,
+ setSeamless,
  setSeed,
  setSeedWeights,
  setShouldFitToWidthHeight,
@@ -116,6 +118,7 @@ const ImageMetadataViewer = memo(
      steps,
      cfg_scale,
      seamless,
+     hires_fix,
      width,
      height,
      strength,
@@ -214,7 +217,14 @@
          <MetadataItem
            label="Seamless"
            value={seamless}
-           onClick={() => dispatch(setWidth(seamless))}
+           onClick={() => dispatch(setSeamless(seamless))}
+         />
+       )}
+       {hires_fix && (
+         <MetadataItem
+           label="High Resolution Optimization"
+           value={hires_fix}
+           onClick={() => dispatch(setHiresFix(hires_fix))}
          />
        )}
        {width && (

View File

@@ -0,0 +1,32 @@
+import { Flex } from '@chakra-ui/react';
+import { RootState } from '../../app/store';
+import { useAppDispatch, useAppSelector } from '../../app/store';
+import { setHiresFix } from './optionsSlice';
+import { ChangeEvent } from 'react';
+import IAISwitch from '../../common/components/IAISwitch';
+
+/**
+ * Image output options. Includes width, height, seamless tiling.
+ */
+const HiresOptions = () => {
+  const dispatch = useAppDispatch();
+
+  const hiresFix = useAppSelector((state: RootState) => state.options.hiresFix);
+
+  const handleChangeHiresFix = (e: ChangeEvent<HTMLInputElement>) =>
+    dispatch(setHiresFix(e.target.checked));
+
+  return (
+    <Flex gap={2} direction={'column'}>
+      <IAISwitch
+        label="High Res Optimization"
+        fontSize={'md'}
+        isChecked={hiresFix}
+        onChange={handleChangeHiresFix}
+      />
+    </Flex>
+  );
+};
+
+export default HiresOptions;

View File

@@ -1,29 +1,14 @@
import { Flex } from '@chakra-ui/react';
-import { RootState } from '../../app/store';
-import { useAppDispatch, useAppSelector } from '../../app/store';
-import { setSeamless } from './optionsSlice';
-import { ChangeEvent } from 'react';
-import IAISwitch from '../../common/components/IAISwitch';
-
-/**
- * Image output options. Includes width, height, seamless tiling.
- */
+import HiresOptions from './HiresOptions';
+import SeamlessOptions from './SeamlessOptions';

const OutputOptions = () => {
-  const dispatch = useAppDispatch();
-  const seamless = useAppSelector((state: RootState) => state.options.seamless);
-
-  const handleChangeSeamless = (e: ChangeEvent<HTMLInputElement>) =>
-    dispatch(setSeamless(e.target.checked));
-
  return (
    <Flex gap={2} direction={'column'}>
-     <IAISwitch
-       label="Seamless tiling"
-       fontSize={'md'}
-       isChecked={seamless}
-       onChange={handleChangeSeamless}
-     />
+     <SeamlessOptions />
+     <HiresOptions />
    </Flex>
  );
};

View File

@@ -0,0 +1,28 @@
+import { Flex } from '@chakra-ui/react';
+import { RootState } from '../../app/store';
+import { useAppDispatch, useAppSelector } from '../../app/store';
+import { setSeamless } from './optionsSlice';
+import { ChangeEvent } from 'react';
+import IAISwitch from '../../common/components/IAISwitch';
+
+const SeamlessOptions = () => {
+  const dispatch = useAppDispatch();
+
+  const seamless = useAppSelector((state: RootState) => state.options.seamless);
+
+  const handleChangeSeamless = (e: ChangeEvent<HTMLInputElement>) =>
+    dispatch(setSeamless(e.target.checked));
+
+  return (
+    <Flex gap={2} direction={'column'}>
+      <IAISwitch
+        label="Seamless tiling"
+        fontSize={'md'}
+        isChecked={seamless}
+        onChange={handleChangeSeamless}
+      />
+    </Flex>
+  );
+};
+
+export default SeamlessOptions;

View File

@@ -25,6 +25,7 @@ export interface OptionsState {
  initialImagePath: string | null;
  maskPath: string;
  seamless: boolean;
+ hiresFix: boolean;
  shouldFitToWidthHeight: boolean;
  shouldGenerateVariations: boolean;
  variationAmount: number;
@@ -50,6 +51,7 @@ const initialOptionsState: OptionsState = {
  perlin: 0,
  seed: 0,
  seamless: false,
+ hiresFix: false,
  shouldUseInitImage: false,
  img2imgStrength: 0.75,
  initialImagePath: null,
@@ -138,6 +140,9 @@ export const optionsSlice = createSlice({
    setSeamless: (state, action: PayloadAction<boolean>) => {
      state.seamless = action.payload;
    },
+   setHiresFix: (state, action: PayloadAction<boolean>) => {
+     state.hiresFix = action.payload;
+   },
    setShouldFitToWidthHeight: (state, action: PayloadAction<boolean>) => {
      state.shouldFitToWidthHeight = action.payload;
    },
@@ -180,6 +185,7 @@ export const optionsSlice = createSlice({
      threshold,
      perlin,
      seamless,
+     hires_fix,
      width,
      height,
      strength,
@@ -256,6 +262,7 @@ export const optionsSlice = createSlice({
      if (perlin) state.perlin = perlin;
      if (typeof perlin === 'undefined') state.perlin = 0;
      if (typeof seamless === 'boolean') state.seamless = seamless;
+     if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix;
      if (width) state.width = width;
      if (height) state.height = height;
    },
@@ -301,6 +308,7 @@ export const {
  setSampler,
  setSeed,
  setSeamless,
+ setHiresFix,
  setImg2imgStrength,
  setGfpganStrength,
  setUpscalingLevel,

View File

@@ -35,6 +35,24 @@ from ldm.invoke.devices import choose_torch_device, choose_precision
from ldm.invoke.conditioning import get_uc_and_c
from ldm.invoke.model_cache import ModelCache

+def fix_func(orig):
+    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+        def new_func(*args, **kw):
+            device = kw.get("device", "mps")
+            kw["device"]="cpu"
+            return orig(*args, **kw).to(device)
+        return new_func
+    return orig
+
+torch.rand = fix_func(torch.rand)
+torch.rand_like = fix_func(torch.rand_like)
+torch.randn = fix_func(torch.randn)
+torch.randn_like = fix_func(torch.randn_like)
+torch.randint = fix_func(torch.randint)
+torch.randint_like = fix_func(torch.randint_like)
+torch.bernoulli = fix_func(torch.bernoulli)
+torch.multinomial = fix_func(torch.multinomial)

"""Simplified text to image API for stable diffusion/latent diffusion

Example Usage:
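The monkey-patch in the hunk above carries no explanation in the diff; presumably it routes RNG ops through the CPU on Apple-silicon (MPS) machines so that seeded noise stays reproducible and ops that were unreliable on MPS still work. A minimal stand-alone illustration of the resulting behaviour (assumes an MPS-enabled PyTorch build and the patch being active; not part of the commit):

```python
import torch

# With the patch applied, torch.randn(..., device="mps") draws the noise on the
# CPU generator and only then moves the tensor to MPS, so a fixed seed gives the
# same values as a plain CPU run.
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
    torch.manual_seed(42)
    on_mps = torch.randn(2, 2, device="mps")   # drawn on CPU, then moved to MPS
    torch.manual_seed(42)
    on_cpu = torch.randn(2, 2, device="cpu")   # drawn and kept on CPU
    print(torch.allclose(on_mps.cpu(), on_cpu))  # True once the patch is active
```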
@@ -137,6 +155,7 @@ class Generate:
        self.precision = precision
        self.strength = 0.75
        self.seamless = False
+       self.hires_fix = False
        self.embedding_path = embedding_path
        self.model = None  # empty for now
        self.model_hash = None
@@ -156,6 +175,7 @@ class Generate:
        # device to Generate(). However the device was then ignored, so
        # it wasn't actually doing anything. This logic could be reinstated.
        device_type = choose_torch_device()
+       print(f'>> Using device_type {device_type}')
        self.device = torch.device(device_type)
        if full_precision:
            if self.precision != 'auto':
@@ -236,7 +256,7 @@ class Generate:
        embiggen_tiles = None,
        # these are specific to GFPGAN/ESRGAN
        facetool = None,
-       gfpgan_strength = 0,
+       facetool_strength = 0,
        codeformer_fidelity = None,
        save_original = False,
        upscale = None,
@@ -256,9 +276,10 @@ class Generate:
           height              // height of image, in multiples of 64 (512)
           cfg_scale           // how strongly the prompt influences the image (7.5) (must be >1)
           seamless            // whether the generated image should tile
+          hires_fix           // whether the Hires Fix should be applied during generation
           init_img            // path to an initial image
           strength            // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
-          gfpgan_strength     // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely
+          facetool_strength   // strength for GFPGAN/CodeFormer. 0.0 preserves image exactly, 1.0 replaces it completely
           ddim_eta            // image randomness (eta=0.0 means the same seed always produces the same image)
           step_callback       // a function or method that will be called each step
           image_callback      // a function or method that will be called each time an image is generated
@@ -289,6 +310,7 @@ class Generate:
        width = width or self.width
        height = height or self.height
        seamless = seamless or self.seamless
+       hires_fix = hires_fix or self.hires_fix
        cfg_scale = cfg_scale or self.cfg_scale
        ddim_eta = ddim_eta or self.ddim_eta
        iterations = iterations or self.iterations
@@ -405,11 +427,11 @@ class Generate:
                reference_image_path = init_color,
                image_callback = image_callback)

-           if upscale is not None or gfpgan_strength > 0:
+           if upscale is not None or facetool_strength > 0:
                self.upscale_and_reconstruct(results,
                                             upscale = upscale,
                                             facetool = facetool,
-                                            strength = gfpgan_strength,
+                                            strength = facetool_strength,
                                             codeformer_fidelity = codeformer_fidelity,
                                             save_original = save_original,
                                             image_callback = image_callback)
@@ -452,7 +474,7 @@ class Generate:
            self,
            image_path,
            tool = 'gfpgan',  # one of 'upscale', 'gfpgan', 'codeformer', 'outpaint', or 'embiggen'
-           gfpgan_strength = 0.0,
+           facetool_strength = 0.0,
            codeformer_fidelity = 0.75,
            upscale = None,
            out_direction = None,
@@ -499,11 +521,11 @@ class Generate:
            facetool = 'codeformer'
        elif tool == 'upscale':
            facetool = 'gfpgan'  # but won't be run
-           gfpgan_strength = 0
+           facetool_strength = 0
        return self.upscale_and_reconstruct(
            [[image,seed]],
            facetool = facetool,
-           strength = gfpgan_strength,
+           strength = facetool_strength,
            codeformer_fidelity = codeformer_fidelity,
            save_original = save_original,
            upscale = upscale,

View File

@@ -242,9 +242,13 @@ class Args(object):
        else:
            switches.append(f'-A {a["sampler_name"]}')

-       # gfpgan-specific parameters
-       if a['gfpgan_strength']:
-           switches.append(f'-G {a["gfpgan_strength"]}')
+       # facetool-specific parameters
+       if a['facetool']:
+           switches.append(f'-ft {a["facetool"]}')
+       if a['facetool_strength']:
+           switches.append(f'-G {a["facetool_strength"]}')
+       if a['codeformer_fidelity']:
+           switches.append(f'-cf {a["codeformer_fidelity"]}')
        if a['outcrop']:
            switches.append(f'-c {" ".join([str(u) for u in a["outcrop"]])}')
@@ -636,6 +640,13 @@ class Args(object):
            dest='hires_fix',
            help='Create hires image using img2img to prevent duplicated objects'
        )
+       render_group.add_argument(
+           '--save_intermediates',
+           type=int,
+           default=0,
+           dest='save_intermediates',
+           help='Save every nth intermediate image into an "intermediates" directory within the output directory'
+       )
        img2img_group.add_argument(
            '-I',
            '--init_img',
@@ -692,6 +703,7 @@ class Args(object):
        )
        postprocessing_group.add_argument(
            '-G',
+           '--facetool_strength',
            '--gfpgan_strength',
            type=float,
            help='The strength at which to apply the face restoration to the result.',
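Because the `-G` argument now lists both option strings against one destination, the legacy flag should keep working after the rename. A small stand-alone argparse sketch of that aliasing pattern (the option names come from the hunk above; the parser itself is illustrative, not the project's Args class):

```python
import argparse

# With several option strings on one argument, argparse derives the destination
# from the first long option ('facetool_strength'), so both spellings land there.
parser = argparse.ArgumentParser()
parser.add_argument('-G', '--facetool_strength', '--gfpgan_strength',
                    type=float, default=0.0,
                    help='Strength of face restoration to apply.')

print(parser.parse_args(['--gfpgan_strength', '0.8']).facetool_strength)  # 0.8
print(parser.parse_args(['-G', '0.6']).facetool_strength)                 # 0.6
```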

View File

@@ -33,6 +33,7 @@ COMMANDS = (
    '--perlin',
    '--grid','-g',
    '--individual','-i',
+   '--save_intermediates',
    '--init_img','-I',
    '--init_mask','-M',
    '--init_color',
@@ -43,7 +44,9 @@ COMMANDS = (
    '--embedding_path',
    '--device',
    '--grid','-g',
-   '--gfpgan_strength','-G',
+   '--facetool','-ft',
+   '--facetool_strength','-G',
+   '--codeformer_fidelity','-cf',
    '--upscale','-U',
    '-save_orig','--save_original',
    '--skip_normalize','-x',

View File

@@ -31,12 +31,13 @@ def build_opt(post_data, seed, gfpgan_model_exists):
    setattr(opt, 'embiggen', None)
    setattr(opt, 'embiggen_tiles', None)

-   setattr(opt, 'gfpgan_strength', float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0)
+   setattr(opt, 'facetool_strength', float(post_data['facetool_strength']) if gfpgan_model_exists else 0)
    setattr(opt, 'upscale', [int(post_data['upscale_level']), float(post_data['upscale_strength'])] if post_data['upscale_level'] != '' else None)
    setattr(opt, 'progress_images', 'progress_images' in post_data)
    setattr(opt, 'seed', None if int(post_data['seed']) == -1 else int(post_data['seed']))
    setattr(opt, 'threshold', float(post_data['threshold']))
    setattr(opt, 'perlin', float(post_data['perlin']))
+   setattr(opt, 'hires_fix', 'hires_fix' in post_data)
    setattr(opt, 'variation_amount', float(post_data['variation_amount']) if int(post_data['seed']) != -1 else 0)
    setattr(opt, 'with_variations', [])
    setattr(opt, 'embiggen', None)
@@ -196,7 +197,7 @@ class DreamServer(BaseHTTPRequestHandler):
                ) + '\n',"utf-8"))

        # control state of the "postprocessing..." message
-       upscaling_requested = opt.upscale or opt.gfpgan_strength > 0
+       upscaling_requested = opt.upscale or opt.facetool_strength > 0
        nonlocal images_generated  # NB: Is this bad python style? It is typical usage in a perl closure.
        nonlocal images_upscaled   # NB: Is this bad python style? It is typical usage in a perl closure.
        if upscaled:

View File

@@ -98,7 +98,8 @@ class KSampler(Sampler):
                rho=7.,
                device=self.device,
            )
-           self.sigmas = self.karras_sigmas
+           self.sigmas = self.model_sigmas
+           #self.sigmas = self.karras_sigmas

    # ALERT: We are completely overriding the sample() method in the base class, which
    # means that inpainting will not work. To get this to work we need to be able to

View File

@@ -140,7 +140,7 @@ class Sampler(object):
        conditioning=None,
        callback=None,
        normals_sequence=None,
-       img_callback=None,
+       img_callback=None,   # TODO: this is very confusing because it is called "step_callback" elsewhere. Change.
        quantize_x0=False,
        eta=0.0,
        mask=None,

View File

@@ -49,9 +49,15 @@ class Upsample(nn.Module):
                                        padding=1)

    def forward(self, x):
+       cpu_m1_cond = True if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available() and \
+           x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3] % 2**27 == 0 else False
+       if cpu_m1_cond:
+           x = x.to('cpu')  # send to cpu
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
+       if cpu_m1_cond:
+           x = x.to('mps')  # return to mps
        return x
@@ -117,6 +123,14 @@ class ResnetBlock(nn.Module):
                                                    padding=0)

    def forward(self, x, temb):
+       if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+           x_size = x.size()
+           if (x_size[0] * x_size[1] * x_size[2] * x_size[3]) % 2**29 == 0:
+               self.to('cpu')
+               x = x.to('cpu')
+           else:
+               self.to('mps')
+               x = x.to('mps')
        h = self.norm1(x)
        h = silu(h)
        h = self.conv1(h)
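Neither workaround above explains its magic numbers; they appear to be empirical thresholds at which the MPS backend mishandled large tensors, so those shapes fall back to the CPU. A small sketch of the size check, for reference (the 2**27 / 2**29 constants are copied from the hunk; the helper itself is illustrative, not part of the commit):

```python
import torch

def needs_cpu_fallback(x: torch.Tensor, threshold_pow: int) -> bool:
    # True when running on MPS and the tensor's element count is an exact
    # multiple of 2**threshold_pow (27 in Upsample.forward, 29 in ResnetBlock.forward).
    if not (hasattr(torch.backends, "mps") and torch.backends.mps.is_available()):
        return False
    n = x.size(0) * x.size(1) * x.size(2) * x.size(3)
    return n % (2 ** threshold_pow) == 0
```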

View File

@@ -6,7 +6,7 @@
    "id": "ycYWcsEKc6w7"
   },
   "source": [
-   "# Stable Diffusion AI Notebook (Release 1.14)\n",
+   "# Stable Diffusion AI Notebook (Release 2.0.0)\n",
    "\n",
    "<img src=\"https://user-images.githubusercontent.com/60411196/186547976-d9de378a-9de8-4201-9c25-c057a9c59bad.jpeg\" alt=\"stable-diffusion-ai\" width=\"170px\"/> <br>\n",
    "#### Instructions:\n",
@@ -58,8 +58,8 @@
    "from os.path import exists\n",
    "\n",
    "!git clone --quiet https://github.com/invoke-ai/InvokeAI.git # Original repo\n",
-   "%cd /content/stable-diffusion/\n",
-   "!git checkout --quiet tags/release-1.14.1"
+   "%cd /content/InvokeAI/\n",
+   "!git checkout --quiet tags/v2.0.0"
   ]
  },
  {
@@ -79,6 +79,7 @@
    "!pip install colab-xterm\n",
    "!pip install -r requirements-lin-win-colab-CUDA.txt\n",
    "!pip install clean-fid torchtext\n",
+   "!pip install transformers\n",
    "gc.collect()"
   ]
  },
@@ -106,7 +107,7 @@
   "source": [
    "#@title 5. Load small ML models required\n",
    "import gc\n",
-   "%cd /content/stable-diffusion/\n",
+   "%cd /content/InvokeAI/\n",
    "!python scripts/preload_models.py\n",
    "gc.collect()"
   ]
@@ -171,18 +172,18 @@
    "import os \n",
    "\n",
    "# Folder creation if it doesn't exist\n",
-   "if exists(\"/content/stable-diffusion/models/ldm/stable-diffusion-v1\"):\n",
+   "if exists(\"/content/InvokeAI/models/ldm/stable-diffusion-v1\"):\n",
    " print(\"❗ Dir stable-diffusion-v1 already exists\")\n",
    "else:\n",
-   " %mkdir /content/stable-diffusion/models/ldm/stable-diffusion-v1\n",
+   " %mkdir /content/InvokeAI/models/ldm/stable-diffusion-v1\n",
    " print(\"✅ Dir stable-diffusion-v1 created\")\n",
    "\n",
    "# Symbolic link if it doesn't exist\n",
-   "if exists(\"/content/stable-diffusion/models/ldm/stable-diffusion-v1/model.ckpt\"):\n",
+   "if exists(\"/content/InvokeAI/models/ldm/stable-diffusion-v1/model.ckpt\"):\n",
    " print(\"❗ Symlink already created\")\n",
    "else: \n",
    " src = model_path\n",
-   " dst = '/content/stable-diffusion/models/ldm/stable-diffusion-v1/model.ckpt'\n",
+   " dst = '/content/InvokeAI/models/ldm/stable-diffusion-v1/model.ckpt'\n",
    " os.symlink(src, dst) \n",
    " print(\"✅ Symbolic link created successfully\")"
   ]
@@ -207,7 +208,7 @@
   "source": [
    "#@title 9. Run Terminal and Execute Dream bot\n",
    "#@markdown <font color=\"blue\">Steps:</font> <br>\n",
-   "#@markdown 1. Execute command `python scripts/dream.py` to run dream bot.<br>\n",
+   "#@markdown 1. Execute command `python scripts/invoke.py` to run InvokeAI.<br>\n",
    "#@markdown 2. After initialized you'll see `Dream>` line.<br>\n",
    "#@markdown 3. Example text: `Astronaut floating in a distant galaxy` <br>\n",
    "#@markdown 4. To quit Dream bot use: `q` command.<br>\n",
@@ -233,7 +234,7 @@
    "%matplotlib inline\n",
    "\n",
    "images = []\n",
-   "for img_path in sorted(glob.glob('/content/stable-diffusion/outputs/img-samples/*.png'), reverse=True):\n",
+   "for img_path in sorted(glob.glob('/content/InvokeAI/outputs/img-samples/*.png'), reverse=True):\n",
    " images.append(mpimg.imread(img_path))\n",
    "\n",
    "images = images[:15] \n",

View File

@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)

-import sys
-import os.path
-
-script_path = sys.argv[0]
-script_args = sys.argv[1:]
-script_dir,script_name = os.path.split(script_path)
-script_dest = os.path.join(script_dir,'invoke.py')
-os.execlp('python3','python3',script_dest,*script_args)
+import warnings
+import invoke
+
+if __name__ == '__main__':
+    warnings.warn("dream.py is being deprecated, please run invoke.py for the "
+                  "new UI/API or legacy_api.py for the old API",
+                  DeprecationWarning)
+    invoke.main()

View File

@@ -236,6 +236,7 @@ def main_loop(gen, opt, infile):
    grid_images = dict()  # seed -> Image, only used if `opt.grid`
    prior_variations = opt.with_variations or []
    prefix = file_writer.unique_prefix()
+   step_callback = make_step_callback(gen, opt, prefix) if opt.save_intermediates > 0 else None

    def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None):
        # note the seed is the seed of the current image
@@ -297,6 +298,7 @@ def main_loop(gen, opt, infile):
            opt.last_operation='generate'
            gen.prompt2image(
                image_callback=image_writer,
+               step_callback=step_callback,
                catch_interrupts=catch_ctrl_c,
                **vars(opt)
            )
@@ -494,7 +496,7 @@ def do_postprocess (gen, opt, callback):
        file_path = os.path.join(opt.outdir,file_path)

    tool=None
-   if opt.gfpgan_strength > 0:
+   if opt.facetool_strength > 0:
        tool = opt.facetool
    elif opt.embiggen:
        tool = 'embiggen'
@@ -510,7 +512,7 @@ def do_postprocess (gen, opt, callback):
    gen.apply_postprocessor(
        image_path = file_path,
        tool = tool,
-       gfpgan_strength = opt.gfpgan_strength,
+       facetool_strength = opt.facetool_strength,
        codeformer_fidelity = opt.codeformer_fidelity,
        save_original = opt.save_original,
        upscale = opt.upscale,
@@ -666,6 +668,17 @@ def load_face_restoration(opt):
    return gfpgan,codeformer,esrgan

+def make_step_callback(gen, opt, prefix):
+    destination = os.path.join(opt.outdir,'intermediates',prefix)
+    os.makedirs(destination,exist_ok=True)
+    print(f'>> Intermediate images will be written into {destination}')
+    def callback(img, step):
+        if step % opt.save_intermediates == 0 or step == opt.steps-1:
+            filename = os.path.join(destination,f'{step:04}.png')
+            image = gen.sample_to_image(img)
+            image.save(filename,'PNG')
+    return callback
+
def retrieve_dream_command(opt,file_path,completer):
    '''
    Given a full or partial path to a previously-generated image file,
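For reference, the cadence implied by the new callback above: with `--save_intermediates n`, an intermediate PNG is written on every n-th step and on the final step. A tiny stand-alone check of that arithmetic (the step counts are illustrative):

```python
# Which steps get an intermediate image for --save_intermediates 5 over 50 steps,
# per the `step % opt.save_intermediates == 0 or step == opt.steps - 1` test above.
save_every, steps = 5, 50
saved = [s for s in range(steps) if s % save_every == 0 or s == steps - 1]
print(saved)  # [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 49]
```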

View File

@@ -35,13 +35,14 @@ class DreamBase():
    perlin: float = 0.0
    sampler_name: string = 'klms'
    seamless: bool = False
+   hires_fix: bool = False
    model: str = None  # The model to use (currently unused)
    embeddings = None  # The embeddings to use (currently unused)
    progress_images: bool = False

    # GFPGAN
    enable_gfpgan: bool
-   gfpgan_strength: float = 0
+   facetool_strength: float = 0

    # Upscale
    enable_upscale: bool
@@ -91,12 +92,13 @@ class DreamBase():
        # model: str = None # The model to use (currently unused)
        # embeddings = None # The embeddings to use (currently unused)
        self.seamless = 'seamless' in j
+       self.hires_fix = 'hires_fix' in j
        self.progress_images = 'progress_images' in j

        # GFPGAN
        self.enable_gfpgan = 'enable_gfpgan' in j and bool(j.get('enable_gfpgan'))
        if self.enable_gfpgan:
-           self.gfpgan_strength = float(j.get('gfpgan_strength'))
+           self.facetool_strength = float(j.get('facetool_strength'))

        # Upscale
        self.enable_upscale = 'enable_upscale' in j and bool(j.get('enable_upscale'))

View File

@@ -334,11 +334,11 @@ class GeneratorService:
        # TODO: Support no generation (just upscaling/gfpgan)
        upscale = None if not jobRequest.enable_upscale else jobRequest.upscale
-       gfpgan_strength = 0 if not jobRequest.enable_gfpgan else jobRequest.gfpgan_strength
+       facetool_strength = 0 if not jobRequest.enable_gfpgan else jobRequest.facetool_strength

        if not jobRequest.enable_generate:
            # If not generating, check if we're upscaling or running gfpgan
-           if not upscale and not gfpgan_strength:
+           if not upscale and not facetool_strength:
                # Invalid settings (TODO: Add message to help user)
                raise CanceledException()
@@ -347,7 +347,7 @@ class GeneratorService:
            self.__model.upscale_and_reconstruct(
                image_list = [[image,0]],
                upscale = upscale,
-               strength = gfpgan_strength,
+               strength = facetool_strength,
                save_original = False,
                image_callback = lambda image, seed, upscaled=False: self.__on_image_result(jobRequest, image, seed, upscaled))
@@ -371,10 +371,11 @@ class GeneratorService:
                steps = jobRequest.steps,
                variation_amount = jobRequest.variation_amount,
                with_variations = jobRequest.with_variations,
-               gfpgan_strength = gfpgan_strength,
+               facetool_strength = facetool_strength,
                upscale = upscale,
                sampler_name = jobRequest.sampler_name,
                seamless = jobRequest.seamless,
+               hires_fix = jobRequest.hires_fix,
                embiggen = jobRequest.embiggen,
                embiggen_tiles = jobRequest.embiggen_tiles,
                step_callback = lambda sample, step: self.__on_progress(jobRequest, sample, step),

View File

@@ -144,8 +144,8 @@
          <input type="checkbox" name="enable_gfpgan" id="enable_gfpgan">
          <label for="enable_gfpgan">Enable gfpgan</label>
        </legend>
-       <label title="Strength of the gfpgan (face fixing) algorithm." for="gfpgan_strength">GPFGAN Strength:</label>
-       <input value="0.8" min="0" max="1" type="number" id="gfpgan_strength" name="gfpgan_strength" step="0.05">
+       <label title="Strength of the gfpgan (face fixing) algorithm." for="facetool_strength">GPFGAN Strength:</label>
+       <input value="0.8" min="0" max="1" type="number" id="facetool_strength" name="facetool_strength" step="0.05">
      </fieldset>
      <fieldset id="upscale">
        <legend>

View File

@@ -100,8 +100,8 @@
      </fieldset>
      <fieldset id="gfpgan">
        <div class="section-header">Post-processing options</div>
-       <label title="Strength of the gfpgan (face fixing) algorithm." for="gfpgan_strength">GPFGAN Strength (0 to disable):</label>
-       <input value="0.0" min="0" max="1" type="number" id="gfpgan_strength" name="gfpgan_strength" step="0.1">
+       <label title="Strength of the gfpgan (face fixing) algorithm." for="facetool_strength">GPFGAN Strength (0 to disable):</label>
+       <input value="0.0" min="0" max="1" type="number" id="facetool_strength" name="facetool_strength" step="0.1">
        <label title="Upscaling to perform using ESRGAN." for="upscale_level">Upscaling Level</label>
        <select id="upscale_level" name="upscale_level" value="">
          <option value="" selected>None</option>