add option to show intermediate latent space

damian 2022-11-01 11:17:43 +01:00 committed by Lincoln Stein
parent be1393a41c
commit cdb107dcda
9 changed files with 751 additions and 7 deletions

View File

@@ -604,12 +604,15 @@ class InvokeAIWebServer:
             progress.set_current_status("Generating")
             progress.set_current_status_has_steps(True)
+            wants_progress_image = generation_parameters['progress_images'] and step % 5 == 0
+            wants_progress_latents = generation_parameters['progress_latents']
             if (
-                generation_parameters["progress_images"]
-                and step % 5 == 0
-                and step < generation_parameters["steps"] - 1
+                wants_progress_image | wants_progress_latents
+                and step < generation_parameters['steps'] - 1
             ):
-                image = self.generate.sample_to_image(sample)
+                image = self.generate.sample_to_image(sample) if wants_progress_image \
+                    else self.generate.sample_to_lowres_estimated_image(sample)
                 metadata = self.parameters_to_generated_image_metadata(
                     generation_parameters
                 )
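
The sketch below restates the step-callback decision from the hunk above in isolation. It is illustrative only: the helper name and arguments are hypothetical, and it uses 'or' where the commit uses '|'. The conditions mirror the code above: a full decode every 5th step when progress images are enabled, the cheap latent estimate otherwise when progress latents are enabled, and nothing for the final step.

    def pick_preview_mode(step, total_steps, progress_images, progress_latents):
        # Hypothetical helper, not part of the commit: mirrors the callback above.
        wants_progress_image = progress_images and step % 5 == 0
        wants_progress_latents = progress_latents
        if (wants_progress_image or wants_progress_latents) and step < total_steps - 1:
            # Full VAE decode when a progress image is due; otherwise the quick estimate.
            return 'sample_to_image' if wants_progress_image else 'sample_to_lowres_estimated_image'
        return None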

690  frontend/dist/assets/index.ae92a637.js  vendored  Normal file

File diff suppressed because one or more lines are too long

View File

@@ -62,7 +62,7 @@ export const frontendToBackendParameters = (
     shouldRandomizeSeed,
   } = optionsState;
 
-  const { shouldDisplayInProgress } = systemState;
+  const { shouldDisplayInProgress, shouldDisplayInProgressLatents } = systemState;
 
   const generationParameters: { [k: string]: any } = {
     prompt,
@@ -76,6 +76,7 @@ export const frontendToBackendParameters = (
     sampler_name: sampler,
     seed,
     progress_images: shouldDisplayInProgress,
+    progress_latents: shouldDisplayInProgressLatents,
   };
 
   generationParameters.seed = shouldRandomizeSeed

View File

@@ -21,6 +21,7 @@ import {
   setShouldConfirmOnDelete,
   setShouldDisplayGuides,
   setShouldDisplayInProgress,
+  setShouldDisplayInProgressLatents,
   SystemState,
 } from '../systemSlice';
 import ModelList from './ModelList';
@@ -31,12 +32,14 @@ const systemSelector = createSelector(
   (system: SystemState) => {
     const {
       shouldDisplayInProgress,
+      shouldDisplayInProgressLatents,
       shouldConfirmOnDelete,
       shouldDisplayGuides,
       model_list,
     } = system;
     return {
       shouldDisplayInProgress,
+      shouldDisplayInProgressLatents,
       shouldConfirmOnDelete,
       shouldDisplayGuides,
       models: _.map(model_list, (_model, key) => key),
@@ -73,6 +76,7 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
   const {
     shouldDisplayInProgress,
+    shouldDisplayInProgressLatents,
     shouldConfirmOnDelete,
     shouldDisplayGuides,
   } = useAppSelector(systemSelector);
@@ -108,6 +112,12 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
           dispatcher={setShouldDisplayInProgress}
         />
+        <SettingsModalItem
+          settingTitle="Display In-Progress Latents (quick; lo-res)"
+          isChecked={shouldDisplayInProgressLatents}
+          dispatcher={setShouldDisplayInProgressLatents}
+        />
         <SettingsModalItem
           settingTitle="Confirm on Delete"
           isChecked={shouldConfirmOnDelete}

View File

@@ -19,6 +19,7 @@ export interface SystemState
   extends InvokeAI.SystemStatus,
     InvokeAI.SystemConfig {
   shouldDisplayInProgress: boolean;
+  shouldDisplayInProgressLatents: boolean;
   log: Array<LogEntry>;
   shouldShowLogViewer: boolean;
   isGFPGANAvailable: boolean;
@@ -44,6 +45,7 @@ const initialSystemState = {
   log: [],
   shouldShowLogViewer: false,
   shouldDisplayInProgress: false,
+  shouldDisplayInProgressLatents: false,
   shouldDisplayGuides: true,
   isGFPGANAvailable: true,
   isESRGANAvailable: true,
@@ -76,6 +78,9 @@ export const systemSlice = createSlice({
     setShouldDisplayInProgress: (state, action: PayloadAction<boolean>) => {
       state.shouldDisplayInProgress = action.payload;
     },
+    setShouldDisplayInProgressLatents: (state, action: PayloadAction<boolean>) => {
+      state.shouldDisplayInProgressLatents = action.payload;
+    },
     setIsProcessing: (state, action: PayloadAction<boolean>) => {
       state.isProcessing = action.payload;
     },
@@ -183,6 +188,7 @@ export const systemSlice = createSlice({
 export const {
   setShouldDisplayInProgress,
+  setShouldDisplayInProgressLatents,
   setIsProcessing,
   addLogEntry,
   setShouldShowLogViewer,

View File

@@ -901,6 +901,9 @@ class Generate:
     def sample_to_image(self, samples):
         return self._make_base().sample_to_image(samples)
 
+    def sample_to_lowres_estimated_image(self, samples):
+        return self._make_base().sample_to_lowres_estimated_image(samples)
+
     # very repetitive code - can this be simplified? The KSampler names are
     # consistent, at least
     def _set_sampler(self):

View File

@@ -116,6 +116,29 @@ class Generator():
         )
         return Image.fromarray(x_sample.astype(np.uint8))
 
+    # write an approximate RGB image from latent samples for a single step to PNG
+    def sample_to_lowres_estimated_image(self, samples):
+        # adapted from code by @erucipe and @keturn here:
+        # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
+
+        # these numbers were determined empirically by @keturn
+        v1_4_latent_rgb_factors = torch.tensor([
+            #    R        G        B
+            [ 0.298,  0.207,  0.208],  # L1
+            [ 0.187,  0.286,  0.173],  # L2
+            [-0.158,  0.189,  0.264],  # L3
+            [-0.184, -0.271, -0.473],  # L4
+        ], dtype=samples.dtype, device=samples.device)
+
+        latent_image = samples[0].permute(1, 2, 0) @ v1_4_latent_rgb_factors
+        latents_ubyte = (((latent_image + 1) / 2)
+                         .clamp(0, 1)  # change scale from -1..1 to 0..1
+                         .mul(0xFF)    # to 0..255
+                         .byte()).cpu()
+
+        return Image.fromarray(latents_ubyte.numpy())
+
     def generate_initial_noise(self, seed, width, height):
         initial_noise = None
         if self.variation_amount > 0 or len(self.with_variations) > 0:
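
As a standalone illustration of sample_to_lowres_estimated_image (not part of the commit), the sketch below applies the same projection to a dummy Stable Diffusion v1.x latent. The 1x4x64x64 shape (one 512x512 generation) and the output filename are assumptions made for the example.

    import torch
    from PIL import Image

    # Empirical projection from the 4 SD v1.x latent channels to RGB
    # (same factors as in the method above).
    v1_4_latent_rgb_factors = torch.tensor([
        #    R        G        B
        [ 0.298,  0.207,  0.208],
        [ 0.187,  0.286,  0.173],
        [-0.158,  0.189,  0.264],
        [-0.184, -0.271, -0.473],
    ])

    # Stand-in for one denoising step's latents: 1 image, 4 channels, 64x64 grid.
    samples = torch.randn(1, 4, 64, 64)

    # [4, 64, 64] -> [64, 64, 4] @ [4, 3] -> [64, 64, 3]
    latent_image = samples[0].permute(1, 2, 0) @ v1_4_latent_rgb_factors
    latents_ubyte = (((latent_image + 1) / 2)  # roughly -1..1 -> 0..1
                     .clamp(0, 1)
                     .mul(0xFF)                # -> 0..255
                     .byte()).cpu()

    Image.fromarray(latents_ubyte.numpy()).save('latent_preview.png')

Because this is a single 4x3 matrix multiply rather than a full VAE decode, it is cheap enough to run on every step, which is why the new setting is labelled "quick; lo-res".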

View File

@@ -34,6 +34,7 @@ def build_opt(post_data, seed, gfpgan_model_exists):
     setattr(opt, 'facetool_strength', float(post_data['facetool_strength']) if gfpgan_model_exists else 0)
     setattr(opt, 'upscale', [int(post_data['upscale_level']), float(post_data['upscale_strength'])] if post_data['upscale_level'] != '' else None)
     setattr(opt, 'progress_images', 'progress_images' in post_data)
+    setattr(opt, 'progress_latents', 'progress_latents' in post_data)
     setattr(opt, 'seed', None if int(post_data['seed']) == -1 else int(post_data['seed']))
     setattr(opt, 'threshold', float(post_data['threshold']))
     setattr(opt, 'perlin', float(post_data['perlin']))
@@ -227,8 +228,13 @@ class DreamServer(BaseHTTPRequestHandler):
             # since rendering images is moderately expensive, only render every 5th image
             # and don't bother with the last one, since it'll render anyway
             nonlocal step_index
-            if opt.progress_images and step % 5 == 0 and step < opt.steps - 1:
-                image = self.model.sample_to_image(sample)
+            wants_progress_latents = opt.progress_latents
+            wants_progress_image = opt.progress_images and step % 5 == 0
+            if (wants_progress_image | wants_progress_latents) and step < opt.steps - 1:
+                image = self.model.sample_to_image(sample) if wants_progress_image \
+                    else self.model.sample_to_lowres_estimated_image(sample)
                 step_index_padded = str(step_index).rjust(len(str(opt.steps)), '0')
                 name = f'{prefix}.{opt.seed}.{step_index_padded}.png'
                 metadata = f'{opt.prompt} -S{opt.seed} [intermediate]'

View File

@@ -39,6 +39,7 @@ class DreamBase():
     model: str = None       # The model to use (currently unused)
     embeddings = None       # The embeddings to use (currently unused)
     progress_images: bool = False
+    progress_latents: bool = False
 
     # GFPGAN
     enable_gfpgan: bool
@@ -94,6 +95,7 @@ class DreamBase():
         self.seamless = 'seamless' in j
         self.hires_fix = 'hires_fix' in j
         self.progress_images = 'progress_images' in j
+        self.progress_latents = 'progress_latents' in j
 
         # GFPGAN
         self.enable_gfpgan = 'enable_gfpgan' in j and bool(j.get('enable_gfpgan'))