2021-12-21 02:23:41 +00:00
|
|
|
"""
|
|
|
|
wild mixture of
|
|
|
|
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
|
|
|
|
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
|
|
|
|
https://github.com/CompVis/taming-transformers
|
|
|
|
-- merci
|
|
|
|
"""
|
|
|
|
|
|
|
|
import torch
|
2022-08-23 22:26:28 +00:00
|
|
|
|
2021-12-21 02:23:41 +00:00
|
|
|
import torch.nn as nn
|
2022-08-23 22:26:28 +00:00
|
|
|
import os
|
2021-12-21 02:23:41 +00:00
|
|
|
import numpy as np
|
|
|
|
import pytorch_lightning as pl
|
|
|
|
from torch.optim.lr_scheduler import LambdaLR
|
|
|
|
from einops import rearrange, repeat
|
|
|
|
from contextlib import contextmanager
|
|
|
|
from functools import partial
|
|
|
|
from tqdm import tqdm
|
|
|
|
from torchvision.utils import make_grid
|
|
|
|
from pytorch_lightning.utilities.distributed import rank_zero_only
|
2022-10-25 04:30:48 +00:00
|
|
|
from omegaconf import ListConfig
|
2022-08-17 22:06:30 +00:00
|
|
|
import urllib
|
2021-12-21 02:23:41 +00:00
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
from ldm.util import (
|
|
|
|
log_txt_as_img,
|
|
|
|
exists,
|
|
|
|
default,
|
|
|
|
ismap,
|
|
|
|
isimage,
|
|
|
|
mean_flat,
|
|
|
|
count_params,
|
|
|
|
instantiate_from_config,
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
from ldm.modules.ema import LitEma
|
2022-08-26 07:15:42 +00:00
|
|
|
from ldm.modules.distributions.distributions import (
|
|
|
|
normal_kl,
|
|
|
|
DiagonalGaussianDistribution,
|
|
|
|
)
|
|
|
|
from ldm.models.autoencoder import (
|
|
|
|
VQModelInterface,
|
|
|
|
IdentityFirstStage,
|
|
|
|
AutoencoderKL,
|
|
|
|
)
|
|
|
|
from ldm.modules.diffusionmodules.util import (
|
|
|
|
make_beta_schedule,
|
|
|
|
extract_into_tensor,
|
|
|
|
noise_like,
|
|
|
|
)
|
2021-12-22 14:57:23 +00:00
|
|
|
from ldm.models.diffusion.ddim import DDIMSampler
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
# Maps a conditioning mode name to the keyword argument expected by the
# diffusion model wrapper's forward pass.
__conditioning_keys__ = {
    'concat': 'c_concat',
    'crossattn': 'c_crossattn',
    'adm': 'y',
}
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
|
|
|
|
def disabled_train(self, mode=True):
    """No-op replacement for ``nn.Module.train``.

    Bound in place of ``model.train`` on frozen modules so that later calls
    to ``train()``/``eval()`` can no longer flip their mode. Returns ``self``
    unchanged, mirroring the real method's fluent interface.
    """
    return self
|
|
|
|
|
|
|
|
|
|
|
|
def uniform_on_device(r1, r2, shape, device):
    """Sample a tensor of ``shape`` uniformly from the interval between r2 and r1."""
    return torch.rand(*shape, device=device) * (r1 - r2) + r2
|
|
|
|
|
|
|
|
|
|
|
|
class DDPM(pl.LightningModule):
|
|
|
|
# classic DDPM with Gaussian diffusion, in image space
|
2022-08-26 07:15:42 +00:00
|
|
|
    def __init__(
        self,
        unet_config,
        timesteps=1000,
        beta_schedule='linear',
        loss_type='l2',
        ckpt_path=None,
        ignore_keys=[],  # NOTE(review): mutable default; never mutated here, but fragile
        load_only_unet=False,
        monitor='val/loss',
        use_ema=True,
        first_stage_key='image',
        image_size=256,
        channels=3,
        log_every_t=100,
        clip_denoised=True,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
        given_betas=None,
        original_elbo_weight=0.0,
        embedding_reg_weight=0.0,
        v_posterior=0.0,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
        l_simple_weight=1.0,
        conditioning_key=None,
        parameterization='eps',  # all assuming fixed variance schedules
        scheduler_config=None,
        use_positional_encodings=False,
        learn_logvar=False,
        logvar_init=0.0,
    ):
        """Set up the DDPM: wrap the UNet, optionally build an EMA copy,
        optionally load a checkpoint, and register the beta schedule.

        :param unet_config: config used to instantiate the denoising UNet.
        :param timesteps: number of diffusion steps T.
        :param given_betas: explicit beta array; overrides ``beta_schedule``.
        :param parameterization: 'eps' (predict noise) or 'x0' (predict clean image).
        :param learn_logvar: make the per-timestep log-variance a trainable parameter.
        """
        super().__init__()
        assert parameterization in [
            'eps',
            'x0',
        ], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        print(
            f' | {self.__class__.__name__}: Running in {self.parameterization}-prediction mode'
        )
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        # DiffusionWrapper routes conditioning into the UNet per conditioning_key
        self.model = DiffusionWrapper(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f' | Keeping EMAs of {len(list(self.model_ema.buffers()))}.')

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight
        self.embedding_reg_weight = embedding_reg_weight

        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(
                ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet
            )

        self.register_schedule(
            given_betas=given_betas,
            beta_schedule=beta_schedule,
            timesteps=timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
        )

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        # register_schedule above has set self.num_timesteps
        self.logvar = torch.full(
            fill_value=logvar_init, size=(self.num_timesteps,)
        )
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)
|
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
    def register_schedule(
        self,
        given_betas=None,
        beta_schedule='linear',
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        """Precompute and register every per-timestep diffusion constant.

        The buffers registered here (betas, cumulative alphas and their
        roots/logs, posterior coefficients, VLB weights) are what the
        q_*/p_* methods later index via ``extract_into_tensor``.

        :param given_betas: explicit beta array; if present, overrides the
            schedule parameters below.
        """
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(
                beta_schedule,
                timesteps,
                linear_start=linear_start,
                linear_end=linear_end,
                cosine_s=cosine_s,
            )
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # shifted right by one: index t holds the cumulative product up to t-1
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert (
            alphas_cumprod.shape[0] == self.num_timesteps
        ), 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer(
            'alphas_cumprod_prev', to_torch(alphas_cumprod_prev)
        )

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer(
            'sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))
        )
        self.register_buffer(
            'sqrt_one_minus_alphas_cumprod',
            to_torch(np.sqrt(1.0 - alphas_cumprod)),
        )
        self.register_buffer(
            'log_one_minus_alphas_cumprod',
            to_torch(np.log(1.0 - alphas_cumprod)),
        )
        self.register_buffer(
            'sqrt_recip_alphas_cumprod',
            to_torch(np.sqrt(1.0 / alphas_cumprod)),
        )
        self.register_buffer(
            'sqrt_recipm1_alphas_cumprod',
            to_torch(np.sqrt(1.0 / alphas_cumprod - 1)),
        )

        # calculations for posterior q(x_{t-1} | x_t, x_0);
        # v_posterior interpolates between beta_tilde (v=0) and beta (v=1)
        posterior_variance = (1 - self.v_posterior) * betas * (
            1.0 - alphas_cumprod_prev
        ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer(
            'posterior_variance', to_torch(posterior_variance)
        )
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer(
            'posterior_log_variance_clipped',
            to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
        )
        self.register_buffer(
            'posterior_mean_coef1',
            to_torch(
                betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)
            ),
        )
        self.register_buffer(
            'posterior_mean_coef2',
            to_torch(
                (1.0 - alphas_cumprod_prev)
                * np.sqrt(alphas)
                / (1.0 - alphas_cumprod)
            ),
        )

        if self.parameterization == 'eps':
            lvlb_weights = self.betas**2 / (
                2
                * self.posterior_variance
                * to_torch(alphas)
                * (1 - self.alphas_cumprod)
            )
        elif self.parameterization == 'x0':
            # NOTE(review): mixes np.sqrt with torch.Tensor arguments and uses
            # the odd expression (2.0 * 1 - ...); kept as-is — confirm intended
            lvlb_weights = (
                0.5
                * np.sqrt(torch.Tensor(alphas_cumprod))
                / (2.0 * 1 - torch.Tensor(alphas_cumprod))
            )
        else:
            raise NotImplementedError('mu not supported')
        # TODO how to choose this term
        # weight at t=0 is degenerate (posterior variance is 0); copy from t=1
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()
|
|
|
|
|
|
|
|
    @contextmanager
    def ema_scope(self, context=None):
        """Context manager that temporarily swaps EMA weights into self.model.

        On entry (when EMA is enabled) the live parameters are stashed and
        replaced with their EMA shadow; on exit they are restored even if the
        body raised. ``context`` is an optional label for the log messages.
        """
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f'{context}: Switched to EMA weights')
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f'{context}: Restored training weights')
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
|
2022-08-26 07:15:42 +00:00
|
|
|
sd = torch.load(path, map_location='cpu')
|
|
|
|
if 'state_dict' in list(sd.keys()):
|
|
|
|
sd = sd['state_dict']
|
2021-12-21 02:23:41 +00:00
|
|
|
keys = list(sd.keys())
|
|
|
|
for k in keys:
|
|
|
|
for ik in ignore_keys:
|
|
|
|
if k.startswith(ik):
|
2022-08-26 07:15:42 +00:00
|
|
|
print('Deleting key {} from state_dict.'.format(k))
|
2021-12-21 02:23:41 +00:00
|
|
|
del sd[k]
|
2022-08-26 07:15:42 +00:00
|
|
|
missing, unexpected = (
|
|
|
|
self.load_state_dict(sd, strict=False)
|
|
|
|
if not only_model
|
|
|
|
else self.model.load_state_dict(sd, strict=False)
|
|
|
|
)
|
|
|
|
print(
|
|
|
|
f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys'
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
if len(missing) > 0:
|
2022-08-26 07:15:42 +00:00
|
|
|
print(f'Missing Keys: {missing}')
|
2021-12-21 02:23:41 +00:00
|
|
|
if len(unexpected) > 0:
|
2022-08-26 07:15:42 +00:00
|
|
|
print(f'Unexpected Keys: {unexpected}')
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        # mean = sqrt(alpha_bar_t) * x_0
        mean = (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape)
            * x_start
        )
        # variance = 1 - alpha_bar_t (broadcast to x_start's shape)
        variance = extract_into_tensor(
            1.0 - self.alphas_cumprod, t, x_start.shape
        )
        log_variance = extract_into_tensor(
            self.log_one_minus_alphas_cumprod, t, x_start.shape
        )
        return mean, variance, log_variance
|
|
|
|
|
|
|
|
    def predict_start_from_noise(self, x_t, t, noise):
        """Invert the forward process: recover x_0 from x_t and predicted noise.

        x_0 = x_t * sqrt(1/alpha_bar_t) - noise * sqrt(1/alpha_bar_t - 1)
        """
        return (
            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape)
            * x_t
            - extract_into_tensor(
                self.sqrt_recipm1_alphas_cumprod, t, x_t.shape
            )
            * noise
        )
|
|
|
|
|
|
|
|
    def q_posterior(self, x_start, x_t, t):
        """Mean, variance and clipped log-variance of the true posterior
        q(x_{t-1} | x_t, x_0), using the coefficients registered in
        ``register_schedule``."""
        posterior_mean = (
            extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape)
            * x_start
            + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape)
            * x_t
        )
        posterior_variance = extract_into_tensor(
            self.posterior_variance, t, x_t.shape
        )
        posterior_log_variance_clipped = extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        return (
            posterior_mean,
            posterior_variance,
            posterior_log_variance_clipped,
        )
|
|
|
|
|
|
|
|
    def p_mean_variance(self, x, t, clip_denoised: bool):
        """Parameters of the reverse-step distribution p(x_{t-1} | x_t).

        Runs the model, converts its output to an x_0 estimate according to
        the parameterization, optionally clamps it to [-1, 1], then plugs it
        into the closed-form posterior.
        """
        model_out = self.model(x, t)
        if self.parameterization == 'eps':
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == 'x0':
            x_recon = model_out
        # NOTE(review): any other parameterization would leave x_recon unbound;
        # guarded upstream by the assert in __init__
        if clip_denoised:
            x_recon.clamp_(-1.0, 1.0)

        (
            model_mean,
            posterior_variance,
            posterior_log_variance,
        ) = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance
|
|
|
|
|
|
|
|
    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        """Draw one reverse step x_{t-1} ~ p(x_{t-1} | x_t) for a whole batch.

        :param repeat_noise: share a single noise sample across the batch.
        """
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(
            x=x, t=t, clip_denoised=clip_denoised
        )
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(
            b, *((1,) * (len(x.shape) - 1))
        )
        return (
            model_mean
            + nonzero_mask * (0.5 * model_log_variance).exp() * noise
        )
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        """Run the full reverse chain from pure noise down to t = 0.

        :param shape: (batch, channels, height, width) of the samples.
        :param return_intermediates: also return snapshots taken every
            ``log_every_t`` steps (plus the initial noise).
        """
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(
            reversed(range(0, self.num_timesteps)),
            desc='Sampling t',
            total=self.num_timesteps,
            dynamic_ncols=True,
        ):
            img = self.p_sample(
                img,
                torch.full((b,), i, device=device, dtype=torch.long),
                clip_denoised=self.clip_denoised,
            )
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
def sample(self, batch_size=16, return_intermediates=False):
|
|
|
|
image_size = self.image_size
|
|
|
|
channels = self.channels
|
2022-08-26 07:15:42 +00:00
|
|
|
return self.p_sample_loop(
|
|
|
|
(batch_size, channels, image_size, image_size),
|
|
|
|
return_intermediates=return_intermediates,
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
    def q_sample(self, x_start, t, noise=None):
        """Diffuse x_0 forward to timestep t in closed form:
        x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.

        :param noise: optional pre-drawn noise; defaults to standard normal.
        """
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape)
            * x_start
            + extract_into_tensor(
                self.sqrt_one_minus_alphas_cumprod, t, x_start.shape
            )
            * noise
        )
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
def get_loss(self, pred, target, mean=True):
|
|
|
|
if self.loss_type == 'l1':
|
|
|
|
loss = (target - pred).abs()
|
|
|
|
if mean:
|
|
|
|
loss = loss.mean()
|
|
|
|
elif self.loss_type == 'l2':
|
|
|
|
if mean:
|
|
|
|
loss = torch.nn.functional.mse_loss(target, pred)
|
|
|
|
else:
|
2022-08-26 07:15:42 +00:00
|
|
|
loss = torch.nn.functional.mse_loss(
|
|
|
|
target, pred, reduction='none'
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
else:
|
|
|
|
raise NotImplementedError("unknown loss type '{loss_type}'")
|
|
|
|
|
|
|
|
return loss
|
|
|
|
|
|
|
|
def p_losses(self, x_start, t, noise=None):
|
|
|
|
noise = default(noise, lambda: torch.randn_like(x_start))
|
|
|
|
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
|
|
|
|
model_out = self.model(x_noisy, t)
|
|
|
|
|
|
|
|
loss_dict = {}
|
2022-08-26 07:15:42 +00:00
|
|
|
if self.parameterization == 'eps':
|
2021-12-21 02:23:41 +00:00
|
|
|
target = noise
|
2022-08-26 07:15:42 +00:00
|
|
|
elif self.parameterization == 'x0':
|
2021-12-21 02:23:41 +00:00
|
|
|
target = x_start
|
|
|
|
else:
|
2022-08-26 07:15:42 +00:00
|
|
|
raise NotImplementedError(
|
|
|
|
f'Paramterization {self.parameterization} not yet supported'
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
|
|
|
|
|
|
|
|
log_prefix = 'train' if self.training else 'val'
|
|
|
|
|
|
|
|
loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
|
|
|
|
loss_simple = loss.mean() * self.l_simple_weight
|
|
|
|
|
|
|
|
loss_vlb = (self.lvlb_weights[t] * loss).mean()
|
|
|
|
loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
|
|
|
|
|
|
|
|
loss = loss_simple + self.original_elbo_weight * loss_vlb
|
|
|
|
|
|
|
|
loss_dict.update({f'{log_prefix}/loss': loss})
|
|
|
|
|
|
|
|
return loss, loss_dict
|
|
|
|
|
|
|
|
def forward(self, x, *args, **kwargs):
|
|
|
|
# b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
|
|
|
|
# assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
|
2022-08-26 07:15:42 +00:00
|
|
|
t = torch.randint(
|
|
|
|
0, self.num_timesteps, (x.shape[0],), device=self.device
|
|
|
|
).long()
|
2021-12-21 02:23:41 +00:00
|
|
|
return self.p_losses(x, t, *args, **kwargs)
|
|
|
|
|
|
|
|
def get_input(self, batch, k):
|
|
|
|
x = batch[k]
|
|
|
|
if len(x.shape) == 3:
|
|
|
|
x = x[..., None]
|
|
|
|
x = rearrange(x, 'b h w c -> b c h w')
|
|
|
|
x = x.to(memory_format=torch.contiguous_format).float()
|
|
|
|
return x
|
|
|
|
|
|
|
|
def shared_step(self, batch):
|
|
|
|
x = self.get_input(batch, self.first_stage_key)
|
|
|
|
loss, loss_dict = self(x)
|
|
|
|
return loss, loss_dict
|
|
|
|
|
|
|
|
    def training_step(self, batch, batch_idx):
        """Lightning training step: compute the diffusion loss and log metrics."""
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(
            loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True
        )

        self.log(
            'global_step',
            self.global_step,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=False,
        )

        if self.use_scheduler:
            # surface the current learning rate when a scheduler drives it
            lr = self.optimizers().param_groups[0]['lr']
            self.log(
                'lr_abs',
                lr,
                prog_bar=True,
                logger=True,
                on_step=True,
                on_epoch=False,
            )

        return loss
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
def validation_step(self, batch, batch_idx):
|
|
|
|
_, loss_dict_no_ema = self.shared_step(batch)
|
|
|
|
with self.ema_scope():
|
|
|
|
_, loss_dict_ema = self.shared_step(batch)
|
2022-08-26 07:15:42 +00:00
|
|
|
loss_dict_ema = {
|
|
|
|
key + '_ema': loss_dict_ema[key] for key in loss_dict_ema
|
|
|
|
}
|
|
|
|
self.log_dict(
|
|
|
|
loss_dict_no_ema,
|
|
|
|
prog_bar=False,
|
|
|
|
logger=True,
|
|
|
|
on_step=False,
|
|
|
|
on_epoch=True,
|
|
|
|
)
|
|
|
|
self.log_dict(
|
|
|
|
loss_dict_ema,
|
|
|
|
prog_bar=False,
|
|
|
|
logger=True,
|
|
|
|
on_step=False,
|
|
|
|
on_epoch=True,
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
def on_train_batch_end(self, *args, **kwargs):
|
|
|
|
if self.use_ema:
|
|
|
|
self.model_ema(self.model)
|
|
|
|
|
|
|
|
def _get_rows_from_list(self, samples):
|
|
|
|
n_imgs_per_row = len(samples)
|
|
|
|
denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
|
|
|
|
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
|
|
|
|
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
|
|
|
|
return denoise_grid
|
|
|
|
|
|
|
|
    @torch.no_grad()
    def log_images(
        self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs
    ):
        """Build a dict of visualization tensors: inputs, a forward-diffusion
        row, and (optionally) EMA samples with their denoising row.

        :param N: maximum number of images taken from the batch.
        :param n_row: rows used for the forward-diffusion grid.
        :param sample: also run the reverse process under EMA weights.
        :param return_keys: if given, restrict the returned dict to these keys
            (when they intersect the collected keys).
        """
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log['inputs'] = x

        # get diffusion row
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                # broadcast the scalar timestep across the row
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log['diffusion_row'] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope('Plotting'):
                samples, denoise_row = self.sample(
                    batch_size=N, return_intermediates=True
                )

            log['samples'] = samples
            log['denoise_row'] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log
|
|
|
|
|
|
|
|
def configure_optimizers(self):
|
|
|
|
lr = self.learning_rate
|
|
|
|
params = list(self.model.parameters())
|
|
|
|
if self.learn_logvar:
|
|
|
|
params = params + [self.logvar]
|
|
|
|
opt = torch.optim.AdamW(params, lr=lr)
|
|
|
|
return opt
|
|
|
|
|
|
|
|
|
|
|
|
class LatentDiffusion(DDPM):
|
|
|
|
"""main class"""
|
2022-08-26 07:15:42 +00:00
|
|
|
|
|
|
|
    def __init__(
        self,
        first_stage_config,
        cond_stage_config,
        personalization_config,
        num_timesteps_cond=None,
        cond_stage_key='image',
        cond_stage_trainable=False,
        concat_mode=True,
        cond_stage_forward=None,
        conditioning_key=None,
        scale_factor=1.0,
        scale_by_std=False,
        *args,
        **kwargs,
    ):
        """DDPM operating in the latent space of a frozen autoencoder.

        Textual-inversion variant: the diffusion model, first stage and
        conditioning model are all frozen; only the embedding manager's
        parameters remain trainable.

        :param personalization_config: config for the embedding manager.
        :param scale_by_std: if True, scale_factor is a buffer later reset to
            1/std of the encodings (see on_train_batch_start).
        """
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__':
            conditioning_key = None
        # pop ckpt args so DDPM.__init__ does not load the checkpoint before
        # the first/cond stages exist; loading is deferred to the end
        ckpt_path = kwargs.pop('ckpt_path', None)
        ignore_keys = kwargs.pop('ignore_keys', [])
        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key

        try:
            self.num_downs = (
                len(first_stage_config.params.ddconfig.ch_mult) - 1
            )
        except:
            # NOTE(review): bare except — any config without ddconfig falls
            # back to 0 downsampling levels
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)

        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True

        # freeze the conditioning model: no mode switching, no gradients
        self.cond_stage_model.train = disabled_train
        for param in self.cond_stage_model.parameters():
            param.requires_grad = False

        # freeze the diffusion model itself
        self.model.eval()
        self.model.train = disabled_train
        for param in self.model.parameters():
            param.requires_grad = False

        self.embedding_manager = self.instantiate_embedding_manager(
            personalization_config, self.cond_stage_model
        )

        self.emb_ckpt_counter = 0

        # if self.embedding_manager.is_clip:
        #     self.cond_stage_model.update_embedding_func(self.embedding_manager)

        # only the embedding manager's parameters are trained
        for param in self.embedding_manager.embedding_parameters():
            param.requires_grad = True
|
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
def make_cond_schedule(
|
|
|
|
self,
|
|
|
|
):
|
|
|
|
self.cond_ids = torch.full(
|
|
|
|
size=(self.num_timesteps,),
|
|
|
|
fill_value=self.num_timesteps - 1,
|
|
|
|
dtype=torch.long,
|
|
|
|
)
|
|
|
|
ids = torch.round(
|
|
|
|
torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)
|
|
|
|
).long()
|
|
|
|
self.cond_ids[: self.num_timesteps_cond] = ids
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx=None):
        """One-shot std-rescaling: on the very first batch of a fresh run,
        reset ``scale_factor`` to 1/std of the first-stage encodings."""
        # only for very first batch
        if (
            self.scale_by_std
            and self.current_epoch == 0
            and self.global_step == 0
            and batch_idx == 0
            and not self.restarted_from_ckpt
        ):
            assert (
                self.scale_factor == 1.0
            ), 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print('### USING STD-RESCALING ###')
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            # replace the placeholder buffer with the measured value
            del self.scale_factor
            self.register_buffer('scale_factor', 1.0 / z.flatten().std())
            print(f'setting self.scale_factor to {self.scale_factor}')
            print('### USING STD-RESCALING ###')
|
|
|
|
|
|
|
|
def register_schedule(
|
|
|
|
self,
|
|
|
|
given_betas=None,
|
|
|
|
beta_schedule='linear',
|
|
|
|
timesteps=1000,
|
|
|
|
linear_start=1e-4,
|
|
|
|
linear_end=2e-2,
|
|
|
|
cosine_s=8e-3,
|
|
|
|
):
|
|
|
|
super().register_schedule(
|
|
|
|
given_betas,
|
|
|
|
beta_schedule,
|
|
|
|
timesteps,
|
|
|
|
linear_start,
|
|
|
|
linear_end,
|
|
|
|
cosine_s,
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
self.shorten_cond_schedule = self.num_timesteps_cond > 1
|
|
|
|
if self.shorten_cond_schedule:
|
|
|
|
self.make_cond_schedule()
|
|
|
|
|
|
|
|
def instantiate_first_stage(self, config):
|
|
|
|
model = instantiate_from_config(config)
|
|
|
|
self.first_stage_model = model.eval()
|
|
|
|
self.first_stage_model.train = disabled_train
|
|
|
|
for param in self.first_stage_model.parameters():
|
|
|
|
param.requires_grad = False
|
|
|
|
|
|
|
|
    def instantiate_cond_stage(self, config):
        """Build the conditioning encoder described by ``config``.

        Two sentinel strings are recognized: '__is_first_stage__' reuses the
        autoencoder as conditioning model, '__is_unconditional__' disables
        conditioning entirely. Models built here are frozen unless
        ``cond_stage_trainable`` is set.
        """
        if not self.cond_stage_trainable:
            if config == '__is_first_stage__':
                print('Using first stage also as cond stage.')
                self.cond_stage_model = self.first_stage_model
            elif config == '__is_unconditional__':
                print(
                    f'Training {self.__class__.__name__} as an unconditional model.'
                )
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                # freeze: eval mode, no mode switching, no gradients
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            try:
                model = instantiate_from_config(config)
            except urllib.error.URLError:
                # instantiation may download weights; give an actionable hint
                raise SystemExit(
                    "* Couldn't load a dependency. Try running scripts/preload_models.py from an internet-conected machine."
                )
            self.cond_stage_model = model
|
2022-08-26 07:15:42 +00:00
|
|
|
|
2022-08-23 22:26:28 +00:00
|
|
|
def instantiate_embedding_manager(self, config, embedder):
|
|
|
|
model = instantiate_from_config(config, embedder=embedder)
|
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
if config.params.get(
|
|
|
|
'embedding_manager_ckpt', None
|
|
|
|
): # do not load if missing OR empty string
|
2022-08-23 22:26:28 +00:00
|
|
|
model.load(config.params.embedding_manager_ckpt)
|
2022-08-26 07:15:42 +00:00
|
|
|
|
2022-08-23 22:26:28 +00:00
|
|
|
return model
|
2021-12-21 02:23:41 +00:00
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
def _get_denoise_row_from_list(
|
|
|
|
self, samples, desc='', force_no_decoder_quantization=False
|
|
|
|
):
|
2021-12-21 02:23:41 +00:00
|
|
|
denoise_row = []
|
|
|
|
for zd in tqdm(samples, desc=desc):
|
2022-08-26 07:15:42 +00:00
|
|
|
denoise_row.append(
|
|
|
|
self.decode_first_stage(
|
|
|
|
zd.to(self.device),
|
|
|
|
force_not_quantize=force_no_decoder_quantization,
|
|
|
|
)
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
n_imgs_per_row = len(denoise_row)
|
|
|
|
denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
|
|
|
|
denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
|
|
|
|
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
|
|
|
|
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
|
|
|
|
return denoise_grid
|
|
|
|
|
|
|
|
def get_first_stage_encoding(self, encoder_posterior):
    """Turn the first-stage encoder output into a scaled latent tensor.

    Accepts either a `DiagonalGaussianDistribution` (sampled) or a plain
    tensor (used as-is); the result is multiplied by `self.scale_factor`.
    """
    if isinstance(encoder_posterior, DiagonalGaussianDistribution):
        return self.scale_factor * encoder_posterior.sample()
    if isinstance(encoder_posterior, torch.Tensor):
        return self.scale_factor * encoder_posterior
    raise NotImplementedError(
        f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented"
    )
|
|
|
|
|
2022-10-19 23:42:04 +00:00
|
|
|
def get_learned_conditioning(self, c, **kwargs):
    """Encode raw conditioning `c` with the conditioning-stage model.

    If `self.cond_stage_forward` names a method, that method is used;
    otherwise `encode(...)` is preferred when available, falling back to
    calling the model directly.
    """
    if self.cond_stage_forward is not None:
        # An explicitly configured forward method takes precedence.
        assert hasattr(self.cond_stage_model, self.cond_stage_forward)
        return getattr(self.cond_stage_model, self.cond_stage_forward)(c, **kwargs)

    encoder = getattr(self.cond_stage_model, 'encode', None)
    if callable(encoder):
        c = encoder(c, embedding_manager=self.embedding_manager, **kwargs)
        if isinstance(c, DiagonalGaussianDistribution):
            # Use the distribution mode rather than a random sample.
            c = c.mode()
        return c

    return self.cond_stage_model(c, **kwargs)
|
|
|
|
|
|
|
|
def meshgrid(self, h, w):
    """Return an (h, w, 2) integer grid of (row, col) coordinates."""
    rows = torch.arange(0, h).reshape(h, 1, 1).repeat(1, w, 1)
    cols = torch.arange(0, w).reshape(1, w, 1).repeat(h, 1, 1)
    return torch.cat((rows, cols), dim=-1)
|
|
|
|
|
|
|
|
def delta_border(self, h, w):
    """Normalized distance of every pixel to the nearest image border.

    :param h: height
    :param w: width
    :return: (h, w) tensor with min distance = 0 at the border and
        max distance = 0.5 at the image center
    """
    # Normalize coordinates so the lower-right corner maps to (1, 1).
    corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
    coords = self.meshgrid(h, w) / corner
    # Distance to the top/left edges vs. the bottom/right edges.
    near_top_left = torch.min(coords, dim=-1, keepdims=True)[0]
    near_bottom_right = torch.min(1 - coords, dim=-1, keepdims=True)[0]
    both = torch.cat([near_top_left, near_bottom_right], dim=-1)
    return torch.min(both, dim=-1)[0]
|
|
|
|
|
|
|
|
def get_weighting(self, h, w, Ly, Lx, device):
|
|
|
|
weighting = self.delta_border(h, w)
|
2022-08-26 07:15:42 +00:00
|
|
|
weighting = torch.clip(
|
|
|
|
weighting,
|
|
|
|
self.split_input_params['clip_min_weight'],
|
|
|
|
self.split_input_params['clip_max_weight'],
|
|
|
|
)
|
|
|
|
weighting = (
|
|
|
|
weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
if self.split_input_params['tie_braker']:
|
2021-12-21 02:23:41 +00:00
|
|
|
L_weighting = self.delta_border(Ly, Lx)
|
2022-08-26 07:15:42 +00:00
|
|
|
L_weighting = torch.clip(
|
|
|
|
L_weighting,
|
|
|
|
self.split_input_params['clip_min_tie_weight'],
|
|
|
|
self.split_input_params['clip_max_tie_weight'],
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
|
|
|
|
weighting = weighting * L_weighting
|
|
|
|
return weighting
|
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
def get_fold_unfold(
    self, x, kernel_size, stride, uf=1, df=1
):  # todo load once not every time, shorten code
    """Build fold/unfold operators plus blending weights for patched processing.

    :param x: img of size (bs, c, h, w)
    :param kernel_size: (kh, kw) patch size in pixels of `x`.
    :param stride: (sh, sw) patch stride in pixels of `x`.
    :param uf: upscale factor — the fold operates at `uf`-times the input
        resolution (used when decoding latents to images).
    :param df: downscale factor — the fold operates at 1/`df` of the input
        resolution (used when encoding images to latents). Exactly one of
        uf/df may differ from 1.
    :return: (fold, unfold, normalization, weighting) where `unfold(x)` yields
        patch columns, `fold` stitches processed patches back together,
        `normalization` corrects for overlap, and `weighting` blends patches.
    """
    bs, nc, h, w = x.shape

    # number of crops in image
    Ly = (h - kernel_size[0]) // stride[0] + 1
    Lx = (w - kernel_size[1]) // stride[1] + 1

    if uf == 1 and df == 1:
        # Same-resolution case: fold/unfold share one parameter set.
        fold_params = dict(
            kernel_size=kernel_size, dilation=1, padding=0, stride=stride
        )
        unfold = torch.nn.Unfold(**fold_params)
        fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

        weighting = self.get_weighting(
            kernel_size[0], kernel_size[1], Ly, Lx, x.device
        ).to(x.dtype)
        normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
        weighting = weighting.view(
            (1, 1, kernel_size[0], kernel_size[1], Ly * Lx)
        )

    elif uf > 1 and df == 1:
        # Upscaled fold: patches are processed at uf-times the resolution.
        fold_params = dict(
            kernel_size=kernel_size, dilation=1, padding=0, stride=stride
        )
        unfold = torch.nn.Unfold(**fold_params)

        # BUGFIX: the second kernel dimension previously used kernel_size[0],
        # which silently broke non-square kernels (stride and weighting below
        # already use [0]/[1] consistently).
        fold_params2 = dict(
            kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf),
            dilation=1,
            padding=0,
            stride=(stride[0] * uf, stride[1] * uf),
        )
        fold = torch.nn.Fold(
            output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2
        )

        weighting = self.get_weighting(
            kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device
        ).to(x.dtype)
        normalization = fold(weighting).view(
            1, 1, h * uf, w * uf
        )  # normalizes the overlap
        weighting = weighting.view(
            (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)
        )

    elif df > 1 and uf == 1:
        # Downscaled fold: patches are processed at 1/df of the resolution.
        fold_params = dict(
            kernel_size=kernel_size, dilation=1, padding=0, stride=stride
        )
        unfold = torch.nn.Unfold(**fold_params)

        # BUGFIX: second kernel dimension uses kernel_size[1] (see uf branch).
        fold_params2 = dict(
            kernel_size=(kernel_size[0] // df, kernel_size[1] // df),
            dilation=1,
            padding=0,
            stride=(stride[0] // df, stride[1] // df),
        )
        fold = torch.nn.Fold(
            output_size=(x.shape[2] // df, x.shape[3] // df),
            **fold_params2,
        )

        weighting = self.get_weighting(
            kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device
        ).to(x.dtype)
        normalization = fold(weighting).view(
            1, 1, h // df, w // df
        )  # normalizes the overlap
        weighting = weighting.view(
            (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)
        )

    else:
        raise NotImplementedError

    return fold, unfold, normalization, weighting
|
|
|
|
|
|
|
|
@torch.no_grad()
def get_input(
    self,
    batch,
    k,
    return_first_stage_outputs=False,
    force_c_encode=False,
    cond_key=None,
    return_original_cond=False,
    bs=None,
):
    """Fetch a batch, encode it to latents, and assemble the conditioning.

    :param batch: raw batch dict from the dataloader.
    :param k: key of the image tensor in `batch` (the first-stage input).
    :param return_first_stage_outputs: also return the input image and its
        first-stage reconstruction.
    :param force_c_encode: run the conditioning encoder even when the
        conditioning stage is trainable (normally deferred to `forward`).
    :param cond_key: which batch entry provides the conditioning; defaults
        to `self.cond_stage_key`.
    :param return_original_cond: also return the un-encoded conditioning.
    :param bs: optional batch-size cap applied to inputs and conditioning.
    :return: list `[z, c]` optionally extended with `[x, xrec]` and `[xc]`.
    """
    x = super().get_input(batch, k)
    if bs is not None:
        x = x[:bs]
    x = x.to(self.device)
    encoder_posterior = self.encode_first_stage(x)
    # detach: the first stage is frozen; latents are treated as data.
    z = self.get_first_stage_encoding(encoder_posterior).detach()

    if self.model.conditioning_key is not None:
        if cond_key is None:
            cond_key = self.cond_stage_key
        if cond_key != self.first_stage_key:
            # Text-like and bbox conditioning come straight from the batch;
            # class labels pass the whole batch; anything else is a tensor.
            if cond_key in ['caption', 'coordinates_bbox']:
                xc = batch[cond_key]
            elif cond_key == 'class_label':
                xc = batch
            else:
                xc = super().get_input(batch, cond_key).to(self.device)
        else:
            # Self-conditioning: condition on the input image itself.
            xc = x
        if not self.cond_stage_trainable or force_c_encode:
            if isinstance(xc, dict) or isinstance(xc, list):
                # import pudb; pudb.set_trace()
                c = self.get_learned_conditioning(xc)
            else:
                c = self.get_learned_conditioning(xc.to(self.device))
        else:
            # Trainable cond stage: keep raw conditioning; encoded later
            # inside `forward` so gradients can flow.
            c = xc
        if bs is not None:
            c = c[:bs]

        if self.use_positional_encodings:
            pos_x, pos_y = self.compute_latent_shifts(batch)
            # __conditioning_keys__ is a module-level mapping defined elsewhere
            # in this file.
            ckey = __conditioning_keys__[self.model.conditioning_key]
            c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

    else:
        # Unconditional model.
        c = None
        xc = None
        if self.use_positional_encodings:
            pos_x, pos_y = self.compute_latent_shifts(batch)
            c = {'pos_x': pos_x, 'pos_y': pos_y}
    out = [z, c]
    if return_first_stage_outputs:
        xrec = self.decode_first_stage(z)
        out.extend([x, xrec])
    if return_original_cond:
        out.append(xc)
    return out
|
|
|
|
|
|
|
|
@torch.no_grad()
def decode_first_stage(
    self, z, predict_cids=False, force_not_quantize=False
):
    """Decode latents `z` back to image space with the first-stage model.

    :param z: latent tensor (bs, c, h, w), scaled by `self.scale_factor`.
    :param predict_cids: `z` holds codebook logits/ids (VQ models); they are
        converted to codebook embeddings before decoding.
    :param force_not_quantize: skip re-quantization in the VQ decoder.
    :return: decoded image tensor.

    When `self.split_input_params` is set, decoding runs patch-wise: the
    latent is unfolded into overlapping crops, each crop is decoded
    independently, and the results are blended back together with the
    weighting/normalization from `get_fold_unfold`.
    """
    if predict_cids:
        if z.dim() == 4:
            # Logits over the codebook -> hard code indices.
            z = torch.argmax(z.exp(), dim=1).long()
        z = self.first_stage_model.quantize.get_codebook_entry(
            z, shape=None
        )
        z = rearrange(z, 'b h w c -> b c h w').contiguous()

    # Undo the latent scaling applied in get_first_stage_encoding.
    z = 1.0 / self.scale_factor * z

    if hasattr(self, 'split_input_params'):
        if self.split_input_params['patch_distributed_vq']:
            ks = self.split_input_params['ks']  # eg. (128, 128)
            stride = self.split_input_params['stride']  # eg. (64, 64)
            uf = self.split_input_params['vqf']  # first-stage upscale factor
            bs, nc, h, w = z.shape
            # Clamp kernel and stride so they never exceed the latent size.
            if ks[0] > h or ks[1] > w:
                ks = (min(ks[0], h), min(ks[1], w))
                print('reducing Kernel')

            if stride[0] > h or stride[1] > w:
                stride = (min(stride[0], h), min(stride[1], w))
                print('reducing stride')

            fold, unfold, normalization, weighting = self.get_fold_unfold(
                z, ks, stride, uf=uf
            )

            z = unfold(z)  # (bn, nc * prod(**ks), L)
            # 1. Reshape to img shape
            z = z.view(
                (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
            )  # (bn, nc, ks[0], ks[1], L )

            # 2. apply model loop over last dim
            if isinstance(self.first_stage_model, VQModelInterface):
                output_list = [
                    self.first_stage_model.decode(
                        z[:, :, :, :, i],
                        force_not_quantize=predict_cids
                        or force_not_quantize,
                    )
                    for i in range(z.shape[-1])
                ]
            else:
                # Non-VQ decoders take no quantization flag.
                output_list = [
                    self.first_stage_model.decode(z[:, :, :, :, i])
                    for i in range(z.shape[-1])
                ]

            o = torch.stack(
                output_list, axis=-1
            )  # # (bn, nc, ks[0], ks[1], L)
            o = o * weighting
            # Reverse 1. reshape to img shape
            o = o.view(
                (o.shape[0], -1, o.shape[-1])
            )  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            decoded = fold(o)
            decoded = decoded / normalization  # norm is shape (1, 1, h, w)
            return decoded
        else:
            # split_input_params present but patching disabled: plain decode.
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(
                    z,
                    force_not_quantize=predict_cids or force_not_quantize,
                )
            else:
                return self.first_stage_model.decode(z)

    else:
        # No patching configured: decode the whole latent at once.
        if isinstance(self.first_stage_model, VQModelInterface):
            return self.first_stage_model.decode(
                z, force_not_quantize=predict_cids or force_not_quantize
            )
        else:
            return self.first_stage_model.decode(z)
|
|
|
|
|
|
|
|
# same as above but without decorator
|
2022-08-26 07:15:42 +00:00
|
|
|
def differentiable_decode_first_stage(
    self, z, predict_cids=False, force_not_quantize=False
):
    """Decode latents to image space, keeping gradients.

    Identical to `decode_first_stage` but without the `@torch.no_grad()`
    decorator, so it can sit inside a differentiable loss path.
    See `decode_first_stage` for parameter semantics.
    """
    if predict_cids:
        if z.dim() == 4:
            # Logits over the codebook -> hard code indices.
            z = torch.argmax(z.exp(), dim=1).long()
        z = self.first_stage_model.quantize.get_codebook_entry(
            z, shape=None
        )
        z = rearrange(z, 'b h w c -> b c h w').contiguous()

    # Undo the latent scaling applied in get_first_stage_encoding.
    z = 1.0 / self.scale_factor * z

    if hasattr(self, 'split_input_params'):
        if self.split_input_params['patch_distributed_vq']:
            ks = self.split_input_params['ks']  # eg. (128, 128)
            stride = self.split_input_params['stride']  # eg. (64, 64)
            uf = self.split_input_params['vqf']  # first-stage upscale factor
            bs, nc, h, w = z.shape
            # Clamp kernel and stride so they never exceed the latent size.
            if ks[0] > h or ks[1] > w:
                ks = (min(ks[0], h), min(ks[1], w))
                print('reducing Kernel')

            if stride[0] > h or stride[1] > w:
                stride = (min(stride[0], h), min(stride[1], w))
                print('reducing stride')

            fold, unfold, normalization, weighting = self.get_fold_unfold(
                z, ks, stride, uf=uf
            )

            z = unfold(z)  # (bn, nc * prod(**ks), L)
            # 1. Reshape to img shape
            z = z.view(
                (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
            )  # (bn, nc, ks[0], ks[1], L )

            # 2. apply model loop over last dim
            if isinstance(self.first_stage_model, VQModelInterface):
                output_list = [
                    self.first_stage_model.decode(
                        z[:, :, :, :, i],
                        force_not_quantize=predict_cids
                        or force_not_quantize,
                    )
                    for i in range(z.shape[-1])
                ]
            else:
                # Non-VQ decoders take no quantization flag.
                output_list = [
                    self.first_stage_model.decode(z[:, :, :, :, i])
                    for i in range(z.shape[-1])
                ]

            o = torch.stack(
                output_list, axis=-1
            )  # # (bn, nc, ks[0], ks[1], L)
            o = o * weighting
            # Reverse 1. reshape to img shape
            o = o.view(
                (o.shape[0], -1, o.shape[-1])
            )  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            decoded = fold(o)
            decoded = decoded / normalization  # norm is shape (1, 1, h, w)
            return decoded
        else:
            # split_input_params present but patching disabled: plain decode.
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(
                    z,
                    force_not_quantize=predict_cids or force_not_quantize,
                )
            else:
                return self.first_stage_model.decode(z)

    else:
        # No patching configured: decode the whole latent at once.
        if isinstance(self.first_stage_model, VQModelInterface):
            return self.first_stage_model.decode(
                z, force_not_quantize=predict_cids or force_not_quantize
            )
        else:
            return self.first_stage_model.decode(z)
|
|
|
|
|
|
|
|
@torch.no_grad()
def encode_first_stage(self, x):
    """Encode image `x` into the first-stage latent space.

    With `self.split_input_params` set, the image is encoded patch-wise
    (unfold -> encode each crop -> weighted fold); otherwise the whole
    image is encoded in one call. Also records the original image size
    in `split_input_params` for later bbox rescaling in `apply_model`.
    """
    if hasattr(self, 'split_input_params'):
        if self.split_input_params['patch_distributed_vq']:
            ks = self.split_input_params['ks']  # eg. (128, 128)
            stride = self.split_input_params['stride']  # eg. (64, 64)
            df = self.split_input_params['vqf']  # first-stage downscale factor
            # Remembered for coordinate-bbox conditioning in apply_model.
            self.split_input_params['original_image_size'] = x.shape[-2:]
            bs, nc, h, w = x.shape
            # Clamp kernel and stride so they never exceed the image size.
            if ks[0] > h or ks[1] > w:
                ks = (min(ks[0], h), min(ks[1], w))
                print('reducing Kernel')

            if stride[0] > h or stride[1] > w:
                stride = (min(stride[0], h), min(stride[1], w))
                print('reducing stride')

            fold, unfold, normalization, weighting = self.get_fold_unfold(
                x, ks, stride, df=df
            )
            z = unfold(x)  # (bn, nc * prod(**ks), L)
            # Reshape to img shape
            z = z.view(
                (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
            )  # (bn, nc, ks[0], ks[1], L )

            # Encode each crop independently.
            output_list = [
                self.first_stage_model.encode(z[:, :, :, :, i])
                for i in range(z.shape[-1])
            ]

            o = torch.stack(output_list, axis=-1)
            o = o * weighting

            # Reverse reshape to img shape
            o = o.view(
                (o.shape[0], -1, o.shape[-1])
            )  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            decoded = fold(o)
            decoded = decoded / normalization
            return decoded

        else:
            return self.first_stage_model.encode(x)
    else:
        return self.first_stage_model.encode(x)
|
|
|
|
|
|
|
|
def shared_step(self, batch, **kwargs):
    """Compute the diffusion loss for one batch (shared by train/val steps)."""
    latents, conditioning = self.get_input(batch, self.first_stage_key)
    return self(latents, conditioning)
|
|
|
|
|
|
|
|
def forward(self, x, c, *args, **kwargs):
    """Draw random timesteps, prepare the conditioning, and return p_losses.

    :param x: latent input batch, shape (b, ...).
    :param c: raw or pre-encoded conditioning; required when the model is
        conditional.
    """
    batch_size = x.shape[0]
    timesteps = torch.randint(
        0, self.num_timesteps, (batch_size,), device=self.device
    ).long()
    if self.model.conditioning_key is not None:
        assert c is not None
        if self.cond_stage_trainable:
            # Conditioning is encoded here (not in get_input) so gradients
            # can reach the trainable conditioning stage.
            c = self.get_learned_conditioning(c)
        if self.shorten_cond_schedule:  # TODO: drop this option
            tc = self.cond_ids[timesteps].to(self.device)
            c = self.q_sample(
                x_start=c, t=tc, noise=torch.randn_like(c.float())
            )
    return self.p_losses(x, c, timesteps, *args, **kwargs)
|
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
def _rescale_annotations(
    self, bboxes, crop_coordinates
):  # TODO: move to dataset
    """Re-express bbox annotations relative to a crop window.

    :param bboxes: iterable of (x, y, w, h) boxes in the original frame.
    :param crop_coordinates: (x, y, w, h) of the crop window.
    :return: list of rescaled (x0, y0, w, h) tuples.

    NOTE(review): `clamp` is not defined in this module's visible scope;
    calling this method would raise NameError unless `clamp` is provided
    elsewhere — verify before use.
    """
    def rescale_bbox(bbox):
        # Shift into the crop frame and normalize by the crop extent.
        x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
        y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
        # Clip width/height so the box stays inside the crop.
        w = min(bbox[2] / crop_coordinates[2], 1 - x0)
        h = min(bbox[3] / crop_coordinates[3], 1 - y0)
        return x0, y0, w, h

    return [rescale_bbox(b) for b in bboxes]
|
|
|
|
|
|
|
|
def apply_model(self, x_noisy, t, cond, return_ids=False):
    """Run the denoising model on `x_noisy` at timesteps `t` under `cond`.

    :param x_noisy: noisy latent batch (bs, c, h, w).
    :param t: timestep tensor of shape (bs,).
    :param cond: conditioning — dict (hybrid), list, or single tensor;
        non-dict conditioning is wrapped into {'c_concat'|'c_crossattn': [...]}
        depending on `self.model.conditioning_key`.
    :param return_ids: pass through tuple outputs instead of taking [0].
    :return: the model prediction (eps or x0, per parameterization).

    When `self.split_input_params` is set, the input is processed as
    overlapping patches (unfold -> per-patch model call -> weighted fold),
    with conditioning adapted per patch.
    """

    if isinstance(cond, dict):
        # hybrid case, cond is expected to be a dict
        pass
    else:
        if not isinstance(cond, list):
            cond = [cond]
        key = (
            'c_concat'
            if self.model.conditioning_key == 'concat'
            else 'c_crossattn'
        )
        cond = {key: cond}

    if hasattr(self, 'split_input_params'):
        assert (
            len(cond) == 1
        )  # todo can only deal with one conditioning atm
        assert not return_ids
        ks = self.split_input_params['ks']  # eg. (128, 128)
        stride = self.split_input_params['stride']  # eg. (64, 64)

        h, w = x_noisy.shape[-2:]

        fold, unfold, normalization, weighting = self.get_fold_unfold(
            x_noisy, ks, stride
        )

        z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
        # Reshape to img shape
        z = z.view(
            (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
        )  # (bn, nc, ks[0], ks[1], L )
        # One latent crop per patch position.
        z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

        if (
            self.cond_stage_key
            in ['image', 'LR_image', 'segmentation', 'bbox_img']
            and self.model.conditioning_key
        ):  # todo check for completeness
            # Spatial conditioning: unfold it the same way as the latent so
            # each patch sees its matching conditioning crop.
            c_key = next(iter(cond.keys()))  # get key
            c = next(iter(cond.values()))  # get value
            assert (
                len(c) == 1
            )  # todo extend to list with more than one elem
            c = c[0]  # get element

            c = unfold(c)
            c = c.view(
                (c.shape[0], -1, ks[0], ks[1], c.shape[-1])
            )  # (bn, nc, ks[0], ks[1], L )

            cond_list = [
                {c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])
            ]

        elif self.cond_stage_key == 'coordinates_bbox':
            # Bbox conditioning: each patch needs crop coordinates appended
            # to its token sequence and re-encoded.
            assert (
                'original_image_size' in self.split_input_params
            ), 'BoudingBoxRescaling is missing original_image_size'

            # assuming padding of unfold is always 0 and its dilation is always 1
            n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
            full_img_h, full_img_w = self.split_input_params[
                'original_image_size'
            ]
            # as we are operating on latents, we need the factor from the original image size to the
            # spatial latent size to properly rescale the crops for regenerating the bbox annotations
            num_downs = self.first_stage_model.encoder.num_resolutions - 1
            rescale_latent = 2 ** (num_downs)

            # get top left positions of patches as conforming for the bbbox tokenizer, therefore we
            # need to rescale the tl patch coordinates to be in between (0,1)
            tl_patch_coordinates = [
                (
                    rescale_latent
                    * stride[0]
                    * (patch_nr % n_patches_per_row)
                    / full_img_w,
                    rescale_latent
                    * stride[1]
                    * (patch_nr // n_patches_per_row)
                    / full_img_h,
                )
                for patch_nr in range(z.shape[-1])
            ]

            # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
            patch_limits = [
                (
                    x_tl,
                    y_tl,
                    rescale_latent * ks[0] / full_img_w,
                    rescale_latent * ks[1] / full_img_h,
                )
                for x_tl, y_tl in tl_patch_coordinates
            ]
            # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

            # tokenize crop coordinates for the bounding boxes of the respective patches
            patch_limits_tknzd = [
                torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[
                    None
                ].to(self.device)
                for bbox in patch_limits
            ]  # list of length l with tensors of shape (1, 2)
            print(patch_limits_tknzd[0].shape)
            # cut tknzd crop position from conditioning
            assert isinstance(
                cond, dict
            ), 'cond must be dict to be fed into model'
            cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
            print(cut_cond.shape)

            adapted_cond = torch.stack(
                [
                    torch.cat([cut_cond, p], dim=1)
                    for p in patch_limits_tknzd
                ]
            )
            adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
            print(adapted_cond.shape)
            adapted_cond = self.get_learned_conditioning(adapted_cond)
            print(adapted_cond.shape)
            adapted_cond = rearrange(
                adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]
            )
            print(adapted_cond.shape)

            cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

        else:
            # Non-spatial conditioning: reuse the same cond for every patch.
            cond_list = [
                cond for i in range(z.shape[-1])
            ]  # Todo make this more efficient

        # apply model by loop over crops
        output_list = [
            self.model(z_list[i], t, **cond_list[i])
            for i in range(z.shape[-1])
        ]
        assert not isinstance(
            output_list[0], tuple
        )  # todo cant deal with multiple model outputs check this never happens

        o = torch.stack(output_list, axis=-1)
        o = o * weighting
        # Reverse reshape to img shape
        o = o.view(
            (o.shape[0], -1, o.shape[-1])
        )  # (bn, nc * ks[0] * ks[1], L)
        # stitch crops together
        x_recon = fold(o) / normalization

    else:
        x_recon = self.model(x_noisy, t, **cond)

    if isinstance(x_recon, tuple) and not return_ids:
        return x_recon[0]
    else:
        return x_recon
|
|
|
|
|
|
|
|
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
    """Recover the noise eps implied by predicting `pred_xstart` from `x_t` at step `t`."""
    sqrt_recip = extract_into_tensor(
        self.sqrt_recip_alphas_cumprod, t, x_t.shape
    )
    sqrt_recipm1 = extract_into_tensor(
        self.sqrt_recipm1_alphas_cumprod, t, x_t.shape
    )
    return (sqrt_recip * x_t - pred_xstart) / sqrt_recipm1
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
def _prior_bpd(self, x_start):
    """
    Get the prior KL term for the variational lower-bound, measured in
    bits-per-dim.
    This term can't be optimized, as it only depends on the encoder.
    :param x_start: the [N x C x ...] tensor of inputs.
    :return: a batch of [N] KL values (in bits), one per batch element.
    """
    n = x_start.shape[0]
    last_step = torch.tensor(
        [self.num_timesteps - 1] * n, device=x_start.device
    )
    qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, last_step)
    kl_prior = normal_kl(
        mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
    )
    # Convert nats to bits per dimension.
    return mean_flat(kl_prior) / np.log(2.0)
|
|
|
|
|
|
|
|
def p_losses(self, x_start, cond, t, noise=None):
    """Compute the training losses for a batch at timesteps ``t``.

    Noises ``x_start`` with the forward process, runs the denoiser via
    ``apply_model``, and assembles the weighted loss:
    ``l_simple_weight * loss_simple / exp(logvar_t) + logvar_t``
    plus ``original_elbo_weight * loss_vlb`` and, if enabled, an embedding
    regularization term.

    :param x_start: clean latents, shape [N x C x ...].
    :param cond: conditioning passed through to ``apply_model``.
    :param t: integer timestep tensor of shape [N].
    :param noise: optional pre-drawn noise; defaults to standard Gaussian.
    :return: tuple ``(loss, loss_dict)`` with scalar loss and logging dict.
    """
    noise = default(noise, lambda: torch.randn_like(x_start))
    x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
    model_output = self.apply_model(x_noisy, t, cond)

    loss_dict = {}
    prefix = 'train' if self.training else 'val'

    # Regression target depends on the model parameterization.
    if self.parameterization == 'x0':
        target = x_start
    elif self.parameterization == 'eps':
        target = noise
    else:
        raise NotImplementedError()

    # Per-example simple loss, reduced over channel/spatial dims only.
    loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
    loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

    # Gather the (possibly learned) per-timestep log-variance for the whole
    # batch.  Indexing with the full tensor `t` (moved to logvar's device)
    # supports batch sizes > 1; the previous `t.item()` call raised a
    # RuntimeError for any batch with more than one element.
    logvar_t = self.logvar[t.to(self.logvar.device)].to(self.device)
    loss = loss_simple / torch.exp(logvar_t) + logvar_t
    # loss = loss_simple / torch.exp(self.logvar) + self.logvar
    if self.learn_logvar:
        loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
        loss_dict.update({'logvar': self.logvar.data.mean()})

    loss = self.l_simple_weight * loss.mean()

    # Variational lower-bound term, weighted per timestep.
    loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
    loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
    loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
    loss += self.original_elbo_weight * loss_vlb
    loss_dict.update({f'{prefix}/loss': loss})

    # Optional textual-inversion embedding regularization.
    if self.embedding_reg_weight > 0:
        loss_embedding_reg = (
            self.embedding_manager.embedding_to_coarse_loss().mean()
        )
        loss_dict.update({f'{prefix}/loss_emb_reg': loss_embedding_reg})
        loss += self.embedding_reg_weight * loss_embedding_reg
        loss_dict.update({f'{prefix}/loss': loss})

    return loss, loss_dict
|
|
|
|
|
2022-08-26 07:15:42 +00:00
|
|
|
    def p_mean_variance(
        self,
        x,
        c,
        t,
        clip_denoised: bool,
        return_codebook_ids=False,
        quantize_denoised=False,
        return_x0=False,
        score_corrector=None,
        corrector_kwargs=None,
    ):
        """Compute the posterior mean and (log-)variance of p(x_{t-1} | x_t).

        Runs the conditional model, converts its output to a prediction of
        x_0 (depending on ``self.parameterization``), optionally clamps /
        vector-quantizes that prediction, and feeds it through
        ``self.q_posterior``.

        Returns a 3-tuple ``(mean, variance, log_variance)``; with
        ``return_codebook_ids`` a 4-tuple ending in the codebook logits, and
        with ``return_x0`` a 4-tuple ending in the predicted x_0.
        """
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            # Score correction is only defined for the noise ("eps") parameterization.
            assert self.parameterization == 'eps'
            model_out = score_corrector.modify_score(
                self, model_out, x, t, c, **corrector_kwargs
            )

        if return_codebook_ids:
            # apply_model returned (prediction, codebook logits) in this mode.
            model_out, logits = model_out

        if self.parameterization == 'eps':
            # Model predicts the noise; recover x_0 from it.
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == 'x0':
            # Model predicts x_0 directly.
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            # In-place clamp of the reconstruction to the data range [-1, 1].
            x_recon.clamp_(-1.0, 1.0)
        if quantize_denoised:
            # Snap x_0 to the first stage's VQ codebook; only the quantized
            # tensor is used, the indices are discarded.
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(
                x_recon
            )
        (
            model_mean,
            posterior_variance,
            posterior_log_variance,
        ) = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return (
                model_mean,
                posterior_variance,
                posterior_log_variance,
                logits,
            )
        elif return_x0:
            return (
                model_mean,
                posterior_variance,
                posterior_log_variance,
                x_recon,
            )
        else:
            return model_mean, posterior_variance, posterior_log_variance
|
|
|
|
|
|
|
|
    @torch.no_grad()
    def p_sample(
        self,
        x,
        c,
        t,
        clip_denoised=False,
        repeat_noise=False,
        return_codebook_ids=False,
        quantize_denoised=False,
        return_x0=False,
        temperature=1.0,
        noise_dropout=0.0,
        score_corrector=None,
        corrector_kwargs=None,
    ):
        """Draw one reverse-diffusion sample x_{t-1} from x_t.

        Wraps ``p_mean_variance`` and adds temperature-scaled noise; no noise
        is added where ``t == 0``. Returns the sample, plus the predicted x_0
        when ``return_x0`` is set.
        """
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(
            x=x,
            c=c,
            t=t,
            clip_denoised=clip_denoised,
            return_codebook_ids=return_codebook_ids,
            quantize_denoised=quantize_denoised,
            return_x0=return_x0,
            score_corrector=score_corrector,
            corrector_kwargs=corrector_kwargs,
        )
        if return_codebook_ids:
            # Codebook-id sampling has been removed; this raises immediately,
            # so the unpack below (and the matching return further down) is
            # intentionally unreachable legacy code.
            raise DeprecationWarning('Support dropped.')
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.0:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(
            b, *((1,) * (len(x.shape) - 1))
        )

        if return_codebook_ids:
            # Unreachable: guarded by the DeprecationWarning raise above.
            return model_mean + nonzero_mask * (
                0.5 * model_log_variance
            ).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return (
                model_mean
                + nonzero_mask * (0.5 * model_log_variance).exp() * noise,
                x0,
            )
        else:
            return (
                model_mean
                + nonzero_mask * (0.5 * model_log_variance).exp() * noise
            )
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
@torch.no_grad()
|
2022-08-26 07:15:42 +00:00
|
|
|
def progressive_denoising(
|
|
|
|
self,
|
|
|
|
cond,
|
|
|
|
shape,
|
|
|
|
verbose=True,
|
|
|
|
callback=None,
|
|
|
|
quantize_denoised=False,
|
|
|
|
img_callback=None,
|
|
|
|
mask=None,
|
|
|
|
x0=None,
|
|
|
|
temperature=1.0,
|
|
|
|
noise_dropout=0.0,
|
|
|
|
score_corrector=None,
|
|
|
|
corrector_kwargs=None,
|
|
|
|
batch_size=None,
|
|
|
|
x_T=None,
|
|
|
|
start_T=None,
|
|
|
|
log_every_t=None,
|
|
|
|
):
|
2021-12-21 02:23:41 +00:00
|
|
|
if not log_every_t:
|
|
|
|
log_every_t = self.log_every_t
|
|
|
|
timesteps = self.num_timesteps
|
|
|
|
if batch_size is not None:
|
|
|
|
b = batch_size if batch_size is not None else shape[0]
|
|
|
|
shape = [batch_size] + list(shape)
|
|
|
|
else:
|
|
|
|
b = batch_size = shape[0]
|
|
|
|
if x_T is None:
|
|
|
|
img = torch.randn(shape, device=self.device)
|
|
|
|
else:
|
|
|
|
img = x_T
|
|
|
|
intermediates = []
|
|
|
|
if cond is not None:
|
|
|
|
if isinstance(cond, dict):
|
2022-08-26 07:15:42 +00:00
|
|
|
cond = {
|
|
|
|
key: cond[key][:batch_size]
|
|
|
|
if not isinstance(cond[key], list)
|
|
|
|
else list(map(lambda x: x[:batch_size], cond[key]))
|
|
|
|
for key in cond
|
|
|
|
}
|
2021-12-21 02:23:41 +00:00
|
|
|
else:
|
2022-08-26 07:15:42 +00:00
|
|
|
cond = (
|
|
|
|
[c[:batch_size] for c in cond]
|
|
|
|
if isinstance(cond, list)
|
|
|
|
else cond[:batch_size]
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
if start_T is not None:
|
|
|
|
timesteps = min(timesteps, start_T)
|
2022-08-26 07:15:42 +00:00
|
|
|
iterator = (
|
|
|
|
tqdm(
|
|
|
|
reversed(range(0, timesteps)),
|
|
|
|
desc='Progressive Generation',
|
|
|
|
total=timesteps,
|
|
|
|
)
|
|
|
|
if verbose
|
|
|
|
else reversed(range(0, timesteps))
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
if type(temperature) == float:
|
|
|
|
temperature = [temperature] * timesteps
|
|
|
|
|
|
|
|
for i in iterator:
|
|
|
|
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
|
|
|
|
if self.shorten_cond_schedule:
|
|
|
|
assert self.model.conditioning_key != 'hybrid'
|
|
|
|
tc = self.cond_ids[ts].to(cond.device)
|
2022-08-26 07:15:42 +00:00
|
|
|
cond = self.q_sample(
|
|
|
|
x_start=cond, t=tc, noise=torch.randn_like(cond)
|
|
|
|
)
|
|
|
|
|
|
|
|
img, x0_partial = self.p_sample(
|
|
|
|
img,
|
|
|
|
cond,
|
|
|
|
ts,
|
|
|
|
clip_denoised=self.clip_denoised,
|
|
|
|
quantize_denoised=quantize_denoised,
|
|
|
|
return_x0=True,
|
|
|
|
temperature=temperature[i],
|
|
|
|
noise_dropout=noise_dropout,
|
|
|
|
score_corrector=score_corrector,
|
|
|
|
corrector_kwargs=corrector_kwargs,
|
|
|
|
)
|
2021-12-21 02:23:41 +00:00
|
|
|
if mask is not None:
|
|
|
|
assert x0 is not None
|
|
|
|
img_orig = self.q_sample(x0, ts)
|
2022-08-26 07:15:42 +00:00
|
|
|
img = img_orig * mask + (1.0 - mask) * img
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
if i % log_every_t == 0 or i == timesteps - 1:
|
|
|
|
intermediates.append(x0_partial)
|
2022-08-26 07:15:42 +00:00
|
|
|
if callback:
|
|
|
|
callback(i)
|
|
|
|
if img_callback:
|
|
|
|
img_callback(img, i)
|
2021-12-21 02:23:41 +00:00
|
|
|
return img, intermediates
|
|
|
|
|
|
|
|
    @torch.no_grad()
    def p_sample_loop(
        self,
        cond,
        shape,
        return_intermediates=False,
        x_T=None,
        verbose=True,
        callback=None,
        timesteps=None,
        quantize_denoised=False,
        mask=None,
        x0=None,
        img_callback=None,
        start_T=None,
        log_every_t=None,
    ):
        """Ancestral sampling loop: iterate ``p_sample`` from pure noise
        (or ``x_T``) down to t=0.

        ``mask``/``x0`` enable inpainting: at every step the unmasked region
        is re-injected from a freshly noised ``x0``. Returns the final image,
        plus the list of logged intermediates when ``return_intermediates``.
        """

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = (
            tqdm(
                reversed(range(0, timesteps)),
                desc='Sampling t',
                total=timesteps,
            )
            if verbose
            else reversed(range(0, timesteps))
        )

        if mask is not None:
            assert x0 is not None
            # NOTE(review): this compares only dim 2 (a length-1 slice), not
            # the full spatial extent — presumably intended as a spatial-size
            # check; kept as-is to preserve behavior.
            assert (
                x0.shape[2:3] == mask.shape[2:3]
            )  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                # Noise the conditioning itself according to the shortened
                # conditioning schedule.
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(
                    x_start=cond, t=tc, noise=torch.randn_like(cond)
                )

            img = self.p_sample(
                img,
                cond,
                ts,
                clip_denoised=self.clip_denoised,
                quantize_denoised=quantize_denoised,
            )
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1.0 - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img
|
|
|
|
|
|
|
|
    @torch.no_grad()
    def sample(
        self,
        cond,
        batch_size=16,
        return_intermediates=False,
        x_T=None,
        verbose=True,
        timesteps=None,
        quantize_denoised=False,
        mask=None,
        x0=None,
        shape=None,
        **kwargs,
    ):
        """Convenience wrapper around ``p_sample_loop``.

        Builds a default latent shape from the model config when ``shape`` is
        None and trims the conditioning to ``batch_size``.

        NOTE(review): ``**kwargs`` are accepted but deliberately not
        forwarded to ``p_sample_loop`` — extra sampler options (e.g. DDIM
        eta) are silently ignored here; ``sample_log`` handles those.
        """
        if shape is None:
            shape = (
                batch_size,
                self.channels,
                self.image_size,
                self.image_size,
            )
        if cond is not None:
            # Same conditioning-trim logic as progressive_denoising: dict
            # values may be lists of tensors.
            if isinstance(cond, dict):
                cond = {
                    key: cond[key][:batch_size]
                    if not isinstance(cond[key], list)
                    else list(map(lambda x: x[:batch_size], cond[key]))
                    for key in cond
                }
            else:
                cond = (
                    [c[:batch_size] for c in cond]
                    if isinstance(cond, list)
                    else cond[:batch_size]
                )
        return self.p_sample_loop(
            cond,
            shape,
            return_intermediates=return_intermediates,
            x_T=x_T,
            verbose=verbose,
            timesteps=timesteps,
            quantize_denoised=quantize_denoised,
            mask=mask,
            x0=x0,
        )
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
@torch.no_grad()
|
2022-08-26 07:15:42 +00:00
|
|
|
def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
|
2021-12-22 14:57:23 +00:00
|
|
|
|
|
|
|
if ddim:
|
|
|
|
ddim_sampler = DDIMSampler(self)
|
|
|
|
shape = (self.channels, self.image_size, self.image_size)
|
2022-08-26 07:15:42 +00:00
|
|
|
samples, intermediates = ddim_sampler.sample(
|
|
|
|
ddim_steps, batch_size, shape, cond, verbose=False, **kwargs
|
|
|
|
)
|
2021-12-22 14:57:23 +00:00
|
|
|
|
|
|
|
else:
|
2022-08-26 07:15:42 +00:00
|
|
|
samples, intermediates = self.sample(
|
|
|
|
cond=cond,
|
|
|
|
batch_size=batch_size,
|
|
|
|
return_intermediates=True,
|
|
|
|
**kwargs,
|
|
|
|
)
|
2021-12-22 14:57:23 +00:00
|
|
|
|
|
|
|
return samples, intermediates
|
|
|
|
|
2022-10-25 04:30:48 +00:00
|
|
|
@torch.no_grad()
|
|
|
|
def get_unconditional_conditioning(self, batch_size, null_label=None):
|
|
|
|
if null_label is not None:
|
|
|
|
xc = null_label
|
|
|
|
if isinstance(xc, ListConfig):
|
|
|
|
xc = list(xc)
|
|
|
|
if isinstance(xc, dict) or isinstance(xc, list):
|
|
|
|
c = self.get_learned_conditioning(xc)
|
|
|
|
else:
|
|
|
|
if hasattr(xc, "to"):
|
|
|
|
xc = xc.to(self.device)
|
|
|
|
c = self.get_learned_conditioning(xc)
|
|
|
|
else:
|
|
|
|
# todo: get null label from cond_stage_model
|
|
|
|
raise NotImplementedError()
|
|
|
|
c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device)
|
|
|
|
return c
|
|
|
|
|
2021-12-22 14:57:23 +00:00
|
|
|
    @torch.no_grad()
    def log_images(
        self,
        batch,
        N=8,
        n_row=4,
        sample=True,
        ddim_steps=50,
        ddim_eta=1.0,
        return_keys=None,
        quantize_denoised=True,
        inpaint=False,
        plot_denoise_rows=False,
        plot_progressive_rows=False,
        plot_diffusion_rows=False,
        **kwargs,
    ):
        """Assemble a dict of visualization tensors for logging.

        Always logs inputs/reconstructions and (when available) a rendering
        of the conditioning; optional flags add diffusion rows, samples,
        CFG-scaled samples, quantized-denoised samples, inpainting/outpainting
        demos, and progressive-denoising rows. ``return_keys`` filters the
        returned dict.
        """

        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(
            batch,
            self.first_stage_key,
            return_first_stage_outputs=True,
            force_c_encode=True,
            return_original_cond=True,
            bs=N,
        )
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log['inputs'] = x
        log['reconstruction'] = xrec
        if self.model.conditioning_key is not None:
            # Render the conditioning in whichever form it takes.
            if hasattr(self.cond_stage_model, 'decode'):
                xc = self.cond_stage_model.decode(c)
                log['conditioning'] = xc
            elif self.cond_stage_key in ['caption']:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch['caption'])
                log['conditioning'] = xc
            elif self.cond_stage_key == 'class_label':
                xc = log_txt_as_img(
                    (x.shape[2], x.shape[3]), batch['human_label']
                )
                log['conditioning'] = xc
            elif isimage(xc):
                log['conditioning'] = xc
            if ismap(xc):
                log['original_conditioning'] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row: forward-noise z at logged timesteps and
            # decode each to image space.
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(
                diffusion_row
            )  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(
                diffusion_grid, 'b n c h w -> (b n) c h w'
            )
            diffusion_grid = make_grid(
                diffusion_grid, nrow=diffusion_row.shape[0]
            )
            log['diffusion_row'] = diffusion_grid

        if sample:
            # get denoise row (samples under the EMA weights)
            with self.ema_scope('Plotting'):
                samples, z_denoise_row = self.sample_log(
                    cond=c,
                    batch_size=N,
                    ddim=use_ddim,
                    ddim_steps=ddim_steps,
                    eta=ddim_eta,
                )
            x_samples = self.decode_first_stage(samples)
            log['samples'] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log['denoise_row'] = denoise_grid

            # Classifier-free-guidance samples against an empty-prompt
            # unconditional embedding, at a fixed scale of 5.0.
            uc = self.get_learned_conditioning(len(c) * [''])
            sample_scaled, _ = self.sample_log(
                cond=c,
                batch_size=N,
                ddim=use_ddim,
                ddim_steps=ddim_steps,
                eta=ddim_eta,
                unconditional_guidance_scale=5.0,
                unconditional_conditioning=uc,
            )
            log['samples_scaled'] = self.decode_first_stage(sample_scaled)

            if (
                quantize_denoised
                and not isinstance(self.first_stage_model, AutoencoderKL)
                and not isinstance(self.first_stage_model, IdentityFirstStage)
            ):
                # also display when quantizing x0 while sampling
                with self.ema_scope('Plotting Quantized Denoised'):
                    samples, z_denoise_row = self.sample_log(
                        cond=c,
                        batch_size=N,
                        ddim=use_ddim,
                        ddim_steps=ddim_steps,
                        eta=ddim_eta,
                        quantize_denoised=True,
                    )
                x_samples = self.decode_first_stage(samples.to(self.device))
                log['samples_x0_quantized'] = x_samples

        if inpaint:
            # make a simple center square
            b, h, w = z.shape[0], z.shape[2], z.shape[3]
            mask = torch.ones(N, h, w).to(self.device)
            # zeros will be filled in
            mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0
            mask = mask[:, None, ...]
            with self.ema_scope('Plotting Inpaint'):

                samples, _ = self.sample_log(
                    cond=c,
                    batch_size=N,
                    ddim=use_ddim,
                    eta=ddim_eta,
                    ddim_steps=ddim_steps,
                    x0=z[:N],
                    mask=mask,
                )
            x_samples = self.decode_first_stage(samples.to(self.device))
            log['samples_inpainting'] = x_samples
            log['mask'] = mask

            # outpaint
            # NOTE(review): this reuses the same mask as the inpainting pass
            # rather than its inverse; kept as-is to preserve behavior.
            with self.ema_scope('Plotting Outpaint'):
                samples, _ = self.sample_log(
                    cond=c,
                    batch_size=N,
                    ddim=use_ddim,
                    eta=ddim_eta,
                    ddim_steps=ddim_steps,
                    x0=z[:N],
                    mask=mask,
                )
            x_samples = self.decode_first_stage(samples.to(self.device))
            log['samples_outpainting'] = x_samples

        if plot_progressive_rows:
            with self.ema_scope('Plotting Progressives'):
                img, progressives = self.progressive_denoising(
                    c,
                    shape=(self.channels, self.image_size, self.image_size),
                    batch_size=N,
                )
            prog_row = self._get_denoise_row_from_list(
                progressives, desc='Progressive Generation'
            )
            log['progressive_row'] = prog_row

        if return_keys:
            # Return only the requested keys, unless none of them exist.
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log
|
|
|
|
|
|
|
|
    def configure_optimizers(self):
        """Build the AdamW optimizer (and optional LambdaLR scheduler).

        When an embedding manager is present (textual inversion), ONLY the
        embedding parameters are optimized; otherwise the diffusion model
        plus, optionally, the conditioner parameters and the learned logvar.
        """
        lr = self.learning_rate

        if self.embedding_manager is not None:
            # Textual-inversion mode: train just the new token embeddings.
            params = list(self.embedding_manager.embedding_parameters())
            # params = list(self.cond_stage_model.transformer.text_model.embeddings.embedding_manager.embedding_parameters())
        else:
            params = list(self.model.parameters())
            if self.cond_stage_trainable:
                print(
                    f'{self.__class__.__name__}: Also optimizing conditioner params!'
                )
                params = params + list(self.cond_stage_model.parameters())
            if self.learn_logvar:
                print('Diffusion model optimizing logvar')
                params.append(self.logvar)
        opt = torch.optim.AdamW(params, lr=lr)
        if self.use_scheduler:
            assert 'target' in self.scheduler_config
            # Instantiated object supplies the per-step LR multiplier via
            # its `.schedule` callable.
            scheduler = instantiate_from_config(self.scheduler_config)

            print('Setting up LambdaLR scheduler...')
            scheduler = [
                {
                    'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1,
                }
            ]
            return [opt], scheduler
        return opt
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
def to_rgb(self, x):
|
|
|
|
x = x.float()
|
2022-08-26 07:15:42 +00:00
|
|
|
if not hasattr(self, 'colorize'):
|
2021-12-21 02:23:41 +00:00
|
|
|
self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
|
|
|
|
x = nn.functional.conv2d(x, weight=self.colorize)
|
2022-08-26 07:15:42 +00:00
|
|
|
x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0
|
2021-12-21 02:23:41 +00:00
|
|
|
return x
|
|
|
|
|
2022-08-23 22:26:28 +00:00
|
|
|
    @rank_zero_only
    def on_save_checkpoint(self, checkpoint):
        """Checkpoint hook (rank 0 only): persist embeddings instead of weights.

        Empties the Lightning checkpoint dict — the full model is deliberately
        NOT saved — and writes the embedding manager's state to
        ``embeddings.pt`` in the checkpoint directory, plus a step-stamped
        snapshot at most every 500 global steps.
        """
        checkpoint.clear()

        if os.path.isdir(self.trainer.checkpoint_callback.dirpath):
            self.embedding_manager.save(
                os.path.join(
                    self.trainer.checkpoint_callback.dirpath, 'embeddings.pt'
                )
            )

            if (self.global_step - self.emb_ckpt_counter) > 500:
                self.embedding_manager.save(
                    os.path.join(
                        self.trainer.checkpoint_callback.dirpath,
                        f'embeddings_gs-{self.global_step}.pt',
                    )
                )

                self.emb_ckpt_counter += 500
|
|
|
|
|
2021-12-21 02:23:41 +00:00
|
|
|
|
|
|
|
class DiffusionWrapper(pl.LightningModule):
    """Routes conditioning into the wrapped diffusion U-Net.

    Supports concatenation into the input channels ('concat'),
    cross-attention context ('crossattn'), both ('hybrid'), or a class-style
    embedding ('adm'); ``None`` runs the model unconditionally.
    """

    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [
            None,
            'concat',
            'crossattn',
            'hybrid',
            'adm',
        ]

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
        key = self.conditioning_key
        if key is None:
            return self.diffusion_model(x, t)
        if key == 'concat':
            stacked = torch.cat([x] + c_concat, dim=1)
            return self.diffusion_model(stacked, t)
        if key == 'crossattn':
            context = torch.cat(c_crossattn, 1)
            return self.diffusion_model(x, t, context=context)
        if key == 'hybrid':
            context = torch.cat(c_crossattn, 1)
            stacked = torch.cat([x] + c_concat, dim=1)
            return self.diffusion_model(stacked, t, context=context)
        if key == 'adm':
            return self.diffusion_model(x, t, y=c_crossattn[0])
        raise NotImplementedError()
|
|
|
|
|
|
|
|
|
|
|
|
class Layout2ImgDiffusion(LatentDiffusion):
    """LatentDiffusion specialization for bounding-box layout conditioning."""

    # TODO: move all layout-specific hacks to this class
    def __init__(self, cond_stage_key, *args, **kwargs):
        assert (
            cond_stage_key == 'coordinates_bbox'
        ), 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
        super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)

    def log_images(self, batch, N=8, *args, **kwargs):
        """Extend the parent's image logging with a rendered bbox layout.

        Uses the current dataset's conditional builder to plot each tokenized
        bounding-box conditioning as a 256x256 image, stacked under the
        'bbox_image' key.
        """
        logs = super().log_images(batch=batch, N=N, *args, **kwargs)

        # Pick the dataset matching the current train/validation phase.
        key = 'train' if self.training else 'validation'
        dset = self.trainer.datamodule.datasets[key]
        mapper = dset.conditional_builders[self.cond_stage_key]

        bbox_imgs = []
        # Map a category number to its human-readable text label.
        map_fn = lambda catno: dset.get_textual_label(
            dset.get_category_id(catno)
        )
        for tknzd_bbox in batch[self.cond_stage_key][:N]:
            bboximg = mapper.plot(
                tknzd_bbox.detach().cpu(), map_fn, (256, 256)
            )
            bbox_imgs.append(bboximg)

        cond_img = torch.stack(bbox_imgs, dim=0)
        logs['bbox_image'] = cond_img
        return logs
|
2022-10-25 04:30:48 +00:00
|
|
|
|
|
|
|
class LatentInpaintDiffusion(LatentDiffusion):
    """LatentDiffusion variant whose U-Net input concatenates inpainting
    conditioning (a downsampled mask plus the encoded masked image)."""

    def __init__(
        self,
        concat_keys=("mask", "masked_image"),
        masked_image_key="masked_image",
        finetune_keys=None,
        *args,
        **kwargs,
    ):
        # NOTE(review): `finetune_keys` is accepted but unused in this
        # visible chunk — possibly consumed elsewhere or vestigial; confirm.
        super().__init__(*args, **kwargs)
        self.masked_image_key = masked_image_key
        assert self.masked_image_key in concat_keys
        self.concat_keys = concat_keys

    @torch.no_grad()
    def get_input(
        self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False
    ):
        """Build model inputs, appending mask/masked-image concat conditioning.

        Returns ``(z, all_conds[, x, xrec, xc])`` where ``all_conds`` is a
        dict with 'c_concat' (mask + encoded masked image, channel-stacked)
        and 'c_crossattn' (the usual conditioning).
        """
        # note: restricted to non-trainable encoders currently
        assert (
            not self.cond_stage_trainable
        ), "trainable cond stages not yet supported for inpainting"
        z, c, x, xrec, xc = super().get_input(
            batch,
            self.first_stage_key,
            return_first_stage_outputs=True,
            force_c_encode=True,
            return_original_cond=True,
            bs=bs,
        )

        assert exists(self.concat_keys)
        c_cat = list()
        for ck in self.concat_keys:
            # Batch tensors arrive channels-last; convert to NCHW floats.
            cc = (
                rearrange(batch[ck], "b h w c -> b c h w")
                .to(memory_format=torch.contiguous_format)
                .float()
            )
            if bs is not None:
                cc = cc[:bs]
            cc = cc.to(self.device)
            bchw = z.shape
            if ck != self.masked_image_key:
                # The mask is resized to the latent resolution...
                cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
            else:
                # ...while the masked image is encoded by the first stage.
                cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
            c_cat.append(cc)
        c_cat = torch.cat(c_cat, dim=1)
        all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
        if return_first_stage_outputs:
            return z, all_conds, x, xrec, xc
        return z, all_conds
|