mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Commit: add code
models/first_stage_models/kl-f16/config.yaml (new file, 44 lines)
@@ -0,0 +1,44 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: val/rec_loss
    embed_dim: 16
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 1.0e-06
        disc_weight: 0.5
    ddconfig:
      double_z: true
      z_channels: 16
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 1
      - 2
      - 2
      - 4
      num_res_blocks: 2
      attn_resolutions:
      - 16
      dropout: 0.0
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 6
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        size: 384
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        size: 384
        crop_size: 256
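Every file in this commit follows the same target/params convention: target names a class by its dotted import path and params supplies its keyword arguments. A minimal sketch of how such a config becomes a live model, assuming the upstream ldm.util.instantiate_from_config helper (re-implemented here with importlib so the snippet stands alone):

import importlib
from omegaconf import OmegaConf

def instantiate_from_config(config):
    # Resolve config["target"] to a class and construct it with config["params"].
    module, cls = config["target"].rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)(**config.get("params", dict()))

config = OmegaConf.load("models/first_stage_models/kl-f16/config.yaml")
model = instantiate_from_config(config.model)  # -> ldm.models.autoencoder.AutoencoderKL

The same mechanism recurses through nested specs such as lossconfig wherever a target key appears.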
models/first_stage_models/kl-f32/config.yaml (new file, 46 lines)
@@ -0,0 +1,46 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: val/rec_loss
    embed_dim: 64
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 1.0e-06
        disc_weight: 0.5
    ddconfig:
      double_z: true
      z_channels: 64
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 1
      - 2
      - 2
      - 4
      - 4
      num_res_blocks: 2
      attn_resolutions:
      - 16
      - 8
      dropout: 0.0
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 6
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        size: 384
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        size: 384
        crop_size: 256
models/first_stage_models/kl-f4/config.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: val/rec_loss
    embed_dim: 3
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 1.0e-06
        disc_weight: 0.5
    ddconfig:
      double_z: true
      z_channels: 3
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 2
      - 4
      num_res_blocks: 2
      attn_resolutions: []
      dropout: 0.0
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 10
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        size: 384
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        size: 384
        crop_size: 256
models/first_stage_models/kl-f8/config.yaml (new file, 42 lines)
@@ -0,0 +1,42 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.AutoencoderKL
  params:
    monitor: val/rec_loss
    embed_dim: 4
    lossconfig:
      target: ldm.modules.losses.LPIPSWithDiscriminator
      params:
        disc_start: 50001
        kl_weight: 1.0e-06
        disc_weight: 0.5
    ddconfig:
      double_z: true
      z_channels: 4
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 2
      - 4
      - 4
      num_res_blocks: 2
      attn_resolutions: []
      dropout: 0.0
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 4
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        size: 384
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        size: 384
        crop_size: 256
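The f in the kl-f4/kl-f8/kl-f16/kl-f32 names is the autoencoder's spatial downsampling factor: each additional entry in ch_mult adds one stride-2 encoder stage, so f = 2 ** (len(ch_mult) - 1). A quick arithmetic check against the four configs above (an illustration, not code from this commit):

configs = {
    "kl-f4":  [1, 2, 4],
    "kl-f8":  [1, 2, 4, 4],
    "kl-f16": [1, 1, 2, 2, 4],
    "kl-f32": [1, 1, 2, 2, 4, 4],
}
for name, ch_mult in configs.items():
    f = 2 ** (len(ch_mult) - 1)   # one stride-2 stage between successive levels
    print(f"{name}: f={f}, 256x256 input -> {256 // f}x{256 // f} latent grid")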
models/first_stage_models/vq-f16/config.yaml (new file, 49 lines)
@@ -0,0 +1,49 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.VQModel
  params:
    embed_dim: 8
    n_embed: 16384
    ddconfig:
      double_z: false
      z_channels: 8
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 1
      - 2
      - 2
      - 4
      num_res_blocks: 2
      attn_resolutions:
      - 16
      dropout: 0.0
    lossconfig:
      target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
      params:
        disc_conditional: false
        disc_in_channels: 3
        disc_start: 250001
        disc_weight: 0.75
        disc_num_layers: 2
        codebook_weight: 1.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 14
    num_workers: 20
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        size: 384
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        size: 384
        crop_size: 256
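For the VQ autoencoders, n_embed is the codebook size and embed_dim the width of each code vector. A hypothetical shape sketch of the nearest-neighbour lookup a quantizer of this kind performs, using vq-f16's numbers (the real implementation is in the taming-transformers package referenced by lossconfig):

import torch

n_embed, embed_dim = 16384, 8           # from the config above
codebook = torch.randn(n_embed, embed_dim)
z = torch.randn(1, embed_dim, 16, 16)   # encoder output for a 256x256 input at f=16
z_flat = z.permute(0, 2, 3, 1).reshape(-1, embed_dim)   # one row per spatial position
indices = torch.cdist(z_flat, codebook).argmin(dim=1)   # nearest codebook entry each
z_q = codebook[indices].view(1, 16, 16, embed_dim).permute(0, 3, 1, 2)  # quantized latent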
models/first_stage_models/vq-f4-noattn/config.yaml (new file, 46 lines)
@@ -0,0 +1,46 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.VQModel
  params:
    embed_dim: 3
    n_embed: 8192
    monitor: val/rec_loss

    ddconfig:
      attn_type: none
      double_z: false
      z_channels: 3
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 2
      - 4
      num_res_blocks: 2
      attn_resolutions: []
      dropout: 0.0
    lossconfig:
      target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
      params:
        disc_conditional: false
        disc_in_channels: 3
        disc_start: 11
        disc_weight: 0.75
        codebook_weight: 1.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 8
    num_workers: 12
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        crop_size: 256
models/first_stage_models/vq-f4/config.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.VQModel
  params:
    embed_dim: 3
    n_embed: 8192
    monitor: val/rec_loss

    ddconfig:
      double_z: false
      z_channels: 3
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 2
      - 4
      num_res_blocks: 2
      attn_resolutions: []
      dropout: 0.0
    lossconfig:
      target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
      params:
        disc_conditional: false
        disc_in_channels: 3
        disc_start: 0
        disc_weight: 0.75
        codebook_weight: 1.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 8
    num_workers: 16
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        crop_size: 256
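Each data: section targets main.DataModuleFromConfig, which reads train/validation as nested target/params dataset specs and applies batch_size, num_workers, and wrap to the loaders it builds. A hedged sketch of that contract (WrappedDataset and make_loader are illustrative names, not the upstream implementation):

from torch.utils.data import DataLoader, Dataset

class WrappedDataset(Dataset):
    # Illustrative reading of wrap: true -- expose any indexable object as a Dataset.
    def __init__(self, data):
        self.data = data
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        return self.data[idx]

def make_loader(dataset_cfg, batch_size, num_workers=0, wrap=False, shuffle=True):
    ds = instantiate_from_config(dataset_cfg)   # helper sketched after kl-f16 above
    if wrap:
        ds = WrappedDataset(ds)
    return DataLoader(ds, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle)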
models/first_stage_models/vq-f8-n256/config.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.VQModel
  params:
    embed_dim: 4
    n_embed: 256
    monitor: val/rec_loss
    ddconfig:
      double_z: false
      z_channels: 4
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 2
      - 2
      - 4
      num_res_blocks: 2
      attn_resolutions:
      - 32
      dropout: 0.0
    lossconfig:
      target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
      params:
        disc_conditional: false
        disc_in_channels: 3
        disc_start: 250001
        disc_weight: 0.75
        codebook_weight: 1.0

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 10
    num_workers: 20
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        size: 384
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        size: 384
        crop_size: 256
models/first_stage_models/vq-f8/config.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
model:
  base_learning_rate: 4.5e-06
  target: ldm.models.autoencoder.VQModel
  params:
    embed_dim: 4
    n_embed: 16384
    monitor: val/rec_loss
    ddconfig:
      double_z: false
      z_channels: 4
      resolution: 256
      in_channels: 3
      out_ch: 3
      ch: 128
      ch_mult:
      - 1
      - 2
      - 2
      - 4
      num_res_blocks: 2
      attn_resolutions:
      - 32
      dropout: 0.0
    lossconfig:
      target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
      params:
        disc_conditional: false
        disc_in_channels: 3
        disc_num_layers: 2
        disc_start: 1
        disc_weight: 0.6
        codebook_weight: 1.0
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 10
    num_workers: 20
    wrap: true
    train:
      target: ldm.data.openimages.FullOpenImagesTrain
      params:
        size: 384
        crop_size: 256
    validation:
      target: ldm.data.openimages.FullOpenImagesValidation
      params:
        size: 384
        crop_size: 256
models/ldm/bsr_sr/config.yaml (new file, 80 lines)
@@ -0,0 +1,80 @@
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0155
    log_every_t: 100
    timesteps: 1000
    loss_type: l2
    first_stage_key: image
    cond_stage_key: LR_image
    image_size: 64
    channels: 3
    concat_mode: true
    cond_stage_trainable: false
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 6
        out_channels: 3
        model_channels: 160
        attention_resolutions:
        - 16
        - 8
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 2
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        monitor: val/rec_loss
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: torch.nn.Identity
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 64
    wrap: false
    num_workers: 12
    train:
      target: ldm.data.openimages.SuperresOpenImagesAdvancedTrain
      params:
        size: 256
        degradation: bsrgan_light
        downscale_f: 4
        min_crop_f: 0.5
        max_crop_f: 1.0
        random_crop: true
    validation:
      target: ldm.data.openimages.SuperresOpenImagesAdvancedValidation
      params:
        size: 256
        degradation: bsrgan_light
        downscale_f: 4
        min_crop_f: 0.5
        max_crop_f: 1.0
        random_crop: true
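A detail worth noting in this super-resolution config: cond_stage_config is torch.nn.Identity and concat_mode is true, so the LR_image conditioning passes through unchanged and is channel-concatenated with the 3-channel latent, which is why unet_config declares in_channels: 6. An illustrative shape check under that reading:

import torch

z_t = torch.randn(1, 3, 64, 64)      # noisy latent: channels: 3 at image_size: 64
lr_cond = torch.randn(1, 3, 64, 64)  # LR_image after the identity cond stage
unet_input = torch.cat([z_t, lr_cond], dim=1)
assert unet_input.shape[1] == 6      # matches unet_config.params.in_channels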
models/ldm/celeba256/config.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 64
    channels: 3
    cond_stage_trainable: false
    concat_mode: false
    monitor: val/loss
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: ldm.data.faceshq.CelebAHQTrain
      params:
        size: 256
    validation:
      target: ldm.data.faceshq.CelebAHQValidation
      params:
        size: 256
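For the unconditional models (cond_stage_config: __is_unconditional__), sampling is just drawing latents and decoding them. A hedged sketch using the repo's DDIMSampler; the checkpoint path and step count are placeholders, and the loading details may differ from the project's own scripts:

import torch
from omegaconf import OmegaConf
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config

config = OmegaConf.load("models/ldm/celeba256/config.yaml")
model = instantiate_from_config(config.model)
state = torch.load("models/ldm/celeba256/model.ckpt", map_location="cpu")["state_dict"]
model.load_state_dict(state, strict=False)
model.eval()

sampler = DDIMSampler(model)
with torch.no_grad():
    # shape = (channels, image_size, image_size) = (3, 64, 64) from the config
    samples, _ = sampler.sample(S=200, batch_size=4, shape=(3, 64, 64), eta=1.0, verbose=False)
    images = model.decode_first_stage(samples)  # back to 256x256 pixel space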
models/ldm/cin256/config.yaml (new file, 80 lines)
@@ -0,0 +1,80 @@
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 32
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 256
        attention_resolutions:
        - 4
        - 2
        - 1
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 4
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 512
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 4
        n_embed: 16384
        ddconfig:
          double_z: false
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions:
          - 32
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.ClassEmbedder
      params:
        embed_dim: 512
        key: class_label
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 64
    num_workers: 12
    wrap: false
    train:
      target: ldm.data.imagenet.ImageNetTrain
      params:
        config:
          size: 256
    validation:
      target: ldm.data.imagenet.ImageNetValidation
      params:
        config:
          size: 256
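Here conditioning goes through cross-attention rather than concatenation: ClassEmbedder's embed_dim: 512 deliberately matches the UNet's context_dim: 512. A hedged shape sketch of that path (the 1000-class vocabulary is an assumption about the ImageNet setup; the real ClassEmbedder lives in ldm.modules.encoders.modules):

import torch

n_classes, embed_dim = 1000, 512         # n_classes assumed; embed_dim from the config
embedder = torch.nn.Embedding(n_classes, embed_dim)
labels = torch.tensor([207])             # one class label per sample
context = embedder(labels)[:, None, :]   # (batch, seq_len=1, context_dim=512)
# the spatial transformer blocks attend from latent positions to this context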
models/ldm/ffhq256/config.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 64
    channels: 3
    cond_stage_trainable: false
    concat_mode: false
    monitor: val/loss
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 42
    num_workers: 5
    wrap: false
    train:
      target: ldm.data.faceshq.FFHQTrain
      params:
        size: 256
    validation:
      target: ldm.data.faceshq.FFHQValidation
      params:
        size: 256
models/ldm/inpainting_big/config.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0205
    log_every_t: 100
    timesteps: 1000
    loss_type: l1
    first_stage_key: image
    cond_stage_key: masked_image
    image_size: 64
    channels: 3
    concat_mode: true
    monitor: val/loss
    scheduler_config:
      target: ldm.lr_scheduler.LambdaWarmUpCosineScheduler
      params:
        verbosity_interval: 0
        warm_up_steps: 1000
        max_decay_steps: 50000
        lr_start: 0.001
        lr_max: 0.1
        lr_min: 0.0001
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 7
        out_channels: 3
        model_channels: 256
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_heads: 8
        resblock_updown: true
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        monitor: val/rec_loss
        ddconfig:
          attn_type: none
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: ldm.modules.losses.contperceptual.DummyLoss
    cond_stage_config: __is_first_stage__
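The inpainting UNet's in_channels: 7 decomposes naturally under this config: cond_stage_config: __is_first_stage__ reuses the VQ first stage to encode the masked_image (3 latent channels), the mask is carried at latent resolution (1 channel, an inference from the channel count), and both are concatenated with the 3-channel noisy latent via concat_mode: true. An illustrative accounting:

import torch

z_t = torch.randn(1, 3, 64, 64)     # noisy latent
masked = torch.randn(1, 3, 64, 64)  # first-stage encoding of the masked image
mask = torch.zeros(1, 1, 64, 64)    # binary mask downsampled to latent resolution
unet_in = torch.cat([z_t, masked, mask], dim=1)
assert unet_in.shape[1] == 7        # matches unet_config.params.in_channels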
models/ldm/layout2img-openimages256/config.yaml (new file, 81 lines)
@@ -0,0 +1,81 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0205
    log_every_t: 100
    timesteps: 1000
    loss_type: l1
    first_stage_key: image
    cond_stage_key: coordinates_bbox
    image_size: 64
    channels: 3
    conditioning_key: crossattn
    cond_stage_trainable: true
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 128
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 3
        context_dim: 512
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        monitor: val/rec_loss
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 512
        n_layer: 16
        vocab_size: 8192
        max_seq_len: 92
        use_tokenizer: false
    monitor: val/loss_simple_ema
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 24
    wrap: false
    num_workers: 10
    train:
      target: ldm.data.openimages.OpenImagesBBoxTrain
      params:
        size: 256
    validation:
      target: ldm.data.openimages.OpenImagesBBoxValidation
      params:
        size: 256
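With use_tokenizer: false, the BERTEmbedder here is fed pre-tokenized integer sequences, presumably discretized bounding-box and class tokens derived from the coordinates_bbox key: up to max_seq_len: 92 tokens from a vocab_size: 8192 vocabulary, embedded at n_embed: 512 to match context_dim: 512. A hedged shape sketch of the resulting cross-attention context:

import torch

tokens = torch.randint(0, 8192, (1, 92))  # vocab_size: 8192, max_seq_len: 92
emb = torch.nn.Embedding(8192, 512)       # n_embed: 512 == context_dim: 512
context = emb(tokens)                     # (1, 92, 512), the cross-attention context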
models/ldm/lsun_beds256/config.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: class_label
    image_size: 64
    channels: 3
    cond_stage_trainable: false
    concat_mode: false
    monitor: val/loss
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 224
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config: __is_unconditional__
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 48
    num_workers: 5
    wrap: false
    train:
      target: ldm.data.lsun.LSUNBedroomsTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNBedroomsValidation
      params:
        size: 256
models/ldm/lsun_churches256/config.yaml (new file, 92 lines)
@@ -0,0 +1,92 @@
model:
  base_learning_rate: 5.0e-05
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0155
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    loss_type: l1
    first_stage_key: image
    cond_stage_key: image
    image_size: 32
    channels: 4
    cond_stage_trainable: false
    concat_mode: false
    scale_by_std: true
    monitor: val/loss_simple_ema
    scheduler_config:
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps:
        - 10000
        cycle_lengths:
        - 10000000000000
        f_start:
        - 1.0e-06
        f_max:
        - 1.0
        f_min:
        - 1.0
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 192
        attention_resolutions:
        - 1
        - 2
        - 4
        - 8
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 2
        - 4
        - 4
        num_heads: 8
        use_scale_shift_norm: true
        resblock_updown: true
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config: '__is_unconditional__'

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 96
    num_workers: 5
    wrap: false
    train:
      target: ldm.data.lsun.LSUNChurchesTrain
      params:
        size: 256
    validation:
      target: ldm.data.lsun.LSUNChurchesValidation
      params:
        size: 256
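The scheduler_config here amounts to warmup-then-constant: with f_max == f_min == 1.0 and an effectively infinite cycle_length, LambdaLinearScheduler ramps the learning-rate multiplier from f_start to 1.0 over the first 10000 steps and then holds it. A sketch under that reading (the exact upstream formula may differ in detail):

def lr_multiplier(step, warm_up=10000, f_start=1.0e-06, f_max=1.0, f_min=1.0):
    if step < warm_up:
        return f_start + (f_max - f_start) * step / warm_up   # linear warmup
    return f_min  # with f_max == f_min == 1.0, flat after warmup

effective_lr = 5.0e-05 * lr_multiplier(5000)  # mid-warmup: about half the base rate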
models/ldm/semantic_synthesis512/config.yaml (new file, 78 lines)
@@ -0,0 +1,78 @@
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0205
    log_every_t: 100
    timesteps: 1000
    loss_type: l1
    first_stage_key: image
    cond_stage_key: segmentation
    image_size: 128
    channels: 3
    concat_mode: true
    cond_stage_trainable: true
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 128
        in_channels: 6
        out_channels: 3
        model_channels: 128
        attention_resolutions:
        - 32
        - 16
        - 8
        num_res_blocks: 2
        channel_mult:
        - 1
        - 4
        - 8
        num_heads: 8
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        monitor: val/rec_loss
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.SpatialRescaler
      params:
        n_stages: 2
        in_channels: 182
        out_channels: 3
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 8
    wrap: false
    num_workers: 10
    train:
      target: ldm.data.landscapes.RFWTrain
      params:
        size: 768
        crop_size: 512
        segmentation_to_float32: true
    validation:
      target: ldm.data.landscapes.RFWValidation
      params:
        size: 768
        crop_size: 512
        segmentation_to_float32: true
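The segmentation conditioning here is worth unpacking: SpatialRescaler downsamples the 182-channel segmentation map over n_stages: 2 (a factor of 4, e.g. 512 -> 128, matching image_size: 128) and projects it to out_channels: 3, which concat_mode: true then stacks with the 3-channel latent to give the UNet's in_channels: 6. An illustrative sketch of that path:

import torch
import torch.nn.functional as F

seg = torch.randn(1, 182, 512, 512)            # one-hot segmentation, in_channels: 182
x = seg
for _ in range(2):                             # n_stages: 2 -> overall factor 4
    x = F.interpolate(x, scale_factor=0.5, mode="bilinear", align_corners=False)
proj = torch.nn.Conv2d(182, 3, kernel_size=1)  # 1x1 projection to out_channels: 3
cond = proj(x)                                 # (1, 3, 128, 128), concatenated with the latent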
models/ldm/text2img256/config.yaml (new file, 77 lines)
@@ -0,0 +1,77 @@
model:
  base_learning_rate: 2.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0195
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 64
    channels: 3
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 3
        out_channels: 3
        model_channels: 192
        attention_resolutions:
        - 8
        - 4
        - 2
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 3
        - 5
        num_head_channels: 32
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 640
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.BERTEmbedder
      params:
        n_embed: 640
        n_layer: 32
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 28
    num_workers: 10
    wrap: false
    train:
      target: ldm.data.previews.pytorch_dataset.PreviewsTrain
      params:
        size: 256
    validation:
      target: ldm.data.previews.pytorch_dataset.PreviewsValidation
      params:
        size: 256
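Finally, the text-to-image config ties BERTEmbedder's n_embed: 640 to the UNet's context_dim: 640, so caption tokens become a sequence of 640-d vectors consumed by cross-attention. A hedged shape walk-through (the sequence length of 77 is an assumption, not taken from this file):

import torch

batch, seq_len, context_dim = 28, 77, 640  # batch_size from data:; context_dim from unet_config
context = torch.randn(batch, seq_len, context_dim)
z_t = torch.randn(batch, 3, 64, 64)        # channels: 3 at image_size: 64
# in the repo, model.apply_model(z_t, t, context) (or similar) routes `context`
# into the spatial transformer's cross-attention layers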