move models and modules under invokeai/backend/ldm

This commit is contained in:
Lincoln Stein
2023-03-01 18:24:18 -05:00
parent 2c7928b163
commit 850d1ee984
67 changed files with 79 additions and 79 deletions

View File

@ -1,6 +1,6 @@
model:
base_learning_rate: 5.0e-03
target: invokeai.models.diffusion.ddpm.LatentDiffusion
target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
@ -19,7 +19,7 @@ model:
embedding_reg_weight: 0.0
personalization_config:
target: ldm.modules.embedding_manager.EmbeddingManager
target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
params:
placeholder_strings: ["*"]
initializer_words: ["sculpture"]
@ -28,7 +28,7 @@ model:
progressive_words: False
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 4
@ -68,7 +68,7 @@ model:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
data:
target: main.DataModuleFromConfig
@ -77,14 +77,14 @@ data:
num_workers: 2
wrap: false
train:
target: ldm.data.personalized.PersonalizedBase
target: invokeai.backend.ldm.data.personalized.PersonalizedBase
params:
size: 512
set: train
per_image_tokens: false
repeats: 100
validation:
target: ldm.data.personalized.PersonalizedBase
target: invokeai.backend.ldm.data.personalized.PersonalizedBase
params:
size: 512
set: val

View File

@ -1,6 +1,6 @@
model:
base_learning_rate: 5.0e-03
target: invokeai.models.diffusion.ddpm.LatentDiffusion
target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
@ -19,7 +19,7 @@ model:
embedding_reg_weight: 0.0
personalization_config:
target: ldm.modules.embedding_manager.EmbeddingManager
target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
params:
placeholder_strings: ["*"]
initializer_words: ["painting"]
@ -27,7 +27,7 @@ model:
num_vectors_per_token: 1
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 4
@ -67,7 +67,7 @@ model:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
data:
target: main.DataModuleFromConfig
@ -76,14 +76,14 @@ data:
num_workers: 16
wrap: false
train:
target: ldm.data.personalized_style.PersonalizedBase
target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
params:
size: 512
set: train
per_image_tokens: false
repeats: 100
validation:
target: ldm.data.personalized_style.PersonalizedBase
target: invokeai.backend.ldm.data.personalized_style.PersonalizedBase
params:
size: 512
set: val

View File

@ -1,6 +1,6 @@
model:
base_learning_rate: 1.0e-04
target: invokeai.models.diffusion.ddpm.LatentDiffusion
target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
@ -18,7 +18,7 @@ model:
use_ema: False
scheduler_config: # 10000 warmup steps
target: ldm.lr_scheduler.LambdaLinearScheduler
target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
params:
warm_up_steps: [ 10000 ]
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@ -27,7 +27,7 @@ model:
f_min: [ 1. ]
personalization_config:
target: ldm.modules.embedding_manager.EmbeddingManager
target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
params:
placeholder_strings: ["*"]
initializer_words: ['sculpture']
@ -36,7 +36,7 @@ model:
progressive_words: False
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 4
@ -76,4 +76,4 @@ model:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder

View File

@ -1,6 +1,6 @@
model:
base_learning_rate: 7.5e-05
target: invokeai.models.diffusion.ddpm.LatentInpaintDiffusion
target: invokeai.backend.ldm.models.diffusion.ddpm.LatentInpaintDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
@ -18,7 +18,7 @@ model:
finetune_keys: null
scheduler_config: # 10000 warmup steps
target: ldm.lr_scheduler.LambdaLinearScheduler
target: invokeai.backend.ldm.lr_scheduler.LambdaLinearScheduler
params:
warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
@ -27,7 +27,7 @@ model:
f_min: [ 1. ]
personalization_config:
target: ldm.modules.embedding_manager.EmbeddingManager
target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
params:
placeholder_strings: ["*"]
initializer_words: ['sculpture']
@ -36,7 +36,7 @@ model:
progressive_words: False
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 9 # 4 data + 4 downscaled image + 1 mask
@ -76,4 +76,4 @@ model:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
target: invokeai.backend.ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder

View File

@ -1,6 +1,6 @@
model:
base_learning_rate: 5.0e-03
target: invokeai.models.diffusion.ddpm.LatentDiffusion
target: invokeai.backend.ldm.models.diffusion.ddpm.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
@ -19,7 +19,7 @@ model:
embedding_reg_weight: 0.0
personalization_config:
target: ldm.modules.embedding_manager.EmbeddingManager
target: invokeai.backend.ldm.modules.embedding_manager.EmbeddingManager
params:
placeholder_strings: ["*"]
initializer_words: ['sculpture']
@ -28,7 +28,7 @@ model:
progressive_words: False
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 4
@ -68,7 +68,7 @@ model:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
target: invokeai.backend.ldm.modules.encoders.modules.FrozenCLIPEmbedder
data:
target: main.DataModuleFromConfig
@ -77,14 +77,14 @@ data:
num_workers: 2
wrap: false
train:
target: ldm.data.personalized.PersonalizedBase
target: invokeai.backend.ldm.data.personalized.PersonalizedBase
params:
size: 512
set: train
per_image_tokens: false
repeats: 100
validation:
target: ldm.data.personalized.PersonalizedBase
target: invokeai.backend.ldm.data.personalized.PersonalizedBase
params:
size: 512
set: val

View File

@ -1,6 +1,6 @@
model:
base_learning_rate: 1.0e-4
target: invokeai.models.diffusion.ddpm.LatentDiffusion
target: invokeai.backend.models.diffusion.ddpm.LatentDiffusion
params:
parameterization: "v"
linear_start: 0.00085
@ -19,7 +19,7 @@ model:
use_ema: False # we set this to false because this is an inference only config
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
target: invokeai.backend.ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
use_checkpoint: True
use_fp16: True
@ -62,7 +62,7 @@ model:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
target: invokeai.backend.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
params:
freeze: True
layer: "penultimate"