# EveryDream-trainer/configs/stable-diffusion/v1-finetune_micro.yaml

model:
  base_learning_rate: 1.0e-6
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 64
    channels: 4
    cond_stage_trainable: true # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False
    unfreeze_model: True
    model_lr: 1.0e-6
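    # NOTE: unfreeze_model / model_lr are EveryDream-specific additions (not in
    # the stock SD config); they appear to unfreeze the whole diffusion model
    # for fine-tuning at model_lr, here set to match base_learning_rate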
    # scheduler_config: # 10000 warmup steps
    #   target: ldm.lr_scheduler.LambdaLinearScheduler
    #   params:
    #     warm_up_steps: [ 10000 ]
    #     cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
    #     f_start: [ 1.e-6 ]
    #     f_max: [ 1. ]
    #     f_min: [ 1. ]
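    # if re-enabled, LambdaLinearScheduler ramps the LR multiplier linearly from
    # f_start to f_max over warm_up_steps, then decays it toward f_min over the
    # cycle; with f_max = f_min = 1. the LR simply holds at base_learning_rate
    # after warmup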
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 512
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
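    # FrozenCLIPEmbedder wraps the frozen CLIP ViT-L/14 text encoder; its
    # 768-dim token embeddings are what context_dim: 768 in unet_config expects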

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 6
    num_workers: 8
    wrap: false
    train:
      target: ldm.data.every_dream.EveryDreamBatch
      params:
        size: 512
        repeats: 50 # try ~50-100 for micro models with 20-50 training images and 1-2 epochs
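        # illustrative arithmetic (hypothetical 30-image dataset): with
        # repeats: 50 and batch_size: 6, one epoch is 30 * 50 / 6 = 250 steps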
    validation:
      target: ldm.data.ed_validate.EDValidateBatch
      params:
        size: 384
        repeats: 0.4
    test:
      target: ldm.data.ed_validate.EDValidateBatch
      params:
        size: 512
        repeats: 0.2

lightning:
  modelcheckpoint:
    params:
      every_n_epochs: 1
      #every_n_train_steps: 1400 # can only use epoch or train step checkpoints
      save_last: True
      filename: "{epoch:02d}-{step:05d}"
  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 150
        max_images: 16
        increase_log_steps: False

  trainer:
    benchmark: True
    max_epochs: 1 # steps per epoch = (total training images) / batch_size * repeats; suggest 1-4 epochs depending on dataset size and repeats
    max_steps: 99000 # better to end on epochs, not steps, especially with >500 images, to ensure even distribution, but you can set this if you really want...
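    # Lightning stops at whichever of max_epochs / max_steps is reached first;
    # e.g. with the hypothetical 250-step epoch above, max_epochs: 1 ends the
    # run long before the 99000-step cap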
    check_val_every_n_epoch: 1
    #precision: 16 # need lightning 1.6+ ?? *WIP*
    #num_nodes: 2 # for multigpu *WIP*