change training example to constant lr

Victor Hall 2023-01-28 18:20:04 -05:00
parent 3df6dea29a
commit bc273d0512
2 changed files with 3 additions and 3 deletions


@@ -30,12 +30,12 @@ I recommend you copy one of the examples below and keep it in a text file for fu
 Training examples:
-Resuming from a checkpoint, 50 epochs, 6 batch size, 3e-6 learning rate, cosine scheduler, generate samples every 200 steps, 10 minute checkpoint interval, adam8bit, and using the default "input" folder for training data:
+Resuming from a checkpoint, 50 epochs, 6 batch size, 3e-6 learning rate, constant scheduler, generate samples every 200 steps, 10 minute checkpoint interval, adam8bit, and using the default "input" folder for training data:
 python train.py --resume_ckpt "sd_v1-5_vae" ^
 --max_epochs 50 ^
 --data_root "input" ^
---lr_scheduler cosine ^
+--lr_scheduler constant ^
 --project_name myproj ^
 --batch_size 6 ^
 --sample_steps 200 ^
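For reference: with --lr_scheduler constant the learning rate holds at the requested 3e-6 for the entire run, whereas cosine would decay it toward zero over training. Below is a minimal sketch of how such a flag can map onto stock PyTorch schedulers; the make_scheduler helper is illustrative only, not train.py's actual internals.

import torch

def make_scheduler(name, optimizer, total_steps):
    # Hypothetical helper: map a scheduler name onto a torch.optim scheduler.
    if name == "constant":
        # Hold the base LR (e.g. 3e-6) flat for the whole run.
        return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
    if name == "cosine":
        # Half-cosine decay from the base LR toward zero over total_steps.
        return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_steps)
    raise ValueError(f"unknown scheduler: {name}")

model = torch.nn.Linear(4, 4)  # stand-in for the real model
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-6)
scheduler = make_scheduler("constant", optimizer, total_steps=10_000)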


@@ -526,7 +526,7 @@ def main(args):
     betas = (0.9, 0.999)
     epsilon = 1e-8
     if args.amp:
-        epsilon = 2e-8
+        epsilon = 1e-8
     weight_decay = 0.01
     if args.useadam8bit:
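Note that after this change epsilon ends up as 1e-8 whether or not --amp is set, so the if args.amp branch no longer alters it (mixed-precision runs previously used 2e-8). A self-contained sketch of the surrounding optimizer setup follows, assuming bitsandbytes provides the 8-bit path; the variable names are stand-ins rather than train.py's exact code.

import argparse
import torch

# Stand-ins for train.py's parsed arguments and model parameters.
args = argparse.Namespace(amp=True, useadam8bit=False)
model = torch.nn.Linear(4, 4)
lr = 3e-6

betas = (0.9, 0.999)
epsilon = 1e-8
if args.amp:
    epsilon = 1e-8  # post-commit: AMP keeps the default eps (was 2e-8)
weight_decay = 0.01

if args.useadam8bit:
    import bitsandbytes as bnb  # optional dependency for 8-bit optimizers
    optimizer = bnb.optim.AdamW8bit(model.parameters(), lr=lr, betas=betas,
                                    eps=epsilon, weight_decay=weight_decay)
else:
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=betas,
                                  eps=epsilon, weight_decay=weight_decay)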