Created using Colaboratory
This commit is contained in:
parent
6a4e32f7cf
commit
927944a5ca
|
@ -251,35 +251,33 @@
|
|||
"\n",
|
||||
"#@markdown * The learning rate affects how much \"training\" is done on the model per training step. It is a very careful balance to select a value that will learn your data and not wreck the model. \n",
|
||||
"#@markdown Leave this default unless you are very comfortable with training and know what you are doing.\n",
|
||||
"\n",
|
||||
"Learning_Rate = 1e-6 #@param{type: 'number'}\n",
|
||||
"#@markdown * Choosing this will allow you to ignore any settings specific to the text encoder and will match it with the Unet's settings, recommended for beginners.\n",
|
||||
"Match_text_to_Unet = False #@param{type:\"boolean\"}\n",
|
||||
"Text_lr = 0.5e-6 #@param {type:\"number\"}\n",
|
||||
"#@markdown * A learning rate scheduler can change your learning rate as training progresses.\n",
|
||||
"#@markdown * I recommend sticking with constant until you are comfortable with general training. \n",
|
||||
"\n",
|
||||
"data['base']['lr'] = Learning_Rate\n",
|
||||
"Schedule = \"constant\" #@param [\"constant\", \"polynomial\", \"linear\", \"cosine\"] {allow-input: true}\n",
|
||||
"data['base']['lr_scheduler'] = Schedule\n",
|
||||
"data['text_encoder_overrides']['lr'] = Text_lr \n",
|
||||
"Text_lr_scheduler = \"constant\" #@param [\"constant\", \"polynomial\", \"linear\", \"cosine\"] {allow-input: true}\n",
|
||||
"data['text_encoder_overrides']['lr_scheduler'] = Text_lr_scheduler\n",
|
||||
"#@markdown * Warm-up steps are useful for validation and cosine LR schedules.\n",
|
||||
"lr_warmup_steps = 0 #@param{type:\"integer\"}\n",
|
||||
"data['base']['lr_warmup_steps'] = lr_warmup_steps\n",
|
||||
"lr_decay_steps = 0 #@param {type:\"number\"} \n",
|
||||
"data['base']['lr_decay_steps'] = lr_decay_steps\n",
|
||||
"Text_lr_warmup_steps = 0 #@param {type:\"number\"}\n",
|
||||
"data['text_encoder_overrides']['lr_warmup_steps'] = Text_lr_warmup_steps\n",
|
||||
"Text_lr_decay_steps = 0 #@param {type:\"number\"} \n",
|
||||
"data['text_encoder_overrides']['lr_decay_steps'] = Text_lr_decay_steps\n",
|
||||
"\n",
|
||||
"if Match_text_to_Unet:\n",
|
||||
" Text_lr = Learning_Rate\n",
|
||||
" Text_lr_scheduler = Schedule\n",
|
||||
" Text_lr_warmup_steps = lr_warmup_steps\n",
|
||||
"\n",
|
||||
"data['base']['lr'] = Learning_Rate\n",
|
||||
"data['text_encoder_overrides']['lr'] = Text_lr \n",
|
||||
"data['base']['lr_scheduler'] = Schedule\n",
|
||||
"data['text_encoder_overrides']['lr_scheduler'] = Text_lr_scheduler\n",
|
||||
"data['base']['lr_warmup_steps'] = lr_warmup_steps\n",
|
||||
"data['base']['lr_decay_steps'] = lr_decay_steps\n",
|
||||
"data['text_encoder_overrides']['lr_warmup_steps'] = Text_lr_warmup_steps\n",
|
||||
"data['text_encoder_overrides']['lr_decay_steps'] = Text_lr_decay_steps\n",
|
||||
"\n",
|
||||
"# Save the updated JSON data back to the file\n",
|
||||
"with open('optimizer.json', 'w') as file:\n",
|
||||
|
@ -450,12 +448,16 @@
|
|||
"# Finish the training process\n",
|
||||
"clear_output()\n",
|
||||
"time.sleep(2)\n",
|
||||
"print(\"Training is complete.\")\n",
|
||||
"print(\"Training is complete, select a model to start training again\")\n",
|
||||
"time.sleep(2)\n",
|
||||
"\n",
|
||||
"if Disconnect_after_training:\n",
|
||||
" print(\"Training is complete, in 30 seconds the instance will reset, you will need to chose a model and you can resume training again\")\n",
|
||||
" time.sleep(40)\n",
|
||||
" runtime.unassign()\n"
|
||||
" runtime.unassign()\n",
|
||||
"\n",
|
||||
"os.kill(os.getpid(), 9)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
|
Loading…
Reference in New Issue