diff --git a/Train_Colab.ipynb b/Train_Colab.ipynb
index 70267d6..39265fb 100644
--- a/Train_Colab.ipynb
+++ b/Train_Colab.ipynb
@@ -275,7 +275,7 @@
  "#@markdown * The learning rate affects how much \"training\" is done on the model per training step. It is a very careful balance to select a value that will learn your data and not wreck the model. \n",
  "#@markdown Leave this default unless you are very comfortable with training and know what you are doing.\n",
  "\n",
- "Learning_Rate = 1.e-6 #@param{type: 'number'}\n",
+ "Learning_Rate = 4e-6 #@param{type: 'number'}\n",
  "\n",
  "#@markdown * A learning rate scheduler can change your learning rate as training progresses.\n",
  "\n",
@@ -300,7 +300,7 @@
  "Gradient_steps = 1 #@param{type:\"slider\", min:1, max:10, step:1}\n",
  "\n",
  "#@markdown * Location on your Gdrive where your training images are.\n",
- "Dataset_Location = \"/content/drive/MyDrive/\" #@param {type:\"string\"}\n",
+ "Dataset_Location = \"/content/drive/MyDrive/training_samples\" #@param {type:\"string\"}\n",
  "dataset = Dataset_Location\n",
  "model = save_name\n",
  "\n",
@@ -338,18 +338,15 @@
  "#@markdown * You can turn off the text encoder training (generally not suggested)\n",
  "Disable_text_Encoder= False #@param{type:\"boolean\"}\n",
  "#@markdown * Skip the nth last layer of CLIP.\n",
- "Clip_skip = 0 #@param {type:\"slider\", min:0, max:4, step:1}\n",
- "Clip_skip=int(Clip_skip)\n",
- "#@markdown * n% to drop a random image caption.\n",
- "Conditional_DropOut = 4 #@param {type:\"slider\", min:0, max:30, step:1}\n",
- "cd=Conditional_DropOut/100\n",
- "#@markdown * Randomly flips n% of images.\n",
- "Picture_flip = 0 #@param {type:\"slider\", min:0, max:50, step:5}\n",
- "Flip=Picture_flip/100\n",
- "#@markdown * Use a contrast ratio between 0-10% for Best results.\n",
- "Ratio = 0 #@param {type:\"slider\", min:0, max:20, step:1}\n",
+ "Clip_skip = 1 #@param {type:\"slider\", min:0, max:4, step:1}\n",
+ "#@markdown * Ratio of captions dropped from the training data.\n",
+ "Conditional_DropOut = 0.04 #@param {type:\"slider\", min:0, max:0.3, step:0.01}\n",
+ "#@markdown * Ratio of images to randomly flip horizontally.\n",
+ "Picture_flip = 0.15 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n",
+ "#@markdown * This can improve contrast in light and dark scenes. Use a ratio between 0-10% for best results.\n",
+ "zero_frequency_noise = 0.1 #@param {type:\"slider\", min:0, max:0.25, step:0.01}\n",
  "\n",
- "#@markdown *Weights and Biases logging token. \n",
+ "#@markdown * Weights and Biases logging token. \n",
  "# #@markdown Paste your token here if you have an account so you can use it to track your training progress. If you don't have an account, you can create one for free at https://wandb.ai/site. Log will use your project name from above. This is a free online logging utility.\n",
  "# #@markdown Your key is on this page: https://wandb.ai/settings under \"Danger Zone\" \"API Keys\"\n",
  "wandb_token = '' #@param{type:\"string\"}\n",
@@ -400,9 +397,9 @@
  " --clip_skip $Clip_skip \\\n",
  " --batch_size $Batch_Size \\\n",
  " --grad_accum $Gradient_steps \\\n",
- " --cond_dropout $cd \\\n",
+ " --cond_dropout $Conditional_DropOut \\\n",
  " --data_root \"$dataset\" \\\n",
- " --flip_p $Flip \\\n",
+ " --flip_p $Picture_flip \\\n",
  " --lr $Learning_Rate \\\n",
  " --lr_scheduler \"$Schedule\" \\\n",
  " --max_epochs $Max_Epochs \\\n",
@@ -412,7 +409,7 @@
  " --sample_steps $Steps_between_samples \\\n",
  " --save_every_n_epoch $Save_every_N_epoch \\\n",
  " --seed $Training_Seed \\\n",
- " --zero_frequency_noise_ratio $Ratio \\\n",
+ " --zero_frequency_noise_ratio $zero_frequency_noise \\\n",
  " --useadam8bit \n",
  "\n"
 ]
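
Note on the new zero_frequency_noise slider (passed to the trainer as --zero_frequency_noise_ratio): zero-frequency, or "offset", noise shifts the diffusion training noise by a small per-channel constant, which is what helps the model handle very dark and very bright scenes. The snippet below is an illustrative sketch only, not the trainer's actual implementation; the function name and tensor shapes are assumptions for the example.

```python
import torch

def apply_zero_frequency_noise(noise: torch.Tensor, ratio: float) -> torch.Tensor:
    """Add a per-sample, per-channel constant offset to the training noise.

    Assumes `noise` is latent-space noise of shape (batch, channels, height, width).
    """
    # One random constant per sample and channel, broadcast over height/width.
    offset = torch.randn(noise.shape[0], noise.shape[1], 1, 1, device=noise.device)
    return noise + ratio * offset

# Example: a ratio of 0.1 matches the notebook's zero_frequency_noise default.
noise = torch.randn(4, 4, 64, 64)
noisy = apply_zero_frequency_noise(noise, ratio=0.1)
```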