diff --git a/Train_Colab.ipynb b/Train_Colab.ipynb
index f3d0203..0f76d25 100644
--- a/Train_Colab.ipynb
+++ b/Train_Colab.ipynb
@@ -290,7 +290,7 @@
 "\n",
 "#@markdown * Batch size impacts VRAM use. 8 should work on SD1.x models and 5 for SD2.x models at 512 resolution. Lower this if you get CUDA out-of-memory errors. You can check resources on your instance and watch the GPU RAM.\n",
 "\n",
-"Batch_Size = 8 #@param{type: 'number'}\n",
+"Batch_Size = 6 #@param{type: 'number'}\n",
 "\n",
 "#@markdown * Gradient accumulation is sort of like a virtual batch size increase; use it to raise the effective batch size without increasing VRAM usage.\n",
 "#@markdown Increasing it from 1 to 2 has a minor impact on VRAM use, but going beyond that does not.\n",
@@ -306,7 +306,7 @@
 "\n",
 "#@markdown * Max Epochs to train for; this defines how many total times all your training data is used. The default of 100 is a good start if you are training ~30-40 images of one subject. If you have 100 images, you can reduce this to 40-50, and so forth.\n",
 "\n",
-"Max_Epochs = 200 #@param {type:\"slider\", min:0, max:200, step:5}\n",
+"Max_Epochs = 100 #@param {type:\"slider\", min:0, max:200, step:5}\n",
 "\n",
 "#@markdown * How often to save checkpoints.\n",
 "Save_every_N_epoch = 20 #@param{type:\"integer\"}\n",
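
A note for readers tuning these two knobs together: the effective batch size is Batch_Size multiplied by the gradient-accumulation steps, so the new Batch_Size of 6 with accumulation set to 2 behaves like a batch of 12 while costing roughly the VRAM of 6. Below is a minimal PyTorch sketch of the accumulation pattern the markdown above describes. It is illustrative only; the model, optimizer, toy data, and the accum_steps name are assumptions made for this example, not the notebook's actual trainer.

    import torch
    from torch import nn

    # Minimal gradient-accumulation sketch (assumed setup, not the notebook's trainer).
    # With a micro-batch of 6 and accum_steps = 2, each optimizer.step() applies
    # gradients averaged over an effective batch of 12 samples.
    model = nn.Linear(16, 1)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    loss_fn = nn.MSELoss()
    accum_steps = 2

    # Toy stand-in for a DataLoader: 8 micro-batches of 6 samples each (assumption).
    batches = [(torch.randn(6, 16), torch.randn(6, 1)) for _ in range(8)]

    optimizer.zero_grad()
    for step, (x, y) in enumerate(batches):
        loss = loss_fn(model(x), y)
        (loss / accum_steps).backward()   # scale so accumulated grads form a mean
        if (step + 1) % accum_steps == 0:
            optimizer.step()              # one weight update per accum_steps micro-batches
            optimizer.zero_grad()

Only the 6-sample micro-batch is ever resident for the forward and backward pass; the extra samples are folded into the parameters' .grad buffers, which is consistent with the markdown's note that raising accumulation from 1 to 2 costs little additional VRAM.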