Created using Colaboratory

nawnie 2023-02-18 23:54:27 -06:00
parent 0969941a74
commit bf2b128fd7
1 changed file with 14 additions and 9 deletions


@@ -68,7 +68,7 @@
 "outputs": [],
 "source": [
 "#@title Optional connect Gdrive\n",
-"#@markdown # but strongly recommended\n",
+"#@markdown # But strongly recommended\n",
 "#@markdown This will let you put all your training data and checkpoints directly on your drive. Much faster/easier to continue later, less setup time.\n",
 "\n",
 "#@markdown Creates /content/drive/MyDrive/everydreamlogs/ckpt\n",
@@ -82,8 +82,8 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
-"cellView": "form",
-"id": "hAuBbtSvGpau"
+"id": "hAuBbtSvGpau",
+"cellView": "form"
 },
 "outputs": [],
 "source": [
@@ -94,7 +94,7 @@
 "s = getoutput('nvidia-smi')\n",
 "!pip install -q torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url \"https://download.pytorch.org/whl/cu117\"\n",
 "!pip install -q transformers==4.25.1\n",
-"!pip install -q diffusers[torch]==0.10.2\n",
+"!pip install -q diffusers[torch]==0.13.0\n",
 "!pip install -q pynvml==11.4.1\n",
 "!pip install -q bitsandbytes==0.35.0\n",
 "!pip install -q ftfy==6.1.1\n",
@@ -290,7 +290,7 @@
 "\n",
 "#@markdown * Batch size impacts VRAM use. 8 should work on SD1.x models and 5 for SD2.x models at 512 resolution. Lower this if you get CUDA out of memory errors. You can check resources on your instance and watch the GPU RAM.\n",
 "\n",
-"Batch_Size = 6 #@param{type: 'number'}\n",
+"Batch_Size = 8 #@param{type: 'number'}\n",
 "\n",
 "#@markdown * Gradient accumulation is sort of like a virtual batch size increase use this to increase batch size with out increasing vram usage\n",
 "#@markdown Increasing from 1 to 2 will have a minor impact on vram use, but more beyond that will not.\n",
@@ -306,7 +306,7 @@
 "\n",
 "#@markdown * Max Epochs to train for, this defines how many total times all your training data is used. Default of 100 is a good start if you are training ~30-40 images of one subject. If you have 100 images, you can reduce this to 40-50 and so forth.\n",
 "\n",
-"Max_Epochs = 100 #@param {type:\"slider\", min:0, max:200, step:5}\n",
+"Max_Epochs = 200 #@param {type:\"slider\", min:0, max:200, step:5}\n",
 "\n",
 "#@markdown * How often to save checkpoints.\n",
 "Save_every_N_epoch = 20 #@param{type:\"integer\"}\n",
@@ -329,7 +329,12 @@
 "#@markdown * Using the same seed each time you train allows for more accurate a/b comparison of models, leave at -1 for random\n",
 "#@markdown * The seed also effects your training samples, if you want the same seed each sample you will need to change it from -1\n",
 "Training_Seed = -1 #@param{type:\"integer\"}\n",
-"\n",
+"#@markdown * use this option to configure a sample_prompts.json\n",
+"#@markdown * check out /content/EveryDream2trainer/doc/logging.md. for more details\n",
+"Advance_Samples = False #@param{type:\"boolean\"}\n",
+"Sample_File = \"sample_prompts.txt\"\n",
+"if Advance_Samples:\n",
+" Sample_File = \"sample_prompts.json\"\n",
 "#@markdown * Checkpointing Saves Vram to allow larger batch sizes minor slow down on a single batch size but will can allow room for a higher traning resolution (suggested on Colab Free tier, turn off for A100)\n",
 "Gradient_checkpointing = True #@param{type:\"boolean\"}\n",
 "Disable_Xformers = False #@param{type:\"boolean\"}\n",
@@ -405,7 +410,7 @@
 " --max_epochs $Max_Epochs \\\n",
 " --project_name \"$Project_Name\" \\\n",
 " --resolution $Resolution \\\n",
-" --sample_prompts \"sample_prompts.txt\" \\\n",
+" --sample_prompts \"$Sample_File\" \\\n",
 " --sample_steps $Steps_between_samples \\\n",
 " --save_every_n_epoch $Save_every_N_epoch \\\n",
 " --seed $Training_Seed \\\n",