Merge pull request #77 from nawnie/main

Colab Update
This commit is contained in:
Victor Hall 2023-02-18 14:49:15 -05:00 committed by GitHub
commit b526cb5093
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed files with 47 additions and 26 deletions

View File

@ -3,8 +3,8 @@
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/nawnie/EveryDream2trainer/blob/main/Train_Colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
@ -35,11 +35,13 @@
"#@title # Install python 3.10 \n",
"#@markdown # This will show a runtime error, it's ok, it's on purpose to restart the kernel to update python.\n",
"import os\n",
"import time\n",
"from IPython.display import clear_output\n",
"!wget https://github.com/korakot/kora/releases/download/v0.10/py310.sh\n",
"!bash ./py310.sh -b -f -p /usr/local\n",
"!python -m ipykernel install --name \"py310\" --user\n",
"clear_output()\n",
"time.sleep(1) # needed to let clear_output finish before the kernel is killed\n",
"os.kill(os.getpid(), 9)"
]
},
@ -132,7 +134,7 @@
"from IPython.display import clear_output\n",
"!mkdir input\n",
"%cd /content/EveryDream2trainer\n",
"MODEL_URL = \"https://huggingface.co/panopstor/EveryDream/resolve/main/sd_v1-5_vae.ckpt\" #@param [\"https://huggingface.co/panopstor/EveryDream/resolve/main/sd_v1-5_vae.ckpt\", \"https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float16.ckpt\"] {allow-input: true}\n",
"MODEL_URL = \"https://huggingface.co/panopstor/EveryDream/resolve/main/sd_v1-5_vae.ckpt\" #@param [\"https://huggingface.co/panopstor/EveryDream/resolve/main/sd_v1-5_vae.ckpt\", \"https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float16.ckpt\", \"stabilityai/stable-diffusion-2-1-base\", \"stabilityai/stable-diffusion-2-1\"] {allow-input: true}\n",
"print(\"Downloading \")\n",
"!wget $MODEL_URL\n",
"\n",
@ -254,8 +256,8 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "j9pEI69WXS9w"
"id": "j9pEI69WXS9w",
"cellView": "form"
},
"outputs": [],
"source": [
@ -267,15 +269,8 @@
"Save_to_Gdrive = True #@param{type:\"boolean\"}\n",
"#@markdown * Use resume to continue training you just ran; it will also find the latest diffusers log in your Gdrive to continue.\n",
"resume = False #@param{type:\"boolean\"}\n",
"#@markdown * Checkpointing saves VRAM to allow larger batch sizes; minor slowdown on a single batch size, but can allow room for a higher training resolution (suggested on Colab free tier, turn off for A100)\n",
"Gradient_checkpointing = True #@param{type:\"boolean\"}\n",
"Disable_Xformers = False\n",
"#@markdown * Tag shuffling, mainly for booru training. Best to just read this if interested in shuffling tags: /content/EveryDream2trainer/doc/SHUFFLING_TAGS.md\n",
"shuffle_tags = False #@param{type:\"boolean\"}\n",
"#@markdown * You can turn off the text encoder training (generally not suggested)\n",
"Disable_text_Encoder= False #@param{type:\"boolean\"}\n",
"#@markdown * Name your project so you can find it in your logs\n",
"Project_Name = \"my_project\" #@param{type: 'string'}\n",
"Project_Name = \"My_Project\" #@param{type: 'string'}\n",
"\n",
"#@markdown * The learning rate affects how much \"training\" is done on the model per training step. It is a very careful balance to select a value that will learn your data and not wreck the model. \n",
"#@markdown Leave this default unless you are very comfortable with training and know what you are doing.\n",
@ -323,14 +318,40 @@
"#@markdown Use the steps_between_samples to set how often the samples are generated.\n",
"Steps_between_samples = 300 #@param{type:\"integer\"}\n",
"\n",
"#@markdown *Weights and Biases logging token. \n",
"#@markdown * That's it! Run the cell! or configure these advance options\n",
"\n",
"#@markdown # ________________ ADV SETTINGS _________________\n",
"#@markdown These are the default Every Dream 2 settings; changing these without learning what they do will likely waste compute credits.\n",
"#@markdown Please read the doc folder before changing these!\n",
"\n",
"#@markdown * A tip for using the sliders: to finely adjust them, click one with your mouse, then use your keyboard arrows\n",
"\n",
"#@markdown * Using the same seed each time you train allows for more accurate a/b comparison of models, leave at -1 for random\n",
"#@markdown * The seed also affects your training samples; if you want the same seed for each sample you will need to change it from -1\n",
"Training_Seed = -1 #@param{type:\"integer\"}\n",
"\n",
"#@markdown * Checkpointing saves VRAM to allow larger batch sizes; minor slowdown on a single batch size, but can allow room for a higher training resolution (suggested on Colab free tier, turn off for A100)\n",
"Gradient_checkpointing = True #@param{type:\"boolean\"}\n",
"Disable_Xformers = False #@param{type:\"boolean\"}\n",
"#@markdown * Tag shuffling, mainly for booru training. Best to just read this if interested in shuffling tags: /content/EveryDream2trainer/doc/SHUFFLING_TAGS.md\n",
"shuffle_tags = False #@param{type:\"boolean\"}\n",
"#@markdown * You can turn off the text encoder training (generally not suggested)\n",
"Disable_text_Encoder= False #@param{type:\"boolean\"}\n",
"#@markdown * Skip the nth last layer of CLIP.\n",
"Clip_skip = 1 #@param {type:\"slider\", min:0, max:4, step:1}\n",
"#@markdown * Ratio of captions dropped from training data.\n",
"Conditional_DropOut = 0.04 #@param {type:\"slider\", min:0, max:0.3, step:0.01}\n",
"#@markdown * Ratio of images randomly to flip horizontally.\n",
"Picture_flip = 0.15 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n",
"#@markdown * This can improve contrast in light and dark scenes; use a ratio between 0-10% for best results.\n",
"zero_frequency_noise = 0.1 #@param {type:\"slider\", min:0, max:0.25, step:0.01}\n",
"\n",
"#@markdown * Weights and Biases logging token. \n",
"# #@markdown Paste your token here if you have an account so you can use it to track your training progress. If you don't have an account, you can create one for free at https://wandb.ai/site. Log will use your project name from above. This is a free online logging utility.\n",
"\n",
"# #@markdown Your key is on this page: https://wandb.ai/settings under \"Danger Zone\" \"API Keys\"\n",
"wandb_token = '' #@param{type:\"string\"}\n",
"\n",
"#@markdown * That's it! Run the cell!\n",
"\n",
"\n",
"wandb_settings = \"\"\n",
"if wandb_token:\n",
@ -373,24 +394,24 @@
" $DX \\\n",
" $wandb_settings \\\n",
" --amp \\\n",
" --clip_skip $Clip_skip \\\n",
" --batch_size $Batch_Size \\\n",
" --grad_accum $Gradient_steps \\\n",
" --cond_dropout 0.00 \\\n",
" --cond_dropout $Conditional_DropOut \\\n",
" --data_root \"$dataset\" \\\n",
" --flip_p 0.00 \\\n",
" --flip_p $Picture_flip \\\n",
" --lr $Learning_Rate \\\n",
" --lr_decay_steps 0 \\\n",
" --lr_scheduler \"$Schedule\" \\\n",
" --lr_warmup_steps 0 \\\n",
" --max_epochs $Max_Epochs \\\n",
" --project_name \"$Project_Name\" \\\n",
" --resolution $Resolution \\\n",
" --sample_prompts \"sample_prompts.txt\" \\\n",
" --sample_steps $Steps_between_samples \\\n",
" --save_every_n_epoch $Save_every_N_epoch \\\n",
" --seed 555 \\\n",
" --shuffle_tags \\\n",
" --useadam8bit \n"
" --seed $Training_Seed \\\n",
" --zero_frequency_noise_ratio $zero_frequency_noise \\\n",
" --useadam8bit \n",
"\n"
]
},
{
@ -407,7 +428,7 @@
"#@markdown * Edit chain0.json to make use of chaining\n",
"#@markdown * make sure to check each configuration; you will need 1 JSON per chain length, 3 are provided\n",
"#@markdown * make sure your .json contains the line Notebook: true\n",
"\n",
"#@markdown * your locations in the .json can be given in this format: /content/drive/MyDrive/ - then the sub folder you wish to use\n",
"\n",
"%cd /content/EveryDream2trainer\n",
"Chain_Length=0 #@param{type:\"integer\"}\n",
@ -459,8 +480,8 @@
"metadata": {
"accelerator": "GPU",
"colab": {
"include_colab_link": true,
"provenance": []
"provenance": [],
"include_colab_link": true
},
"gpuClass": "standard",
"kernelspec": {