From e9ba16f54424d5010bd0c987b7b357074411a377 Mon Sep 17 00:00:00 2001
From: nawnie <106923464+nawnie@users.noreply.github.com>
Date: Mon, 27 Feb 2023 18:14:23 -0600
Subject: [PATCH 1/9] sample updates

add more samplers
---
 utils/sample_generator.py | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/utils/sample_generator.py b/utils/sample_generator.py
index ffded1b..a599e20 100644
--- a/utils/sample_generator.py
+++ b/utils/sample_generator.py
@@ -8,7 +8,7 @@ from typing import Generator, Callable, Any
 import torch
 from PIL import Image, ImageDraw, ImageFont
 from colorama import Fore, Style
-from diffusers import StableDiffusionPipeline, DDIMScheduler, DPMSolverMultistepScheduler
+from diffusers import StableDiffusionPipeline, DDIMScheduler, DPMSolverMultistepScheduler, DDPMScheduler, PNDMScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, KDPM2AncestralDiscreteScheduler
 from torch.cuda.amp import autocast
 from torch.utils.tensorboard import SummaryWriter
 from torchvision import transforms
@@ -272,7 +272,7 @@ class SampleGenerator:
     @torch.no_grad()
     def _create_scheduler(self, scheduler_config: dict):
         scheduler = self.scheduler
-        if scheduler not in ['ddim', 'dpm++']:
+        if scheduler not in ['ddim', 'dpm++', 'pndm', 'ddpm', 'lms', 'euler', 'euler_a', 'kdpm2']:
             print(f"unsupported scheduler '{self.scheduler}', falling back to ddim")
             scheduler = 'ddim'
 
@@ -280,5 +280,17 @@ class SampleGenerator:
         if scheduler == 'ddim':
             return DDIMScheduler.from_config(scheduler_config)
         elif scheduler == 'dpm++':
             return DPMSolverMultistepScheduler.from_config(scheduler_config, algorithm_type="dpmsolver++")
+        elif scheduler == 'pndm':
+            return PNDMScheduler.from_config(scheduler_config)
+        elif scheduler == 'ddpm':
+            return DDPMScheduler.from_config(scheduler_config)
+        elif scheduler == 'lms':
+            return LMSDiscreteScheduler.from_config(scheduler_config)
+        elif scheduler == 'euler':
+            return EulerDiscreteScheduler.from_config(scheduler_config)
+        elif scheduler == 'euler_a':
+            return EulerAncestralDiscreteScheduler.from_config(scheduler_config)
+        elif scheduler == 'kdpm2':
+            return KDPM2AncestralDiscreteScheduler.from_config(scheduler_config)
         else:
             raise ValueError(f"unknown scheduler '{scheduler}'")
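
[editor's illustration, not part of the patch] Every new branch above uses the same from_config pattern, which is the standard diffusers way to build one scheduler from another scheduler's configuration. The same classes can therefore swap the sampler on an already-loaded pipeline:

    # Illustration only: swapping the sampler on an existing pipeline with the
    # same from_config pattern the patch uses. Any of the schedulers imported
    # above can be substituted here.
    from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    # Rebuild an euler_a scheduler from the current scheduler's config.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
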
From 9961522001eb2a78c9c589e0fe748818084e3b2e Mon Sep 17 00:00:00 2001
From: nawnie <106923464+nawnie@users.noreply.github.com>
Date: Mon, 27 Feb 2023 18:22:58 -0600
Subject: [PATCH 2/9] sample_prompts.json

add comments to explain how to better modify your samples
---
 sample_prompts.json | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 sample_prompts.json

diff --git a/sample_prompts.json b/sample_prompts.json
new file mode 100644
index 0000000..e4229bd
--- /dev/null
+++ b/sample_prompts.json
@@ -0,0 +1,24 @@
+{
+    "batch_size": 3, # if you get out of memory during sample generation, lower this
+    "seed": 555,
+    "cfgs": [7, 4],
+    "scheduler": "dpm++", # you can change this to ddim, dpm++, pndm, ddpm, lms, euler, euler_a, or kdpm2
+    "num_inference_steps": 15, # the ideal number of steps changes with the sampler; a rule of thumb is 30 steps if not using dpm++
+    "show_progress_bars": true, # set to false for cleaner logs
+    "samples": [
+        {
+            "prompt": "ted bennet and a man sitting on a sofa with a kitchen in the background",
+            "negative_prompt": "distorted, deformed"
+        },
+        {
+            "prompt": "a photograph of ted bennet riding a bicycle",
+            "seed": -1
+        },
+        {
+            "random_caption": true,
+            "seed": 555,
+            "cfgs": [1,11], # we can add any of the above arguments to our prompt; just make sure all lines except the last have a comma
+            "size": [640, 384]
+        }
+    ]
+}
\ No newline at end of file
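
[editor's illustration, not part of the patch] For context on how these fields combine: the top-level values act as defaults, each entry in samples can override them, and one image is rendered per cfg value. A rough sketch of that fan-out, where render_sample() is a hypothetical stand-in for the trainer's actual sample call:

    # Rough sketch of the fan-out described by the comments above;
    # render_sample() is hypothetical, for illustration only.
    import json

    config = json.load(open("sample_prompts.json"))  # valid only once the comments are stripped
    for sample in config["samples"]:
        seed = sample.get("seed", config.get("seed", -1))
        for cfg in sample.get("cfgs", config.get("cfgs", [7])):
            render_sample(prompt=sample.get("prompt", ""), cfg_scale=cfg, seed=seed)
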
From d40d980bce0a8e797e9b1e0feb07436419fe5623 Mon Sep 17 00:00:00 2001
From: nawnie <106923464+nawnie@users.noreply.github.com>
Date: Mon, 27 Feb 2023 19:08:50 -0600
Subject: [PATCH 3/9] Created using Colaboratory

---
 Train_Colab.ipynb | 102 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 71 insertions(+), 31 deletions(-)

diff --git a/Train_Colab.ipynb b/Train_Colab.ipynb
index bbe89e6..e8f38e1 100644
--- a/Train_Colab.ipynb
+++ b/Train_Colab.ipynb
@@ -3,8 +3,8 @@
     {
       "cell_type": "markdown",
       "metadata": {
-        "colab_type": "text",
-        "id": "view-in-github"
+        "id": "view-in-github",
+        "colab_type": "text"
       },
       "source": [
        "\"Open"
      ]
    },
    {
      "cell_type": "code",
-     "execution_count": null,
+     "execution_count": 1,
      "metadata": {
        "cellView": "form",
-       "id": "f2cdMtCt9Wb6"
+       "id": "f2cdMtCt9Wb6",
+       "outputId": "af696921-97a5-4667-d3cc-efba1222975b",
+       "colab": {
+         "base_uri": "https://localhost:8080/"
+       }
      },
-     "outputs": [],
+     "outputs": [
+       {
+         "output_type": "stream",
+         "name": "stdout",
+         "text": [
+           "Python 3.10.6\n"
+         ]
+       }
+     ],
      "source": [
        "#@title Verify python version, should be 3.10.something\n",
        "!python --version"
      ]
    },
    {
      "cell_type": "code",
-     "execution_count": null,
+     "execution_count": 2,
      "metadata": {
        "cellView": "form",
-       "id": "d1di4EC6ygw1"
+       "id": "d1di4EC6ygw1",
+       "outputId": "98e1e97c-c829-4c17-c169-01d22318a924",
+       "colab": {
+         "base_uri": "https://localhost:8080/"
+       }
      },
-     "outputs": [],
+     "outputs": [
+       {
+         "output_type": "stream",
+         "name": "stdout",
+         "text": [
+           "Mounted at /content/drive\n"
+         ]
+       }
+     ],
      "source": [
        "#@title Optional connect Gdrive\n",
        "#@markdown # But strongly recommended\n",
    {
      "cell_type": "code",
-     "execution_count": null,
+     "execution_count": 3,
      "metadata": {
        "cellView": "form",
-       "id": "hAuBbtSvGpau"
+       "id": "hAuBbtSvGpau",
+       "outputId": "99c0130a-a997-4bf3-e8cb-18f9cd152390",
+       "colab": {
+         "base_uri": "https://localhost:8080/"
+       }
      },
-     "outputs": [],
+     "outputs": [
+       {
+         "output_type": "stream",
+         "name": "stdout",
+         "text": [
+           "DONE!\n"
+         ]
+       }
+     ],
      "source": [
        "#@markdown # Install Dependencies\n",
        "#@markdown This will take a couple minutes, be patient and watch the output for \"DONE!\"\n",
@@ -263,6 +299,7 @@
      "outputs": [],
      "source": [
        "#@title \n",
+       "%cd /content/EveryDream2trainer\n",
        "#@markdown # Run Everydream 2\n",
        "#@markdown If you want to use a .json config or upload your own, skip this cell and run the cell below instead\n",
        "\n",
@@ -302,7 +339,7 @@
        "\n",
        "#@markdown * Location on your Gdrive where your training images are.\n",
        "Dataset_Location = \"/content/drive/MyDrive/training_samples\" #@param {type:\"string\"}\n",
-       "dataset = Dataset_Location\n",
+       "\n",
        "model = save_name\n",
        "\n",
        "#@markdown * Max Epochs to train for, this defines how many total times all your training data is used. Default of 100 is a good start if you are training ~30-40 images of one subject. If you have 100 images, you can reduce this to 40-50 and so forth.\n",
@@ -332,7 +369,7 @@
        "Training_Seed = -1 #@param{type:\"integer\"}\n",
        "#@markdown * use this option to configure a sample_prompts.json\n",
        "#@markdown * check out /content/EveryDream2trainer/doc/logging.md for more details\n",
-       "Advance_Samples = False #@param{type:\"boolean\"}\n",
+       "Advance_Samples = True #@param{type:\"boolean\"}\n",
        "Sample_File = \"sample_prompts.txt\"\n",
        "if Advance_Samples:\n",
        "  Sample_File = \"sample_prompts.json\"\n",
@@ -342,28 +379,32 @@
        "#@markdown * Tag shuffling, mainly for booru training. Best to just read this if interested in shuffling tags /content/EveryDream2trainer/doc/SHUFFLING_TAGS.md\n",
        "shuffle_tags = False #@param{type:\"boolean\"}\n",
        "#@markdown * You can turn off the text encoder training (generally not suggested)\n",
-       "Disable_text_Encoder= False #@param{type:\"boolean\"}\n",
-       "#@markdown * Skip the nth last layer of CLIP. 0 is default for SD1.x, 2 recommended for SD2.x models.\n",
-       "Clip_skip = 0 #@param {type:\"slider\", min:0, max:4, step:1}\n",
-       "#@markdown * Ratio of training on empty caption. Improves unconditional guidance.\n",
-       "Conditional_DropOut = 0.04 #@param {type:\"slider\", min:0, max:0.3, step:0.01}\n",
-       "#@markdown * Ratio of images randomly to flip horizontally. Use for small data sets. May negatively affect likeness of subjects with asymmetric features.\n",
-       "Picture_flip = 0.0 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n",
-       "#@markdown * This can improve contrast in light and dark scenes. Use a ratio between 0-10% for best results. 0\n",
-       "zero_frequency_noise = 0.05 #@param {type:\"slider\", min:0, max:0.25, step:0.01}\n",
+       "Disable_text_Encoder= True #@param{type:\"boolean\"}\n",
+       "#@markdown * Skip the nth last layer of CLIP.\n",
+       "Clip_skip = 1 #@param {type:\"slider\", min:0, max:4, step:1}\n",
+       "#@markdown * Ratio of captions dropped from training data.\n",
+       "Conditional_DropOut = 0.1 #@param {type:\"slider\", min:0, max:0.3, step:0.01}\n",
+       "#@markdown * Ratio of images randomly to flip horizontally.\n",
+       "Picture_flip = 0.05 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n",
+       "#@markdown * This can improve contrast in light and dark scenes. Use a ratio between 0-10% for best results.\n",
+       "zero_frequency_noise = 0.1 #@param {type:\"slider\", min:0, max:0.25, step:0.01}\n",
        "\n",
        "#@markdown * Weights and Biases logging token. \n",
        "# #@markdown Paste your token here if you have an account so you can use it to track your training progress. If you don't have an account, you can create one for free at https://wandb.ai/site. Log will use your project name from above. This is a free online logging utility.\n",
        "# #@markdown Your key is on this page: https://wandb.ai/settings under \"Danger Zone\" \"API Keys\"\n",
        "wandb_token = '' #@param{type:\"string\"}\n",
-       "\n",
-       "\n",
-       "\n",
        "wandb_settings = \"\"\n",
        "if wandb_token:\n",
        "  !wandb login $wandb_token\n",
        "  wandb_settings = \"--wandb\"\n",
        "\n",
+       "if \"zip\" in Dataset_Location:\n",
+       "  !rm -r /Training_Data/\n",
+       "  !mkdir Training_Data\n",
+       "  !unzip $Dataset_Location -d /Training_Data\n",
+       "  Dataset_Location = \"/Training_Data\"\n",
+       "dataset = Dataset_Location\n",
+       "\n",
        "Drive=\"\"\n",
        "if Save_to_Gdrive:\n",
        "  Drive = \"--logdir /content/drive/MyDrive/everydreamlogs --save_ckpt_dir /content/drive/MyDrive/everydreamlogs/ckpt\"\n",
@@ -390,7 +431,7 @@
        "\n",
        "textencode = \"\"\n",
        "if Disable_text_Encoder:\n",
-       "  textencode = \"--disable_textenc_training Train_text \"\n",
+       "  textencode = \"--disable_textenc_training\"\n",
        "\n",
        "!python train.py --resume_ckpt \"$model\" \\\n",
        "  $textencode \\\n",
@@ -415,8 +456,7 @@
        "  --sample_steps $Steps_between_samples \\\n",
        "  --save_every_n_epoch $Save_every_N_epoch \\\n",
        "  --seed $Training_Seed \\\n",
-       "  --zero_frequency_noise_ratio $zero_frequency_noise \\\n",
-       "  --useadam8bit \n",
+       "  --zero_frequency_noise_ratio $zero_frequency_noise\n",
        "\n"
      ]
    },
@@ -486,8 +526,8 @@
  "metadata": {
    "accelerator": "GPU",
    "colab": {
-     "include_colab_link": true,
-     "provenance": []
+     "provenance": [],
+     "include_colab_link": true
    },
    "gpuClass": "standard",
    "kernelspec": {
    },
    "nbformat": 4,
-   "nbformat_minor": 0
-}
+   "nbformat_minor": 0
+}
\ No newline at end of file
From d12e39a6219d10cc2a7c371563a7840a96c9f56b Mon Sep 17 00:00:00 2001
From: nawnie <106923464+nawnie@users.noreply.github.com>
Date: Mon, 27 Feb 2023 19:57:08 -0600
Subject: [PATCH 4/9] Update sample_prompts.json

---
 sample_prompts.json | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/sample_prompts.json b/sample_prompts.json
index e4229bd..b81c246 100644
--- a/sample_prompts.json
+++ b/sample_prompts.json
@@ -1,10 +1,10 @@
 {
-    "batch_size": 3, # if you get out of memory during sample generation, lower this
+    "batch_size": 3,
     "seed": 555,
     "cfgs": [7, 4],
-    "scheduler": "dpm++", # you can change this to ddim, dpm++, pndm, ddpm, lms, euler, euler_a, or kdpm2
-    "num_inference_steps": 15, # the ideal number of steps changes with the sampler; a rule of thumb is 30 steps if not using dpm++
-    "show_progress_bars": true, # set to false for cleaner logs
+    "scheduler": "dpm++",
+    "num_inference_steps": 15,
+    "show_progress_bars": true,
     "samples": [
         {
             "prompt": "ted bennet and a man sitting on a sofa with a kitchen in the background",
             "negative_prompt": "distorted, deformed"
@@ -17,8 +17,8 @@
         {
             "random_caption": true,
             "seed": 555,
-            "cfgs": [1,11], # we can add any of the above arguments to our prompt; just make sure all lines except the last have a comma
-            "size": [640, 384]
+            "cfgs": [1,11],
+            "size": [640, 384]
         }
     ]
-}
\ No newline at end of file
+}
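
[editor's illustration, not part of the patch] Stripping the comments here is more than cosmetic: strict JSON has no comment syntax, so the annotated file from PATCH 2 cannot be parsed at all. A quick stdlib check:

    # Python's stdlib json module rejects '#' comments outright, which is why
    # the annotated sample_prompts.json from PATCH 2 had to be cleaned up.
    import json

    annotated = '{"batch_size": 3}  # lower this if you run out of memory'
    try:
        json.loads(annotated)
    except json.JSONDecodeError as err:
        print(f"invalid JSON: {err}")  # -> json.decoder.JSONDecodeError: Extra data
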
From 308c42ebd05e676f64f116c2ffc4d5899003debd Mon Sep 17 00:00:00 2001
From: nawnie <106923464+nawnie@users.noreply.github.com>
Date: Tue, 28 Feb 2023 08:47:25 -0600
Subject: [PATCH 5/9] default fix

left adv samples and disable text on
---
 Train_Colab.ipynb | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/Train_Colab.ipynb b/Train_Colab.ipynb
index e8f38e1..3c29f18 100644
--- a/Train_Colab.ipynb
+++ b/Train_Colab.ipynb
@@ -369,7 +369,7 @@
        "Training_Seed = -1 #@param{type:\"integer\"}\n",
        "#@markdown * use this option to configure a sample_prompts.json\n",
        "#@markdown * check out /content/EveryDream2trainer/doc/logging.md for more details\n",
-       "Advance_Samples = True #@param{type:\"boolean\"}\n",
+       "Advance_Samples = False #@param{type:\"boolean\"}\n",
        "Sample_File = \"sample_prompts.txt\"\n",
        "if Advance_Samples:\n",
        "  Sample_File = \"sample_prompts.json\"\n",
@@ -379,7 +379,7 @@
        "#@markdown * Tag shuffling, mainly for booru training. Best to just read this if interested in shuffling tags /content/EveryDream2trainer/doc/SHUFFLING_TAGS.md\n",
        "shuffle_tags = False #@param{type:\"boolean\"}\n",
        "#@markdown * You can turn off the text encoder training (generally not suggested)\n",
-       "Disable_text_Encoder= True #@param{type:\"boolean\"}\n",
+       "Disable_text_Encoder= False #@param{type:\"boolean\"}\n",
        "#@markdown * Skip the nth last layer of CLIP.\n",
        "Clip_skip = 1 #@param {type:\"slider\", min:0, max:4, step:1}\n",
        "#@markdown * Ratio of captions dropped from training data.\n",
@@ -547,4 +547,4 @@
    },
    "nbformat": 4,
    "nbformat_minor": 0
-}
\ No newline at end of file
+}
{type:\"slider\", min:0, max:0.5, step:0.05}\n", + "Picture_flip = 0 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n", "#@markdown * This can improve contrast in light and dark scenes, Use a ratio between 0-10% for Best results.\n", "zero_frequency_noise = 0.1 #@param {type:\"slider\", min:0, max:0.25, step:0.01}\n", "\n", @@ -547,4 +511,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From db4e632b3ff552df22d0235cb21575b251462a29 Mon Sep 17 00:00:00 2001 From: nawnie <106923464+nawnie@users.noreply.github.com> Date: Tue, 28 Feb 2023 09:30:03 -0600 Subject: [PATCH 7/9] Update Train_Colab.ipynb --- Train_Colab.ipynb | 572 +++++++++------------------------------------- 1 file changed, 103 insertions(+), 469 deletions(-) diff --git a/Train_Colab.ipynb b/Train_Colab.ipynb index 8c4f1a7..bdb028a 100644 --- a/Train_Colab.ipynb +++ b/Train_Colab.ipynb @@ -1,514 +1,148 @@ +Skip to content +Search or jump to… +Pull requests +Issues +Codespaces +Marketplace +Explore + +@nawnie +nawnie +/ +EveryDream2trainer +Public +forked from victorchall/EveryDream2trainer +Cannot fork because you own this repository and are not a member of any organizations. +Code +Pull requests +Actions +Projects +Wiki +Security +Insights +Settings +EveryDream2trainer +/ +Train_Colab.ipynb +in +main + + +Spaces + +2 + +No wrap +1 { +2 "cells": [ +3 { +4 "cell_type": "markdown", +5 "metadata": { +6 "id": "view-in-github", +7 "colab_type": "text" +8 }, +9 "source": [ +10 "\"Open" +11 ] +12 }, +13 { +14 "cell_type": "markdown", +15 "metadata": { +16 "id": "blaLMSbkPHhG" +17 }, +18 "source": [ +19 "# EveryDream2 Colab Edition\n", +20 "\n", +21 "Check out documentation here: https://github.com/victorchall/EveryDream2trainer#docs\n", +22 "\n", +23 "And join the discord: https://discord.gg/uheqxU6sXN" +24 ] +25 }, +26 { +27 "cell_type": "code", +28 "execution_count": null, +29 "metadata": { +30 "cellView": "form", +31 "id": "WsYIcz9HY9lx" +32 }, +33 "outputs": [], +34 "source": [ +35 "#@title # Install python 3.10 \n", +36 "#@markdown # This will show a runtime error, its ok, its on purpose to restart the kernel to update python.\n", +37 "import os\n", +38 "import time\n", +39 "from IPython.display import clear_output\n", +40 "!wget https://github.com/korakot/kora/releases/download/v0.10/py310.sh\n", +41 "!bash ./py310.sh -b -f -p /usr/local\n", +42 "!python -m ipykernel install --name \"py310\" --user\n", +43 "clear_output()\n", +44 "time.sleep(1) #needed to clear is before kill\n", +45 "os.kill(os.getpid(), 9)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "f2cdMtCt9Wb6" - }, - "outputs": [], - "source": [ - "#@title Verify python version, should be 3.10.something\n", - "!python --version" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "d1di4EC6ygw1" - }, - "outputs": [], - "source": [ - "#@title Optional connect Gdrive\n", - "#@markdown # But strongly recommended\n", - "#@markdown This will let you put all your training data and checkpoints directly on your drive. 
From 97450d88813c9cb2eaca4fb34b80f9f19ac456fc Mon Sep 17 00:00:00 2001
From: nawnie <106923464+nawnie@users.noreply.github.com>
Date: Tue, 28 Feb 2023 09:33:29 -0600
Subject: [PATCH 8/9] Update Train_Colab.ipynb

---
 Train_Colab.ipynb | 34 ----------------------------------
 1 file changed, 34 deletions(-)

From 893eb15cf8f83ed30d957d41f7471476b4020340 Mon Sep 17 00:00:00 2001
From: nawnie <106923464+nawnie@users.noreply.github.com>
Date: Tue, 28 Feb 2023 09:35:12 -0600
Subject: [PATCH 9/9] Created using Colaboratory

---
 Train_Colab.ipynb | 538 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 469 insertions(+), 69 deletions(-)

diff --git a/Train_Colab.ipynb b/Train_Colab.ipynb
index 53ef7a9..fc0f32e 100644
--- a/Train_Colab.ipynb
+++ b/Train_Colab.ipynb
@@ -1,114 +1,514 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "view-in-github",
+        "colab_type": "text"
+      },
+      "source": [
+        "\"Open"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "blaLMSbkPHhG"
+      },
+      "source": [
+        "# EveryDream2 Colab Edition\n",
+        "\n",
+        "Check out documentation here: https://github.com/victorchall/EveryDream2trainer#docs\n",
+        "\n",
+        "And join the discord: https://discord.gg/uheqxU6sXN"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "WsYIcz9HY9lx"
+      },
+      "outputs": [],
+      "source": [
+        "#@title # Install python 3.10 \n",
+        "#@markdown # This will show a runtime error, it's ok, it's on purpose to restart the kernel to update python.\n",
+        "import os\n",
+        "import time\n",
+        "from IPython.display import clear_output\n",
+        "!wget https://github.com/korakot/kora/releases/download/v0.10/py310.sh\n",
+        "!bash ./py310.sh -b -f -p /usr/local\n",
+        "!python -m ipykernel install --name \"py310\" --user\n",
+        "clear_output()\n",
+        "time.sleep(1) # needed so the output clears before the kill\n",
+        "os.kill(os.getpid(), 9)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "f2cdMtCt9Wb6"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Verify python version, should be 3.10.something\n",
+        "!python --version"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "d1di4EC6ygw1"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Optional connect Gdrive\n",
+        "#@markdown # But strongly recommended\n",
+        "#@markdown This will let you put all your training data and checkpoints directly on your drive. Much faster/easier to continue later, less setup time.\n",
+        "\n",
+        "#@markdown Creates /content/drive/MyDrive/everydreamlogs/ckpt\n",
+        "from google.colab import drive\n",
+        "drive.mount('/content/drive')\n",
+        "\n",
+        "!mkdir -p /content/drive/MyDrive/everydreamlogs/ckpt"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "hAuBbtSvGpau"
+      },
+      "outputs": [],
+      "source": [
+        "#@markdown # Install Dependencies\n",
+        "#@markdown This will take a couple minutes, be patient and watch the output for \"DONE!\"\n",
+        "from IPython.display import clear_output\n",
+        "from subprocess import getoutput\n",
+        "s = getoutput('nvidia-smi')\n",
+        "!pip install -q torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url \"https://download.pytorch.org/whl/cu117\"\n",
+        "!pip install -q transformers==4.25.1\n",
+        "!pip install -q diffusers[torch]==0.13.0\n",
+        "!pip install -q pynvml==11.4.1\n",
+        "!pip install -q bitsandbytes==0.35.0\n",
+        "!pip install -q ftfy==6.1.1\n",
+        "!pip install -q aiohttp==3.8.3\n",
+        "!pip install -q tensorboard>=2.11.0\n",
+        "!pip install -q protobuf==3.20.1\n",
+        "!pip install -q wandb==0.13.6\n",
+        "!pip install -q pyre-extensions==0.0.23\n",
+        "!pip install -q xformers==0.0.16\n",
+        "!pip install -q pytorch-lightning==1.6.5\n",
+        "!pip install -q OmegaConf==2.2.3\n",
+        "!pip install -q numpy==1.23.5\n",
+        "!pip install -q colorama\n",
+        "!pip install -q keyboard\n",
+        "!pip install -q triton\n",
+        "!pip install -q lion-pytorch\n",
+        "clear_output()\n",
+        "!git clone https://github.com/victorchall/EveryDream2trainer.git\n",
+        "%cd /content/EveryDream2trainer\n",
+        "!python utils/get_yamls.py\n",
+        "clear_output()\n",
+        "print(\"DONE!\")"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "unaffeqGP_0A"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Get A Base Model\n",
+        "#@markdown Choose SD1.5 or Waifu Diffusion 1.3 from the dropdown, or paste your own URL in the box\n",
+        "\n",
+        "#@markdown If you already did this once with Gdrive connected, you can skip this step as the cached copy is on your gdrive\n",
+        "from IPython.display import clear_output\n",
+        "!mkdir input\n",
+        "%cd /content/EveryDream2trainer\n",
+        "MODEL_URL = \"https://huggingface.co/panopstor/EveryDream/resolve/main/sd_v1-5_vae.ckpt\" #@param [\"https://huggingface.co/panopstor/EveryDream/resolve/main/sd_v1-5_vae.ckpt\", \"https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float16.ckpt\", \"stabilityai/stable-diffusion-2-1-base\", \"stabilityai/stable-diffusion-2-1\"] {allow-input: true}\n",
+        "print(\"Downloading \")\n",
+        "!wget $MODEL_URL\n",
+        "\n",
+        "%cd /content/EveryDream2trainer\n",
+        "\n",
+        "clear_output()\n",
+        "print(\"DONE!\")"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "nEzuEYH0536C"
+      },
+      "source": [
+        "In order to train, you need a base model to train on. This is a one-time setup to configure base models when you want to use a particular base.\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "tPvQSo6ScF2c"
+      },
+      "outputs": [],
+      "source": [
+        "import os\n",
+        "#@title Setup conversion\n",
+        "\n",
+        "#@markdown **If you already did this once with Gdrive connected, you can skip this step as the cached copy is on your gdrive.** \n",
+        "# \n",
+        "# If you are not sure, look in your Gdrive for `everydreamlogs/ckpt` and see if you have a folder with the `save_name` below.\n",
+        "\n",
+        "#@markdown Pick the `model_type` in the dropdown. This is the model type that you are converting and you downloaded above. This is important as it will determine the model architecture and the correct settings to use.\n",
+        "\n",
+        "#@markdown * `SD1x` is all SD1.x based models *(SD1.4, SD1.5, Waifu Diffusion 1.3, etc)*\n",
+        "\n",
+        "#@markdown * `SD2_512_base` is the SD2 512 base model\n",
+        "\n",
+        "#@markdown * `SD21` is all SD2 768 models. *(ex. SD2.1 768, or trained models based on that)*\n",
+        "\n",
+        "#@markdown If you are not sure, double check the model author's page or ask for help on [Discord](https://discord.gg/uheqxU6sXN).\n",
+        "model_type = \"SD1x\" #@param [\"SD1x\", \"SD2_512_base\", \"SD21\"]\n",
+        "\n",
+        "#@markdown This is the temporary ckpt file that was downloaded above. If you downloaded a different model, you can change this. *Hint: look at your file manager in the EveryDream2trainer folder for .ckpt files*.\n",
+        "base_path = \"/content/EveryDream2trainer/sd_v1-5_vae.ckpt\" #@param {type:\"string\"}\n",
+        "\n",
+        "#@markdown The name that you will use when selecting this model in future training sessions.\n",
+        "save_name = \"SD15\" #@param{type:\"string\"}\n",
+        "\n",
+        "#@markdown If you are using Gdrive, this will save the converted model to your Gdrive for future use so you can skip downloading and converting the model.\n",
+        "cache_to_gdrive = True #@param{type:\"boolean\"}\n",
+        "\n",
+        "if cache_to_gdrive:\n",
+        "  save_name = os.path.join(\"/content/drive/MyDrive/everydreamlogs/ckpt\", save_name)\n",
+        "\n",
+        "img_size = 512\n",
+        "upscale_attention = False\n",
+        "prediction_type = \"epsilon\"\n",
+        "if model_type == \"SD1x\":\n",
+        "  inference_yaml = \"v1-inference.yaml\"\n",
+        "elif model_type == \"SD2_512_base\":\n",
+        "  upscale_attention = True\n",
+        "  inference_yaml = \"v2-inference.yaml\"\n",
+        "elif model_type == \"SD21\":\n",
+        "  upscale_attention = True\n",
+        "  prediction_type = \"v_prediction\"\n",
+        "  inference_yaml = \"v2-inference-v.yaml\"\n",
+        "  img_size = 768\n",
+        "\n",
+        "print(base_path)\n",
+        "print(inference_yaml)\n",
+        "\n",
+        "!python utils/convert_original_stable_diffusion_to_diffusers.py --scheduler_type ddim \\\n",
+        "--original_config_file {inference_yaml} \\\n",
+        "--image_size {img_size} \\\n",
+        "--checkpoint_path {base_path} \\\n",
+        "--prediction_type {prediction_type} \\\n",
+        "--upcast_attn False \\\n",
+        "--dump_path {save_name}"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "bLpcvpGJB4Gu"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Pick your base model from a diffusers model saved to your Gdrive (converted above)\n",
+        "\n",
+        "#@markdown Do not skip this cell.\n",
+        "\n",
+        "#@markdown * If you have previously saved diffusers on your drive you can select it here\n",
+        "\n",
+        "#@markdown ex. */content/drive/MyDrive/everydreamlogs/myproject_202208/ckpts/interrupted-gs023*\n",
+        "\n",
+        "#@markdown The default for SD1.5 converted above would be */content/drive/MyDrive/everydreamlogs/ckpt/SD15*\n",
+        "Resume_Model = \"/content/drive/MyDrive/everydreamlogs/ckpt/SD15\" #@param{type:\"string\"} \n",
+        "save_name = Resume_Model"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "JXVu-W2lCjwX"
+      },
+      "source": [
+        "For a more in-depth explanation of each of these parameters, check out /content/EveryDream2trainer/doc.\n",
+        "\n",
+        "\n",
+        "After you've tried a few models you will find /content/EveryDream2trainer/doc/ATWEAKING.md to be extremely helpful."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "j9pEI69WXS9w"
+      },
+      "outputs": [],
+      "source": [
+        "#@title \n",
+        "%cd /content/EveryDream2trainer\n",
+        "#@markdown # Run Everydream 2\n",
+        "#@markdown If you want to use a .json config or upload your own, skip this cell and run the cell below instead\n",
+        "\n",
+        "#@markdown * Save logs and output ckpts to Gdrive (strongly suggested)\n",
+        "Save_to_Gdrive = True #@param{type:\"boolean\"}\n",
+        "#@markdown * Use resume to continue training you just ran; it will also find the latest diffusers log in your Gdrive to continue.\n",
+        "resume = False #@param{type:\"boolean\"}\n",
+        "#@markdown * Name your project so you can find it in your logs\n",
+        "Project_Name = \"My_Project\" #@param{type: 'string'}\n",
+        "\n",
+        "#@markdown * The learning rate affects how much \"training\" is done on the model per training step. It is a very careful balance to select a value that will learn your data and not wreck the model. \n",
+        "#@markdown Leave this default unless you are very comfortable with training and know what you are doing.\n",
+        "\n",
+        "Learning_Rate = 1e-6 #@param{type: 'number'}\n",
+        "\n",
+        "#@markdown * A learning rate scheduler can change your learning rate as training progresses.\n",
+        "\n",
+        "#@markdown I recommend sticking with constant until you are comfortable with general training. \n",
+        "\n",
+        "Schedule = \"constant\" #@param [\"constant\", \"polynomial\", \"linear\", \"cosine\"] {allow-input: true}\n",
+        "\n",
+        "#@markdown * Resolution to train at (recommend 512). Higher resolution will require lower batch size (below).\n",
+        "Resolution = 512 #@param {type:\"slider\", min:256, max:768, step:64}\n",
+        "\n",
+        "#@markdown * Batch size is also another \"hyperparameter\" of itself and there are tradeoffs. It may not always be best to use the highest batch size possible. One of the primary reasons to change it is if you get \"CUDA out of memory\" errors, where lowering the value may help.\n",
+        "\n",
+        "#@markdown * Batch size impacts VRAM use. 8 should work on SD1.x models and 5 for SD2.x models at 512 resolution. Lower this if you get CUDA out of memory errors. You can check resources on your instance and watch the GPU RAM.\n",
+        "\n",
+        "Batch_Size = 6 #@param{type: 'number'}\n",
+        "\n",
+        "#@markdown * Gradient accumulation is sort of like a virtual batch size increase; use this to increase batch size without increasing VRAM usage.\n",
+        "#@markdown Increasing from 1 to 2 will have a minor impact on VRAM use, but more beyond that will not.\n",
+        "#@markdown On Colab free tier you can expect the fastest performance from a batch of 8 and a gradient step of 1.\n",
+        "#@markdown This is mostly for use if you are training higher resolution on free tier and cannot increase batch size.\n",
+        "\n",
+        "Gradient_steps = 1 #@param{type:\"slider\", min:1, max:10, step:1}\n",
+        "\n",
+        "#@markdown * Location on your Gdrive where your training images are.\n",
+        "Dataset_Location = \"/content/drive/MyDrive/training_samples\" #@param {type:\"string\"}\n",
+        "\n",
+        "model = save_name\n",
+        "\n",
+        "#@markdown * Max Epochs to train for, this defines how many total times all your training data is used. Default of 100 is a good start if you are training ~30-40 images of one subject. If you have 100 images, you can reduce this to 40-50 and so forth.\n",
+        "\n",
+        "Max_Epochs = 100 #@param {type:\"slider\", min:0, max:200, step:5}\n",
+        "\n",
+        "#@markdown * How often to save checkpoints.\n",
+        "Save_every_N_epoch = 20 #@param{type:\"integer\"}\n",
+        "\n",
+        "#@markdown * Test sample generation steps, how often to generate samples during training.\n",
+        "\n",
+        "#@markdown You can set your own sample prompts by adding them, one line at a time, to `/content/EveryDream2trainer/sample_prompts.txt`. If left empty, it will use the captions from your training images.\n",
+        "\n",
+        "#@markdown Use the steps_between_samples to set how often the samples are generated.\n",
+        "Steps_between_samples = 300 #@param{type:\"integer\"}\n",
+        "\n",
+        "#@markdown * That's it! Run the cell! Or configure these advanced options.\n",
+        "\n",
+        "#@markdown # ________________ ADV SETTINGS _________________\n",
+        "#@markdown These are the default Every Dream 2 settings; changing these without learning what they do will likely waste compute credits.\n",
+        "#@markdown Please read the doc folder before changing these!\n",
+        "\n",
+        "#@markdown * A tip for using the sliders: to finely adjust these, click them with your mouse, then use your keyboard arrows.\n",
+        "\n",
+        "#@markdown * Using the same seed each time you train allows for more accurate a/b comparison of models; leave at -1 for random.\n",
+        "#@markdown * The seed also affects your training samples; if you want the same seed each sample you will need to change it from -1.\n",
+        "Training_Seed = -1 #@param{type:\"integer\"}\n",
+        "#@markdown * use this option to configure a sample_prompts.json\n",
+        "#@markdown * check out /content/EveryDream2trainer/doc/logging.md for more details\n",
+        "Advance_Samples = False #@param{type:\"boolean\"}\n",
+        "Sample_File = \"sample_prompts.txt\"\n",
+        "if Advance_Samples:\n",
+        "  Sample_File = \"sample_prompts.json\"\n",
+        "#@markdown * Checkpointing saves VRAM to allow larger batch sizes; a minor slowdown at a single batch size, but it can allow room for a higher training resolution (suggested on Colab free tier, turn off for A100).\n",
+        "Gradient_checkpointing = True #@param{type:\"boolean\"}\n",
+        "Disable_Xformers = False #@param{type:\"boolean\"}\n",
+        "#@markdown * Tag shuffling, mainly for booru training. Best to just read this if interested in shuffling tags /content/EveryDream2trainer/doc/SHUFFLING_TAGS.md\n",
+        "shuffle_tags = False #@param{type:\"boolean\"}\n",
+        "#@markdown * You can turn off the text encoder training (generally not suggested)\n",
+        "Disable_text_Encoder= False #@param{type:\"boolean\"}\n",
+        "#@markdown * Skip the nth last layer of CLIP.\n",
+        "Clip_skip = 1 #@param {type:\"slider\", min:0, max:4, step:1}\n",
+        "#@markdown * Ratio of captions dropped from training data.\n",
+        "Conditional_DropOut = 0.04 #@param {type:\"slider\", min:0, max:0.3, step:0.01}\n",
+        "#@markdown * Ratio of images randomly to flip horizontally.\n",
+        "Picture_flip = 0 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n",
+        "#@markdown * This can improve contrast in light and dark scenes. Use a ratio between 0-10% for best results.\n",
+        "zero_frequency_noise = 0.05 #@param {type:\"slider\", min:0, max:0.25, step:0.01}\n",
+        "\n",
+        "#@markdown * Weights and Biases logging token. \n",
+        "# #@markdown Paste your token here if you have an account so you can use it to track your training progress. If you don't have an account, you can create one for free at https://wandb.ai/site. Log will use your project name from above. This is a free online logging utility.\n",
+        "# #@markdown Your key is on this page: https://wandb.ai/settings under \"Danger Zone\" \"API Keys\"\n",
+        "wandb_token = '' #@param{type:\"string\"}\n",
+        "wandb_settings = \"\"\n",
+        "if wandb_token:\n",
+        "  !wandb login $wandb_token\n",
+        "  wandb_settings = \"--wandb\"\n",
+        "\n",
+        "if \"zip\" in Dataset_Location:\n",
+        "  !rm -r /Training_Data/\n",
+        "  !mkdir Training_Data\n",
+        "  !unzip $Dataset_Location -d /Training_Data\n",
+        "  Dataset_Location = \"/Training_Data\"\n",
+        "dataset = Dataset_Location\n",
+        "\n",
+        "Drive=\"\"\n",
+        "if Save_to_Gdrive:\n",
+        "  Drive = \"--logdir /content/drive/MyDrive/everydreamlogs --save_ckpt_dir /content/drive/MyDrive/everydreamlogs/ckpt\"\n",
+        "\n",
+        "if Max_Epochs==0:\n",
+        "  Max_Epochs=1\n",
+        "\n",
+        "if resume:\n",
+        "  model = \"findlast\"\n",
+        "\n",
+        "Gradient = \"\"\n",
+        "if Gradient_checkpointing:\n",
+        "  Gradient = \"--gradient_checkpointing \"\n",
+        "if \"A100\" in s:\n",
+        "  Gradient = \"\"\n",
+        "\n",
+        "DX = \"\" \n",
+        "if Disable_Xformers:\n",
+        "  DX = \"--disable_xformers \"\n",
+        "\n",
+        "shuffle = \"\"\n",
+        "if shuffle_tags:\n",
+        "  shuffle = \"--shuffle_tags \"\n",
+        "\n",
+        "textencode = \"\"\n",
+        "if Disable_text_Encoder:\n",
+        "  textencode = \"--disable_textenc_training\"\n",
+        "\n",
+        "!python train.py --resume_ckpt \"$model\" \\\n",
+        "  $textencode \\\n",
+        "  $Gradient \\\n",
+        "  $shuffle \\\n",
+        "  $Drive \\\n",
+        "  $DX \\\n",
+        "  $wandb_settings \\\n",
+        "  --amp \\\n",
+        "  --clip_skip $Clip_skip \\\n",
+        "  --batch_size $Batch_Size \\\n",
+        "  --grad_accum $Gradient_steps \\\n",
+        "  --cond_dropout $Conditional_DropOut \\\n",
+        "  --data_root \"$dataset\" \\\n",
+        "  --flip_p $Picture_flip \\\n",
+        "  --lr $Learning_Rate \\\n",
+        "  --lr_scheduler \"$Schedule\" \\\n",
+        "  --max_epochs $Max_Epochs \\\n",
+        "  --project_name \"$Project_Name\" \\\n",
+        "  --resolution $Resolution \\\n",
+        "  --sample_prompts \"$Sample_File\" \\\n",
+        "  --sample_steps $Steps_between_samples \\\n",
+        "  --save_every_n_epoch $Save_every_N_epoch \\\n",
+        "  --seed $Training_Seed \\\n",
+        "  --zero_frequency_noise_ratio $zero_frequency_noise\n",
+        "\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "Iuoa_1B9jRGU"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Alternate startup script\n",
+        "#@markdown * Edit train.json to set up your parameters\n",
+        "#@markdown * Edit chain0.json to make use of chaining\n",
+        "#@markdown * make sure to check each configuration; you will need 1 json per chain length, 3 are provided\n",
+        "#@markdown * make sure your .json contains the line Notebook: true\n",
+        "#@markdown * your locations in the .json can be done in this format /content/drive/MyDrive/ - then the sub folder you wish to use\n",
+        "\n",
+        "%cd /content/EveryDream2trainer\n",
+        "Chain_Length=0 #@param{type:\"integer\"}\n",
+        "l = Chain_Length \n",
+        "I=0 # repeat counter\n",
+        "if l is None or l == 0:\n",
+        "  l=1\n",
+        "while l > 0:\n",
+        "  !python train_colab.py --config chain{I}.json\n",
+        "  l -= 1\n",
+        "  I += 1"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "cellView": "form",
+        "id": "8HmIWtODuE6p"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Test your Diffusers\n",
+        "#@markdown Path to the diffusers that was trained\n",
+        "\n",
+        "#@markdown You can look in the file drawer on the left /content/drive/MyDrive/everydreamlogs and click the three dots to copy the path\n",
+        "\n",
+        "#@markdown ex. /content/drive/MyDrive/everydreamlogs/my_project_20230126-023804/ckpts/interrupted-gs86\n",
+        "\n",
+        "diffusers_path=\"\" #@param{type:\"string\"}\n",
+        "DF=diffusers_path\n",
+        "PROMPT = \"a photo of an astronaut on the moon\" #@param{type:\"string\"}\n",
+        "Resolution = 512 #@param {type:\"slider\", min:256, max:1024, step:32}\n",
+        "Seed= -1 #@param{type:\"integer\"}\n",
+        "Steps = 30 #@param {type:\"slider\", min:10, max:50, step:1}\n",
+        "cfg = 7 #@param {type:\"slider\", min:1, max:15, step:0.5}\n",
+        "\n",
+        "\n",
+        "!python /content/EveryDream2trainer/scripts/txt2img.py \\\n",
+        "  --diffusers_path \"$DF\" \\\n",
+        "  --resolution $Resolution \\\n",
+        "  --seed $Seed \\\n",
+        "  --prompt \"$PROMPT\" \\\n",
+        "  --steps $Steps \\\n",
+        "  --cfg_scale $cfg "
+      ]
+    }
+  ],
+  "metadata": {
+    "accelerator": "GPU",
+    "colab": {
+      "provenance": [],
+      "include_colab_link": true
+    },
+    "gpuClass": "standard",
+    "kernelspec": {
+      "display_name": "venv",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "name": "python",
+      "version": "3.10.5"
+    },
+    "vscode": {
+      "interpreter": {
+        "hash": "e602395b73d27e246c3f66de86a1ed4dc1e5a85e8356fd1a2f027b9d2f1f8162"
+      }
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file