commit b0f7777a6c
@@ -3,8 +3,8 @@
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/nawnie/EveryDream2trainer/blob/main/Train_Colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
@@ -263,6 +263,7 @@
"outputs": [],
"source": [
"#@title \n",
"%cd /content/EveryDream2trainer\n",
"#@markdown # Run Everydream 2\n",
"#@markdown If you want to use a .json config or upload your own, skip this cell and run the cell below instead\n",
"\n",
@@ -302,7 +303,7 @@
"\n",
"#@markdown * Location on your Gdrive where your training images are.\n",
"Dataset_Location = \"/content/drive/MyDrive/training_samples\" #@param {type:\"string\"}\n",
"dataset = Dataset_Location\n",
"\n",
"model = save_name\n",
"\n",
"#@markdown * Max Epochs to train for; this defines how many total times all your training data is used. Default of 100 is a good start if you are training ~30-40 images of one subject. If you have 100 images, you can reduce this to 40-50 and so forth.\n",
@@ -343,27 +344,31 @@
"shuffle_tags = False #@param{type:\"boolean\"}\n",
"#@markdown * You can turn off the text encoder training (generally not suggested)\n",
"Disable_text_Encoder= False #@param{type:\"boolean\"}\n",
"#@markdown * Skip the nth last layer of CLIP. 0 is default for SD1.x, 2 recommended for SD2.x models.\n",
"Clip_skip = 0 #@param {type:\"slider\", min:0, max:4, step:1}\n",
"#@markdown * Ratio of training on empty caption. Improves unconditional guidance.\n",
"#@markdown * Skip the nth last layer of CLIP.\n",
"Clip_skip = 1 #@param {type:\"slider\", min:0, max:4, step:1}\n",
"#@markdown * ratio of captions dropped from training data.\n",
"Conditional_DropOut = 0.04 #@param {type:\"slider\", min:0, max:0.3, step:0.01}\n",
"#@markdown * Ratio of images randomly to flip horizontally. Use for small data sets. May negatively affect likeness of subjects with asymmetric features.\n",
"Picture_flip = 0.0 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n",
"#@markdown * This can improve contrast in light and dark scenes. Use a ratio between 0-10% for best results. 0\n",
"#@markdown * Ratio of images randomly to flip horizontally.\n",
"Picture_flip = 0 #@param {type:\"slider\", min:0, max:0.5, step:0.05}\n",
"#@markdown * This can improve contrast in light and dark scenes. Use a ratio between 0-10% for best results.\n",
"zero_frequency_noise = 0.05 #@param {type:\"slider\", min:0, max:0.25, step:0.01}\n",
"\n",
"#@markdown * Weights and Biases logging token. \n",
"# #@markdown Paste your token here if you have an account so you can use it to track your training progress. If you don't have an account, you can create one for free at https://wandb.ai/site. Log will use your project name from above. This is a free online logging utility.\n",
"# #@markdown Your key is on this page: https://wandb.ai/settings under \"Danger Zone\" \"API Keys\"\n",
"wandb_token = '' #@param{type:\"string\"}\n",
"\n",
"\n",
"\n",
"wandb_settings = \"\"\n",
"if wandb_token:\n",
"  !wandb login $wandb_token\n",
"  wandb_settings = \"--wandb\"\n",
"\n",
"if \"zip\" in Dataset_Location:\n",
"  !rm -r /Training_Data/\n",
"  !mkdir Training_Data\n",
"  !unzip $Dataset_Location -d /Training_Data\n",
"  Dataset_Location = \"/Training_Data\"\n",
"dataset = Dataset_Location\n",
"\n",
"Drive=\"\"\n",
"if Save_to_Gdrive:\n",
"  Drive = \"--logdir /content/drive/MyDrive/everydreamlogs --save_ckpt_dir /content/drive/MyDrive/everydreamlogs/ckpt\"\n",
@@ -390,7 +395,7 @@
"\n",
"textencode = \"\"\n",
"if Disable_text_Encoder:\n",
"  textencode = \"--disable_textenc_training Train_text \"\n",
"  textencode = \"--disable_textenc_training\"\n",
"\n",
"!python train.py --resume_ckpt \"$model\" \\\n",
"  $textencode \\\n",
@@ -415,8 +420,7 @@
"  --sample_steps $Steps_between_samples \\\n",
"  --save_every_n_epoch $Save_every_N_epoch \\\n",
"  --seed $Training_Seed \\\n",
"  --zero_frequency_noise_ratio $zero_frequency_noise \\\n",
"  --useadam8bit \n",
"  --zero_frequency_noise_ratio $zero_frequency_noise\n",
"\n"
]
},
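To see how the changed tail of the command reads in context, here is a minimal, illustrative sketch that assembles only the flags visible in the hunks above, with --useadam8bit dropped and --zero_frequency_noise_ratio now closing the line. All values below are placeholders rather than the notebook defaults, and flags from the elided middle of the command are omitted.

# Hedged sketch, not the notebook's actual code: placeholder values, visible flags only.
model = "last_checkpoint.ckpt"        # placeholder; the notebook uses save_name
textencode = ""                       # or "--disable_textenc_training"
Steps_between_samples = 300           # placeholder
Save_every_N_epoch = 20               # placeholder
Training_Seed = 555                   # placeholder
zero_frequency_noise = 0.05
cmd = (
    f'python train.py --resume_ckpt "{model}" '
    f'{textencode} '
    f'--sample_steps {Steps_between_samples} '
    f'--save_every_n_epoch {Save_every_N_epoch} '
    f'--seed {Training_Seed} '
    f'--zero_frequency_noise_ratio {zero_frequency_noise}'
)
print(cmd)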
@@ -486,8 +490,8 @@
"metadata": {
"accelerator": "GPU",
"colab": {
"include_colab_link": true,
"provenance": []
"provenance": [],
"include_colab_link": true
},
"gpuClass": "standard",
"kernelspec": {
@@ -507,4 +511,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
}
}

@@ -0,0 +1,24 @@
{
  "batch_size": 3,
  "seed": 555,
  "cfgs": [7, 4],
  "scheduler": "dpm++",
  "num_inference_steps": 15,
  "show_progress_bars": true,
  "samples": [
    {
      "prompt": "ted bennet and a man sitting on a sofa with a kitchen in the background",
      "negative_prompt": "distorted, deformed"
    },
    {
      "prompt": "a photograph of ted bennet riding a bicycle",
      "seed": -1
    },
    {
      "random_caption": true,
      "seed": 555,
      "cfgs": [1,11],
      "size": [640, 384]
    }
  ]
}
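The new file pairs top-level defaults (batch_size, seed, cfgs, scheduler, num_inference_steps) with a list of per-sample overrides. Below is a hedged sketch of one way such a file could be merged into per-sample settings; SampleSpec and load_sample_config are illustrative names, not the trainer's actual API, and the fallback values are assumptions.

# Illustrative only: merge top-level defaults with per-sample overrides from the JSON above.
import json
from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class SampleSpec:
    prompt: str = ""
    negative_prompt: str = ""
    seed: int = -1                              # read here as "use a random seed"
    cfgs: List[float] = field(default_factory=lambda: [7.0, 4.0])
    size: Tuple[int, int] = (512, 512)
    random_caption: bool = False

def load_sample_config(path: str) -> List[SampleSpec]:
    with open(path) as f:
        cfg = json.load(f)
    defaults = {k: cfg[k] for k in ("seed", "cfgs") if k in cfg}
    specs = []
    for raw in cfg.get("samples", []):
        merged = {**defaults, **raw}            # per-sample keys win over the defaults
        if "size" in merged:
            merged["size"] = tuple(merged["size"])
        specs.append(SampleSpec(**merged))
    return specs

Under that reading, the first sample inherits seed 555 and cfgs [7, 4], the second asks for a fresh random seed, and the third keeps seed 555 but renders at 640x384 with cfgs 1 and 11.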

@@ -8,7 +8,7 @@ from typing import Generator, Callable, Any
import torch
from PIL import Image, ImageDraw, ImageFont
from colorama import Fore, Style
from diffusers import StableDiffusionPipeline, DDIMScheduler, DPMSolverMultistepScheduler
from diffusers import StableDiffusionPipeline, DDIMScheduler, DPMSolverMultistepScheduler, DDPMScheduler, PNDMScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, KDPM2AncestralDiscreteScheduler
from torch.cuda.amp import autocast
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
@@ -272,7 +272,7 @@ class SampleGenerator:
    @torch.no_grad()
    def _create_scheduler(self, scheduler_config: dict):
        scheduler = self.scheduler
        if scheduler not in ['ddim', 'dpm++']:
        if scheduler not in ['ddim', 'dpm++', 'pndm', 'ddpm', 'lms', 'euler', 'euler_a', 'kdpm2']:
            print(f"unsupported scheduler '{self.scheduler}', falling back to ddim")
            scheduler = 'ddim'

@@ -280,5 +280,17 @@
            return DDIMScheduler.from_config(scheduler_config)
        elif scheduler == 'dpm++':
            return DPMSolverMultistepScheduler.from_config(scheduler_config, algorithm_type="dpmsolver++")
        elif scheduler == 'pndm':
            return PNDMScheduler.from_config(scheduler_config)
        elif scheduler == 'ddpm':
            return DDPMScheduler.from_config(scheduler_config)
        elif scheduler == 'lms':
            return LMSDiscreteScheduler.from_config(scheduler_config)
        elif scheduler == 'euler':
            return EulerDiscreteScheduler.from_config(scheduler_config)
        elif scheduler == 'euler_a':
            return EulerAncestralDiscreteScheduler.from_config(scheduler_config)
        elif scheduler == 'kdpm2':
            return KDPM2AncestralDiscreteScheduler.from_config(scheduler_config)
        else:
            raise ValueError(f"unknown scheduler '{scheduler}'")
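A hedged usage sketch of what the widened scheduler support allows on a plain diffusers pipeline: a short name such as 'euler_a' now maps to EulerAncestralDiscreteScheduler via from_config, mirroring the branches above. The checkpoint id and the standalone pipeline setup are assumptions for illustration; SampleGenerator does this wiring internally.

# Assumption: a standalone pipeline with a placeholder checkpoint id, just to show the swap.
import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
# "euler_a" -> EulerAncestralDiscreteScheduler, rebuilt from the current scheduler's config
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("a photograph of ted bennet riding a bicycle",
             num_inference_steps=15, guidance_scale=7).images[0]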