Major overhaul, part 1: adds validation support and cuts down on the number of cells. PROGRESS BAR WORKS... sort of; it's still a messy display
parent 2271c8f809
commit ee44a73a54
@@ -27,55 +27,94 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
-"cellView": "form",
-"id": "WsYIcz9HY9lx"
+"id": "WsYIcz9HY9lx",
+"cellView": "form"
 },
 "outputs": [],
 "source": [
 "#@title # Install python 3.10 \n",
-"#@markdown # This will show a runtime error, its ok, its on purpose to restart the kernel to update python.\n",
+"#@markdown # This will show a runtime error, it's ok, it's on purpose to restart the kernel to update python.\n",
 "import os\n",
 "import time\n",
+"import sys\n",
 "from IPython.display import clear_output\n",
-"!wget https://github.com/korakot/kora/releases/download/v0.10/py310.sh\n",
-"!bash ./py310.sh -b -f -p /usr/local\n",
-"!python -m ipykernel install --name \"py310\" --user\n",
-"clear_output()\n",
-"time.sleep(1) #needed to clear is before kill\n",
-"os.kill(os.getpid(), 9)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"cellView": "form",
-"id": "f2cdMtCt9Wb6"
-},
-"outputs": [],
-"source": [
-"#@title Verify python version, should be 3.10.something\n",
-"!python --version"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"cellView": "form",
-"id": "d1di4EC6ygw1"
-},
-"outputs": [],
-"source": [
-"#@title Optional connect Gdrive\n",
-"#@markdown # But strongly recommended\n",
+"\n",
+"\n",
+"#@markdown Optional connect Gdrive But strongly recommended\n",
 "#@markdown This will let you put all your training data and checkpoints directly on your drive. Much faster/easier to continue later, less setup time.\n",
 "\n",
 "#@markdown Creates /content/drive/MyDrive/everydreamlogs/ckpt\n",
-"from google.colab import drive\n",
-"drive.mount('/content/drive')\n",
+"Mount_to_Gdrive = True #@param{type:\"boolean\"} \n",
 "\n",
-"!mkdir -p /content/drive/MyDrive/everydreamlogs/ckpt"
+"if Mount_to_Gdrive:\n",
+"    from google.colab import drive\n",
+"    drive.mount('/content/drive')\n",
+"\n",
+"    !mkdir -p /content/drive/MyDrive/everydreamlogs/ckpt\n",
+"\n",
+"# Define a custom function to display a progress bar\n",
+"def display_progress_bar(progress, total, prefix=\"\"):\n",
+"    sys.stdout.write(f\"\\r{prefix}[{'=' * progress}>{' ' * (total - progress - 1)}] {progress + 1}/{total}\")\n",
+"    sys.stdout.flush()\n",
+"\n",
+"total_steps = 9\n",
+"current_step = 0\n",
+"\n",
+"!pip install transformers==4.25.1 --progress-bar on --quiet\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"\n",
+"!pip install watchdog --progress-bar on --quiet\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"!pip install matplotlib --progress-bar on --quiet\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"# Install the alive-progress library\n",
+"!pip install alive-progress --progress-bar on --quiet\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"\n",
+"# Install the tqdm library\n",
+"!pip install tqdm --progress-bar on --quiet\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"# Download the py310.sh script\n",
+"!wget https://github.com/korakot/kora/releases/download/v0.10/py310.sh -q\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"# Run the py310.sh script\n",
+"try:\n",
+"    output = os.popen('bash ./py310.sh -b -f -p /usr/local 2>&1').read()\n",
+"    total_lines = len(output.splitlines())\n",
+"    for i, line in enumerate(output.splitlines()):\n",
+"        clear_output(wait=True)\n",
+"        display_progress_bar(i, total_lines, \"install progress:\")\n",
+"except Exception as e:\n",
+"    print(str(e))\n",
+"\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"# Install the py310 kernel\n",
+"!python -m ipykernel install --name \"py310\" --user > /dev/null 2>&1\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"\n",
+"# Remove the installer script\n",
+"!rm /content/py310.sh\n",
+"current_step += 1\n",
+"display_progress_bar(current_step, total_steps, \"install progress:\")\n",
+"clear_output()\n",
+"print(\"\\nInstallation completed.\")\n",
+"time.sleep(1) # give the cleared output a moment before the kernel is killed\n",
+"os.kill(os.getpid(), 9)\n"
 ]
 },
 {
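Aside: the `display_progress_bar` helper added in this cell rewrites a single terminal line with a carriage return. A minimal standalone sketch of the same technique (the driving loop and the sleep here are illustrative, not part of the notebook):

```python
import sys
import time

def display_progress_bar(progress, total, prefix=""):
    # "\r" returns to the start of the line, so each call redraws the same bar.
    sys.stdout.write(f"\r{prefix}[{'=' * progress}>{' ' * (total - progress - 1)}] {progress + 1}/{total}")
    sys.stdout.flush()

total_steps = 9
for step in range(total_steps):
    time.sleep(0.1)  # stand-in for one install step
    display_progress_bar(step, total_steps, "install progress:")
print()  # drop to a new line once the bar is full
```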
@@ -87,44 +126,59 @@
 },
 "outputs": [],
 "source": [
-"#@markdown # Install Dependencies\n",
+"#@markdown # Finish Install Dependencies into new python\n",
 "#@markdown This will take a couple minutes, be patient and watch the output for \"DONE!\"\n",
 "from IPython.display import clear_output\n",
-"from subprocess import getoutput\n",
-"s = getoutput('nvidia-smi')\n",
-"!pip install -q torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url \"https://download.pytorch.org/whl/cu117\"\n",
-"!pip install -q transformers==4.25.1\n",
-"!pip install -q diffusers[torch]==0.13.0\n",
-"!pip install -q pynvml==11.4.1\n",
-"!pip install -q bitsandbytes==0.35.0\n",
-"!pip install -q ftfy==6.1.1\n",
-"!pip install -q aiohttp==3.8.3\n",
-"!pip install -q tensorboard>=2.11.0\n",
-"!pip install -q protobuf==3.20.1\n",
-"!pip install -q wandb==0.13.6\n",
-"!pip install -q pyre-extensions==0.0.23\n",
-"!pip install -q xformers==0.0.16\n",
-"!pip install -q pytorch-lightning==1.6.5\n",
-"!pip install -q OmegaConf==2.2.3\n",
-"!pip install -q numpy==1.23.5\n",
-"!pip install -q colorama\n",
-"!pip install -q keyboard\n",
-"!pip install -q triton\n",
-"!pip install -q lion-pytorch\n",
+"import subprocess\n",
+"from tqdm.notebook import tqdm\n",
+"\n",
+"packages = [\n",
+"    ('torch==1.13.1+cu117 torchvision==0.14.1+cu117', 'https://download.pytorch.org/whl/cu117'),\n",
+"    'transformers==4.25.1',\n",
+"    'diffusers[torch]==0.13.0',\n",
+"    'pynvml==11.4.1',\n",
+"    'bitsandbytes==0.35.0',\n",
+"    'ftfy==6.1.1',\n",
+"    'aiohttp==3.8.3',\n",
+"    'tensorboard>=2.11.0',\n",
+"    'protobuf==3.20.1',\n",
+"    'wandb==0.13.6',\n",
+"    'pyre-extensions==0.0.23',\n",
+"    'xformers==0.0.16',\n",
+"    'pytorch-lightning==1.6.5',\n",
+"    'OmegaConf==2.2.3',\n",
+"    'numpy==1.23.5',\n",
+"    'colorama',\n",
+"    'keyboard',\n",
+"    'triton',\n",
+"    'lion-pytorch'\n",
+"]\n",
+"\n",
+"for package in tqdm(packages, desc='Installing packages', unit='package'):\n",
+"    if isinstance(package, tuple):\n",
+"        package_name, extra_index_url = package\n",
+"        cmd = f\"pip install -q {package_name} --extra-index-url {extra_index_url}\"\n",
+"    else:\n",
+"        cmd = f\"pip install -q {package}\"\n",
+"\n",
+"    subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n",
+"\n",
 "clear_output()\n",
+"\n",
 "!git clone https://github.com/victorchall/EveryDream2trainer.git\n",
 "%cd /content/EveryDream2trainer\n",
 "!python utils/get_yamls.py\n",
 "clear_output()\n",
-"print(\"DONE!\")"
+"print(\"DONE installing dependencies! Make sure we are using python 3.10.x\")\n",
+"!python --version"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
-"cellView": "form",
-"id": "unaffeqGP_0A"
+"id": "unaffeqGP_0A",
+"cellView": "form"
 },
 "outputs": [],
 "source": [
@@ -139,124 +193,61 @@
 "if MODEL_URL == \"sd_v1-5+vae.ckpt\":\n",
 "    MODEL_URL = \"panopstor/EveryDream\"\n",
 "\n",
+"import os\n",
+"\n",
+"download_path = \"\"\n",
+"\n",
 "if \".co\" in MODEL_URL or \"https\" in MODEL_URL or \"www\" in MODEL_URL: #maybe just add a radio button to download this should work for now\n",
 "    print(\"Downloading \")\n",
 "    !wget $MODEL_URL\n",
 "    clear_output()\n",
 "    print(\"DONE!\")\n",
+"    download_path = os.path.join(os.getcwd(), os.path.basename(MODEL_URL))\n",
+"\n",
 "else:\n",
 "    save_name = MODEL_URL\n",
-"    print(\"using diffusers from \" + save_name + \" a download will start when training begins!\")\n",
 "\n",
 "%cd /content/EveryDream2trainer\n",
-"#@markdown * if you chose to link to diffusers Proceed to the [Run EveryDream 2](#scrollTo=j9pEI69WXS9w&line=2&uniqifier=1) cell\n"
-]
-},
-{
-"cell_type": "markdown",
-"source": [
-"# Optional, If you downloaded a Ckpt expand this to convert your model to diffusers, if you linked to diffusers this can be skipped!"
-],
-"metadata": {
-"id": "jSOXhd3GXqWM"
-}
-},
-{
-"cell_type": "markdown",
-"metadata": {
-"id": "nEzuEYH0536C"
-},
-"source": [
-"In order to train, we need to convert a .ckpt into diffusers, if you linked to diffuers above or plan to chose to resume from a previous model you may skip this step."
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"cellView": "form",
-"id": "tPvQSo6ScF2c"
-},
-"outputs": [],
-"source": [
-"import os\n",
-"#@title Setup conversion - Skip if you linked diffusers\n",
+"#@markdown * if you chose to link to diffusers Proceed to the [Run EveryDream 2](#scrollTo=j9pEI69WXS9w&line=2&uniqifier=1) cell\n",
 "\n",
-"#@markdown **If you already did this once with Gdrive connected, you can skip this step as the cached copy is on your gdrive.** \n",
-"# \n",
-"# If you are not sure, look in your Gdrive for `everydreamlogs/ckpt` and see if you have a folder with the `save_name` below.\n",
+"inference_yaml = \" \"\n",
 "\n",
-"#@markdown Pick the `model_type` in the dropdown. This is the model type that you are converting and you downloaded above. This is important as it will determine the model architecture and the correct settings to use.\n",
+"# Check if the downloaded or copied model is a .ckpt file\n",
+"#@markdown is the model 1.5 or 2.1 based\n",
+"if download_path.endswith(\".ckpt\"):\n",
+"    model_type = \"SD1x\" #@param [\"SD1x\", \"SD2_512_base\", \"SD21\"]\n",
+"    save_path = download_path\n",
+"    if \".ckpt\" in save_name:\n",
+"        save_name = save_name.replace(\".ckpt\", \"\")\n",
 "\n",
-"#@markdown * `SD1x` is all SD1.x based models *(SD1.4, SD1.5, Waifu Diffusion 1.3, etc)*\n",
+"    img_size = 512\n",
+"    upscale_attention = False\n",
+"    prediction_type = \"epsilon\"\n",
+"    if model_type == \"SD1x\":\n",
+"        inference_yaml = \"v1-inference.yaml\"\n",
+"    elif model_type == \"SD2_512_base\":\n",
+"        upscale_attention = True\n",
+"        inference_yaml = \"v2-inference.yaml\"\n",
+"    elif model_type == \"SD21\":\n",
+"        upscale_attention = True\n",
+"        prediction_type = \"v_prediction\"\n",
+"        inference_yaml = \"v2-inference-v.yaml\"\n",
+"        img_size = 768\n",
 "\n",
-"#@markdown * `SD2_512_base` is the SD2 512 base model\n",
+"    !python utils/convert_original_stable_diffusion_to_diffusers.py --scheduler_type ddim \\\n",
+"    --original_config_file $inference_yaml \\\n",
+"    --image_size $img_size \\\n",
+"    --checkpoint_path $save_path \\\n",
+"    --prediction_type $prediction_type \\\n",
+"    --upcast_attn False \\\n",
+"    --dump_path $save_name\n",
 "\n",
-"#@markdown * `SD21` is all SD2 768 models. *(ex. SD2.1 768, or trained models based on that)*\n",
-"\n",
-"#@markdown If you are not sure, double check the model author's page or ask for help on [Discord](https://discord.gg/uheqxU6sXN).\n",
-"model_type = \"SD1x\" #@param [\"SD1x\", \"SD2_512_base\", \"SD21\"]\n",
-"\n",
-"#@markdown This is the temporary ckpt file that was downloaded above. If you downloaded a different model, you can change this. *Hint: look at your file manager in the EveryDream2trainer folder for .ckpt files*.\n",
-"base_path = \"/content/EveryDream2trainer/sd_v1-5_vae.ckpt\" #@param {type:\"string\"}\n",
-"\n",
-"#@markdown The name that you will use when selecting this model in the future training sessons.\n",
-"save_name = \"SD15\" #@param{type:\"string\"}\n",
-"\n",
-"#@markdown If you are using Gdrive, this will save the converted model to your Gdrive for future use so you can skip downloading and converting the model.\n",
-"cache_to_gdrive = True #@param{type:\"boolean\"}\n",
-"\n",
-"if cache_to_gdrive:\n",
-"    save_name = os.path.join(\"/content/drive/MyDrive/everydreamlogs/ckpt\", save_name)\n",
-"\n",
-"img_size = 512\n",
-"upscale_attention = False\n",
-"prediction_type = \"epsilon\"\n",
-"if model_type == \"SD1x\":\n",
-"    inference_yaml = \"v1-inference.yaml\"\n",
-"elif model_type == \"SD2_512_base\":\n",
-"    upscale_attention = True\n",
-"    inference_yaml = \"v2-inference.yaml\"\n",
-"elif model_type == \"SD21\":\n",
-"    upscale_attention = True\n",
-"    prediction_type = \"v_prediction\"\n",
-"    inference_yaml = \"v2-inference-v.yaml\"\n",
-"    img_size = 768\n",
-"\n",
-"print(base_path)\n",
-"print(inference_yaml)\n",
-"\n",
-"!python utils/convert_original_stable_diffusion_to_diffusers.py --scheduler_type ddim \\\n",
-"--original_config_file {inference_yaml} \\\n",
-"--image_size {img_size} \\\n",
-"--checkpoint_path {base_path} \\\n",
-"--prediction_type {prediction_type} \\\n",
-"--upcast_attn False \\\n",
-"--dump_path {save_name}"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"cellView": "form",
-"id": "bLpcvpGJB4Gu"
-},
-"outputs": [],
-"source": [
-"#@title Pick your base model from a diffusers model saved to your Gdrive (converted above)\n",
-"\n",
-"#@markdown Do not skip this cell before reading.\n",
-"\n",
-"#@markdown * If you have preveiously saved diffusers on your drive, or needed to convert a ckpt you can select the diffuser path here\n",
-"\n",
-"#@markdown * If you chose to link to diffusers from hugging face you can skip this cell\n",
-"\n",
-"#@markdown ex. */content/drive/MyDrive/everydreamlogs/myproject_202208/ckpts/interrupted-gs023*\n",
-"\n",
-"#@markdown The default for SD1.5 converted above would be */content/drive/MyDrive/everydreamlogs/ckpt/SD15*\n",
-"Resume_Model = \"/content/drive/MyDrive/everydreamlogs/ckpt/SD15\" #@param{type:\"string\"} \n",
-"save_name = Resume_Model"
+"    # Set the save path to the GDrive directory if cache_to_gdrive is True\n",
+"    if cache_to_gdrive:\n",
+"        save_name = os.path.join(\"/content/drive/MyDrive/everydreamlogs/ckpt\", save_name)\n",
+"if inference_yaml != \" \":\n",
+"    print(\"Model saved to: \" + save_name + \". The \" + inference_yaml + \" was used!\")\n",
+"print(\"Model \" + save_name + \" will be used! Download will start when training begins\")\n"
 ]
 },
 {
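Aside: the `model_type` branching added above reduces to a small lookup table; an equivalent sketch (same values as in the diff, just restructured):

```python
# model_type -> (inference_yaml, prediction_type, img_size, upscale_attention)
MODEL_CONFIGS = {
    "SD1x":         ("v1-inference.yaml",   "epsilon",      512, False),
    "SD2_512_base": ("v2-inference.yaml",   "epsilon",      512, True),
    "SD21":         ("v2-inference-v.yaml", "v_prediction", 768, True),
}

inference_yaml, prediction_type, img_size, upscale_attention = MODEL_CONFIGS["SD1x"]
```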
@@ -276,13 +267,20 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
-"cellView": "form",
-"id": "j9pEI69WXS9w"
+"id": "j9pEI69WXS9w",
+"cellView": "form"
 },
 "outputs": [],
 "source": [
 "from google.colab import runtime\n",
+"from IPython.display import clear_output\n",
 "import time\n",
+"from tqdm import tqdm\n",
+"import re\n",
+"import sys\n",
+"import subprocess\n",
+"import shutil\n",
+"\n",
 "#@title #Run Everydream 2\n",
 "%cd /content/EveryDream2trainer\n",
 "#@markdown If you want to use a .json config or upload your own, skip this cell and run the cell below instead\n",
@@ -384,11 +382,28 @@
 "    !wandb login $wandb_token\n",
 "    wandb_settings = \"--wandb\"\n",
 "\n",
-"if \"zip\" in Dataset_Location:\n",
-"    !rm -r /Training_Data/\n",
-"    !mkdir Training_Data\n",
-"    !unzip $Dataset_Location -d /Training_Data\n",
-"    Dataset_Location = \"/Training_Data\"\n",
+"#@markdown use validation with wandb\n",
+"\n",
+"validation = False #@param{type:\"boolean\"}\n",
+"validate = \"\"\n",
+"if validation:\n",
+"    validate = \"--validation_config validation_default.json\"\n",
+"\n",
+"extensions = ['.zip', '.7z', '.rar', '.tgz']\n",
+"uncompressed_dir = 'Training_Data'\n",
+"\n",
+"if any(ext in Dataset_Location for ext in extensions):\n",
+"    # Create the uncompressed directory if it doesn't exist\n",
+"    if not os.path.exists(uncompressed_dir):\n",
+"        os.makedirs(uncompressed_dir)\n",
+"\n",
+"    # Extract the compressed file to the uncompressed directory\n",
+"    shutil.unpack_archive(Dataset_Location, uncompressed_dir)\n",
+"\n",
+"    # Set the dataset location to the uncompressed directory\n",
+"    Dataset_Location = uncompressed_dir\n",
+"\n",
+"# Use the dataset location in the rest of your code\n",
 "dataset = Dataset_Location\n",
 "\n",
 "Drive=\"\"\n",
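Aside: `shutil.unpack_archive` only understands the zip and tar families out of the box, while the `extensions` list above also includes `.7z` and `.rar`; those would raise `shutil.ReadError` unless an extra unpacker is registered. A quick check (the `dataset.7z` filename is hypothetical):

```python
import shutil

# Formats supported by default: zip, tar, gztar, bztar, xztar.
print([name for name, exts, desc in shutil.get_unpack_formats()])

try:
    shutil.unpack_archive("dataset.7z", "Training_Data")
except shutil.ReadError as err:
    print("unsupported archive format:", err)
```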
@@ -404,8 +419,6 @@
 "Gradient = \"\"\n",
 "if Gradient_checkpointing:\n",
 "    Gradient = \"--gradient_checkpointing \"\n",
-"if \"A100\" in s:\n",
-"    Gradient = \"\"\n",
 "\n",
 "DX = \"\" \n",
 "if Disable_Xformers:\n",
@@ -415,39 +428,68 @@
 "if shuffle_tags:\n",
 "    shuffle = \"--shuffle_tags \"\n",
 "\n",
+"def parse_progress(log_line):\n",
+"    match = re.search(r'\\((\\d+)%\\)', log_line)\n",
+"    if match:\n",
+"        return int(match.group(1))\n",
+"    return None\n",
+"\n",
+"\n",
 "textencode = \"\"\n",
 "if Disable_text_Encoder:\n",
 "    textencode = \"--disable_textenc_training\"\n",
 "\n",
-"!python train.py --resume_ckpt \"$model\" \\\n",
-"  $textencode \\\n",
-"  $Gradient \\\n",
-"  $shuffle \\\n",
-"  $Drive \\\n",
-"  $DX \\\n",
-"  $wandb_settings \\\n",
+"def update_progress_bar(progress: float):\n",
+"    print(\"Training progress: {:.2f}%\".format(progress))\n",
+"    print(\"[{0}{1}]\".format('#' * int(progress // 2), ' ' * (50 - int(progress // 2))))\n",
+"\n",
+"# Start the training process and capture the output\n",
+"command = f\"\"\"python train.py --resume_ckpt \"{model}\" \\\n",
+"  {textencode} \\\n",
+"  {Gradient} \\\n",
+"  {shuffle} \\\n",
+"  {Drive} \\\n",
+"  {DX} \\\n",
+"  {validate} \\\n",
+"  {wandb_settings} \\\n",
 "  --amp \\\n",
-"  --clip_skip $Clip_skip \\\n",
-"  --batch_size $Batch_Size \\\n",
-"  --grad_accum $Gradient_steps \\\n",
-"  --cond_dropout $Conditional_DropOut \\\n",
-"  --data_root \"$dataset\" \\\n",
-"  --flip_p $Picture_flip \\\n",
-"  --lr $Learning_Rate \\\n",
-"  --lr_scheduler \"$Schedule\" \\\n",
-"  --max_epochs $Max_Epochs \\\n",
-"  --project_name \"$Project_Name\" \\\n",
-"  --resolution $Resolution \\\n",
-"  --sample_prompts \"$Sample_File\" \\\n",
-"  --sample_steps $Steps_between_samples \\\n",
-"  --save_every_n_epoch $Save_every_N_epoch \\\n",
-"  --seed $Training_Seed \\\n",
-"  --zero_frequency_noise_ratio $zero_frequency_noise \n",
+"  --clip_skip {Clip_skip} \\\n",
+"  --batch_size {Batch_Size} \\\n",
+"  --grad_accum {Gradient_steps} \\\n",
+"  --cond_dropout {Conditional_DropOut} \\\n",
+"  --data_root \"{dataset}\" \\\n",
+"  --flip_p {Picture_flip} \\\n",
+"  --lr {Learning_Rate} \\\n",
+"  --log_step 25 \\\n",
+"  --lr_scheduler \"{Schedule}\" \\\n",
+"  --max_epochs {Max_Epochs} \\\n",
+"  --project_name \"{Project_Name}\" \\\n",
+"  --resolution {Resolution} \\\n",
+"  --sample_prompts \"{Sample_File}\" \\\n",
+"  --sample_steps {Steps_between_samples} \\\n",
+"  --save_every_n_epoch {Save_every_N_epoch} \\\n",
+"  --seed {Training_Seed} \\\n",
+"  --zero_frequency_noise_ratio {zero_frequency_noise}\"\"\"\n",
 "\n",
+"process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n",
 "\n",
-"if Disconnect_after_training :\n",
-"    time.sleep(40)\n",
-"    runtime.unassign()"
+"# Initialize the progress bar\n",
+"progress_bar = tqdm(total=100, desc=\"Training progress\", ncols=100)\n",
+"\n",
+"last_output = \"\"\n",
+"for log_line in process.stdout:\n",
+"    log_line = log_line.strip()\n",
+"    if log_line:\n",
+"        if log_line != last_output:\n",
+"            progress = parse_progress(log_line)\n",
+"            if progress is not None:\n",
+"                update_progress_bar(progress)\n",
+"            else:\n",
+"                print(log_line)\n",
+"            last_output = log_line\n",
+"\n",
+"# Wait for the training process to finish\n",
+"process.wait()"
 ]
 },
 {
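Aside: the new progress display keys entirely on `parse_progress`, whose regex pulls a percentage token like `(42%)` out of each trainer log line. A standalone check of that behavior (the sample log lines are made up):

```python
import re

def parse_progress(log_line):
    # Matches a percentage such as "(42%)" anywhere in the line.
    match = re.search(r'\((\d+)%\)', log_line)
    if match:
        return int(match.group(1))
    return None

assert parse_progress("epoch 3/10 (42%) loss=0.12") == 42
assert parse_progress("saving checkpoint...") is None
```

Note that the `progress_bar = tqdm(...)` object created in the cell is never advanced; `update_progress_bar` prints its own bar instead, which is likely the "messy display" the commit message admits to.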