diff --git a/Train_Colab.ipynb b/Train_Colab.ipynb index 3932e39..cc83a25 100644 --- a/Train_Colab.ipynb +++ b/Train_Colab.ipynb @@ -3,8 +3,8 @@ { "cell_type": "markdown", "metadata": { - "colab_type": "text", - "id": "view-in-github" + "id": "view-in-github", + "colab_type": "text" }, "source": [ "\"Open" @@ -126,7 +126,7 @@ }, "outputs": [], "source": [ - "#@markdown # Finish Install Dependencies into new python\n", + "#@markdown # Finish Install Dependencies into the new python\n", "#@markdown This will take a couple minutes, be patient and watch the output for \"DONE!\"\n", "from IPython.display import clear_output\n", "import subprocess\n", @@ -183,32 +183,34 @@ "outputs": [], "source": [ "#@title Get A Base Model\n", - "#@markdown Choose SD1.5 or Waifu Diffusion 1.3 from the dropdown, or paste your own URL in the box\n", + "#@markdown Choose SD1.5, Waifu Diffusion 1.3, SD2.1, or 2.1(512) from the dropdown, or paste your own URL in the box\n", + "#@markdown * alternately you can link to a HF repo using NAME/MODEL\n", + "#@markdown * link to a set of diffusers on your Gdrive\n", + "#@markdown * paste a url, atm there is no support for .safetensors\n", "\n", - "#@markdown If you already did this once with Gdrive connected, you can skip this step as the cached copy is on your gdrive\n", "from IPython.display import clear_output\n", "!mkdir input\n", "%cd /content/EveryDream2trainer\n", - "MODEL_URL = \"sd_v1-5+vae.ckpt\" #@param [\"sd_v1-5+vae.ckpt\", \"hakurei/waifu-diffusion-v1-3\", \"stabilityai/stable-diffusion-2-1-base\", \"stabilityai/stable-diffusion-2-1\"] {allow-input: true}\n", - "if MODEL_URL == \"sd_v1-5+vae.ckpt\":\n", - " MODEL_URL = \"panopstor/EveryDream\"\n", + "MODEL_LOCATION = \"sd_v1-5+vae.ckpt\" #@param [\"sd_v1-5+vae.ckpt\", \"hakurei/waifu-diffusion-v1-3\", \"stabilityai/stable-diffusion-2-1-base\", \"stabilityai/stable-diffusion-2-1\"] {allow-input: true}\n", + "if MODEL_LOCATION == \"sd_v1-5+vae.ckpt\":\n", + " MODEL_LOCATION = 
"panopstor/EveryDream\"\n", "\n", "import os\n", "\n", "download_path = \"\"\n", "\n", - "if \".co\" in MODEL_URL or \"https\" in MODEL_URL or \"www\" in MODEL_URL: #maybe just add a radio button to download this should work for now\n", + "if \".co\" in MODEL_LOCATION or \"https\" in MODEL_LOCATION or \"www\" in MODEL_LOCATION: #maybe just add a radio button to download this should work for now\n", " print(\"Downloading \")\n", - " !wget $MODEL_URL\n", + " !wget $MODEL_LOCATION\n", " clear_output()\n", " print(\"DONE!\")\n", - " download_path = os.path.join(os.getcwd(), os.path.basename(MODEL_URL))\n", + " download_path = os.path.join(os.getcwd(), os.path.basename(MODEL_LOCATION))\n", "\n", "else:\n", - " save_name = MODEL_URL\n", + " save_name = MODEL_LOCATION\n", "\n", "%cd /content/EveryDream2trainer\n", - "#@markdown * if you chose to link to diffusers Proceed to the [Run EveryDream 2](#scrollTo=j9pEI69WXS9w&line=2&uniqifier=1) cell\n", + "#@markdown * if you chose to link to a .ckpt, select the correct model version in the drop down menu for conversion\n", "\n", "inference_yaml = \" \"\n", "\n", @@ -247,7 +249,7 @@ " save_name = os.path.join(\"/content/drive/MyDrive/everydreamlogs/ckpt\", save_name)\n", "if inference_yaml != \" \":\n", " print(\"Model saved to: \" + save_name + \". The \" + inference_yaml + \" was used!\")\n", - "print(\"Model \" + save_name + \" will be used!, download will start when training beigins\")\n" + "print(\"Model \" + save_name + \" will be used!\")\n" ] }, { @@ -565,8 +567,8 @@ "metadata": { "accelerator": "GPU", "colab": { - "include_colab_link": true, - "provenance": [] + "provenance": [], + "include_colab_link": true }, "gpuClass": "standard", "kernelspec": { @@ -586,4 +588,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file