{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "view-in-github", "colab_type": "text" }, "source": [ "" ] }, { "cell_type": "markdown", "metadata": { "id": "blaLMSbkPHhG" }, "source": [ "
\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "cellView": "form", "id": "hAuBbtSvGpau" }, "outputs": [], "source": [
"#@markdown # Setup and Install Dependencies\n",
"from IPython.display import clear_output, display, HTML\n",
"import subprocess\n",
"import time\n",
"import os\n",
"from tqdm.auto import tqdm\n",
"import PIL\n",
"\n",
"# Helper function for colored terminal text\n",
"def colored(r, g, b, text):\n",
"    return f\"\\033[38;2;{r};{g};{b}m{text} \\033[38;2;255;255;255m\"\n",
"\n",
"#@markdown Optionally connect Google Drive (strongly recommended).\n",
"#@markdown This lets you keep all of your training data and checkpoints directly on your Drive,\n",
"#@markdown which makes it much faster and easier to continue training later, with less setup time.\n",
"\n",
"#@markdown Creates /content/drive/MyDrive/everydreamlogs/ckpt\n",
"Mount_to_Gdrive = True #@param{type:\"boolean\"}\n",
"\n",
"# Clone the git repository\n",
"print(colored(0, 255, 0, 'Cloning git repository...'))\n",
"!git clone https://github.com/victorchall/EveryDream2trainer.git\n",
"\n",
"if Mount_to_Gdrive:\n",
"    from google.colab import drive\n",
"    drive.mount('/content/drive')\n",
"\n",
"    !mkdir -p /content/drive/MyDrive/everydreamlogs/ckpt\n",
"\n",
"%cd /content/EveryDream2trainer\n",
"\n",
"# Download Arial font\n",
"print(colored(0, 255, 0, 'Downloading Arial font...'))\n",
"!wget -O arial.ttf https://raw.githubusercontent.com/matomo-org/travis-scripts/master/fonts/Arial.ttf\n",
"\n",
"packages = [\n",
"    'transformers==4.29.2',\n",
"    'diffusers[torch]==0.14.0',\n",
"    'pynvml==11.4.1',\n",
"    'bitsandbytes==0.37.2',\n",
"    'ftfy==6.1.1',\n",
"    'aiohttp==3.8.4',\n",
"    'compel~=1.1.3',\n",
"    'protobuf==3.20.1',\n",
"    'wandb==0.15.3',\n",
"    'pyre-extensions==0.0.29',\n",
"    'xformers==0.0.20',\n",
"    'pytorch-lightning==1.6.5',\n",
"    'OmegaConf==2.2.3',\n",
"    'tensorboard>=2.11.0',\n",
"    'tensorrt',\n",
"    'colorama',\n",
"    'keyboard',\n",
"    'lion-pytorch'\n",
"]\n",
"\n",
"print(colored(0, 255, 0, 'Installing packages...'))\n",
"for package in tqdm(packages, desc='Installing packages', unit='package', bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}'):\n",
"    if isinstance(package, tuple):\n",
"        package_name, extra_index_url = package\n",
"        cmd = f\"pip install -I -q {package_name} --extra-index-url {extra_index_url}\"\n",
"    else:\n",
"        cmd = f\"pip install -q {package}\"\n",
"\n",
"    subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n",
"\n",
"clear_output()\n",
"\n",
"# Fetch the YAML config files used by the trainer\n",
"print(colored(0, 255, 0, 'Running utils/get_yamls.py...'))\n",
"!python utils/get_yamls.py\n",
"clear_output()\n",
"\n",
"print(colored(0, 255, 0, \"Done installing dependencies.\"))\n",
"\n",
"# Import pynvml and get GPU details\n",
"import pynvml\n",
"\n",
"pynvml.nvmlInit()\n",
"\n",
"handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n",
"\n",
"gpu_name = pynvml.nvmlDeviceGetName(handle)\n",
"gpu_memory = pynvml.nvmlDeviceGetMemoryInfo(handle).total / 1024**3\n",
"cuda_version_number = pynvml.nvmlSystemGetCudaDriverVersion_v2()\n",
"cuda_version_major = cuda_version_number // 1000\n",
"cuda_version_minor = (cuda_version_number % 1000) // 10\n",
"cuda_version = f\"{cuda_version_major}.{cuda_version_minor}\"\n",
"\n",
"pynvml.nvmlShutdown()\n",
"\n",
"Python_version = !python --version\n",
"import torch\n",
"import torchvision\n",
"import xformers\n",
"\n",
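"# Optional sanity check (a minimal sketch): warn early if PyTorch cannot see a CUDA device,\n",
"# e.g. when the Colab runtime type is not set to GPU.\n",
"if not torch.cuda.is_available():\n",
"    print(colored(255, 0, 0, 'WARNING: PyTorch cannot see a CUDA device. Set the Colab runtime type to GPU.'))\n",
"\n",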
"display(HTML(f\"\"\"\n",
"<table>\n",
"<tr><td>Python version:</td><td>{Python_version[0]}</td><td>GPU Name:</td><td>{gpu_name}</td></tr>\n",
"<tr><td>PyTorch version:</td><td>{torch.__version__}</td><td>GPU Memory (GB):</td><td>{gpu_memory:.2f}</td></tr>\n",
"<tr><td>Torchvision version:</td><td>{torchvision.__version__}</td><td>CUDA version:</td><td>{cuda_version}</td></tr>\n",
"<tr><td>XFormers version:</td><td>{xformers.__version__}</td></tr>\n", "