1508 lines
68 KiB
Plaintext
1508 lines
68 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"id": "gfKvWAVnz8OB",
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"# AUTOMATIC1111's Stable Diffusion WebUI\n",
|
|
"\n",
|
|
"https://github.com/AUTOMATIC1111/stable-diffusion-webui\n",
|
|
"\n",
|
|
"Loosely based on https://colab.research.google.com/drive/1kw3egmSn-KgWsikYvOMjJkVDsPLjEMzl\n",
|
|
"\n",
|
|
"**Guides**\n",
|
|
"- [Getting started on Paperspace](https://github.com/Engineer-of-Stuff/stable-diffusion-paperspace/blob/main/Docs/Paperspace%20Guide%20for%20Retards.md)\n",
|
|
"- [Using the WebUI](https://rentry.org/voldy)\n",
|
|
"- [Using the Inpainter](https://rentry.org/drfar)\n",
|
|
"- [Textual Inversion](https://rentry.org/aikgx)\n",
|
|
"- [Crowd-Sourced Prompts](https://lexica.art/)\n",
|
|
"- [Artist Name Prompts](https://sgreens.notion.site/sgreens/4ca6f4e229e24da6845b6d49e6b08ae7?v=fdf861d1c65d456e98904fe3f3670bd3)\n",
|
|
"- [Stable Diffusion Models](https://cyberes.github.io/stable-diffusion-models)\n",
|
|
"- [Textual Inversion Models](https://cyberes.github.io/stable-diffusion-textual-inversion-models/)\n",
|
|
"- [Have I Been Trained?](https://haveibeentrained.com/)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"# Installation and Setup\n",
|
|
"\n",
|
|
"You must reinstall everything each time you restart the machine. If already downloaded, dependencies will be auto-updated."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**Where to store your files**\n",
|
|
"\n",
|
|
"`/storage/` is persistent storage shared across all machines on your account. Mounted to your machine.\n",
|
|
"\n",
|
|
"`/notebooks/` is storage for this notebook only. This directory has to be copied into your machine which can increase start/stop times if it's very large. To avoid this, put large files in `/storage/`.\n",
|
|
"\n",
|
|
"`/tmp/` <mark style=\"background-color:lime\">is not a persistent directory, meaning your files there will be deleted when the machine turns off.</mark>\n",
|
|
"\n",
|
|
"<br>\n",
|
|
"\n",
|
|
"<mark style=\"background-color: #ff780082\">If you are having storage issues</mark>, set `repo_storage_dir` to `/tmp/stable-diffusion`. Make sure `symlink_to_notebooks` is set to `True` so it gets linked back to `/notebooks/`.\n",
|
|
"\n",
|
|
"<br>\n",
|
|
"\n",
|
|
"<mark>You must uncomment the correct section and run the block below or else the notebook won't work!</mark>\n",
|
|
"\n",
|
|
"Select the section you want and do `ctrl + /` to uncomment. If you change any settings here, rerun this cell."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"model_storage_dir = '/storage/models' # Where to store your model checkpoints.\n",
|
|
"\n",
|
|
"repo_storage_dir = '/storage/stable-diffusion' # Where to store your Stable Diffusion-related files.\n",
|
|
"\n",
|
|
"pip_cache_dir = None # The installer can cache pip wheels so you don't have to re-download them\n",
|
|
" # every time you start the machine. I recommed setting it to '/storage/pip/cache'\n",
|
|
"\n",
|
|
"\n",
|
|
"# Other optional settings\n",
|
|
"# You don't have to change these if you don't want to.\n",
|
|
"\n",
|
|
"symlink_to_notebooks = True # Enables the creation of symlinks back to /notebooks/\n",
|
|
"\n",
|
|
"activate_xformers = True # Enables the xformers optimizations using pre-built wheels.\n",
|
|
" # Setting to True will automatically set up your environment/machine for xformers. \n",
|
|
"\n",
|
|
"link_novelai_anime_vae = True # Enables the linking of animevae.pt to each of the NovelAI models.\n",
|
|
" # Set to True if you've downloaded both the NovelAI models and hypernetworks.\n",
|
|
"\n",
|
|
"activate_deepdanbooru = False # Enable and install DeepDanbooru -> https://github.com/KichangKim/DeepDanbooru\n",
|
|
"\n",
|
|
"activate_medvram = True # Enable medvram option.\n",
|
|
" # These are model optimizations which will reduce VRAM usage at the expense of some speed.\n",
|
|
" # Set to False if you have a lot of VRAM.\n",
|
|
"\n",
|
|
"disable_pickle_check = False # Disable the automatic check for unexpected data in model files.\n",
|
|
" # Leave this set to False unless you have a reason to disable the check.\n",
|
|
"\n",
|
|
"gradio_port = False # Launch Gradio on a specific port. Set to False to let Gradio choose a port.\n",
|
|
" # This disables online Gradio app mode and you will only be able to access it on your local network.\n",
|
|
"\n",
|
|
"gradio_auth = False # Enable gradio_auth and insecure-extension-access option.\n",
|
|
" # Set to \"me:password\" to enable.\n",
|
|
"\n",
|
|
"search_paperspace_datasets = True # Enable searching for checkpoints in /datasets to link to the webui\n",
|
|
"\n",
|
|
"ui_theme = None # Set the WEB UI theme. Values can be None (default) or 'dark'.\n",
|
|
"\n",
|
|
"insecure_extension_access = False # Force enable extensions without a password.\n",
|
|
" # If you don't set a password anyone can install and run arbitrary code on your machine!\n",
|
|
" # Instead, use gradio_auth which will automatically enable extensions when set.\n",
|
|
"\n",
|
|
"export_storage_dir = '/notebooks/exports' # Where the generated images will be exported to.\n",
|
|
" \n",
|
|
"# ===================================================================================================\n",
|
|
"# Save variables to Jupiter's temp storage so we can access it even if the kernel restarts.\n",
|
|
"%store symlink_to_notebooks model_storage_dir repo_storage_dir export_storage_dir activate_xformers link_novelai_anime_vae activate_deepdanbooru activate_medvram disable_pickle_check gradio_port gradio_auth search_paperspace_datasets ui_theme insecure_extension_access pip_cache_dir"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"<mark>If you see any errors please check your settings!</mark>\n",
|
|
"\n",
|
|
"**Don't forget, there's a [block in the Tools section](#Download-the-latest-version-of-this-notebook-from-Github) at the bottom to update this notebook to [the latest version](https://github.com/Engineer-of-Stuff/stable-diffusion-paperspace/blob/main/StableDiffusionUI_Voldemort_paperspace.ipynb) on GitHub.**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"## Clone the WebUI repository"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {
|
|
"id": "sBbcB4vwj_jm",
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"# You'll see this little code block at the beginning of every cell.\n",
|
|
"# It makes sure you have ran the first block that defines your settings.\n",
|
|
"try:\n",
|
|
" %store -r symlink_to_notebooks model_storage_dir repo_storage_dir\n",
|
|
" test = [symlink_to_notebooks, model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
" \n",
|
|
"import os\n",
|
|
"from pathlib import Path\n",
|
|
"\n",
|
|
"repo_storage_dir = Path(repo_storage_dir)\n",
|
|
"stable_diffusion_webui_path = repo_storage_dir / 'stable-diffusion-webui'\n",
|
|
"\n",
|
|
"if not stable_diffusion_webui_path.exists():\n",
|
|
" !mkdir -p \"{stable_diffusion_webui_path}\"\n",
|
|
" !git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui \"{stable_diffusion_webui_path}\"\n",
|
|
"else:\n",
|
|
" print('stable-diffusion-webui already downloaded, updating...')\n",
|
|
" !cd \"{stable_diffusion_webui_path}\" && git pull # no % so we don't interfere with the main process\n",
|
|
"\n",
|
|
"!mkdir -p \"{repo_storage_dir / 'stable-diffusion-webui' / 'outputs'}\"\n",
|
|
"!mkdir -p \"{repo_storage_dir / 'stable-diffusion-webui' / 'log'}\"\n",
|
|
"\n",
|
|
"symlinks = [\n",
|
|
" (repo_storage_dir / 'stable-diffusion-webui', Path('/notebooks/stable-diffusion-webui')),\n",
|
|
" (repo_storage_dir / 'stable-diffusion-webui' / 'outputs', Path('/notebooks/outputs')),\n",
|
|
" (repo_storage_dir / 'stable-diffusion-webui' / 'log', repo_storage_dir / 'stable-diffusion-webui' / 'outputs' / 'log'),\n",
|
|
" (Path('/storage'), Path('/notebooks/storage')),\n",
|
|
" ]\n",
|
|
"\n",
|
|
"if symlink_to_notebooks and repo_storage_dir != '/notebooks':\n",
|
|
" print('\\nCreating Symlinks...')\n",
|
|
" for src, dest in symlinks:\n",
|
|
" # If `/notebooks/stable-diffusion-webui` is a broken symlink then remove it.\n",
|
|
" # The WebUI might have previously been installed in a non-persistent directory.\n",
|
|
" if dest.is_symlink() and not dest.exists(): # .exists() validates a symlink\n",
|
|
" print('Symlink broken, removing:', dest)\n",
|
|
" dest.unlink()\n",
|
|
" if not dest.exists():\n",
|
|
" os.symlink(src, dest)\n",
|
|
" print(os.path.realpath(dest), '->', dest)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"id": "C68TUpkq0nj_",
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"## Install requirements and download repositories"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {
|
|
"id": "SaAJk33ppFw1",
|
|
"scrolled": true,
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r symlink_to_notebooks model_storage_dir repo_storage_dir activate_xformers activate_deepdanbooru pip_cache_dir\n",
|
|
" test = [symlink_to_notebooks, model_storage_dir, repo_storage_dir, activate_xformers, activate_deepdanbooru, pip_cache_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"%cd \"{Path(repo_storage_dir, 'stable-diffusion-webui')}\"\n",
|
|
"\n",
|
|
"!pip install --upgrade pip\n",
|
|
"!pip install --upgrade wheel setuptools\n",
|
|
"\n",
|
|
"if pip_cache_dir:\n",
|
|
" !pip install git+https://github.com/pixelb/crudini.git\n",
|
|
" !mkdir -p \"{pip_cache_dir}\"\n",
|
|
" !python3 -m crudini --set /etc/pip.conf global cache-dir \"{pip_cache_dir}\"\n",
|
|
" !echo \"Set pip cache directory: $(pip cache dir)\"\n",
|
|
"\n",
|
|
"import os\n",
|
|
"\n",
|
|
"# Import launch.py which will automatically run the install script but not launch the WebUI.\n",
|
|
"import launch\n",
|
|
"launch.prepare_environment()\n",
|
|
"\n",
|
|
"# Install things for this notebook\n",
|
|
"!pip install requests gdown bs4 markdownify\n",
|
|
"\n",
|
|
"# The installer isn't installing deepdanbooru right now so we'll do it manually.\n",
|
|
"if activate_deepdanbooru:\n",
|
|
" # https://github.com/KichangKim/DeepDanbooru/releases\n",
|
|
" !pip install \"git+https://github.com/KichangKim/DeepDanbooru.git@v3-20211112-sgd-e28#egg=deepdanbooru[tensorflow]\" # $(curl --silent \"https://api.github.com/KichangKim/DeepDanbooru/releases/latest\" | grep '\"tag_name\":' | sed -E 's/.*\"([^\"]+)\".*/\\1/')#egg=deepdanbooru[tensorflow]\" # tensorflow==2.10.0 tensorflow-io==0.27.0 flatbuffers==1.12\n",
|
|
"\n",
|
|
"# latent-diffusion is a requirement but launch.py isn't downloading it so we'll do it manually.\n",
|
|
"# TODO: can this be removed?\n",
|
|
"# if not os.path.exists(f'{repo_storage_dir}/stable-diffusion-webui/repositories/latent-diffusion'):\n",
|
|
"# !git clone https://github.com/crowsonkb/k-diffusion.git \"{repo_storage_dir}/stable-diffusion-webui/repositories/k-diffusion\"\n",
|
|
"# !git clone https://github.com/Hafiidz/latent-diffusion.git \"{repo_storage_dir}/stable-diffusion-webui/repositories/latent-diffusion\"\n",
|
|
"\n",
|
|
"if activate_xformers:\n",
|
|
" print('Installing xformers...')\n",
|
|
" import subprocess\n",
|
|
" def download_release(url):\n",
|
|
" binary = 'xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl' # have to save the binary as a specific name that pip likes\n",
|
|
" tmp_dir = subprocess.check_output(['mktemp', '-d']).decode('ascii').strip('\\n')\n",
|
|
" !wget \"{url}\" -O \"{tmp_dir}/{binary}\"\n",
|
|
" return os.path.join(tmp_dir, binary)\n",
|
|
"\n",
|
|
" # Set up pip packages\n",
|
|
" !pip uninstall -y torch torchvision torchaudio # Remove existing pytorch install.\n",
|
|
" !pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113 # Install pytorch for cuda 11.3\n",
|
|
" s = subprocess.getoutput('nvidia-smi')\n",
|
|
" if 'A4000' in s:\n",
|
|
" xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/A4000-Oct-28-2022/a4000-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" elif 'A5000' in s:\n",
|
|
" xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/A5000-Nov-1-2022/a5000-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" elif 'A6000' in s:\n",
|
|
" xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/A6000-Nov-1-2022/a6000-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" elif 'P5000' in s:\n",
|
|
" xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/P5000-Nov-1-2022/p5000-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" elif 'RTX 4000' in s:\n",
|
|
" xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/RTX-4000-Nov-1-2022/rtx4000-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" elif 'RTX 5000' in s:\n",
|
|
" xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/RTX-5000-Nov-1-2022/rtx5000-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" elif 'A100' in s:\n",
|
|
" xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/A100-Nov-1-2022/a100-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" elif 'M4000' in s:\n",
|
|
" print('xformers for M4000 hasn\\'t been built yet.')\n",
|
|
" # xformers_whl = download_release('https://github.com/Cyberes/xformers-compiled/releases/download/A100-Nov-1-2022/a100-xformers-0.0.14.dev0-cp39-cp39-linux_x86_64.whl')\n",
|
|
" else:\n",
|
|
" print('GPU not matched to xformers binary so a one-size-fits-all binary was installed. If you have any issues, please build xformers using the Tools block below.')\n",
|
|
" xformers_whl = download_release('https://raw.githubusercontent.com/Cyberes/xformers-compiled/main/various/xformers-0.0.14.dev0-cp37-cp37m-linux_x86_64.whl')\n",
|
|
" !pip install --force-reinstall \"{xformers_whl}\"\n",
|
|
"\n",
|
|
"# Make sure important directories exists\n",
|
|
"!mkdir -p \"{model_storage_dir}/hypernetworks\"\n",
|
|
"!mkdir -p \"{model_storage_dir}/vae\"\n",
|
|
"!mkdir -p \"{repo_storage_dir}/stable-diffusion-webui/models/hypernetworks\"\n",
|
|
"!mkdir -p \"{repo_storage_dir}/stable-diffusion-webui/models/VAE\"\n",
|
|
"!mkdir -p \"{repo_storage_dir}/stable-diffusion-webui/log/images\"\n",
|
|
"\n",
|
|
"!echo -e \"\\n===================================\\nDone! If you're seeing this the process has exited successfully.\\n\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"id": "F0EINk5M0s-w",
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"## Download the Model"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"You don't need to repeat this step if you've already downloaded the models.\n",
|
|
"\n",
|
|
"<br>\n",
|
|
"\n",
|
|
"**There are additional models available here: https://cyberes.github.io/stable-diffusion-models**\n",
|
|
"\n",
|
|
"Textual inversion: https://cyberes.github.io/stable-diffusion-textual-inversion-models\n",
|
|
"\n",
|
|
"DreamBooth: https://cyberes.github.io/stable-diffusion-dreambooth-library\n",
|
|
"\n",
|
|
"<br>\n",
|
|
"\n",
|
|
"### Filesize and Storage Disclaimer\n",
|
|
"\n",
|
|
"Paperspace free tier has only 5GB of storage space. If you're having storage issues, here are a few suggestions.\n",
|
|
"1. Download everything to `/tmp/` each time you start the machine.\n",
|
|
"2. Add a payment method to your account. Storage overages are billed at \\$0.29/GB and billing occurs monthly and runs at midnight on the first of each month. With a payment method on file, Paperspace will let you use more storage and if you time it right you shouldn't actually be charged for it.\n",
|
|
"3. Upgrade to a Pro account. They'll give you 15GB and you'll get longer runtimes and more powerful free GPUs.\n",
|
|
"4. Use my referral code `KQLRH37`. You'll get \\$10 credit that you should be able to put towards the storage overage charges. Redeem the code at the bottom of the Billing page.\n",
|
|
"\n",
|
|
"### Torrent Instructions\n",
|
|
"\n",
|
|
"Aria2 may show some errors/warnings while downloading. Those are fine, when it eventually says \"Download Complete\" that means everything worked as it should."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Stable Diffusion 2.0"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**768x768**\n",
|
|
"\n",
|
|
"This model can generate images 768 pixels by 768 pixels."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -d \"{model_storage_dir}\" -o \"sd-v2-0-768-v-ema.ckpt\"\n",
|
|
"!wget https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O \"{model_storage_dir}/sd-v2-0-768-v-ema.yaml\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**512x512 Base**\n",
|
|
"\n",
|
|
"Referred to as the \"base\" model, this model generates images in the standard 512x512 resolution."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/stabilityai/stable-diffusion-2-base/resolve/main/512-base-ema.ckpt -d \"{model_storage_dir}\" -o \"sd-v2-0-512-base-ema.ckpt\"\n",
|
|
"!wget https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference.yaml -O \"{model_storage_dir}/sd-v2-0-512-base-ema.yaml\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**Depth Model**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/stabilityai/stable-diffusion-2-depth/resolve/main/512-depth-ema.ckpt -d \"{model_storage_dir}\" -o \"sd-v2-0-512-depth-ema.ckpt\"\n",
|
|
"!wget https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-midas-inference.yaml -O \"{model_storage_dir}/sd-v2-0-512-depth-ema.yaml\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**4x Upscaler**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/resolve/main/x4-upscaler-ema.ckpt -d \"{model_storage_dir}\" -o \"sd-v2-0-x4-upscaler-ema.ckpt\"\n",
|
|
"!wget https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml -O \"{model_storage_dir}/sd-v2-0-x4-upscaler-ema.yaml\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"### Stable Diffusion 1.0"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**v1.5**\n",
|
|
"\n",
|
|
"Paperspace includes this model in its public data sources, which don't use up your storage quota. To add it, click on `Data Sources` in the toolbar, `Public`, and `stable-diffusion-classic`. The file is mounted at `/datasets/stable-diffusion-classic/`. You only need to do this once, it will stay mounted between sessions. Make sure the setting `search_paperspace_datasets` is set to `True` so the program will link it to the WebUI.\n",
|
|
"\n",
|
|
"Otherwise, you can download it yourself:"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"%cd \"{model_storage_dir}\"\n",
|
|
"!aria2c --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:2daef5b5f63a16a9af9169a529b1a773fc452637&dn=v1-5-pruned-emaonly.ckpt&tr=udp%3a%2f%2ftracker.opentrackr.org%3a1337%2fannounce&tr=udp%3a%2f%2f9.rarbg.com%3a2810%2fannounce&tr=udp%3a%2f%2ftracker.openbittorrent.com%3a6969%2fannounce&tr=udp%3a%2f%2fopentracker.i2p.rocks%3a6969%2fannounce&tr=https%3a%2f%2fopentracker.i2p.rocks%3a443%2fannounce&tr=http%3a%2f%2ftracker.openbittorrent.com%3a80%2fannounce&tr=udp%3a%2f%2ftracker.torrent.eu.org%3a451%2fannounce&tr=udp%3a%2f%2fopen.stealth.si%3a80%2fannounce&tr=udp%3a%2f%2fvibe.sleepyinternetfun.xyz%3a1738%2fannounce&tr=udp%3a%2f%2ftracker2.dler.org%3a80%2fannounce&tr=udp%3a%2f%2ftracker1.bt.moack.co.kr%3a80%2fannounce&tr=udp%3a%2f%2ftracker.zemoj.com%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.tiny-vps.com%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.theoks.net%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.publictracker.xyz%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.monitorit4.me%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.moeking.me%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.lelux.fi%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.dler.org%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.army%3a6969%2fannounce\"\n",
|
|
"!mv \"v1-5-pruned-emaonly.ckpt\" \"sd-v1-5-pruned-emaonly.ckpt\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**v1.5 Inpainting**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"%cd \"{model_storage_dir}\"\n",
|
|
"!aria2c --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:b523a9e71ae02e27b28007eca190f41999c2add1&dn=sd-v1-5-inpainting.ckpt&tr=udp%3a%2f%2ftracker.opentrackr.org%3a1337%2fannounce&tr=udp%3a%2f%2f9.rarbg.com%3a2810%2fannounce&tr=udp%3a%2f%2ftracker.openbittorrent.com%3a6969%2fannounce&tr=http%3a%2f%2ftracker.openbittorrent.com%3a80%2fannounce&tr=udp%3a%2f%2fopentracker.i2p.rocks%3a6969%2fannounce&tr=https%3a%2f%2fopentracker.i2p.rocks%3a443%2fannounce&tr=udp%3a%2f%2ftracker.torrent.eu.org%3a451%2fannounce&tr=udp%3a%2f%2fopen.stealth.si%3a80%2fannounce&tr=udp%3a%2f%2fvibe.sleepyinternetfun.xyz%3a1738%2fannounce&tr=udp%3a%2f%2ftracker2.dler.org%3a80%2fannounce&tr=udp%3a%2f%2ftracker1.bt.moack.co.kr%3a80%2fannounce&tr=udp%3a%2f%2ftracker.zemoj.com%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.tiny-vps.com%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.theoks.net%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.swateam.org.uk%3a2710%2fannounce&tr=udp%3a%2f%2ftracker.publictracker.xyz%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.pomf.se%3a80%2fannounce&tr=udp%3a%2f%2ftracker.monitorit4.me%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.moeking.me%3a6969%2fannounce&tr=udp%3a%2f%2ftracker.lelux.fi%3a6969%2fannounce\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**v1.4**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"%cd \"{model_storage_dir}\"\n",
|
|
"!aria2c --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:3A4A612D75ED088EA542ACAC52F9F45987488D1C&tr=udp://tracker.opentrackr.org:1337/announce\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"### Waifu Diffusion"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**v1.3**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"%cd \"{model_storage_dir}\"\n",
|
|
"!aria2c --seed-time=0 --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:AWJJJZNFOOK7R2XXXGZ4GFNKUEU2TSFP&dn=wd-v1-3-float16.ckpt&xl=2132889245&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**v1.2**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"%cd \"{model_storage_dir}\"\n",
|
|
"!aria2c --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:153590FD7E93EE11D8DB951451056C362E3A9150&dn=wd-v1-2-full-ema-pruned.ckpt&tr=udp://tracker.opentrackr.org:1337\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"### trinart_stable_diffusion_v2"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"Another anime finetune. Designed to nudge SD to an anime/manga style. Seems to be more \"stylized\" and \"artistic\" than Waifu Diffusion, if that makes any sense.\n",
|
|
"\n",
|
|
"The 60,000 steps version is the original, the 115,000 and 95,000 versions is the 60,000 with additional training. Use the 60,000 step version if the style nudging is too much.\n",
|
|
"\n",
|
|
"[See the comparison here.](https://cyberes.github.io/stable-diffusion-models/#model-comparison)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**60000**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/naclbit/trinart_stable_diffusion_v2/resolve/main/trinart2_step60000.ckpt -d \"{model_storage_dir}\" -o \"trinart2_step60000.ckpt\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**95000**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/naclbit/trinart_stable_diffusion_v2/resolve/main/trinart2_step95000.ckpt -d \"{model_storage_dir}\" -o \"trinart2_step95000.ckpt\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**115000**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/naclbit/trinart_stable_diffusion_v2/resolve/main/trinart2_step115000.ckpt -d \"{model_storage_dir}\" -o \"trinart2_step115000.ckpt\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### trinart_characters_19-2m_stable_diffusion_v1\n",
|
|
"\n",
|
|
"Trained on 19.2M anime/manga-style images. This model seeks a sweet spot between artistic style and anatomical quality."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
"!aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue https://huggingface.co/naclbit/trinart_characters_19.2m_stable_diffusion_v1/resolve/main/trinart_characters_it4_v1.ckpt -d \"{model_storage_dir}\" -o \"trinart_characters_it4_v1.ckpt\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### NovelAI Leak"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**animefull-final-pruned**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"metalink = 'magnet:?xt=urn:btih:5bde442da86265b670a3e5ea3163afad2c6f8ecc&dn=novelaileak&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2F9.rarbg.com%3A2810%2Fannounce&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce'\n",
|
|
"import re\n",
|
|
"infohash = re.search(\"^magnet:\\?xt=urn:btih:(.*?)&.*?$\", metalink).group(1)\n",
|
|
"import subprocess\n",
|
|
"tmp_dir = subprocess.check_output(['mktemp', '-d']).decode('ascii').strip('\\n')\n",
|
|
"%cd \"{tmp_dir}\"\n",
|
|
"# Have to convert the metalink to a torrent file so aria2c can read the files inside\n",
|
|
"!aria2c -d . --bt-metadata-only=true --bt-save-metadata=true --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:5bde442da86265b670a3e5ea3163afad2c6f8ecc&dn=novelaileak&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2F9.rarbg.com%3A2810%2Fannounce&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce\"\n",
|
|
"!aria2c --select-file=64,65 --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"{infohash}.torrent\"\n",
|
|
"!mv \"novelaileak/stableckpt/animefull-final-pruned/config.yaml\" \"{model_storage_dir}/novelai-animefull-final-pruned.yaml\"\n",
|
|
"!mv \"novelaileak/stableckpt/animefull-final-pruned/model.ckpt\" \"{model_storage_dir}/novelai-animefull-final-pruned.ckpt\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**animesfw-final-pruned**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"metalink = 'magnet:?xt=urn:btih:5bde442da86265b670a3e5ea3163afad2c6f8ecc&dn=novelaileak&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2F9.rarbg.com%3A2810%2Fannounce&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce'\n",
|
|
"import re\n",
|
|
"infohash = re.search(\"^magnet:\\?xt=urn:btih:(.*?)&.*?$\", metalink).group(1)\n",
|
|
"import subprocess\n",
|
|
"tmp_dir = subprocess.check_output(['mktemp', '-d']).decode('ascii').strip('\\n')\n",
|
|
"%cd \"{tmp_dir}\"\n",
|
|
"# Have to convert the metalink to a torrent file so aria2c can read the files inside\n",
|
|
"!aria2c -d . --bt-metadata-only=true --bt-save-metadata=true --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:5bde442da86265b670a3e5ea3163afad2c6f8ecc&dn=novelaileak&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2F9.rarbg.com%3A2810%2Fannounce&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce\"\n",
|
|
"!aria2c --select-file=70,71 --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"{infohash}.torrent\"\n",
|
|
"!mv \"novelaileak/stableckpt/animesfw-final-pruned/config.yaml\" \"{model_storage_dir}/novelai-animesfw-final-pruned.yaml\"\n",
|
|
"!mv \"novelaileak/stableckpt/animesfw-final-pruned/model.ckpt\" \"{model_storage_dir}/novelai-animesfw-final-pruned.ckpt\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"**Hypernetworks**\n",
|
|
"\n",
|
|
"A hypernetwork is trained much like a neural network and helps to guide the neural net towards the intended output."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"!apt update\n",
|
|
"!apt install -y aria2\n",
|
|
"metalink = 'magnet:?xt=urn:btih:5bde442da86265b670a3e5ea3163afad2c6f8ecc&dn=novelaileak&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2F9.rarbg.com%3A2810%2Fannounce&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce'\n",
|
|
"import re\n",
|
|
"infohash = re.search(\"^magnet:\\?xt=urn:btih:(.*?)&.*?$\", metalink).group(1)\n",
|
|
"import subprocess\n",
|
|
"tmp_dir = subprocess.check_output(['mktemp', '-d']).decode('ascii').strip('\\n')\n",
|
|
"%cd \"{tmp_dir}\"\n",
|
|
"# Have to convert the metalink to a torrent file so aria2c can read the files inside\n",
|
|
"!aria2c -d . --bt-metadata-only=true --bt-save-metadata=true --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"magnet:?xt=urn:btih:5bde442da86265b670a3e5ea3163afad2c6f8ecc&dn=novelaileak&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2F9.rarbg.com%3A2810%2Fannounce&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce\"\n",
|
|
"!aria2c --select-file=76,81,82,83,84,85,86,87,88,89,90,91,92,93 --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --file-allocation=none \"{infohash}.torrent\"\n",
|
|
"# -exec mv doesn't work with python variables so we'll set an environment variable instead\n",
|
|
"!rm novelaileak/stableckpt/extra-sd-prune/sd-prune/anime700k-64bs-0.1ucg-penultimate-1epoch-clip-ema-continue-76000.pt # aria2 downloads this file even though I told it not to\n",
|
|
"from pathlib import Path\n",
|
|
"s = subprocess.run(f'find novelaileak/ -type f -name \"*.pt\" -exec mv \"{{}}\" \"{Path(model_storage_dir, \"hypernetworks\")}\" \\;', shell=True)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"## Clean up and restart the kernel"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir pip_cache_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir, pip_cache_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"# Get some storage back\n",
|
|
"if not pip_cache_dir:\n",
|
|
" !pip cache purge\n",
|
|
" !echo \"Purged pip cache\"\n",
|
|
"!cd \"{model_storage_dir}\" && rm -f *.aria2\n",
|
|
"!apt remove --purge -y aria2 p7zip-full\n",
|
|
"!apt autoremove --purge -y\n",
|
|
"!apt clean\n",
|
|
"\n",
|
|
"# Restart the kernel\n",
|
|
"import os\n",
|
|
"os.kill(os.getpid(), 9)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Link the models directory\n",
|
|
"\n",
|
|
"Create symlinks. The file will be stored in the models storage directory and linked to where the WebUI expects the files to be."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir link_novelai_anime_vae search_paperspace_datasets\n",
|
|
" test = [model_storage_dir, repo_storage_dir, link_novelai_anime_vae, search_paperspace_datasets]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"import os\n",
|
|
"from glob import glob\n",
|
|
"from pathlib import Path\n",
|
|
"import sys\n",
|
|
"\n",
|
|
"model_storage_dir = Path(model_storage_dir)\n",
|
|
"\n",
|
|
"if not model_storage_dir.exists():\n",
|
|
" print('Your model storage directory does not exist:', model_storage_dir)\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"webui_root_model_path = Path(repo_storage_dir, 'stable-diffusion-webui/models')\n",
|
|
"webui_sd_model_path = Path(webui_root_model_path, 'Stable-diffusion')\n",
|
|
"webui_hypernetwork_path = Path(webui_root_model_path, 'hypernetworks')\n",
|
|
"webui_vae_path = Path(webui_root_model_path, 'VAE')\n",
|
|
"\n",
|
|
"def delete_broken_symlinks(dir):\n",
|
|
" deleted = False\n",
|
|
" dir = Path(dir)\n",
|
|
" for file in dir.iterdir():\n",
|
|
" if file.is_symlink() and not file.exists():\n",
|
|
" print('Symlink broken, removing:', file)\n",
|
|
" file.unlink()\n",
|
|
" deleted = True\n",
|
|
" if deleted:\n",
|
|
" print('')\n",
|
|
"\n",
|
|
"def create_symlink(source, dest):\n",
|
|
" if os.path.isdir(dest):\n",
|
|
" dest = Path(dest, os.path.basename(source))\n",
|
|
" if not dest.exists():\n",
|
|
" os.symlink(source, dest)\n",
|
|
" print(source, '->', Path(dest).absolute())\n",
|
|
"\n",
|
|
"# Check for broken symlinks and remove them\n",
|
|
"print('Removing broken symlinks...')\n",
|
|
"delete_broken_symlinks(webui_sd_model_path)\n",
|
|
"delete_broken_symlinks(webui_hypernetwork_path)\n",
|
|
"delete_broken_symlinks(webui_vae_path)\n",
|
|
"\n",
|
|
"def link_ckpts(source_path):\n",
|
|
" # Link .ckpt and .safetensor/.st files (recursive)\n",
|
|
" print('\\nLinking .ckpt and .safetensor/.safetensors/.st files in', source_path)\n",
|
|
" source_path = Path(source_path)\n",
|
|
" for file in [p for p in source_path.rglob('*') if p.suffix in ['.ckpt', '.safetensor', '.safetensors', '.st']]:\n",
|
|
" if Path(file).parent.parts[-1] not in ['hypernetworks', 'vae'] :\n",
|
|
"            if not (webui_sd_model_path / file.name).exists():\n",
|
|
" print('New model:', file.name)\n",
|
|
" create_symlink(file, webui_sd_model_path)\n",
|
|
" # Link config yaml files\n",
|
|
" print('\\nLinking config .yaml files in', source_path)\n",
|
|
" for file in model_storage_dir.glob('*.yaml'):\n",
|
|
" create_symlink(file, webui_sd_model_path)\n",
|
|
"\n",
|
|
"\n",
|
|
"link_ckpts(model_storage_dir)\n",
|
|
"\n",
|
|
"# Link hypernetworks\n",
|
|
"print('\\nLinking hypernetworks...')\n",
|
|
"hypernetwork_source_path = Path(model_storage_dir, 'hypernetworks')\n",
|
|
"if hypernetwork_source_path.is_dir():\n",
|
|
" for file in hypernetwork_source_path.iterdir():\n",
|
|
"        create_symlink(file, webui_hypernetwork_path)\n",
|
|
"else:\n",
|
|
" print('Hypernetwork storage directory not found:', hypernetwork_source_path)\n",
|
|
"\n",
|
|
"# Link VAEs\n",
|
|
"print('\\nLinking VAEs...')\n",
|
|
"vae_source_path = Path(model_storage_dir, 'vae')\n",
|
|
"if vae_source_path.is_dir():\n",
|
|
" for file in vae_source_path.iterdir():\n",
|
|
"        create_symlink(file, webui_vae_path)\n",
|
|
"else:\n",
|
|
" print('VAE storage directory not found:', vae_source_path)\n",
|
|
"\n",
|
|
"# Link the NovelAI files for each of the NovelAI models\n",
|
|
"print('\\nLinking NovelAI files for each of the NovelAI models...')\n",
|
|
"for model in model_storage_dir.glob('novelai-*.ckpt'):\n",
|
|
"    yaml = model.with_suffix('.yaml')\n",
|
|
" if os.path.exists(yaml):\n",
|
|
" print('New NovelAI model config:', yaml)\n",
|
|
" create_symlink(yaml, webui_sd_model_path)\n",
|
|
"\n",
|
|
"if link_novelai_anime_vae:\n",
|
|
" print('\\nLinking NovelAI anime VAE...')\n",
|
|
" for model in model_storage_dir.glob('novelai-*.ckpt'):\n",
|
|
" if (model_storage_dir / 'hypernetworks' / 'animevae.pt').is_file():\n",
|
|
" vae = model.stem + '.vae.pt'\n",
|
|
"            if not (webui_vae_path / vae).exists():\n",
|
|
" print(f'Linking NovelAI {vae} and {model}')\n",
|
|
"                create_symlink(model_storage_dir / 'hypernetworks' / 'animevae.pt', webui_vae_path / vae)\n",
|
|
" else:\n",
|
|
" print(f'{model_storage_dir}/hypernetworks/animevae.pt not found!')\n",
|
|
"\n",
|
|
"if search_paperspace_datasets:\n",
|
|
" if Path('/datasets').is_dir():\n",
|
|
" link_ckpts('/datasets')\n",
|
|
" else:\n",
|
|
" print('\\nNo datasets mounted!')"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"id": "xt8lbdmC04ox"
|
|
},
|
|
"source": [
|
|
"# Launch the WebUI"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"Run this block to launch the WebUI. You will get a link to nnn.gradio.app, that's your WebUI. Follow it.\n",
|
|
"\n",
|
|
"See [shared.py](https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/shared.py#L22) to view the code for the launch args. There's a lot of good info in there about exactly what the args do. If you aren't a programmer, [here's the wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Command-Line-Arguments-and-Settings).\n",
|
|
"\n",
|
|
"#### Troubleshooting\n",
|
|
"- If you have any issues, try restarting the kernel.\n",
|
|
"- `EOFError: Ran out of input` probably means you ran out of storage space and the model `.ckpt` file wasn't downloaded completely. Try cleaning up your files. There are some helpful scripts in the Tools section below.\n",
|
|
"- `The file may be malicious, so the program is not going to read it` means the program encountered unexpected data in the model file (the technical term is \"pickle\"). Merging models can cause this. You can disable this feature by setting `disable_pickle_check` to True in the settings block.\n",
|
|
"- Try updating your notebook using the block in the Tools section below.\n",
|
|
"- If you're still having issues, delete `stable-diffusion-webui` and reinstall."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {
|
|
"id": "R-xAdMA5wxXd",
|
|
"scrolled": true,
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir activate_xformers activate_deepdanbooru activate_medvram disable_pickle_check gradio_port gradio_auth ui_theme insecure_extension_access\n",
|
|
" test = [model_storage_dir, repo_storage_dir, activate_xformers, activate_deepdanbooru, activate_medvram, disable_pickle_check, gradio_port, gradio_auth, ui_theme, insecure_extension_access]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\nfrom pathlib import Path\n",
|
|
"%cd \"{Path(repo_storage_dir, 'stable-diffusion-webui')}\"\n",
|
|
"\n",
|
|
"# Code to set the options you want as defined in the very first block\n",
|
|
"x_arg = '--xformers' if activate_xformers else ''\n",
|
|
"dd_arg = '--deepdanbooru' if activate_deepdanbooru else ''\n",
|
|
"mvram_arg = '--medvram' if activate_medvram else ''\n",
|
|
"pickled = '--disable-safe-unpickle' if disable_pickle_check else ''\n",
|
|
"port = f'--port {gradio_port}' if gradio_port else '--share'\n",
|
|
"auth = f'--gradio-auth {gradio_auth} --enable-insecure-extension-access' if gradio_auth else ''\n",
|
|
"theme = f'--theme {ui_theme}' if ui_theme else ''\n",
|
|
"insecure_extension_access = '--enable-insecure-extension-access' if insecure_extension_access else ''\n",
|
|
"\n",
|
|
"# Launch args go below:\n",
|
|
"!python webui.py {x_arg} {dd_arg} {mvram_arg} {pickled} {port} {auth} {theme} {insecure_extension_access} --gradio-debug"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"# Export Generations"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"This block will rename and compress the outputs with 7zip max compression. It expects you to have `log/` and `outputs/` in `/notebooks/stable-diffusion-webui/`."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {
|
|
"scrolled": true,
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir export_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir, export_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"import os\n",
|
|
"from pathlib import Path\n",
|
|
"import subprocess\n",
|
|
"\n",
|
|
"repo_storage_dir = Path(repo_storage_dir)\n",
|
|
"export_storage_dir = Path(export_storage_dir)\n",
|
|
"export_storage_dir.mkdir(parents=True, exist_ok=True)\n",
|
|
"\n",
|
|
"!if [ $(dpkg-query -W -f='${Status}' p7zip-full 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y p7zip-full; fi # install 7z if it isn't already installed\n",
|
|
"from datetime import datetime\n",
|
|
"datetime_str = datetime.now().strftime('%m-%d-%Y_%H:%M:%S')\n",
|
|
"%cd \"{export_storage_dir}\"\n",
|
|
"!mkdir -p \"{datetime_str}/log\"\n",
|
|
"!cd \"{repo_storage_dir / 'stable-diffusion-webui' / 'log'}\" && mv * \"{export_storage_dir / datetime_str / 'log'}\"\n",
|
|
"!cd \"{repo_storage_dir / 'stable-diffusion-webui' / 'outputs'}\" && mv * \"{export_storage_dir / datetime_str}\"\n",
|
|
"s = subprocess.run(f'find \"{Path(export_storage_dir, datetime_str)}\" -type d -name .ipynb_checkpoints -exec rm -rv {{}} +', shell=True)\n",
|
|
"!7z a -t7z -m0=lzma2 -mx=9 -mfb=64 -md=32m -ms=on \"{datetime_str}.7z\" \"{export_storage_dir / datetime_str}\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"### Delete old output folder\n",
|
|
"\n",
|
|
"This block will delete the folder you just compressed."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# !rm -rf \"{export_storage_dir / datetime_str}\"\n",
|
|
"# !echo \"Deleted {export_storage_dir / datetime_str}\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"# Tools"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Show graphics card info"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"!nvidia-smi"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Download the latest version of this notebook from Github\n",
|
|
"\n",
|
|
"Run this and refresh the page (press F5). Don't save anything or you will overwrite the downloaded file."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"!mv /notebooks/StableDiffusionUI_Voldemort_paperspace.ipynb /notebooks/StableDiffusionUI_Voldemort_paperspace.ipynb.backup # save your old notebook to a backup\n",
|
|
"!wget https://raw.githubusercontent.com/Engineer-of-Stuff/stable-diffusion-paperspace/main/StableDiffusionUI_Voldemort_paperspace.ipynb -O /notebooks/StableDiffusionUI_Voldemort_paperspace.ipynb\n",
|
|
"!echo \"Downloaded! Now, refresh the page (press F5). Don't save anything or you will overwrite the downloaded file.\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"### Reset Repository\n",
|
|
"\n",
|
|
"Sometimes AUTOMATIC1111 breaks something. Go to https://github.com/AUTOMATIC1111/stable-diffusion-webui/commits/master and choose a commit to revert to.\n",
|
|
"\n",
|
|
"If you're looking for a specific date, do: `git log --since='Sept 17 2022' --until='Sept 18 2022'`\n",
|
|
"\n",
|
|
"\n",
|
|
"**This shouldn't delete your outputs or any changes you've made to files, but I'd back up anything important just to be safe.**"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\nfrom pathlib import Path\n",
|
|
"%cd \"{Path(repo_storage_dir, 'stable-diffusion-webui')}\"\n",
|
|
"!git reset --hard <commit>"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Delete .ipynb_checkpoints\n",
|
|
"\n",
|
|
"Jupyter stores temporary files in folders named `.ipynb_checkpoints`. It gets a little excessive sometimes so if you're running low on storage space or getting weird errors about a directory named `.ipynb_checkpoints`, run this block."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"import subprocess\n",
|
|
"!find /notebooks/ -type d -name .ipynb_checkpoints -exec rm -rv {} +\n",
|
|
"s = subprocess.run(f'find \"{repo_storage_dir}\" -type d -name .ipynb_checkpoints -exec rm -rv {{}} +', shell=True)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"### Reset storage\n",
|
|
"\n",
|
|
"This will delete ALL your files in `/notebooks/`, `/storage/`, `model_storage_dir`, and `repo_storage_dir`. Use if you're having issues with zero storage space and you don't want to delete your notebook."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Uncomment the lines below to run this block. You can highlight the lines and do ctrl + /\n",
|
|
"# %store -r model_storage_dir repo_storage_dir\n",
|
|
"# try:\n",
|
|
"# test = [model_storage_dir, repo_storage_dir]\n",
|
|
"# except NameError as e:\n",
|
|
"# print(\"There is an issue with your variables.\")\n",
|
|
"# print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
"# print('Error:', e)\n",
|
|
"# import sys\n",
|
|
"# sys.exit(1)\n",
|
|
"# !rm -rf /storage/*\n",
|
|
"# !mv /notebooks/*.ipynb / # move the notebook out of the directory before we nuke it\n",
|
|
"# !rm -rf /notebooks/*\n",
|
|
"# !mv /*.ipynb /notebooks/ # move it back\n",
|
|
"# !rm -rf {model_storage_dir}\n",
|
|
"# !rm -rf {repo_storage_dir}"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"### Build and Install Xformers\n",
|
|
"\n",
|
|
"This is an advanced feature that should boost your generation speeds.\n",
|
|
"\n",
|
|
"1. Run the block below to download the install script to `/notebooks/`\n",
|
|
"2. Go to https://developer.nvidia.com/cuda-gpus and find the Cuda arch for your GPU model. It's likely 7.5, but double check.\n",
|
|
"3. Once you have read these instructions, uncomment the second line and insert your Cuda arch.\n",
|
|
"4. Enable xformers in the settings block above.\n",
|
|
"\n",
|
|
"If you have any issues, open Jupyter Lab and run `build-xformers.sh` from the terminal."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"!wget https://raw.githubusercontent.com/Engineer-of-Stuff/stable-diffusion-paperspace/main/other/build-xformers.sh -O /notebooks/build-xformers.sh\n",
|
|
"# !bash /notebooks/build-xformers.sh [your cuda arch]\n",
|
|
"!echo \"COMPLETED!\""
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Automated Model Downloader\n",
|
|
"\n",
|
|
"Here's a tool to download a model from a torrent magnet link, web link, Google Drive, HuggingFace, or CivitAI."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"try:\n",
|
|
" %store -r model_storage_dir repo_storage_dir\n",
|
|
" test = [model_storage_dir, repo_storage_dir]\n",
|
|
"except NameError as e:\n",
|
|
" print(\"There is an issue with your variables.\")\n",
|
|
" print(\"Please go back to the first block and make sure your settings are correct, then run the cell.\")\n",
|
|
" print('Error:', e)\n",
|
|
" import sys\n",
|
|
" sys.exit(1)\n",
|
|
"\n",
|
|
"model_uri = input('URI of model to download: ')\n",
|
|
"import re\n",
|
|
"import requests\n",
|
|
"import gdown\n",
|
|
"import json\n",
|
|
"from bs4 import BeautifulSoup\n",
|
|
"from markdownify import markdownify\n",
|
|
"import urllib.request\n",
|
|
"from pathlib import Path\n",
|
|
"\n",
|
|
"user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'\n",
|
|
"\n",
|
|
"def dl_web_file(web_dl_file):\n",
|
|
" %cd \"{model_storage_dir}\"\n",
|
|
" # We're going to use aria2 to split the download into threads which will allow us to download\n",
|
|
" # the file very fast even if the site serves the file slow.\n",
|
|
" !if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
|
|
" !aria2c --file-allocation=none -c -x 16 -s 16 --summary-interval=0 --console-log-level=warn --continue --user-agent=\"{user_agent}\" \"{web_dl_file}\" \n",
|
|
"\n",
|
|
"def is_url(url_str):\n",
|
|
" return re.search(r'https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,}', url_str)\n",
|
|
"\n",
|
|
"magnet_match = re.search(r'magnet:\\?xt=urn:btih:[\\-_A-Za-z0-9&=%.]*', model_uri)\n",
|
|
"civitai_match = re.search(r'^https?:\\/\\/(?:www\\.|(?!www))civitai\\.com\\/models\\/\\d*\\/.*?$', model_uri)\n",
|
|
"web_match = is_url(model_uri)\n",
|
|
"\n",
|
|
"if magnet_match:\n",
"    # Torrent/magnet download via aria2; seeding is effectively disabled.\n",
"    !if [ $(dpkg-query -W -f='${Status}' aria2 2>/dev/null | grep -c \"ok installed\") = 0 ]; then sudo apt update && sudo apt install -y aria2; fi\n",
"    %cd \"{model_storage_dir}\"\n",
"    bash_var = magnet_match[0]\n",
"    !aria2c --seed-time=0 --max-overall-upload-limit=1K --bt-max-peers=120 --summary-interval=0 --console-log-level=warn --file-allocation=none \"{bash_var}\"\n",
"    # clean exit here\n",
"elif 'https://huggingface.co/' in model_uri:\n",
"    # HEAD request first to confirm the link serves a raw file, not an HTML page.\n",
"    response = requests.head(model_uri, allow_redirects=True, headers={'User-Agent': user_agent})\n",
"    if 'octet-stream' not in response.headers['content-type']:\n",
"        # Probably a /blob/ page link; retry with the /resolve/ direct-download form.\n",
"        response = requests.head(model_uri.replace('/blob/', '/resolve/'), allow_redirects=True, headers={'User-Agent': user_agent})\n",
"        if 'octet-stream' not in response.headers['content-type']:\n",
"            print(f'Wrong content-type: {response.headers[\"content-type\"].split(\";\")[0]}')\n",
"            # clean exit here\n",
"        else:\n",
"            dl_web_file(model_uri.replace('/blob/', '/resolve/'))\n",
"            # clean exit here\n",
"    else:\n",
"        dl_web_file(model_uri)\n",
"        # clean exit here\n",
"elif 'https://drive.google.com' in model_uri:\n",
"    gdrive_file_id, _ = gdown.parse_url.parse_url(model_uri)\n",
"    %cd \"{model_storage_dir}\"\n",
"    # confirm=t bypasses Google's large-file download interstitial.\n",
"    gdown.download(f\"https://drive.google.com/uc?id={gdrive_file_id}&confirm=t\")\n",
"    # clean exit here\n",
"elif civitai_match:\n",
"    if not is_url(civitai_match[0]):\n",
"        print('URL does not match known civitai.com pattern.')\n",
"        # clean exit here\n",
"    else:\n",
"        # Scrape the page's embedded Next.js state to find the latest model version.\n",
"        soup = BeautifulSoup(requests.get(model_uri, headers={'User-Agent': user_agent}).text, features=\"html.parser\")\n",
"        data = json.loads(soup.find('script', {'id': '__NEXT_DATA__'}).text)\n",
"        model_data = data[\"props\"][\"pageProps\"][\"trpcState\"][\"json\"][\"queries\"][0][\"state\"][\"data\"]\n",
"        latest_model = model_data['modelVersions'][0]\n",
"        latest_model_url = f\"https://civitai.com/api/download/models/{latest_model['id']}\"\n",
"        print('Downloading model:', model_data['name'])\n",
"        \n",
"        # Download the description to a markdown file next to the checkpoint\n",
"        desc = markdownify(model_data['description'])\n",
"        req = urllib.request.Request(latest_model_url, data=None, headers={'User-Agent': user_agent})\n",
"        content_disp = urllib.request.urlopen(req).getheader('Content-Disposition')\n",
|
|
"        if content_disp:\n",
"            # Name the markdown file after the checkpoint filename reported by the\n",
"            # server's Content-Disposition header (.stem strips the extension).\n",
"            filename = Path(re.match(r'attachment; filename=\"(.*?)\"', content_disp)[1]).stem\n",
"            # Fix: use the parsed {filename}, not a hardcoded name, so each model's\n",
"            # description lands next to its own checkpoint instead of overwriting one file.\n",
"            with open(Path(model_storage_dir, f'{filename}.md'), 'w') as file:\n",
"                file.write(f\"# {model_data['name']} \\n\")\n",
"                file.write(f'Original CivitAI URL: {model_uri} \\n\\n <br> \\n')\n",
"                file.write(desc)\n",
"        else:\n",
"            print('Failed to get filename of checkpoint for markdown file')\n",
|
|
"\n",
"        dl_web_file(latest_model_url)\n",
"        # clean exit here\n",
"elif web_match:\n",
"    # Always do the web match last\n",
"    # Generic direct-URL fallback; verify it serves a raw file before invoking aria2.\n",
"    response = requests.head(model_uri, allow_redirects=True, headers={'User-Agent': user_agent})\n",
"    if 'octet-stream' not in response.headers['content-type']:\n",
"        print(f'Wrong content-type: {response.headers[\"content-type\"].split(\";\")[0]}')\n",
"        # clean exit here\n",
"    else:\n",
"        dl_web_file(model_uri)\n",
"        # clean exit here\n",
"else:\n",
"    print('Could not parse your URI.')\n",
"    # clean exit here"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"accelerator": "GPU",
|
|
"colab": {
|
|
"collapsed_sections": [],
|
|
"private_outputs": true,
|
|
"provenance": []
|
|
},
|
|
"gpuClass": "standard",
|
|
"kernelspec": {
|
|
"display_name": "Python 3 (ipykernel)",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.9.16"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 4
|
|
}
|