more doc, textual inversion training, organize files

Former-commit-id: dab8c993c50040c8e51e7abd5d9e0c635630d726 [formerly 36d5e9661c]
Former-commit-id: e1731a34c87d9dbdad691a1ca46cdfe03cc09ebf
frostydad 2022-10-01 13:41:10 -06:00
parent 5d45e17c29
commit fb39562d69
7 changed files with 273 additions and 8 deletions


@@ -48,7 +48,13 @@
"\n",
"`/notebooks/` is storage for this notebook only.\n",
"\n",
"We're going to store models in `/storage/models` and create a symlink.\n",
"`/tmp/` is not a persistent directory, meaning your files there will be deleted when the machine turns off.\n",
"\n",
"<br>\n",
"\n",
"If you are having storage issues, set `repo_storage_dir` to `/tmp/stable-diffusion`.\n",
"\n",
"<br>\n",
"\n",
"<mark>You must uncomment the correct section and run the block below or else the notebook won't work!</mark>"
]
@@ -62,7 +68,7 @@
"# Free tier\n",
"# free_tier = True # Enables the creation of symlinks back to /notebooks/\n",
"# model_storage_dir = '/tmp/stable-diffusion/models' # Where the models will be downloaded to\n",
"# repo_storage_dir = '/tmp/stable-diffusion' # Where the repository will be downloaded to\n",
"# repo_storage_dir = '/notebooks' # Where the repository will be downloaded to\n",
"\n",
"# Paid Tier\n",
"# free_tier = False\n",
@@ -97,7 +103,6 @@
"source": [
"import os\n",
"%store -r free_tier model_storage_dir repo_storage_dir\n",
"%cd /notebooks/\n",
"\n",
"def delete_broken_symlinks(path):\n",
" # make sure to pass this function a path without a trailing slash\n",
@@ -110,8 +115,12 @@
"if not os.path.exists(f'{repo_storage_dir}/stable-diffusion-webui'):\n",
" if free_tier:\n",
" delete_broken_symlinks('/notebooks/') # remove broken symlinks since it might have been installed in a non-persistent directory\n",
" !mkdir -p \"{repo_storage_dir}\"\n",
" !ln -s \"{repo_storage_dir}\" /notebooks/\n",
" if not os.path.exists(repo_storage_dir) and '/notebooks/' not in repo_storage_dir:\n",
" # symlink repo_storage_dir back to /notebooks/ only if it hasn't been downloaded to /notebooks/\n",
" !mkdir -p \"{repo_storage_dir}\"\n",
" !ln -s \"{repo_storage_dir}\" /notebooks/\n",
" else:\n",
" print('Not symlinking repo_storage_dir back to /notebooks/')\n",
" %cd \"{repo_storage_dir}\"\n",
" !git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui\n",
"else: # update repo if already exists\n",
@@ -147,9 +156,8 @@
"# They require a few specific external git repo commits so we have to do it their way. \n",
"import launch\n",
"\n",
"# latent-diffusion is a requirement but launch.py isn't downloading it so we'll do it manually.\n",
"if not os.path.exists(f'{repo_storage_dir}/stable-diffusion-webui/repositories/latent-diffusion'):\n",
" !git clone https://github.com/crowsonkb/k-diffusion.git \"{repo_storage_dir}/stable-diffusion-webui/repositories/k-diffusion\"\n",
"# latent-diffusion is a requirement but launch.py isn't downloading it so we'll do it manually\n",
"if not os.path.exists(f'{repo_storage_dir}/stable-diffusion-webui/repositories/latent-diffusion'): # check that it hasn't been downloaded in case they fix this issue\n",
" !git clone https://github.com/Hafiidz/latent-diffusion.git \"{repo_storage_dir}/stable-diffusion-webui/repositories/latent-diffusion\"\n",
" # I don't think it's necessary to do this:\n",
" # %mkdir \"{repo_storage_dir}/stable-diffusion-webui/repositories/latent-diffusion/experiments/\"\n",
@@ -466,6 +474,9 @@
"outputs": [],
"source": [
"%store -r free_tier model_storage_dir repo_storage_dir\n",
"\n",
"!echo -e \"You are using $(du -sh /notebooks/ | cut -f1) in /notebooks/\\n\"\n",
"\n",
"%cd \"{repo_storage_dir}/stable-diffusion-webui\"\n",
"!python webui.py --share # --gradio-auth me:password1234"
]


@@ -0,0 +1,173 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "60b8e4a2-4e54-4dae-9220-31fc24fa719f",
"metadata": {
"scrolled": true,
"tags": []
},
"outputs": [],
"source": [
"%cd /notebooks/\n",
"# Download and run the Anaconda3 installer\n",
"import sys\n",
"!wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh\n",
"!bash ./Miniconda3-latest-Linux-x86_64.sh -b -f -p /usr/local\n",
"sys.path.append('/usr/local/lib/python3.7/site-packages/')\n",
"!rm Miniconda3-latest-Linux-x86_64.sh\n",
"!conda init bash\n",
"!mkdir -p /notebooks/textual\\ inversion/\n",
"!mkdir -p /notebooks/Stable-textual-inversion_win/logs/"
]
},
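A quick sanity check after the Miniconda install above can save a confusing failure later; this is an optional sketch, and the site-packages path simply mirrors the `sys.path.append` call in that cell:

```python
# Confirm conda is on PATH and the appended site-packages entry is present
!conda --version
!which conda
import sys
print('/usr/local/lib/python3.7/site-packages/' in sys.path)
```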
{
"cell_type": "code",
"execution_count": null,
"id": "d5743d8c-f0ba-4f07-971b-b498b0e306f4",
"metadata": {
"scrolled": true,
"tags": []
},
"outputs": [],
"source": [
"import os.path\n",
"if not os.path.exists('/notebooks/Stable-textual-inversion_win'):\n",
" %cd /notebooks/\n",
" !git clone https://github.com/nicolai256/Stable-textual-inversion_win.git\n",
" !mkdir -p /notebooks/textual\\ inversion/\n",
" \n",
"# Uncomment this to copy the detault config\n",
" # !cp /notebooks/Stable-textual-inversion_win/configs/stable-diffusion/v1-finetune.yaml /notebooks/textual\\ inversion\n",
"else: # update repo if already exists\n",
" print('stable-diffusion-webui already downloaded, updating...')\n",
" %cd /notebooks/Stable-textual-inversion_win\n",
" !git pull\n",
" \n",
"# Symlink the output dir to /notebooks/\n",
"!mkdir -p /notebooks/textual\\ inversion/output/\n",
"!ln -s /notebooks/Stable-textual-inversion_win/logs/ \"/notebooks/textual inversion/\""
]
},
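Re-running the cell above repeats `!ln -s`, which fails with "File exists" once the link is in place. A guarded, re-runnable version (a sketch, not part of this commit) could look like:

```python
import os

# Create the logs link only if it doesn't exist yet, so the cell is safe to re-run
link = '/notebooks/textual inversion/logs'
if not os.path.lexists(link):
    os.symlink('/notebooks/Stable-textual-inversion_win/logs', link)
```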
{
"cell_type": "code",
"execution_count": null,
"id": "53ee34a7-e5f3-4529-b3b9-2bdac9a45c54",
"metadata": {
"scrolled": true,
"tags": []
},
"outputs": [],
"source": [
"%cd /notebooks/Stable-textual-inversion_win/\n",
"!conda env update -n base --file environment.yaml\n",
"!conda activate ldm\n",
"!pip install setuptools==59.5.0\n",
"!pip install pillow==9.0.1\n",
"!pip install torchmetrics==0.6.0\n",
"!pip install -e ."
]
},
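Each `!` command runs in its own subshell, so `!conda activate ldm` above does not carry over to the pip installs or to later cells. If the packages really must land in a dedicated `ldm` environment (an assumption, since the cell updates `base`), `conda run` scopes a single command to that environment:

```python
# conda activate in a !subshell does not persist; conda run -n <env> scopes one command
!conda run -n ldm pip install setuptools==59.5.0 pillow==9.0.1 torchmetrics==0.6.0
!conda run -n ldm python -c "import torch; print(torch.__version__)"
```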
{
"cell_type": "code",
"execution_count": null,
"id": "fa5eda42-c1c6-47b9-80ce-39b777c77bb5",
"metadata": {
"scrolled": true,
"tags": []
},
"outputs": [],
"source": [
"# Config\n",
"\n",
"config_path = '/notebooks/textual inversion/v1-finetune.yaml'\n",
"\n",
"# Directory holding the images you want to feed into the AI\n",
"source_image_directory = '/notebooks/textual inversion/source/'\n",
"\n",
"# Word to activate the embedding in the SD WebUI\n",
"init_word = 'simonstalenhag'\n",
"\n",
"project_name = 'simonstalenhag'\n",
"\n",
"actual_resume = '/storage/models/trinart2_step115000.ckpt' # trinart2_step115000.ckpt sd-v1-4.ckpt\n",
"\n",
"# ============================================================================================================\n",
"\n",
"!rm -rf \"{source_image_directory}/.ipynb_checkpoints\"\n",
"%cd /notebooks/Stable-textual-inversion_win/\n",
"!python main.py \\\n",
" --base \"{config_path}\" \\\n",
" -t --no-test \\\n",
" --actual_resume \"{actual_resume}\" \\\n",
" --gpus=1 \\\n",
" --data_root \"{source_image_directory}\" \\\n",
" --init_word {init_word} \\\n",
" -n {project_name} \\"
]
},
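Before launching `main.py`, it can help to confirm that `source_image_directory` actually contains readable images. A sketch using Pillow (installed above); the extension filter is an assumption:

```python
# Optional: list and open the training images to catch bad or stray files early
import os
from PIL import Image

src = '/notebooks/textual inversion/source/'
for name in sorted(os.listdir(src)):
    if name.lower().endswith(('.jpg', '.jpeg', '.png')):
        with Image.open(os.path.join(src, name)) as im:
            print(name, im.size, im.mode)
```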
{
"cell_type": "code",
"execution_count": null,
"id": "be231cb1-a562-48ac-abb7-2cbee730c9a9",
"metadata": {},
"outputs": [],
"source": [
"# Resuming\n",
"\n",
"# Config\n",
"\n",
"config_path = '/notebooks/textual inversion/v1-finetune.yaml'\n",
"\n",
"# Directory holding the images you want to feed into the AI\n",
"source_image_directory = '/notebooks/textual inversion/source/'\n",
"\n",
"# Word to activate the embedding in the SD WebUI\n",
"init_word = 'simonstalenhag'\n",
"\n",
"project_path = '/notebooks/textual inversion/logs/source2022-10-01T03-52-45_simonstalenhag'\n",
"\n",
"actual_resume = '/storage/models/trinart2_step115000.ckpt' # trinart2_step115000.ckpt sd-v1-4.ckpt\n",
"\n",
"# ============================================================================================================\n",
"\n",
"from datetime import datetime\n",
"datetime_str = datetime.now().strftime('%m-%d-%Y_%H:%M:%S')\n",
"\n",
"!python \"main.py\" \\\n",
" --base \"{config_path}\" \\\n",
" -t --no-test \\\n",
" --actual_resume \"{actual_resume}\" \\\n",
" --gpus=1 \\\n",
" --data_root \"{source_image_directory}\" \\\n",
" --init_word \"{init_word}\" \\\n",
" --project \"{project_path}\" \\\n",
" --embedding_manager_ckpt \"{project_path}/checkpoints/embeddings.pt\" \\\n",
" --resume_from_checkpoint \"{project_path}/checkpoints/last.ckpt\" \\\n",
" -n \"{init_word}_continue_{datetime_str}\""
]
}
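`project_path` above hard-codes one timestamped run directory. A sketch for picking the most recent run under the logs folder instead; the `*_simonstalenhag` glob pattern is an assumption based on the directory name shown:

```python
# Find the most recent textual-inversion run to resume from
import glob, os

logs_dir = '/notebooks/textual inversion/logs/'
runs = sorted(glob.glob(os.path.join(logs_dir, '*_simonstalenhag')), key=os.path.getmtime)
print('Most recent run:', runs[-1] if runs else 'none found')
# project_path = runs[-1]  # then re-run the resume cell with this value
```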
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

lfs/latent-diffusion/.gitattributes vendored Normal file

@@ -0,0 +1 @@
model.ckpt filter=lfs diff=lfs merge=lfs -text


@@ -0,0 +1,80 @@
model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.0015
    linear_end: 0.0155
    log_every_t: 100
    timesteps: 1000
    loss_type: l2
    first_stage_key: image
    cond_stage_key: LR_image
    image_size: 64
    channels: 3
    concat_mode: true
    cond_stage_trainable: false
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 64
        in_channels: 6
        out_channels: 3
        model_channels: 160
        attention_resolutions:
        - 16
        - 8
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 2
        - 4
        num_head_channels: 32
    first_stage_config:
      target: ldm.models.autoencoder.VQModelInterface
      params:
        embed_dim: 3
        n_embed: 8192
        monitor: val/rec_loss
        ddconfig:
          double_z: false
          z_channels: 3
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Module # todo
    cond_stage_config:
      target: torch.nn.Identity
data:
  target: cutlit.DataModuleFromConfig
  params:
    batch_size: 64
    wrap: false
    num_workers: 12
    train:
      target: ldm.data.openimages.SuperresOpenImagesAdvancedTrain
      params:
        size: 256
        degradation: bsrgan_light
        downscale_f: 4
        min_crop_f: 0.5
        max_crop_f: 1.0
        random_crop: true
    validation:
      target: ldm.data.openimages.SuperresOpenImagesAdvancedValidation
      params:
        size: 256
        degradation: bsrgan_light
        downscale_f: 4
        min_crop_f: 0.5
        max_crop_f: 1.0
        random_crop: true
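This config follows latent-diffusion's `target`/`params` convention, where each block names a class to instantiate and its keyword arguments. A minimal sketch of how such a file is typically loaded; the file path is hypothetical and this assumes the latent-diffusion repo is importable:

```python
# Load the config and build the model it describes (sketch; file path is hypothetical)
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load('lfs/latent-diffusion/config.yaml')
model = instantiate_from_config(config.model)
print(type(model))
```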