file renamer and some general updates
This commit is contained in:
parent e91d9eafe6
commit 0aa31179d7
@@ -1 +1 @@
-{"cells":[{"cell_type":"markdown","metadata":{},"source":["# Please read the documentation here before you start.\n","\n","I suggest reading this doc before you connect to your runtime to avoid using credits or being charged while you figure it out.\n","\n","[Auto Captioning Readme](doc/AUTO_CAPTION.md)\n","\n","This notebook requires an Nvidia GPU instance. Any will do, you don't need anything power. As low as 4GB should be fine.\n","\n","Only colab has automatic file transfers at this time. If you are using another platform, you will need to manually download your output files."]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":929,"status":"ok","timestamp":1667184580032,"user":{"displayName":"Victor Hall","userId":"00029068894644207946"},"user_tz":240},"id":"lWGx2LuU8Q_I","outputId":"d0eb4d03-f16d-460b-981d-d5f88447e85e"},"outputs":[],"source":["#download repo\n","!git clone https://github.com/victorchall/EveryDream.git\n","# Set working directory\n","%cd EveryDream"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4944,"status":"ok","timestamp":1667184754992,"user":{"displayName":"Victor Hall","userId":"00029068894644207946"},"user_tz":240},"id":"RJxfSai-8pkD","outputId":"0ac1b805-62a0-48aa-e0da-ee19503bb3f1"},"outputs":[],"source":["# install requirements\n","!pip install torch=='1.12.1+cu113' 'torchvision==0.13.1+cu113' --extra-index-url https://download.pytorch.org/whl/cu113\n","!pip install pandas>='1.3.5'\n","!git clone https://github.com/salesforce/BLIP scripts/BLIP\n","!pip install timm\n","!pip install fairscale=='0.4.4'\n","!pip install transformers=='4.19.2'\n","!pip install timm"]},{"cell_type":"markdown","metadata":{"id":"sbeUIVXJ-EVf"},"source":["# Upload your input images into the EveryDream/input folder\n","\n","![upload to input](demo/upload_images_caption.png)"]},{"cell_type":"markdown","metadata":{},"source":["## Please read the documentation for information on the parameters\n","\n","[Auto Captioning](doc/AUTO_CAPTION.md)\n","\n","*You cannot have commented lines between uncommented lines. If you uncomment a line below, move it above any other commented lines.*\n","\n","*!python must remain the first line.*\n","\n","Default params should work fairly well."]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":18221,"status":"ok","timestamp":1667185808005,"user":{"displayName":"Victor Hall","userId":"00029068894644207946"},"user_tz":240},"id":"4TAICahl-RPn","outputId":"da7fa1a8-0855-403a-c295-4da31658d1f6"},"outputs":[],"source":["!python scripts/auto_caption.py \\\n","--img_dir EveryDream/input \\\n","--out_dir EveryDream/output \\\n","#--fomat mrwho \\\n","#--min_length 34 \\\n","#--q_factor 1.3 \\\n","#--nucleus \\"]},{"cell_type":"markdown","metadata":{"id":"HBrWnu1C_lN9"},"source":["## Download your captioned images from EveryDream/output\n","\n","If you're on a colab you can use the cell below to push your output to your Gdrive."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')\n","\n","!mkdir /content/drive/MyDrive/AutoCaption\n","!cp output/*.* /content/drive/MyDrive/AutoCaption"]},{"cell_type":"markdown","metadata":{},"source":["## If not on colab/gdrive, the following will zip up your files for extraction\n","\n","You'll still need to use your runtime's own download feature to download the zip.\n","\n","![output zip](demo/output_zip.png)"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["!pip install patool\n","\n","import patoolib\n","\n","!mkdir output/zip\n","\n","!zip -r output/zip/output.zip output"]}],"metadata":{"colab":{"authorship_tag":"ABX9TyN9ZSr0RyOQKdfeVsl2uOiE","collapsed_sections":[],"provenance":[{"file_id":"16QrivRfoDFvE7fAa7eLeVlxj78Q573E0","timestamp":1667185879409}]},"kernelspec":{"display_name":"Python 3.10.5 ('.venv': venv)","language":"python","name":"python3"},"language_info":{"name":"python","version":"3.10.5"},"vscode":{"interpreter":{"hash":"faf4a6abb601e3a9195ce3e9620411ceec233a951446de834cdf28542d2d93b4"}}},"nbformat":4,"nbformat_minor":0}
+{"cells":[{"cell_type":"markdown","metadata":{},"source":["# Please read the documentation here before you start.\n","\n","I suggest reading this doc before you connect to your runtime to avoid using credits or being charged while you figure it out.\n","\n","[Auto Captioning Readme](doc/AUTO_CAPTION.md)\n","\n","This notebook requires an Nvidia GPU instance. Any will do, you don't need anything power. As low as 4GB should be fine.\n","\n","Only colab has automatic file transfers at this time. If you are using another platform, you will need to manually download your output files."]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":929,"status":"ok","timestamp":1667184580032,"user":{"displayName":"Victor Hall","userId":"00029068894644207946"},"user_tz":240},"id":"lWGx2LuU8Q_I","outputId":"d0eb4d03-f16d-460b-981d-d5f88447e85e"},"outputs":[],"source":["#download repo\n","!git clone https://github.com/victorchall/EveryDream.git\n","# Set working directory\n","%cd EveryDream"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4944,"status":"ok","timestamp":1667184754992,"user":{"displayName":"Victor Hall","userId":"00029068894644207946"},"user_tz":240},"id":"RJxfSai-8pkD","outputId":"0ac1b805-62a0-48aa-e0da-ee19503bb3f1"},"outputs":[],"source":["# install requirements\n","!pip install torch=='1.12.1+cu113' 'torchvision==0.13.1+cu113' --extra-index-url https://download.pytorch.org/whl/cu113\n","!pip install pandas>='1.3.5'\n","!git clone https://github.com/salesforce/BLIP scripts/BLIP\n","!pip install timm\n","!pip install fairscale=='0.4.4'\n","!pip install transformers=='4.19.2'\n","!pip install timm"]},{"cell_type":"markdown","metadata":{"id":"sbeUIVXJ-EVf"},"source":["# Upload your input images into the EveryDream/input folder\n","\n","![upload to input](demo/upload_images_caption.png)"]},{"cell_type":"markdown","metadata":{},"source":["## Please read the documentation for information on the parameters\n","\n","[Auto Captioning](doc/AUTO_CAPTION.md)\n","\n","*You cannot have commented lines between uncommented lines. If you uncomment a line below, move it above any other commented lines.*\n","\n","*!python must remain the first line.*\n","\n","Default params should work fairly well."]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":18221,"status":"ok","timestamp":1667185808005,"user":{"displayName":"Victor Hall","userId":"00029068894644207946"},"user_tz":240},"id":"4TAICahl-RPn","outputId":"da7fa1a8-0855-403a-c295-4da31658d1f6"},"outputs":[],"source":["!python scripts/auto_caption.py \\\n","--img_dir EveryDream/input \\\n","--out_dir EveryDream/output \\\n","#--format mrwho \\\n","#--min_length 34 \\\n","#--q_factor 1.3 \\\n","#--nucleus \\"]},{"cell_type":"markdown","metadata":{"id":"HBrWnu1C_lN9"},"source":["## Download your captioned images from EveryDream/output\n","\n","If you're on a colab you can use the cell below to push your output to your Gdrive."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')\n","\n","!mkdir /content/drive/MyDrive/AutoCaption\n","!cp output/*.* /content/drive/MyDrive/AutoCaption"]},{"cell_type":"markdown","metadata":{},"source":["## If not on colab/gdrive, the following will zip up your files for extraction\n","\n","You'll still need to use your runtime's own download feature to download the zip.\n","\n","![output zip](demo/output_zip.png)"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["!pip install patool\n","\n","import patoolib\n","\n","!mkdir output/zip\n","\n","!zip -r output/zip/output.zip output"]}],"metadata":{"colab":{"authorship_tag":"ABX9TyN9ZSr0RyOQKdfeVsl2uOiE","collapsed_sections":[],"provenance":[{"file_id":"16QrivRfoDFvE7fAa7eLeVlxj78Q573E0","timestamp":1667185879409}]},"kernelspec":{"display_name":"Python 3.10.5 ('.venv': venv)","language":"python","name":"python3"},"language_info":{"name":"python","version":"3.10.5"},"vscode":{"interpreter":{"hash":"faf4a6abb601e3a9195ce3e9620411ceec233a951446de834cdf28542d2d93b4"}}},"nbformat":4,"nbformat_minor":0}
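For reference, the notebook's warning above means the !python call and any flags you enable must form one unbroken run of uncommented lines, with every still-commented flag pushed below them. A cell with --min_length and --q_factor enabled would look roughly like the sketch below; it is an illustration, not part of the commit, and the flag values are simply the ones the notebook already lists.

!python scripts/auto_caption.py \
--img_dir EveryDream/input \
--out_dir EveryDream/output \
--min_length 34 \
--q_factor 1.3 \
#--format mrwho \
#--nucleus \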
@@ -18,9 +18,9 @@ Join the EveryDream discord here: https://discord.gg/uheqxU6sXN
 
 [Auto Captioning](./doc/AUTO_CAPTION.md) - Uses BLIP interrogation to caption images for training (includes colab notebook, needs minimal GPU).
 
-[File renaming](./doc/FILE_RENAME.md) - Simple script for replacing generic pronouns in filenames with proper names (ex "a man" -> "john doe").
+[File renaming](./doc/FILE_RENAME.md) - Simple script for replacing generic pronouns that come out of clip in filenames with proper names (ex "a man" -> "john doe", "a person" -> "jane doe").
 
-see clip_rename.bat for an example to chain captioning and renaming together.
+See clip_rename.bat for an example to chain captioning and renaming together.
 
 [Training](https://github.com/victorchall/EveryDream-trainer) (separate repo) - Fine tuning with captioned training and ground truth data (needs 24GB GPU).
 
@@ -1,3 +1,6 @@
 python scripts/auto_caption.py --q_factor 1.4
-python scripts/filename_replace.py --img_dir output --replace "rihanna" --find "a woman"
-python scripts/filename_replace.py --img_dir output --replace "asap rocky"
+::python scripts/filename_replace.py --img_dir output --find "a woman" --replace "rihanna"
+::python scripts/filename_replace.py --img_dir output --find "a person" --replace "rihanna"
+::python scripts/filename_replace.py --img_dir output --find "a man" --replace "asap rocky"
+::python scripts/filename_replace.py --img_dir output --replace "Keira Knightley"
+::python scripts/filename_replace.py --img_dir output --append "by Giotto"
@@ -9,11 +9,13 @@ By default, it will replace "a man", "a woman", and "a person" with your supplie
 
 python scripts/filename_replace.py --img_dir output --replace "john doe"
 
 *"a man standing in a park with birds on his shoulders.jpg"
 ->
 "john doe standing in a park with birds on his shoulders.jpg"*
 
-You can chain together the auto_caption.py and file_name to help deal with multiple people in photos in a simple shell script (bash or windows .bat) with a bit of thinking about what you replace and using --fird to specify the pronoun to replace first more specifically than all three default pronouns.
+## Chaining with auto caption
 
+You can chain together the auto_caption.py and file_rename.py to help deal with multiple people in photos in a simple shell script (bash or windows .bat) with a bit of thinking about what you replace and using --find to specify the pronoun to replace first more specifically than all three default pronouns.
+
 python scripts/auto_caption.py --q_factor 1.4 --img_dir input --out_dir output
 python scripts/filename_replace.py --img_dir output --find "a woman" --replace "rihanna"
@@ -23,4 +25,6 @@ You can chain together the auto_caption.py and file_name to help deal with multi
 ->
 "asap rocky and rihanna standing next to each other in front of a green wall with leaves on it.webp"
 
+See clip_rename.bat in the root folder, modify it to your needs.
+
 Renaming is nearly instant as it is just renaming the files and not using and AI models or calculations, just a dumb find and replace on the filename.
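For reference, the "dumb find and replace on the filename" described above is nothing more than ordinary string replacement on the caption-style filename, one pronoun per pass, exactly as the chained commands do. A minimal Python sketch (not part of the commit), using the example filenames from this doc:

# Sketch of the rename described above: plain string replacement on the filename,
# one pronoun swapped per pass, in the same order as the chained commands.
filename = "a man and a woman standing next to each other in front of a green wall with leaves on it.webp"
for find, replace in [("a woman", "rihanna"), ("a man", "asap rocky")]:
    filename = filename.replace(find, replace)
print(filename)  # asap rocky and rihanna standing next to each other in front of a green wall with leaves on it.webp

The script then simply calls os.rename with the rewritten name, which is why the step takes effectively no time even on large folders.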
@@ -8,8 +8,10 @@ from torchvision.transforms.functional import InterpolationMode
 import torch
 import aiohttp
 import asyncio
+import subprocess
 
 SIZE = 384
+BLIP_MODEL_URL = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
 
 def get_parser(**parser_kwargs):
     parser = argparse.ArgumentParser(**parser_kwargs)
@@ -91,9 +93,9 @@ async def main(opt):
 
     if not os.path.exists(model_cache_path):
         print(f"Downloading model to {model_cache_path}... please wait")
-        blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
         async with aiohttp.ClientSession() as session:
-            async with session.get(blip_model_url) as res:
+            async with session.get(BLIP_MODEL_URL) as res:
                 result = await res.read()
                 with open(model_cache_path, 'wb') as f:
                     f.write(result)
@@ -158,7 +160,6 @@ def isWindows():
     return sys.platform.startswith("win")
 
 if __name__ == "__main__":
-    print(f"starting in {print(os.getcwd())}")
     parser = get_parser()
     opt = parser.parse_args()
 
@@ -171,6 +172,9 @@ if __name__ == "__main__":
     else:
         print("Unix detected, using default asyncio event loop policy")
 
+    if not os.path.exists("scripts/BLIP"):
+        print("BLIP not found, cloning BLIP repo")
+        subprocess.run(["git", "clone", "https://github.com/salesforce/BLIP", "scripts/BLIP"])
     blip_path = "scripts/BLIP"
     sys.path.append(blip_path)
 
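For reference, the auto_caption.py change above hoists the checkpoint location into a module-level BLIP_MODEL_URL, downloads it only when the cached file is missing, and clones the BLIP repo automatically when scripts/BLIP is absent. The same download-if-missing idea, sketched synchronously with urllib instead of the aiohttp session the script actually uses; the cache path below is an assumption for the sketch, not the script's:

import os
import urllib.request

BLIP_MODEL_URL = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth"
MODEL_CACHE_PATH = ".cache/model_base_caption_capfilt_large.pth"  # assumed location, not the script's

def ensure_blip_checkpoint() -> str:
    # Download the BLIP checkpoint only if it is not already cached on disk.
    if not os.path.exists(MODEL_CACHE_PATH):
        os.makedirs(os.path.dirname(MODEL_CACHE_PATH), exist_ok=True)
        print(f"Downloading model to {MODEL_CACHE_PATH}... please wait")
        urllib.request.urlretrieve(BLIP_MODEL_URL, MODEL_CACHE_PATH)
    return MODEL_CACHE_PATH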
@@ -1,6 +1,3 @@
-import aiofiles
-import aiofiles.os
-import asyncio
 import sys
 import os
 import glob
@@ -21,54 +18,76 @@ def get_parser(**parser_kwargs):
         type=str,
         nargs="?",
         const=True,
-        default="a man,a woman,a person",
+        default=None,
         help="what strings to replace, in csv format, default: 'a man,a woman,a person'",
     ),
     parser.add_argument(
         "--replace",
         type=str,
         nargs="?",
-        required=True,
+        required=False,
         const=True,
-        default="filename",
+        default=None,
        help="string to replace with, ex. 'john doe'",
     ),
+    parser.add_argument(
+        "--append_only",
+        type=str,
+        nargs="?",
+        required=False,
+        const=True,
+        default=None,
+        help="skips pronoun replace, adds a string at the end of the filename, use for 'by artist name' or 'in the style of somestyle'",
+    )
 
     return parser
 
 def isWindows():
     return sys.platform.startswith('win')
 
-async def rename_files(opt):
-    print("go")
-    find_list = opt.find.split(",")
+def get_replace_list(opt):
+    if opt.find is None:
+        return ("a man", "a woman", "a person", \
+            "a girl", "a boy", \
+            "a young woman", "a young man", \
+            "a beautiful woman", "a handsome man", \
+            "a beautiful young woman", "a handsome young man",
+        )
+    else:
+        return opt.find.split(",")
 
-    dir_iter = await aiofiles.os.scandir(opt.img_dir)
-    for file in dir_iter:
-        # get file extension
-        if file.is_file() and os.path.splitext(file.name)[1] in (".jpg", ".png", ".jpeg", ".gif", ".bmp", ".webp"):
-            try:
+def rename_files(opt):
+    find_list = get_replace_list(opt)
+    for file in glob.glob(opt.img_dir + "/*"):
+        print(file)
+
+        if os.path.splitext(file)[1] in (".jpg", ".png", ".jpeg", ".gif", ".bmp", ".webp"):
+            new_filename = file
+            if opt.append_only is not None:
+                new_filename = f"{os.path.splitext(file)[0]} {opt.append_only}{os.path.splitext(file)[1]}"
+            else:
                 for s in find_list:
                     if s in file.name:
-                        new_filename = file.name.replace(s, opt.replace)
-                await aiofiles.os.rename(file, os.path.join(opt.img_dir, new_filename))
+                        new_filename = new_filename.replace(s, opt.replace)
+            try:
+                print(f"Renaming {file} to {new_filename}")
+                os.rename(file, new_filename)
             except Exception as e:
                 print(f"error opening file: {file}")
                 print(f"{e}")
                 raise e
 
 if __name__ == "__main__":
     parser = get_parser()
     opt = parser.parse_args()
 
-    if (isWindows()):
-        print("{Fore.CYAN}Windows detected, using asyncio.WindowsSelectorEventLoopPolicy{Style.RESET_ALL}")
-        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-    else:
-        print("{Fore.CYAN}Unix detected, using default asyncio event loop policy{Style.RESET_ALL}")
     import time
 
     s = time.perf_counter()
-    result = asyncio.run(rename_files(opt))
+    rename_files(opt)
 
     elapsed = time.perf_counter() - s
     print(f"{__file__} executed in {elapsed:0.2f} seconds.")