# EveryDream-trainer/ldm/data/ed_validate.py
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from pathlib import Path
from ldm.data.data_loader import DataLoaderMultiAspect as dlma
import math
class EDValidateBatch(Dataset):
2022-09-06 01:00:21 -06:00
def __init__(self,
data_root,
2022-09-28 15:18:09 -06:00
flip_p=0.0,
2022-11-02 20:23:09 -06:00
repeats=1,
2022-11-05 09:41:48 -06:00
debug_level=0,
batch_size=1
2022-09-06 01:00:21 -06:00
):
2022-11-05 09:41:48 -06:00
print(f"EDValidateBatch batch size: {self.batch_size}") if debug_level > 0 else None
2022-09-06 01:00:21 -06:00
self.data_root = data_root
2022-11-05 09:41:48 -06:00
self.batch_size = batch_size
self.image_caption_pairs = dlma(data_root=data_root, debug_level=debug_level, batch_size=self.batch_size).get_all_images()
2022-09-06 01:00:21 -06:00
2022-11-05 09:41:48 -06:00
# most_subscribed_aspect_ratio = self.most_subscribed_aspect_ratio()
# self.image_caption_pairs = [image_caption_pair for image_caption_pair in self.image_caption_pairs if image_caption_pair[0].size == aspect_ratio]
2022-11-02 20:23:09 -06:00
2022-11-03 17:47:54 -06:00
self.num_images = len(self.image_caption_pairs)
2022-09-06 01:00:21 -06:00
2022-11-05 09:41:48 -06:00
self._length = max(math.trunc(self.num_images * repeats), 1)
2022-09-06 01:00:21 -06:00
self.flip = transforms.RandomHorizontalFlip(p=flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
2022-11-03 17:47:54 -06:00
idx = i % len(self.image_caption_pairs)
example = self.get_image(self.image_caption_pairs[idx])
2022-11-05 09:41:48 -06:00
#print caption and image size
print(f"Caption: {example['image'].shape} {example['caption']}")
2022-11-03 17:47:54 -06:00
return example
def get_image(self, image_caption_pair):
2022-09-06 01:00:21 -06:00
example = {}
2022-11-03 17:47:54 -06:00
image = image_caption_pair[0]
2022-09-06 01:00:21 -06:00
if not image.mode == "RGB":
image = image.convert("RGB")
2022-11-03 17:47:54 -06:00
identifier = image_caption_pair[1]
2022-09-06 01:00:21 -06:00
image = self.flip(image)
image = np.array(image).astype(np.uint8)
example["image"] = (image / 127.5 - 1.0).astype(np.float32)
2022-11-02 20:23:09 -06:00
example["caption"] = identifier
return example
2022-11-05 09:41:48 -06:00
def filter_aspect_ratio(self, aspect_ratio):
# filter the images to only include the given aspect ratio
self.image_caption_pairs = [image_caption_pair for image_caption_pair in self.image_caption_pairs if image_caption_pair[0].size == aspect_ratio]
self.num_images = len(self.image_caption_pairs)
self._length = max(math.trunc(self.num_images * self.repeats), 2)
def most_subscribed_aspect_ratio(self):
# find the image size with the highest number of images
aspect_ratios = {}
for image_caption_pair in self.image_caption_pairs:
image = image_caption_pair[0]
aspect_ratio = image.size
if aspect_ratio in aspect_ratios:
aspect_ratios[aspect_ratio] += 1
else:
aspect_ratios[aspect_ratio] = 1
return max(aspect_ratios, key=aspect_ratios.get)