Merge pull request #2 from choe220/main

changed 'demoura' to 'joepenna' to match the notebook
David B 2022-09-28 15:14:28 -05:00 committed by GitHub
commit b441e1fc20
1 changed file with 18 additions and 12 deletions
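
For context, training_templates_smallest holds Python format strings that PersonalizedBase fills in with the placeholder token to build training captions, so this rename changes the identifier word baked into every training prompt. A minimal sketch of the effect, assuming a hypothetical placeholder value of "person" (not part of the commit):

import random

# After this commit the template reads 'joepenna {}' instead of 'demoura {}'.
training_templates_smallest = [
    'joepenna {}',
]

placeholder_string = "person"  # hypothetical placeholder token, for illustration only
caption = random.choice(training_templates_smallest).format(placeholder_string)
print(caption)  # -> "joepenna person"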


@@ -8,7 +8,7 @@ from torchvision import transforms
 import random
 training_templates_smallest = [
-    'demoura {}',
+    'joepenna {}',
 ]
 reg_templates_smallest = [
@@ -28,6 +28,7 @@ per_img_token_list = [
     'א', 'ב', 'ג', 'ד', 'ה', 'ו', 'ז', 'ח', 'ט', 'י', 'כ', 'ל', 'מ', 'נ', 'ס', 'ע', 'פ', 'צ', 'ק', 'ר', 'ש', 'ת',
 ]
 class PersonalizedBase(Dataset):
     def __init__(self,
                  data_root,
@@ -41,16 +42,17 @@ class PersonalizedBase(Dataset):
                  center_crop=False,
                  mixing_prob=0.25,
                  coarse_class_text=None,
-                 reg = False
+                 reg=False
                  ):
         self.data_root = data_root
-        self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
+        self.image_paths = [os.path.join(
+            self.data_root, file_path) for file_path in os.listdir(self.data_root)]
         # self._length = len(self.image_paths)
         self.num_images = len(self.image_paths)
-        self._length = self.num_images
+        self._length = self.num_images
         self.placeholder_token = placeholder_token
@@ -61,7 +63,8 @@ class PersonalizedBase(Dataset):
         self.coarse_class_text = coarse_class_text
         if per_image_tokens:
-            assert self.num_images < len(per_img_token_list), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'."
+            assert self.num_images < len(
+                per_img_token_list), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'."
         if set == "train":
             self._length = self.num_images * repeats
@@ -90,26 +93,29 @@ class PersonalizedBase(Dataset):
             placeholder_string = f"{self.coarse_class_text} {placeholder_string}"
         if not self.reg:
-            text = random.choice(training_templates_smallest).format(placeholder_string)
+            text = random.choice(training_templates_smallest).format(
+                placeholder_string)
         else:
-            text = random.choice(reg_templates_smallest).format(placeholder_string)
+            text = random.choice(reg_templates_smallest).format(
+                placeholder_string)
         example["caption"] = text
         # default to score-sde preprocessing
         img = np.array(image).astype(np.uint8)
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
             h, w, = img.shape[0], img.shape[1]
             img = img[(h - crop) // 2:(h + crop) // 2,
-                (w - crop) // 2:(w + crop) // 2]
+                      (w - crop) // 2:(w + crop) // 2]
         image = Image.fromarray(img)
         if self.size is not None:
-            image = image.resize((self.size, self.size), resample=self.interpolation)
+            image = image.resize((self.size, self.size),
+                                 resample=self.interpolation)
         image = self.flip(image)
         image = np.array(image).astype(np.uint8)
         example["image"] = (image / 127.5 - 1.0).astype(np.float32)
-        return example
+        return example
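
For reference, a minimal usage sketch of the PersonalizedBase class touched by this diff; the module path ldm/data/personalized.py, the folder name, and the token value are illustrative assumptions, not part of the commit:

# Sketch only: assumes the class is importable from ldm/data/personalized.py,
# and uses placeholder values for the data directory and the token.
from ldm.data.personalized import PersonalizedBase

train_ds = PersonalizedBase(
    data_root="training_images",   # directory of subject photos (placeholder path)
    size=512,                      # each image is resized to 512x512
    set="train",                   # "train" multiplies the dataset length by `repeats`
    placeholder_token="person",    # substituted into the '{}' of the caption templates
    reg=False,                     # False -> captions come from training_templates_smallest
)

example = train_ds[0]
# example["caption"] -> e.g. "joepenna person"
# example["image"]   -> float32 array scaled to [-1, 1], typically shape (512, 512, 3)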