import json
import logging
import math
import random
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Callable, Any, Optional, Generator

import torch
import torch.nn.functional as F
import numpy as np
from colorama import Fore, Style
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm

from data.every_dream import build_torch_dataloader, EveryDreamBatch
from data.data_loader import DataLoaderMultiAspect
from data import resolver
from data import aspects
from data.image_train_item import ImageTrainItem
from utils.isolate_rng import isolate_rng


def get_random_split(items: list[ImageTrainItem], split_proportion: float, batch_size: int) \
        -> tuple[list[ImageTrainItem], list[ImageTrainItem]]:
    # note: batch_size is currently unused here
    split_item_count = max(1, math.ceil(split_proportion * len(items)))
    # sort first, then shuffle, to ensure a deterministic outcome for the current random state
    items_copy = list(sorted(items, key=lambda i: i.pathname))
    random.shuffle(items_copy)
    split_items = list(items_copy[:split_item_count])
    remaining_items = list(items_copy[split_item_count:])
    return split_items, remaining_items
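

# Illustrative sketch (not part of the training flow): with a fixed random seed,
# get_random_split always returns the same split. `_FakeItem` is a hypothetical
# stand-in with the one attribute the function needs; real callers pass ImageTrainItem.
def _demo_get_random_split():
    from collections import namedtuple
    _FakeItem = namedtuple('_FakeItem', ['pathname'])
    items = [_FakeItem(pathname=f"/data/img_{n:03d}.jpg") for n in range(10)]
    random.seed(555)
    val_items, train_items = get_random_split(items, split_proportion=0.15, batch_size=1)
    assert len(val_items) == 2   # ceil(0.15 * 10) == 2
    assert len(train_items) == 8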


def disable_multiplier_and_flip(items: list[ImageTrainItem]) -> Generator[ImageTrainItem, None, None]:
    # yield copies with augmentation disabled: no horizontal flip, multiplier forced to 1
    for i in items:
        yield ImageTrainItem(image=i.image, caption=i.caption, aspects=i.aspects, pathname=i.pathname, flip_p=0, multiplier=1)


@dataclass
class ValidationDataset:
    name: str
    dataloader: torch.utils.data.DataLoader
    loss_history: list[float] = field(default_factory=list)
    val_loss_window_size: Optional[int] = 5  # todo: arg for this?

    def track_loss_trend(self, mean_loss: float):
        if self.val_loss_window_size is None:
            return
        self.loss_history.append(mean_loss)

        if len(self.loss_history) > ((self.val_loss_window_size * 2) + 1):
            dy = np.diff(self.loss_history[-self.val_loss_window_size:])
            if np.average(dy) > 0:
                logging.warning(f"Validation loss for {self.name} appears to be diverging. Check your loss/{self.name} graph.")
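

# Illustrative sketch (not part of the training flow): track_loss_trend only starts
# checking once more than (2 * window + 1) losses have been recorded, then warns when
# the mean slope of the last `window` losses is positive. `dataloader=None` is a
# stand-in for demonstration purposes only.
def _demo_track_loss_trend():
    ds = ValidationDataset(name='demo', dataloader=None, val_loss_window_size=3)
    for mean_loss in [1.0, 0.9, 0.8, 0.7, 0.75, 0.8, 0.85, 0.9]:
        ds.track_loss_trend(mean_loss)  # warns on the last call: recent slope is positive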


class EveryDreamValidator:
    def __init__(self,
                 val_config_path: Optional[str],
                 default_batch_size: int,
                 resolution: int,
                 log_writer: SummaryWriter,
                 ):
        self.validation_datasets = []
        self.resolution = resolution
        self.log_writer = log_writer

        self.config = {
            'batch_size': default_batch_size,
            'every_n_epochs': 1,
            'seed': 555,

            'validate_training': True,
            'val_split_mode': 'automatic',
            'auto_split_proportion': 0.15,

            'stabilize_training_loss': False,
            'stabilize_split_proportion': 0.15,

            'use_relative_loss': False,

            'extra_manual_datasets': {
                # name: path pairs
                # eg "santa suit": "/path/to/captioned_santa_suit_images", will be logged to tensorboard as "loss/santa suit"
            }
        }
        if val_config_path is not None:
            with open(val_config_path, 'rt') as f:
                self.config.update(json.load(f))

        if 'val_data_root' in self.config:
            logging.warning(f" * {Fore.YELLOW}using old name 'val_data_root' for 'manual_data_root' - please "
                            f"update your validation config json{Style.RESET_ALL}")
            self.config.update({'manual_data_root': self.config['val_data_root']})

        if self.config.get('val_split_mode') == 'manual':
            manual_data_root = self.config.get('manual_data_root')
            if manual_data_root is not None:
                self.config['extra_manual_datasets'].update({'val': manual_data_root})
            else:
                if len(self.config['extra_manual_datasets']) == 0:
                    raise ValueError("Error in validation config .json: 'manual' validation requested but no "
                                     "'manual_data_root' or 'extra_manual_datasets' was provided")

        if 'val_split_proportion' in self.config:
            logging.warning(f" * {Fore.YELLOW}using old name 'val_split_proportion' for 'auto_split_proportion' - please "
                            f"update your validation config json{Style.RESET_ALL}")
            self.config.update({'auto_split_proportion': self.config['val_split_proportion']})
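
    # Illustrative validation config json (keys mirror the defaults above; the
    # "santa suit" path is hypothetical):
    #
    #     {
    #         "validate_training": true,
    #         "val_split_mode": "automatic",
    #         "auto_split_proportion": 0.15,
    #         "every_n_epochs": 1,
    #         "seed": 555,
    #         "extra_manual_datasets": {
    #             "santa suit": "/path/to/captioned_santa_suit_images"
    #         }
    #     }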

    @property
    def batch_size(self):
        return self.config['batch_size']

    @property
    def every_n_epochs(self):
        return self.config['every_n_epochs']

    @property
    def seed(self):
        return self.config['seed']

    @property
    def use_relative_loss(self):
        return self.config['use_relative_loss']

    def prepare_validation_splits(self, train_items: list[ImageTrainItem], tokenizer: Any) -> list[ImageTrainItem]:
        """
        Build the validation splits as requested by the config passed at init.
        This may steal some items from `train_items`.
        If this happens, the returned `list` contains the remaining items after the required items have been stolen.
        Otherwise, the returned `list` is identical to the passed-in `train_items`.
        """
        with isolate_rng():
            random.seed(self.seed)

            auto_dataset, remaining_train_items = self._build_automatic_validation_dataset_if_required(train_items, tokenizer)
            # order is important - if we're removing images from train, this needs to happen before making
            # the overlapping dataloader
            train_overlapping_dataset = self._build_train_stabilizer_dataloader_if_required(
                remaining_train_items, tokenizer)

            if auto_dataset is not None:
                self.validation_datasets.append(auto_dataset)
            if train_overlapping_dataset is not None:
                self.validation_datasets.append(train_overlapping_dataset)
            manual_splits = self._build_manual_validation_datasets(tokenizer)
            self.validation_datasets.extend(manual_splits)

        return remaining_train_items
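
    # Typical call site, as a sketch - `validator`, `train_items` and `tokenizer` are
    # assumed to come from the surrounding training script:
    #
    #     validator = EveryDreamValidator(val_config_path, default_batch_size=batch_size,
    #                                     resolution=resolution, log_writer=log_writer)
    #     train_items = validator.prepare_validation_splits(train_items, tokenizer)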

    def get_validation_step_indices(self, epoch, epoch_length_steps: int) -> list[int]:
        if self.every_n_epochs >= 1:
            if ((epoch+1) % self.every_n_epochs) == 0:
                # last step only
                return [epoch_length_steps-1]
            else:
                return []
        else:
            # subdivide the epoch evenly, by rounding self.every_n_epochs to the nearest clean division of steps
            num_divisions = max(1, min(epoch_length_steps, round(1/self.every_n_epochs)))
            # validation happens after training:
            # if an epoch has eg 100 steps and num_divisions is 2, then validation should occur after steps 49 and 99
            validate_every_n_steps = epoch_length_steps / num_divisions
            return [math.ceil((i+1)*validate_every_n_steps) - 1 for i in range(num_divisions)]
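
    # Worked example: with every_n_epochs=0.5 and epoch_length_steps=100,
    # num_divisions = round(1 / 0.5) = 2 and validate_every_n_steps = 50.0,
    # so the returned indices are [ceil(50) - 1, ceil(100) - 1] = [49, 99].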

    def do_validation(self, global_step: int,
                      get_model_prediction_and_target_callable: Callable[
                          [Any, Any], tuple[torch.Tensor, torch.Tensor]]):
        mean_loss_accumulator = 0
        for dataset in self.validation_datasets:
            mean_loss = self._calculate_validation_loss(dataset.name,
                                                        dataset.dataloader,
                                                        get_model_prediction_and_target_callable)
            mean_loss_accumulator += mean_loss
            self.log_writer.add_scalar(tag=f"loss/{dataset.name}",
                                       scalar_value=mean_loss,
                                       global_step=global_step)
            dataset.track_loss_trend(mean_loss)
        # log the combined loss to loss/_all_val_combined
        if len(self.validation_datasets) > 1:
            total_mean_loss = mean_loss_accumulator / len(self.validation_datasets)
            self.log_writer.add_scalar(tag="loss/_all_val_combined",
                                       scalar_value=total_mean_loss,
                                       global_step=global_step)

    def _calculate_validation_loss(self, tag, dataloader, get_model_prediction_and_target: Callable[
            [Any, Any], tuple[torch.Tensor, torch.Tensor]]) -> float:
        with torch.no_grad(), isolate_rng():
            # ok to override seed here because we are in a `with isolate_rng():` block
            random.seed(self.seed)
            torch.manual_seed(self.seed)

            loss_validation_epoch = []
            steps_pbar = tqdm(range(len(dataloader)), position=1, leave=False)
            steps_pbar.set_description(f"{Fore.LIGHTCYAN_EX}Validate ({tag}){Style.RESET_ALL}")

            for step, batch in enumerate(dataloader):
                model_pred, target = get_model_prediction_and_target(batch["image"], batch["tokens"])

                loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

                del target, model_pred

                loss_step = loss.detach().item()
                loss_validation_epoch.append(loss_step)

                steps_pbar.update(1)

            steps_pbar.close()

            loss_validation_local = sum(loss_validation_epoch) / len(loss_validation_epoch)
        return loss_validation_local
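
    # The callable passed to _calculate_validation_loss is only required to run the model
    # on a batch and return a (model_pred, target) pair of same-shape tensors; the MSE
    # between them is the validation loss. A minimal sketch (names hypothetical):
    #
    #     def get_model_prediction_and_target(image, tokens):
    #         ...  # run the model forward pass for this batch
    #         return model_pred, target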

    def _build_automatic_validation_dataset_if_required(self, image_train_items: list[ImageTrainItem], tokenizer) \
            -> tuple[Optional[ValidationDataset], list[ImageTrainItem]]:
        val_split_mode = self.config['val_split_mode'] if self.config['validate_training'] else None
        if val_split_mode is None or val_split_mode == 'none' or val_split_mode == 'manual':
            # 'manual' is handled by _build_manual_validation_datasets
            return None, image_train_items
        elif val_split_mode == 'automatic':
            auto_split_proportion = self.config['auto_split_proportion']
            val_items, remaining_train_items = get_random_split(image_train_items, auto_split_proportion, batch_size=self.batch_size)
            val_items = list(disable_multiplier_and_flip(val_items))
            logging.info(f" * Removed {len(val_items)} images from the training set to use for validation")
            val_ed_batch = self._build_ed_batch(val_items, tokenizer=tokenizer, name='val')
            val_dataloader = build_torch_dataloader(val_ed_batch, batch_size=self.batch_size)
            return ValidationDataset(name='val', dataloader=val_dataloader), remaining_train_items
        else:
            raise ValueError(f"Unrecognized validation split mode '{val_split_mode}'")

    def _build_manual_validation_datasets(self, tokenizer) -> list[ValidationDataset]:
        datasets = []
        for name, root in self.config.get('extra_manual_datasets', {}).items():
            items = self._load_manual_val_split(root)
            logging.info(f" * Loaded {len(items)} validation images for validation set '{name}' from {root}")
            ed_batch = self._build_ed_batch(items, tokenizer=tokenizer, name=name)
            dataloader = build_torch_dataloader(ed_batch, batch_size=self.batch_size)
            datasets.append(ValidationDataset(name=name, dataloader=dataloader))
        return datasets

    def _build_train_stabilizer_dataloader_if_required(self, image_train_items: list[ImageTrainItem], tokenizer) \
            -> Optional[ValidationDataset]:
        stabilize_training_loss = self.config['stabilize_training_loss']
        if not stabilize_training_loss:
            return None

        stabilize_split_proportion = self.config['stabilize_split_proportion']
        stabilize_items, _ = get_random_split(image_train_items, stabilize_split_proportion, batch_size=self.batch_size)
        stabilize_items = list(disable_multiplier_and_flip(stabilize_items))
        stabilize_ed_batch = self._build_ed_batch(stabilize_items, tokenizer=tokenizer, name='stabilize-train')
        stabilize_dataloader = build_torch_dataloader(stabilize_ed_batch, batch_size=self.batch_size)
        return ValidationDataset(name='stabilize-train', dataloader=stabilize_dataloader, val_loss_window_size=None)

    def _load_manual_val_split(self, val_data_root: str):
        args = Namespace(
            aspects=aspects.get_aspect_buckets(self.resolution),
            flip_p=0.0,
            seed=self.seed,
        )
        val_items = resolver.resolve_root(val_data_root, args)
        # sort first, then shuffle, for a deterministic order given the current random state
        val_items.sort(key=lambda i: i.pathname)
        random.shuffle(val_items)
        return val_items

    def _build_ed_batch(self, items: list[ImageTrainItem], tokenizer, name='val'):
        batch_size = self.batch_size
        seed = self.seed
        data_loader = DataLoaderMultiAspect(
            items,
            batch_size=batch_size,
            seed=seed,
        )
        ed_batch = EveryDreamBatch(
            data_loader=data_loader,
            debug_level=1,
            conditional_dropout=0,
            tokenizer=tokenizer,
            seed=seed,
            name=name,
            crop_jitter=0,
        )
        return ed_batch