diff --git a/data/data_loader.py b/data/data_loader.py
index 3176855..6a1f352 100644
--- a/data/data_loader.py
+++ b/data/data_loader.py
@@ -231,7 +231,7 @@ class DataLoaderMultiAspect():
                 target_wh = min(self.aspects, key=lambda aspects:abs(aspects[0]/aspects[1] - image_aspect))
                 if not self.has_scanned:
                     if width * height < target_wh[0] * target_wh[1]:
-                        undersized_images.append(f" *** {pathname} with size: {width},{height} is smaller than target size: {target_wh}, consider using larger images")
+                        undersized_images.append(f" {pathname}, size: {width},{height}, target size: {target_wh}")
 
                 image_train_item = ImageTrainItem(image=None, caption=caption, target_wh=target_wh, pathname=pathname, flip_p=flip_p)
 
@@ -251,7 +251,7 @@ class DataLoaderMultiAspect():
                 with open(underized_log_path, "w") as undersized_images_file:
                     undersized_images_file.write(f" The following images are smaller than the target size, consider removing or sourcing a larger copy:")
                     for undersized_image in undersized_images:
-                        undersized_images_file.write(undersized_image)
+                        undersized_images_file.write(f"{undersized_image}\n")
 
         return decorated_image_train_items
 
diff --git a/train.py b/train.py
index 0883363..8e5f1f6 100644
--- a/train.py
+++ b/train.py
@@ -675,8 +675,6 @@ def main(args):
     logging.info(f" {Fore.GREEN}batch_size: {Style.RESET_ALL}{Fore.LIGHTGREEN_EX}{args.batch_size}{Style.RESET_ALL}")
     logging.info(f" {Fore.GREEN}epoch_len: {Fore.LIGHTGREEN_EX}{epoch_len}{Style.RESET_ALL}")
 
-
-    #scaler = torch.cuda.amp.GradScaler()
     scaler = GradScaler(
         enabled=args.amp,
         init_scale=2**17.5,
@@ -686,13 +684,8 @@ def main(args):
     )
     logging.info(f" Grad scaler enabled: {scaler.is_enabled()} (amp mode)")
 
-
     epoch_pbar = tqdm(range(args.max_epochs), position=0, leave=True)
     epoch_pbar.set_description(f"{Fore.LIGHTCYAN_EX}Epochs{Style.RESET_ALL}")
-
-    # steps_pbar = tqdm(range(epoch_len), position=1, leave=True)
-    # steps_pbar.set_description(f"{Fore.LIGHTCYAN_EX}Steps{Style.RESET_ALL}")
-
     epoch_times = []
 
     global global_step
@@ -781,11 +774,8 @@ def main(args):
                                 param.grad *= grad_scale
 
                 if ((global_step + 1) % args.grad_accum == 0) or (step == epoch_len - 1):
-                    # if args.amp:
                     scaler.step(optimizer)
                     scaler.update()
-                    # else:
-                    #     optimizer.step()
                     optimizer.zero_grad(set_to_none=True)
 
                 lr_scheduler.step()