better undersized log file
This commit is contained in:
parent 1c2708dc63
commit 36ece59660
@@ -231,7 +231,7 @@ class DataLoaderMultiAspect():
     target_wh = min(self.aspects, key=lambda aspects:abs(aspects[0]/aspects[1] - image_aspect))
     if not self.has_scanned:
         if width * height < target_wh[0] * target_wh[1]:
-            undersized_images.append(f" *** {pathname} with size: {width},{height} is smaller than target size: {target_wh}, consider using larger images")
+            undersized_images.append(f" {pathname}, size: {width},{height}, target size: {target_wh}")

     image_train_item = ImageTrainItem(image=None, caption=caption, target_wh=target_wh, pathname=pathname, flip_p=flip_p)
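For context, the selection above picks the aspect-ratio bucket whose width/height ratio is closest to the source image's, and an image counts as undersized when it holds fewer pixels than that bucket. A minimal standalone sketch of those two checks, using a made-up bucket list and image size that are not taken from this commit:

# Hypothetical (width, height) buckets and image size, invented for illustration only.
aspects = [(512, 512), (576, 448), (448, 576), (640, 384)]
width, height = 600, 400
image_aspect = width / height                                  # 1.5

# Pick the bucket whose aspect ratio is closest to the image's, as in the diff.
target_wh = min(aspects, key=lambda wh: abs(wh[0] / wh[1] - image_aspect))
print(target_wh)                                               # (640, 384) for these made-up values

# An image is "undersized" when it has fewer pixels than the chosen bucket.
if width * height < target_wh[0] * target_wh[1]:
    print(f" example.jpg, size: {width},{height}, target size: {target_wh}")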
@@ -251,7 +251,7 @@ class DataLoaderMultiAspect():
     with open(underized_log_path, "w") as undersized_images_file:
         undersized_images_file.write(f" The following images are smaller than the target size, consider removing or sourcing a larger copy:")
         for undersized_image in undersized_images:
-            undersized_images_file.write(undersized_image)
+            undersized_images_file.write(f"{undersized_image}\n")

     return decorated_image_train_items
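The only functional change in this hunk is the trailing newline per entry; without it every undersized image would run together on one line of the log. A small isolated sketch of the write loop, with an invented file name and invented entries (a newline is also added after the header here for readability, which the committed line does not do):

undersized_images = [
    " /data/a.jpg, size: 300,400, target size: (448, 576)",
    " /data/b.jpg, size: 256,256, target size: (512, 512)",
]

with open("undersized_images.txt", "w") as undersized_images_file:   # hypothetical path
    undersized_images_file.write(" The following images are smaller than the target size, consider removing or sourcing a larger copy:\n")
    for undersized_image in undersized_images:
        undersized_images_file.write(f"{undersized_image}\n")        # one image per line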
train.py (10 changed lines)
@@ -675,8 +675,6 @@ def main(args):
    logging.info(f" {Fore.GREEN}batch_size: {Style.RESET_ALL}{Fore.LIGHTGREEN_EX}{args.batch_size}{Style.RESET_ALL}")
    logging.info(f" {Fore.GREEN}epoch_len: {Fore.LIGHTGREEN_EX}{epoch_len}{Style.RESET_ALL}")

    #scaler = torch.cuda.amp.GradScaler()
    scaler = GradScaler(
        enabled=args.amp,
        init_scale=2**17.5,
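The scaler is now constructed unconditionally and gated by enabled=args.amp, so the later scaler.step()/scaler.update() calls work identically whether mixed precision is on or off. A minimal sketch of that pattern, where the model, optimizer, loss, and use_amp flag are placeholders and only init_scale=2**17.5 and the enabled gating come from the diff:

import torch
from torch.cuda.amp import GradScaler, autocast

use_amp = torch.cuda.is_available()            # stand-in for args.amp
device = "cuda" if use_amp else "cpu"

model = torch.nn.Linear(8, 1).to(device)       # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

# With enabled=False the scaler becomes a no-op, so the step below
# is the same code path with and without AMP.
scaler = GradScaler(enabled=use_amp, init_scale=2**17.5)

x = torch.randn(4, 8, device=device)
y = torch.randn(4, 1, device=device)

with autocast(enabled=use_amp):
    loss = torch.nn.functional.mse_loss(model(x), y)

scaler.scale(loss).backward()
scaler.step(optimizer)      # unscales grads and calls optimizer.step(), skipping it on inf/NaN
scaler.update()
optimizer.zero_grad(set_to_none=True)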
@@ -686,13 +684,8 @@ def main(args):
    )
    logging.info(f" Grad scaler enabled: {scaler.is_enabled()} (amp mode)")

    epoch_pbar = tqdm(range(args.max_epochs), position=0, leave=True)
    epoch_pbar.set_description(f"{Fore.LIGHTCYAN_EX}Epochs{Style.RESET_ALL}")

    # steps_pbar = tqdm(range(epoch_len), position=1, leave=True)
    # steps_pbar.set_description(f"{Fore.LIGHTCYAN_EX}Steps{Style.RESET_ALL}")

    epoch_times = []

    global global_step
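For reference, position= and leave= are what let tqdm stack an epoch bar above a per-step bar without the two overwriting each other. A tiny standalone illustration with arbitrary loop sizes, not tied to this repo:

import time
from tqdm import tqdm

epoch_pbar = tqdm(range(3), position=0, leave=True)
epoch_pbar.set_description("Epochs")
for _ in epoch_pbar:
    steps_pbar = tqdm(range(40), position=1, leave=False)
    steps_pbar.set_description("Steps")
    for _ in steps_pbar:
        time.sleep(0.01)   # stand-in for one training step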
@@ -781,11 +774,8 @@ def main(args):
                param.grad *= grad_scale

            if ((global_step + 1) % args.grad_accum == 0) or (step == epoch_len - 1):
                # if args.amp:
                scaler.step(optimizer)
                scaler.update()
                # else:
                # optimizer.step()
                optimizer.zero_grad(set_to_none=True)

            lr_scheduler.step()
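The surrounding block is the standard gradient-accumulation pattern: micro-batch gradients pile up, get rescaled in place (the param.grad *= grad_scale line above), and the optimizer only steps every grad_accum batches or on the epoch's final batch, always through the scaler now that it is gated by enabled. A compact, self-contained sketch of that control flow; the model, data, grad_scale value, and scheduler are invented placeholders, only the accumulation and conditional-step logic mirrors the diff:

import torch
from torch.cuda.amp import GradScaler

grad_accum = 4                                   # stand-in for args.grad_accum
epoch_len = 10                                   # stand-in for the dataloader length
grad_scale = 1.0 / grad_accum                    # assumption: average grads over the window

model = torch.nn.Linear(8, 1)                    # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1.0)
scaler = GradScaler(enabled=False)               # disabled scaler keeps the calls uniform on CPU

global_step = 0
for step in range(epoch_len):
    x, y = torch.randn(4, 8), torch.randn(4, 1)  # fake micro-batch
    loss = torch.nn.functional.mse_loss(model(x), y)
    scaler.scale(loss).backward()                # gradients accumulate across micro-batches

    # Optimizer steps only every grad_accum batches, or on the last batch of the epoch.
    if ((global_step + 1) % grad_accum == 0) or (step == epoch_len - 1):
        for param in model.parameters():         # in-place rescale, analogous to param.grad *= grad_scale
            if param.grad is not None:
                param.grad *= grad_scale
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad(set_to_none=True)

    lr_scheduler.step()
    global_step += 1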