# --- tail of the per-epoch evaluation function (its `def` lies above this chunk) ---
# Reduce the per-batch loss lists accumulated during evaluation to epoch means.
_all_loss = np.mean(all_loss_list)
_img_loss = np.mean(img_loss_list)
_lmk_loss = np.mean(lmk_loss_list)
_recog_loss = np.mean(recog_loss_list)
# Epoch summary line; `epoch + 1` converts the 0-based loop index to 1-based display.
print(
    "Epoch {:02}/{:02} all_loss: {:.6f} image loss: {:.6f} landmark loss {:.6f} recog loss {:.6f}"
    .format(epoch + 1, NUM_EPOCH, _all_loss, _img_loss, _lmk_loss,
            _recog_loss))
print("-" * 116)
# Returns the four mean losses plus a visualization image assembled earlier
# in this function (not visible in this chunk).
return _all_loss, _img_loss, _lmk_loss, _recog_loss, visualize_image


# --- top-level training driver ---
# Starts at 5, not 0 — presumably resuming from a checkpoint trained for 5 epochs;
# TODO confirm against the checkpoint-loading code above.
for epoch in range(5, NUM_EPOCH):
    model = train(model, epoch)
    # NOTE(review): `eval` here is the project evaluation function defined above,
    # shadowing the Python builtin — confirm and consider renaming at its definition.
    all_loss, img_loss, lmk_loss, recog_loss, visualize_image = eval(
        model, epoch)
    # NOTE(review): 'lr_schduler' looks like a typo for 'lr_scheduler' but must match
    # the name used where it is constructed (outside this chunk). `.step(all_loss)`
    # suggests a metric-driven scheduler such as ReduceLROnPlateau — verify.
    lr_schduler.step(all_loss)
    # Save the epoch's visualization grid.
    # NOTE(review): ':' in the filename is invalid on Windows; also this uses `epoch`
    # while the checkpoint below uses `epoch + 1` — confirm the off-by-one is intended.
    io.imsave(
        "./result_full/Epoch:{:02}_AllLoss:{:.6f}_ImgLoss:{:.6f}_LMKLoss:{:.6f}_RecogLoss:{:.6f}.png"
        .format(epoch, all_loss, img_loss, lmk_loss, recog_loss),
        visualize_image)
    # Checkpoint both model and optimizer state every epoch.
    model2save = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    # NOTE(review): the "loss" embedded in the filename is img + weighted lmk loss
    # only — recog loss is excluded from it (though listed separately). Confirm that
    # matches how `all_loss` is computed during training.
    torch.save(
        model2save,
        "./model_result_full/epoch_{:02}_loss_{:.4f}_Img_loss_{:.4f}_LMK_loss{:.4f}_Recog_loss{:.4f}.pth"
        .format(epoch + 1, img_loss + LMK_LOSS_WEIGHT * lmk_loss, img_loss,
                lmk_loss, recog_loss))
for g in optimizer.param_groups: g['lr'] = args.lr * e / args.warn_up_epoch # train train(model=network, train_loader=train_loader, criterion_cls=criterion_cls, optimizer=optimizer, device=device, writer=writer, cur_epoch=e) if e % args.val_epoch == 0: # validation val_acc = validate(model=network, val_loader=val_loader, device=device, writer=writer, cur_epoch=e) if val_acc > best_acc: best_acc = val_acc # save ckpt torch.save({ 'model': network.state_dict(), 'best_acc': best_acc }, os.path.join(args.save_dir, 'best.pth')) if e > args.warm_up_epoch: scheduler.step() if args.test: # prepare dataloader test_loader = DataLoader(dataset=CustomData('test', dir_path=args.data_dir), batch_size=1, num_workers=args.num_workers, shuffle=False) test(model=network, test_loader=test_loader, device=device, out_path=args.test_out_csv_path)