# Example #1
# 0
        # Log per-epoch test metrics to TensorBoard (tail of the inner
        # evaluation/epoch scope; the loop header is above this chunk).
        writer.add_scalar('test_loss', test_loss, epoch)
        writer.add_scalar('test_acc', test_acc, epoch)

        # NOTE(review): LR scheduling on test loss is disabled; delete if
        # permanently unwanted, or re-enable deliberately.
        # if epoch >= 5:
        #     scheduler.step(test_loss)

        # Debug output: current optimizer configuration (repr includes LR,
        # momentum, etc.) and the TensorBoard/run log directory.
        print(optimizer)
        print(log_dir)

    # Log per-question-type training accuracy; `train_acc_list` is indexed
    # by the same `idx` keys as `idx_to_question_type`.
    for idx, q_type in idx_to_question_type.items():
        writer.add_scalar('train_acc_sub_{}'.format(q_type),
                          train_acc_list[idx], epoch)

    writer.add_scalar('train_loss', train_loss, epoch)

    writer.add_scalar('train_acc', train_acc, epoch)

    # Prune old checkpoints: list all 'model_<epoch>.pt' files and sort by
    # the integer epoch embedded in the filename ('model_12.pt' -> 12), so
    # saved_model_list[0] is the oldest checkpoint.
    saved_model_list = [x for x in os.listdir(model_dir) if x.endswith('.pt')]
    saved_model_list = sorted(
        saved_model_list, key=lambda x: int(x.split('_')[-1].split('.pt')[0]))

    # NOTE(review): this deletion runs EVERY epoch, but a new checkpoint is
    # written only every 5th epoch (below), so the retained count oscillates
    # between 4 and 5 rather than holding a fixed window of 5 — confirm
    # whether this prune was meant to live inside the `epoch % 5 == 0` branch.
    if len(saved_model_list) >= 5:
        os.remove(os.path.join(model_dir, saved_model_list[0]))

    # Persist a resumable checkpoint (model + optimizer state, tagged with
    # the epoch) every 5 epochs.
    if epoch % 5 == 0:
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }, os.path.join(model_dir, 'model_{}.pt'.format(epoch)))