Example #1
import os

import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Project-specific helpers assumed importable from the repository:
# config_params, setup_seed, ModelNet40, IterativeBenchmark, EMDLosspy,
# train_one_epoch, test_one_epoch, print_train_info


def main():
    args = config_params()
    print(args)

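    # Seed every RNG for reproducibility, then create the output directories
    # for TensorBoard summaries and model checkpoints.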
    setup_seed(args.seed)
    os.makedirs(args.saved_path, exist_ok=True)
    summary_path = os.path.join(args.saved_path, 'summary')
    os.makedirs(summary_path, exist_ok=True)
    checkpoints_path = os.path.join(args.saved_path, 'checkpoints')
    os.makedirs(checkpoints_path, exist_ok=True)

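    # Build the ModelNet40 train/test splits; only the training loader shuffles.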
    train_set = ModelNet40(root=args.root,
                           npts=args.train_npts,
                           train=True,
                           normal=args.normal,
                           mode=args.mode)
    test_set = ModelNet40(root=args.root,
                          npts=args.train_npts,
                          train=False,
                          normal=args.normal,
                          mode=args.mode)
    train_loader = DataLoader(train_set,
                              batch_size=args.batchsize,
                              shuffle=True,
                              num_workers=args.num_workers)
    test_loader = DataLoader(test_set,
                             batch_size=args.batchsize,
                             shuffle=False,
                             num_workers=args.num_workers)

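    # Registration network, EMD (earth mover's distance) loss, Adam, and a
    # step-decay LR schedule; surface normals add three input channels per point.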
    in_dim = 6 if args.normal else 3
    model = IterativeBenchmark(in_dim=in_dim, niters=args.niters, gn=args.gn)
    model = model.cuda()
    loss_fn = EMDLosspy().cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.milestones, gamma=args.gamma, last_epoch=-1)

    writer = SummaryWriter(summary_path)

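    # Per-epoch train/test loop: log metrics to TensorBoard every
    # args.saved_frequency epochs and checkpoint whichever test metric improves.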
    test_min_loss, test_min_r_mse_error, test_min_rot_error = \
        float('inf'), float('inf'), float('inf')
    for epoch in range(args.epoches):
        print('=' * 20, epoch + 1, '=' * 20)
        train_results = train_one_epoch(train_loader, model, loss_fn,
                                        optimizer)
        print_train_info(train_results)
        test_results = test_one_epoch(test_loader, model, loss_fn)
        print_train_info(test_results)

        if epoch % args.saved_frequency == 0:
            writer.add_scalar('Loss/train', train_results['loss'], epoch + 1)
            writer.add_scalar('Loss/test', test_results['loss'], epoch + 1)
            writer.add_scalar('RError/train', train_results['r_mse'],
                              epoch + 1)
            writer.add_scalar('RError/test', test_results['r_mse'], epoch + 1)
            writer.add_scalar('rotError/train', train_results['r_isotropic'],
                              epoch + 1)
            writer.add_scalar('rotError/test', test_results['r_isotropic'],
                              epoch + 1)
            writer.add_scalar('Lr', optimizer.param_groups[0]['lr'], epoch + 1)
        test_loss, test_r_error, test_rot_error = \
            test_results['loss'], test_results['r_mse'], test_results['r_isotropic']
        if test_loss < test_min_loss:
            saved_path = os.path.join(checkpoints_path, "test_min_loss.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_loss = test_loss
        if test_r_error < test_min_r_mse_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_rmse_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_r_mse_error = test_r_error
        if test_rot_error < test_min_rot_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_rot_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_rot_error = test_rot_error
        scheduler.step()
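
if __name__ == '__main__':
    main()

Both examples call helpers defined elsewhere in their repositories (config_params, setup_seed, train_one_epoch, test_one_epoch). As a rough sketch only, not the repository's actual code, setup_seed typically pins every random number generator involved:

import random

import numpy as np
import torch


def setup_seed(seed):
    # Illustrative sketch; the repository's own setup_seed may differ.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)                     # CPU RNG
    torch.cuda.manual_seed_all(seed)            # all GPU RNGs
    torch.backends.cudnn.deterministic = True   # reproducibility over speed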
Example #2
import os

import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Project-specific helpers assumed importable from the repository:
# config_params, setup_seed, ModelNet40, IterativeBenchmark, EMDLosspy,
# train_one_epoch, test_one_epoch


def main():
    args = config_params()
    print(args)

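    # Seed every RNG for reproducibility, then create the output directories.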
    setup_seed(args.seed)
    os.makedirs(args.saved_path, exist_ok=True)
    summary_path = os.path.join(args.saved_path, 'summary')
    os.makedirs(summary_path, exist_ok=True)
    checkpoints_path = os.path.join(args.saved_path, 'checkpoints')
    os.makedirs(checkpoints_path, exist_ok=True)

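    # ModelNet40 splits; the positional arguments appear to be root, npts,
    # and a train flag (False selects the test split).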
    train_set = ModelNet40(args.root, args.train_npts)
    test_set = ModelNet40(args.root, args.train_npts, False)
    train_loader = DataLoader(train_set,
                              batch_size=args.batchsize,
                              shuffle=True,
                              num_workers=args.num_workers)
    test_loader = DataLoader(test_set,
                             batch_size=args.batchsize,
                             shuffle=False,
                             num_workers=args.num_workers)

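    # Network, EMD loss, Adam optimizer, and a step-decay LR schedule.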
    model = IterativeBenchmark(in_dim1=args.in_dim, niters=args.niters)
    model = model.cuda()
    loss_fn = EMDLosspy()
    loss_fn = loss_fn.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.milestones, gamma=args.gamma, last_epoch=-1)

    writer = SummaryWriter(summary_path)

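    # Track the best (lowest) test metrics seen so far for checkpointing.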
    test_min_loss, test_min_t_error, test_min_R_error, test_min_degree_error = \
        float('inf'), float('inf'), float('inf'), float('inf')
    for epoch in range(args.epoches):
        print('=' * 20, epoch + 1, '=' * 20)
        train_loss, train_t_error, train_R_error, train_degree_error = \
            train_one_epoch(train_loader, model, loss_fn, optimizer)
        print(
            'Train: loss: {:.4f}, t_error: {:.4f}, R_error: {:.4f}, degree_error: {:.4f}'
            .format(train_loss, train_t_error, train_R_error,
                    train_degree_error))
        test_loss, test_t_error, test_R_error, test_degree_error = \
            test_one_epoch(test_loader, model, loss_fn)
        print(
            'Test: loss: {:.4f}, t_error: {:.4f}, R_error: {:.4f}, degree_error: {:.4f}'
            .format(test_loss, test_t_error, test_R_error, test_degree_error))

        if epoch % args.saved_frequency == 0:
            writer.add_scalar('Loss/train', train_loss, epoch + 1)
            writer.add_scalar('Loss/test', test_loss, epoch + 1)
            writer.add_scalar('TError/train', train_t_error, epoch + 1)
            writer.add_scalar('TError/test', test_t_error, epoch + 1)
            writer.add_scalar('RError/train', train_R_error, epoch + 1)
            writer.add_scalar('RError/test', test_R_error, epoch + 1)
            writer.add_scalar('degreeError/train', train_degree_error,
                              epoch + 1)
            writer.add_scalar('degreeError/test', test_degree_error, epoch + 1)
            writer.add_scalar('Lr', optimizer.param_groups[0]['lr'], epoch + 1)
        if test_loss < test_min_loss:
            saved_path = os.path.join(checkpoints_path, "test_min_loss.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_loss = test_loss
        if test_t_error < test_min_t_error:
            saved_path = os.path.join(checkpoints_path, "test_min_t_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_t_error = test_t_error
        if test_R_error < test_min_R_error:
            saved_path = os.path.join(checkpoints_path, "test_min_R_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_R_error = test_R_error
        if test_degree_error < test_min_degree_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_degree_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_degree_error = test_degree_error
        scheduler.step()
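
if __name__ == '__main__':
    main()

Both scripts save only model.state_dict(), so later evaluation must rebuild the model with the same constructor arguments before loading a checkpoint. A minimal sketch, assuming IterativeBenchmark is importable from the repository and using placeholder arguments and path (assumptions, not values taken from the scripts):

import torch

# Rebuild the network exactly as it was configured for training;
# in_dim1=3 and niters=8 are placeholders, as is the checkpoint path.
model = IterativeBenchmark(in_dim1=3, niters=8)
state = torch.load('work_dir/checkpoints/test_min_loss.pth',
                   map_location='cpu')
model.load_state_dict(state)
model.eval()  # disable training-only behaviour before inference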