Example #1
import datetime
import os
import sys
import time

import numpy as np
import torch

# Project-local imports assumed: DispNet, PoseNet, CPETDataset,
# ViewSynthesisLoss, Visualizer, train_epoch, validate, model_checkpoint,
# generate_curve. `device` is assumed to be defined at module scope, e.g.
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def main(args):
    exp_path = os.path.join(args.output_dir, args.exp_name)
    log_path = os.path.join(exp_path, 'logs')
    checkpoint_path = os.path.join(exp_path, 'checkpoints')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if os.path.exists(exp_path):
        print('Error: Experiment already exists, please rename --exp-name')
        sys.exit(1)
    os.makedirs(log_path)
    os.makedirs(checkpoint_path)
    print("All experiment outputs will be saved within:", exp_path)

    # set seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # get models and load pre-trained disparity network
    disp_net = DispNet.DispNet(1).to(device)
    disp_net.init_weights()
    if args.disp_net is not None:
        disp_net.load_state_dict(torch.load(args.disp_net, map_location='cpu'))
    disp_net.train()
    pose_net = PoseNet.PoseNet(1, args.sequence_length - 1).to(device)
    pose_net.init_weights()
    pose_net.train()

    # joint optimizer (pose and depth)
    optim_params = [{
        'params': disp_net.parameters(),
        'lr': args.learning_rate
    }, {
        'params': pose_net.parameters(),
        'lr': args.learning_rate
    }]
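    # note: Adam's (beta1, beta2) coefficients are filled from the
    # --momentum and --beta arguments respectively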
    optim = torch.optim.Adam(optim_params,
                             betas=(args.momentum, args.beta),
                             weight_decay=args.weight_decay)

    # get sequential dataset
    train_set = CPETDataset.CPET(args.dataset_dir, 'train',
                                 args.sequence_length, args.seed)
    val_set = CPETDataset.CPET(args.dataset_dir, 'val', args.sequence_length,
                               args.seed)
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True)

    # custom view synthesis loss and depth smoothness loss
    criterion = ViewSynthesisLoss(device, args.rotation_mode,
                                  args.padding_mode)
    w_synth, w_smooth = args.photo_loss_weight, args.smooth_loss_weight
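    # (assumption) these weights are presumably combined inside
    # train_epoch / validate as:
    #   loss = w_synth * photometric_loss + w_smooth * smoothness_loss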

    # visualizer
    visualizer = Visualizer(exp_path, device)

    # commence experiment
    print(
        "Commencing experiment on 4 training sequences and 1 validation sequence for {} epochs..."
        .format(args.epochs))
    start_time = time.time()

    # track losses and absolute trajectory error
    train_loss = np.zeros((args.epochs, 3))
    val_loss = np.zeros((args.epochs, 3))
    val_ate_mean = np.zeros(args.epochs)
    total_time = np.zeros(args.epochs)

    for epo in range(args.epochs):

        # run training epoch and generate / save random visualizations
        l_train = train_epoch(disp_net, pose_net, train_loader, criterion,
                              optim, w_synth, w_smooth)
        train_loss[epo, :] = l_train[:]
        visualizer.generate_random_visuals(disp_net, pose_net, train_loader,
                                           criterion, args.vis_per_epoch, epo,
                                           'train')

        # run validation epoch, acquire pose estimation metrics, and plot trajectories
        l_val, ate, ate_mean, gt_traj, pred_traj = validate(
            disp_net, pose_net, val_loader, criterion, w_synth, w_smooth)
        val_loss[epo, :] = l_val[:]
        val_ate_mean[epo] = ate_mean

        # visualization of disparity maps, BEV trajectories, and 3D trajectories
        visualizer.generate_random_visuals(disp_net, pose_net, val_loader,
                                           criterion, args.vis_per_epoch, epo,
                                           'val')
        visualizer.generate_trajectory(pred_traj, 'pred', 'Estimated', epo,
                                       'val')
        visualizer.generate_trajectories(gt_traj, pred_traj, "Horns", epo,
                                         'val')
        visualizer.generate_3d_trajectory(gt_traj, pred_traj, "Horns", epo,
                                          'val')
        if epo == 0:
            visualizer.generate_trajectory(gt_traj, 'gt', 'True', epo, 'val')

        total_time[epo] = time.time() - start_time
        print_str = "epo - {}/{} | train_loss - {:.3f} | val_loss - {:.3f} | ".format(
            epo, args.epochs, train_loss[epo, 0], val_loss[epo, 0])
        print_str += "val_ate - {:.3f} | total_time - {}".format(
            ate_mean, datetime.timedelta(seconds=total_time[epo]))
        print(print_str)

        # save models
        if (epo + 1) % args.save_freq == 0:
            model_checkpoint(disp_net, 'disp_net_' + str(epo + 1),
                             checkpoint_path)
            model_checkpoint(pose_net, 'pose_net_' + str(epo + 1),
                             checkpoint_path)

        # save current stats
        np.savetxt(os.path.join(log_path, 'train_loss.txt'), train_loss)
        np.savetxt(os.path.join(log_path, 'val_loss.txt'), val_loss)
        np.savetxt(os.path.join(log_path, 'val_ate_mean.txt'), val_ate_mean)
        np.savetxt(os.path.join(log_path, 'time_log.txt'), total_time)

    # generate metric curves
    generate_curve([train_loss[:, 0], val_loss[:, 0]], ['train', 'val'],
                   'loss', 'Train vs Val Combined Loss', log_path)
    generate_curve([train_loss[:, 1], val_loss[:, 1]], ['train', 'val'],
                   'photometric loss',
                   'Train vs Val Photometric Reconstruction Loss', log_path)
    generate_curve([train_loss[:, 2], val_loss[:, 2]], ['train', 'val'],
                   'depth smooth loss', 'Train vs Val Depth Smoothness Loss',
                   log_path)
    generate_curve([val_ate_mean], ['val'], 'ATE',
                   'Validation Absolute Trajectory Error', log_path)
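

# A minimal sketch of the command-line entry point this training script
# implies. The flag names are inferred from the `args` attributes used in
# main(); the defaults are illustrative placeholders, not the project's.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Train DispNet and PoseNet')
    parser.add_argument('--output-dir', default='experiments')
    parser.add_argument('--exp-name', required=True)
    parser.add_argument('--dataset-dir', required=True)
    parser.add_argument('--disp-net', default=None,
                        help='optional pre-trained disparity weights')
    parser.add_argument('--sequence-length', type=int, default=3)
    parser.add_argument('--batch-size', type=int, default=4)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--learning-rate', type=float, default=1e-4)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--beta', type=float, default=0.999)
    parser.add_argument('--weight-decay', type=float, default=0.0)
    parser.add_argument('--photo-loss-weight', type=float, default=1.0)
    parser.add_argument('--smooth-loss-weight', type=float, default=0.1)
    parser.add_argument('--rotation-mode', default='euler')
    parser.add_argument('--padding-mode', default='zeros')
    parser.add_argument('--vis-per-epoch', type=int, default=2)
    parser.add_argument('--save-freq', type=int, default=10)
    parser.add_argument('--seed', type=int, default=0)
    main(parser.parse_args())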
Example #2
import datetime
import os
import shutil
import time

import numpy as np
import torch

# Project-local imports assumed: DispNet, PoseNet, CPETDataset,
# ViewSynthesisLoss, Visualizer, test. `device` is assumed to be defined at
# module scope as in Example #1.


def main(args):
    exp_path = os.path.join(args.output_dir, args.exp_name)
    log_path = os.path.join(exp_path, 'logs')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if os.path.exists(exp_path):
        print('Warning: Experiment already exists, overwriting it')
        shutil.rmtree(exp_path)

    os.makedirs(log_path)
    print("All experiment outputs will be saved within:", exp_path)

    # set seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # get models, load pre-trained disparity network and pose network
    disp_net = DispNet.DispNet(1).to(device)
    disp_net.load_state_dict(torch.load(args.disp_net, map_location='cpu'))
    disp_net.eval()
    pose_net = PoseNet.PoseNet(1, args.sequence_length - 1).to(device)
    pose_net.load_state_dict(torch.load(args.pose_net, map_location='cpu'))
    pose_net.eval()

    # get sequence dataset
    test_set = CPETDataset.CPET(args.dataset_dir, args.run_sequence,
                                args.sequence_length, args.seed)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True)

    # custom view synthesis loss and depth smoothness loss
    criterion = ViewSynthesisLoss(device, args.rotation_mode,
                                  args.padding_mode)
    w_synth, w_smooth = args.photo_loss_weight, args.smooth_loss_weight

    # visualizer
    visualizer = Visualizer(exp_path, device)

    print("Commencing testing on {} sequence...".format(args.run_sequence))

    # run test epoch, acquire pose estimation metrics (ATE) from Horn's Method and Umeyama Method
    start_time = time.time()
    l_test, horn, umeyama, rate = test(disp_net, pose_net, test_loader,
                                       criterion, visualizer, args.skip_freq,
                                       w_synth, w_smooth)
    total_time = time.time() - start_time
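
    # horn / umeyama are assumed to be tuples of
    # (per-frame ATE, mean ATE, ground-truth trajectory, predicted trajectory),
    # matching how they are indexed below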

    # the visualizer expects an epoch index; use 0 for this single test pass
    epo = 0

    # visualize estimated and ground truth trajectories in BEV / 3D - Horn's alignment
    visualizer.generate_trajectories(horn[2], horn[3], "Horns", epo,
                                     args.run_sequence)
    visualizer.generate_3d_trajectory(horn[2], horn[3], "Horns", epo,
                                      args.run_sequence)

    # visualize estimated and ground truth trajectories in BEV / 3D - Umeyama alignment
    visualizer.generate_trajectories(umeyama[2], umeyama[3], "Umeyama", epo,
                                     args.run_sequence)
    visualizer.generate_3d_trajectory(umeyama[2], umeyama[3], "Umeyama", epo,
                                      args.run_sequence)

    # visualize trajectories independently - Umeyama
    visualizer.generate_trajectory(umeyama[2], 'gt', 'True', epo,
                                   args.run_sequence)
    visualizer.generate_trajectory(umeyama[3], 'pred', 'Estimated', epo,
                                   args.run_sequence)

    print_str = "ATE (Umeyama) - {:.3f} | ATE (Horn's) - {:.3f}".format(
        umeyama[1], horn[1])
    print_str += " | view synth loss - {:.3f} | smooth loss - {:.3f}".format(
        l_test[1], l_test[2])
    print_str += " | Hz - {:.5f} | total time - {}".format(
        rate, datetime.timedelta(seconds=total_time))
    print(print_str)

    # save current stats
    np.savetxt(os.path.join(log_path, '{}_loss.txt'.format(args.run_sequence)),
               l_test)
    np.savetxt(
        os.path.join(log_path,
                     '{}_ate_mean_horn.txt'.format(args.run_sequence)),
        np.array([horn[1]]))
    np.savetxt(
        os.path.join(log_path,
                     '{}_ate_full_horn.txt'.format(args.run_sequence)),
        horn[0])
    np.savetxt(
        os.path.join(log_path,
                     '{}_ate_mean_umeyama.txt'.format(args.run_sequence)),
        np.array([umeyama[1]]))
    np.savetxt(
        os.path.join(log_path,
                     '{}_ate_full_umeyama.txt'.format(args.run_sequence)),
        umeyama[0])
    np.savetxt(os.path.join(log_path, 'time_log.txt'), np.array([total_time]))
    print('-----')
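

# A corresponding sketch of the test script's entry point, again inferred
# from the `args` attributes referenced above; defaults are illustrative
# placeholders, not the project's actual values.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate DispNet and PoseNet')
    parser.add_argument('--output-dir', default='experiments')
    parser.add_argument('--exp-name', required=True)
    parser.add_argument('--dataset-dir', required=True)
    parser.add_argument('--run-sequence', default='test')
    parser.add_argument('--disp-net', required=True)
    parser.add_argument('--pose-net', required=True)
    parser.add_argument('--sequence-length', type=int, default=3)
    parser.add_argument('--batch-size', type=int, default=4)
    parser.add_argument('--photo-loss-weight', type=float, default=1.0)
    parser.add_argument('--smooth-loss-weight', type=float, default=0.1)
    parser.add_argument('--rotation-mode', default='euler')
    parser.add_argument('--padding-mode', default='zeros')
    parser.add_argument('--skip-freq', type=int, default=1)
    parser.add_argument('--seed', type=int, default=0)
    main(parser.parse_args())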