Example 1
import numpy as np
from transforms3d.euler import mat2euler  # assumed source of mat2euler


def compute_pose_vec(transform_mat):
    """
    Given the transformation matrix, computes the 6-DoF pose.

    :param transform_mat: 1x12 transformation vector [R | t]
    :return: 6-DoF pose vector
    """

    rotation = np.reshape(transform_mat[[0, 1, 2, 4, 5, 6, 8, 9, 10]], [3, 3])
    euler_angles = list(mat2euler(rotation))
    translation = transform_mat[[3, 7, 11]]

    # Concatenate the Euler angles and the translation into one 6-vector
    pose = np.asarray(euler_angles + list(translation))

    return pose
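
A minimal usage sketch (the input values are hypothetical): a row-major flattening of the identity rotation with translation (1, 2, 3) should give zero Euler angles followed by the translation.

row = np.array([1.0, 0.0, 0.0, 1.0,
                0.0, 1.0, 0.0, 2.0,
                0.0, 0.0, 1.0, 3.0])
print(compute_pose_vec(row))  # -> [0. 0. 0. 1. 2. 3.]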
Example 2
    K_actual = np.array([[2448.0, 0, 1253.0], [0, 2438.0, 986.0], [0, 0, 1.0]])
    print()
    print("Actual Matrix:\n", K_actual)

    # Part D: Estimate the angle between the box and floor.
    floor_vanishing1 = v1
    floor_vanishing2 = v2
    box_vanishing1 = v3
    box_vanishing2 = compute_vanishing_point(
        np.array([[1094, 1340], [1774, 1086], [1080, 598], [1840, 478]]))
    angle = compute_angle_between_planes([floor_vanishing1, floor_vanishing2],
                                         [box_vanishing1, box_vanishing2],
                                         K_actual)
    print()
    print("Angle between floor and box:", angle)

    # Part E: Compute the rotation matrix between the two cameras.
    rotation_matrix = compute_rotation_matrix_between_cameras(
        np.array([v1, v2, v3]), np.array([v1b, v2b, v3b]), K_actual)
    print()
    print("Rotation between two cameras:\n", rotation_matrix)
    z, y, x = mat2euler(rotation_matrix)
    x_angle = math.degrees(x)
    y_angle = math.degrees(y)
    z_angle = math.degrees(z)
    print()
    print("Angle around z-axis (pointing out of camera): %f degrees" % z_angle)
    print("Angle around y-axis (pointing vertically): %f degrees" % y_angle)
    print("Angle around x-axis (pointing horizontally): %f degrees" % x_angle)
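
The helpers compute_vanishing_point and compute_angle_between_planes are not shown here; their contracts are inferred from the calls above. A minimal sketch of plausible implementations under the standard homogeneous-coordinates formulation (an assumption, not the reference code):

def compute_vanishing_point(points):
    # Intersect the two image lines (p0, p1) and (p2, p3): in homogeneous
    # coordinates, a line through two points and the intersection of two
    # lines are both cross products.
    p = np.concatenate([np.asarray(points, dtype=np.float64),
                        np.ones((4, 1))], axis=1)
    v = np.cross(np.cross(p[0], p[1]), np.cross(p[2], p[3]))
    return v[:2] / v[2]


def compute_angle_between_planes(vps_a, vps_b, K):
    # The vanishing line of a plane is the cross product of two of its
    # vanishing points; the plane normal in camera coordinates is K^T l,
    # and the angle between planes is the angle between their normals.
    def plane_normal(vps):
        l = np.cross(np.append(vps[0], 1.0), np.append(vps[1], 1.0))
        n = K.T.dot(l)
        return n / np.linalg.norm(n)

    n1, n2 = plane_normal(vps_a), plane_normal(vps_b)
    return np.degrees(np.arccos(np.clip(abs(n1.dot(n2)), -1.0, 1.0)))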
Example 3
# Assumed context (not shown in the snippet): torch, torch.nn.functional as F,
# numpy as np, time, torch.autograd.Variable, and the project helpers
# AverageMeter, compute_trans_pose and mat2euler (a torch implementation)
# are imported at module level.
def validate(args, val_loader, feature_ext, rec_feat, rec_imu, pose_net,
             fc_flownet, selectfusion, temp, epoch, fusion_mode):

    batch_time = AverageMeter()

    # switch to evaluate mode
    feature_ext.eval()
    rec_feat.eval()
    pose_net.eval()
    rec_imu.eval()
    fc_flownet.eval()
    selectfusion.eval()

    end = time.time()

    aver_loss = 0
    aver_pose_loss = 0
    aver_euler_loss = 0
    aver_n = 0

    for i, (imgs, imus, poses) in enumerate(val_loader):

        # skip the final, incomplete batch
        if len(imgs[0]) != args.batch_size:
            continue

        rec_feat.module.hidden = rec_feat.module.init_hidden()

        pose_loss = 0
        euler_loss = 0

        # compute output
        for j in range(0, len(imgs) - 1):

            tgt_img = imgs[j + 1]
            ref_img = imgs[j]
            imu = imus[j]

            if torch.cuda.is_available():
                tgt_img_var = Variable(tgt_img.cuda())
                ref_img_var = Variable(ref_img.cuda())
                imu_var = Variable(imu.transpose(0, 1).cuda())
            else:
                tgt_img_var = Variable(tgt_img)
                ref_img_var = Variable(ref_img)
                imu_var = Variable(imu.transpose(0, 1))

            with torch.no_grad():

                rec_imu.module.hidden = rec_imu.module.init_hidden()

                raw_feature_vision = feature_ext(tgt_img_var, ref_img_var)

                feature_vision = fc_flownet(raw_feature_vision)

                if fusion_mode == 0:
                    # vision-only features
                    feature_weighted = feature_vision
                else:
                    # extract imu features
                    feature_imu = rec_imu(imu_var)

                    # concatenate visual and imu features
                    feature = torch.cat([feature_vision, feature_imu], 2)

                    if fusion_mode == 1:
                        # direct fusion: plain concatenation, no reweighting
                        feature_weighted = feature
                    else:
                        # mode 2 uses a deterministic soft mask; any other
                        # mode draws a stochastic hard mask at temperature temp
                        if fusion_mode == 2:
                            mask = selectfusion(feature)
                        else:
                            mask = selectfusion(feature, temp)

                        feature_weighted = feature * mask

                # recurrent features
                feature_new = rec_feat(feature_weighted)

                pose = pose_net(feature_new)

            # compute pose err
            pose = pose.view(-1, 6)

            trans_pose = compute_trans_pose(
                poses[j].cpu().data.numpy().astype(np.float64),
                poses[j + 1].cpu().data.numpy().astype(np.float64))

            # put the ground truth on the same device as the prediction
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            pose_truth = torch.FloatTensor(trans_pose[:, :, -1]).to(device)
            rot_mat = torch.FloatTensor(trans_pose[:, :, :3]).to(device)

            euler = mat2euler(rot_mat)

            euler_loss += F.mse_loss(euler, pose[:, 3:])

            pose_loss += F.mse_loss(pose_truth, pose[:, :3])

        euler_loss /= (len(imgs) - 1)
        pose_loss /= (len(imgs) - 1)

        # weight rotation error 100x relative to translation
        loss = pose_loss + euler_loss * 100

        aver_pose_loss += pose_loss.item()
        aver_loss += loss.item()

        aver_euler_loss += euler_loss.item()

        aver_n += 1

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Val: Epoch [{}/{}] Step [{}/{}]: Loss {:.5} '
                  'Pose {:.5} Euler {:.5}'.format(epoch + 1,
                                                  args.epochs, i + 1,
                                                  len(val_loader), loss.item(),
                                                  pose_loss.item(),
                                                  euler_loss.item()))

    aver_loss /= aver_n
    aver_pose_loss /= aver_n
    aver_euler_loss /= aver_n
    print(
        'Val: {}, Average_Loss {:.5} Pose_loss {:.5} Euler_loss {:.5}'.format(
            epoch + 1, aver_loss, aver_pose_loss, aver_euler_loss))

    return aver_loss, aver_pose_loss, aver_euler_loss
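
The objective above weights the Euler-angle term 100x relative to the translation term; a toy standalone check of the scaling (the loss values are hypothetical):

pose_loss, euler_loss = 0.04, 0.0007  # hypothetical per-batch MSE values
loss = pose_loss + euler_loss * 100   # 0.04 + 0.07 = 0.11
print(loss)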
Example 4
def test(args, test_loader, feature_ext, rec_feat, rec_imu, pose_net,
         fc_flownet, selectfusion, temp, epoch, fusion_mode):

    batch_time = AverageMeter()

    # switch to evaluate mode
    feature_ext.eval()
    rec_feat.eval()
    pose_net.eval()
    rec_imu.eval()
    fc_flownet.eval()
    selectfusion.eval()

    end = time.time()

    aver_loss = 0
    aver_pose_loss = 0
    aver_euler_loss = 0
    aver_n = 0

    for i, (imgs, imus, poses) in enumerate(test_loader):

        # sequence id for the i-th test trajectory; results are saved per sequence
        if i == 0:
            k = 5
        elif i == 1:
            k = 7
        elif i == 2:
            k = 10

        result = []
        truth_pose = []
        truth_euler = []

        rec_feat.module.hidden = rec_feat.module.init_test_hidden()

        pose_loss = 0
        euler_loss = 0

        # compute output
        for j in range(0, len(imgs) - 1):

            tgt_img = imgs[j + 1]
            ref_img = imgs[j]
            imu = imus[j]

            if torch.cuda.is_available():
                tgt_img_var = Variable(tgt_img.cuda())
                ref_img_var = Variable(ref_img.cuda())
                imu_var = Variable(imu.transpose(0, 1).cuda())
            else:
                tgt_img_var = Variable(tgt_img)
                ref_img_var = Variable(ref_img)
                imu_var = Variable(imu.transpose(0, 1))

            with torch.no_grad():

                rec_imu.module.hidden = rec_imu.module.init_test_hidden()

                raw_feature_vision = feature_ext(tgt_img_var, ref_img_var)

                feature_vision = fc_flownet(raw_feature_vision)

                if fusion_mode == 0:
                    # vision-only features
                    feature_weighted = feature_vision
                else:
                    # extract imu features
                    feature_imu = rec_imu(imu_var)

                    # concatenate visual and imu features
                    feature = torch.cat([feature_vision, feature_imu], 2)

                    if fusion_mode == 1:
                        # direct fusion: plain concatenation, no reweighting
                        feature_weighted = feature
                    else:
                        # mode 2 uses a deterministic soft mask; any other
                        # mode draws a stochastic hard mask at temperature temp
                        if fusion_mode == 2:
                            mask = selectfusion(feature)
                        else:
                            mask = selectfusion(feature, temp)

                        feature_weighted = feature * mask

                # recurrent features
                feature_new = rec_feat(feature_weighted)

                pose = pose_net(feature_new)

            # compute pose err
            pose = pose.view(-1, 6)

            if len(result) == 0:
                result = np.copy(pose.cpu().detach().numpy())
            else:
                result = np.concatenate((result, pose.cpu().detach().numpy()),
                                        axis=0)

            trans_pose = compute_trans_pose(
                poses[j].cpu().data.numpy().astype(np.float64),
                poses[j + 1].cpu().data.numpy().astype(np.float64))

            # put the ground truth on the same device as the prediction
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            pose_truth = torch.FloatTensor(trans_pose[:, :, -1]).to(device)
            rot_mat = torch.FloatTensor(trans_pose[:, :, :3]).to(device)

            euler = mat2euler(rot_mat)

            euler_loss += F.mse_loss(euler, pose[:, 3:])

            pose_loss += F.mse_loss(pose_truth, pose[:, :3])

            if len(truth_pose) == 0:
                truth_pose = np.copy(pose_truth.cpu().detach().numpy())
            else:
                truth_pose = np.concatenate(
                    (truth_pose, pose_truth.cpu().detach().numpy()), axis=0)

            if len(truth_euler) == 0:
                truth_euler = np.copy(euler.cpu().detach().numpy())
            else:
                truth_euler = np.concatenate(
                    (truth_euler, euler.cpu().detach().numpy()), axis=0)

        euler_loss /= (len(imgs) - 1)
        pose_loss /= (len(imgs) - 1)

        loss = pose_loss + euler_loss * 100

        aver_pose_loss += pose_loss.item()
        aver_loss += loss.item()

        aver_euler_loss += euler_loss.item()

        aver_n += 1

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        print('Test Seq{}: Epoch [{}/{}] Step [{}/{}]: Loss {:.5} '
              'Pose {:.5} Euler {:.5}'.format(k, epoch + 1, args.epochs, i + 1,
                                              len(test_loader), loss.item(),
                                              pose_loss.item(),
                                              euler_loss.item()))

        file_name = 'results/result_seq{}_{}.csv'.format(k, epoch)
        np.savetxt(file_name, result, delimiter=',')

        file_name = 'results/truth_pose_seq{}_{}.csv'.format(k, epoch)
        np.savetxt(file_name, truth_pose, delimiter=',')

        file_name = 'results/truth_euler_seq{}_{}.csv'.format(k, epoch)
        np.savetxt(file_name, truth_euler, delimiter=',')

    aver_loss /= aver_n
    aver_pose_loss /= aver_n
    aver_euler_loss /= aver_n
    print(
        'Test Average: {}, Average_Loss {:.5} Pose_loss {:.5} Euler_loss {:.5}'
        .format(epoch + 1, aver_loss, aver_pose_loss, aver_euler_loss))

    return
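
The CSV files written by test can be read straight back with numpy for plotting or metric computation; a short sketch (the sequence id and epoch in the paths are placeholders):

import numpy as np

pred = np.loadtxt('results/result_seq5_0.csv', delimiter=',')       # N x 6 predicted poses
truth = np.loadtxt('results/truth_pose_seq5_0.csv', delimiter=',')  # N x 3 translations
print('translation RMSE:', np.sqrt(((pred[:, :3] - truth) ** 2).mean()))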
Example 5
def train(args, train_loader, feature_ext, rec_feat, rec_imu, pose_net,
          fc_flownet, selectfusion, optimizer, epoch, fusion_mode):

    global n_iter
    batch_time = AverageMeter()
    data_time = AverageMeter()

    epoch_size = args.epoch_size

    # switch to train mode; the visual feature extractor stays frozen
    # (its forward pass runs under torch.no_grad() below)
    feature_ext.eval()
    rec_feat.train()
    rec_imu.train()
    pose_net.train()
    fc_flownet.train()
    selectfusion.train()

    end = time.time()

    aver_loss = 0
    aver_pose_loss = 0
    aver_euler_loss = 0
    aver_n = 0

    # annealing schedule for the hard fusion-mask temperature
    temp_min = 0.5
    ANNEAL_RATE = 0.0006
    temp = 1.0

    for i, (imgs, imus, poses) in enumerate(train_loader):

        # skip the final, incomplete batch
        if len(imgs[0]) != args.batch_size:
            continue

        # measure data loading time
        data_time.update(time.time() - end)

        rec_feat.module.hidden = rec_feat.module.init_hidden()

        pose_loss = 0
        euler_loss = 0

        # compute output
        for j in range(0, len(imgs) - 1):

            tgt_img = imgs[j + 1]
            ref_img = imgs[j]

            imu = imus[j]

            if torch.cuda.is_available():
                tgt_img_var = Variable(tgt_img.cuda())
                ref_img_var = Variable(ref_img.cuda())
                imu_var = Variable(imu.transpose(0, 1).cuda())
            else:
                tgt_img_var = Variable(tgt_img)
                ref_img_var = Variable(ref_img)
                imu_var = Variable(imu.transpose(0, 1))

            rec_imu.module.hidden = rec_imu.module.init_hidden()

            with torch.no_grad():

                raw_feature_vision = feature_ext(tgt_img_var, ref_img_var)

            feature_vision = fc_flownet(raw_feature_vision)

            if fusion_mode == 0:
                # vision-only features
                feature_weighted = feature_vision
            else:
                # extract imu features
                feature_imu = rec_imu(imu_var)

                # concatenate visual and imu features
                feature = torch.cat([feature_vision, feature_imu], 2)

                if fusion_mode == 1:
                    # direct fusion: plain concatenation, no reweighting
                    feature_weighted = feature
                else:
                    # mode 2 uses a deterministic soft mask; any other
                    # mode draws a stochastic hard mask at temperature temp
                    if fusion_mode == 2:
                        mask = selectfusion(feature)
                    else:
                        mask = selectfusion(feature, temp)

                    feature_weighted = feature * mask

            # recurrent features
            feature_new = rec_feat(feature_weighted)

            # pose net
            pose = pose_net(feature_new)

            # compute pose err
            pose = pose.view(-1, 6)

            trans_pose = compute_trans_pose(
                poses[j].cpu().data.numpy().astype(np.float64),
                poses[j + 1].cpu().data.numpy().astype(np.float64))

            # put the ground truth on the same device as the prediction
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            pose_truth = torch.FloatTensor(trans_pose[:, :, -1]).to(device)
            rot_mat = torch.FloatTensor(trans_pose[:, :, :3]).to(device)

            euler = mat2euler(rot_mat)

            euler_loss += F.mse_loss(euler, pose[:, 3:])

            pose_loss += F.mse_loss(pose_truth, pose[:, :3])

        euler_loss /= (len(imgs) - 1)
        pose_loss /= (len(imgs) - 1)

        loss = pose_loss + euler_loss * 100

        aver_loss += loss.item()
        aver_pose_loss += pose_loss.item()
        aver_euler_loss += euler_loss.item()

        aver_n += 1

        # compute gradient and do Adam step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:

            print(
                'Train: Epoch [{}/{}] Step [{}/{}]: Time {} Data {} Loss {:.5} '
                'Pose {:.5} Euler {:.5}'.format(epoch + 1, args.epochs, i + 1,
                                                epoch_size, batch_time,
                                                data_time, loss.item(),
                                                pose_loss.item(),
                                                euler_loss.item()))

        # decrease hard mask temperature
        if i % 10 == 0:
            temp = np.maximum(temp * np.exp(-ANNEAL_RATE * i), temp_min)

        if i >= epoch_size - 1:
            break

        n_iter += 1

    aver_loss /= aver_n
    aver_pose_loss /= aver_n
    aver_euler_loss /= aver_n

    print('Train: {}, Average_Loss {:.5} pose_loss {:.5} euler_loss {:.5}'.
          format(epoch + 1, aver_loss, aver_pose_loss, aver_euler_loss))

    return aver_loss, aver_pose_loss, aver_euler_loss, temp
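
The schedule in train multiplies temp by exp(-ANNEAL_RATE * i) every 10 steps and floors it at temp_min; a standalone sketch of how quickly it decays (the step count is hypothetical):

import numpy as np

temp, temp_min, ANNEAL_RATE = 1.0, 0.5, 0.0006
for i in range(0, 2000, 10):
    temp = np.maximum(temp * np.exp(-ANNEAL_RATE * i), temp_min)
print(temp)  # the 0.5 floor is reached after roughly 150 steps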