Example #1
def procrustes_vs_regular_distances():
    vec_pair_procrustes_dist = dict()
    vec_pair_procrustes_ratio = dict()
    for i in range(0, len(en_vecsets)):
        #en_vecs = vec_subdict("en", en_vecsets[i])
        #en_mat = build_mat_from_dict(en_vecs)
        for j in range(0, len(fr_vecsets)):
            #fr_vecs = vec_subdict("fr", fr_vecsets[j])
            #fr_mat = build_mat_from_dict(fr_vecs)
            en_mat, fr_mat, index_map = build_parallel_mats_from_dicts(
                en_vecsets[i], fr_vecsets[j], translation_dict)
            # frobenius distance between matrices
            original_dist = sqrt(pow(en_mat - fr_mat, 2).sum())
            d, Z, t = procrustes(en_mat, fr_mat)
            t_mat = np.dot(en_mat,
                           t['rotation']) * t['scale'] + t['translation']
            new_dist = sqrt(pow(t_mat - fr_mat, 2).sum())
            vec_pair_procrustes_dist[(en_vec_strs[i],
                                      fr_vec_strs[j])] = new_dist
            vec_pair_procrustes_ratio[(
                en_vec_strs[i], fr_vec_strs[j])] = original_dist / new_dist
    return sorted(vec_pair_procrustes_dist,
                  key=vec_pair_procrustes_dist.get,
                  reverse=False), vec_pair_procrustes_dist, sorted(
                      vec_pair_procrustes_ratio,
                      key=vec_pair_procrustes_ratio.get,
                      reverse=True), vec_pair_procrustes_ratio
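
The ratio stored above compares the raw Frobenius distance between the two embedding matrices with the distance left after the best rigid fit. A minimal self-contained sketch of the same idea, using SciPy's orthogonal_procrustes (rotation only, no scale or translation) in place of the project's own procrustes helper; the matrices here are synthetic:

import numpy as np
from scipy.linalg import orthogonal_procrustes

rng = np.random.default_rng(0)
en_mat = rng.normal(size=(100, 50))
# fr_mat is a rotated copy of en_mat plus a little noise, so alignment should help
Q, _ = np.linalg.qr(rng.normal(size=(50, 50)))
fr_mat = en_mat @ Q + 0.01 * rng.normal(size=(100, 50))

original_dist = np.linalg.norm(en_mat - fr_mat)      # Frobenius distance before alignment
R, _ = orthogonal_procrustes(en_mat, fr_mat)         # rotation minimizing ||en_mat @ R - fr_mat||_F
new_dist = np.linalg.norm(en_mat @ R - fr_mat)
print(original_dist / new_dist)                      # large ratio: alignment removed most of the distance
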
def procrustes_filling(s1, s2, N=50, scale=250):
    im2_res, im2_hat, proc_dist = procrustes.procrustes(s1, s2, fullout=True)
    im2_matrix = shape_to_filled_image(im2_res, N=N, scale=scale)
    im2hat_matrix = shape_to_filled_image(im2_hat, N=N, scale=scale)
    fill_dist = np.linalg.norm(im2_matrix - im2hat_matrix)
    fill_dist = fill_dist / N
    return fill_dist
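
procrustes_filling compares two Procrustes-aligned contours by rasterizing each into a filled N x N image and taking the scaled Frobenius norm of the difference. The rasterization lives in the project's shape_to_filled_image helper; a rough illustrative stand-in for that step, using matplotlib.path.Path on toy square contours (fill_contour is a hypothetical name, not the project's API):

import numpy as np
from matplotlib.path import Path

def fill_contour(shape_pts, N=50, scale=250):
    # pixel centres expressed in the same units as the contour points
    ii, jj = np.meshgrid(np.arange(N), np.arange(N), indexing='ij')
    grid = np.column_stack([jj.ravel(), ii.ravel()]) / scale
    closed = np.vstack([shape_pts, shape_pts[:1]])   # close the polygon explicitly
    inside = Path(closed).contains_points(grid)
    return inside.reshape(N, N).astype(float)

# two slightly different squares, assumed already aligned
s1 = np.array([[0.02, 0.02], [0.15, 0.02], [0.15, 0.15], [0.02, 0.15]])
s2 = s1 * 1.1
fill_dist = np.linalg.norm(fill_contour(s1) - fill_contour(s2)) / 50
print(fill_dist)
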
def mnist_eucl_proc(digits, num_points, num_avg):
    """Evaluate kmeans accuracy """

    eucl_dist = lambda a, b: np.linalg.norm(a-b)
    proc_dist1 = lambda a, b: procrustes.procrustes(a, b)
    proc_dist2 = lambda a, b: procrustes.procrustes2(a, b)
    proc_dist3 = lambda a, b: procrustes.procrustes3(a, b, 50)
    
    k = len(digits)
    a1, a2, a3, a4, a5 = [], [], [], [], [] 
    
    for i in range(num_avg):
        originals, shapes, ext_shapes, labels = pick_data([num_points]*k, 
                                                            digits)
        
        l1, _, _, _ = kmeans.kmeans_(k, originals, eucl_dist)
        l2, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist1)
        l3, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist3)
        l4, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist1)
        l5, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist2)
        
        a1.append(kmeans.accuracy(labels, l1))
        a2.append(kmeans.accuracy(labels, l2))
        a3.append(kmeans.accuracy(labels, l3))
        a4.append(kmeans.accuracy(labels, l4))
        a5.append(kmeans.accuracy(labels, l5))
    
    print "d_E = %f" % np.mean(a1)
    print "d_{P_0} = %f" % np.mean(a2)
    print "d_{P_3} = %f" % np.mean(a3)
    print "d_{P} = %f" % np.mean(a4)
    print "d_{P_l} = %f" % np.mean(a5)
Example #4
def mnist_eucl_proc(digits, num_points, num_avg):
    """Evaluate kmeans accuracy """

    eucl_dist = lambda a, b: np.linalg.norm(a - b)
    proc_dist1 = lambda a, b: procrustes.procrustes(a, b)
    proc_dist2 = lambda a, b: procrustes.procrustes2(a, b)
    proc_dist3 = lambda a, b: procrustes.procrustes3(a, b, 50)

    k = len(digits)
    a1, a2, a3, a4, a5 = [], [], [], [], []

    for i in range(num_avg):
        originals, shapes, ext_shapes, labels = pick_data([num_points] * k,
                                                          digits)

        l1, _, _, _ = kmeans.kmeans_(k, originals, eucl_dist)
        l2, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist1)
        l3, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist3)
        l4, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist1)
        l5, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist2)

        a1.append(kmeans.accuracy(labels, l1))
        a2.append(kmeans.accuracy(labels, l2))
        a3.append(kmeans.accuracy(labels, l3))
        a4.append(kmeans.accuracy(labels, l4))
        a5.append(kmeans.accuracy(labels, l5))

    print "d_E = %f" % np.mean(a1)
    print "d_{P_0} = %f" % np.mean(a2)
    print "d_{P_3} = %f" % np.mean(a3)
    print "d_{P} = %f" % np.mean(a4)
    print "d_{P_l} = %f" % np.mean(a5)
def closest_transform(vec_dict_en, vec_dict_fr, translation):
	en_mat, fr_mat, index_map = build_parallel_mats_from_dicts(vec_dict_en, vec_dict_fr, translation)
	# Z is transformed fr_mat
	# transform is a dict specifying the transformation
	d, Z, transform = procrustes(en_mat, fr_mat)
	print "Normalized SSE: " + str(d) + "\n"
	return transform
Example #6
def rigid_align(coords_pred,
                coords_true,
                *,
                joint_validity_mask=None,
                scale_align=False,
                reflection_align=False):
    """Returns the predicted coordinates after rigid alignment to the ground truth."""

    if joint_validity_mask is None:
        joint_validity_mask = np.ones_like(coords_pred[..., 0], dtype=np.bool)

    valid_coords_pred = coords_pred[joint_validity_mask]
    valid_coords_true = coords_true[joint_validity_mask]
    try:
        d, Z, tform = procrustes.procrustes(
            valid_coords_true,
            valid_coords_pred,
            scaling=scale_align,
            reflection='best' if reflection_align else False)
    except np.linalg.LinAlgError:
        logging.error(
            'Cannot do Procrustes alignment, returning original prediction.')
        return coords_pred

    T = tform['rotation']
    b = tform['scale']
    c = tform['translation']
    return b * coords_pred @ T + c
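
rigid_align above delegates the actual fit to a project-specific procrustes module. For reference, the same rotation-plus-translation (optionally scale) fit can be written directly from the SVD of the cross-covariance; the sketch below is a generic Kabsch/Umeyama-style alignment on synthetic 3-D joints, with kabsch_align and the toy data purely illustrative:

import numpy as np

def kabsch_align(pred, true, allow_scale=False):
    # centre both point sets
    mu_p, mu_t = pred.mean(axis=0), true.mean(axis=0)
    P, T = pred - mu_p, true - mu_t
    # rotation from the SVD of the cross-covariance, with reflections removed
    U, S, Vt = np.linalg.svd(T.T @ P)
    D = np.eye(3)
    D[2, 2] = np.sign(np.linalg.det(U @ Vt))
    R = U @ D @ Vt
    s = (S * np.diag(D)).sum() / (P ** 2).sum() if allow_scale else 1.0
    return s * P @ R.T + mu_t

rng = np.random.default_rng(1)
true = rng.normal(size=(17, 3))                      # e.g. 17 body joints
angle = np.deg2rad(30)
Rz = np.array([[np.cos(angle), -np.sin(angle), 0.0],
               [np.sin(angle),  np.cos(angle), 0.0],
               [0.0, 0.0, 1.0]])
pred = true @ Rz.T + np.array([0.5, -0.2, 1.0])      # rotated and translated copy of the ground truth
print(np.abs(kabsch_align(pred, true) - true).max()) # ~0 after alignment
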
Example #7
def build_model(shape_data, app_data, triangulation=None):
    """Builds AAM using shape and appearence data.
    """
    shape_triangles = zeros((0, 3), dtype=uint32)
    if app_data.dtype != float:
        print('Warning: appearance data not in floating point format')
        app_data = double(app_data) / 255
    if triangulation:
        shape_triangles = triangulation
    ns = shape_data.shape[0]  # number of shapes
    np = shape_data.shape[1]  # number of points
    nc = app_data.shape[3]  # number of colors
    # initially we use first shape instance as a mean
    mean_shape = shape_data[0, :, :]
    reference_shape = mean_shape
    aligned_data = shape_data  # matrix containing aligned shapes
    for it in range(100):
        for i in range(ns):
            d, aligned, t = procrustes(reference_shape, aligned_data[i, :, :])
            aligned_data[i, :, :] = aligned
        new_mean_shape = aligned_data.mean(axis=0)
        d, mean_shape, t = procrustes(reference_shape, new_mean_shape)
    mean_shape = aligned_data.mean(axis=0)
    # determine region of interest
    mini = mean_shape[:, 0].min()
    minj = mean_shape[:, 1].min()
    maxi = mean_shape[:, 0].max()
    maxj = mean_shape[:, 1].max()
    # place the origin in an upper left corner of bounding box
    mean_shape = mean_shape - [mini, minj] + 1
    # determine model width and height, add 1 pixel offset for gradient
    modelw = ceil(maxj - minj + 3)
    modelh = ceil(maxi - mini + 3)
    aam = AAM()
    aam.s0 = mean_shape.flatten()
    shape_matrix = aligned_data.reshape(ns, 2 * np) - aam.s0
    del aligned_data
    # print(shape_matrix[0, :3])
    pc, eiv = pca(shape_matrix)
    del shape_matrix
    aam.shape_eiv = eiv
    # Build the basis for the global shape transform, we do it here because
    # they are used to orthonormalize the shape principal vectors
    # It is done differently to the paper as we're using a different coordinate
    # frame. Here u -> i, v -> j
    s1_star = aam.s0
Example #8
def closest_transform(vec_dict_en, vec_dict_fr, translation):
    en_mat, fr_mat, index_map = build_parallel_mats_from_dicts(
        vec_dict_en, vec_dict_fr, translation)
    # Z is transformed fr_mat
    # transform is a dict specifying the transformation
    d, Z, transform = procrustes(en_mat, fr_mat)
    print "Normalized SSE: " + str(d) + "\n"
    return transform
Example #9
def build_model(shape_data, app_data, triangulation=None):
    """Builds AAM using shape and appearence data.
    """
    shape_triangles = zeros((0, 3), dtype=uint32)
    if app_data.dtype != float:
        print('Warning: appearance data not in floating point format')
        app_data = double(app_data) / 255
    if triangulation:
        shape_triangles = triangulation
    ns = shape_data.shape[0]            # number of shapes
    np = shape_data.shape[1]            # number of points
    nc = app_data.shape[3]              # number of colors
    # initially we use first shape instance as a mean
    mean_shape = shape_data[0, :, :]
    reference_shape = mean_shape
    aligned_data = shape_data           # matrix containing aligned shapes
    for it in range(100):
        for i in range(ns):
            d, aligned, t = procrustes(reference_shape, aligned_data[i, :, :])
            aligned_data[i, :, :] = aligned
        new_mean_shape = aligned_data.mean(axis=0)
        d, mean_shape, t = procrustes(reference_shape, new_mean_shape)
    mean_shape = aligned_data.mean(axis=0)
    # determine region of interest
    mini = mean_shape[:, 0].min()
    minj = mean_shape[:, 1].min()
    maxi = mean_shape[:, 0].max()
    maxj = mean_shape[:, 1].max()
    # place the origin in an upper left corner of bounding box
    mean_shape = mean_shape - [mini, minj] + 1
    # determine model width and height, add 1 pixel offset for gradient
    modelw = ceil(maxj - minj + 3)
    modelh = ceil(maxi - mini + 3)
    aam = AAM()
    aam.s0 = mean_shape.flatten()
    shape_matrix = aligned_data.reshape(ns, 2*np) - aam.s0
    del aligned_data
    # print(shape_matrix[0, :3])
    pc, eiv = pca(shape_matrix) 
    del shape_matrix
    aam.shape_eiv = eiv
    # Build the basis for the global shape transform, we do it here because
    # they are used to orthonormalize the shape principal vectors
    # It is done differently to the paper as we're using a different coordinate
    # frame. Here u -> i, v -> j
    s1_star = aam.s0
def structure_from_motion(point_view_matrix,
                          block_size,
                          eliminate_affinity=False):
    """
    
    :param point_view_matrix: 
    :param block_size:
    :param eliminate_affinity
    :return: 
    """
    m, n = int(point_view_matrix.shape[0] / 2), point_view_matrix.shape[1]
    print("SFM starting\n.........\n#Images: {}, #Points: {}".format(m, n))

    model = np.zeros((n, 3))
    for i in range(m - (block_size - 1)):
        pvm_rows = point_view_matrix[i:i + 2 * block_size, ...]
        dense_idx = np.all(pvm_rows, axis=0)
        world_idx = np.all(model, axis=1)

        if np.any(~world_idx[dense_idx]):
            D = pvm_rows[:, dense_idx]
            D = D - D.mean(axis=1)[:, None]

            U, W, Vt = np.linalg.svd(D)
            V = Vt.T

            W = np.sqrt(np.diag(W)[:3, :3])
            V = V[:, :3]
            U = U[:, :3]

            S = W @ V.T
            S[0, :] = -S[0, :]
            S[1, :] = -S[1, :]

            if eliminate_affinity:
                M = U @ W
                L = LA.pinv(M) @ LA.pinv(M.T)
                C = LA.cholesky(L)
                S = LA.inv(C) @ S

            if not np.any(world_idx):
                model[dense_idx, :] = S.T

            else:
                X = model[world_idx & dense_idx, :]
                Y = S[:, world_idx[dense_idx]].T
                _, _, (R, s, t) = procrustes(X, Y)
                Z = s * S.T @ R + t
                model[dense_idx, :] = Z

    model = model[world_idx, :]

    return model
Example #11
def mnist_procrustes(digits, num_points, num_avg):
    proc_dist = lambda a, b: procrustes.procrustes(a, b)
    k = len(digits)
    a = []
    for i in range(num_avg):
        originals, shapes, ext_shapes, labels = pick_data([num_points] * k,
                                                          digits)
        l, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist)
        accu = kmeans.accuracy(labels, l)
        a.append(accu)
        print accu
    print
    print "d_{P} = %f" % np.mean(a)
def mnist_procrustes(digits, num_points, num_avg):
    proc_dist = lambda a, b: procrustes.procrustes(a, b)
    k = len(digits)
    a = [] 
    for i in range(num_avg):
        originals, shapes, ext_shapes, labels = pick_data([num_points]*k, 
                                                            digits)
        l, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist)
        accu = kmeans.accuracy(labels, l)
        a.append(accu)
        print accu
    print
    print "d_{P} = %f" % np.mean(a)
def procrustes_filling_test(im1, im2, fname, numpoints=300, N=20, scale=80):
    """Comparing pure procrustes versus "image filling" distance."""
    fig = plt.figure(figsize=(12, 3))

    ax = fig.add_subplot(151)
    ax.imshow(im1, cmap=plt.cm.gray)
    ax.axis('off')
    ax.set_aspect('equal')

    ax = fig.add_subplot(152)
    ax.imshow(im2, cmap=plt.cm.gray)
    ax.axis('off')
    ax.set_aspect('equal')

    s1 = shape.get_external_contour(im1,
                                    numpoints,
                                    smooth=1,
                                    rotate=np.eye(2),
                                    translate=np.array([[0, 0]]))
    s2 = shape.get_external_contour(im2,
                                    numpoints,
                                    smooth=1,
                                    rotate=np.eye(2),
                                    translate=np.array([[0, 0]]))
    im2_res, im2_hat, proc_dist = procrustes.procrustes(s1, s2, fullout=True)

    ax = fig.add_subplot(153)
    ax.plot(im2_res[:, 0], im2_res[:, 1], 'or', alpha=.7)
    ax.plot(im2_hat[:, 0], im2_hat[:, 1], 'ob', alpha=.7)
    ax.set_aspect('equal')
    ax.set_title(r'$d_P=%f$' % proc_dist)
    ax.set_xticks([])
    ax.set_yticks([])

    im2_matrix = shape_to_filled_image(im2_res, N=N, scale=scale)
    im2hat_matrix = shape_to_filled_image(im2_hat, N=N, scale=scale)
    fill_dist = np.linalg.norm(im2_matrix - im2hat_matrix)
    fill_dist = fill_dist / N

    ax = fig.add_subplot(154)
    ax.imshow(im2_matrix, cmap=plt.cm.gray)
    ax.set_aspect('equal')
    ax.axis('off')
    ax.set_title(r'$d_{P_f}=%f$' % fill_dist)

    ax = fig.add_subplot(155)
    ax.imshow(im2hat_matrix, cmap=plt.cm.gray)
    ax.set_aspect('equal')
    ax.axis('off')

    fig.savefig(fname)
Example #14
def callback(iters, current, env):
    print 'Callback: ', iters

    # get all traces
    env.updategui = False
    traces = env.evaluate_policy(current)
    env.updategui = True
    pickle.dump(traces, open('traces{0}.pck'.format(iters), 'w'),
                pickle.HIGHEST_PROTOCOL)

    # measure task performance
    avg_reward = 0
    for t in traces:
        avg_reward += sum([i[2] for i in t])
    avg_reward = avg_reward / float(len(traces))
    print 'Avg reward: ', avg_reward

    # find current embedding
    ematrix = np.zeros((512, 512))
    for (i, t) in enumerate(traces):
        for (j, s) in enumerate(traces):
            ematrix[i,
                    j] = edit_distance_vc([e[1] for e in t], [l[1] for l in s],
                                          (1.0, 1.0, 1.2))
    pickle.dump(ematrix, open('ematrix{0}.pck'.format(iters), 'w'),
                pickle.HIGHEST_PROTOCOL)
    y, s, adj = isomap(ematrix)
    if len(y) < 512:
        # fallback to mds if more than 1 connected component
        print "More than 1 CC - Falling back to MDS"
        y, s = mds(ematrix)
        adj = None

    # plot stuff later because of pylab / pygame incompat on mac
    # save embedding image - multiple formats?
    #scatter(y[:,0],y[:,1], filename='scatter{0}.pdf'.format(iters))

    # save scree plot
    #plot(s[:10], filename='scree{0}.pdf'.format(iters))

    # procrustes error
    gt = env.coords_array()
    err = procrustes(gt, y)
    print "Procrustes ", err

    pickle.dump((iters, err, gt, avg_reward, current, y, s, adj),
                open('misc{0}.pck'.format(iters), 'w'),
                pickle.HIGHEST_PROTOCOL)

    env.save('iter{0}.png'.format(iters))
Example #15
def parttransform(i):
    #mesh_1 = handle.handle_obj("D:/matlab_code/scapecode/scape/1.obj")
    #mesh_2 = handle.handle_obj("D:/matlab_code/scapecode/scape/20.obj")

    mesh_1 = handle.handle_obj("../res/1.obj")
    mesh_2 = handle.handle_obj("../res/20.obj")

    part_vertex_id = handle.handle_txt(i)

    mesh_1_part = mesh_1[part_vertex_id]
    mesh_2_part = mesh_2[part_vertex_id]

    d, z, t = procrustes.procrustes(mesh_1_part, mesh_2_part, False, False)

    return t['rotation']
def aligning_image(d1, d2):
    im1 = pick_digit(d1)
    im2 = pick_digit(d2)
    p1 = shape.get_external_contour(im1,
                                    10,
                                    smooth=5,
                                    rotate=np.eye(2),
                                    translate=np.array([[0, 0]]))
    p2 = shape.get_external_contour(im2,
                                    10,
                                    smooth=5,
                                    rotate=np.eye(2),
                                    translate=np.array([[0, 0]]))

    im2_res, im2_hat, dist = procrustes.procrustes(p1, p2, fullout=True)
def procrustes_vs_regular_distances():
	vec_pair_procrustes_dist = dict()
	vec_pair_procrustes_ratio = dict()
	for i in range(0, len(en_vecsets)):
		#en_vecs = vec_subdict("en", en_vecsets[i])
		#en_mat = build_mat_from_dict(en_vecs)
		for j in range(0, len(fr_vecsets)):
			#fr_vecs = vec_subdict("fr", fr_vecsets[j])
			#fr_mat = build_mat_from_dict(fr_vecs)
			en_mat, fr_mat, index_map = build_parallel_mats_from_dicts(en_vecsets[i], fr_vecsets[j], translation_dict)
			# frobenius distance between matrices
			original_dist = sqrt(pow(en_mat - fr_mat, 2).sum())
			d, Z, t = procrustes(en_mat, fr_mat)
			t_mat = np.dot(en_mat, t['rotation'])*t['scale'] + t['translation']
			new_dist = sqrt(pow(t_mat - fr_mat, 2).sum())
			vec_pair_procrustes_dist[(en_vec_strs[i], fr_vec_strs[j])] = new_dist
			vec_pair_procrustes_ratio[(en_vec_strs[i], fr_vec_strs[j])] = original_dist/new_dist
	return sorted(vec_pair_procrustes_dist, key = vec_pair_procrustes_dist.get, reverse=False), vec_pair_procrustes_dist, sorted(vec_pair_procrustes_ratio, key = vec_pair_procrustes_ratio.get, reverse=True), vec_pair_procrustes_ratio
Example #18
def models_build(pp_lms, precision):
    models = {}
    means = {} #mean
    for key in pp_lms:
        means[key]=np.average(pp_lms[key], axis = 0)
    for key in pp_lms:
        i_r = deepcopy(pp_lms[key])
        d = 1
        while d > 1 - precision:
            err = []
            for item in i_r:
                er, item, a = procrustes(means[key], item)
                err.append(er)
            error = np.array(err)
            d = error.mean()
            means[key]=i_r
        models[key]=np.average(means[key], axis = 0)
    return models
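
models_build above is, in spirit, a generalized Procrustes loop: align every shape to the current mean, recompute the mean, and repeat until the residual stops shrinking. A rough self-contained sketch of that loop using scipy.spatial.procrustes (which standardizes the shapes it aligns); the fixed iteration count, the gpa_mean name, and the toy shapes are illustrative only:

import numpy as np
from scipy.spatial import procrustes

def gpa_mean(shapes, iters=10):
    # iteratively align all shapes to the running mean shape
    aligned = [np.asarray(s, dtype=float) for s in shapes]
    mean = aligned[0]
    for _ in range(iters):
        aligned = [procrustes(mean, s)[1] for s in aligned]  # mtx2: s aligned to the mean
        mean = np.mean(aligned, axis=0)
    return mean, aligned

rng = np.random.default_rng(2)
base = rng.normal(size=(10, 2))
# rotated / translated copies of the same base shape
shapes = [base @ np.linalg.qr(rng.normal(size=(2, 2)))[0] + rng.normal(size=2) for _ in range(5)]
mean_shape, aligned = gpa_mean(shapes)
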
Example #19
    def get_true_poses(self):
        """
        Function to find 3D positions of each defined keypoints in the frame of
        every scene origin. Uses the .npz zipped archive to get relative scene
        transformations, the selection matrix etc.
        Returns a list of (Nx3) 2D numpy arrays where each i-th array in the list
        holds the 3D keypoint pose configuration of the object in the i-th scene.
        """
        list_of_poses = []
        # get selected points from input npz array
        ref_keypts = self.input_array['ref']
        # get selection matrix frrom input npz array
        select_mat = self.input_array['sm']
        # convert selection matrix to a binary matrix
        col_splits = np.hsplit(select_mat, select_mat.shape[1] // 3)
        row_splits = [np.vsplit(col, col.shape[0] // 3) for col in col_splits]
        vis_list = [
            sum(map(lambda x: (x == np.eye(3)).all(), mat))
            for mat in row_splits
        ]
        # binary matrix of shape (num of scenes x num of keypoints)
        vis_mat = np.reshape(vis_list, [self.num_scenes, self.num_keypts])

        for sce_id, visibility in zip(range(len(self.list_of_scene_dirs)),
                                      vis_mat):
            # read true model from .pp file
            tru_model = SparseModel().reader(self.picked_pts, 1 / 1000)
            # partial user-annotated 3D model
            obj_manual = ref_keypts[sce_id].transpose()[np.nonzero(visibility)
                                                        [0]]
            # select corresponding points in true model
            tru_model_part = tru_model[np.nonzero(visibility)[0]]
            # use procrustes analysis to align true model to annotated points
            _, _, tform = procrustes(tru_model_part, obj_manual, False)

            T = tfa.compose(tform['translation'],
                            np.linalg.inv(tform['rotation']), np.ones(3))
            T = np.linalg.inv(T)
            true_object = np.asarray([(T[:3, :3].dot(pt) + T[:3, 3])
                                      for pt in tru_model])
            list_of_poses.append(true_object)
        return list_of_poses
Example #20
def mnist_procrustes_filling(digits, num_points, num_avg):
    eucl_dist = lambda a, b: np.linalg.norm(a - b)
    proc_dist = lambda a, b: procrustes.procrustes(a, b)
    proc_dist_filling = lambda a, b: fill.procrustes_filling(
        a, b, N=40, scale=200)
    k = len(digits)
    aa1 = []
    aa2 = []
    aa3 = []
    for i in range(num_avg):
        originals, shapes, ext_shapes, labels = pick_data([num_points] * k,
                                                          digits)
        l1, _, _, _ = kmeans.kmeans_(k, originals, eucl_dist)
        l2, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist)
        l3, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist_filling)
        a1 = kmeans.accuracy(labels, l1)
        a2 = kmeans.accuracy(labels, l2)
        a3 = kmeans.accuracy(labels, l3)
        aa1.append(a1)
        aa2.append(a2)
        aa3.append(a3)
    print "d_{E} = %f" % np.mean(aa1)
    print "d_{P} = %f" % np.mean(aa2)
    print "d_{F} = %f" % np.mean(aa3)
def mnist_procrustes_filling(digits, num_points, num_avg):
    eucl_dist = lambda a, b: np.linalg.norm(a-b)
    proc_dist = lambda a, b: procrustes.procrustes(a, b)
    proc_dist_filling = lambda a, b: fill.procrustes_filling(a, b, N=40,
                                                        scale=200)
    k = len(digits)
    aa1 = []
    aa2 = []
    aa3 = []
    for i in range(num_avg):
        originals, shapes, ext_shapes, labels = pick_data([num_points]*k, 
                                                            digits)
        l1, _, _, _ = kmeans.kmeans_(k, originals, eucl_dist)
        l2, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist)
        l3, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist_filling)
        a1 = kmeans.accuracy(labels, l1)
        a2 = kmeans.accuracy(labels, l2)
        a3 = kmeans.accuracy(labels, l3)
        aa1.append(a1)
        aa2.append(a2)
        aa3.append(a3)
    print "d_{E} = %f" % np.mean(aa1)
    print "d_{P} = %f" % np.mean(aa2)
    print "d_{F} = %f" % np.mean(aa3)
Example #22
def preprocess(coordfiles, mirror=True, useNotVisiblePoints=True, crop=True):
    """
	Preprocessing of images and coordinate input:
	*optional mirroring
	*procrustes analysis
	*cropping and aligning of images
	"""

    # read in coordinates
    coordinates = []
    filenames = []
    not_visible = []
    fi = open(coordfiles, "r")
    for lines in fi:
        li = lines.strip().split(";")

        if not os.path.exists(os.path.join(config.images, li[0])):
            print "Could not find file %s in %s" % (li[0], config.images)
            continue

        coor = []
        not_visible_coor = []
        filenames.append(li[0])

        for r in xrange(0, num_patches):
            i = (r * 3) + 1
            if li[i + 2] == "false":
                not_visible_coor.append(r)
            coor.append(float(li[i]))
            coor.append(float(li[i + 1]))

        single_coor = numpy.array(coor).reshape((num_patches, 2))
        coordinates.append(single_coor)
        not_visible.append(not_visible_coor)
    fi.close()

    if len(coordinates) == 0:
        sys.exit(
            "No images were found for training. Please make sure that folders in config.py are correct, and that images for training are downloaded."
        )

    # mirror the points around vertical axis and use those also
    if mirror:
        # create mirror coordinates according to some map in config
        mirrors = []
        for c in range(0, len(coordinates)):
            # load image
            print "Loading " + filenames[c]
            im = Image.open(config.images + filenames[c], "r")
            # get imagesize
            imsize = im.size
            m = [coordinates[c][mirror_map[r]] for r in range(0, num_patches)]
            m = vstack(m)
            m[:, 0] = (imsize[0] - 1.0) - m[:, 0]
            #m[:,0] = (imsize[0])-m[:,0]
            mirrors.append(m)
            not_visible_coor = [mirror_map[v] for v in not_visible[c]]
            not_visible.append(not_visible_coor)
        coordinates.extend(mirrors)

    # procrustes analysis of coordinates
    procrustes_distance = 1000.0
    # TODO: check that the first coordinate has all coordinates

    # TODO : we should rotate the meanshape (either at the beginning or the end) so that it's symmetrical

    meanshape = coordinates[0]
    while procrustes_distance > 20.0:
        aligned_coordinates = [[] for i in range(num_patches)]
        for c in coordinates:
            if useNotVisiblePoints:
                present_coord = [r for r in range(0, num_patches)]
            else:
                present_coord = [
                    r for r in range(0, num_patches)
                    if not numpy.isnan(coordinates[c][r, 0])
                    and not numpy.isnan(coordinates[c][r, 1])
                ]
                # check that at least 50% of coordinates are present
                if len(present_coord) < num_patches / 2:
                    continue
            # only do procrustes analysis on present coordinates
            reduced_mean = meanshape[present_coord, :]
            reduced_coord = c[present_coord, :]
            # calculate aligned coordinates
            aligned = procrustes.procrustes(reduced_mean, reduced_coord)
            # add to aligned_coordinates
            for r in range(0, len(present_coord)):
                aligned_coordinates[present_coord[r]].append(aligned[r, :])
        # create new mean shape
        new_meanshape = numpy.zeros((num_patches, 2))
        for r in range(0, num_patches):
            for ar in aligned_coordinates[r]:
                new_meanshape[r, :] += ar
            new_meanshape[r, :] /= len(aligned_coordinates[r])
        # calculate procrustes distance between old and new mean shape
        procrustes_distance = procrustes.procrustes_distance(
            meanshape, new_meanshape)
        # set old mean shape to new mean shape
        meanshape = new_meanshape
        print "procrustes distance in current iteration: " + str(
            procrustes_distance)

    # scale mean model to given modelwidth
    meanshape = procrustes.scale_width(meanshape, modelwidth)

    procrustes_transformations = []
    coordinates_final = []
    for c in range(0, len(coordinates)):
        if useNotVisiblePoints:
            present_coord = [r for r in range(0, num_patches)]
        else:
            present_coord = [
                r for r in range(0, num_patches)
                if not numpy.isnan(coordinates[c][r, 0])
                and not numpy.isnan(coordinates[c][r, 1])
            ]
            # check that at least 50% of coordinates are present
            if len(present_coord) < num_patches / 2:
                continue
        # only do procrustes analysis on present coordinates
        reduced_mean = meanshape[present_coord, :]
        reduced_coord = coordinates[c][present_coord, :]
        # get procrustes transformation to mean
        c_transform = procrustes.procrustes(reduced_mean, reduced_coord)
        procrustes_transformations.append(c_transform)
        # transformed coordinates including nan
        c_final = numpy.array([numpy.nan
                               for r in range(0, num_patches * 2)]).reshape(
                                   (num_patches, 2))
        for r in range(0, len(present_coord)):
            c_final[present_coord[r], :] = c_transform[r, :]
        c_final = vstack(c_final)
        coordinates_final.append(c_final)

    if crop:

        # find how large to crop images
        mean_x = mean(meanshape[:, 0])
        mean_y = mean(meanshape[:, 1])
        min_x, max_x, min_y, max_y = float("inf"), -float("inf"), float(
            "inf"), -float("inf")
        for c in procrustes_transformations:
            min_x = min(numpy.min(c[:, 0]), min_x)
            max_x = max(numpy.max(c[:, 0]), max_x)
            min_y = min(numpy.min(c[:, 1]), min_y)
            max_y = max(numpy.max(c[:, 1]), max_y)

        min_half_width = max(mean_x - min_x, max_x - mean_x) + (
            (patch_size - 1) / 2) + 2
        min_half_height = max(mean_y - min_y, max_y - mean_y) + (
            (patch_size - 1) / 2) + 2
        min_half_width = int(min_half_width)
        min_half_height = int(min_half_height)

        # get initial rectangle for cropping
        rect = numpy.array([mean_x-min_half_width, mean_y-min_half_height, \
         mean_x-min_half_width, mean_y+min_half_height,\
         mean_x+min_half_width, mean_y+min_half_height,\
         mean_x+min_half_width, mean_y-min_half_height]).reshape((4,2))

        # rotate and transform images same way as procrustes
        cropped_filenames = []
        i = 0
        for filename in filenames:
            # load image
            im = Image.open(config.images + filename, "r")
            if useNotVisiblePoints:
                present_coord = [r for r in range(0, num_patches)]
            else:
                # check which coordinates are present
                present_coord = [
                    r for r in range(0, num_patches)
                    if not numpy.isnan(coordinates[i][r, 0])
                    and not numpy.isnan(coordinates[i][r, 1])
                ]
                # check that at least 50% of coordinates are present
                if len(present_coord) < num_patches / 2:
                    continue
            # only do procrustes analysis on present coordinates
            reduced_mean = meanshape[present_coord, :]
            reduced_coord = coordinates[i][present_coord, :]

            # get transformations
            crop_s, crop_r, crop_m1, crop_m2 = procrustes.get_reverse_transforms(
                reduced_mean, reduced_coord)
            # transform rect
            crop_rect = procrustes.transform(rect, crop_s, crop_r, crop_m1,
                                             crop_m2)

            # create a mask to detect when we crop outside the original image
            # create white image of same size as original
            mask = Image.new(mode='RGB', size=im.size, color=(255, 255, 255))
            # transform the same way as image
            mask = mask.transform(
                (min_half_width * 2, min_half_height * 2), Image.QUAD,
                crop_rect.flatten(), Image.BILINEAR)
            # convert to boolean
            mask = mask.convert('L')
            mask.save(
                os.path.join(data_folder, "cropped/",
                             os.path.splitext(filename)[0] + "_mask.bmp"))

            # use pil im.transform to crop and scale faces from images
            im = im.transform((min_half_width * 2, min_half_height * 2),
                              Image.QUAD, crop_rect.flatten(), Image.BILINEAR)
            # save cropped images to output folder with text
            im.save(
                os.path.join(data_folder, "cropped/",
                             os.path.splitext(filename)[0] + ".bmp"))
            cropped_filenames.append(os.path.splitext(filename)[0] + ".bmp")
            i += 1
        # if mirror is True: we need to mirror image
        if mirror:
            for filename in filenames:
                #do the same stuff for mirrored images
                # load image
                im = Image.open(config.images + filename, "r")

                if useNotVisiblePoints:
                    present_coord = [r for r in range(0, num_patches)]
                else:
                    # check which coordinates are present
                    present_coord = [
                        r for r in range(0, num_patches)
                        if not numpy.isnan(coordinates[i][r, 0])
                        and not numpy.isnan(coordinates[i][r, 1])
                    ]
                    # check that at least 50% of coordinates are present
                    if len(present_coord) < num_patches / 2:
                        continue
                # only do procrustes analysis on present coordinates
                reduced_mean = meanshape[present_coord, :]
                reduced_coord = coordinates[i][present_coord, :]

                # get transformations
                crop_s, crop_r, crop_m1, crop_m2 = procrustes.get_reverse_transforms(
                    reduced_mean, reduced_coord)
                # transform rect
                crop_rect = procrustes.transform(rect, crop_s, crop_r, crop_m1,
                                                 crop_m2)

                # create a mask to detect when we crop outside the original image
                # create white image of same size as original
                mask = Image.new(mode='RGB',
                                 size=im.size,
                                 color=(255, 255, 255))
                # transform the same way as image
                mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
                mask = mask.transform(
                    (min_half_width * 2, min_half_height * 2), Image.QUAD,
                    crop_rect.flatten(), Image.BILINEAR)
                # convert to boolean
                mask = mask.convert('L')
                mask.save(
                    os.path.join(data_folder, "cropped/",
                                 os.path.splitext(filename)[0] +
                                 "_m_mask.bmp"))

                # use pil im.transform to crop and scale faces from images
                im = im.transpose(Image.FLIP_LEFT_RIGHT)
                im = im.transform(
                    (min_half_width * 2, min_half_height * 2), Image.QUAD,
                    crop_rect.flatten(), Image.BILINEAR)
                # save cropped images to output folder with text
                im.save(
                    os.path.join(data_folder, "cropped/",
                                 os.path.splitext(filename)[0] + "_m.bmp"))
                cropped_filenames.append(
                    os.path.splitext(filename)[0] + "_m.bmp")
                i += 1

        # output new coordinates
        new_coordinates = []
        for c in coordinates_final:
            # mark coordinate files where the mark is occluded in some way
            new_coordinates.append(c - meanshape)

        #returns a dictionary where key is filename and value is coordinate matrix
        data_pca = {}
        for r in range(0, len(new_coordinates)):
            data_pca[cropped_filenames[r]] = new_coordinates[r]

        # TODO : create duplicate matrix
        data_patches = {}
        for r in range(0, len(new_coordinates)):
            coord = numpy.copy(new_coordinates[r])
            if useNotVisiblePoints:
                # set not visible points to nan
                for vn in not_visible[r]:
                    coord[vn, :] = numpy.nan
            data_patches[cropped_filenames[r]] = coord

        return data_pca, data_patches, meanshape, (min_half_width * 2,
                                                   min_half_height * 2)
    else:
        # output new coordinates
        new_coordinates = []
        for c in coordinates_final:
            # mark coordinate files where the mark is occluded in some way
            new_coordinates.append(c - meanshape)

        #returns a dictionary where key is filename and value is coordinate matrix
        data_pca = {}
        for r in range(0, len(filenames)):
            data_pca[filenames[r]] = new_coordinates[r]
        if mirror:
            for r in range(0, len(filenames)):
                data_pca[filenames[r] +
                         "_m"] = new_coordinates[len(filenames) + r]

        return data_pca, meanshape
def Calculate(data, GPA=False, delta_t=0.1, plot=False): 
#    data = pd.read_csv(file)
    delta_t = np.repeat(0.1, data.shape[0]) # time interval is 0.1 seconds
    unit_vec = np.array([1,0,0])
    
#    position = []
#    position.extend([0, 0, 0])
#    velocity = []
#    velocity.extend([0, 0, 0])
        
    for i in range(int(len(data)/9)):
        
        df = data.iloc[i:i+9]
        Axc = df['ax'].tolist()
        Ayc = df['ay'].tolist()
        Azc = df['az'].tolist()

        position_x, velocity_x = double_int_class(Axc, delta_t, 0, 0)
        position_y, velocity_y = double_int_class(Ayc, delta_t, 0, 0)
        position_z, velocity_z = double_int_class(Azc, delta_t, 0, 0)
        position = np.array([position_x, position_y, position_z])
        velocity = np.array([velocity_x, velocity_y, velocity_z])

        if GPA == True:
            if i == 0:
                X = np.array([position_x, position_y, position_z])
            else:
                position = procrustes(X, position, scaling=False)
                    
        t1 = df['tilt1'].tolist()
        t2 = df['tilt2'].tolist()
        compass = df['compass'].tolist()
        trans_vec = []
        for j in range(8):
            angle_vec = np.array([t1[j]*math.pi/180, t2[j]*math.pi/180, compass[j]*math.pi/180])
            trans_vec.append(unit_vect_transform(unit_vec, angle_vec))
        max_angle = angle(trans_vec[0], trans_vec[-1])
        trans_vec = np.array(trans_vec)
        
        if plot == True:
            fig = plt.figure(figsize=(20, 10))
            ax = fig.add_subplot(211, projection='3d')
            ax.scatter(position[0, :],  position[1, :], position[2, :])
            ax.plot(position[0, :],  position[1, :], position[2, :], color='blue')
            for j in range(8):
                ax.quiver(position[0, j],  position[1, j], position[2, j], 
                          trans_vec[j, 0], trans_vec[j, 1], trans_vec[j, 2], 
                          length = 20, normalize = True, color='red', linestyle = '--')
            ax.view_init(azim=0, elev=90) #xy plane
            plt.xticks(fontsize=10)
            plt.xticks(fontsize=10)
#            ax.set_axis_off()
            
            ax2 = fig.add_subplot(212, projection='3d')
            ax2.scatter(position[0, :],  position[1, :], position[2, :])
            ax2.plot(position[0, :],  position[1, :], position[2, :], color='blue')
            for j in range(8):
                ax2.quiver(position[0, j],  position[1, j], position[2, j], 
                          trans_vec[j, 0], trans_vec[j, 1], trans_vec[j, 2], 
                          length = 20, normalize = True, color='red', linestyle = '--')
            ax2.view_init(azim=0, elev=45)
            
#            ax3 = fig.add_subplot(223, projection='3d')
#            ax3.scatter(position[0, :],  position[1, :], position[2, :])
#            ax3.plot(position[0, :],  position[1, :], position[2, :])
#            for j in range(8):
#                ax3.quiver(position[0, j],  position[1, j], position[2, j], 
#                          trans_vec[j, 0], trans_vec[j, 1], trans_vec[j, 2], 
#                          length = 20, normalize = True, color='red', linestyle = '--')
#            ax3.set_axis_off()
            
    ReturnType = collections.namedtuple('ReturnType', 'Position_Vector Max_Speed Max_Angle')
    
    return ReturnType(Position_Vector=position, Max_Speed=round(max(np.linalg.norm(velocity,axis=1))/1000, 3),
                      Max_Angle = round(max_angle/math.pi*180, 3))
Example #24
def dif(coords1, coords2):
  """Input: two np arrays"""
  return procrustes.procrustes(coords1, coords2)[0]
Example #25
def mnist_standard_vs_procrustes(nrange, digits, num_sample, outfile):
    """Plot accuracy when clustering MNIST digits, using procrustes
    and Euclidean distance.
    
    """

    eucl_dist = lambda a, b: np.linalg.norm(a - b)
    proc_dist1 = lambda a, b: procrustes.procrustes(a, b)
    proc_dist2 = lambda a, b: procrustes.procrustes2(a, b)
    proc_dist3 = lambda a, b: procrustes.procrustes3(a, b, 50)

    k = len(digits)
    a1, a2, a3, a4, a5 = [], [], [], [], []
    for n in nrange:

        print "Doing %i of %i" % (n, nrange[-1])

        ns = [n] * k
        for m in range(num_sample):

            originals, shapes, ext_shapes, labels = pick_data(ns, digits)

            l1, _, _, _ = kmeans.kmeans_(k, originals, eucl_dist)
            l2, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist1)
            l3, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist3)
            l4, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist1)
            l5, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist2)

            ac1 = kmeans.accuracy(labels, l1)
            ac2 = kmeans.accuracy(labels, l2)
            ac3 = kmeans.accuracy(labels, l3)
            ac4 = kmeans.accuracy(labels, l4)
            ac5 = kmeans.accuracy(labels, l5)

            a1.append([n, ac1])
            a2.append([n, ac2])
            a3.append([n, ac3])
            a4.append([n, ac4])
            a5.append([n, ac5])

            print '    ', ac1, ac2, ac3, ac4, ac5

    a1 = np.array(a1)
    a2 = np.array(a2)
    a3 = np.array(a3)
    a4 = np.array(a4)
    a5 = np.array(a5)

    # plotting results
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(a1[:, 0], a1[:, 1], 'o', color='b', alpha=.5, label=r'$d_E$')
    ax.plot(a2[:, 0], a2[:, 1], 'o', color='r', alpha=.5, label=r'$d_{P_0}$')
    ax.plot(a3[:, 0], a3[:, 1], 'o', color='g', alpha=.5, label=r'$d_{P_3}$')
    ax.plot(a4[:, 0], a4[:, 1], 'o', color='c', alpha=.5, label=r'$d_{P}$')
    ax.plot(a5[:, 0], a5[:, 1], 'o', color='m', alpha=.5, label=r'$d_{P_l}$')

    a1_avg, a2_avg, a3_avg, a4_avg, a5_avg = [], [], [], [], []
    for n in nrange:
        mu1 = a1[np.where(a1[:, 0] == n)][:, 1].mean()
        mu2 = a2[np.where(a2[:, 0] == n)][:, 1].mean()
        mu3 = a3[np.where(a3[:, 0] == n)][:, 1].mean()
        mu4 = a4[np.where(a4[:, 0] == n)][:, 1].mean()
        mu5 = a5[np.where(a5[:, 0] == n)][:, 1].mean()

        a1_avg.append([n, mu1])
        a2_avg.append([n, mu2])
        a3_avg.append([n, mu3])
        a4_avg.append([n, mu4])
        a5_avg.append([n, mu5])
    a1_avg = np.array(a1_avg)
    a2_avg = np.array(a2_avg)
    a3_avg = np.array(a3_avg)
    a4_avg = np.array(a4_avg)
    a5_avg = np.array(a5_avg)

    ax.plot(a1_avg[:, 0], a1_avg[:, 1], '-', color='b')
    ax.plot(a2_avg[:, 0], a2_avg[:, 1], '-', color='r')
    ax.plot(a3_avg[:, 0], a3_avg[:, 1], '-', color='g')
    ax.plot(a4_avg[:, 0], a4_avg[:, 1], '-', color='c')
    ax.plot(a5_avg[:, 0], a5_avg[:, 1], '-', color='m')

    ax.set_xlabel(r'$N_i$')
    ax.set_ylabel(r'$A$')
    leg = ax.legend(loc=0)
    leg.get_frame().set_alpha(0.6)
    ax.set_title(r'$\{%s\}$' % (','.join([str(d) for d in digits])))
    fig.savefig(outfile)
Example #26

f1 = open('lm/landmarks2-1.txt', 'r')
Lf1=[]

for line in f1:
    Lf1=Lf1+[int(float(line))]


xy1=np.empty((len(Lf1)/2,2),np.int32)

for x in range(len(Lf)/2):
    xy1[x]=Lf1[2*x],Lf1[2*x+1]


[d,Z,t]=procrustes.procrustes(xy,xy1)
Zi=np.int32(Z)
print Zi


img = cv2.imread('img/01.tif')
cv2.namedWindow("mainw",0)   ## create window for display
cv2.resizeWindow("mainw", 1500,900);


cv2.polylines(img,[xy],True,(0,0,255),2)
cv2.polylines(img,[xy1],True,(0,255,0),2)
cv2.polylines(img,[Zi],True,(255,0,0),5)

cv2.imshow('mainw',img)
cv2.waitKey(0)
Example #27
from procrustes import procrustes
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2

# Open images...
target_X_img = cv2.imread('IR.png', 0)
input_Y_img = cv2.imread('VIS.jpg', 0)

# Landmark points - same number and order!
X_pts = np.asarray([[19, 68], [206, 50], [88, 197], [173, 188]])

Y_pts = np.asarray([[2792, 1392], [4462, 1172], [3412, 2404], [4080, 2300]])

# Calculate transform via procrustes...
d, Z_pts, Tform = procrustes(X_pts, Y_pts)

# Build and apply transform matrix...
# Note: for affine need 2x3 (a,b,c,d,e,f) form
R = np.eye(3)
R[0:2, 0:2] = Tform['rotation']
S = np.eye(3) * Tform['scale']
S[2, 2] = 1
t = np.eye(3)
t[0:2, 2] = Tform['translation']
M = np.dot(np.dot(R, S), t.T).T
tr_Y_img = cv2.warpAffine(input_Y_img, M[0:2, :], (240, 320))

# Confirm points...
aY_pts = np.hstack((Y_pts, np.array(([[1, 1, 1, 1]])).T))
tr_Y_pts = np.dot(M, aY_pts.T).T
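
The 3 x 3 matrices above compose scale, rotation, and translation so that a single affine warp can be handed to cv2.warpAffine. For reference, the usual homogeneous-coordinate composition of a 2-D similarity transform looks like the sketch below (toy angle, scale, and translation, unrelated to the IR/VIS landmarks above):

import numpy as np

theta, scale, t = np.deg2rad(15), 0.5, np.array([10.0, -4.0])
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])

M = np.eye(3)
M[:2, :2] = scale * R            # rotation plus isotropic scale
M[:2, 2] = t                     # translation in the last column

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
pts_h = np.hstack([pts, np.ones((3, 1))])           # homogeneous coordinates
mapped = (M @ pts_h.T).T[:, :2]
print(np.allclose(mapped, scale * pts @ R.T + t))   # True
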
Example #28
xy = np.empty((len(Lf) / 2, 2), np.int32)

for x in range(len(Lf) / 2):
    xy[x] = Lf[2 * x], Lf[2 * x + 1]

f1 = open('lm/landmarks2-1.txt', 'r')
Lf1 = []

for line in f1:
    Lf1 = Lf1 + [int(float(line))]

xy1 = np.empty((len(Lf1) / 2, 2), np.int32)

for x in range(len(Lf) / 2):
    xy1[x] = Lf1[2 * x], Lf1[2 * x + 1]

[d, Z, t] = procrustes.procrustes(xy, xy1)
Zi = np.int32(Z)
print Zi

img = cv2.imread('img/01.tif')
cv2.namedWindow("mainw", 0)  ## create window for display
cv2.resizeWindow("mainw", 1500, 900)

cv2.polylines(img, [xy], True, (0, 0, 255), 2)
cv2.polylines(img, [xy1], True, (0, 255, 0), 2)
cv2.polylines(img, [Zi], True, (255, 0, 0), 5)

cv2.imshow('mainw', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
    D_true = np.ndarray(shape = (len(receivers_positions),len(receivers_positions),len(receivers_positions)))
    #receivers_positions = [(131.2797962158893, 59.88438221876277), (121.4281368314987, 102.70505203219363),(153.97361311259738, 93.98137587687233), (115.85778131733423, 72.92402160679922)]
    receivers_positions = np.array(receivers_positions)
    sum_square_tdoa_true = 0
    for cnt_j in range(len(receivers_positions)):
        for cnt_l, rx_l in enumerate(receivers_positions):
            for cnt_k, rx_k in enumerate(receivers_positions):
                if cnt_j != cnt_l and cnt_j != cnt_k and cnt_l != cnt_k:
                    D_true[cnt_j, cnt_l, cnt_k] = (np.linalg.norm(receivers_positions[cnt_l]-receivers_positions[cnt_j])-np.linalg.norm(receivers_positions[cnt_k]-receivers_positions[cnt_j]))
                else:
                    D_true[cnt_j, cnt_l, cnt_k] = 0.0
                sum_square_tdoa_true += D_true[cnt_j, cnt_l, cnt_k]**2
    difference_D = D - D_true
    scale = math.ceil(math.sqrt(abs(x*y/0.3136)))
    if options.check_anchoring:
        d, pos_selfloc_procrustes, tform = procrustes(receivers_positions, pos_selfloc, scaling=False)

    if options.replay:
        # first set of samples: delays for algorithm. length: average_length*num_sensors
        # second set: delays for anchoring. length: average_length* num_anchors
        # maybe build in a check?
        # samples log given
        if len(args) == 2:
            f_samples = open(args[1], "r")
            for line_number, line in enumerate(f_samples.readlines()):
                receivers_samples = eval(eval(line))
                receivers = dict()
                # FIXME
                if line_number < selfloc_average_length * len(receivers_positions):
                    for cnt_tx in range(1, len(receivers_positions) + 1):
                        if selfloc_average_length * (cnt_tx - 1) <= line_number < selfloc_average_length * cnt_tx:
Example #30
def preprocess(coordfiles, mirror=True, useNotVisiblePoints=True):
	"""
	Preprocessing of images and coordinate input:
	*optional mirroring
	*procrustes analysis
	*cropping and aligning of images
	"""
	
	# read in coordinates
	coordinates = []
	filenames = []
	not_visible = []
	fi = open(coordfiles, "r")
	for lines in fi:
		li = lines.strip().split(";")
		
		coor = []
		not_visible_coor = []
		filenames.append(li[0])

		for r in xrange(0, num_patches):
			i = (r*3)+1
			if li[i+2] == "false":
				not_visible_coor.append(r)
			coor.append(float(li[i]))
			coor.append(float(li[i+1]))
		
		single_coor = numpy.array(coor).reshape((num_patches,2))
		coordinates.append(single_coor)
		not_visible.append(not_visible_coor)
	fi.close()
	
	# mirror the points around vertical axis and use those also
	if mirror:
		# create mirror coordinates according to some map in config
		mirrors = []
		for c in range(0, len(coordinates)):
			# load image
			im = Image.open(config.images+filenames[c], "r")
			# get imagesize
			imsize = im.size
			m = [coordinates[c][mirror_map[r]] for r in range(0, num_patches)]
			m = vstack(m)
			m[:,0] = (imsize[0]-1.0)-m[:,0]
			#m[:,0] = (imsize[0])-m[:,0]
			mirrors.append(m)
			not_visible_coor = [mirror_map[v] for v in not_visible[c]]
			not_visible.append(not_visible_coor)
		coordinates.extend(mirrors)
	
	# procrustes analysis of coordinates
	procrustes_distance = 1000.0
	# TODO: check that the first coordinate has all coordinates
	
	# TODO : we should rotate the meanshape (either at the beginning or the end) so that it's symmetrical
	
	meanshape = coordinates[0]
	while procrustes_distance > 20.0:
		aligned_coordinates = [[] for i in range(num_patches)]
		for c in coordinates:
			if useNotVisiblePoints:
				present_coord = [r for r in range(0, num_patches)]
			else:
				present_coord = [r for r in range(0, num_patches) if not numpy.isnan(coordinates[c][r,0]) and not numpy.isnan(coordinates[c][r,1])]
				# check that at least 50% of coordinates are present
				if len(present_coord) < num_patches/2:
					continue
			# only do procrustes analysis on present coordinates
			reduced_mean = meanshape[present_coord,:]
			reduced_coord = c[present_coord,:]
			# calculate aligned coordinates
			aligned = procrustes.procrustes(reduced_mean, reduced_coord)
			# add to aligned_coordinates
			for r in range(0,len(present_coord)):
				aligned_coordinates[present_coord[r]].append(aligned[r,:])
		# create new mean shape
		new_meanshape = numpy.zeros((num_patches,2))
		for r in range(0, num_patches):
			for ar in aligned_coordinates[r]:
				new_meanshape[r,:] += ar
			new_meanshape[r,:] /= len(aligned_coordinates[r])
		# calculate procrustes distance between old and new mean shape
		procrustes_distance = procrustes.procrustes_distance(meanshape, new_meanshape)
		# set old mean shape to new mean shape
		meanshape = new_meanshape
		print "procrustes distance in current iteration: "+str(procrustes_distance)
	
	# scale mean model to given modelwidth
	meanshape = procrustes.scale_width(meanshape, modelwidth)
	
	procrustes_transformations = []
	coordinates_final = []
	for c in range(0,len(coordinates)):
		if useNotVisiblePoints:
			present_coord = [r for r in range(0, num_patches)]
		else:
			present_coord = [r for r in range(0, num_patches) if not numpy.isnan(coordinates[c][r,0]) and not numpy.isnan(coordinates[c][r,1])]
			# check that at least 50% of coordinates are present
			if len(present_coord) < num_patches/2:
				continue
		# only do procrustes analysis on present coordinates
		reduced_mean = meanshape[present_coord,:]
		reduced_coord = coordinates[c][present_coord,:]
		# get procrustes transformation to mean
		c_transform = procrustes.procrustes(reduced_mean, reduced_coord)
		procrustes_transformations.append(c_transform)
		# transformed coordinates including nan
		c_final = numpy.array([numpy.nan for r in range(0,num_patches*2)]).reshape((num_patches,2))
		for r in range(0,len(present_coord)):
			c_final[present_coord[r],:] = c_transform[r,:]
		c_final = vstack(c_final)
		coordinates_final.append(c_final)
	
	# find how large to crop images
	mean_x = mean(meanshape[:,0])
	mean_y = mean(meanshape[:,1])
	min_x, max_x, min_y, max_y = float("inf"), -float("inf"), float("inf"), -float("inf")
	for c in procrustes_transformations:
		min_x = min(numpy.min(c[:,0]), min_x)
		max_x = max(numpy.max(c[:,0]), max_x)
		min_y = min(numpy.min(c[:,1]), min_y)
		max_y = max(numpy.max(c[:,1]), max_y)
	
	min_half_width = max(mean_x-min_x, max_x-mean_x) + ((patch_size-1)/2) + 2
	min_half_height = max(mean_y-min_y, max_y-mean_y) + ((patch_size-1)/2) + 2
	min_half_width = int(min_half_width)
	min_half_height = int(min_half_height)
	
	# get initial rectangle for cropping
	rect = numpy.array([mean_x-min_half_width, mean_y-min_half_height, \
		mean_x-min_half_width, mean_y+min_half_height,\
		mean_x+min_half_width, mean_y+min_half_height,\
		mean_x+min_half_width, mean_y-min_half_height]).reshape((4,2))
	
	# rotate and transform images same way as procrustes
	cropped_filenames = []
	fi = open(coordfiles, "r")
	i = 0
	for lines in fi:
		# load image
		filename = lines.split(";")[0]
		im = Image.open(config.images+filename, "r")
		if useNotVisiblePoints:
			present_coord = [r for r in range(0, num_patches)]
		else:
			# check which coordinates are present
			present_coord = [r for r in range(0, num_patches) if not numpy.isnan(coordinates[i][r,0]) and not numpy.isnan(coordinates[i][r,1])]
			# check that at least 50% of coordinates are present
			if len(present_coord) < num_patches/2:
				continue
		# only do procrustes analysis on present coordinates
		reduced_mean = meanshape[present_coord,:]
		reduced_coord = coordinates[i][present_coord,:]
		
		# get transformations
		crop_s, crop_r, crop_m1, crop_m2 = procrustes.get_reverse_transforms(reduced_mean, reduced_coord)
		# transform rect
		crop_rect = procrustes.transform(rect, crop_s, crop_r, crop_m1, crop_m2)

		# create a mask to detect when we crop outside the original image
		# create white image of same size as original
		mask = Image.new(mode='RGB', size=im.size, color=(255,255,255))
		# transform the same way as image
		mask = mask.transform((min_half_width*2, min_half_height*2), Image.QUAD, crop_rect.flatten(), Image.BILINEAR)
		# convert to boolean
		mask = mask.convert('L')
		mask.save(os.path.join(data_folder, "cropped/", os.path.splitext(filename)[0]+"_mask.bmp"))
		
		# use pil im.transform to crop and scale faces from images
		im = im.transform((min_half_width*2, min_half_height*2), Image.QUAD, crop_rect.flatten(), Image.BILINEAR)
		# save cropped images to output folder with text 
		im.save(os.path.join(data_folder, "cropped/", os.path.splitext(filename)[0]+".bmp"))
		cropped_filenames.append(os.path.splitext(filename)[0]+".bmp")
		i += 1
	fi.close()
	# if mirror is True: we need to mirror image
	if mirror:
		fi = open(coordfiles, "r")
		for lines in fi:
			#do the same stuff for mirrored images
			# load image
			filename = lines.split(";")[0]
			im = Image.open(config.images+filename, "r")
			
			if useNotVisiblePoints:
				present_coord = [r for r in range(0, num_patches)]
			else:
				# check which coordinates are present
				present_coord = [r for r in range(0, num_patches) if not numpy.isnan(coordinates[i][r,0]) and not numpy.isnan(coordinates[i][r,1])]
				# check that at least 50% of coordinates are present
				if len(present_coord) < num_patches/2:
					continue
			# only do procrustes analysis on present coordinates
			reduced_mean = meanshape[present_coord,:]
			reduced_coord = coordinates[i][present_coord,:]
			
			# get transformations
			crop_s, crop_r, crop_m1, crop_m2 = procrustes.get_reverse_transforms(reduced_mean, reduced_coord)
			# transform rect
			crop_rect = procrustes.transform(rect, crop_s, crop_r, crop_m1, crop_m2)

			# create a mask to detect when we crop outside the original image
			# create white image of same size as original
			mask = Image.new(mode='RGB', size=im.size, color=(255,255,255))
			# transform the same way as image
			mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
			mask = mask.transform((min_half_width*2, min_half_height*2), Image.QUAD, crop_rect.flatten(), Image.BILINEAR)
			# convert to boolean
			mask = mask.convert('L')
			mask.save(os.path.join(data_folder, "cropped/" , os.path.splitext(filename)[0]+"_m_mask.bmp"))

			# use pil im.transform to crop and scale faces from images
			im = im.transpose(Image.FLIP_LEFT_RIGHT)
			im = im.transform((min_half_width*2, min_half_height*2), Image.QUAD, crop_rect.flatten(), Image.BILINEAR)
			# save cropped images to output folder with text 
			im.save(os.path.join(data_folder, "cropped/", os.path.splitext(filename)[0]+"_m.bmp"))
			cropped_filenames.append(os.path.splitext(filename)[0]+"_m.bmp")
			i += 1
		fi.close()
	
	# output new coordinates
	new_coordinates = []
	for c in coordinates_final:
		# express each coordinate set as an offset from the mean shape
		new_coordinates.append(c - meanshape)
	
	# build a dictionary where key is filename and value is coordinate matrix
	data_pca = {}
	for r in range(0, len(new_coordinates)):
		data_pca[cropped_filenames[r]] = new_coordinates[r]
			
	# TODO : create duplicate matrix
	data_patches = {}
	for r in range(0, len(new_coordinates)):
		coord = numpy.copy(new_coordinates[r])
		if useNotVisiblePoints:
			# set not visible points to nan
			for vn in not_visible[r]:
				coord[vn,:] = numpy.nan
		data_patches[cropped_filenames[r]] = coord
	
	return data_pca, data_patches, meanshape, (min_half_width*2, min_half_height*2)
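The cropping above relies on two helpers, procrustes.get_reverse_transforms and procrustes.transform, whose implementations are not shown in this snippet. As a rough sketch of the same idea in plain numpy (hypothetical function names, not the module used above): estimate the similarity transform that best maps the mean shape onto the detected landmarks, then push additional points, such as the corners of the crop rectangle, through that transform.

import numpy as np

def estimate_similarity(src, dst):
    # src, dst: (N, 2) point sets in correspondence (e.g. mean shape vs. landmarks)
    src_mean, dst_mean = src.mean(axis=0), dst.mean(axis=0)
    src_c, dst_c = src - src_mean, dst - dst_mean
    # optimal rotation from the SVD of the cross-covariance matrix (orthogonal Procrustes)
    u, s, vt = np.linalg.svd(src_c.T.dot(dst_c))
    d = np.ones(len(s))
    if np.linalg.det(u.dot(vt)) < 0:
        d[-1] = -1.0  # flip one axis to rule out reflections
    rotation = (u * d).dot(vt)
    scale = (s * d).sum() / (src_c ** 2).sum()  # least-squares scale
    return scale, rotation, src_mean, dst_mean

def apply_similarity(points, scale, rotation, src_mean, dst_mean):
    # map points expressed in src coordinates into dst coordinates
    return scale * (points - src_mean).dot(rotation) + dst_mean

# hypothetical use, mirroring the lines above:
# s, R, m_src, m_dst = estimate_similarity(reduced_mean, reduced_coord)
# crop_rect_in_image = apply_similarity(rect, s, R, m_src, m_dst)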
def procrustes_alignment_example(a, b, outputfile):
    """Choose two digits from MNIST, namely "a" and "b".
    Extract the contour and apply procrustes alignment.
    Show a figure comparing the pairs.
    
    """
    f = gzip.open('data/mnist.pkl.gz', 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    images, labels = train_set
    
    id1 = np.where(labels==a)[0]
    im11 = images[np.random.choice(id1)].reshape((28,28))
    im12 = images[np.random.choice(id1)].reshape((28,28))
    
    id2 = np.where(labels==b)[0]
    im21 = images[np.random.choice(id2)].reshape((28,28))
    im22 = images[np.random.choice(id2)].reshape((28,28))

    pairs = [[im11,im12], [im21,im22], 
             [im11,im21], [im11,im22], 
             [im12,im21], [im12,im22]]

    fig, axes = plt.subplots(nrows=len(pairs), ncols=7, 
                             figsize=(2*7, 2*len(pairs)))
    for (im1, im2), row in zip(pairs, axes):
        
        ax1, ax2, ax3, ax4, ax5, ax6, ax7 = row

        X1 = shape.get_all_contours(im1, 100, 5, 50)
        X2 = shape.get_all_contours(im2, 100, 5, 50)
        X1, X2 = procrustes.fix_dimensions(X1, X2)
        Y1 = shape.get_external_contour(im1, 100, 5)
        Y2 = shape.get_external_contour(im2, 100, 5)
    
        ax1.imshow(im1, cmap=plt.cm.gray)
        ax1.axis('off')
        ax2.imshow(im2, cmap=plt.cm.gray)
        ax2.axis('off')

        # purely Euclidean distance between landmark points
        XX1, XX2 = strip_zeros(X1), strip_zeros(X2)
        ax3.plot(XX1[:,0], XX1[:,1], 'ob', alpha=.7)
        ax3.plot(XX2[:,0], XX2[:,1], 'or', alpha=.7)
        ax3.set_title(r'$d_{E}=%.2f/%.2f$'%(np.linalg.norm(X1-X2),
                                            np.linalg.norm(im1-im2)))
        ax3.set_xlim([0,30])
        ax3.set_ylim([0,30])
        ax3.set_aspect('equal')
        ax3.axis('off')

        # just the outside contour
        A, B, d = procrustes.procrustes(Y1, Y2, fullout=True)
        ax4.plot(A[:,0], A[:,1], 'ob', alpha=.7)
        ax4.plot(B[:,0], B[:,1], 'or', alpha=.7)
        ax4.set_title(r'$d_{P_0}=%f$'%d)
        ax4.set_xlim([-.18,.18])
        ax4.set_ylim([-.18,.18])
        ax4.set_aspect('equal')
        ax4.axis('off')

        # outside contour for alignment only
        A, B, d = procrustes.procrustes3(X1, X2, 100, fullout=True)
        AA, BB = strip_zeros(A), strip_zeros(B)
        ax5.plot(AA[:,0], AA[:,1], 'ob', alpha=.7)
        ax5.plot(BB[:,0], BB[:,1], 'or', alpha=.7)
        ax5.set_title(r'$d_{P_3}=%f$'%d)
        ax5.set_xlim([-.18,.18])
        ax5.set_ylim([-.18,.18])
        ax5.set_aspect('equal')
        ax5.axis('off')

        # regular procrustes
        A, B, d = procrustes.procrustes(X1, X2, fullout=True)
        AA, BB = strip_zeros(A), strip_zeros(B)
        ax6.plot(AA[:,0], AA[:,1], 'ob', alpha=.7)
        ax6.plot(BB[:,0], BB[:,1], 'or', alpha=.7)
        ax6.set_title(r'$d_{P}=%f$'%d)
        ax6.set_xlim([-.18,.18])
        ax6.set_ylim([-.18,.18])
        ax6.set_aspect('equal')
        ax6.axis('off')

        # procrustes from library
        A, B, d = procrustes.procrustes2(X1, X2, fullout=True)
        AA, BB = strip_zeros(A), strip_zeros(B)
        ax7.plot(AA[:,0], AA[:,1], 'ob', alpha=.7)
        ax7.plot(BB[:,0], BB[:,1], 'or', alpha=.7)
        ax7.set_title(r'$d_{P_l}=%f$'%d)
        ax7.set_xlim([-.18,.18])
        ax7.set_ylim([-.18,.18])
        ax7.set_aspect('equal')
        ax7.axis('off')
    
    fig.tight_layout()
    fig.savefig(outputfile)
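With data/mnist.pkl.gz available as loaded above, a typical call might look like the following (the digit pair and output filename are illustrative, not taken from the original):

procrustes_alignment_example(3, 8, 'procrustes_pairs_3_8.png')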
def mnist_standard_vs_procrustes(nrange, digits, num_sample, outfile):
    """Plot accuracy when clustering MNIST digits, using procrustes
    and Euclidean distance.
    
    """
    
    eucl_dist = lambda a, b: np.linalg.norm(a-b)
    proc_dist1 = lambda a, b: procrustes.procrustes(a, b)
    proc_dist2 = lambda a, b: procrustes.procrustes2(a, b)
    proc_dist3 = lambda a, b: procrustes.procrustes3(a, b, 50)
    
    k = len(digits)
    a1, a2, a3, a4, a5 = [], [], [], [], [] 
    for n in nrange:
        
        print "Doing %i of %i"%(n, nrange[-1])
        
        ns = [n]*k
        for m in range(num_sample):
            
            originals, shapes, ext_shapes, labels = pick_data(ns, digits)
            
            l1, _, _, _ = kmeans.kmeans_(k, originals, eucl_dist)
            l2, _, _, _ = kmeans.kmeans_(k, ext_shapes, proc_dist1)
            l3, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist3)
            l4, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist1)
            l5, _, _, _ = kmeans.kmeans_(k, shapes, proc_dist2)

            ac1 = kmeans.accuracy(labels, l1)
            ac2 = kmeans.accuracy(labels, l2)
            ac3 = kmeans.accuracy(labels, l3)
            ac4 = kmeans.accuracy(labels, l4)
            ac5 = kmeans.accuracy(labels, l5)
            
            a1.append([n, ac1])
            a2.append([n, ac2])
            a3.append([n, ac3])
            a4.append([n, ac4])
            a5.append([n, ac5])
            
            print '    ', ac1, ac2, ac3, ac4, ac5

    a1 = np.array(a1)
    a2 = np.array(a2)
    a3 = np.array(a3)
    a4 = np.array(a4)
    a5 = np.array(a5)

    # plotting results
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(a1[:,0], a1[:,1], 'o', color='b', alpha=.5, label=r'$d_E$')
    ax.plot(a2[:,0], a2[:,1], 'o', color='r', alpha=.5, label=r'$d_{P_0}$')
    ax.plot(a3[:,0], a3[:,1], 'o', color='g', alpha=.5, label=r'$d_{P_3}$')
    ax.plot(a4[:,0], a4[:,1], 'o', color='c', alpha=.5, label=r'$d_{P}$')
    ax.plot(a5[:,0], a5[:,1], 'o', color='m', alpha=.5, label=r'$d_{P_l}$')
   
    a1_avg, a2_avg, a3_avg, a4_avg, a5_avg = [], [], [], [], []
    for n in nrange:
        mu1 = a1[np.where(a1[:,0]==n)][:,1].mean()
        mu2 = a2[np.where(a2[:,0]==n)][:,1].mean()
        mu3 = a3[np.where(a3[:,0]==n)][:,1].mean()
        mu4 = a4[np.where(a4[:,0]==n)][:,1].mean()
        mu5 = a5[np.where(a5[:,0]==n)][:,1].mean()

        a1_avg.append([n, mu1])
        a2_avg.append([n, mu2])
        a3_avg.append([n, mu3])
        a4_avg.append([n, mu4])
        a5_avg.append([n, mu5])
    a1_avg = np.array(a1_avg)
    a2_avg = np.array(a2_avg)
    a3_avg = np.array(a3_avg)
    a4_avg = np.array(a4_avg)
    a5_avg = np.array(a5_avg)

    ax.plot(a1_avg[:,0], a1_avg[:,1], '-', color='b')
    ax.plot(a2_avg[:,0], a2_avg[:,1], '-', color='r')
    ax.plot(a3_avg[:,0], a3_avg[:,1], '-', color='g')
    ax.plot(a4_avg[:,0], a4_avg[:,1], '-', color='c')
    ax.plot(a5_avg[:,0], a5_avg[:,1], '-', color='m')
    
    ax.set_xlabel(r'$N_i$')
    ax.set_ylabel(r'$A$')
    leg = ax.legend(loc=0)
    leg.get_frame().set_alpha(0.6)
    ax.set_title(r'$\{%s\}$'%(','.join([str(d) for d in digits])))
    fig.savefig(outfile)
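For completeness, a hedged example of how this driver could be invoked, sweeping the per-digit sample size in nrange and averaging over num_sample runs (the arguments below are illustrative assumptions):

mnist_standard_vs_procrustes(range(10, 60, 10), [0, 3, 8], 5, 'kmeans_accuracy.png')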
        for cnt_l, rx_l in enumerate(receivers_positions):
            for cnt_k, rx_k in enumerate(receivers_positions):
                if cnt_j != cnt_l and cnt_j != cnt_k and cnt_l != cnt_k:
                    D_true[cnt_j, cnt_l, cnt_k] = (
                        np.linalg.norm(receivers_positions[cnt_l] -
                                       receivers_positions[cnt_j]) -
                        np.linalg.norm(receivers_positions[cnt_k] -
                                       receivers_positions[cnt_j]))
                else:
                    D_true[cnt_j, cnt_l, cnt_k] = 0.0
                sum_square_tdoa_true += D_true[cnt_j, cnt_l, cnt_k]**2
    difference_D = D - D_true
    scale = math.ceil(math.sqrt(abs(x * y / 0.3136)))
    if options.check_anchoring:
        d, pos_selfloc_procrustes, tform = procrustes(receivers_positions,
                                                      pos_selfloc,
                                                      scaling=False)

    if options.replay:
        # first set of samples: delays for algorithm. length: average_length*num_sensors
        # second set: delays for anchoring. length: average_length* num_anchors
        # maybe build in a check?
        # samples log given
        if len(args) == 2:
            f_samples = open(args[1], "r")
            for line_number, line in enumerate(f_samples.readlines()):
                receivers_samples = eval(eval(line))
                receivers = dict()
                # FIXME
                if line_number < selfloc_average_length * len(
                        receivers_positions):
# find indices
time_aligned_idx = np.where(np.logical_and(t_list > start_time, t_list < end_time))
gt_time_aligned_idx = np.where(np.logical_and(gt_t_list > start_time, gt_t_list < end_time))
# cut vectors (timestamps and locations)
chan_x = np.array(chan_x)[time_aligned_idx]
chan_y = np.array(chan_y)[time_aligned_idx]
gt_x_list = np.array(gt_x_list)[gt_time_aligned_idx]
gt_y_list = np.array(gt_y_list)[gt_time_aligned_idx]
gt_fix_list = np.array(gt_fix_list)[gt_time_aligned_idx]
if any(chan_x_kalman):
    chan_x_kalman = np.array(chan_x_kalman)[time_aligned_idx]
    chan_y_kalman = np.array(chan_y_kalman)[time_aligned_idx]
if len(chan_x) == 0 and len(grid_x) == 0:
    sys.exit('no timestamp matches with ground-truth!')
if options.procrustes:
    d, Z_chan, tform = procrustes(np.vstack((gt_x_list, gt_y_list)).T, np.vstack((chan_x, chan_y)).T)
    print d
else:
    Z_chan = np.vstack((chan_x, chan_y)).T
xdiff_chan = Z_chan[:, 0] - gt_x_list
ydiff_chan = Z_chan[:, 1] - gt_y_list
err_chan = np.square(xdiff_chan) + np.square(ydiff_chan)
rmse_chan = np.sqrt(np.mean(err_chan))
print time_aligned_idx
delays_calibrated = delays_calibrated[time_aligned_idx]
true_delays_history = true_delays_history[gt_time_aligned_idx]
err_chan_kalman = np.array([])
if any(chan_x_kalman):
    if options.procrustes:
        d, Z_chan_kalman, tform = procrustes(np.vstack((gt_x_list, gt_y_list)).T, np.vstack((chan_x_kalman, chan_y_kalman)).T)
        Z_chan = np.dot(np.dot(tform["scale"], np.vstack((chan_x, chan_y)).T), tform["rotation"]) + tform["translation"]
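The fragment above aligns the estimated track onto the ground-truth track before computing an RMSE. A self-contained sketch of that evaluation step using scipy.spatial.procrustes (an alternative to the MATLAB-style helper used above; note SciPy standardises both point sets, so the error comes out in normalised units rather than metres):

import numpy as np
from scipy.spatial import procrustes as scipy_procrustes

def rmse_after_procrustes(gt_xy, est_xy):
    # gt_xy, est_xy: (N, 2) arrays of matched ground-truth and estimated positions
    mtx1, mtx2, disparity = scipy_procrustes(gt_xy, est_xy)
    # disparity is the sum of squared pointwise differences after optimal alignment
    return np.sqrt(np.mean(np.sum((mtx1 - mtx2) ** 2, axis=1)))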
Exemple #35
0
if False:
    pylab.title("Policy Improvement")
    pylab.ylabel("Average Reward")
    pylab.xlabel("LTDQ Iterations")
    pylab.plot(areward)
    pylab.ylim([0,1.1])
    pylab.savefig("re.pdf")

if True:
    ogw = RBFObserverGridworld('/Users/stober/wrk/lspi/bin/16/20comp.npy', '/Users/stober/wrk/lspi/bin/16/states.npy', endstates = [272], walls=None, nrbf=80)
    pts = np.array(ogw.states.values())
    colors = create_norm_colors(pts)

    comps = np.load('/Users/stober/wrk/lspi/bin/16/5comp.npy')
    
    print procrustes(pts, comps[:,:2])

    pylab.clf()
    pylab.title('PCA Embedding')
    pylab.scatter(comps[:,0], comps[:,1],c=colors)
    pylab.xlabel('First Component')
    pylab.ylabel('Second Component')
    pylab.savefig('pca_embedding.pdf')


    pylab.clf()
    pylab.title('Ground Truth')
    pylab.scatter(pts[:,0], pts[:,1], c=colors)
    pylab.xlabel('Yaw')
    pylab.ylabel('Roll')
    pylab.savefig('gt_embedding.pdf')
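The value printed by procrustes(pts, comps[:, :2]) above is the Procrustes disparity between the ground-truth (yaw, roll) states and the first two PCA components: the residual left after the embedding has been optimally translated, rotated and (depending on the implementation's options) scaled onto the ground truth, so smaller values indicate a more faithful embedding.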
Exemple #36
0
def procrustes_alignment_example(a, b, outputfile):
    """Choose two digits from MNIST, namely "a" and "b".
    Extract the contour and apply procrustes alignment.
    Show a figure comparing the pairs.
    
    """
    f = gzip.open('data/mnist.pkl.gz', 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    images, labels = train_set

    id1 = np.where(labels == a)[0]
    im11 = images[np.random.choice(id1)].reshape((28, 28))
    im12 = images[np.random.choice(id1)].reshape((28, 28))

    id2 = np.where(labels == b)[0]
    im21 = images[np.random.choice(id2)].reshape((28, 28))
    im22 = images[np.random.choice(id2)].reshape((28, 28))

    pairs = [[im11, im12], [im21, im22], [im11, im21], [im11, im22],
             [im12, im21], [im12, im22]]

    fig, axes = plt.subplots(nrows=len(pairs),
                             ncols=7,
                             figsize=(2 * 7, 2 * len(pairs)))
    for (im1, im2), row in zip(pairs, axes):

        ax1, ax2, ax3, ax4, ax5, ax6, ax7 = row

        X1 = shape.get_all_contours(im1, 100, 5, 50)
        X2 = shape.get_all_contours(im2, 100, 5, 50)
        X1, X2 = procrustes.fix_dimensions(X1, X2)
        Y1 = shape.get_external_contour(im1, 100, 5)
        Y2 = shape.get_external_contour(im2, 100, 5)

        ax1.imshow(im1, cmap=plt.cm.gray)
        ax1.axis('off')
        ax2.imshow(im2, cmap=plt.cm.gray)
        ax2.axis('off')

        # purely Euclidean distance between landmark points
        XX1, XX2 = strip_zeros(X1), strip_zeros(X2)
        ax3.plot(XX1[:, 0], XX1[:, 1], 'ob', alpha=.7)
        ax3.plot(XX2[:, 0], XX2[:, 1], 'or', alpha=.7)
        ax3.set_title(r'$d_{E}=%.2f/%.2f$' %
                      (np.linalg.norm(X1 - X2), np.linalg.norm(im1 - im2)))
        ax3.set_xlim([0, 30])
        ax3.set_ylim([0, 30])
        ax3.set_aspect('equal')
        ax3.axis('off')

        # just the outside contour
        A, B, d = procrustes.procrustes(Y1, Y2, fullout=True)
        ax4.plot(A[:, 0], A[:, 1], 'ob', alpha=.7)
        ax4.plot(B[:, 0], B[:, 1], 'or', alpha=.7)
        ax4.set_title(r'$d_{P_0}=%f$' % d)
        ax4.set_xlim([-.18, .18])
        ax4.set_ylim([-.18, .18])
        ax4.set_aspect('equal')
        ax4.axis('off')

        # outside contour for alignment only
        A, B, d = procrustes.procrustes3(X1, X2, 100, fullout=True)
        AA, BB = strip_zeros(A), strip_zeros(B)
        ax5.plot(AA[:, 0], AA[:, 1], 'ob', alpha=.7)
        ax5.plot(BB[:, 0], BB[:, 1], 'or', alpha=.7)
        ax5.set_title(r'$d_{P_3}=%f$' % d)
        ax5.set_xlim([-.18, .18])
        ax5.set_ylim([-.18, .18])
        ax5.set_aspect('equal')
        ax5.axis('off')

        # regular procrustes
        A, B, d = procrustes.procrustes(X1, X2, fullout=True)
        AA, BB = strip_zeros(A), strip_zeros(B)
        ax6.plot(AA[:, 0], AA[:, 1], 'ob', alpha=.7)
        ax6.plot(BB[:, 0], BB[:, 1], 'or', alpha=.7)
        ax6.set_title(r'$d_{P}=%f$' % d)
        ax6.set_xlim([-.18, .18])
        ax6.set_ylim([-.18, .18])
        ax6.set_aspect('equal')
        ax6.axis('off')

        # procrustes from library
        A, B, d = procrustes.procrustes2(X1, X2, fullout=True)
        AA, BB = strip_zeros(A), strip_zeros(B)
        ax7.plot(AA[:, 0], AA[:, 1], 'ob', alpha=.7)
        ax7.plot(BB[:, 0], BB[:, 1], 'or', alpha=.7)
        ax7.set_title(r'$d_{P_l}=%f$' % d)
        ax7.set_xlim([-.18, .18])
        ax7.set_ylim([-.18, .18])
        ax7.set_aspect('equal')
        ax7.axis('off')

    fig.tight_layout()
    fig.savefig(outputfile)
Exemple #37
0
    pylab.plot(areward)
    pylab.ylim([0, 1.1])
    pylab.savefig("re.pdf")

if True:
    ogw = RBFObserverGridworld('/Users/stober/wrk/lspi/bin/16/20comp.npy',
                               '/Users/stober/wrk/lspi/bin/16/states.npy',
                               endstates=[272],
                               walls=None,
                               nrbf=80)
    pts = np.array(list(ogw.states.values()))
    colors = create_norm_colors(pts)

    comps = np.load('/Users/stober/wrk/lspi/bin/16/5comp.npy')

    print(procrustes(pts, comps[:, :2]))

    pylab.clf()
    pylab.title('PCA Embedding')
    pylab.scatter(comps[:, 0], comps[:, 1], c=colors)
    pylab.xlabel('First Component')
    pylab.ylabel('Second Component')
    pylab.savefig('pca_embedding.pdf')

    pylab.clf()
    pylab.title('Ground Truth')
    pylab.scatter(pts[:, 0], pts[:, 1], c=colors)
    pylab.xlabel('Yaw')
    pylab.ylabel('Roll')
    pylab.savefig('gt_embedding.pdf')
Exemple #38
0
    pylab.plot(areward)
    pylab.ylim([0, 1.1])
    pylab.savefig("re.pdf")

if True:
    ogw = RBFObserverGridworld('/Users/stober/wrk/lspi/bin/16/20comp.npy',
                               '/Users/stober/wrk/lspi/bin/16/states.npy',
                               endstates=[272],
                               walls=None,
                               nrbf=80)
    pts = np.array(ogw.states.values())
    colors = create_norm_colors(pts)

    comps = np.load('/Users/stober/wrk/lspi/bin/16/5comp.npy')

    print procrustes(pts, comps[:, :2])

    pylab.clf()
    pylab.title('PCA Embedding')
    pylab.scatter(comps[:, 0], comps[:, 1], c=colors)
    pylab.xlabel('First Component')
    pylab.ylabel('Second Component')
    pylab.savefig('pca_embedding.pdf')

    pylab.clf()
    pylab.title('Ground Truth')
    pylab.scatter(pts[:, 0], pts[:, 1], c=colors)
    pylab.xlabel('Yaw')
    pylab.ylabel('Roll')
    pylab.savefig('gt_embedding.pdf')
Exemple #39
0
# Test procrustes function.

import procrustes
import numpy as np

X = [[0, 1], [0, 0], [1, 0]]
Y = [[-1, 0], [0, 0], [0, 1]]

X = np.array(X)
Y = np.array(Y)

d, z, t = procrustes.procrustes(X, Y, False, False)
print "d: ", d
print "z: ", z
print "t: ", t
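As a sanity check on the output above: Y is X rotated 90 degrees counter-clockwise, so the disparity d should be essentially zero and z should coincide with X up to floating-point error. The same check with SciPy's implementation (an alternative, not the procrustes module tested here) would be:

from scipy.spatial import procrustes as scipy_procrustes

mtx1, mtx2, disparity = scipy_procrustes(X, Y)
print("disparity:", disparity)  # expected to be ~0, since Y is only a rotation of X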