Example #1
def main(n1, n2, dim, seed):
    # Generate data with numpy
    np.random.seed(seed)
    pts1 = np.random.randn(n1, dim)
    pts2 = np.random.randn(n2, dim)

    # Scipy EMD
    if dim == 1:
        # scipy's EMD only supports univariate data
        scipy_loss = scipy_emd(pts1.squeeze(1), pts2.squeeze(1))
        print("Scipy EMD {:.4f}".format(scipy_loss))

    # PyEMD
    # each point becomes a histogram bin, and each point set becomes a weight
    # vector marking which bins (i.e. points) it contains; pairwise distances
    # between histogram bins then give the correct EMD
    # pts = np.concatenate([pts1, pts2])
    # dst = scipy.spatial.distance_matrix(pts, pts)
    # hist1 = (1 / n1) * np.concatenate([np.ones(n1), np.zeros(n2)])
    # hist2 = (1 / n2) * np.concatenate([np.zeros(n1), np.ones(n2)])
    # py_loss = py_emd(hist1, hist2, dst)
    # print("PyEMD {:.4f}".format(py_loss))

    # OpenCV
    # each signature is a matrix: the first column gives the weight (uniform
    # for our purposes) and the remaining columns give the point coordinates;
    # pts_to_sig converts a point array into a signature
    def pts_to_sig(pts):
        # cv2.EMD requires single-precision, floating-point input
        sig = np.empty((pts.shape[0], 1 + pts.shape[1]), dtype=np.float32)
        sig[:, 0] = (np.ones(pts.shape[0]) / pts.shape[0])
        sig[:, 1:] = pts
        return sig

    sig1 = pts_to_sig(pts1)
    sig2 = pts_to_sig(pts2)
    cv_loss, _, flow = cv_emd(sig1, sig2, cv2.DIST_L2)
    print("OpenCV EMD {:.4f}".format(cv_loss))

    # CUDA_EMD
    # pts1_cuda = torch.from_numpy(pts1).cuda().float().reshape(1, n1, dim)
    # pts2_cuda = torch.from_numpy(pts2).cuda().float().reshape(1, n2, dim)
    # pts1_cuda.requires_grad = True
    # pts2_cuda.requires_grad = True
    # cuda_loss = cuda_emd()(pts1_cuda, pts2_cuda)
    # print("CUDA EMD {:.4f}".format(cuda_loss.item()))

    # Tensorflow
    # tf Graph
    pts1_tf = tf.convert_to_tensor(pts1.reshape(1, n1, dim), dtype=tf.float32)
    pts2_tf = tf.convert_to_tensor(pts2.reshape(1, n2, dim), dtype=tf.float32)
    match = approx_match(pts1_tf, pts2_tf)
    tf_loss = match_cost(pts1_tf, pts2_tf, match)
    # tf Session
    sess = tf.Session()
    print("Tensorflow EMD {:.4f}".format(sess.run(tf_loss[0])))
    # print(sess.run(tf_loss))
    sess.close()
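
The commented-out PyEMD block above is runnable on its own. Below is a minimal sketch of the same construction, assuming py_emd is pyemd.emd (which requires float64 inputs) and that scipy is installed:

import numpy as np
import scipy.spatial
from pyemd import emd as py_emd

def pyemd_point_sets(pts1, pts2):
    # each point is its own histogram bin; a point set is a uniform
    # distribution over its own bins and zero over the other set's bins
    n1, n2 = len(pts1), len(pts2)
    pts = np.concatenate([pts1, pts2])
    dst = scipy.spatial.distance_matrix(pts, pts)
    hist1 = (1 / n1) * np.concatenate([np.ones(n1), np.zeros(n2)])
    hist2 = (1 / n2) * np.concatenate([np.zeros(n1), np.ones(n2)])
    return py_emd(hist1, hist2, dst)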
Example #2
    def _create_ae_loss(self, loss_type):
        if loss_type == 'chamfer':
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_reconstr, self.gt)
            loss_ae_per_pc = tf.reduce_mean(cost_p1_p2, axis=1) + tf.reduce_mean(cost_p2_p1, axis=1)
        elif loss_type == 'emd':
            match = approx_match(self.x_reconstr, self.gt)
            loss_ae_per_pc = tf.reduce_mean(match_cost(self.x_reconstr, self.gt, match), axis=1)

        loss_ae = tf.reduce_mean(loss_ae_per_pc)
        return loss_ae, loss_ae_per_pc
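
For intuition, nn_distance in the chamfer branch returns the per-point squared distances to the nearest neighbor in the other cloud; the per-point-cloud loss is the mean of these in both directions. A plain NumPy sketch of that quantity for a single pair of clouds, assuming nn_distance's squared-distance convention:

import numpy as np

def chamfer_per_pc(x, y):
    # x: (n, 3), y: (m, 3); squared nearest-neighbor distances both ways
    d = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)  # (n, m)
    return d.min(axis=1).mean() + d.min(axis=0).mean()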
Example #3
    def chamfer_metric(self, particles, particles_ref, groundtruth, pos_range, loss_func, EMD = False):
        
        if EMD:
            
            bs = groundtruth.shape[0]
            Np = particles.shape[1]
            Ng = groundtruth.shape[1]
            
            match = approx_match(groundtruth, particles_ref) # [bs, Np, Ng]
            row_predicted = tf.reshape(  particles[:, :, 0:pos_range], [bs, Np, 1, pos_range])
            col_groundtru = tf.reshape(groundtruth[:, :, 0:pos_range], [bs, 1, Ng, pos_range])
            distance = tf.sqrt(tf.add_n(tf.unstack(tf.square(row_predicted - col_groundtru), axis = -1)))
            distance = distance * match
            distance_loss = tf.reduce_mean(tf.reduce_sum(distance, axis = -1))
        
        else:
            
            # test - shuffle the groundtruth and calculate the loss
            # rec_particles = tf.stack(list(map(lambda x: tf.random.shuffle(x), tf.unstack(self.ph_X[:, :, 0:6]))))
            # rec_particles = tf.random.uniform([self.batch_size, self.gridMaxSize, 3], minval = -1.0, maxval = 1.0)

            bs = groundtruth.shape[0]
            Np = particles.shape[1]
            Ng = groundtruth.shape[1]

            assert groundtruth.shape[2] == particles.shape[2]

            # NOTE: currently using only the position channels (0:pos_range) here to search for the nearest point.
            row_predicted = tf.reshape(  particles[:, :, 0:pos_range], [bs, Np, 1, pos_range])
            col_groundtru = tf.reshape(groundtruth[:, :, 0:pos_range], [bs, 1, Ng, pos_range])
            # distance = tf.norm(row_predicted - col_groundtru, ord = 'euclidean', axis = -1)
            distance = tf.sqrt(tf.add_n(tf.unstack(tf.square(row_predicted - col_groundtru), axis = -1)))
            
            rearrange_predicted_N = tf.argmin(distance, axis = 1, output_type = tf.int32)
            rearrange_groundtru_N = tf.argmin(distance, axis = 2, output_type = tf.int32)
            
            batch_subscriptG = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, Ng])
            batch_subscriptP = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, Np])
            rearrange_predicted = tf.stack([batch_subscriptG, rearrange_predicted_N], axis = 2)
            rearrange_groundtru = tf.stack([batch_subscriptP, rearrange_groundtru_N], axis = 2)

            nearest_predicted = tf.gather_nd(  particles[:, :, :], rearrange_predicted)
            nearest_groundtru = tf.gather_nd(groundtruth[:, :, :], rearrange_groundtru)

            if loss_func == tf.abs:
                distance_loss =\
                    tf.reduce_mean(loss_func(tf.cast(        particles, tf.float32) - tf.cast(nearest_groundtru, tf.float32))) +\
                    tf.reduce_mean(loss_func(tf.cast(nearest_predicted, tf.float32) - tf.cast(groundtruth      , tf.float32)))
            else:
                distance_loss =\
                    tf.reduce_mean(tf.sqrt(tf.reduce_sum(loss_func(tf.cast(        particles, tf.float32) - tf.cast(nearest_groundtru, tf.float32)), axis = -1))) +\
                    tf.reduce_mean(tf.sqrt(tf.reduce_sum(loss_func(tf.cast(nearest_predicted, tf.float32) - tf.cast(      groundtruth, tf.float32)), axis = -1)))

        return tf.cast(distance_loss, default_dtype)
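
The else branch builds explicit batch subscripts so tf.gather_nd can select, for every ground-truth point, its nearest predicted point (and vice versa). The same gather is a one-liner with NumPy advanced indexing; a minimal sketch of one direction:

import numpy as np

def gather_nearest(particles, groundtruth, pos_range=3):
    # particles: (bs, Np, C), groundtruth: (bs, Ng, C)
    bs = particles.shape[0]
    d = np.sum((particles[:, :, None, :pos_range]
                - groundtruth[:, None, :, :pos_range]) ** 2,
               axis=-1)                        # (bs, Np, Ng)
    nearest_p = np.argmin(d, axis=1)           # index of nearest prediction
    batch_ix = np.arange(bs)[:, None]          # broadcasts against (bs, Ng)
    # advanced indexing plays the role of tf.gather_nd with the stacked
    # batch subscripts used above
    return particles[batch_ix, nearest_p]      # (bs, Ng, C)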
Example #4
    def _create_input_dist(self, loss_type):
        if loss_type == 'chamfer':
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.adv, self.x)
            input_dist_per_pc = tf.reduce_mean(cost_p1_p2, axis=1) + tf.reduce_mean(cost_p2_p1, axis=1)
            max_dist_per_pc = tf.reduce_max(cost_p1_p2, axis=1)
        elif loss_type == 'emd':
            match = approx_match(self.adv, self.x)
            m_cost = match_cost(self.adv, self.x, match)
            input_dist_per_pc = tf.reduce_mean(m_cost, axis=1)
            max_dist_per_pc = tf.reduce_max(m_cost, axis=1)

        input_dist = tf.reduce_mean(input_dist_per_pc)
        max_dist = tf.reduce_mean(max_dist_per_pc)
        return input_dist, input_dist_per_pc, max_dist, max_dist_per_pc
Example #5
def main(n1, n2, dim, seed):
    # Generate data with numpy
    np.random.seed(seed)
    pts1 = np.random.randn(n1, dim)
    pts2 = np.random.randn(n2, dim)
    grad_ix_n = np.random.randint(min(n1, n2), size=5)
    grad_ix_dim = np.random.randint(dim, size=5)

    # OpenCV
    # each signature is a matrix: the first column gives the weight (uniform
    # for our purposes) and the remaining columns give the point coordinates;
    # pts_to_sig converts a point array into a signature
    def pts_to_sig(pts):
        # cv2.EMD requires single-precision, floating-point input
        sig = np.empty((pts.shape[0], 1 + pts.shape[1]), dtype=np.float32)
        sig[:, 0] = (np.ones(pts.shape[0]) / pts.shape[0])
        sig[:, 1:] = pts
        return sig

    sig1 = pts_to_sig(pts1)
    sig2 = pts_to_sig(pts2)
    cv_loss, _, flow = cv_emd(sig1, sig2, cv2.DIST_L2)
    print("OpenCV EMD {:.4f}".format(cv_loss))

    # Tensorflow
    # tf Graph
    pts1_tf = tf.convert_to_tensor(pts1.reshape(1, n1, dim), dtype=tf.float32)
    pts2_tf = tf.convert_to_tensor(pts2.reshape(1, n2, dim), dtype=tf.float32)
    match = approx_match(pts1_tf, pts2_tf)
    tf_loss = match_cost(pts1_tf, pts2_tf, match)
    grads = tf.gradients([tf_loss], [pts1_tf, pts2_tf])
    # tf Session
    sess = tf.Session()
    print("Tensorflow EMD {:.4f}".format(sess.run(tf_loss[0])))
    pts1_grad_np, pts2_grad_np = sess.run(grads)
    print("CUDA EMD Grad t1 (mean) {:.4f}".format(pts1_grad_np.mean()))
    print("CUDA EMD Grad t1 (std) {:.4f}".format(pts1_grad_np.std()))
    print("CUDA EMD Grad t2 (mean) {:.4f}".format(pts2_grad_np.mean()))
    print("CUDA EMD Grad t2 (std) {:.4f}".format(pts2_grad_np.std()))
    print(
        "CUDA EMD Grad t1 (random) {0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}, {4:.4f}"
        .format(*pts1_grad_np[0, grad_ix_n, grad_ix_dim]))
    print(
        "CUDA EMD Grad t2 (random) {0:.4f}, {1:.4f}, {2:.4f}, {3:.4f}, {4:.4f}"
        .format(*pts2_grad_np[0, grad_ix_n, grad_ix_dim]))

    sess.close()
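
A finite-difference spot check is a natural companion to the gradient printout above; a sketch assuming the same approx_match/match_cost ops and the (1, n, dim) layout used in main:

import numpy as np
import tensorflow as tf

def emd_finite_diff(pts1, pts2, i, j, eps=1e-3):
    # central-difference estimate of d(EMD)/d(pts1[0, i, j]), for comparison
    # against the analytic gradients printed above
    ph1 = tf.placeholder(tf.float32, pts1.shape)
    ph2 = tf.placeholder(tf.float32, pts2.shape)
    loss = match_cost(ph1, ph2, approx_match(ph1, ph2))[0]
    hi_pts = pts1.copy(); hi_pts[0, i, j] += eps
    lo_pts = pts1.copy(); lo_pts[0, i, j] -= eps
    with tf.Session() as sess:
        hi = sess.run(loss, {ph1: hi_pts.astype(np.float32),
                             ph2: pts2.astype(np.float32)})
        lo = sess.run(loss, {ph1: lo_pts.astype(np.float32),
                             ph2: pts2.astype(np.float32)})
    return (hi - lo) / (2 * eps)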
Example #6
    def _create_loss(self):
        c = self.configuration

        if c.loss == 'chamfer':
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_reconstr, self.gt)
            self.loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)
        elif c.loss == 'emd':
            match = approx_match(self.x_reconstr, self.gt)
            self.loss = tf.reduce_mean(match_cost(self.x_reconstr, self.gt, match))

        reg_losses = self.graph.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if c.exists_and_is_not_none('w_reg_alpha'):
            w_reg_alpha = c.w_reg_alpha
        else:
            w_reg_alpha = 1.0

        for rl in reg_losses:
            self.loss += (w_reg_alpha * rl)
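
The final loop folds graph-level regularizers into the loss. Such terms land in the REGULARIZATION_LOSSES collection when a layer is built with a regularizer; a minimal TF1-style sketch (names illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16])
h = tf.layers.dense(x, 8,
                    kernel_regularizer=tf.keras.regularizers.l2(1e-4))

loss = tf.constant(0.0)  # stand-in for the EMD/Chamfer term
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
for rl in reg_losses:    # one scalar tensor per regularized layer
    loss += 1.0 * rl     # w_reg_alpha defaults to 1.0 above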
Example #7
    def _create_loss(self):
        lambda_x = 1.0
        lambda_z = 1.0
        c = self.configuration

        n_output_feat = c.n_output[1]
        assert n_output_feat in [3, 6]

        if c.loss == 'chamfer':
            cost_p1_p2, _, cost_p2_p1, _ = nn_distance(
                self.x_reconstr[:, :, :n_output_feat],
                self.gt[:, :, :n_output_feat])
            self.x_loss = tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(
                cost_p2_p1)
        elif c.loss == 'emd':
            match = approx_match(self.x_reconstr[:, :, :n_output_feat],
                                 self.gt[:, :, :n_output_feat])
            self.x_loss = tf.reduce_mean(
                match_cost(self.x_reconstr[:, :, :n_output_feat],
                           self.gt[:, :, :n_output_feat], match))

        z_stopped = tf.stop_gradient(self.z)
        self.vz_loss = tf.nn.l2_loss(self.vz - z_stopped)
        self.z_total_loss = tf.nn.l2_loss(self.vz - self.z)

        self.x_loss *= lambda_x
        self.vz_loss *= lambda_z
        self.z_total_loss *= lambda_z
        self.total_loss = self.x_loss + self.z_total_loss

        reg_losses = self.graph.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        if c.exists_and_is_not_none('w_reg_alpha'):
            w_reg_alpha = c.w_reg_alpha
        else:
            w_reg_alpha = 1.0

        for rl in reg_losses:
            self.x_loss += (w_reg_alpha * rl)
            self.vz_loss += (w_reg_alpha * rl)
            self.total_loss += (w_reg_alpha * rl)
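
The tf.stop_gradient call above is the key detail: vz_loss pulls vz toward a frozen copy of z, while z_total_loss lets gradients flow into both vz and z. A minimal sketch isolating the pattern (variable names illustrative):

import tensorflow as tf

z = tf.get_variable('z', shape=[4])
vz = tf.get_variable('vz', shape=[4])

vz_loss = tf.nn.l2_loss(vz - tf.stop_gradient(z))  # no gradient into z
z_total_loss = tf.nn.l2_loss(vz - z)               # gradient into both

print(tf.gradients(vz_loss, [z]))       # [None]: z is treated as constant
print(tf.gradients(z_total_loss, [z]))  # [<Tensor>]: z receives gradient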
Example #8
def aEMD(n, ref, rec):
    # approx_match expects 3-D point clouds, so embed the 2-D points in the
    # z = 0 plane
    ref_3d = np.zeros((1, n, 3))
    rec_3d = np.zeros((1, n, 3))
    ref_3d[0, :, 0:2] = ref
    rec_3d[0, :, 0:2] = rec

    ph_ref = tf.placeholder(tf.float32, [1, n, 3])
    ph_rec = tf.placeholder(tf.float32, [1, n, 3])
    match = approx_match(ph_ref, ph_rec)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    plan = sess.run(match, feed_dict={ph_ref: ref_3d, ph_rec: rec_3d})[0, :, :]
    plan /= plan.sum()
    #     plt.imshow(plan)

    dist = np.sqrt(
        np.sum(
            np.square(np.reshape(ref, (1, n, 2)) - np.reshape(rec, (n, 1, 2))),
            axis=-1))

    return plan, (plan * dist).sum()
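
A usage sketch for aEMD, assuming the approx_match op is importable and that ref/rec are (n, 2) arrays as the reshapes above imply. Note that aEMD builds a fresh graph and session on every call, which is convenient for one-off evaluation but slow inside a loop:

import numpy as np

n = 64
ref = np.random.randn(n, 2)
rec = np.random.randn(n, 2)

plan, cost = aEMD(n, ref, rec)
# plan is an (n, n) transport plan normalized to sum to 1; the cost
# re-weights the pairwise 2-D distances by that plan
print(plan.shape, "approx EMD:", cost)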
Example #9
def minimum_mathing_distance_tf_graph(n_pc_points,
                                      batch_size=None,
                                      normalize=True,
                                      sess=None,
                                      verbose=False,
                                      use_sqrt=False,
                                      use_EMD=False):
    '''Produces the graph operations necessary to compute the MMD and, thanks to
    their 'symmetric' nature, also the Coverage.
    Assuming a "reference" and a "sample" set of point clouds that will be matched,
    this function creates the operation that matches a _single_ "reference" point
    cloud to all the "sample" point clouds given in a batch. It is thus the building
    block of the functions ``minimum_mathing_distance`` and ``coverage`` that iterate
    over the "sample" batches and each "reference" point cloud.
    Args:
        n_pc_points (int): how many points each point cloud to be compared has.
        batch_size (optional, int): if the iterator code that uses this function will
            use a constant batch size for iterating over the sample point clouds, you
            can specify it here to speed up the computation. Alternatively, the code
            is adapted to read the batch size dynamically.
        normalize (boolean): if True, the matched distances are normalized by dividing
            them by the number of points in the compared point clouds (n_pc_points).
        use_sqrt (boolean): when the matching is based on Chamfer (the default
            behavior), if True, the Chamfer cost is computed from the (non-squared)
            Euclidean distances of the matched points.
        use_EMD (boolean): if True, the matchings are based on the EMD.
    '''
    if normalize:
        reducer = tf.reduce_mean
    else:
        reducer = tf.reduce_sum

    if sess is None:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)

    # Placeholders for the point clouds: one for the reference (usually the
    # ground truth) and one of variable size for the collection that is matched
    # against the reference.
    ref_pl = tf.placeholder(tf.float32, shape=(1, n_pc_points, 3))
    sample_pl = tf.placeholder(tf.float32, shape=(batch_size, n_pc_points, 3))

    if batch_size is None:
        batch_size = tf.shape(sample_pl)[0]

    ref_repeat = tf.tile(ref_pl, [batch_size, 1, 1])
    ref_repeat = tf.reshape(ref_repeat, [batch_size, n_pc_points, 3])

    if use_EMD:
        match = approx_match(ref_repeat, sample_pl)
        all_dist_in_batch = match_cost(ref_repeat, sample_pl, match)
        if normalize:
            all_dist_in_batch /= n_pc_points
    else:
        ref_to_s, _, s_to_ref, _ = nn_distance(ref_repeat, sample_pl)
        if use_sqrt:
            ref_to_s = tf.sqrt(ref_to_s)
            s_to_ref = tf.sqrt(s_to_ref)
        all_dist_in_batch = reducer(ref_to_s, 1) + reducer(s_to_ref, 1)

    # best distance among those matched to the single reference point cloud
    best_in_batch = tf.reduce_min(all_dist_in_batch)
    location_of_best = tf.argmin(all_dist_in_batch, axis=0)
    return ref_pl, sample_pl, best_in_batch, location_of_best, sess
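
The returned handles are meant to be driven by an outer loop that feeds one reference cloud at a time against batches of samples, as the docstring's minimum_mathing_distance and coverage do. A minimal sketch of such a loop, assuming ref_pcs of shape (n_ref, n_pc_points, 3) and sample_pcs of shape (n_sample, n_pc_points, 3):

import numpy as np

def mmd(ref_pcs, sample_pcs, n_pc_points, batch_size=32):
    ref_pl, sample_pl, best_in_batch, _, sess = \
        minimum_mathing_distance_tf_graph(n_pc_points, use_EMD=True)
    matched = []
    for ref in ref_pcs:
        best = np.inf
        for i in range(0, len(sample_pcs), batch_size):
            feed = {ref_pl: ref[None, ...],
                    sample_pl: sample_pcs[i:i + batch_size]}
            best = min(best, sess.run(best_in_batch, feed_dict=feed))
        matched.append(best)
    return float(np.mean(matched))  # MMD: mean best match over references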