Example #1
def discriminator_new_loss(D,
                           input_real,
                           input_fake,
                           inputs,
                           npts,
                           batch_size,
                           Ra=False,
                           gan_type='lsgan'):
    loss_total = tf.constant(0.0)
    for index, input_single in enumerate(tf.split(inputs, npts, axis=1)):
        single_fake = input_fake[index]
        single_fake = tf.expand_dims(single_fake, axis=0)
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
            single_fake, input_single)
        # keep only points farther than the (squared) distance threshold from
        # the partial input, i.e. the newly generated region
        dist_index = tf.where(dists_forward > 0.005)
        single_fake_new = tf.gather_nd(single_fake, dist_index)
        single_fake_new = tf.reshape(single_fake_new, (1, -1, 3))

        single_real = input_real[index]
        single_real = tf.expand_dims(single_real, axis=0)
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
            single_real, input_single)
        dist_index = tf.where(dists_forward > 0.005)
        single_real_new = tf.gather_nd(single_real, dist_index)
        single_real_new = tf.reshape(single_real_new, (1, -1, 3))

        single_loss = discriminator_loss(D, single_real_new, single_fake_new)
        loss_total = loss_total + single_loss

    loss_total /= batch_size

    return loss_total
Example #2
def chamfer(pcd1, pcd2, radius=1.0):
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(pcd1, pcd2)
    CD_dist = 0.5 * dists_forward + 0.5 * dists_backward
    CD_dist = tf.reduce_mean(CD_dist, axis=1)
    CD_dist_norm = CD_dist / radius
    cd_loss = tf.reduce_mean(CD_dist_norm)
    return cd_loss
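
A minimal sketch of how a helper like this is typically driven in TF 1.x graph mode (the placeholder shapes are illustrative, and the compiled tf_nndistance op must be importable):

import numpy as np
import tensorflow as tf

pcd1_ph = tf.placeholder(tf.float32, [None, 2048, 3])
pcd2_ph = tf.placeholder(tf.float32, [None, 2048, 3])
cd = chamfer(pcd1_ph, pcd2_ph)

with tf.Session() as sess:
    a = np.random.rand(4, 2048, 3).astype(np.float32)
    b = np.random.rand(4, 2048, 3).astype(np.float32)
    print(sess.run(cd, feed_dict={pcd1_ph: a, pcd2_ph: b}))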
Example #3
def generator_new_loss(D, G_fake, inputs, npts, batch_size):
    # outputs = [tf.reduce_max(f, axis=1, keepdims=keepdims)
    #     for f in tf.split(inputs, npts, axis=1)]
    # return tf.concat(outputs, axis=0)

    G_fake_new = []
    input_list = []
    loss_total = tf.constant(0.0)
    for index, input_single in enumerate(tf.split(inputs, npts, axis=1)):
        single_fake = G_fake[index]
        single_fake = tf.expand_dims(single_fake, axis=0)
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
            single_fake, input_single)
        dist_index = tf.where(dists_forward > 0.005)
        single_fake_new = tf.gather_nd(single_fake, dist_index)
        single_fake_new = tf.reshape(single_fake_new, (1, -1, 3))

        # single_fake_new = tf.concat([tf.ones([1,1,3]), single_fake_new], axis=1)

        G_fake_new.append(single_fake_new)
        input_list.append(input_single)
        # note: the loss is computed on the unfiltered cloud; single_fake_new
        # is only collected for inspection
        single_loss = generator_loss(D, single_fake)

        loss_total = loss_total + single_loss

        # break  # trick; don't know how to implement batching, loss will become NaN

    loss_total = loss_total / batch_size

    return loss_total  #, input_list, G_fake, G_fake_new, dist_index, dists_forward
Example #4
    def _compute_loss(self, prediction_tensor, target_tensor, weights):
        """

        Args:
            prediction_tensor (float): A tensor of shape [batch_size, h, w, 3]
            target_tensor (float): A tensor of shape [batch_size, h, w, 3]
            weights (float): A tensor of shape [batch_size, h, w, 1]

        Returns:
            loss (float): chamfer distance
        """

        # Multiply by valid mask
        valid_prediction_tensor = prediction_tensor * weights
        valid_target_tensor = target_tensor * weights

        # Reshape to (batch_size, n_points, 3)
        batch_size = prediction_tensor.get_shape()[0]
        valid_prediction_points = tf.reshape(valid_prediction_tensor, (batch_size, -1, 3))
        valid_target_points = tf.reshape(valid_target_tensor, (batch_size, -1, 3))

        dist1, idx1, dist2, idx2 = tf_nndistance.nn_distance(
            valid_prediction_points, valid_target_points)
        chamfer_dist = tf.reduce_sum(dist1) + tf.reduce_sum(dist2)
        avg_chamfer_dist = chamfer_dist / tf.cast(batch_size, tf.float32)

        return avg_chamfer_dist
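
One caveat worth flagging (my reading of the snippet, not a claim from its authors): multiplying by a 0/1 mask moves invalid points to the origin rather than dropping them, so they still participate in nn_distance. A tiny numpy check of the masking step:

import numpy as np

pts = np.array([[[1., 2., 3.], [4., 5., 6.]]])  # shape [1, 2, 3]
w = np.array([[[1.], [0.]]])                    # shape [1, 2, 1] validity mask
print((pts * w).reshape(1, -1, 3))              # second point becomes (0, 0, 0)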
Example #5
def get_loss_ae(pred, gt, end_points):
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(pred, gt)
    loss = tf.reduce_mean(dists_forward + dists_backward)
    end_points['pcloss'] = loss

    loss = loss * 100
    end_points['loss'] = loss
    return loss, end_points
Example #6
def get_cd_loss(pred, gt, radius):
    """ pred: BxNxC,
        gt: BxNxC, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
    # dists_forward is, for each element in gt, the closest distance to pred
    CD_dist = 0.8 * dists_forward + 0.2 * dists_backward
    CD_dist = tf.reduce_mean(CD_dist, axis=1)
    CD_dist_norm = CD_dist / radius
    cd_loss = tf.reduce_mean(CD_dist_norm)
    return cd_loss, None
Example #7
def get_loss(pred, label, end_points):
    """ pred: BxNx3,
        label: BxNx3, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(pred, label)
    pc_loss = tf.reduce_mean(dists_forward + dists_backward)
    end_points['pcloss'] = pc_loss

    # the returned training loss is the approximate earth mover's distance;
    # the Chamfer term above is stored in end_points for logging
    match = tf_approxmatch.approx_match(label, pred)
    loss = tf.reduce_mean(tf_approxmatch.match_cost(label, pred, match))
    tf.summary.scalar('loss', loss)
    return loss, end_points
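
A minimal way to exercise this loss in graph mode (shapes are illustrative; both the compiled tf_nndistance and tf_approxmatch ops are assumed importable):

import tensorflow as tf

pred_ph = tf.placeholder(tf.float32, [None, 1024, 3])
label_ph = tf.placeholder(tf.float32, [None, 1024, 3])
loss, end_points = get_loss(pred_ph, label_ph, {})
# end_points['pcloss'] carries the Chamfer term; `loss` is the EMD term
# that the optimizer actually minimizes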
Example #8
def get_hausdorff_loss(pred, gt, radius, forward_weight=1.0, threshold=None):
    """
    pred: BxNxC,
    gt: BxNxC,
    forward_weight: relative weight for the forward distance
    """
    with tf.name_scope("cd_loss"):
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
        # only care about distance within threshold (ignore strong outliers)
        if threshold is not None:
            dists_forward = tf.where(dists_forward < threshold, dists_forward, tf.zeros_like(dists_forward))
            dists_backward = tf.where(dists_backward < threshold, dists_backward, tf.zeros_like(dists_backward))
        # dists_forward is for each element in gt, the closest distance to this element
        dists_forward = tf.reduce_max(dists_forward, axis=1)
        dists_backward = tf.reduce_max(dists_backward, axis=1)
        CD_dist = forward_weight * dists_forward + dists_backward
        CD_dist_norm = CD_dist/radius
        cd_loss = tf.reduce_max(CD_dist_norm)
        return cd_loss, None
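
For intuition, the classical symmetric Hausdorff distance on squared distances fits in a few lines of numpy; note the TF version above differs slightly, summing the weighted forward and backward maxima instead of taking their maximum:

import numpy as np

def hausdorff_sq(a, b):
    # pairwise squared distances between clouds a [N, 3] and b [M, 3]
    d = np.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)
    forward = d.min(axis=1).max()   # worst case over a of distance to nearest b
    backward = d.min(axis=0).max()  # worst case over b of distance to nearest a
    return max(forward, backward)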
Example #9
def get_cd_loss2(pred, gt, radius, forward_weight=1.0, threshold=100):
    """
    pred: BxNxC,
    gt: BxNxC,
    forward_weight: relative weight for the forward distance
    threshold: per-cloud outlier cutoff, as a multiple of the mean distance
    """
    with tf.name_scope("cd_loss"):
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
        if threshold is not None:
            forward_threshold = tf.reduce_mean(dists_forward, keepdims=True, axis=1) * threshold
            backward_threshold = tf.reduce_mean(dists_backward, keepdims=True, axis=1) * threshold
            # only care about distance within threshold (ignore strong outliers)
            dists_forward = tf.where(dists_forward < forward_threshold, dists_forward, tf.zeros_like(dists_forward))
            dists_backward = tf.where(dists_backward < backward_threshold, dists_backward, tf.zeros_like(dists_backward))
        # dists_forward is for each element in gt, the closest distance to this element
        dists_forward = tf.reduce_mean(dists_forward, axis=1)
        dists_backward = tf.reduce_mean(dists_backward, axis=1)
        CD_dist = forward_weight * dists_forward + dists_backward
        # CD_dist_norm = CD_dist / radius
        cd_loss = tf.reduce_mean(CD_dist)
        return cd_loss  # , None
Example #10
def get_loss_pcn(gt, pred, gt_mask, num_parts, batch_size):
    dists_forward_total = tf.zeros(batch_size)
    dists_backward_total = tf.zeros(batch_size)
    for part in range(num_parts):
        dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(
            pred[:, part], gt[:, part])
        # zero out the non-existing parts
        dists_forward = tf.reduce_sum(tf.multiply(dists_forward,
                                                  gt_mask[:, part]),
                                      axis=-1)
        dists_backward = tf.reduce_sum(tf.multiply(dists_backward,
                                                   gt_mask[:, part]),
                                       axis=-1)
        dists_forward_total += dists_forward
        dists_backward_total += dists_backward

    loss = dists_forward_total + dists_backward_total
    # divide by the number of existing (unmasked) parts per sample
    div = tf.reduce_sum(tf.reduce_mean(gt_mask, axis=-1), axis=-1)
    loss = tf.reduce_mean(tf.div(loss, div))

    return loss * 100
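
The shapes below are reconstructed from the indexing above rather than stated in the source; a hypothetical setup would look like:

import tensorflow as tf

batch_size, num_parts, num_points = 8, 4, 512
gt = tf.placeholder(tf.float32, [batch_size, num_parts, num_points, 3])
pred = tf.placeholder(tf.float32, [batch_size, num_parts, num_points, 3])
# gt_mask[b, p, :] is all ones when part p exists in sample b, else all zeros
gt_mask = tf.placeholder(tf.float32, [batch_size, num_parts, num_points])
loss = get_loss_pcn(gt, pred, gt_mask, num_parts, batch_size)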
Example #11
# NAME = FLAGS.name

print(GT_DIR)
gt_paths = glob(os.path.join(GT_DIR, '*.xyz'))

gt_names = [os.path.basename(p)[:-4] for p in gt_paths]
print(len(gt_paths))

gt = load(gt_paths[0])[:, :3]
pred_placeholder = tf.placeholder(tf.float32, [1, 8192, 3])
gt_placeholder = tf.placeholder(tf.float32, [1, 8192, 3])
pred_tensor, centroid, furthest_distance = normalize_point_cloud(
    pred_placeholder)
gt_tensor, centroid, furthest_distance = normalize_point_cloud(gt_placeholder)

cd_forward, _, cd_backward, _ = tf_nndistance.nn_distance(
    pred_tensor, gt_tensor)
cd_forward = cd_forward[0, :]
cd_backward = cd_backward[0, :]

percentages = np.array([0.008, 0.012])


def cal_nearest_distance(queries, pc, k=2):
    """Distance from each query to its nearest neighbor in pc; with k=2,
    dis[:, 1] skips the zero self-distance when queries come from pc itself.
    """
    knn_search = NearestNeighbors(n_neighbors=k, algorithm='auto')
    knn_search.fit(pc)
    dis, knn_idx = knn_search.kneighbors(queries, return_distance=True)
    return dis[:, 1]
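
A typical use (my example, not from the source) is estimating a cloud's nearest-neighbor spacing against itself:

import numpy as np

pc = np.random.rand(8192, 3).astype(np.float32)
spacing = cal_nearest_distance(pc, pc)  # one distance per point
print(spacing.mean())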

Example #12
    # view num
    view_num = 33

    # path
    data_type = 'test/'
    ShapeNetv1_dir = '/home/cuda/Alex/trai/PC-NBV/Shapenet_v1/'
    pc_dir = "/home/cuda/Alex/trai/PC-NBV/Output_model_blender/" + data_type + "/pcd"
    save_dir = "/home/cuda/Alex/trai/PC-NBV/NBV_data/shapenet_33_views_640x480/test"
    model_dir = '/home/cuda/Alex/trai/PC-NBV/Shapenet_v1/' + data_type

    # for calculating surface coverage and register
    part_tensor = tf.placeholder(tf.float32, (1, None, 3))
    gt_tensor = tf.placeholder(tf.float32, (1, None, 3))
    sess = tf.Session()
    dist1, _, dist2, _ = tf_nndistance.nn_distance(part_tensor, gt_tensor)

    class_list = os.listdir(model_dir)

    f = open('generate_nbv.log', 'w+')

    for class_id in class_list:

        model_list = os.listdir(
            os.path.join(ShapeNetv1_dir, data_type, class_id))

        for model in model_list:
            save_model_path = os.path.join(save_dir, model)
            if os.path.exists(save_model_path):
                print("skip " + save_model_path)
                continue
Example #13
def chamfer(pcd1, pcd2):
    # nn_distance returns squared distances, so take the square root to
    # average true Euclidean nearest-neighbor distances
    dist1, _, dist2, _ = tf_nndistance.nn_distance(pcd1, pcd2)
    dist1 = tf.reduce_mean(tf.sqrt(dist1))
    dist2 = tf.reduce_mean(tf.sqrt(dist2))
    return (dist1 + dist2) / 2
Example #14
def get_cd_loss(pred, gt, radius):
    """ pred: BxNxC,
        gt: BxNxC, """
    dists_forward, _, dists_backward, _ = tf_nndistance.nn_distance(gt, pred)
    # dists_forward is, for each element in gt, the closest distance to pred
    CD_dist = 0.8 * dists_forward + 0.2 * dists_backward
    CD_dist = tf.reduce_mean(CD_dist, axis=1)
    CD_dist_norm = CD_dist / radius
    cd_loss = tf.reduce_mean(CD_dist_norm)
    return cd_loss, None

def get_uniform_loss_knn(pred):
    var, _ = knn_point(6, pred, pred)
    mean = tf.reduce_mean(var, axis=2)
    _, variance = tf.nn.moments(mean, axes=[1])
    variance1 = tf.reduce_sum(variance)
    _, var = tf.nn.moments(var, axes=[2])
    variance2 = tf.reduce_sum(var)
    return variance1 + variance2


if __name__ == '__main__':
    gt = tf.constant([[[1, 0, 0], [2, 0, 0], [3, 0, 0], [4, 0, 0]]], tf.float32)
    pred = tf.constant([[[-10, 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]]], tf.float32)

    dists_forward, idx1, dists_backward, idx2 = tf_nndistance.nn_distance(gt, pred)
    with tf.Session() as sess:
        print(idx1.eval())  # for each element in gt, the index of the nearest point in pred
        print(idx2.eval())  # for each element in pred, the index of the nearest point in gt
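        # Expected output, worked out by hand (distances from nn_distance are squared):
        #   idx1 = [[1 2 3 3]]    idx2 = [[0 0 1 2]]
        #   dists_forward  = [[0. 0. 0. 1.]]
        #   dists_backward = [[121. 0. 0. 0.]]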