Example #1
def get_transformnet_loss_rotationvector(pred_angle, pointclouds_angle, pointclouds_pl, pointclouds_gt):
    """ pred: B*NUM_CLASSES,
        label: B, """
    # Enforce the transformation as orthogonal matrix
    transform_xyz_pred = Tools3D.batch_rotationVector2mat_tf(pred_angle)
    point_cloud_transformed = tf.matmul(pointclouds_gt, transform_xyz_pred)
    align_loss, emd_dist = get_emd_loss(point_cloud_transformed, pointclouds_pl)
    #
    # align_loss = 100 * align_loss
    # tf.summary.scalar('alignloss', align_loss)
    #
    # tf.add_to_collection('alignloss', align_loss)

    transform_xyz_raw = Tools3D.batch_rotationVector2mat_tf(pointclouds_angle)  # unused; kept for the matrix-space alternative below
    # mat_diff_loss = tf.nn.l2_loss(transform_xyz_pred - transform_xyz_raw)

    mat_diff_loss = tf.nn.l2_loss(pred_angle - pointclouds_angle)
    tf.summary.scalar('matloss', mat_diff_loss)
    tf.add_to_collection('matloss', mat_diff_loss)

    # Enforce that the predicted transformation matches the ground-truth one.
    # Only the parameter-space term enters the total loss; the EMD distance is returned for monitoring.
    total_loss = 10 * mat_diff_loss + tf.losses.get_regularization_loss()

    tf.add_to_collection('losses', total_loss)

    return total_loss, emd_dist
Example #2
def generate_random_rotationVec(angleNum=10000):
    angles = np.zeros((angleNum, 4), dtype=float)
    for k in range(angleNum):
        # axis sampled from the positive octant [0, 1)^3, then normalized
        rot_axis = np.random.uniform(size=3)
        norm = Tools3D.vector_length(rot_axis)

        rot_axis = (rot_axis[0] / norm, rot_axis[1] / norm, rot_axis[2] / norm)
        angles[k, 0:-1] = rot_axis
        angles[k, 3] = np.random.uniform() * 2 * np.pi
    return angles
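
The 4-vectors above store a unit rotation axis in the first three entries and the rotation angle in the last one; the later examples turn them into matrices with Tools3D.rotation_vector_To_rotation_matrix. For reference, here is a minimal, self-contained numpy sketch of Rodrigues' rotation formula; axis_angle_to_matrix is an illustrative name and only an assumption about what the Tools3D conversion computes.

import numpy as np

def axis_angle_to_matrix(axis, angle):
    """Rodrigues' formula: rotation matrix for a unit axis and an angle in radians."""
    x, y, z = np.asarray(axis, dtype=float) / np.linalg.norm(axis)  # re-normalize defensively
    K = np.array([[0.0, -z, y],
                  [z, 0.0, -x],
                  [-y, x, 0.0]])  # skew-symmetric cross-product matrix of the axis
    return np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)

# usage mirroring the (axis, angle) layout produced above
vec = np.array([1.0, 0.0, 0.0, np.pi / 2])
R = axis_angle_to_matrix(vec[:-1], vec[-1])
points = np.random.rand(1024, 3)
rotated = np.dot(points, R)  # same row-vector convention as np.dot(data, rotationMat) in the dataset examples below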
Example #3
def eval_real_epoch(sess, ops, test_writer):
    PC_PATH = os.path.join(POINTCLOUD_DIR,
                           'Dataset/Data/partialface/real_scan/')
    INPUT_FOLDER = os.path.join(PC_PATH, 'sampled')
    OUTPUT_FOLDER = os.path.join(PC_PATH, 'pred')

    from glob import glob
    is_training = False

    samples = glob(INPUT_FOLDER + "/*.xyz")
    samples.sort(reverse=False)
    total_num = len(samples)
    for i in range(total_num):
        filename = os.path.basename(samples[i]).replace('.xyz', '')
        print(filename)
        pointclouds_pl = np.loadtxt(samples[i])
        pointclouds_pl = np.expand_dims(pointclouds_pl, axis=0)

        feed_dict = {
            ops['pointclouds_pl']: pointclouds_pl,
            ops['is_training_pl']: is_training,
        }
        # loss_val, pred_angle = sess.run([ops['loss'], ops['pred_angle']], feed_dict=feed_dict)
        pred_angle = sess.run([ops['pred_angle']], feed_dict=feed_dict)
        pred_angle = np.squeeze(pred_angle)
        print(pred_angle.shape)

        print(pred_angle)
        transform_xyz = Tools3D.quaternion_To_rotation_matrix(
            np.squeeze(pred_angle))
        transform_xyz = np.array(transform_xyz)
        print(transform_xyz)
        np.savetxt(os.path.join(INPUT_FOLDER, filename + '.txt'),
                   np.expand_dims(pred_angle, axis=0),
                   fmt='%0.6f')

        # pointcloud_gt_s: ground-truth template point cloud (defined at module level)
        point_cloud_transformed = np.matmul(pointcloud_gt_s, transform_xyz)

        # _point_cloud_transformed = sess.run(point_cloud_transformed, feed_dict=feed_dict)
        img_filename = '%d_coarse.png' % (i)
        img_filename = os.path.join(OUTPUT_FOLDER, img_filename)

        point_input = np.squeeze(pointclouds_pl)
        points_gt = np.squeeze(pointcloud_gt_s)
        points_rotated = np.squeeze(point_cloud_transformed)

        print(points_gt.shape, points_rotated.shape,
              point_cloud_transformed.shape)

        info = 'Nothing'

        pc_util.point_cloud_three_points(point_input, point_cloud_transformed,
                                         point_cloud_transformed, img_filename,
                                         info)
Example #4
def generateDataset_Quaternions(data, quaternions):
    pcData = np.zeros((quaternions.shape[0], data.shape[0], data.shape[1]),
                      dtype=float)
    print(pcData.shape)
    print(quaternions.shape)
    for i in range(quaternions.shape[0]):
        assert (quaternions.shape[1] == 4)
        rotationMat = Tools3D.quaternion_To_rotation_matrix(quaternions[i, :])
        pcData[i, :, :] = np.dot(data, rotationMat)

    return pcData
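
generateDataset_Quaternions delegates the conversion to Tools3D.quaternion_To_rotation_matrix. For reference, here is a minimal numpy sketch of one common quaternion-to-matrix convention (components ordered w, x, y, z, normalized first); the actual ordering and sign convention used by Tools3D is an assumption here, and quat_to_matrix is an illustrative name.

import numpy as np

def quat_to_matrix(q):
    """Rotation matrix for a quaternion q = (w, x, y, z); q is normalized first."""
    w, x, y, z = np.asarray(q, dtype=float) / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

# usage mirroring the loop above: rotate one copy of the template per quaternion
q = np.random.randn(4)
q /= np.linalg.norm(q)
points = np.random.rand(2048, 3)
rotated = np.dot(points, quat_to_matrix(q))  # row-vector convention, as in np.dot(data, rotationMat)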
Example #5
def generateDataset_rotationVector(data, rotationVectors):
    pcData = np.zeros((rotationVectors.shape[0], data.shape[0], data.shape[1]),
                      dtype=float)
    print(pcData.shape)
    print(rotationVectors.shape)
    for i in range(rotationVectors.shape[0]):
        assert (rotationVectors.shape[1] == 4)
        rotationMat = Tools3D.rotation_vector_To_rotation_matrix(
            rotationVectors[i, 0:-1], rotationVectors[i, -1])
        pcData[i, :, :] = np.dot(data, rotationMat)

    return pcData
Example #6
def get_tuneNet_loss(pred_quaternion, input_quaternion, gt_quaternion, pointclouds_pl, pointclouds_gt, ratio=1):
    """ pred: B*NUM_CLASSES,
        label: B, """

    # Enforce the transformation as orthogonal matrix

    pred_matrix = Tools3D.batch_quaternion2mat_tf(pred_quaternion)

    # input_quaternion_T = Tools3D.batch_quaternion_T_tf(input_quaternion)

    # Target quaternion for the prediction (the residual-rotation form is kept commented for reference)
    pred_gt = input_quaternion  # Tools3D.batch_quaternion_Mul_quaternion_tf(input_quaternion_T, gt_quaternion)
    # pred_gt_matrix = Tools3D.batch_quaternion2mat_tf(pred_gt)
    # point_cloud_gt_transformed = tf.matmul(pointclouds_pl, pred_gt_matrix)
    # gt_matrix = Tools3D.batch_quaternion2mat_tf(gt_quaternion)
    # point_cloud_gt_transformed = tf.matmul(pointclouds_gt, pred_gt_matrix)

    pointclouds_pl_transformed = tf.matmul(pointclouds_gt, pred_matrix)

    emd_loss, emd_dist = get_emd_loss(pointclouds_pl_transformed, pointclouds_pl)
    emd_loss = 20000 * emd_loss

    sign_loss = tf.sign(input_quaternion) - tf.sign(pred_quaternion)
    sign_loss = tf.nn.l2_loss(sign_loss)


    vec_diff_loss = pred_gt - pred_quaternion

    vec_diff_loss = 10 * tf.nn.l2_loss(vec_diff_loss)

    total_loss = vec_diff_loss + emd_loss + tf.losses.get_regularization_loss()

    tf.summary.scalar('emd_loss', emd_loss)
    tf.add_to_collection('emd_loss', emd_loss)
    tf.summary.scalar('vecloss', vec_diff_loss)
    tf.add_to_collection('vecloss', vec_diff_loss)
    tf.summary.scalar('sign_loss', sign_loss)
    tf.add_to_collection('sign_loss', sign_loss)
    tf.add_to_collection('losses', total_loss)

    return total_loss, emd_dist
Example #7
def random_rotate_point_cloud(batch_data, rotation_base, hasnormal=False):
    B, N, C = batch_data.shape
    num_rotations, _ = rotation_base.shape
    # random 0/1 seeds (immediately overridden by the fixed pattern below)
    seeds = np.random.randint(2, size=B)
    # rotate every other cloud in the batch
    seeds = np.array([1, 0]).reshape(1, 2).repeat(B, axis=0).reshape(-1)

    rotation_idx = np.random.randint(num_rotations, size=B)

    for i in range(B):
        points = np.squeeze(batch_data[i, :, :])
        if seeds[i] != 0:
            rotation_mat = Tools3D.quaternion_To_rotation_matrix(rotation_base[rotation_idx[i], :])
            # print(rotation_mat)
            batch_data[i, :, :3] = np.dot(points[:, :3], rotation_mat)
            if hasnormal:
                batch_data[i, :, 3:6] = np.dot(points[:, 3:6], rotation_mat)

    return batch_data
Example #8
def get_tuneNet_loss5(pred_quaternion, input_quaternion, gt_quaternion, pointclouds_pl, pointclouds_gt, ratio=1):
    """ pred: B*NUM_CLASSES,
        label: B, """

    # Enforce the transformation as orthogonal matrix
    BATCH_SIZE, NUM_POINT, _ = pointclouds_pl.get_shape()

    pred_matrix = Tools3D.batch_quaternion2mat_tf(pred_quaternion)

    input_matrix = Tools3D.batch_quaternion2mat_tf(input_quaternion)
    output_matrix = tf.matmul(input_matrix, pred_matrix)  # composed rotation (not used below)

    output_quaternion = Tools3D.batch_quaternion_Mul_quaternion_tf(input_quaternion, pred_quaternion)

    input_quaternion_T = Tools3D.batch_quaternion_T_tf(input_quaternion)

    pred_gt = Tools3D.batch_quaternion_Mul_quaternion_tf(input_quaternion_T, gt_quaternion)

    #output_quaternion = tf.nn.l2_normalize(output_quaternion, dim=1)

    # transform_xyz_pred = tf.transpose(transform_xyz_pred,[0,2,1])
    point_cloud_transformed = tf.matmul(pointclouds_pl, pred_matrix)

    gt_matrix = Tools3D.batch_quaternion2mat_tf(gt_quaternion)
    point_cloud_gt_transformed = tf.matmul(pointclouds_gt, gt_matrix)

    emd_loss, emd_dist = get_emd_loss(point_cloud_transformed, point_cloud_gt_transformed)
    emd_loss = 5000 * emd_loss


    sign_loss = tf.sign(gt_quaternion) - tf.sign(output_quaternion)
    sign_loss = 10*tf.nn.l2_loss(sign_loss)

    vec_diff_loss = pred_gt - pred_quaternion

    vec_diff_loss = 10 * tf.nn.l2_loss(vec_diff_loss)
    sign_loss = 0.1 * sign_loss

    total_loss = vec_diff_loss #+ tf.losses.get_regularization_loss()

    tf.summary.scalar('emd_loss', emd_loss)
    tf.add_to_collection('emd_loss', emd_loss)
    tf.summary.scalar('vecloss', vec_diff_loss)
    tf.add_to_collection('vecloss', vec_diff_loss)
    tf.summary.scalar('sign_loss', sign_loss)
    tf.add_to_collection('sign_loss', sign_loss)
    tf.add_to_collection('losses', total_loss)

    return total_loss, emd_dist
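
get_tuneNet_loss5 composes quaternions with Tools3D.batch_quaternion_Mul_quaternion_tf and uses Tools3D.batch_quaternion_T_tf as a conjugate/inverse to form the residual rotation pred_gt. For reference, here is a minimal numpy sketch of the Hamilton product and conjugate under an assumed (w, x, y, z) ordering; quat_mul and quat_conj are illustrative names, not the project's API.

import numpy as np

def quat_mul(a, b):
    """Hamilton product a * b for quaternions in (w, x, y, z) order."""
    aw, ax, ay, az = a
    bw, bx, by, bz = b
    return np.array([
        aw * bw - ax * bx - ay * by - az * bz,
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by - ax * bz + ay * bw + az * bx,
        aw * bz + ax * by - ay * bx + az * bw,
    ])

def quat_conj(q):
    """Conjugate (inverse for unit quaternions)."""
    return np.asarray(q) * np.array([1.0, -1.0, -1.0, -1.0])

# residual rotation taking the input pose to the ground truth, analogous to pred_gt above
q_input = np.array([np.cos(0.3), np.sin(0.3), 0.0, 0.0])
q_gt = np.array([np.cos(0.8), 0.0, np.sin(0.8), 0.0])
q_residual = quat_mul(quat_conj(q_input), q_gt)
assert np.allclose(quat_mul(q_input, q_residual), q_gt)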
Example #9
def rotation_test():
    print('-------------')
    folder_name = 'transformation_256_8_4_4_2'
    input_folder = os.path.join(POINTCLOUD_DIR, 'Dataset/%s/partialface' % 'einstein', "partial_scan/%s" % (folder_name))
    TestFolder = os.path.join(POINTCLOUD_DIR, 'Dataset/test')
    SamplingFile = os.path.join(SAMPLING_FOLDER, "%s.xyz" % (folder_name))
    samplingData = np.loadtxt(SamplingFile)

    NAME = 'einstein'
    INPUT_FOLDER = os.path.join(POINTCLOUD_DIR, 'Dataset/%s' % (NAME))
    filename = os.path.join(INPUT_FOLDER, '%s_gt.xyz' % (NAME))
    gt_data = np.loadtxt(filename)
    samples = glob(input_folder + "/*.xyz")

    rotMat = Tools3D.quaternion_To_rotation_matrix(samplingData[0, :4])

    data = np.loadtxt(samples[0])
    dataT = np.dot(gt_data, rotMat)
    filename = os.path.basename(samples[0]).replace('.xyz', '')

    np.savetxt(os.path.join(TestFolder, filename + '_p.xyz'), data, fmt='%0.6f')
    np.savetxt(os.path.join(TestFolder, filename + '_gt.xyz'), dataT, fmt='%0.6f')
    print(rotMat)
Example #10
def eval_one_epoch(sess, ops, test_writer):
    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros(
        (BATCH_SIZE, NUM_POINT, TEST_DATASET.num_channel()))
    cur_batch_angle = np.zeros((BATCH_SIZE, 4))
    cur_batch_label = np.zeros((BATCH_SIZE))

    loss_sum = 0
    batch_idx = 0

    while TEST_DATASET.has_next_batch():
        batch_data, batch_angle, batch_data_label = TEST_DATASET.next_batch(
            augment=True, sess=sess)
        bsize = batch_data.shape[0]
        print('Batch: %03d, batch size: %d' % (batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end entries are left over from the previous batch
        cur_batch_label[0:bsize] = batch_data_label
        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_angle[0:bsize, ...] = batch_angle[:, :4]
        feed_dict = {
            ops['pointclouds_pl']: cur_batch_data,
            ops['pointclouds_gt']: pointcloud_gt_val,
            ops['pointclouds_gt_big']: pointcloud_gt_big_val,
            ops['pointclouds_angle']: cur_batch_angle,
            ops['is_training_pl']: is_training,
        }
        # loss_val, pred_angle = sess.run([ops['loss'], ops['pred_angle']], feed_dict=feed_dict)
        summary, step, loss_val, pred_angle, cd_dists, knn_dists = sess.run(
            [
                ops['merged'], ops['step'], ops['loss'], ops['pred_angle'],
                ops['cd_dists'], ops['knn_dists']
            ],
            feed_dict=feed_dict)

        test_writer.add_summary(summary, step)

        loss_sum += loss_val
        batch_idx += 1

        transform_xyz_input = Tools3D.batch_quaternion2mat(cur_batch_angle)

        transform_xyz = Tools3D.batch_quaternion2mat(pred_angle)
        point_cloud_transformed = np.matmul(pointcloud_gt_val, transform_xyz)
        point_cloud_gt_transformed = np.matmul(pointcloud_gt_val,
                                               transform_xyz_input)

        # _point_cloud_transformed = sess.run(point_cloud_transformed, feed_dict=feed_dict)

        for i in range(bsize):
            index = cur_batch_label[i]
            img_filename = '%d.png' % (index)
            img_filename = os.path.join(DUMP_DIR, img_filename)

            points_gt = np.squeeze(point_cloud_gt_transformed[i, :, :])
            points_rotated = np.squeeze(cur_batch_data[i, :, :])
            points_align = np.squeeze(point_cloud_transformed[i, :, :])
            # print(points_rotated.shape)
            # print(point_cloud_transformed.shape)
            # print(points_align.shape)
            info_input = pc_util.log_visu_vec('Input Data %d' % (index),
                                              cur_batch_angle[i, :])
            pre_angle = pc_util.log_visu_vec('Pred Data', pred_angle[i, :])
            matrix_input = pc_util.log_visu_matrix(
                'Input Matrix', np.squeeze(transform_xyz_input[i, :, :]))
            matrix_pred = pc_util.log_visu_matrix(
                'Pred Matrix', np.squeeze((transform_xyz[i, :, :])))
            # print(point_cloud_transformed[i,:,:].shape)
            cd_loss = cd_dists[i]
            knn_loss = knn_dists[i]
            matloss = np.sum(
                np.square(transform_xyz_input[i, :, :] -
                          transform_xyz[i, :, :])) / 2
            vecloss = np.sum(
                np.square(pred_angle[i, :] - cur_batch_angle[i, :])) / 2

            loss_cd = pc_util.log_visu_loss('CD  Loss', cd_loss)
            loss_knn = pc_util.log_visu_loss('KNN  Loss', knn_loss)
            loss_mat = pc_util.log_visu_loss('MAT Loss', matloss)
            vec_mat = pc_util.log_visu_loss('VEC Loss', vecloss)

            info = info_input + pre_angle + matrix_input + matrix_pred + vec_mat + loss_mat + loss_cd + loss_knn

            pc_util.point_cloud_three_points(points_rotated, points_gt,
                                             points_align, img_filename, info)

            # scipy.misc.imsave(img_filename, output_img)

    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
    TEST_DATASET.reset()
Example #11
    def eval_one_frame(self,
                       points,
                       isSampled=False,
                       isNormalized=False,
                       isSaving=False):
        is_training = False

        PC_PATH = os.path.join(
            POINTCLOUD_DIR,
            'Dataset/%s/partialface/real_scan/' % (self.model_name))
        OUTPUT_FOLDER = os.path.join(PC_PATH, 'pred')
        #PointCloudOperator.get_pairwise_distance(points)
        if isSampled:
            # the learned transform is currently disabled: both branches apply the identity
            if self.predict_trans is not None:
                point_temp = np.dot(points.copy(),
                                    np.eye(3))  # self.predict_trans.T
            else:
                point_temp = np.dot(points.copy(), np.eye(3))
            point_temp_max = np.max(point_temp, axis=0)
            point_temp_min = np.min(point_temp, axis=0)

            #print('point_temp_max:',point_temp_max)
            _clip_ = np.where((point_temp[:, 1] < point_temp_max[1] * 0.9))
            points = points[_clip_]

        #  interv = int(points.shape[0]/1024)
        #  print(points.shape[0],interv)
        #  idx = np.arange(points.shape[0])
        #
        #  idx_n = [i for i in idx if i % interv == 0]
        #
        # idx_n = np.arange(points.shape[0])
        # np.random.shuffle(idx_n)
        #
        # if points.shape[0]<POINT_NUM:
        #     offset = POINT_NUM - points.shape[0]
        #     idx_n = np.concatenate([np.arange(points.shape[0]), np.random.randint(0, points.shape[0], size=offset)], axis=0)
        #     np.random.shuffle(idx_n)
        #
        #     #idx_n = np.random.randint(0,points.shape[0],size=POINT_NUM)
        #
        #
        # idx_n = idx_n[:POINT_NUM]
        #
        # np.random.shuffle(idx_n)
        #points = points[idx_n,...]

        if isNormalized:
            centroid = np.mean(points, axis=0, keepdims=True)
            print('------------centroid:', centroid)
            furthest_distance = np.amax(np.sqrt(
                np.sum((points - centroid)**2, axis=-1)),
                                        keepdims=True)
            points = (points - centroid) / furthest_distance
            distance = np.sqrt(np.sum(points**2, axis=-1))
            med_distance = np.median(distance)
            max_distance = np.max(distance)
            scale = max_distance / med_distance * 0.8
            print('med_distance:', med_distance, '--max_distance:',
                  max_distance)
            _clip_ = np.where(distance < scale * med_distance)
            points = points[_clip_]

            centroid = np.mean(points, axis=0, keepdims=True)
            print('------------centroid:', centroid)
            furthest_distance = np.amax(np.sqrt(
                np.sum((points - centroid)**2, axis=-1)),
                                        keepdims=True) * 0.89
            points = (points - centroid) / furthest_distance

        pointclouds_pl = np.expand_dims(points, axis=0)
        # pointclouds_pl[:,:,1:3] =  -pointclouds_pl[:,:,1:3]
        #pointclouds_pl[:,:,2] =  -pointclouds_pl[:,:,2]

        pointcloud_gt_big = np.expand_dims(self.pointcloud_gt_big, axis=0)
        pointcloud_gt_small = np.expand_dims(self.pointcloud_gt_small, axis=0)

        feed_dict = {
            self.ops['pointclouds_pl']: pointclouds_pl,
            self.ops['pointclouds_pl_big']: pointcloud_gt_big,
            self.ops['pointclouds_gt_small']: pointcloud_gt_small,
            self.ops['is_training_pl']: is_training,
        }
        # loss_val, pred_angle = sess.run([ops['loss'], ops['pred_angle']], feed_dict=feed_dict)
        pred_angle = self.sess.run([self.ops['pred_angle']],
                                   feed_dict=feed_dict)
        pred_angle = np.squeeze(pred_angle)
        pred_angle = pred_angle[:4]
        print(pred_angle)
        transform_xyz = Tools3D.quaternion_To_rotation_matrix(pred_angle)
        transform_xyz = np.array(transform_xyz)

        if isSaving and (time() - self.startime > 2.5):
            self.startime = time()
            point_cloud_transformed = np.matmul(self.pointcloud_gt,
                                                transform_xyz)

            point_input = np.squeeze(pointclouds_pl)
            points_gt = np.squeeze(self.pointcloud_gt)
            points_aligned = np.squeeze(point_cloud_transformed)

            info = 'Nothing'
            filename = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
            pc_filename = os.path.join(OUTPUT_FOLDER, '%s.xyz' % (filename))
            np.savetxt(pc_filename, points, fmt='%0.6f')

            img_filename = os.path.join(OUTPUT_FOLDER, '%s.png' % (filename))
            #img_filename = os.path.join(OUTPUT_FOLDER, '1.png')
            pc_util.point_cloud_three_points(point_input, points_aligned,
                                             points_aligned, img_filename,
                                             info)
        # if self.predict_trans is not None:
        #     transform_xyz = 0.2*transform_xyz + 0.8*self.predict_trans
        self.predict_trans = transform_xyz
        return transform_xyz, points
Example #12
def save_one_epoch(sess, ops, test_writer):
    is_training = False

    # Make sure batch data is of same size
    cur_batch_data = np.zeros(
        (BATCH_SIZE, NUM_POINT, EVAL_DATASET.num_channel()))
    cur_batch_angle = np.zeros((BATCH_SIZE, 4))
    cur_batch_label = np.zeros((BATCH_SIZE))

    loss_sum = 0
    batch_idx = 0
    partial_data, gt_quat, labels = EVAL_DATASET.get_all_data()
    rotated_data = np.zeros(partial_data.shape)
    pred_quat = np.zeros(gt_quat.shape)
    index = 0
    while EVAL_DATASET.has_next_batch():
        batch_data, batch_angle, batch_data_label = EVAL_DATASET.next_batch(
            augment=False)
        bsize = batch_data.shape[0]

        print('Batch: %03d, batch size: %d' % (batch_idx, bsize))
        # for the last batch in the epoch, the bsize:end entries are left over from the previous batch
        cur_batch_data[0:bsize, ...] = batch_data
        cur_batch_angle[0:bsize, ...] = batch_angle
        cur_batch_label[0:bsize] = batch_data_label
        feed_dict = {
            ops['pointclouds_pl']: cur_batch_data,
            ops['pointclouds_gt']: pointclouds_gt_val,
            ops['pointclouds_angle']: cur_batch_angle,
            ops['is_training_pl']: is_training,
        }
        # loss_val, pred_angle = sess.run([ops['loss'], ops['pred_angle']], feed_dict=feed_dict)
        summary, step, loss_val, pred_angle, cd_dists, knn_dists = sess.run(
            [
                ops['merged'], ops['step'], ops['loss'], ops['pred_angle'],
                ops['cd_dists'], ops['knn_dists']
            ],
            feed_dict=feed_dict)

        test_writer.add_summary(summary, step)

        loss_sum += loss_val
        batch_idx += 1

        #print(pred_angle.shape)

        start_idx = (batch_idx - 1) * BATCH_SIZE
        end_idx = min(batch_idx * BATCH_SIZE, partial_data.shape[0])

        transform_xyz = Tools3D.batch_quaternion2mat(pred_angle)
        point_cloud_transformed = np.matmul(pointcloud_gt_s, transform_xyz)

        for i in range(bsize):
            np.savetxt(os.path.join(BASE_DIR, 'test/%d.xyz' % (index)),
                       point_cloud_transformed[i, ...],
                       fmt='%0.6f')
            index = index + 1

        rotated_data[start_idx:end_idx, :, :] = point_cloud_transformed[
            0:bsize, ...]
        pred_quat[start_idx:end_idx, :] = pred_angle[0:bsize, :]

        # _point_cloud_transformed = sess.run(point_cloud_transformed, feed_dict=feed_dict)
    filename = os.path.join(ROOT_DIR,
                            'TuneNet/data/quaternion_512_32_train4.h5')
    #PointCloudOperator.save_fineTune_data(filename, partial_data, rotated_data, pred_quat, gt_quat, labels)

    log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
Example #13
def get_tuneNet_loss2(pred_quaternion, input_quaternion, gt_quaternion, pointclouds_pl, pointclouds_gt, ratio=1):
    """ pred: B*NUM_CLASSES,
        label: B, """

    # Enforce the transformation as orthogonal matrix
    BATCH_SIZE, NUM_POINT, _ = pointclouds_pl.get_shape()

    pred_matrix = Tools3D.batch_quaternion2mat_tf(pred_quaternion)

    input_matrix = Tools3D.batch_quaternion2mat_tf(input_quaternion)
    output_matrix = tf.matmul(input_matrix, pred_matrix)

    output_quaternion = Tools3D.batch_quaternion_Mul_quaternion_tf(input_quaternion, pred_quaternion)
    output_quaternion = tf.nn.l2_normalize(output_quaternion, dim=1)
    refined_matrix = Tools3D.batch_quaternion2mat_tf(output_quaternion)

    # transform_xyz_pred = tf.transpose(transform_xyz_pred,[0,2,1])
    point_cloud_transformed = tf.matmul(pointclouds_gt, output_matrix)

    gt_matrix = Tools3D.batch_quaternion2mat_tf(gt_quaternion)
    point_cloud_gt_transformed = tf.matmul(pointclouds_gt, gt_matrix)

    emd_loss, emd_dist = get_emd_loss(point_cloud_transformed, point_cloud_gt_transformed)
    emd_loss = 5000 * emd_loss

    abs_loss = tf.square(point_cloud_transformed - point_cloud_gt_transformed)
    abs_loss = 1000 * tf.reduce_mean(abs_loss)

    cd_dists, knn_dists = get_cd_loss_circle(point_cloud_transformed, point_cloud_gt_transformed,
                                             tf.constant(ratio, dtype=tf.int32))
    cd_loss = 5000 * tf.reduce_mean(cd_dists)
    knn_loss = 5000 * tf.reduce_mean(knn_dists)
    # mat_diff = tf.matmul(transform_xyz_pred, tf.transpose(transform_xyz_pred, perm=[0, 2, 1]))

    # mat_diff = transform_xyz_raw - refined_matrix
    # mat_diff_loss = tf.nn.l2_loss(mat_diff)

    orientation_loss1 = 1000 * tf.reduce_mean(tf.reduce_sum(gt_quaternion[:, :-1] * pred_quaternion[:, :-1], axis=1))
    orientation_loss2 = 1000 * tf.reduce_mean(tf.reduce_sum(input_quaternion[:, :-1] * pred_quaternion[:, :-1], axis=1))

    mat_diff_loss = tf.nn.l2_loss(refined_matrix - output_matrix)

    cos_loss = 1.0 - tf.squeeze(tf.reduce_sum(gt_matrix * output_matrix, axis=1))

    # cos_loss = 1.0 - tf.squeeze(tf.reduce_sum(output_quaternion*gt_quaternion,axis=1))
    cos_loss = 150 * tf.reduce_mean(cos_loss)

    sign_loss = tf.sign(gt_quaternion) - tf.sign(output_quaternion)
    sign_loss = 10*tf.nn.l2_loss(sign_loss)

    vec_diff_loss = gt_quaternion - output_quaternion

    vec_diff_loss = 10 * tf.nn.l2_loss(vec_diff_loss)
    # vec_diff_loss = 1.0 - tf.squeeze(tf.reduce_sum(gt_quaternion * output_quaternion, axis=1))
    # emd_loss = 20000 * emd_loss #1000

    #  vec_diff_loss = 200 * tf.reduce_mean(vec_diff_loss)  # *100
    sign_loss = 0.1 * sign_loss

    total_loss = emd_loss + cos_loss + tf.losses.get_regularization_loss()

    tf.summary.scalar('orientation_loss1', orientation_loss1)
    tf.add_to_collection('orientation_loss1', orientation_loss1)
    tf.summary.scalar('orientation_loss2', orientation_loss2)
    tf.add_to_collection('orientation_loss2', orientation_loss2)

    tf.summary.scalar('cos_loss', cos_loss)
    tf.add_to_collection('cos_loss', cos_loss)

    tf.summary.scalar('abs_loss', abs_loss)
    tf.add_to_collection('abs_loss', abs_loss)

    tf.summary.scalar('emd_loss', emd_loss)
    tf.add_to_collection('emd_loss', emd_loss)
    tf.summary.scalar('vecloss', vec_diff_loss)
    tf.add_to_collection('vecloss', vec_diff_loss)
    tf.summary.scalar('matloss', mat_diff_loss)
    tf.add_to_collection('matloss', mat_diff_loss)

    tf.summary.scalar('cd_loss', cd_loss)
    tf.add_to_collection('cd_loss', cd_loss)

    tf.summary.scalar('knn_loss', knn_loss)
    tf.add_to_collection('knn_loss', knn_loss)
    tf.summary.scalar('sign_loss', sign_loss)
    tf.add_to_collection('sign_loss', sign_loss)
    tf.add_to_collection('losses', total_loss)

    return total_loss, cd_dists
Example #14
def get_partialNet_loss(pred_result, pointclouds_quaternion, pointclouds_pl, pointclouds_gt, pointclouds_gt_big, pointclouds_gt_small, ratio=5):
    """ pred_result / pointclouds_quaternion: B x C, with the quaternion in the first 4 columns,
        pointclouds_pl / pointclouds_gt / pointclouds_gt_big / pointclouds_gt_small: B x N x 3 point clouds. """
    # Convert the predicted quaternion to a rotation matrix (orthogonal by construction)
    BATCH_SIZE = pred_result.get_shape()[0].value
    _, NUM_POINT, _ = pointclouds_gt.get_shape()
    #pred_transform = pred_result[:,4:8]
    pred_quaternion = pred_result[:,:4]
    transform_xyz_pred = Tools3D.batch_quaternion2mat_tf(pred_quaternion)
    # transform_xyz_pred = tf.transpose(transform_xyz_pred,[0,2,1])
    point_cloud_transformed = tf.matmul(pointclouds_gt_big, transform_xyz_pred)
    # align_loss, emd_dist = get_emd_loss(point_cloud_transformed, pointclouds_pl)
    # align_loss, cd_dist = get_cd_loss(pointclouds_pl, point_cloud_transformed, tf.constant(1.0))

    cd_dists, knn_dists = get_cd_loss_circle(pointclouds_pl, point_cloud_transformed,
                                             tf.constant(ratio, dtype=tf.int32))
    cd_loss = 5000 * tf.reduce_mean(cd_dists)
    knn_loss = 10 * 1000 * tf.reduce_mean(knn_dists)

    transform_xyz_gt = Tools3D.batch_quaternion2mat_tf(pointclouds_quaternion[:,:4])

    point_cloud_pred_transformed = tf.matmul(pointclouds_gt_small, transform_xyz_pred)
    point_cloud_gt_transformed = tf.matmul(pointclouds_gt_small, transform_xyz_gt)
    emd_loss, emd_dist = get_emd_loss(point_cloud_pred_transformed, point_cloud_gt_transformed)
    emd_loss = 2000 * emd_loss
    #
    # align_loss = 100 * align_loss
    #
    # tf.summary.scalar('alignloss',  align_loss)
    #
    # tf.add_to_collection('alignloss',  align_loss)
    #
    #transform_xyz_raw = Tools3D.batch_quaternion2mat_tf(pointclouds_quaternion)
    # transform_xyz_raw = tf.transpose(transform_xyz_raw, [0, 2, 1])
    mat_diff_loss = tf.nn.l2_loss(transform_xyz_pred - transform_xyz_gt)

    sign_pred = tf.reshape(tf.sign(pred_quaternion), [-1])
    sign_gt = tf.reshape(tf.sign(pointclouds_quaternion), [-1])

    sign_loss = tf.sign(pred_result) - tf.sign(pointclouds_quaternion)
    sign_loss = tf.nn.l2_loss(sign_loss)
    # mat_diff_loss = tf.nn.l2_loss(pred_quaternion-pointclouds_quaternion)

    # mat_diff_loss = mat_diff_loss #/ 10
    vec_diff_loss = tf.nn.l2_loss(pred_result - pointclouds_quaternion)

    mat_diff_loss = 1 * mat_diff_loss
    vec_diff_loss = 10 * vec_diff_loss
    sign_loss = 0.8 * sign_loss
    total_loss = vec_diff_loss + emd_loss + tf.losses.get_regularization_loss()

    tf.summary.scalar('emd_loss', emd_loss)
    tf.add_to_collection('emd_loss', emd_loss)

    tf.summary.scalar('cd_loss', cd_loss)
    tf.add_to_collection('cd_loss', cd_loss)
    tf.summary.scalar('knn_loss', knn_loss)
    tf.add_to_collection('knn_loss', knn_loss)
    tf.summary.scalar('sign_loss', sign_loss)
    tf.add_to_collection('sign_loss', sign_loss)
    tf.summary.scalar('vecloss', vec_diff_loss)
    tf.add_to_collection('vecloss', vec_diff_loss)
    tf.add_to_collection('losses', total_loss)

    return cd_dists, knn_dists  # note: returns the distance tensors rather than total_loss
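
Several of the losses above include a sign_loss built from tf.sign of the predicted and ground-truth quaternions. It is worth noting that a unit quaternion q and its negation -q describe the same rotation, so a sign-based penalty effectively pins the prediction to one of the two equivalent representations. A quick, self-contained numpy check, using an illustrative quat_to_matrix helper (one assumed convention, not part of the original code):

import numpy as np

def quat_to_matrix(q):
    """Rotation matrix for q = (w, x, y, z) under one common convention."""
    w, x, y, z = np.asarray(q, dtype=float) / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

q = np.random.randn(4)
q /= np.linalg.norm(q)
# q and -q give identical rotation matrices, but opposite element-wise signs
assert np.allclose(quat_to_matrix(q), quat_to_matrix(-q))
assert np.all(np.sign(q) == -np.sign(-q))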