Example No. 1
def struct2ply(filename, struct_data, dist_threshold):

    branch_data = struct_data['branches']
    cyl_data = struct_data['cylinders']

    vt = []
    ft = []
    ids = []
    for k, v in branch_data.items():
        for vi in v['cylinder_ids']:
            cyl = cyl_data[vi]
            if cyl['length'] <= dist_threshold:
                vertices, facets = cylinder_from_spheres(
                    cyl['p1'], cyl['p2'], cyl['rad'], 10)
                vt.append(vertices)
                ft.append(facets)
                ids.append(np.full(vertices.shape[0], k))

    new_vv = np.concatenate(vt)
    new_ids = np.concatenate(ids)
    # Merge per-cylinder facet lists, offsetting each block's vertex indices
    # past the vertices already merged (max index so far + 1).
    new_ft = np.array(ft[0])
    for ff in ft[1:]:
        tf = np.array(ff)
        new_ft = np.vstack((new_ft, (tf + np.max(new_ft) + 1)))

    save_ply(filename, new_vv, new_ft, scalar_array=new_ids)
    return
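For orientation, here is a minimal sketch of how struct2ply might be invoked. The struct_data layout is inferred from the fields the function reads ('branches', 'cylinders', 'cylinder_ids', 'p1', 'p2', 'rad', 'length'); the sample values are hypothetical:

import numpy as np

# Hypothetical single-cylinder structure matching the fields read above
struct_data = {
    'cylinders': [{'p1': np.zeros(3), 'p2': np.array([0.0, 0.0, 0.5]),
                   'rad': 0.02, 'length': 0.5}],
    'branches': {0: {'cylinder_ids': [0]}},
}
struct2ply('tree.ply', struct_data, dist_threshold=1.0)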
Example No. 2
def export_ply_monolithic_with_RGB_labels(batched_data, data_num,
                                          filepath_pred, batched_labels,
                                          setting):
    assert len(batched_data) == len(data_num)
    assert batched_data.shape[0:2] == batched_labels.shape

    # Take the predefined valid number of points out of each batch and build contiguous arrays.
    tmp_data = []
    tmp_labels = []
    for idx, (data_batch,
              label_batch) in enumerate(zip(batched_data, batched_labels)):
        num_points = data_num[idx]
        tmp_data.append(data_batch[:num_points])
        tmp_labels.append(label_batch[:num_points])
    data = np.concatenate(tmp_data)
    labels = np.concatenate(tmp_labels)

    # Create a lookup table (LUT) for efficient label-to-color mapping
    cmap = cm.get_cmap('tab20')
    label_max = setting.num_class
    cmap_LUT = np.array(
        [cmap(label / label_max)[:3] for label in range(label_max)])
    cmap_LUT[0] = (0.0, 0.0, 0.0)

    # Create segment colors according to the labels, using the color LUT
    rgb_labels = cmap_LUT[labels]

    folder = Path(filepath_pred).parent / 'PLY'
    filename = Path(filepath_pred).with_suffix('.ply').name
    filepath_label_ply = folder / filename
    data_utils.save_ply(data, str(filepath_label_ply), colors=rgb_labels)
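The color-mapping step above is plain NumPy fancy indexing: indexing a (num_class, 3) lookup table with an integer label array yields one RGB row per point. A standalone sketch with toy values:

import numpy as np
from matplotlib import cm

labels = np.array([0, 1, 1, 3])                        # one integer label per point
cmap = cm.get_cmap('tab20')
lut = np.array([cmap(i / 4)[:3] for i in range(4)])    # (4, 3) RGB lookup table
rgb = lut[labels]                                      # (4, 3): one color row per point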
Example No. 3
def export_ply_monolithic(batched_data, data_num, filepath_pred,
                          batched_labels, setting):
    assert len(batched_data) == len(data_num)
    assert batched_data.shape[0:2] == batched_labels.shape

    # Take the predefined valid number of points out of each batch and build contiguous arrays.
    tmp_data = []
    tmp_labels = []
    for idx, (data_batch,
              label_batch) in enumerate(zip(batched_data, batched_labels)):
        num_points = data_num[idx]
        tmp_data.append(data_batch[:num_points])
        tmp_labels.append(label_batch[:num_points])
    data = np.concatenate(tmp_data)
    labels = np.concatenate(tmp_labels).reshape(-1, 1)

    folder = Path(filepath_pred).parent / 'PLY'
    filename = Path(filepath_pred).with_suffix('.ply').name
    filepath_label_ply = folder / filename
    data_utils.save_ply(data, str(filepath_label_ply), labels=labels)
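data_utils.save_ply itself is not shown in these examples. As a rough assumption about what such a helper does (not the project's actual implementation), a minimal ASCII-PLY point writer with optional per-point colors could look like this:

def save_ply_sketch(points, filename, colors=None):
    # points: (N, 3) array of floats; colors: optional (N, 3) floats in [0, 1]
    with open(filename, 'w') as f:
        f.write('ply\nformat ascii 1.0\n')
        f.write('element vertex %d\n' % len(points))
        f.write('property float x\nproperty float y\nproperty float z\n')
        if colors is not None:
            f.write('property uchar red\nproperty uchar green\nproperty uchar blue\n')
        f.write('end_header\n')
        for i, p in enumerate(points):
            line = '%f %f %f' % tuple(p)
            if colors is not None:
                line += ' %d %d %d' % tuple(int(c * 255) for c in colors[i])
            f.write(line + '\n')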
Example No. 4
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)

            xforms_np, rotations_np = pf.get_xforms(
                BATCH_SIZE,
                rotation_range=rotation_range_val,
                scaling_range=scaling_range_val,
                order=setting.rotation_order)

            # Augment batched point clouds by rotation and jittering
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
                ops['xforms']: xforms_np,
                ops['rotations']: rotations_np,
                ops['jitter_range']: np.array([jitter_val])
            }

            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            pred_val = np.sum(pred_val, axis=1)
            # pred_val = np.argmax(pred_val, 1)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)
                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                ply_filename = os.path.join(DUMP_DIR, ply_filename)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
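provider.rotate_point_cloud_by_angle is used throughout these snippets but not shown. In PointNet-style providers it typically rotates each cloud around the vertical (Y) axis; a minimal sketch under that assumption:

import numpy as np

def rotate_point_cloud_by_angle_sketch(batch, angle):
    # batch: (B, N, 3) array; rotate every cloud by `angle` radians around Y
    c, s = np.cos(angle), np.sin(angle)
    rotation = np.array([[c, 0.0, s],
                         [0.0, 1.0, 0.0],
                         [-s, 0.0, c]])
    return batch @ rotation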
Example No. 5
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_C)]
    total_correct_class = [0 for _ in range(NUM_C)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    # data_utils.shuffle_points(TEST_DATA)

    # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)
    # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)
    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    ####################################################
    print(current_data.shape)
    print(current_label.shape)

    filtered_data = []
    filtered_label = []
    for i in range(current_label.shape[0]):
        if current_label[i] in OBJECTDATASET_TO_MODELNET:
            filtered_label.append(current_label[i])
            filtered_data.append(current_data[i, :])

    filtered_data = np.array(filtered_data)
    filtered_label = np.array(filtered_label)
    print(filtered_data.shape)
    print(filtered_label.shape)

    current_data = filtered_data
    current_label = filtered_label
    ###################################################

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros((cur_batch_size, 40))  # scores over the 40 ModelNet classes
        batch_pred_classes = np.zeros((cur_batch_size, 40))  # 0/1 votes over the 40 ModelNet classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
        # pred_val = np.argmax(batch_pred_classes, 1)
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        for i in range(start_idx, end_idx):
            total_seen += 1
            if pred_val[i - start_idx] not in MODELNET_TO_OBJECTDATASET:
                continue
            pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
            # if (pred_val[i-start_idx] == current_label[i]):
            if (pred == current_label[i]):
                total_correct += 1

        for i in range(start_idx, end_idx):

            l = current_label[i]
            total_seen_class[l] += 1

            if pred_val[i - start_idx] not in MODELNET_TO_OBJECTDATASET:
                pred_label = "NA"
            else:
                pred = MODELNET_TO_OBJECTDATASET[pred_val[i - start_idx]]
                total_correct_class[l] += (pred == l)

                pred_label = SHAPE_NAMES[pred]

            # groundtruth_label = SHAPE_NAMES[MODELNET_TO_OBJECTDATASET[l]]
            groundtruth_label = SHAPE_NAMES[l]

            fout.write('%s, %s\n' % (pred_label, groundtruth_label))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, groundtruth_label, pred_label)
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, groundtruth_label, pred_label)
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    # log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))

    seen_class_counts = []
    seen_correct_class = []
    for i in range(len(total_seen_class)):
        if total_seen_class[i] != 0:
            seen_class_counts.append(total_seen_class[i])
            seen_correct_class.append(total_correct_class[i])
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(seen_correct_class) /
        np.array(seen_class_counts, dtype=np.float64))))

    for i, name in enumerate(SHAPE_NAMES):
        if (total_seen_class[i] == 0):
            accuracy = -1
        else:
            accuracy = total_correct_class[i] / float(total_seen_class[i])
        log_string('%10s:\t%0.3f' % (name, accuracy))
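The seen-class filtering above can also be expressed as one masked NumPy computation. An equivalent standalone sketch with toy counts:

import numpy as np

total_seen_class = np.array([5, 0, 3], dtype=np.float64)     # class 1 never seen
total_correct_class = np.array([4, 0, 2], dtype=np.float64)
mask = total_seen_class > 0
avg_class_acc = np.mean(total_correct_class[mask] / total_seen_class[mask])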
Example No. 6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', '-f', help='Path to data folder')
    parser.add_argument('--save_ply', '-s', help='Convert .pts to .ply', action='store_true')
    args = parser.parse_args()
    print(args)

    batch_size = 2048

    folder_cifar10 = args.folder if args.folder else '../../data/cifar10/cifar-10-batches-py'
    folder_pts = os.path.join(os.path.dirname(folder_cifar10), 'pts')

    train_test_files = [('train', ['data_batch_%d' % (idx + 1) for idx in range(5)]),
                        ('test', ['test_batch'])]

    data = np.zeros((batch_size, 1024, 6))
    label = np.zeros((batch_size), dtype=np.int32)
    for tag, filelist in train_test_files:
        data_list = []
        labels_list = []
        for filename in filelist:
            batch = unpickle(os.path.join(folder_cifar10, filename))
            data_list.append(np.reshape(batch[b'data'], (10000, 3, 32, 32)))
            labels_list.append(batch[b'labels'])
        images = np.concatenate(data_list, axis=0)
        labels = np.concatenate(labels_list, axis=0)

        idx_h5 = 0
        filename_filelist_h5 = os.path.join(os.path.dirname(folder_cifar10), '%s_files.txt' % tag)
        with open(filename_filelist_h5, 'w') as filelist_h5:
            for idx_img, image in enumerate(images):
                points = []
                pixels = []
                for x in range(32):
                    for z in range(32):
                        points.append((x, random.random() * 1e-6, z))
                        pixels.append((image[0, x, z], image[1, x, z], image[2, x, z]))
                points_array = np.array(points)
                pixels_array = (np.array(pixels).astype(np.float32) / 255) - 0.5

                points_min = np.amin(points_array, axis=0)
                points_max = np.amax(points_array, axis=0)
                points_center = (points_min + points_max) / 2
                scale = np.amax(points_max - points_min) / 2
                points_array = (points_array - points_center) * (0.8 / scale)

                if args.save_ply:
                    filename_pts = os.path.join(folder_pts, tag, '{:06d}.ply'.format(idx_img))
                    data_utils.save_ply(points_array, filename_pts, colors=pixels_array+0.5)

                idx_in_batch = idx_img % batch_size
                data[idx_in_batch, ...] = np.concatenate((points_array, pixels_array), axis=-1)
                label[idx_in_batch] = labels[idx_img]
                if ((idx_img + 1) % batch_size == 0) or idx_img == len(images) - 1:
                    item_num = idx_in_batch + 1
                    filename_h5 = os.path.join(os.path.dirname(folder_cifar10), '%s_%d.h5' % (tag, idx_h5))
                    print('{}-Saving {}...'.format(datetime.now(), filename_h5))
                    filelist_h5.write('./%s_%d.h5\n' % (tag, idx_h5))

                    file = h5py.File(filename_h5, 'w')
                    file.create_dataset('data', data=data[0:item_num, ...])
                    file.create_dataset('label', data=label[0:item_num, ...])
                    file.close()

                    idx_h5 = idx_h5 + 1
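Examples 6 to 8 share the same normalization step: move the bounding-box center to the origin, then scale so the largest bounding-box half-extent maps to 0.8. Isolated as a sketch:

import numpy as np

def normalize_to_box_sketch(points, extent=0.8):
    # points: (N, 3); center at the origin, largest half-extent becomes `extent`
    lo, hi = points.min(axis=0), points.max(axis=0)
    center = (lo + hi) / 2
    scale = (hi - lo).max() / 2
    return (points - center) * (extent / scale)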
Example No. 7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', '-f', help='Path to data folder')
    parser.add_argument('--point_num', '-p', help='Point number for each sample', type=int, default=1024)
    parser.add_argument('--save_ply', '-s', help='Convert .pts to .ply', action='store_true')
    parser.add_argument('--augment', '-a', help='Data augmentation', action='store_true')
    args = parser.parse_args()
    print(args)

    batch_size = 2048
    fold_num = 3

    tag_aug = '_ag' if args.augment else ''

    folder_svg = args.folder if args.folder else '../../data/tu_berlin/svg'
    root_folder = os.path.dirname(folder_svg)
    folder_pts = os.path.join(root_folder, 'pts' + tag_aug)
    filelist_svg = [line.strip() for line in open(os.path.join(folder_svg, 'filelist.txt'))]

    category_label = dict()
    with open(os.path.join(os.path.dirname(folder_svg), 'categories.txt'), 'w') as file_categories:
        for filename in filelist_svg:
            category = os.path.split(filename)[0]
            if category not in category_label:
                file_categories.write('%s %d\n' % (category, len(category_label)))
                category_label[category] = len(category_label)

    filelist_svg_failed = []
    data = np.zeros((batch_size, args.point_num, 6))
    label = np.zeros((batch_size), dtype=np.int32)
    for idx_fold in range(fold_num):
        filelist_svg_fold = [filename for i, filename in enumerate(filelist_svg) if i % fold_num == idx_fold]
        random.seed(idx_fold)
        random.shuffle(filelist_svg_fold)

        filename_filelist_svg_fold = os.path.join(root_folder, 'filelist_fold_%d.txt' % (idx_fold))
        with open(filename_filelist_svg_fold, 'w') as filelist_svg_fold_file:
            for filename in filelist_svg_fold:
                filelist_svg_fold_file.write('%s\n' % (filename))

        idx_h5 = 0
        idx = 0
        filename_filelist_h5 = os.path.join(root_folder, 'fold_%d_files%s.txt' % (idx_fold, tag_aug))
        with open(filename_filelist_h5, 'w') as filelist_h5_file:
            for idx_file, filename in enumerate(filelist_svg_fold):
                filename_svg = os.path.join(folder_svg, filename)
                try:
                    paths, attributes = svg2paths(filename_svg)
                except Exception:
                    filelist_svg_failed.append(filename_svg)
                    print('{}-Failed to parse {}!'.format(datetime.now(), filename_svg))
                    continue

                points_array = np.zeros(shape=(args.point_num, 3), dtype=np.float32)
                normals_array = np.zeros(shape=(args.point_num, 3), dtype=np.float32)

                path = Path()
                for p in paths:
                    p_non_empty = Path()
                    for segment in p:
                        if segment.length() > 0:
                            p_non_empty.append(segment)
                    if len(p_non_empty) != 0:
                        path.append(p_non_empty)

                path_list = []
                if args.augment:
                    for removal_idx in range(6):
                        path_with_removal = Path()
                        for p in path[:math.ceil((0.4 + removal_idx * 0.1) * len(paths))]:
                            path_with_removal.append(p)
                        path_list.append(path_with_removal)
                    path_list = path_list + augment(path, 6)
                else:
                    path_list.append(path)

                for path_idx, path in enumerate(path_list):
                    for sample_idx in range(args.point_num):
                        sample_idx_float = (sample_idx + random.random()) / (args.point_num - 1)
                        while True:
                            try:
                                point = path.point(sample_idx_float)
                                normal = path.normal(sample_idx_float)
                                break
                            except Exception:
                                sample_idx_float = random.random()
                                continue
                        points_array[sample_idx] = (point.real, sample_idx_float, point.imag)
                        normals_array[sample_idx] = (normal.real, random.random() * 1e-6, normal.imag)

                    points_min = np.amin(points_array, axis=0)
                    points_max = np.amax(points_array, axis=0)
                    points_center = (points_min + points_max) / 2
                    scale = np.amax(points_max - points_min) / 2
                    # anisotropic scale: x/z normalized into [-0.8, 0.8]; the y
                    # channel (the curve parameter) is compressed by a fixed 0.4
                    points_array = (points_array - points_center) * (0.8 / scale, 0.4, 0.8 / scale)

                    if args.save_ply:
                        tag_aug_idx = tag_aug + '_' + str(path_idx) if args.augment else tag_aug
                        filename_pts = os.path.join(folder_pts, filename[:-4] + tag_aug_idx + '.ply')
                        data_utils.save_ply(points_array, filename_pts, normals=normals_array)

                    idx_in_batch = idx % batch_size
                    data[idx_in_batch, ...] = np.concatenate((points_array, normals_array), axis=-1).astype(np.float32)
                    label[idx_in_batch] = category_label[os.path.split(filename)[0]]
                    if ((idx + 1) % batch_size == 0) \
                            or (idx_file == len(filelist_svg_fold) - 1 and path_idx == len(path_list) - 1):
                        item_num = idx_in_batch + 1
                        filename_h5 = 'fold_%d_%d%s.h5' % (idx_fold, idx_h5, tag_aug)
                        print('{}-Saving {}...'.format(datetime.now(), os.path.join(root_folder, filename_h5)))
                        filelist_h5_file.write('./%s\n' % (filename_h5))

                        file = h5py.File(os.path.join(root_folder, filename_h5), 'w')
                        file.create_dataset('data', data=data[0:item_num, ...])
                        file.create_dataset('label', data=label[0:item_num, ...])
                        file.close()

                        idx_h5 = idx_h5 + 1
                    idx = idx + 1

    if len(filelist_svg_failed) != 0:
        print('{}-Failed to parse {} sketches!'.format(datetime.now(), len(filelist_svg_failed)))
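The sampling loop relies on svgpathtools: svg2paths parses an SVG into Path objects, and path.point(t) for t in [0, 1] returns the curve location as a complex number (real part x, imaginary part y). A minimal illustration on a synthetic path:

from svgpathtools import Line, Path

path = Path(Line(0 + 0j, 100 + 50j))   # one straight segment from (0, 0) to (100, 50)
for t in (0.0, 0.5, 1.0):
    pt = path.point(t)                 # complex number: pt.real = x, pt.imag = y
    print(t, pt.real, pt.imag)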
Example No. 8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', '-f', help='Path to data folder')
    parser.add_argument('--point_num', '-p', help='Point number for each sample', type=int, default=256)
    parser.add_argument('--save_ply', '-s', help='Convert .pts to .ply', action='store_true')
    args = parser.parse_args()
    print(args)

    batch_size = 2048

    folder_mnist = args.folder if args.folder else '../../data/mnist/zips'
    folder_pts = os.path.join(os.path.dirname(folder_mnist), 'pts')

    mnist_data = MNIST(folder_mnist)
    mnist_train_test = [(mnist_data.load_training(), 'train'), (mnist_data.load_testing(), 'test')]

    data = np.zeros((batch_size, args.point_num, 4))
    label = np.zeros((batch_size), dtype=np.int32)
    for ((images, labels), tag) in mnist_train_test:
        idx_h5 = 0
        filename_filelist_h5 = os.path.join(os.path.dirname(folder_mnist), '%s_files.txt' % tag)
        point_num_total = 0
        with open(filename_filelist_h5, 'w') as filelist_h5:
            for idx_img, image in enumerate(images):
                points = []
                pixels = []
                for idx_pixel, pixel in enumerate(image):
                    if pixel == 0:
                        continue
                    x = idx_pixel // 28
                    z = idx_pixel % 28
                    points.append((x, random.random() * 1e-6, z))
                    pixels.append(pixel)
                point_num_total = point_num_total + len(points)
                pixels_sum = sum(pixels)
                probs = [pixel / pixels_sum for pixel in pixels]
                indices = np.random.choice(list(range(len(points))), size=args.point_num,
                                           replace=(len(points) < args.point_num), p=probs)
                points_array = np.array(points)[indices]
                pixels_array_1d = (np.array(pixels)[indices].astype(np.float32) / 255) - 0.5
                pixels_array = np.expand_dims(pixels_array_1d, axis=-1)

                points_min = np.amin(points_array, axis=0)
                points_max = np.amax(points_array, axis=0)
                points_center = (points_min + points_max) / 2
                scale = np.amax(points_max - points_min) / 2
                points_array = (points_array - points_center) * (0.8 / scale)

                if args.save_ply:
                    filename_pts = os.path.join(folder_pts, tag, '{:06d}.ply'.format(idx_img))
                    data_utils.save_ply(points_array, filename_pts, colors=np.tile(pixels_array, (1, 3)) + 0.5)

                idx_in_batch = idx_img % batch_size
                data[idx_in_batch, ...] = np.concatenate((points_array, pixels_array), axis=-1)
                label[idx_in_batch] = labels[idx_img]
                if ((idx_img + 1) % batch_size == 0) or idx_img == len(images) - 1:
                    item_num = idx_in_batch + 1
                    filename_h5 = os.path.join(os.path.dirname(folder_mnist), '%s_%d.h5' % (tag, idx_h5))
                    print('{}-Saving {}...'.format(datetime.now(), filename_h5))
                    filelist_h5.write('./%s_%d.h5\n' % (tag, idx_h5))

                    file = h5py.File(filename_h5, 'w')
                    file.create_dataset('data', data=data[0:item_num, ...])
                    file.create_dataset('label', data=label[0:item_num, ...])
                    file.close()

                    idx_h5 = idx_h5 + 1
        print('Average point number in each sample is: %f' % (point_num_total / len(images)))
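The resampling step uses np.random.choice with a probability vector, so brighter pixels are more likely to be kept; sampling falls back to replacement when a digit has fewer foreground pixels than point_num. A standalone sketch with toy intensities:

import numpy as np

pixels = np.array([10, 200, 40, 250], dtype=np.float32)   # toy pixel intensities
probs = pixels / pixels.sum()                             # sampling weights
indices = np.random.choice(len(pixels), size=8,
                           replace=(len(pixels) < 8), p=probs)
print(indices)  # brighter pixels appear more often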
Example No. 9
def eval_one_epoch(sess, ops, gmm, num_votes):
    """ ops: dict mapping from string to tf ops """
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    if (".h5" in TEST_FILE):
        current_data, current_label = data_utils.get_current_data_h5(
            TEST_DATA, TEST_LABELS, NUM_POINT)
    else:
        current_data, current_label = data_utils.get_current_data(
            TEST_DATA, TEST_LABELS, NUM_POINT)

    current_label = np.squeeze(current_label)

    num_batches = current_data.shape[0] // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        cur_batch_size = end_idx - start_idx

        # Aggregating BEG
        batch_loss_sum = 0  # sum of losses for the batch
        batch_pred_sum = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # score for classes
        batch_pred_classes = np.zeros(
            (cur_batch_size, NUM_CLASSES))  # 0/1 for classes
        for vote_idx in range(num_votes):
            rotated_data = provider.rotate_point_cloud_by_angle(
                current_data[start_idx:end_idx, :, :],
                vote_idx / float(num_votes) * np.pi * 2)
            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['w_pl']: gmm.weights_,
                ops['mu_pl']: gmm.means_,
                ops['sigma_pl']: np.sqrt(gmm.covariances_),
                ops['is_training_pl']: is_training
            }
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
        pred_val = np.argmax(batch_pred_sum, 1)
        # Aggregating END

        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
        total_correct += correct
        total_seen += cur_batch_size
        loss_sum += batch_loss_sum

        for i in range(start_idx, end_idx):
            l = current_label[i]
            total_seen_class[l] += 1
            total_correct_class[l] += (pred_val[i - start_idx] == l)
            fout.write('%s, %s\n' %
                       (SHAPE_NAMES[pred_val[i - start_idx]], SHAPE_NAMES[l]))

            if pred_val[i - start_idx] != l and FLAGS.visu:  # ERROR CASE, DUMP!
                img_filename = '%d_label_%s_pred_%s.jpg' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                img_filename = os.path.join(DUMP_DIR, img_filename)
                output_img = pc_util.point_cloud_three_views(
                    np.squeeze(current_data[i, :, :]))
                scipy.misc.imsave(img_filename, output_img)

                #save ply
                ply_filename = '%d_label_%s_pred_%s.ply' % (
                    error_cnt, SHAPE_NAMES[l],
                    SHAPE_NAMES[pred_val[i - start_idx]])
                data_utils.save_ply(np.squeeze(current_data[i, :, :]),
                                    ply_filename)
                error_cnt += 1

    log_string('total seen: %d' % (total_seen))
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))

    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
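This variant additionally feeds a fitted Gaussian mixture into the graph; the w_pl/mu_pl/sigma_pl placeholders suggest a 3DmFV-style network. A gmm object with the attributes used above can come from scikit-learn, for example:

import numpy as np
from sklearn.mixture import GaussianMixture

points = np.random.rand(1000, 3)                   # toy point set
gmm = GaussianMixture(n_components=8, covariance_type='diag').fit(points)
weights, means = gmm.weights_, gmm.means_          # shapes (8,) and (8, 3)
sigmas = np.sqrt(gmm.covariances_)                 # (8, 3) with 'diag' covariances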
Example No. 10
    import pickle

    with open(
            os.path.join(os.path.dirname(args.load_ckpt), os.pardir,
                         'data.pickle'), 'rb') as f:
        data = pickle.load(f)
    with open(
            os.path.join(os.path.dirname(args.load_ckpt), os.pardir,
                         'labels_pred.pickle'), 'rb') as f:
        labels_pred = pickle.load(f)

    print('{}-Saving ply of {}...'.format(datetime.now(), args.file_name))
    scene_name = os.path.splitext(os.path.basename(
        args.file_name))[0]  # Get filename without extension
    # Create subfolder
    predictions_folder = os.path.join(os.path.dirname(args.load_ckpt),
                                      os.pardir, 'predictions', 'test')
    os.makedirs(predictions_folder, exist_ok=True)

    # Create point colors according to labels
    cmap = cm.get_cmap('tab20')
    label_max = setting.num_class
    cmap_LUT = [cmap(label / label_max)[:3] for label in range(label_max)]
    cmap_LUT[0] = (0.0, 0.0, 0.0)
    colors = np.array([cmap_LUT[label] for label in labels_pred.ravel()])
    filename = os.path.join(predictions_folder, scene_name + '_nudelholz.ply')
    data_utils.save_ply(data.reshape(-1, 3), filename, colors)

    print('{}-Done!'.format(datetime.now()))