Example #1
def insert_batch(data, label, last_batch=False):
    global h5_batch_data, h5_batch_label
    global buffer_size, h5_index
    data_size = data.shape[0]
    # If there is enough space, just insert
    if buffer_size + data_size <= h5_batch_data.shape[0]:
        h5_batch_data[buffer_size:buffer_size + data_size, ...] = data
        h5_batch_label[buffer_size:buffer_size + data_size] = label
        buffer_size += data_size
    else:  # not enough space
        capacity = h5_batch_data.shape[0] - buffer_size
        assert (capacity >= 0)
        if capacity > 0:
            h5_batch_data[buffer_size:buffer_size + capacity,
                          ...] = data[0:capacity, ...]
            h5_batch_label[buffer_size:buffer_size + capacity,
                           ...] = label[0:capacity, ...]
        # Save batch data and label to h5 file, reset buffer_size
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label,
                               data_dtype, label_dtype)
        print('Stored {0} with size {1}'.format(h5_filename,
                                                h5_batch_data.shape[0]))
        fout_all_files.write(h5_filename + '\n')
        # -------------store the data to show -----------------
        if SHOW_DATA:
            fout = open(output_filename_prefix + '_' + str(h5_index) + '.txt',
                        'w')
            for batch in range(h5_batch_data.shape[0]):
                for p in range(h5_batch_data.shape[1]):
                    fout.write('v %f %f %f \n' % (h5_batch_data[batch, p, 0],
                                                  h5_batch_data[batch, p, 1],
                                                  h5_batch_data[batch, p, 2]))
            fout.close()
        # --------------------------------------------------
        h5_index += 1
        buffer_size = 0
        # recursive call
        insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch)
    if last_batch and buffer_size > 0:
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...],
                               h5_batch_label[0:buffer_size,
                                              ...], data_dtype, label_dtype)
        print('Stored {0} with size {1}'.format(h5_filename, buffer_size))
        fout_all_files.write(h5_filename + '\n')
        # -------------store the data to show -----------------
        if SHOW_DATA:
            fout = open(output_filename_prefix + '_' + str(h5_index) + '.txt',
                        'w')
            for batch in range(h5_batch_data.shape[0]):
                for p in range(h5_batch_data.shape[1]):
                    fout.write('v %f %f %f \n' % (h5_batch_data[batch, p, 0],
                                                  h5_batch_data[batch, p, 1],
                                                  h5_batch_data[batch, p, 2]))
            fout.close()
        # --------------------------------------------------
        h5_index += 1
        buffer_size = 0
    return
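A minimal driver sketch for the function above; the buffer globals are not shown in the example, so the sizes, dtypes, and dummy batches below are assumptions rather than values from the original code, and it still relies on data_prep_util.save_h5 from the surrounding project.
import numpy as np

# Assumed setup; every name and size here is illustrative.
H5_BATCH_SIZE = 1000              # point clouds per .h5 file
NUM_POINT = 2048                  # points per cloud
data_dtype, label_dtype = 'float32', 'uint8'
SHOW_DATA = False
output_filename_prefix = 'data/ply_data'
fout_all_files = open('data/all_files.txt', 'w')

h5_batch_data = np.zeros((H5_BATCH_SIZE, NUM_POINT, 3), dtype=np.float32)
h5_batch_label = np.zeros((H5_BATCH_SIZE,), dtype=np.uint8)
buffer_size = 0                   # current fill level of the buffer
h5_index = 0                      # index of the next .h5 file to write

# Dummy batches stand in for whatever pipeline feeds insert_batch;
# last_batch=True on the final call flushes the partially filled buffer.
batches = [(np.random.rand(32, NUM_POINT, 3).astype(np.float32),
            np.random.randint(0, 40, size=32, dtype=np.uint8))
           for _ in range(5)]
for i, (data, label) in enumerate(batches):
    insert_batch(data, label, last_batch=(i == len(batches) - 1))
fout_all_files.close()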
Example #2
def insert_batch(data, label, last_batch=False):
    global h5_batch_data, h5_batch_label
    global buffer_size, h5_index
    data_size = data.shape[0]
    # If there is enough space, just insert
    if buffer_size + data_size <= h5_batch_data.shape[0]:
        h5_batch_data[buffer_size:buffer_size + data_size, ...] = data
        h5_batch_label[buffer_size:buffer_size + data_size] = label
        buffer_size += data_size
    else:  # not enough space
        capacity = h5_batch_data.shape[0] - buffer_size
        assert (capacity >= 0)
        if capacity > 0:
            h5_batch_data[buffer_size:buffer_size + capacity,
                          ...] = data[0:capacity, ...]
            h5_batch_label[buffer_size:buffer_size + capacity,
                           ...] = label[0:capacity, ...]
        # Save batch data and label to h5 file, reset buffer_size
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label,
                               data_dtype, label_dtype)
        #print('Stored {0} with size {1}'.format(h5_filename, h5_batch_data.shape[0]))
        h5_index += 1
        buffer_size = 0
        # recursive call
        insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch)
    if last_batch and buffer_size > 0:
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...],
                               h5_batch_label[0:buffer_size,
                                              ...], data_dtype, label_dtype)
        #print('Stored {0} with size {1}'.format(h5_filename, buffer_size))
        h5_index += 1
        buffer_size = 0
    return
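Every example here calls data_prep_util.save_h5 without showing it; the following is a minimal sketch of what it presumably does, writing a 'data' and a 'label' dataset with h5py (the compression settings are an assumption).
import h5py

def save_h5(h5_filename, data, label, data_dtype='uint8', label_dtype='uint8'):
    # Store the point clouds and their labels as two datasets in one HDF5 file.
    with h5py.File(h5_filename, 'w') as h5_fout:
        h5_fout.create_dataset('data', data=data, dtype=data_dtype,
                               compression='gzip', compression_opts=4)
        h5_fout.create_dataset('label', data=label, dtype=label_dtype,
                               compression='gzip', compression_opts=1)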
Example #3
def insert_batch(data, label, last_batch=False):
    global h5_batch_data, h5_batch_label
    global buffer_size, h5_index
    data_size = data.shape[0]
    # If there is enough space, just insert
    if buffer_size + data_size <= h5_batch_data.shape[0]:
        h5_batch_data[buffer_size:buffer_size+data_size, ...] = data
        h5_batch_label[buffer_size:buffer_size+data_size] = label
        buffer_size += data_size
    else: # not enough space
        capacity = h5_batch_data.shape[0] - buffer_size
        assert capacity >= 0
        if capacity > 0:
            h5_batch_data[buffer_size:buffer_size+capacity, ...] = data[0:capacity, ...]
            h5_batch_label[buffer_size:buffer_size+capacity, ...] = label[0:capacity, ...]
        # Save batch data and label to h5 file, reset buffer_size
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label, data_dtype, label_dtype)
        print('Stored {0} with size {1}'.format(h5_filename, h5_batch_data.shape[0]))
        h5_index += 1
        buffer_size = 0
        # recursive call
        insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch)
    if last_batch and buffer_size > 0:
        h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
        data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...], h5_batch_label[0:buffer_size, ...], data_dtype, label_dtype)
        print('Stored {0} with size {1}'.format(h5_filename, buffer_size))
        h5_index += 1
        buffer_size = 0
    return
Example #4
def save_to_h5_files(data, labels, split):
    for i in range(math.ceil(len(data) / FILE_SIZE)):
        file_name = split + '_aligned_modelnet_' + str(i) + '.h5'
        # Write only the current chunk of FILE_SIZE models to this .h5 file
        data_util.save_h5('data/modelnet40_aligned/' + file_name,
                          data[i * FILE_SIZE:(i + 1) * FILE_SIZE],
                          labels[i * FILE_SIZE:(i + 1) * FILE_SIZE],
                          data_dtype='float32')
        doc_file = open('data/modelnet40_aligned/' + split + '_files.txt', 'a')
        doc_file.write('data/modelnet40_aligned/' + file_name + '\n')
        doc_file.close()
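A hypothetical call site for the function above; FILE_SIZE and the split arrays are defined elsewhere in the original project, so the names and values below are placeholders.
import os

os.makedirs('data/modelnet40_aligned', exist_ok=True)
FILE_SIZE = 2048  # assumed number of models per .h5 chunk
# train_points / train_labels stand in for the arrays loaded by the caller.
save_to_h5_files(train_points, train_labels, 'train')
save_to_h5_files(test_points, test_labels, 'test')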
Example #5
def insert_batch(data, label, output_dir, is_last_batch=False):
    global h5_batch_data, h5_batch_label
    global buffer_size, h5_index
    data_size = data.shape[0]

    #   print(f'data.shape = {data.shape}')

    all_files = os.path.join(output_dir, 'all_files.txt')
    print(f'all_files = {all_files}')

    # If there is enough space, just insert
    if buffer_size + data_size <= h5_batch_data.shape[0]:
        h5_batch_data[buffer_size:buffer_size + data_size, ...] = data
        h5_batch_label[buffer_size:buffer_size + data_size] = label
        buffer_size += data_size
    else:  # not enough space
        capacity = h5_batch_data.shape[0] - buffer_size
        assert (capacity >= 0)
        if capacity > 0:
            h5_batch_data[buffer_size:buffer_size + capacity,
                          ...] = data[0:capacity, ...]
            h5_batch_label[buffer_size:buffer_size + capacity,
                           ...] = label[0:capacity, ...]
        # Save batch data and label to h5 file, reset buffer_size
        h5_filename = os.path.join(output_dir, f'ply_data_{h5_index!s}.h5')
        print(f'h5_filename = {h5_filename}')
        data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label,
                               data_dtype, label_dtype)
        write_to_all_files(all_files, h5_filename)

        h5_index += 1
        buffer_size = 0
        # recursive call
        insert_batch(data[capacity:, ...], label[capacity:, ...], output_dir,
                     is_last_batch)
    if is_last_batch and buffer_size > 0:
        h5_filename = os.path.join(output_dir, f'ply_data_{h5_index!s}.h5')
        print('is last batch')
        print(f'h5_filename = {h5_filename}')
        data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...],
                               h5_batch_label[0:buffer_size,
                                              ...], data_dtype, label_dtype)
        write_to_all_files(all_files, h5_filename)

        h5_index += 1
        buffer_size = 0

    return
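Example #5 also relies on a write_to_all_files helper that is not shown; a minimal sketch of what it presumably does, mirroring the fout_all_files.write calls in Example #1 (appending each chunk's filename to an all_files.txt manifest):
def write_to_all_files(all_files_path, h5_filename):
    # Append the newly written .h5 filename so downstream loaders
    # can enumerate every chunk in order.
    with open(all_files_path, 'a') as f:
        f.write(h5_filename + '\n')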
Example #6
def eval_one_epoch(sess, ops):
    is_training = False
    total_seen = 0
    for fn in range(len(INFER_FILES)):
        log_string('---- file number ' + str(fn+1) + ' out of ' + str(len(INFER_FILES)) + ' files ----')
        current_data, current_label = provider.loadDataFile(INFER_FILES[fn])
        current_data = current_data[:, 0:NUM_IN_POINTS, :]
        out_data_generated = np.zeros(
            (current_data.shape[0], NUM_GENERATED_POINTS, current_data.shape[2]))

        out_data_sampled_fw_plus_simple_continued_fps = np.zeros(
            (current_data.shape[0], NUM_OUT_POINTS, current_data.shape[2]))

        current_label_orig = current_label
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            print(str(batch_idx) + '/' + str(num_batches - 1))
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx], ops['labels_pl']: current_label[start_idx:end_idx], ops['is_training_pl']: is_training}
            generated_points, idx = sess.run([ops['generated_points'], ops['idx']], feed_dict=feed_dict)

            out_data_generated[start_idx:end_idx, :, :] = generated_points

            outcloud_fw_plus_simple_continued_fps = SAMPLER_MODEL.nn_matching(current_data[start_idx:end_idx], idx, NUM_OUT_POINTS)
            out_data_sampled_fw_plus_simple_continued_fps[start_idx:end_idx, :, :] = outcloud_fw_plus_simple_continued_fps[:, 0:NUM_OUT_POINTS, :]

            total_seen += BATCH_SIZE

        file_name = os.path.split(INFER_FILES[fn])
        if not os.path.exists(OUT_DATA_PATH + '/generated/'): os.makedirs(OUT_DATA_PATH + '/generated/')
        data_prep_util.save_h5(OUT_DATA_PATH + '/generated/' + file_name[1], out_data_generated, current_label_orig, data_dtype, label_dtype)

        if not os.path.exists(OUT_DATA_PATH + '/sampled/'): os.makedirs(OUT_DATA_PATH + '/sampled/')
        data_prep_util.save_h5(OUT_DATA_PATH + '/sampled/' + file_name[1], out_data_sampled_fw_plus_simple_continued_fps, current_label_orig, data_dtype, label_dtype)
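The SAMPLER_MODEL.nn_matching call used in Examples #6 to #8 is not shown; a rough sketch of the matching step it presumably performs is below. It gathers, per cloud, the input points selected by the returned indices; the real implementation likely also completes the set to NUM_OUT_POINTS (for example with farthest point sampling), which this sketch only approximates.
import numpy as np

def nn_matching_sketch(full_pc, idx, num_out_points):
    # full_pc: (B, N, 3) input clouds, idx: (B, K) indices into each cloud.
    out = np.zeros((full_pc.shape[0], num_out_points, full_pc.shape[2]))
    for b in range(full_pc.shape[0]):
        unique_idx = np.unique(idx[b])[:num_out_points]
        out[b, :len(unique_idx)] = full_pc[b, unique_idx]
    return out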
Example #7
def eval_one_epoch(sess, ops):
    is_training = False
    total_seen = 0
    for fn in range(len(INFER_FILES)):
        log_string("---- file number " + str(fn + 1) + " out of " +
                   str(len(INFER_FILES)) + " files ----")
        current_data, current_label = provider.loadDataFile(INFER_FILES[fn])
        current_data = current_data[:, 0:NUM_IN_POINTS, :]
        out_data_simplified = np.zeros(
            (current_data.shape[0], NUM_SIMPLIFIED_POINTS,
             current_data.shape[2]))
        out_data_soft_projected = np.zeros(
            (current_data.shape[0], NUM_SIMPLIFIED_POINTS,
             current_data.shape[2]))
        out_data_hard_projected = np.zeros(
            (current_data.shape[0], NUM_SIMPLIFIED_POINTS,
             current_data.shape[2]))

        out_data_sampled = np.zeros(
            (current_data.shape[0], NUM_OUT_POINTS, current_data.shape[2]))

        current_label_orig = current_label
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            print(str(batch_idx) + "/" + str(num_batches - 1))
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            feed_dict = {
                ops["pointclouds_pl"]: current_data[start_idx:end_idx],
                ops["labels_pl"]: current_label[start_idx:end_idx],
                ops["is_training_pl"]: is_training,
            }
            (
                simplified_points,
                soft_projected_points,
                hard_projected_points,
                nn_indices,
            ) = sess.run(
                [
                    ops["simplified_points"],
                    ops["soft_projected_points"],
                    ops["hard_projected_points"],
                    ops["idx"],
                ],
                feed_dict=feed_dict,
            )

            out_data_simplified[start_idx:end_idx, :, :] = simplified_points
            out_data_soft_projected[
                start_idx:end_idx, :, :] = soft_projected_points
            out_data_hard_projected[
                start_idx:end_idx, :, :] = hard_projected_points

            outcloud_sampled = SAMPLER_MODEL.nn_matching(
                current_data[start_idx:end_idx], nn_indices, NUM_OUT_POINTS)
            out_data_sampled[
                start_idx:end_idx, :, :] = outcloud_sampled[:, 0:
                                                            NUM_OUT_POINTS, :]

            total_seen += BATCH_SIZE

        file_name = os.path.split(INFER_FILES[fn])
        if not os.path.exists(OUT_DATA_PATH + "/simplified/"):
            os.makedirs(OUT_DATA_PATH + "/simplified/")
        data_prep_util.save_h5(
            OUT_DATA_PATH + "/simplified/" + file_name[1],
            out_data_simplified,
            current_label_orig,
            data_dtype,
            label_dtype,
        )

        if not os.path.exists(OUT_DATA_PATH + "/soft_projected/"):
            os.makedirs(OUT_DATA_PATH + "/soft_projected/")
        data_prep_util.save_h5(
            OUT_DATA_PATH + "/soft_projected/" + file_name[1],
            out_data_soft_projected,
            current_label_orig,
            data_dtype,
            label_dtype,
        )

        if not os.path.exists(OUT_DATA_PATH + "/hard_projected/"):
            os.makedirs(OUT_DATA_PATH + "/hard_projected/")
        data_prep_util.save_h5(
            OUT_DATA_PATH + "/hard_projected/" + file_name[1],
            out_data_hard_projected,
            current_label_orig,
            data_dtype,
            label_dtype,
        )

        if not os.path.exists(OUT_DATA_PATH + "/sampled/"):
            os.makedirs(OUT_DATA_PATH + "/sampled/")
        data_prep_util.save_h5(
            OUT_DATA_PATH + "/sampled/" + file_name[1],
            out_data_sampled,
            current_label_orig,
            data_dtype,
            label_dtype,
        )
Example #8
def eval_one_epoch(sess, ops):
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    num_unique_idx = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('---- file number ' + str(fn + 1) + ' out of ' + str(len(TEST_FILES)) + ' files ----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:, 0:NUM_IN_POINTS, :]

        current_label_orig = current_label
        current_label = np.squeeze(current_label)
        print(current_data.shape)
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        out_data_generated = np.zeros((current_data.shape[0], NUM_OUT_POINTS, current_data.shape[2]))
        out_data_sampled = np.zeros((current_data.shape[0], NUM_OUT_POINTS, current_data.shape[2]))

        for batch_idx in range(num_batches):
            print(str(batch_idx) + '/' + str(num_batches - 1))

            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES))  # 0/1 for classes
            rotated_data = current_data[start_idx:end_idx, :, :]
            feed_dict = {ops['pointclouds_pl']: rotated_data, ops['labels_pl']: current_label[start_idx:end_idx], ops['is_training_pl']: is_training}
            generated_points, idx = sess.run([ops['generated_points'], ops['idx']], feed_dict=feed_dict)

            if MATCH_OUTPUT:
                outcloud = SAMPLER_MODEL.nn_matching(rotated_data, idx, NUM_OUT_POINTS)

            else:
                outcloud = generated_points

            for ii in range(0, BATCH_SIZE):
                num_unique_idx += np.size(np.unique(idx[ii]))
            feed_dict = {ops['pointclouds_pl']: rotated_data, ops['outcloud']: outcloud, ops['labels_pl']: current_label[start_idx:end_idx], ops['is_training_pl']: is_training}
            loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)

            out_data_generated[start_idx:end_idx, :, :] = generated_points
            out_data_sampled[start_idx:end_idx, :, :] = outcloud

            batch_pred_sum += pred_val
            batch_pred_val = np.argmax(pred_val, 1)
            for el_idx in range(cur_batch_size):
                batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
            batch_loss_sum += loss_val * cur_batch_size

            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i - start_idx] == l)
                fout.write('%d, %d\n' % (pred_val[i - start_idx], l))

        if SAVE_POINTS:
            if not os.path.exists(OUT_DATA_PATH + '/generated/'):
                os.makedirs(OUT_DATA_PATH + '/generated/')
            if not os.path.exists(OUT_DATA_PATH + '/sampled/'):
                os.makedirs(OUT_DATA_PATH + '/sampled/')
            file_name = os.path.split(TEST_FILES[fn])
            data_prep_util.save_h5(OUT_DATA_PATH + '/generated/' + file_name[1], out_data_generated, current_label_orig, data_dtype, label_dtype)
            data_prep_util.save_h5(OUT_DATA_PATH + '/sampled/' + file_name[1], out_data_sampled, current_label_orig, data_dtype, label_dtype)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))))
    log_string('total_seen: %f' % (total_seen))

    class_accuracies = np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))