Example #1
0
    def test_iou_on_simple_data(self):
        """IoU sanity checks: self-overlap is 1, disjoint grids are 0, half overlap is 0.5."""
        random_grid = tf.random.uniform(shape=(10, 10, 10), minval=0.0, maxval=1.0)
        complement = 1.0 - random_grid

        # A grid perfectly overlaps itself; a grid and its complement share nothing.
        self.assertEqual(metrics.iou(random_grid, random_grid), 1.0)
        self.assertEqual(metrics.iou(random_grid, complement), 0.0)

        full_grid = tf.ones(shape=(10, 10, 10))
        half_grid = tf.concat([tf.ones(shape=(10, 10, 5)), tf.zeros(shape=(10, 10, 5))], axis=2)
        self.assertEqual(metrics.iou(full_grid, half_grid), 0.5)
def run_inference(elem):
    """Run the model on ``elem``, optionally sampling many times and keeping the best.

    When ``ARGS.use_best_iou`` is unset this is a single forward pass.
    Otherwise the model is sampled 300 times and the inference whose
    'predicted_occ' scores the highest IoU against ``elem['gt_occ']`` is
    returned. Each sample is optionally published as it is drawn.

    Returns:
        The model's inference dict (never None).
    """
    if not ARGS.use_best_iou:
        return model.model(elem)

    best_iou = 0.0
    best_inference = None
    for _ in range(300):
        inference = model.model(elem)
        iou = metrics.iou(elem['gt_occ'], inference['predicted_occ'])
        if ARGS.publish_each_sample:
            publish_inference(inference)
        # Bug fix: always keep the first sample as a fallback. Previously,
        # if every sample scored iou == 0.0 the strict `iou > best_iou`
        # comparison never fired and this function returned None, crashing
        # callers that index into the result (e.g. publish_selection).
        if best_inference is None or iou > best_iou:
            best_iou = iou
            best_inference = inference
    return best_inference
Example #3
0
 def test_iou_on_shapes(self):
     """IoU on loaded shape fixtures: reflexive, bounded below 1 for distinct shapes, symmetric."""
     shapes = load_test_files()
     first, second = shapes[0], shapes[1]

     self.assertEqual(metrics.iou(first, first), 1.0)
     self.assertLess(metrics.iou(first, second), 1.0)
     # IoU must not depend on argument order.
     self.assertEqual(metrics.iou(first, second), metrics.iou(second, first))
Example #4
0
 def m(vg1, vg2):
     """Align ``vg2`` to ``vg1`` with ICP, then score the alignment by IoU."""
     aligned = fit.icp(vg2, vg1, scale=0.1, max_iter=10, downsample=2)
     return metrics.iou(vg1, aligned)
def publish_selection(metadata, str_msg):
    """Load the dataset element selected by ``str_msg``, publish it, and run inference.

    Skips into ``metadata`` using ``selection_map[str_msg.data]``, loads and
    simulates the input voxelgrid, and publishes it. If a model is loaded,
    runs inference, publishes the prediction, prints mismatch/IoU metrics,
    optionally iterates a multistep refinement, and optionally restarts the
    background sampler thread on the new element.

    Args:
        metadata: a tf.data-style dataset of element metadata (skippable).
        str_msg: ROS string message whose ``.data`` keys into ``selection_map``.
    """
    ds = metadata.skip(selection_map[str_msg.data]).take(1)
    ds = data_tools.load_voxelgrids(ds)
    ds = data_tools.simulate_input(ds, 0, 0, 0)
    # NOTE: alternative input-simulation strategies (first-n input, noisy
    # conditioning, partial completion) were previously trialled here.

    elem_raw = next(iter(ds))  # idiomatic form of ds.__iter__()

    # Add a batch dimension to every tensor, and keep a numpy copy of each
    # for publishing. (Previously done in two separate passes over the keys;
    # the per-key operations are independent, so one pass is equivalent.)
    elem = {}
    for k in elem_raw.keys():
        elem_raw[k] = tf.expand_dims(elem_raw[k], axis=0)
        elem[k] = elem_raw[k].numpy()
    publish_np_elem(elem)

    if model is None:
        return

    elem = sampling_tools.prepare_for_sampling(elem)

    inference = run_inference(elem)
    publish_inference(inference)

    mismatch = np.abs(elem['gt_occ'] - inference['predicted_occ'].numpy())
    mismatch_pub.publish(to_msg(mismatch))
    print("There are {} mismatches".format(np.sum(mismatch > 0.5)))

    def multistep_error(elem, inference):
        # Condition the next model pass on the thresholded previous
        # prediction, re-run the model, and republish the mismatch.
        a = inference['predicted_occ']
        elem['conditioned_occ'] = np.float32(a > 0.5)
        inference = model.model(elem)
        mismatch = np.abs(elem['gt_occ'] - inference['predicted_occ'].numpy())
        mismatch_pub.publish(to_msg(mismatch))
        return elem, inference

    if ARGS.multistep:
        for _ in range(5):
            rospy.sleep(1)
            elem, inference = multistep_error(elem, inference)

    metric = metrics.p_correct_geometric_mean(inference['predicted_occ'],
                                              elem['gt_occ'])
    print("p_correct_geometric_mean: {}".format(metric.numpy()))
    print("p_correct: {}".format(
        metrics.p_correct(inference['predicted_occ'], elem['gt_occ'])))
    print("iou: {}".format(
        metrics.iou(elem['gt_occ'], inference['predicted_occ'])))

    if ARGS.sample:
        global stop_current_sampler
        global sampling_thread

        # Stop any sampler left over from a previous selection before
        # starting a fresh one on the new element.
        stop_current_sampler = True
        if sampling_thread is not None:
            sampling_thread.join()

        sampling_thread = threading.Thread(target=sampler_worker,
                                           args=(elem, ))
        sampling_thread.start()