Example no. 1
0
 def test_metric(self):
     a = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float32)
     # L2
     t = pt.KdTree(a, pt.Metric.L2Squared, 10)
     self.assertEqual(t.metric(-2.0), 4)
     # L1
     t = pt.KdTree(a, pt.Metric.L1, 10)
     self.assertEqual(t.metric(-2.0), 2)
Example no. 2
0
    def test_search_box(self):
        """Exercise KdTree.search_box: result size and dtype, output memory
        re-use, slicing, and negative indexing of the returned DArray."""
        a = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float32)
        t = pt.KdTree(a, pt.Metric.L2Squared, 10)

        # Named box_min/box_max to avoid shadowing the builtins min() and max().
        box_min = np.array([[0, 0], [2, 2], [0, 0], [6, 6]], dtype=np.float32)
        box_max = np.array([[3, 3], [3, 3], [9, 9], [9, 9]], dtype=np.float32)
        nns = t.search_box(box_min, box_max)
        self.assertEqual(len(nns), 4)
        self.assertEqual(nns.dtype, t.dtype_index)
        self.assertTrue(nns)

        # Test that the memory is re-used: overwrite an entry and check that a
        # second search into the same output restores it.
        nns[0][0] = 42
        t.search_box(box_min, box_max, nns)
        self.assertEqual(nns[0][0], 0)

        # Check the amount of indices found for each query box.
        sizes = [1, 0, 3, 1]
        for n, s in zip(nns, sizes):
            self.assertEqual(len(n), s)

        # Slicing the DArray keeps every other result.
        nns = nns[0:4:2]
        self.assertEqual(len(nns), 2)

        sizes = [1, 3]
        for n, s in zip(nns, sizes):
            self.assertEqual(len(n), s)

        # Test negative indexing.
        self.assertEqual(len(nns[-1]), 3)
Example no. 3
0
    def test_creation_kd_tree(self):
        """A KdTree built from a numpy array mirrors that array's point
        count, spatial dimension, and scalar dtype, without copying it."""
        # Row major (C order) input: rows are interpreted as points.
        points = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float64, order='C')
        tree = pt.KdTree(points, pt.Metric.L2Squared, 10)
        self.assertEqual(points.shape[0], tree.npts)
        self.assertEqual(points.shape[1], tree.sdim)
        # The scalar dtype of the tree should be the same as the input.
        self.assertEqual(points.dtype, tree.dtype_scalar)

        # Col major (Fortran order) input: columns are interpreted as points.
        points = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float32, order='F')
        tree = pt.KdTree(points, pt.Metric.L2Squared, 10)
        self.assertEqual(points.shape[1], tree.npts)
        self.assertEqual(points.shape[0], tree.sdim)
        self.assertEqual(points.dtype, tree.dtype_scalar)

        # The tree implements the buffer protocol and can be inspected using a
        # memoryview. We'll use the view to check that the tree didn't copy the
        # numpy array: a write through the array is visible through the tree.
        # Note: This invalidates the built index by the tree.
        points[0][0] = 42
        self.assertEqual(points[0][0], memoryview(tree)[0, 0])
Example no. 4
0
    def test_creation_darray(self):
        """A DArray can be created from 3 different dtypes or their
        descriptors; the dtype properties of the KdTree are the easiest
        source for these. A freshly created DArray is empty (falsy)."""
        # 1) [('index', '<i4'), ('distance', '<f4')]
        points = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float32)
        tree = pt.KdTree(points, pt.Metric.L2Squared, 10)
        darray = pt.DArray(tree.dtype_neighbor)
        self.assertEqual(darray.dtype, tree.dtype_neighbor)
        self.assertFalse(darray)

        # 2) {'names':['index','distance'], 'formats':['<i4','<f8'], 'offsets':[0,8], 'itemsize':16}
        points = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float64)
        tree = pt.KdTree(points, pt.Metric.L2Squared, 10)
        darray = pt.DArray(dtype=tree.dtype_neighbor)
        self.assertEqual(darray.dtype, tree.dtype_neighbor)
        self.assertFalse(darray)

        # 3) int32, given either as the numpy type or as a dtype instance.
        darray = pt.DArray(np.int32)
        self.assertEqual(darray.dtype, tree.dtype_index)
        darray = pt.DArray(np.dtype(np.int32))
        self.assertEqual(darray.dtype, tree.dtype_index)
        self.assertFalse(darray)
Example no. 5
0
def array_initialization():
    """Show how to forward-initialize empty output arrays for the searches."""
    print("*** Array Initialization ***")
    points = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float64)
    # In and output distances are absolute distances when using Metric.L1.
    tree = pt.KdTree(points, pt.Metric.L1, 10)

    # This type of forward initialization of arrays may be useful to streamline
    # loops that depend on them and where reusing memory is desired. E.g.: ICP.
    knns = np.empty((0), dtype=tree.dtype_neighbor)
    rnns = pt.DArray(dtype=tree.dtype_neighbor)
    bnns = pt.DArray(dtype=tree.dtype_index)
    # Same output as printing each dtype right after its creation.
    for out in (knns, rnns, bnns):
        print(out.dtype)
    print()
Example no. 6
0
    def test_search_knn(self):
        """Exercise KdTree.search_knn: result shape, nearest-neighbor
        content, and memory re-use of the output array."""
        a = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float32)
        t = pt.KdTree(a, pt.Metric.L2Squared, 10)

        # Test if the query actually works: one row of k=2 neighbors per point.
        nns = t.search_knn(a, 2)
        self.assertEqual(nns.shape, (3, 2))

        # Each point's single nearest neighbor is itself, at distance 0.
        # (enumerate instead of indexing with range(len(...)).)
        for i, nn in enumerate(nns):
            self.assertEqual(nn[0][0], i)
            self.assertAlmostEqual(nn[0][1], 0)

        # Test that the memory is re-used: a second search into the same
        # output overwrites the value we clobbered.
        nns[0][0][0] = 42
        t.search_knn(a, 2, nns)
        self.assertEqual(nns[0][0][0], 0)
Example no. 7
0
def performance_test_pico_tree():
    """Time KdTree construction and a knn query against a scans.bin file."""
    print("*** Performance against scans.bin ***")
    # The benchmark documentation, docs/benchmark.md section "Running a new
    # benchmark", explains how to generate a scans.bin file from an online
    # dataset.
    try:
        p = np.fromfile(Path(__file__).parent / "scans.bin",
                        np.float64).reshape((-1, 3))
    except FileNotFoundError as e:
        print(f"Skipping test. File does not exist: {e.filename}")
        return

    build_start = perf_counter()
    # Tree creation is only slightly slower in Python vs C++ using the bindings.
    t = pt.KdTree(p, pt.Metric.L2, 10)
    # Alternative trees kept for comparison runs:
    #t = spKDTree(p, leafsize=10)
    #t = spcKDTree(p, leafsize=10)
    #t = skKDTree(p, leaf_size=10)
    build_stop = perf_counter()
    build_ms = (build_stop - build_start) * 1000.0
    print(f"{t} was built in {build_ms}ms")
    # Use the OMP_NUM_THREADS environment variable to influence the number of
    # threads used for querying: export OMP_NUM_THREADS=1
    k = 1
    query_start = perf_counter()
    # Searching for nearest neighbors is close to a constant second slower
    # using the bindings as compared to the C++ benchmark (regardless of k).
    # The following must be noted however: The Python benchmark simply calls
    # the knn function provided by the Python bindings. As such it does not
    # directly wrap the C++ benchmark. This means the performance difference is
    # not only due to the bindings overhead. The C++ implementation benchmark
    # may have been optimized more because is very simple. The bindings also
    # have various extra overhead: checks, numpy array memory creation, OpenMP,
    # etc.
    # TODO The actual overhead is probably very similar to that of the KdTree
    # creation, but it would be nice to measure the overhead w.r.t. the actual
    # query.
    unused_knns = t.search_knn(p, k)
    # unused_dd, unused_ii = t.query(p, k=k)
    query_stop = perf_counter()
    query_ms = (query_stop - query_start) * 1000.0
    print(f"{len(p)} points queried in {query_ms}ms")
    print()
Example no. 8
0
    def test_search_radius(self):
        """Exercise KdTree.search_radius: result size and dtype, neighbor
        content, memory re-use, and sequence behavior of the DArray."""
        points = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float32)
        tree = pt.KdTree(points, pt.Metric.L2Squared, 10)

        search_radius = tree.metric(2.5)
        nns = tree.search_radius(points, search_radius)
        self.assertEqual(len(nns), 3)
        self.assertEqual(nns.dtype, tree.dtype_neighbor)
        self.assertTrue(nns)

        # Each point's nearest neighbor within the radius is itself.
        for i, n in enumerate(nns):
            self.assertEqual(n[0][0], i)
            self.assertAlmostEqual(n[0][1], 0)

        # Test that the memory is re-used: a second search into the same
        # output overwrites the value we clobbered.
        nns[0][0][0] = 42
        tree.search_radius(points, search_radius, nns)
        self.assertEqual(nns[0][0][0], 0)

        # This checks if DArray is also a sequence: indexing on purpose here.
        for i in range(len(nns)):
            self.assertEqual(nns[i][0][0], i)
            self.assertAlmostEqual(nns[i][0][1], 0)
Example no. 9
0
def tree_creation_and_query_types():
    """Demonstrate KdTree creation and each of its query types."""
    print("*** KdTree Creation And Basic Information ***")
    p = np.array([[2, 1], [4, 3], [8, 7]], dtype=np.float32)
    # In and output distances are squared distances when using Metric.L2.
    t = pt.KdTree(p, pt.Metric.L2, 1)
    print(f"{t}")
    print(f"Number of points used to build the tree: {t.npts}")
    print(f"Spatial dimension of the tree: {t.sdim}")
    value = -2.0
    print(f"Metric applied to {value}: {t.metric(value)}")
    print()

    print("*** Nearest Neighbor Search ***")
    # Nearest neighbors via return.
    knns = t.search_knn(p, 1)
    print("Single nn for each input point:")
    print(knns)
    # Possibly re-use the memory in another query.
    # If the input size is incorrect, it gets resized.
    t.search_knn(p, 2, knns)
    print("Two nns for each input point:")
    print(knns)
    print()

    print("*** Approximate Nearest Neighbor Search ***")
    # Searching for approximate nearest neighbors works the same way.
    # An approximate nearest neighbor can be at most a distance factor of 1+e
    # farther away from the true nearest neighbor.
    max_error = 0.75
    # Apply the metric function to the ratio to get the squared ratio.
    max_error_ratio = t.metric(1.0 + max_error)
    knns = t.search_aknn(p, 2, max_error_ratio)
    t.search_aknn(p, 2, max_error_ratio, knns)
    # Note that we scale the ann distance back to its original distance.
    print("The 2nd closest to each input point:")
    for knn in knns:
        print(
            f"Point index {knn[1][0]} with distance {knn[1][1] * max_error_ratio}"
        )
    print()

    print("*** Radius Search ***")
    # A radius search doesn't return a numpy array but a custom vector of numpy
    # arrays. This is because the amount of neighbors to each of the input
    # points may vary for a radius search.
    search_radius = t.metric(2.5)
    print(f"Result with radius: {search_radius}")
    rnns = t.search_radius(p, search_radius)
    for rnn in rnns:
        print(f"{rnn}")
    search_radius = t.metric(5.0)
    # Pass the radius variable instead of repeating its value (25.0) as a
    # magic constant; the previous result memory is re-used.
    t.search_radius(p, search_radius, rnns)
    print(f"Result with radius: {search_radius}")
    for rnn in rnns:
        print(f"{rnn}")
    print()

    print("*** Box Search ***")
    # A box search returns the same data structure as a radius search. However,
    # instead of containing neighbors it simply contains indices.
    # Named box_min/box_max to avoid shadowing the builtins min() and max().
    box_min = np.array([[0, 0], [2, 2], [0, 0], [6, 6]], dtype=np.float32)
    box_max = np.array([[3, 3], [3, 3], [9, 9], [9, 9]], dtype=np.float32)
    bnns = t.search_box(box_min, box_max)
    t.search_box(box_min, box_max, bnns)
    print("Results for the orthogonal box search:")
    for bnn in bnns:
        print(f"{bnn}")
    print()

    print("*** DArray ***")
    # The custom type can also be indexed.
    print(f"Result size: {len(bnns)}")
    # Note that each numpy array is actually a view of a C++ vector.
    print(f"First index: {bnns[0]}")
    print(f"Second last index: {bnns[-2]}")
    half = bnns[0:4:2]
    print("Sliced results for the orthogonal box search:")
    for bnn in half:
        print(f"{bnn}")
    print()