Example #1
import cupy


def unit_vectors(direction, return_numpy=True):
    """
    Calculate the unit vectors (UnitX, UnitY) from a given direction angle.

    Args:

        direction: 3D NumPy array of direction angles in degrees.

        return_numpy: If True, copy the results back from the GPU and return
        NumPy arrays; if False, return CuPy arrays that stay on the device.

    Returns:

        UnitX, UnitY: two 3D arrays holding the x- and y-components of the
            unit vectors.
    """
    direction_gpu = cupy.array(direction)
    direction_gpu_rad = cupy.deg2rad(direction_gpu)
    # sin(pi/2) == 1, so these reduce to -cos(direction) and sin(direction)
    UnitX = -cupy.sin(0.5 * cupy.pi) * cupy.cos(direction_gpu_rad)
    UnitY = cupy.sin(0.5 * cupy.pi) * cupy.sin(direction_gpu_rad)
    del direction_gpu_rad

    # A direction of -1 appears to flag invalid pixels; zero their components
    UnitX[cupy.isclose(direction_gpu, -1)] = 0
    UnitY[cupy.isclose(direction_gpu, -1)] = 0
    del direction_gpu

    if return_numpy:
        return UnitX.get(), UnitY.get()
    return UnitX, UnitY
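
A minimal usage sketch with hypothetical values (assumes a CUDA-capable GPU with CuPy installed):

import numpy as np

angles = np.zeros((1, 1, 3))          # 3D array of direction angles in degrees
angles[0, 0] = [0.0, 90.0, -1.0]      # -1 flags an invalid pixel
ux, uy = unit_vectors(angles)
print(ux)  # roughly [[[-1., 0., 0.]]]: -cos(0°), -cos(90°), masked
print(uy)  # [[[0., 1., 0.]]]: sin(0°), sin(90°), masked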
Example #2
import numpy as np


def _check_symmetric_relations(a_matrix):
    """
    Check if the argument matrix is symmetric.  Raise a value error with details
    about the offending elements if it is not.  This is useful for checking that
    instantaneously linked nodes have the same link strength.

    Parameters
    ----------
    a_matrix : 2D numpy array
        Relationships between nodes at tau = 0. Indexed such that the first
        index is the node and the second is the parent, i.e. node j with
        parent i has strength a_matrix[j, i].
    """
    # Check that the matrix is symmetric
    if not np.allclose(a_matrix, a_matrix.T, rtol=1e-10, atol=1e-10):
        # Locate the disagreeing elements
        bad_elems = ~np.isclose(a_matrix, a_matrix.T, rtol=1e-10, atol=1e-10)
        bad_idxs = np.argwhere(bad_elems)
        error_message = ""
        for node, parent in bad_idxs:
            # Skip pairs we have already reported
            if bad_elems[node, parent]:
                error_message += (
                    "Parent {:d} of node {:d} has coefficient {:f}.\n".format(
                        parent, node, a_matrix[node, parent])
                    + "Parent {:d} of node {:d} has coefficient {:f}.\n".format(
                        node, parent, a_matrix[parent, node]))
            # Mark both orderings of the pair as handled
            bad_elems[node, parent] = False
            bad_elems[parent, node] = False
        raise ValueError("Relationships between nodes at tau=0 are not "
                         "symmetric!\n" + error_message)
Example #3
def test_is_close_scalar_scalar(self, dtype):
    # cupy.isclose always returns an ndarray, even for scalar inputs
    a = cupy.dtype(dtype).type(0)
    b = cupy.dtype(dtype).type(0)
    cond = cupy.isclose(a, b)
    assert cond.shape == ()
    assert bool(cond)
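
For contrast, a minimal sketch of the behaviour the comment describes (assumes a working CuPy install): NumPy collapses scalar inputs to a scalar, while CuPy keeps a 0-d array.

import numpy as np
import cupy

print(type(np.isclose(0.0, 0.0)))    # <class 'numpy.bool_'> -- plain scalar
print(type(cupy.isclose(0.0, 0.0)))  # <class 'cupy.ndarray'> -- 0-d array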
Example #4
import cupy


# DEFAULT_EPSILON is a relative-tolerance constant defined elsewhere in the
# original module.
def compare_scores(sorted_df, first_key, second_key, epsilon=DEFAULT_EPSILON):
    errors = sorted_df[~cupy.isclose(
        sorted_df[first_key], sorted_df[second_key], rtol=epsilon)]
    num_errors = len(errors)
    if num_errors > 0:
        print(errors)
    assert (
        num_errors == 0
    ), "Mismatches were found when comparing '{}' and '{}' (rtol = {})".format(
        first_key, second_key, epsilon)
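
A hedged usage sketch (assumes cuDF and CuPy are installed; cupy.isclose consumes the cuDF Series through the CUDA array interface, as the function above relies on):

import cudf

df = cudf.DataFrame({"a": [1.00, 2.00, 3.00],
                     "b": [1.00, 2.00, 3.50]})
compare_scores(df, "a", "b", epsilon=1e-4)   # AssertionError: row 2 mismatches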
Example #5
def _compare_bfs_spc(G, Gnx, source):
    df = cugraph.bfs(G, source, return_sp_counter=True)
    # With return_sp_counter=True the result should contain exactly 4 columns:
    # 'vertex', 'distance', 'predecessor', 'sp_counter'
    assert len(df.columns) == 4, (
        "The result of the BFS has an invalid number of columns"
    )
    # nxacb is presumably networkx.algorithms.centrality.betweenness in the
    # original test module
    _, _, nx_sp_counter = nxacb._single_source_shortest_path_basic(Gnx, source)
    sorted_nx = [nx_sp_counter[key] for key in sorted(nx_sp_counter.keys())]
    # We are not checking distances / predecessors here, as we assume these
    # have been checked in the _compare_bfs tests.
    # We focus solely on shortest path counting.

    # cuGraph returns a dataframe that should contain each vertex exactly
    # once.
    # We could use isin to filter only vertices that are common to both,
    # but it would slow down the comparison, and in this specific case
    # nxacb._single_source_shortest_path_basic is a dictionary containing all
    # the vertices.
    # There is no guarantee that the vertices in `df` are sorted, so we
    # enforce the order to allow a faster comparison afterwards.
    sorted_df = df.sort_values("vertex").rename(
        columns={"sp_counter": "cu_spc"}, copy=False
    )

    # This allows us to detect vertex identifiers that might wrongly be
    # present multiple times
    cu_vertices = set(sorted_df['vertex'].values_host)
    nx_vertices = nx_sp_counter.keys()
    assert len(cu_vertices.intersection(nx_vertices)) == len(
        nx_vertices
    ), "There are missing vertices"

    # We add the nx shortest path counter to the cudf.DataFrame; both the
    # DataFrame and `sorted_nx` are sorted based on vertex identifiers
    sorted_df["nx_spc"] = sorted_nx

    # We could use numpy.isclose or cupy.isclose; either lets us get the
    # entries of the cudf.DataFrame where there is a mismatch.
    # numpy / cupy allclose would return only a boolean, and we want the
    # extra information about the discrepancies
    shortest_path_counter_errors = sorted_df[
        ~cupy.isclose(
            sorted_df["cu_spc"], sorted_df["nx_spc"], rtol=DEFAULT_EPSILON
        )
    ]
    if len(shortest_path_counter_errors) > 0:
        print(shortest_path_counter_errors)
    assert len(shortest_path_counter_errors) == 0, (
        "Shortest path counters are too different"
    )
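
The core sort-then-compare pattern, reduced to a minimal hedged sketch with plain CuPy arrays and illustrative values:

import cupy

cu_spc = cupy.array([1.0, 2.0, 4.0])          # GPU shortest-path counts
nx_spc = cupy.array([1.0, 2.0, 4.0000001])    # reference counts
mismatches = ~cupy.isclose(cu_spc, nx_spc, rtol=1e-4)
assert not bool(mismatches.any())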
Example #6
def assert_equal(
    cls,
    obj1,
    obj2,
    aprops1,
    aprops2,
    cprops1,
    cprops2,
    *,
    rel_tol=1e-9,
    abs_tol=0.0,
):
    assert (
        aprops1 == aprops2
    ), f"abstract property mismatch: {aprops1} != {aprops2}"
    if aprops1.get("dtype") == "float":
        # Forward the tolerances; the original snippet accepted rel_tol and
        # abs_tol but never passed them on to cupy.isclose.
        assert (cupy.isclose(obj1.value, obj2.value,
                             rtol=rel_tol, atol=abs_tol)).all()
    else:
        assert (obj1.value == obj2.value).all()
Example #7
import cupy as cp
from numba import cuda
from sklearn.datasets import make_classification


# `type`, `test_size`, and `train_size` come from pytest parametrization in
# the original test module; train_test_split is cuML's GPU implementation.
def test_stratified_split(type, test_size, train_size):
    # For more tolerance and reliable estimates
    X, y = make_classification(n_samples=10000)

    if type == 'cupy':
        X = cp.asarray(X)
        y = cp.asarray(y)

    if type == 'numba':
        X = cuda.to_device(X)
        y = cuda.to_device(y)

    def counts(y):
        _, y_indices = cp.unique(y, return_inverse=True)
        class_counts = cp.bincount(y_indices)
        total = cp.sum(class_counts)
        percent_counts = []
        for count in class_counts:
            percent_counts.append(
                cp.around(float(count) / total.item(), decimals=2).item())
        return percent_counts

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=train_size,
                                                        test_size=test_size,
                                                        stratify=y)

    original_counts = counts(y)
    split_counts = counts(y_train)
    assert cp.isclose(original_counts, split_counts, equal_nan=False,
                      rtol=0.1).all()
    if type == 'cupy':
        assert isinstance(X_train, cp.ndarray)
        assert isinstance(X_test, cp.ndarray)

    if type == 'numba':
        assert cuda.devicearray.is_cuda_ndarray(X_train)
        assert cuda.devicearray.is_cuda_ndarray(X_test)
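
The class-proportion check at the end, as a standalone hedged sketch with illustrative numbers:

import cupy as cp

original = cp.array([0.50, 0.50])   # class proportions before the split
split = cp.array([0.52, 0.48])      # class proportions after the split
# rtol=0.1 tolerates roughly 10% relative drift in each class proportion
assert cp.isclose(original, split, rtol=0.1).all()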
Example #8
import numpy as np
import cupy as cp


def evaluate_chunks(
        results: [cp.ndarray, cp.ndarray,
                  cp.ndarray],  # closest triangle, distance, projection
        all_pts: cp.ndarray = None,
        vertices: cp.ndarray = None,
        edges: cp.ndarray = None,
        edge_norms: cp.ndarray = None,
        edge_normssq: cp.ndarray = None,
        normals: cp.ndarray = None,
        norms: cp.ndarray = None,
        normssq: cp.ndarray = None,
        zero_tensor: cp.ndarray = None,
        one_tensor: cp.ndarray = None,
        tris: cp.ndarray = None,
        vertex_normals: cp.ndarray = None,
        bounding_box: dict = None,
        chunk_size: int = None,
        num_verts: int = None) -> None:

    #
    # Expand vertex normals if provided
    if vertex_normals is not None:
        vertex_normals = vertex_normals[tris]
        vertex_normals = cp.tile(cp.expand_dims(vertex_normals, axis=2),
                                 (1, 1, chunk_size, 1))

    # begin = time.time()
    #
    # Load and extend the batch
    num_chunks = all_pts.shape[0] // chunk_size
    for i in range(num_chunks):
        #
        # Get subset of the query points
        start_index = i * chunk_size
        end_index = (i + 1) * chunk_size
        pts = all_pts[start_index:end_index, :]

        #
        # Match the dimensions to those assumed above.
        #    REPEATED       REPEATED
        # [triangle_index, vert_index, querypoint_index, coordinates]
        pts = cp.tile(cp.expand_dims(pts, axis=(0, 1)), (num_verts, 3, 1, 1))

        #
        # Compute the differences between
        # vertices on each triangle and the
        # points of interest
        #
        # [triangle_index, vert_index, querypoint_index, coordinates]
        # ===================
        # [:,0,:,:] = p - p1
        # [:,1,:,:] = p - p2
        # [:,2,:,:] = p - p3
        diff_vectors = pts - vertices

        #
        # Compute alpha, beta, gamma
        barycentric = cp.empty(diff_vectors.shape)

        #
        # gamma = u x (p - p1)
        barycentric[:, 2, :, :] = cp.cross(edges[:, 0, :, :],
                                           diff_vectors[:, 0, :, :])
        # beta = (p - p1) x v
        barycentric[:, 1, :, :] = cp.cross(diff_vectors[:, 0, :, :],
                                           edges[:, 1, :, :])
        # alpha = w x (p - p2)
        barycentric[:, 0, :, :] = cp.cross(edges[:, 2, :, :],
                                           diff_vectors[:, 1, :, :])
        barycentric = cp.divide(
            cp.sum(cp.multiply(barycentric, normals), axis=3), normssq)

        #
        # Test conditions
        less_than_one = cp.less_equal(barycentric, one_tensor)
        more_than_zero = cp.greater_equal(barycentric, zero_tensor)

        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        cond1 = cp.logical_and(less_than_one, more_than_zero)

        #
        #     if gamma <= 0:
        cond2 = cp.logical_not(more_than_zero[:, 2, :])
        cond2 = cp.tile(cp.expand_dims(cond2, axis=1), (1, 3, 1))

        #
        #     if beta <= 0:
        cond3 = cp.logical_not(more_than_zero[:, 1, :])
        cond3 = cp.tile(cp.expand_dims(cond3, axis=1), (1, 3, 1))

        #
        #     if alpha <= 0:
        cond4 = cp.logical_not(more_than_zero[:, 0, :])
        cond4 = cp.tile(cp.expand_dims(cond4, axis=1), (1, 3, 1))

        #
        # Get the projections for each case
        xi = cp.empty(barycentric.shape)
        barycentric_ext = cp.tile(cp.expand_dims(barycentric, axis=3),
                                  (1, 1, 1, 3))
        proj = cp.sum(cp.multiply(barycentric_ext, vertices), axis=1)
        #
        #     if 0 <= gamma and gamma <= 1
        #    and 0 <= beta and beta <= 1
        #    and 0 <= alpha and alpha <= 1:
        xi[cond1] = barycentric[cond1]

        #
        # if gamma <= 0:
        #  x = p - p1
        #  u = p2 - p1
        #  a = p1
        #  b = p2
        t2 = cp.divide(
            #
            # u.dot(x)
            cp.sum(cp.multiply(edges[:, 0, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 0])
        xi2 = cp.zeros((t2.shape[0], 3, t2.shape[1]))
        xi2[:, 0, :] = -t2 + 1
        xi2[:, 1, :] = t2
        #
        t2 = cp.tile(cp.expand_dims(t2, axis=2), (1, 1, 3))
        lz = cp.less(t2, cp.zeros(t2.shape))
        go = cp.greater(t2, cp.ones(t2.shape))
        proj2 = vertices[:, 0, :, :] + cp.multiply(t2, edges[:, 0, :, :])
        proj2[lz] = vertices[:, 0, :, :][lz]
        proj2[go] = vertices[:, 1, :, :][go]
        #
        xi[cond2] = xi2[cond2]
        proj[cp.swapaxes(cond2, 1, 2)] = proj2[cp.swapaxes(cond2, 1, 2)]

        #
        # if beta <= 0:
        #  x = p - p1
        #  v = p3 - p1
        #  a = p1
        #  b = p3
        t3 = cp.divide(
            #
            # v.dot(x)
            cp.sum(cp.multiply(edges[:, 1, :, :], diff_vectors[:, 0, :, :]),
                   axis=2),
            edge_normssq[:, 1])
        xi3 = cp.zeros((t3.shape[0], 3, t3.shape[1]))
        xi3[:, 0, :] = -t3 + 1
        xi3[:, 2, :] = t3
        #
        t3 = cp.tile(cp.expand_dims(t3, axis=2), (1, 1, 3))
        lz = cp.less(t3, cp.zeros(t3.shape))
        go = cp.greater(t3, cp.ones(t3.shape))
        proj3 = vertices[:, 0, :, :] + cp.multiply(t3, edges[:, 1, :, :])
        proj3[lz] = vertices[:, 0, :, :][lz]
        proj3[go] = vertices[:, 2, :, :][go]
        #
        xi[cond3] = xi3[cond3]
        proj[cp.swapaxes(cond3, 1, 2)] = proj3[cp.swapaxes(cond3, 1, 2)]

        #
        #     if alpha <= 0:
        #  y = p - p2
        #  w = p3 - p2
        #  a = p2
        #  b = p3
        t4 = cp.divide(
            #
            # w.dot(y)
            cp.sum(cp.multiply(edges[:, 2, :, :], diff_vectors[:, 1, :, :]),
                   axis=2),
            edge_normssq[:, 2])
        xi4 = cp.zeros((t4.shape[0], 3, t4.shape[1]))
        xi4[:, 1, :] = -t4 + 1
        xi4[:, 2, :] = t4
        #
        t4 = cp.tile(cp.expand_dims(t4, axis=2), (1, 1, 3))
        lz = cp.less(t4, cp.zeros(t4.shape))
        go = cp.greater(t4, cp.ones(t4.shape))
        proj4 = vertices[:, 1, :, :] + cp.multiply(t4, edges[:, 2, :, :])
        proj4[lz] = vertices[:, 1, :, :][lz]
        proj4[go] = vertices[:, 2, :, :][go]
        #
        xi[cond4] = xi4[cond4]
        proj[cp.swapaxes(cond4, 1, 2)] = proj4[cp.swapaxes(cond4, 1, 2)]

        vec_to_point = pts[:, 0, :, :] - proj
        distances = cp.linalg.norm(vec_to_point, axis=2)

        # n = "\n"
        # print(f"{pts[:,0,:,:]=}")
        # print(f"{proj=}")
        # print(f"{pts[:,0,:,:] - proj=}")
        # print(f"{distances=}")

        min_distances = cp.min(distances, axis=0)

        closest_triangles = cp.argmin(distances, axis=0)

        projections = proj[closest_triangles, np.arange(chunk_size), :]

        #
        # Distinguish close triangles
        is_close = cp.isclose(distances, min_distances)

        #
        # Determine sign
        signed_normal = normals[:, 0, :, :]
        if vertex_normals is not None:
            signed_normal = cp.sum(vertex_normals.transpose() * xi.transpose(),
                                   axis=2).transpose()

        is_negative = cp.less_equal(
            cp.sum(cp.multiply(vec_to_point, signed_normal), axis=2), 0.)

        #
        # Combine
        is_close_and_negative = cp.logical_and(is_close, is_negative)

        #
        # Determine if inside
        is_inside = cp.all(cp.logical_or(is_close_and_negative,
                                         cp.logical_not(is_close)),
                           axis=0)

        #
        # Overwrite the signs of points
        # that are outside of the box
        if bounding_box is not None:
            #
            # Extract
            rotation_matrix = cp.asarray(bounding_box['rotation_matrix'])
            translation_vector = cp.asarray(bounding_box['translation_vector'])
            size = cp.asarray(bounding_box['size'])
            #
            # Transform
            transformed_pts = cp.dot(
                all_pts[start_index:end_index, :] - translation_vector,
                rotation_matrix)

            #
            # Determine if outside bbox
            inside_bbox = cp.all(cp.logical_and(
                cp.less_equal(0., transformed_pts),
                cp.less_equal(transformed_pts, size)),
                                 axis=1)

            #
            # Treat points outside bbox as
            # being outside of lumen
            print(f"{inside_bbox=}")
            is_inside = cp.logical_and(is_inside, inside_bbox)

        #
        # Apply sign to indicate whether the distance is
        # inside or outside the mesh.
        min_distances[is_inside] = -1 * min_distances[is_inside]

        #
        # Emplace results
        # [triangle_index, vert_index, querypoint_index, coordinates]
        results[0][start_index:end_index] = closest_triangles
        results[1][start_index:end_index] = min_distances
        results[2][start_index:end_index, :] = projections
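
The cp.isclose tie-handling trick used above, isolated into a small hedged sketch with illustrative numbers:

import cupy as cp

# Rows are triangles, columns are query points
distances = cp.array([[1.0, 2.0],
                      [1.0 + 1e-12, 0.5]])
min_distances = cp.min(distances, axis=0)
# Marks every triangle whose distance ties the per-point minimum
print(cp.isclose(distances, min_distances))  # [[ True False] [ True  True]]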
Example #9
def isclose(self, a, b, *, rtol=1e-9, atol=0.0):
    # Unwrap CuPyTensor operands, compare, and re-wrap ndarray results
    a = a.tsr if isinstance(a, CuPyTensor) else a
    b = b.tsr if isinstance(b, CuPyTensor) else b
    y = cp.isclose(a, b, rtol=rtol, atol=atol)
    return CuPyTensor(y) if isinstance(y, cp.ndarray) else y
Example #10
    # Start the loops!
    tic = time.time()
    for i in range(ni):
        for j in range(nj):

            # Only do work for pixels inside the "circle"
            if cp.sqrt((i - ni0)**2 + (j - nj0)**2) <= nj0:

                # Set the initial tolerance for the "closeness" of
                # theta and phi to theta_img[i,j] and phi_img[i,j] resp.
                # theta_tol = theta_tol_array[i,j]
                # phi_tol = phi_tol_array[i,j]
                tol = 8e-3

                # isolate the ray along theta, phi
                theta_ray = cp.isclose(thetav, theta_img[i, j], atol=tol)
                select_thetas = cp.logical_and(limit_theta, theta_ray)
                phi_ray = cp.isclose(phiv, phi_img[i, j], atol=tol)
                select_ql = cp.logical_and(select_thetas, phi_ray)
                qlk, qli, qlj = cp.where(select_ql)

                # sort the ray by radius
                radii = rv[qlk, qli, qlj]
                sort = cp.argsort(radii)
                qlk = qlk[sort]
                qli = qli[sort]
                qlj = qlj[sort]

                # Now shift points of view
                for num in cp.arange(0, 11, 1):
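
A minimal standalone sketch of the angular ray selection above (all names and values are illustrative):

import cupy as cp

thetav = cp.linspace(0, cp.pi, 1000)            # grid of polar angles
theta_ray = cp.isclose(thetav, cp.pi / 2, atol=8e-3)
print(int(theta_ray.sum()))                     # grid angles within tolerance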
Example #11
def test_limit_check(self):
    # sc is presumably cupyx.scipy.special in the original test module
    result = sc.gammainc(1e-10, 1)
    limit = sc.gammainc(0, 1)
    assert cp.isclose(result, limit)
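
A hedged standalone version (assumes CuPy with cupyx.scipy.special available):

import cupy as cp
import cupyx.scipy.special as sc

# The regularized lower incomplete gamma gammainc(a, x) tends to 1 as a -> 0+
assert bool(cp.isclose(sc.gammainc(1e-10, 1), sc.gammainc(0, 1)))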