Example 1
    def compute_hypothetical_errors(self, point_indeces):
        """
        Computes the hypothetical errors of each point in the TIN, if they were to be removed.
        """
        indices, indptr = self.dt.vertex_neighbor_vertices

        for pt_index in point_indeces:
            current_vertex = self.dt.points[pt_index]
            point = self.grid.get(current_vertex[0], current_vertex[1])

            if point in self.grid.get_corner_set():
                continue

            # Find neighboring vertices of the vertex at pt_index & create hypothetical triangulation
            neighbor_indeces = indptr[indices[pt_index]:indices[pt_index + 1]]
            neighbors = self.dt.points[neighbor_indeces]
            hypothetical_triangulation = Delaunay(neighbors)

            # Store neighbor point data in the point for later
            point_neighbors = set()
            for pt in neighbors:
                point_neighbors.add(self.grid.get(pt[0], pt[1]))
            point.neighbors = point_neighbors

            # Locate current point in new triangulation & compute error
            simplex = hypothetical_triangulation.find_simplex(current_vertex)
            triangle_pts = hypothetical_triangulation.points[
                hypothetical_triangulation.simplices[simplex]]
            self.compute_point_error(point, triangle_pts)
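
The method above relies on `Delaunay.vertex_neighbor_vertices` to collect a vertex's neighbours before retriangulating them. A minimal standalone sketch of that lookup (not part of the class above; note that SciPy documents the return value as the pair `(indptr, indices)`, which the method unpacks under swapped but equivalent names):

import numpy as np
from scipy.spatial import Delaunay

pts = np.random.default_rng(0).random((20, 2))
dt = Delaunay(pts)

# Neighbours of vertex k are indices[indptr[k]:indptr[k + 1]]
indptr, indices = dt.vertex_neighbor_vertices
k = 5
neighbour_idx = indices[indptr[k]:indptr[k + 1]]
print(pts[neighbour_idx])  # coordinates of the vertices adjacent to vertex k
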
Example 2
class BaseDelaunayInterpolator(SpatialInterpolator):
    """A base class for interpolators built on top of Delaunay triangulation of data points.

    The class triangulates input `coords` and stores the result in the `tri` attribute of the created instance. It also
    constructs an IDW interpolator which is used to perform extrapolation for coordinates lying outside the convex hull
    of data points. Each concrete subclass must implement `_interpolate_inside_hull` method.
    """
    def __init__(self, coords, values=None, neighbors=3, dist_transform=2):
        super().__init__(coords, values)

        # Construct a convex hull of passed coords. Cast coords to float32, otherwise cv2 may fail. cv2 is used since
        # QHull can't handle degenerate hulls.
        self.coords_hull = cv2.convexHull(self.coords.astype(np.float32), returnPoints=True)

        # Construct an IDW interpolator to use outside the constructed hull
        self.idw_interpolator = IDWInterpolator(coords, values, neighbors=neighbors, dist_transform=dist_transform)

        # Triangulate input points
        try:
            self.tri = Delaunay(self.coords, incremental=False)
        except QhullError:
            # Delaunay fails in case of linearly dependent coordinates. Create artificial points in the corners of
            # given coordinate grid in order for Delaunay to work with a full rank matrix.
            min_x, min_y = np.min(self.coords, axis=0) - 1
            max_x, max_y = np.max(self.coords, axis=0) + 1
            corner_coords = [(min_x, min_y), (min_x, max_y), (max_x, min_y), (max_x, max_y)]
            self.coords = np.concatenate([self.coords, corner_coords])
            if self.values is not None:
                mean_values = np.mean(self.values, axis=0, keepdims=True, dtype=self.values.dtype)
                corner_values = np.repeat(mean_values, 4, axis=0)
                self.values = np.concatenate([self.values, corner_values])
            self.tri = Delaunay(self.coords, incremental=False)

        # Perform a first auxiliary call to the triangulation so that it works properly in different processes.
        # Otherwise interpolation may fail if called in a pipeline with prefetch and an mpc target.
        _ = self.tri.find_simplex((0, 0))

    def _is_in_hull(self, coords):
        """Check whether items in `coords` lie within the convex hull of data points."""
        coords = coords.astype(np.float32)  # Cast coords to float32 to match the type of points in the convex hull
        return np.array([cv2.pointPolygonTest(self.coords_hull, coord, measureDist=False) >= 0 for coord in coords])

    def _interpolate_inside_hull(self, coords):
        """Perform interpolation for coordinates lying inside convex hull of data points. `coords` are guaranteed to be
        2-dimensional with shape (n_coords, 2)."""
        _ = coords
        raise NotImplementedError

    def _interpolate(self, coords):
        """Perform interpolation at given `coords`. Falls back to an IDW interpolator for points lying outside the
        convex hull of data points. `coords` are guaranteed to be 2-dimensional with shape (n_coords, 2)."""
        inside_hull_mask = self._is_in_hull(coords)
        values = np.empty((len(coords), self.values.shape[1]), dtype=self.values.dtype)
        values[inside_hull_mask] = self._interpolate_inside_hull(coords[inside_hull_mask])
        # pylint: disable-next=protected-access
        values[~inside_hull_mask] = self.idw_interpolator._interpolate(coords[~inside_hull_mask])
        return values
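
A minimal standalone sketch of the `QhullError` fallback used in the constructor above, assuming only NumPy and SciPy: collinear input makes `Delaunay` fail, so the input is padded with four artificial corner points and triangulated again.

import numpy as np
from scipy.spatial import Delaunay, QhullError  # older SciPy exposes this as scipy.spatial.qhull.QhullError

coords = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])  # collinear, hence degenerate
try:
    tri = Delaunay(coords)
except QhullError:
    min_x, min_y = np.min(coords, axis=0) - 1
    max_x, max_y = np.max(coords, axis=0) + 1
    corner_coords = [(min_x, min_y), (min_x, max_y), (max_x, min_y), (max_x, max_y)]
    tri = Delaunay(np.concatenate([coords, corner_coords]))

print(tri.find_simplex([1.5, 1.5]) >= 0)  # True: the padded triangulation covers the point
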
Example 3
def _is_inside_scaffold(scaffold_positions: np.ndarray,
                        new_position: np.ndarray):
    hull = ConvexHull(scaffold_positions, incremental=False)
    vertices = scaffold_positions[hull.vertices]
    delaunay = Delaunay(vertices)
    return delaunay.find_simplex(new_position) >= 0
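
Hypothetical usage of the helper above (the scaffold data below is made up for illustration): a point near the centre of a random unit-cube scaffold should test inside its hull, while a distant point should not.

import numpy as np
from scipy.spatial import ConvexHull, Delaunay

scaffold_positions = np.random.default_rng(1).random((30, 3))   # made-up scaffold points
print(_is_inside_scaffold(scaffold_positions, np.array([0.5, 0.5, 0.5])))  # almost certainly True
print(_is_inside_scaffold(scaffold_positions, np.array([2.0, 2.0, 2.0])))  # False
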
Example 4
def cube_frac2cart(cvalues, v1, v2, v3, centre=(0., 0., 0.), min_voxels=None, max_voxels=1000000, interp='linear',
                   make_cubic=False, bval=False):
    """convert a 3d cube of values, whose indexes relate to fractional coordinates of v1,v2,v3,
    into a cube of values in the cartesian basis
    (using a background value for coordinates outside the bounding box of v1,v2,v3)

    NB: there may be some edge effects for smaller cubes

    Properties
    ----------
    values : array((N,M,L))
        values in fractional basis
    v1 : array((3,))
    v2 : array((3,))
    v3 : array((3,))
    centre : array((3,))
        cartesian coordinates for centre of v1, v2, v3
    min_voxels : int or None
        minimum number of voxels in returned cube. If None, compute base on input cube
    max_voxels : int or None
        maximum number of voxels in returned cube. If None, compute base on input cube
    interp : str
        interpolation mode; 'nearest' or 'linear'
    make_cubic: bool
        if True, ensure all final cartesian cube sides are of the same length
    bval: float
        background value to use outside the bounding box of the cube.
        If False, use numpy.nan

    Returns
    -------
    B : array((P,Q,R))
        where P,Q,R <= longest_side
    min_bounds : array((3,))
        xmin,ymin,zmin
    max_bounds : array((3,))
        xmax,ymax,zmax

    Example
    -------

    >>> from pprint import pprint
    >>> import numpy as np
    >>> fcube = np.array(
    ...    [[[1.,5.],
    ...      [3.,7.]],
    ...     [[2.,6.],
    ...      [4.,8.]]])
    ...
    >>> ncube, min_bound, max_bound = cube_frac2cart(fcube, [1.,0.,0.], [0.,1.,0.], [0.,0.,1.], min_voxels=30)
    >>> min_bound.tolist()
    [-0.5, -0.5, -0.5]
    >>> max_bound.tolist()
    [0.5, 0.5, 0.5]
    >>> pprint(ncube.round(1).tolist())
    [[[1.0, 1.0, 3.0, 5.0],
      [1.0, 1.0, 3.0, 5.0],
      [2.0, 2.0, 4.0, 6.0],
      [3.0, 3.0, 5.0, 7.0]],
     [[1.0, 1.0, 3.0, 5.0],
      [1.0, 1.0, 3.0, 5.0],
      [2.0, 2.0, 4.0, 6.0],
      [3.0, 3.0, 5.0, 7.0]],
     [[1.5, 1.5, 3.5, 5.5],
      [1.5, 1.5, 3.5, 5.5],
      [2.5, 2.5, 4.5, 6.5],
      [3.5, 3.5, 5.5, 7.5]],
     [[2.0, 2.0, 4.0, 6.0],
      [2.0, 2.0, 4.0, 6.0],
      [3.0, 3.0, 5.0, 7.0],
      [4.0, 4.0, 6.0, 8.0]]]

    >>> ncube, min_bound, max_bound = cube_frac2cart(fcube, [2.,0.,0.], [0.,1.,0.], [0.,0.,1.], min_voxels=30)
    >>> min_bound.tolist()
    [-1.0, -0.5, -0.5]
    >>> max_bound.tolist()
    [1.0, 0.5, 0.5]
    >>> pprint(ncube.round(1).tolist())
    [[[1.0, 1.7, 4.3], [1.3, 2.0, 4.7], [2.7, 3.3, 6.0]],
     [[1.0, 1.7, 4.3], [1.3, 2.0, 4.7], [2.7, 3.3, 6.0]],
     [[1.2, 1.8, 4.5], [1.5, 2.2, 4.8], [2.8, 3.5, 6.2]],
     [[1.5, 2.2, 4.8], [1.8, 2.5, 5.2], [3.2, 3.8, 6.5]],
     [[1.8, 2.5, 5.2], [2.2, 2.8, 5.5], [3.5, 4.2, 6.8]],
     [[2.0, 2.7, 5.3], [2.3, 3.0, 5.7], [3.7, 4.3, 7.0]]]

    >>> ncube, min_bound, max_bound = cube_frac2cart(fcube, [1.,0.,0.], [0.,2.,0.], [0.,0.,1.], min_voxels=30)
    >>> pprint(ncube.round(1).tolist())
    [[[1.0, 1.7, 4.3],
      [1.0, 1.7, 4.3],
      [1.3, 2.0, 4.7],
      [2.0, 2.7, 5.3],
      [2.7, 3.3, 6.0],
      [3.0, 3.7, 6.3]],
     [[1.2, 1.8, 4.5],
      [1.2, 1.8, 4.5],
      [1.5, 2.2, 4.8],
      [2.2, 2.8, 5.5],
      [2.8, 3.5, 6.2],
      [3.2, 3.8, 6.5]],
     [[1.8, 2.5, 5.2],
      [1.8, 2.5, 5.2],
      [2.2, 2.8, 5.5],
      [2.8, 3.5, 6.2],
      [3.5, 4.2, 6.8],
      [3.8, 4.5, 7.2]]]

    >>> ncube, min_bound, max_bound = cube_frac2cart(fcube, [1.,0.,0.], [0.,1.,0.], [0.,0.,2.], min_voxels=30)
    >>> pprint(ncube.round(1).tolist())
    [[[1.0, 1.0, 1.7, 3.0, 4.3, 5.0],
      [1.3, 1.3, 2.0, 3.3, 4.7, 5.3],
      [2.7, 2.7, 3.3, 4.7, 6.0, 6.7]],
     [[1.2, 1.2, 1.8, 3.2, 4.5, 5.2],
      [1.5, 1.5, 2.2, 3.5, 4.8, 5.5],
      [2.8, 2.8, 3.5, 4.8, 6.2, 6.8]],
     [[1.8, 1.8, 2.5, 3.8, 5.2, 5.8],
      [2.2, 2.2, 2.8, 4.2, 5.5, 6.2],
      [3.5, 3.5, 4.2, 5.5, 6.8, 7.5]]]

    >>> ncube, min_bound, max_bound = cube_frac2cart(fcube, [1.,0.,0.], [.7,.7,0.], [0.,0.,1.], min_voxels=30)
    >>> min_bound.tolist()
    [-0.85, -0.35, -0.5]
    >>> max_bound.tolist()
    [0.85, 0.35, 0.5]
    >>> pprint(ncube.round(1).tolist())
    [[[1.0, 1.7, 4.3], [nan, nan, nan]],
     [[1.1, 1.7, 4.4], [nan, nan, nan]],
     [[1.6, 2.3, 5.0], [2.0, 2.7, 5.3]],
     [[2.0, 2.7, 5.3], [2.5, 3.2, 5.8]],
     [[nan, nan, nan], [3.0, 3.7, 6.3]],
     [[nan, nan, nan], [nan, nan, nan]]]

    >>> ncube, min_bound, max_bound = cube_frac2cart(fcube, [2.,0.,0.], [0.,1.,0.], [0.,0.,1.], min_voxels=30, make_cubic=True)
    >>> min_bound.tolist()
    [-1.0, -0.5, -0.5]
    >>> max_bound.tolist()
    [1.0, 1.5, 1.5]
    >>> pprint(ncube.round(1).tolist())
    [[[1.0, 3.0, 5.0, nan],
      [2.0, 4.0, 6.0, nan],
      [3.0, 5.0, 7.0, nan],
      [nan, nan, nan, nan]],
     [[1.0, 3.0, 5.0, nan],
      [2.0, 4.0, 6.0, nan],
      [3.0, 5.0, 7.0, nan],
      [nan, nan, nan, nan]],
     [[1.5, 3.5, 5.5, nan],
      [2.5, 4.5, 6.5, nan],
      [3.5, 5.5, 7.5, nan],
      [nan, nan, nan, nan]],
     [[2.0, 4.0, 6.0, nan],
      [3.0, 5.0, 7.0, nan],
      [4.0, 6.0, 8.0, nan],
      [nan, nan, nan, nan]]]

   """
    cvalues = np.asarray(cvalues, dtype=float)

    min_voxels = min_voxels if min_voxels is not None else 1
    longest_side = max(cvalues.shape)
    if (min_voxels is not None) and (max_voxels is not None) and min_voxels > max_voxels:
        raise ValueError(
            "min_voxels ({0}) must be less than or equal to max_voxels ({1})".format(min_voxels,
                                                                                     max_voxels))
    if min_voxels is not None:
        longest_side = max(longest_side, int(min_voxels ** (1 / 3.)))
    if max_voxels is not None:
        longest_side = min(longest_side, int(max_voxels ** (1 / 3.)))

    # convert to numpy arrays
    origin = np.asarray([0, 0, 0], dtype=float)
    v1 = np.asarray(v1)
    v2 = np.asarray(v2)
    v3 = np.asarray(v3)

    # --------------
    # expand cube by one unit in all directions (for interpolation)
    cvalues = np.concatenate((np.array(cvalues[0], ndmin=3), cvalues, np.array(cvalues[-1], ndmin=3)), axis=0)
    start = np.transpose(np.array(cvalues[:, :, 0], ndmin=3), axes=[1, 2, 0])
    end = np.transpose(np.array(cvalues[:, :, -1], ndmin=3), axes=[1, 2, 0])
    cvalues = np.concatenate((start, cvalues, end), axis=2)
    start = np.transpose(np.array(cvalues[:, 0, :], ndmin=3), axes=[1, 0, 2])
    end = np.transpose(np.array(cvalues[:, -1, :], ndmin=3), axes=[1, 0, 2])
    cvalues = np.concatenate((start, cvalues, end), axis=1)
    # --------------

    # --------------
    # create fractional coordinate axes for cube
    f_axes = []
    for i, v in enumerate([v1, v2, v3]):
        step = 1. / (cvalues.shape[i] - 2.)
        ax = np.linspace(0, 1 + step, cvalues.shape[i]) - step / 2.
        f_axes.append(ax)
    # --------------

    # --------------
    # get bounding box for cartesian vectors and compute its volume and extents
    bbox_pts = np.asarray([origin, v1, v2, v3, v1 + v2, v1 + v3, v1 + v2 + v3, v2 + v3])
    hull = Delaunay(bbox_pts)
    bbox_x, bbox_y, bbox_z = bbox_pts.T
    xmin, xmax, ymin, ymax, zmin, zmax = (bbox_x.min(), bbox_x.max(), bbox_y.min(),
                                          bbox_y.max(), bbox_z.min(), bbox_z.max())  # l,r,bottom,top
    x_length = abs(xmin - xmax)
    y_length = abs(ymin - ymax)
    z_length = abs(zmin - zmax)
    if make_cubic:
        # min_bound, max_bound = min(xmin, ymin, zmin), max(xmax, ymax, zmin)
        max_length = max(x_length, y_length, z_length)
        xmax += max_length - (xmin + x_length)
        ymax += max_length - (ymin + y_length)
        zmax += max_length - (zmin + z_length)
        x_length = y_length = z_length = max_length

    # --------------

    # --------------
    # compute new cube size, in which the bounding box can fit
    xlen, ylen, zlen = 0, 0, 0
    while xlen * ylen * zlen < min_voxels:
        if x_length == max([x_length, y_length, z_length]):
            xlen = longest_side
            ylen = int(longest_side * y_length / float(x_length))
            zlen = int(longest_side * z_length / float(x_length))
        elif y_length == max([x_length, y_length, z_length]):
            ylen = longest_side
            xlen = int(longest_side * x_length / float(y_length))
            zlen = int(longest_side * z_length / float(y_length))
        else:
            zlen = longest_side
            xlen = int(longest_side * x_length / float(z_length))
            ylen = int(longest_side * y_length / float(z_length))
        longest_side += 1
    # --------------

    # --------------
    # create a new, initially empty cube
    new_array = np.full((xlen, ylen, zlen), bval if bval is not False else np.nan)
    # get the indexes for each voxel in cube
    xidx, yidx, zidx = np.meshgrid(range(new_array.shape[0]), range(new_array.shape[1]), range(new_array.shape[2]))
    xidx = xidx.flatten()
    yidx = yidx.flatten()
    zidx = zidx.flatten()
    xyzidx = np.concatenate((np.array(xidx, ndmin=2).T, np.array(yidx, ndmin=2).T, np.array(zidx, ndmin=2).T), axis=1)
    # --------------

    # --------------
    # get the cartesian coordinates for each voxel
    xyz = np.concatenate((np.array(xmin + (xyzidx[:, 0] * abs(xmin - xmax) / float(xlen)), ndmin=2).T,
                          np.array(ymin + (xyzidx[:, 1] * abs(ymin - ymax) / float(ylen)), ndmin=2).T,
                          np.array(zmin + (xyzidx[:, 2] * abs(zmin - zmax) / float(zlen)), ndmin=2).T), axis=1)
    # create a mask for filtering all cartesian coordinates which sit inside the bounding box
    inside_mask = hull.find_simplex(xyz) >= 0
    # --------------

    # --------------
    # for all coordinates inside the bounding box, get their equivalent fractional position and set interpolated value
    basis_transform = np.linalg.inv(np.transpose([v1, v2, v3]))
    uvw = np.einsum('...jk,...k->...j', basis_transform, xyz[inside_mask])
    mask_i, mask_j, mask_k = xyzidx[inside_mask][:, 0], xyzidx[inside_mask][:, 1], xyzidx[inside_mask][:, 2]
    new_array[mask_i, mask_j, mask_k] = interpn(f_axes, cvalues, uvw, bounds_error=True, method=interp)
    # --------------

    mins = np.array((xmin, ymin, zmin)) - 0.5 * (v1 + v2 + v3) + np.array(centre)
    maxes = np.array((xmax, ymax, zmax)) - 0.5 * (v1 + v2 + v3) + np.array(centre)
    return new_array, mins, maxes
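
As a worked illustration of the change of basis in the final interpolation step above (NumPy only): a cartesian point xyz maps to fractional coordinates uvw through the inverse of the matrix whose columns are v1, v2, v3, so that xyz = u*v1 + v*v2 + w*v3.

import numpy as np

v1, v2, v3 = np.array([2., 0., 0.]), np.array([0., 1., 0.]), np.array([0., 0., 1.])
basis_transform = np.linalg.inv(np.transpose([v1, v2, v3]))   # columns are v1, v2, v3

xyz = np.array([1.0, 0.25, 0.5])
uvw = basis_transform @ xyz
print(uvw)                                      # [0.5  0.25 0.5 ]
print(uvw[0] * v1 + uvw[1] * v2 + uvw[2] * v3)  # recovers xyz
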
Example 5
def _in_box(box: np.ndarray, points: np.ndarray) -> np.ndarray:
    """For each point, return whether it lies inside the convex hull of `box`."""
    hull = ConvexHull(box)
    deln = Delaunay(box[hull.vertices])
    return deln.find_simplex(points) >= 0
Example 6
# The snippet below assumes earlier setup that is not part of the extract: the usual
# imports (numpy as np, matplotlib.pyplot as plt, scipy.spatial.Delaunay), an (N, 2)
# array `points`, a 2-D coordinate `testpoint`, and `fig1 = plt.figure()`.
ax = fig1.add_subplot()  # type:Axes
fig2 = plt.figure()
ax2 = fig2.add_subplot()  # type:Axes
fig3 = plt.figure()
ax3d = fig3.add_subplot(projection='3d')  # type:Axes3D

# vor = Voronoi(grid)
#
#
# fig = voronoi_plot_2d(vor)
tri = Delaunay(points)
print(type(tri))
ax.scatter(points[:, 0], points[:, 1])
ax2.scatter(points[:, 0], points[:, 1])
#
center_tri = tri.find_simplex(np.array([testpoint]))[0]
print(center_tri)
print(tri.simplices[center_tri])

ax.scatter(*testpoint, color="green", zorder=2)
ax2.scatter(*testpoint, color="green", zorder=2)
close_points = []
for dot in tri.simplices[center_tri]:
    point = tri.points[dot]
    z = (point[0] - 0.5)**2 + (point[1] - 0.5)**2
    close_points.append([point[0], point[1], z])
    print(point)
    print("test")
    ax2.scatter(point[0], point[1], color="red", zorder=2)
    ax3d.scatter(point[0], point[1], z, color="red", zorder=2)
ax2.triplot(points[:, 0], points[:, 1], tri.simplices.copy())
Example 7
class Tin(object):
    def __init__(self, triangulation_pts, grid):
        self.grid = grid
        self.triangulation_pts = triangulation_pts
        self.dt = Delaunay(triangulation_pts)
        self.triangles = self.__init_triangles(
            triangulation_pts[self.dt.simplices])

    def get_keys(self):
        return self.triangles.keys()

    def get_triangle(self, key):
        return self.triangles.get(key)

    def replace_triangle(self, key, new_triangle):
        """
        Replaces a triangle by param new_triangle given their key.
        """
        if self.triangles.get(key):
            del self.triangles[key]
            self.triangles[key] = new_triangle
            return True

        return False

    def distribute_points(self, points, remove=None):
        """
        Distributes some points into the Triangles to which they belong.
        """
        for point in points:
            if remove and remove == point:
                continue

            simplex = self.dt.find_simplex(np.array([(point.x, point.y)]))
            triangle_pts = self.triangulation_pts[self.dt.simplices[simplex]]
            triangle = self.triangles[Triangle.get_triangle_key(
                triangle_pts[0])]
            triangle.points = np.append(triangle.points, [point])

            self.compute_point_error(point, triangle_pts[0])

    def compute_point_error(self, point, triangle_pts):
        """
        Updates a points error value based on a list of triangle point coordinates using
        barycentric-coordinate interpolation.
        """
        # Get the points defining the triangle from the grid
        p1 = self.grid.get(triangle_pts[0][0], triangle_pts[0][1])
        p2 = self.grid.get(triangle_pts[1][0], triangle_pts[1][1])
        p3 = self.grid.get(triangle_pts[2][0], triangle_pts[2][1])

        # Edit the points estimation and error values
        estimation = estimate_point_in_triangle(point, p1, p2, p3)

        point.error = abs((estimation - point.value) / point.value)
        point.estimate = estimation

    @staticmethod
    def __init_triangles(tri_coords):
        """
        Creates a dictionary of triangles given their coordinates.
        """
        triangles = dict()
        for coord in tri_coords:
            p1 = Point(coord[0][0], coord[0][1])
            p2 = Point(coord[1][0], coord[1][1])
            p3 = Point(coord[2][0], coord[2][1])
            triangles[Triangle.get_triangle_key(coord)] = Triangle(p1, p2, p3)

        return triangles

    def compute_hypothetical_errors(self, point_indeces):
        """
        Computes the hypothetical errors of each point in the TIN, if they were to be removed.
        """
        indices, indptr = self.dt.vertex_neighbor_vertices

        for pt_index in point_indeces:
            current_vertex = self.dt.points[pt_index]
            point = self.grid.get(current_vertex[0], current_vertex[1])

            if point in self.grid.get_corner_set():
                continue

            # Find neighboring vertices of the vertex at pt_index & create hypothetical triangulation
            neighbor_indeces = indptr[indices[pt_index]:indices[pt_index + 1]]
            neighbors = self.dt.points[neighbor_indeces]
            hypothetical_triangulation = Delaunay(neighbors)

            # Store neighbor point data in the point for later
            point_neighbors = set()
            for pt in neighbors:
                point_neighbors.add(self.grid.get(pt[0], pt[1]))
            point.neighbors = point_neighbors

            # Locate current point in new triangulation & compute error
            simplex = hypothetical_triangulation.find_simplex(current_vertex)
            triangle_pts = hypothetical_triangulation.points[
                hypothetical_triangulation.simplices[simplex]]
            self.compute_point_error(point, triangle_pts)
Example 8
def _vox_tri_weights_worker(t_range,
                            inps_vox,
                            outps_vox,
                            tris,
                            spc,
                            factor,
                            ones=False):
    """
    Helper method for vox_tri_weights(). 

    Args: 
        t_range: iterable of triangle numbers to process
        in_surf: inner surface of cortex, voxel coordinates
        out_surf: outer surface of cortex, voxel coordinates 
        spc: ImageSpace in which surfaces lie 
        factor: voxel subdivision factor

    Returns: 
        sparse CSR matrix of size (n_vox x n_tris)
    """

    # Initialise a grid of sample points, sized by (factor) in each dimension.
    # We then shift the samples into each individual voxel.
    vox_tri_samps = sparse.dok_matrix((spc.size.prod(), tris.shape[0]),
                                      dtype=NP_FLOAT)
    samplers = [
        np.linspace(0, 1, 2 * f + 1, dtype=NP_FLOAT)[1:-1:2] for f in factor
    ]
    samples = (np.stack(np.meshgrid(*samplers), axis=-1).reshape(-1, 3) - 0.5)

    for t in t_range:

        # Stack the vertices of the inner and outer triangles into a 6x3 array.
        # We will then refer to these points by the indices abc, ABC; lower
        # case for the white surface, upper for the pial. We also cycle the
        # vertices (note, NOT A SHUFFLE) such that the highest index is first
        # (corresponding to A,a). The relative ordering of vertices remains the
        # same, so we use flagsum to check if B < C or C < B.
        tri = tris[t, :]
        tri_max = np.argmax(tri)
        tri_sort = [tri[(tri_max + i) % 3] for i in range(3)]
        flagsum = sum(
            [int(tri_sort[v] < tri_sort[(v + 1) % 3]) for v in range(3)])

        # Two positive divisions and one negative
        if flagsum == 2:
            tets = TETRA1

        # This MUST be two negatives and one positive.
        else:
            tets = TETRA2

        hull_ps = np.vstack((inps_vox[tri_sort, :], outps_vox[tri_sort, :]))

        # Get the neighbourhood of voxels through which this prism passes
        # in linear indices (note the +1 on the upper bound)
        bbox = (np.vstack(
            (np.maximum(0, hull_ps.min(0)),
             np.minimum(spc.size,
                        hull_ps.max(0) + 1))).round().astype(np.int32))
        hood = np.array(list(
            itertools.product(range(*bbox[:, 0]), range(*bbox[:, 1]),
                              range(*bbox[:, 2]))),
                        dtype=np.int32)

        # The bbox may not intersect any voxels within the FoV at all, skip
        if not hood.size:
            continue
        hood_vidx = np.ravel_multi_index(hood.T, spc.size)

        # Debug mode: just stick ones in all candidate voxels and continue
        if ones:
            vox_tri_samps[hood_vidx, t] = factor.prod()
            continue

        for vidx, ijk in zip(hood_vidx, hood.astype(NP_FLOAT)):
            v_samps = ijk + samples

            # The two triangles form an almost triangular prism in space (like a
            # toblerone bar...). It has 6 vertices and 8 triangular faces (2 end
            # caps, 3 almost rectangular side faces that are further split into 2
            # triangles each). Splitting the quadrilateral faces into triangles is
            # the tricky bit as it can be done in two ways, as below.
            #
            #   pial
            # N______N+1
            #  |\  /|
            #  | \/ |
            #  | /\ |
            # n|/__\|n+1
            #   white
            #
            # It is important to ensure that neighbouring prisms share the same
            # subdivision of their adjacent faces (ie, both of them agree to split
            # it in the \ or / direction) to avoid double counting regions of space.
            # This is achieved by enumerating the triangular faces of the prism in
            # a specific order according to the index numbers of the triangle
            # vertices. For each vertex n, if the index number of vertex n+1 (with
            # wraparound for the last vertex) is greater, then we split the face
            # that the edge (n, n+1) belongs to in a "positive" manner. Otherwise,
            # we split the face in a "negative" manner. A positive split means that
            # a diagonal will go from the pial vertex N to white vertex n+1. A
            # negative split will go from pial vertex N+1 to white vertex n. As a
            # result, around the complete prism formed by the two triangles, there
            # will be two face diagonals that ALWAYS meet at the WHITE vertex
            # with the HIGHEST index number (referred to as 'a'). With these two
            # diagonals fixed, the order of the last diagonal depends on the
            # condition B < C (+ve) or C < B (-ve). We check this using the
            # flagsum variable, which will be 2 for B < C or 1 for C < B. Finally,
            # knowing how the last diagonal is arranged, there are exactly two
            # ways of splitting the prism down, hardcoded at the top of this file.
            # See http://www.alecjacobson.com/weblog/?p=1888.

            # Test the sample points against the tetrahedra. We don't care about
            # double counting within the polyhedra (although in theory this
            # shouldn't happen). Hull formation can fail due to geometric
            # degeneracy so wrap it up in a try block
            samps_in = np.zeros(v_samps.shape[0], dtype=bool)
            for tet in tets:
                try:
                    hull = Delaunay(hull_ps[tet, :])
                    samps_in |= (hull.find_simplex(v_samps) >= 0)

                # Silent fail for geometric degeneracy, raise anything else
                except QhullError:
                    continue

                except Exception as e:
                    raise e

            # Don't write explicit zero
            if samps_in.any():
                vox_tri_samps[vidx, t] = samps_in.sum()

    return vox_tri_samps.tocsr()
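
A worked illustration of the sample-point construction at the top of the function, assuming factor = (2, 2, 2): np.linspace(0, 1, 2*f + 1)[1:-1:2] picks the centres of f equal sub-intervals of [0, 1], and subtracting 0.5 recentres those offsets on a voxel centre.

import numpy as np

f = 2
offsets = np.linspace(0, 1, 2 * f + 1)[1:-1:2] - 0.5
print(offsets)        # [-0.25  0.25]

# In 3-D this yields factor.prod() = 8 sample offsets per voxel, as used above.
samples = np.stack(np.meshgrid(offsets, offsets, offsets), axis=-1).reshape(-1, 3)
print(samples.shape)  # (8, 3)
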
Example 9
class SurrogateModel(abc.ABC):
    """A model which can be trained upon previously generated data,
    and then be more rapidly evaluated than generating fresh data.
    """
    @property
    def parameters(self) -> List[str]:
        """list of str: The names of the parameters that this model will be trained
        upon / can be evaluated using."""
        return self._parameter_labels

    @property
    def n_parameters(self) -> int:
        """The number the parameters that this model will be trained upon /
        can be evaluated using."""
        return len(self._parameter_labels)

    @property
    def convex_hull(self) -> Delaunay:
        """scipy.spatial.qhull: A convex hull which is wrapped around the parameters
        which were used to train the model."""
        return self._convex_hull

    def __init__(
        self,
        parameter_labels: List[str],
        condition_parameters: bool,
        condition_data: bool,
        double_precision: bool,
    ):
        """
        Parameters
        ----------
        parameter_labels: list of str
            The names of the parameters that this model will be trained upon /
            can be evaluated using.
        condition_parameters: bool
            If true, all training parameters for this model will be shifted to
            have a zero mean, and to fall within the range [-1, 1].
        condition_data: bool
            If true, all training data for this model will be shifted to
            have a zero mean, and to fall within the range [-1, 1]. The
            uncertainties in the training values will also be scaled by the
            same amount as the training values themselves.
        double_precision: bool
            Whether to use single or double precision.
        """

        self._parameter_labels = parameter_labels

        # Keep track of the data that this model was trained upon
        self._training_parameters = None
        self._training_values = None
        self._training_uncertainties = None

        self._condition_parameters = condition_parameters
        self._condition_data = condition_data

        self._parameter_scale = None
        self._parameter_shift = None

        self._value_scale = None
        self._value_shift = None

        self._double_precision = double_precision

        # Define some useful torch constants
        self._zero = torch.tensor(
            0.0,
            dtype=torch.float32 if not double_precision else torch.float64)
        self._one = torch.tensor(
            1.0,
            dtype=torch.float32 if not double_precision else torch.float64)

        # Define the hull we will use to check whether the parameters
        # to evaluate lie within the model's region of confidence.
        self._convex_hull = None

        self._flat_parameter_indices = []
        self._flat_parameter_values = []

    def _parameter_dict_to_tensor(
            self, parameters: Dict[str, numpy.ndarray]) -> torch.Tensor:
        """Convert a dictionary of numpy arrays to a single
        pytorch tensor (with the parameter ordering dictated by
        the ordering of the model's parameter labels).

        Parameters
        ----------
        parameters: dict of str and numpy.ndarray
            The parameter dictionary to convert.

        Returns
        -------
        torch.Tensor
            The converted parameters.
        """

        array_parameters = parameter_dict_to_array(parameters,
                                                   self._parameter_labels)

        if not self._double_precision:
            return torch.from_numpy(array_parameters).float()
        else:
            return torch.from_numpy(array_parameters).double()

    def _validate_training_data(
        self,
        parameters: Dict[str, numpy.ndarray],
        values: numpy.ndarray,
        uncertainties: numpy.ndarray,
    ):
        """Validate the data to train this model on, checking among
        other things that all dimensions are correct, and converting
        any `numpy` arrays to `pytorch.Tensor` objects.

        Parameters
        ----------
        parameters: dict of str and numpy.ndarray
            The parameters used to generate the training data with
            shape=(n_data_points,).
        values: numpy.ndarray
            The training data with shape=(n_data_points,).
        uncertainties: numpy.ndarray
            The uncertainties in the `values` (assumed to be gaussian) with
            shape=(n_data_points,).

        Returns
        -------
        torch.Tensor
            The validated parameters with shape=(n_data_points, n_parameters).
        torch.Tensor
            The training data with shape=(n_data_points,).
        torch.Tensor
            The uncertainties in the `values` (assumed to be Gaussian) with
            shape=(n_data_points, 1).
        """

        # Make sure the parameter / values arrays are the correct shapes.
        parameters = self._parameter_dict_to_tensor(parameters)

        if values.ndim == 1:
            values = values.reshape(-1, 1)
        if uncertainties.ndim == 1:
            uncertainties = uncertainties.reshape(-1, 1)

        assert values.ndim == 2
        assert values.shape[1] == 1
        assert len(values) == len(parameters)

        assert uncertainties.shape == values.shape

        if self._double_precision:
            values = torch.from_numpy(values).double()
            uncertainties = torch.from_numpy(uncertainties).double()
        else:
            values = torch.from_numpy(values).float()
            uncertainties = torch.from_numpy(uncertainties).float()

        return parameters, values, uncertainties

    def _retrain(self):
        """Re-train the models hyperparameters based on the currently
        available training data.
        """
        raise NotImplementedError()

    def _rebuild_hull(self):

        numpy_parameters = self._training_parameters.numpy()

        # Check that we have enough points to build a Delaunay hull.
        if numpy_parameters.shape[0] < numpy_parameters.shape[1] + 2:
            return

        # We need to remove any 'flat' degrees of freedom (i.e. any
        # parameter for which all training data has the same value, such
        # as temperature if all training points were measured at the
        # same temperature).
        self._flat_parameter_indices = numpy.argwhere(
            numpy.all(numpy_parameters == numpy_parameters[0, :], axis=0))

        self._flat_parameter_values = numpy_parameters[
            0, self._flat_parameter_indices]

        index_mask = numpy.ones(numpy_parameters.shape[1], dtype=bool)
        index_mask[self._flat_parameter_indices] = 0

        hull_parameters = numpy_parameters[:, index_mask]

        self._convex_hull = Delaunay(hull_parameters)

    def add_training_data(
        self,
        parameters: Dict[str, numpy.ndarray],
        values: numpy.ndarray,
        uncertainties: numpy.ndarray,
    ):
        """Trains the model on a new set of data.

        Parameters
        ----------
        parameters: dict of str and numpy.ndarray
            The parameters used to collect the data, where each array has
            shape=(n_data_points,).
        values: numpy.ndarray
            The data collected using the specified parameters with
            shape=(n_data_points, 1).
        uncertainties: numpy.ndarray
            The uncertainties in the `values` (assumed to be Gaussian) with
            shape=(n_data_points, 1).
        """

        (parameters, values,
         uncertainties) = self._validate_training_data(parameters, values,
                                                       uncertainties)

        # Add the extra data to the existing set.
        if self._training_parameters is None:
            self._training_parameters = parameters
        else:

            # Make sure to un-condition the existing parameters.
            self._training_parameters = (
                self._training_parameters * self._parameter_scale +
                self._parameter_shift)

            self._training_parameters = torch.cat(
                [self._training_parameters, parameters])

        if self._training_values is None:

            self._training_values = values
            self._training_uncertainties = uncertainties

        else:

            # First make sure to un-condition the existing data.
            self._training_values = (
                self._training_values * self._value_scale + self._value_shift)
            self._training_uncertainties = (self._training_uncertainties *
                                            self._value_scale)

            self._training_values = torch.cat([self._training_values, values])
            self._training_uncertainties = torch.cat(
                [self._training_uncertainties, uncertainties])

        # Determine any conditioning factors.
        if self._condition_parameters:

            # noinspection PyArgumentList
            self._parameter_shift = torch.mean(self._training_parameters,
                                               axis=0)
            # noinspection PyArgumentList
            self._parameter_scale = (self._training_parameters.max(axis=0)[0] -
                                     self._training_parameters.min(axis=0)[0])
            self._parameter_scale = torch.where(
                torch.isclose(self._parameter_scale, self._zero),
                self._one,
                self._parameter_scale,
            )

        else:

            self._parameter_shift = torch.zeros(
                (1, parameters.shape[1]),
                dtype=torch.float32
                if not self._double_precision else torch.float64,
            )
            self._parameter_scale = torch.ones(
                (1, parameters.shape[1]),
                dtype=torch.float32
                if not self._double_precision else torch.float64,
            )

        if self._condition_data:

            # noinspection PyArgumentList
            self._value_shift = torch.mean(self._training_values, axis=0)
            # noinspection PyArgumentList
            self._value_scale = (self._training_values.max(axis=0)[0] -
                                 self._training_values.min(axis=0)[0])
            self._value_scale = torch.where(
                torch.isclose(self._value_scale, self._zero),
                self._one,
                self._value_scale,
            )

        else:

            self._value_shift = torch.zeros(
                (1, ),
                dtype=torch.float32
                if not self._double_precision else torch.float64,
            )
            self._value_scale = torch.ones(
                (1, ),
                dtype=torch.float32
                if not self._double_precision else torch.float64,
            )

        # Condition the data.
        self._training_parameters = (
            self._training_parameters -
            self._parameter_shift) / self._parameter_scale

        self._training_values = (self._training_values -
                                 self._value_shift) / self._value_scale
        self._training_uncertainties = self._training_uncertainties / self._value_scale

        self._rebuild_hull()
        self._retrain()

    def can_evaluate(self, parameters: Dict[str, numpy.ndarray]) -> bool:
        """Checks whether this model has been trained upon sufficient
        data close to the parameters of interest to be able to be
        accurately evaluated.

        Parameters
        ----------
        parameters: dict of str and numpy.ndarray
            The parameters to evaluate the model at where
            each array has shape=(n_data_points).

        Returns
        -------
        bool
        """

        if self._convex_hull is None:
            return False

        parameters = self._parameter_dict_to_tensor(parameters)
        parameters = (parameters -
                      self._parameter_shift) / self._parameter_scale

        parameters = parameters.numpy()

        flat_parameters = parameters[:, self._flat_parameter_indices]

        if not numpy.allclose(flat_parameters, self._flat_parameter_values):
            return False

        index_mask = numpy.ones(parameters.shape[1], dtype=bool)
        index_mask[self._flat_parameter_indices] = 0

        hull_parameters = parameters[:, index_mask]

        return bool(numpy.all(self._convex_hull.find_simplex(hull_parameters) >= 0))

    @abc.abstractmethod
    def evaluate(
        self, parameters: Dict[str, numpy.ndarray]
    ) -> Tuple[numpy.ndarray, numpy.ndarray, Dict[str, numpy.ndarray]]:
        """Evaluate the model at the specified set of parameters.

        Parameters
        ----------
        parameters: dict of str and numpy.ndarray
            The parameters to evaluate the model at where
            each array has shape=(n_data_points).

        Returns
        -------
        numpy.ndarray
            The evaluated model values with shape=(n_data_points,)
        numpy.ndarray
            The uncertainty (assumed to be Gaussian) in each evaluated value
            with shape=(n_data_points,)
        dict of str and numpy.ndarray
            The gradient of each evaluated value with respect to the parameters
            with shape=(n_data_points,)
        """
        raise NotImplementedError()
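
To make the flat-parameter handling in `_rebuild_hull` above concrete, here is a minimal sketch on made-up training data (NumPy only) of how constant columns are detected and dropped before the Delaunay hull is built.

import numpy

training = numpy.array([[1.0, 300.0, 0.2],
                        [2.0, 300.0, 0.5],
                        [1.5, 300.0, 0.9],
                        [0.5, 300.0, 0.1]])

flat_idx = numpy.argwhere(numpy.all(training == training[0, :], axis=0))
mask = numpy.ones(training.shape[1], dtype=bool)
mask[flat_idx] = False

print(flat_idx.ravel())   # [1] -> the constant "temperature" column is flat
print(training[:, mask])  # the remaining two columns are used to build the Delaunay hull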