Example #1
    def get_segments(self,
                     seg_id: int,
                     bbox: Optional[Bounds] = None) -> nx.Graph:
        """Get a graph of a segmentation annotation within a bounding box.

        Arguments:
            seg_id: The segment to pull.
            bbox: The bounding box object, default None. If None, uses entire volume.

        Returns:
            G: A networkx subgraph from the specified segment and bounding box.
        """
        check_type(seg_id, (int, np.integer))
        if self.cv_segments is None:
            raise ValueError("Cannot get segments without segmentation data.")

        df = read_s3(self.url_segments, seg_id, self.mip)
        G = df_to_graph(df)
        if bbox is not None:
            if isinstance(bbox, Bbox):
                bbox = bbox.to_list()
            check_iterable_type(bbox, (int, np.integer))
            check_iterable_nonnegative(bbox)
            G = get_sub_neuron(G, [bbox[:3], bbox[3:]])
        return G
Example #2
def removeSmallCCs(segmentation: np.ndarray, size: Union[int,
                                                         float]) -> np.ndarray:
    """Removes small connected components from an image.

    Parameters:
    segmentation : Segmentation data of image or volume.
    size : Size threshold; connected components smaller than this are removed.

    Returns:
    largeCCs : Segmentation with small connected components removed.
    """
    check_type(segmentation, (list, np.ndarray))
    check_type(size, numerical)

    labels = label(segmentation, return_num=False)

    if labels.max() == 0:
        raise ValueError("No connected components!")
    counts = np.bincount(labels.flat)[1:]

    for v, count in enumerate(counts):
        if count < size:
            labels[labels == v + 1] = 0

    largeCCs = labels != 0
    return largeCCs
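
A minimal usage sketch (not from the source), assuming removeSmallCCs and its numpy/scikit-image dependencies are importable; the toy array is illustrative:

import numpy as np

# toy segmentation: one 25-pixel block and one single-pixel speck
seg = np.zeros((10, 10), dtype=int)
seg[0:5, 0:5] = 1
seg[8, 8] = 1

# components smaller than 5 pixels are removed, so only the block survives
mask = removeSmallCCs(seg, size=5)
print(mask.sum())  # 25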
Example #3
def create_skel_segids(
        swc_dir: str,
        origin: Sequence[Union[int, float]]) -> Tuple[Skeleton, List[int]]:
    """Create skeletons to be uploaded as precomputed format

    Arguments:
        swc_dir: Path to consensus swc files.
        origin: x,y,z coordinate of the coordinate frame in space, in microns.

    Returns:
        skeletons: .swc skeletons to be pushed to bucket.
        segids: List of ints for each swc's label.
    """
    check_type(swc_dir, str)
    check_size(origin)

    p = Path(swc_dir)
    files = [str(i) for i in p.glob("*.swc")]
    if len(files) == 0:
        raise FileNotFoundError(f"No .swc files found in {swc_dir}.")
    skeletons = []
    segids = []
    for i in tqdm(files, desc="converting swcs to neuroglancer format..."):
        skeletons.append(swc2skeleton(i, origin=origin))
        segids.append(skeletons[-1].id)
    return skeletons, segids
Example #4
def upload_segments(input_path, precomputed_path, num_mips):
    """Uploads segmentation data from local to precomputed path.

    Arguments:
        input_path: The filepath to the root directory of the octree data, which contains the consensus-swcs folder.
        precomputed_path: CloudVolume precomputed path or url.
        num_mips: The number of resolutions to upload (for info file).
    """
    check_type(input_path, str)
    check_precomputed(precomputed_path)
    check_type(num_mips, (int, np.integer))
    if num_mips < 1:
        raise ValueError(
            f"Number of resolutions should be > 0, not {num_mips}")

    (_, _, vox_size, img_size, origin) = get_volume_info(
        input_path,
        num_mips,
    )
    vols = create_cloud_volume(
        precomputed_path,
        img_size,
        vox_size,
        num_mips,
        layer_type="segmentation",
    )
    swc_dir = Path(input_path) / "consensus-swcs"
    segments, segids = create_skel_segids(str(swc_dir), origin)
    for skel in segments:
        vols[0].skeleton.upload(skel)
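
A hedged usage sketch; the local octree directory and the destination precomputed path are hypothetical placeholders:

# the input directory is expected to contain a "consensus-swcs" subfolder
upload_segments(
    "/data/brain1_octree",                      # hypothetical local octree root
    "file:///tmp/precomputed/brain1_segments",  # hypothetical precomputed destination
    num_mips=1,
)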
Example #5
    def create_tubes(self,
                     seg_id: Union[int, float],
                     bbox: Bounds,
                     radius: Optional[int] = None):
        """Creates voxel-wise foreground/background labels associated with a particular neuron trace,
        within a given bounding box of voxel coordinates.

        Arguments:
            seg_id: The id of the .swc file.
            bbox: The bounding box to draw tubes within.
            radius: Euclidean distance threshold used to draw tubes, default None = 1 px thick.

        Returns:
            labels: A volume within the bounding box, with 1 on tubes and 0 elsewhere.
        """
        if self.cv_segments is None:
            raise ValueError("Cannot get segments without segmentation data.")
        check_type(seg_id, int)
        if radius is not None:
            check_type(radius, (int, np.integer, float, np.floating))
            if radius <= 0:
                raise ValueError("Radius must be positive.")

        G = self.get_segments(seg_id, bbox)
        paths = graph_to_paths(G)
        if isinstance(bbox, Bbox):
            bbox = bbox.to_list()
        check_iterable_type(bbox, (int, np.integer))
        check_iterable_nonnegative(bbox)
        labels = tubes_from_paths(np.subtract(bbox[3:], bbox[:3]), paths,
                                  radius)
        return labels
Example #6
    def pull_voxel(self,
                   seg_id: int,
                   v_id: int,
                   radius: int = 1) -> Tuple[np.ndarray, Bbox, np.ndarray]:
        """Pull a subvolume around a specified skeleton vertex with of shape [2r+1, 2r+1, 2r+1], in voxels.

        Arguments:
            seg_id: ID of the segment to use, depends on data in s3.
            v_id: ID of the vertex to use, depends on the segment.
            radius: Radius of pulled volume around central voxel, in voxels.
                Optional, default is 1 (3x3x3 volume is pulled, centered at the vertex).

        Returns:
            img: A (2*radius+1) x (2*radius+1) x (2*radius+1) volume.
            bounds: Bounding box object which contains the bounds of the volume.
            vox_in_img: List of coordinates which locate the initial point in the volume.
        """
        check_type(radius, (int, np.integer))
        if radius < 0:
            raise ValueError(f"{radius} should be nonnegative.")

        voxel = self._get_voxel(seg_id,
                                v_id)  # does type checking for seg_id and v_id
        bounds = Bbox(voxel, voxel)
        seed = bounds.to_list()
        shape = [radius] * 3
        bounds = Bbox(np.subtract(seed[:3], shape),
                      np.add(np.add(seed[3:], shape), 1))
        img = self.pull_bounds_img(bounds)
        # img = self.cv.download(bounds, mip=self.mip)
        vox_in_img = voxel - np.array(bounds.to_list()[:3])
        return np.squeeze(np.array(img)), bounds, vox_in_img
Example #7
    def __init__(
        self,
        url: str,  #  = "s3://open-neurodata/brainlit/brain1"
        mip: int = 0,
        url_segments: Optional[str] = None,
    ):
        check_precomputed(url)
        check_type(mip, (int, np.integer))
        self.url = url
        self.cv = CloudVolume(url, parallel=False)
        if mip < 0 or mip >= len(self.cv.scales):
            raise ValueError(
                f"{mip} should be between 0 and {len(self.cv.scales)}.")
        self.mip = mip
        self.chunk_size = self.cv.scales[self.mip]["chunk_sizes"][0]
        self.scales = self.cv.scales[self.mip]["resolution"]

        self.url_segments = url_segments
        if url_segments is None:
            try:  # default is to add _segments
                self.cv_segments = CloudVolume(url + "_segments",
                                               parallel=False)
                self.url_segments = url + "_segments"
            except InfoUnavailableError:
                warnings.warn(
                    UserWarning(
                        f"Segmentation volume not found at {self.url_segments}, defaulting to None."
                    ))
                self.cv_segments = None
        else:
            check_precomputed(url_segments)
            self.cv_segments = CloudVolume(url_segments, parallel=False)
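
A hedged construction sketch, assuming this __init__ belongs to the NeuroglancerSession class referenced in the find_somas example further below; the segment and vertex ids are illustrative only:

sess = NeuroglancerSession(
    url="s3://open-neurodata/brainlit/brain1",
    mip=1,
    url_segments="s3://open-neurodata/brainlit/brain1_segments",
)
# pull a 3x3x3 voxel neighborhood around vertex 0 of segment 2 (ids illustrative)
img, bounds, vox_in_img = sess.pull_voxel(seg_id=2, v_id=0, radius=1)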
Example #8
def create_skel_segids(
    swc_dir: str,
    origin: Sequence[Union[int, float]],
    benchmarking: Optional[bool] = False,
) -> Tuple[Skeleton, List[int]]:
    """Create skeletons to be uploaded as precomputed format

    Arguments:
        swc_dir: Path to consensus swc files.
        origin: x,y,z coordinate of the coordinate frame in space, in microns.
        benchmarking: Optional, whether the swc files are from the benchmarking dataset.

    Returns:
        skeletons: .swc skeletons to be pushed to bucket.
        segids: List of ints for each swc's label.
    """
    check_type(swc_dir, str)
    check_size(origin)
    check_type(benchmarking, bool)

    p = Path(swc_dir)
    files = [str(i) for i in p.glob("*.swc")]
    if len(files) == 0:
        raise FileNotFoundError(f"No .swc files found in {swc_dir}.")
    skeletons = []
    segids = []
    for i in tqdm(files, desc="converting swcs to neuroglancer format..."):
        swc_trace = NeuronTrace(path=i)
        skel = swc_trace.get_skel(benchmarking, origin=np.asarray(origin))

        skeletons.append(skel)
        segids.append(skeletons[-1].id)
    return skeletons, segids
Example #9
    def ssd(pts1, pts2):
        """Compute significant spatial distance metric between two traces as defined in APP1.
        Args:
            pts1 (np.array): array containing coordinates of points of trace 1. shape: npoints x ndims
            pts2 (np.array): array containing coordinates of points of trace 2. shape: npoints x ndims
        Returns:
            [float]: significant spatial distance as defined by APP1

        Example
        -------
        >>> pts1 = swc_trace.get_paths()[0][1:10]
        >>> pts2 = swc_trace.get_paths()[0][11:20]

        >>> NeuronTrace.ssd(pts1,pts2)

        6.247937554557103

        """
        check_type(pts1, np.ndarray)
        check_type(pts2, np.ndarray)

        _, dists1 = pairwise_distances_argmin_min(pts1, pts2)
        dists1 = dists1[dists1 >= 2]
        _, dists2 = pairwise_distances_argmin_min(pts2, pts1)
        dists2 = dists2[dists2 >= 2]
        # If there is no significant distance between the 2 sets
        if len(dists1) == 0 and len(dists2) == 0:
            ssd = 0
        # Else, calculate the mean
        else:
            dists = np.concatenate([dists1, dists2])
            ssd = np.mean(dists)

        return ssd
Example #10
def tubes_from_paths(
    size: Tuple[int, int, int],
    paths: List[List[int]],
    radius: Optional[Union[float, int]] = None,
):
    """Constructs tubes from list of paths.
    Returns densely labeled paths within the shape of the image.

    Arguments:
        size: The size of image to consider.
        paths: The list of paths. Each path is a list of points along the path (non-dense).
        radius: The radius of the line to draw. Default is None = 1 pixel wide line.
    """
    check_size(size)
    for path in paths:
        [check_iterable_type(vert, (int, np.integer)) for vert in path]
    if radius is not None:
        check_type(radius, (int, np.integer, float, np.floating))
        if radius <= 0:
            raise ValueError(f"Radius {radius} must be positive.")

    def _within_img(line, size):
        arrline = np.array(line).astype(int)
        arrline = arrline[:, arrline[0, :] < size[0]]
        arrline = arrline[:, arrline[0, :] >= 0]
        arrline = arrline[:, arrline[1, :] < size[1]]
        arrline = arrline[:, arrline[1, :] >= 0]
        arrline = arrline[:, arrline[2, :] < size[2]]
        arrline = arrline[:, arrline[2, :] >= 0]
        return (arrline[0, :], arrline[1, :], arrline[2, :])

    coords = [[], [], []]
    for path in tqdm(paths):
        for i in range(len(path) - 1):
            line = draw.line_nd(path[i], path[i + 1])
            line = _within_img(line, size)
            if len(line[0]) > 0:
                coords[0] = np.concatenate((coords[0], line[0]))
                coords[1] = np.concatenate((coords[1], line[1]))
                coords[2] = np.concatenate((coords[2], line[2]))

    try:
        coords = (coords[0].astype(int), coords[1].astype(int),
                  coords[2].astype(int))
    except AttributeError:  # if a list was passed
        coords = (coords[0], coords[1], coords[2])

    if radius is not None:
        line_array = np.ones(size, dtype=int)
        line_array[coords] = 0
        seg = distance_transform_edt(line_array)
        labels = np.where(seg <= radius, 1, 0)
    else:
        labels = np.zeros(size, dtype=int)
        labels[coords] = 1

    return labels
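
A small usage sketch (not from the source), assuming tubes_from_paths and its dependencies are importable; the path coordinates are illustrative voxel positions:

# one straight path through a 10x10x10 volume
path = [[1, 1, 1], [8, 1, 1]]
thin = tubes_from_paths((10, 10, 10), [path])             # 1-voxel-wide line
thick = tubes_from_paths((10, 10, 10), [path], radius=2)  # dilated tube
print(thin.sum(), thick.sum())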
Example #11
    def get_paths(self, spacing=None, origin=None):
        """Converts dataframe in either spatial or voxel coordinates into a list of paths.
        Will convert to voxel coordinates if spacing is specified.

        Arguments
        ----------
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first.  Default is None.
        origin : None, :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        paths : list
            List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
            units. Each array is one path.

        Example
        -------
        >>> swc_trace.get_paths()[0][1:10]
        array([[-52, -1, -1],
               [-51, -1, 0],
               [-51, -1, 0],
               [-50, 0, 0],
               [-50, 0, 0],
               [-49, 0, 0],
               [-48, 0, 0],
               [-46, 0, 0],
               [-46, 0, 0]], dtype=object)
        """
        check_type(spacing, (type(None), np.ndarray))
        if type(spacing) == np.ndarray:
            check_size(spacing)
        check_type(origin, (type(None), np.ndarray))
        if type(origin) == np.ndarray:
            check_size(origin)

        # if origin isn't specified but spacing is, set origin to np.array([0, 0, 0])
        if type(spacing) == np.ndarray and origin is None:
            origin = np.array([0, 0, 0])

        # voxel conversion option
        if type(spacing) == np.ndarray:
            df_voxel = self._df_in_voxel(self.df, spacing, origin)
            G = self._df_to_graph(df_voxel)

        # no voxel conversion option
        else:
            G = self._df_to_graph(self.df)

        paths = self._graph_to_paths(G)

        return paths
Example #12
def upload_segments(input_path,
                    precomputed_path,
                    num_mips,
                    benchmarking: Optional[bool] = False):
    """Uploads segmentation data from local to precomputed path.

    Arguments:
        input_path: The filepath to the root directory of the octree data, which contains the consensus-swcs folder.
        precomputed_path: CloudVolume precomputed path or url.
        num_mips: The number of resolutions to upload (for info file).
        benchmarking: Optional, whether the data are from the benchmarking dataset; if True, swc vertices are scaled accordingly.

    """
    check_type(input_path, str)
    check_precomputed(precomputed_path)
    check_type(num_mips, (int, np.integer))
    if num_mips < 1:
        raise ValueError(
            f"Number of resolutions should be > 0, not {num_mips}")

    if benchmarking == True:
        # Getting swc scaling parameters
        f = Path(input_path).parts[4].split("_")
        image = f[0]
        date = type_to_date[image]
        scale = scales[date]
        (_, _, vox_size, img_size, origin) = get_volume_info(
            input_path,
            num_mips,
            benchmarking=True,
        )
        chunk_size = [int(i) for i in img_size]
    else:
        (_, _, vox_size, img_size, origin) = get_volume_info(
            input_path,
            num_mips,
        )
        chunk_size = None

    vols = create_cloud_volume(
        precomputed_path,
        img_size,
        vox_size,
        num_mips,
        layer_type="segmentation",
        chunk_size=chunk_size,
    )

    swc_dir = Path(input_path) / "consensus-swcs"
    segments, segids = create_skel_segids(str(swc_dir), origin, benchmarking)
    for skel in segments:
        if benchmarking == True:
            skel.vertices /= scale  # Dividing vertices by scale factor
        vols[0].skeleton.upload(skel)
Example #13
    def get_sub_neuron(self, bounding_box, spacing=None, origin=None):
        """Returns sub-neuron with node coordinates bounded by start and end

        Arguments
        ----------
        bounding_box : tuple or list or None
            Defines a bounding box around a sub-region around the neuron. Length 2
            tuple/list. First element is the coordinate of one corner (inclusive)
            and second element is the coordinate of the opposite corner (exclusive).
            Both coordinates are numpy.array([x,y,z]) in voxel units.
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first.  Default is None.
        origin : :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])
        Returns
        -------
        G_sub : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.

        Example
        -------
        >>> bounding_box=[[1,2,4],[1,2,3]]

        >>> #swc input, no spacing and origin
        >>> swc_trace.get_sub_neuron(bounding_box)
        <networkx.classes.digraph.DiGraph at 0x7f81a95d1e50>
        """

        check_type(bounding_box, (tuple, list))

        if len(bounding_box) != 2:
            raise ValueError("Bounding box must be length 2")
        check_type(spacing, (type(None), np.ndarray))
        if type(spacing) == np.ndarray:
            check_size(spacing)
        check_type(origin, (type(None), np.ndarray))
        if type(origin) == np.ndarray:
            check_size(origin)

        # if origin isn't specified but spacing is, set origin to np.array([0, 0, 0])
        if type(spacing) == np.ndarray and origin is None:
            origin = np.array([0, 0, 0])

        # voxel conversion option
        if type(spacing) == np.ndarray:
            df_voxel = self._df_in_voxel(self.df, spacing, origin)
            G = self._df_to_graph(df_voxel)

        # no voxel conversion option
        else:
            G = self._df_to_graph(self.df)

        G_sub = self._get_sub_neuron(G, bounding_box)

        return G_sub
Example #14
def getLargestCC(segmentation: np.ndarray) -> np.ndarray:
    """Returns the largest connected component of a image.

    Arguments:
    segmentation : Segmentation data of image or volume.

    Returns:
    largeCC : Segmentation with only largest connected component.
    """

    check_type(segmentation, (list, np.ndarray))
    labels = label(segmentation)
    if labels.max() == 0:
        raise ValueError("No connected components!")  # assume at least 1 CC
    largestCC = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1
    return largestCC
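
A minimal usage sketch (not from the source), assuming getLargestCC and numpy are importable:

import numpy as np

seg = np.zeros((10, 10), dtype=int)
seg[0:5, 0:5] = 1   # large component (25 pixels)
seg[8, 8] = 1       # small component (1 pixel)
largest = getLargestCC(seg)
print(largest.sum())  # 25, only the large component remains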
Example #15
    def get_graph(self, spacing=None, origin=None):
        """Converts dataframe in either spatial or voxel coordinates into a directed graph.
        Will convert to voxel coordinates if spacing is specified.

        Arguments
        ----------
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first. Default is None.
        origin : None, :class:`numpy.array` (default = None)
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        G : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.

        Example
        -------
        >>> swc_trace.get_graph()
        <networkx.classes.digraph.DiGraph at 0x7f81a83937f0>
        """
        check_type(spacing, (type(None), np.ndarray))
        if type(spacing) == np.ndarray:
            check_size(spacing)
        check_type(origin, (type(None), np.ndarray))
        if type(origin) == np.ndarray:
            check_size(origin)

        # if origin isn't specified but spacing is, set origin to np.array([0, 0, 0])
        if type(spacing) == np.ndarray and origin is None:
            origin = np.array([0, 0, 0])

        # voxel conversion option
        if type(spacing) == np.ndarray:
            df_voxel = self._df_in_voxel(self.df, spacing, origin)
            G = self._df_to_graph(df_voxel)

        # no voxel conversion option
        else:
            G = self._df_to_graph(self.df)
        return G
Example #16
def process(file_path: str, bin_path: List[str], vol: CloudVolumePrecomputed):
    """The parallelizable method to upload data.

    Loads the image into memory, and pushes it to specific ranges in the CloudVolume.

    Arguments:
        file_path: Path to the image file.
        bin_path: Binary path to the image file.
        vol: CloudVolume object to upload.
    """
    check_type(file_path, str)
    check_binary_path(bin_path)
    check_type(vol, CloudVolumePrecomputed)

    array = tf.imread(file_path).T
    ranges = get_data_ranges(bin_path, vol.scales[-1]["size"])
    vol[ranges[0][0]:ranges[0][1], ranges[1][0]:ranges[1][1],
        ranges[2][0]:ranges[2][1], ] = array
    return
Example #17
    def get_df_voxel(self, spacing, origin=np.array([0, 0, 0])):
        """Converts coordinates in pd.DataFrame from spatial units to voxel units

        Arguments
        ----------
        spacing : :class:`numpy.array`
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z])
        origin : :class:`numpy.array`
            Origin of the spatial coordinate. Default is (0,0,0). Assumed to be
            np.array([x,y,z])
        Returns
        -------
        df_voxel : :class:`pandas.DataFrame`
            Indices, coordinates, and parents of each node in the swc. Coordinates
            are in voxel units.

        Example
        -------
        >>> swc_trace.get_df_voxel(spacing=np.asarray([2,2,2]))
        sample    structure    x    y    z    r    parent
            0    1    0    -26    -1    -1    1.0    -1
            1    2    0    -26    -1    -1    1.0    1
            2    3    0    -26    -1    0    1.0    2
            3    4    0    -26    -1    0    1.0    3
            4    5    0    -25    0    0    1.0    4
            ...    ...    ...    ...    ...    ...    ...    ...
            148    149    0    23    7    -4    1.0    148
            149    150    0    23    7    -4    1.0    149
            150    151    0    23    7    -4    1.0    150
            151    152    0    24    8    -4    1.0    151
            152    153    6    24    8    -4    1.0    152
            153 rows × 7 columns


        """
        check_type(spacing, np.ndarray)
        check_size(spacing)
        check_type(origin, np.ndarray)
        check_size(origin)

        df_voxel = self._df_in_voxel(self.df, spacing, origin)
        return df_voxel
Example #18
    def get_segments(
        self,
        seg_id: int,
        bbox: Optional[Bounds] = None,
        rounding: Optional[bool] = True,
    ) -> nx.Graph:
        """Get a graph of a segmentation annotation within a bounding box.

        Arguments:
            seg_id: The segment to pull.
            bbox: The bounding box object, default None. If None, uses entire volume.
            rounding: Optional, default True. Whether to round coordinates when reading the file from S3.

        Returns:
            G: A networkx subgraph from the specified segment and bounding box.
            paths: A list of paths (Nx3 coordinate arrays) from the specified segment and bounding box.
        """

        check_type(seg_id, (int, np.integer))
        check_type(rounding, bool)
        if self.cv_segments is None:
            raise ValueError("Cannot get segments without segmentation data.")
        s3_trace = Neuron_trace.NeuronTrace(self.url_segments,
                                            seg_id,
                                            self.mip,
                                            rounding,
                                            use_https=self.use_https)

        G = s3_trace.get_graph()
        paths = s3_trace.get_paths()

        if bbox is not None:
            if isinstance(bbox, Bbox):
                bbox = bbox.to_list()
            check_iterable_type(bbox, (int, np.integer))
            check_iterable_nonnegative(bbox)
            G = s3_trace.get_sub_neuron([bbox[:3], bbox[3:]])
            paths = s3_trace.get_sub_neuron_paths([bbox[:3], bbox[3:]])

        return [G, paths]
Example #19
def subsample(
    arr: np.ndarray, orig_shape: List[int], dest_shape: List[int]
) -> np.ndarray:
    """Subsamples a flattened neighborhood to a smaller flattened neighborhood.

    Arguments:
        arr: The flattened array
        orig_shape: The original shape of the array before flattening
        dest_shape: The desired shape of the array before flattening
    """
    check_type(arr, np.ndarray)
    if len(orig_shape) != len(dest_shape):
        raise ValueError("Mismatched in and out dimensions.")
    if np.prod(orig_shape) != len(arr):
        raise ValueError("Original shape is incorrect.")
    if len(orig_shape) == 3:
        check_size(orig_shape, dim=3)
    elif len(orig_shape) == 2:
        check_size(dest_shape, dim=2)
    else:
        raise NotImplementedError("Only 2 and 3 dimensions supported.")

    start = np.subtract(orig_shape, dest_shape) // 2
    end = start + dest_shape
    if len(orig_shape) == 2:
        idx = np.ravel_multi_index(
            (np.mgrid[start[0] : end[0], start[1] : end[1]].reshape(2, -1)), orig_shape
        )
    elif len(orig_shape) == 3:
        idx = np.ravel_multi_index(
            (
                np.mgrid[
                    start[0] : end[0], start[1] : end[1], start[2] : end[2]
                ].reshape(3, -1)
            ),
            orig_shape,
        )
    return arr[idx]
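
A worked usage sketch (not from the source), assuming subsample and numpy are importable:

import numpy as np

arr = np.arange(25)                      # a flattened 5x5 neighborhood
center = subsample(arr, [5, 5], [3, 3])  # keep the central 3x3 block
print(center)                            # [ 6  7  8 11 12 13 16 17 18]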
Example #20
    def _get_voxel(self, seg_id: int, v_id: int) -> Tuple[int, int, int]:
        """Gets coordinates of segment vertex, in voxel space.

        Arguments:
            seg_id: The id of the segment to use.
            v_id: The id of the vertex to use from the given segment.

        Returns:
            voxel: The voxel coordinates in (x, y, z) voxel space.
        """
        check_type(seg_id, (int, np.integer))
        check_type(v_id, (int, np.integer))
        if self.cv_segments is None:
            raise ValueError("Cannot get voxel without segmentation data")
        seg = self.cv_segments.skeleton.get(seg_id).vertices
        if v_id < 0 or v_id >= len(seg):
            raise ValueError(f"{v_id} should be between 0 and {len(seg)}.")

        vertex = seg[v_id]
        voxel = np.round(
            np.divide(
                vertex,
                self.cv_segments.scales[self.mip]["resolution"])).astype(int)
        return voxel
Example #21
    def get_skel(self, benchmarking=False, origin=None):
        """Gets a skeleton version of dataframe, if swc input is provided

        Arguments
        ----------
            origin : None, numpy array with shape (3,1) (default = None)
                origin of coordinate frame in microns, (default: None assumes (0,0,0) origin)
            benchmarking : bool
                For swc files, specifies whether swc file is from benchmarking dataset, to obtain skeleton ID
        Returns
        --------
            skel : cloudvolume.Skeleton
                Skeleton object of given SWC file

        Example
        -------
        >>> swc_trace.get_skel(benchmarking=True)
        Skeleton(segid=, vertices=(shape=153, float32), edges=(shape=152, uint32), radius=(153, float32), vertex_types=(153, uint8), vertex_color=(153, float32), space='physical' transform=[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]])
        """
        check_type(origin, (type(None), np.ndarray))
        check_type(benchmarking, bool)
        if type(origin) == np.ndarray:
            check_size(origin)

        if self.input_type == "swc":
            skel = self._swc2skeleton(self.path, benchmarking, origin)
            return skel
        elif self.input_type == "skel":
            cv = CloudVolume(
                self.path,
                mip=self.mip,
                fill_missing=self.fill_missing,
                use_https=self.use_https,
            )
            skel = cv.skeleton.get(self.seg_id)
            return skel
Example #22
    def pull_vertex_list(
        self,
        seg_id: int,
        v_id_list: List[int],
        buffer: int = 1,
        expand: bool = False,
    ) -> Tuple[np.ndarray, Bbox, List[Tuple[int, int, int]]]:
        """Pull a subvolume containing all listed vertices.

        Arguments:
            seg_id: ID of the segment to use, depends on data in s3.
            v_id_list: list of vertex IDs to use.
            buffer: Buffer around the bounding box (in voxels). Default 1, set to 0 if expand is True.
            expand: Flag whether to expand subvolume to closest set of chunks.

        Returns:
            img: The image volume containing all vertices.
            bounds: Bounding box object which contains the bounds of the volume.
            vox_in_img_list: List of coordinates which locate the vertices in the volume.
        """
        check_type(seg_id, (int, np.integer))
        check_iterable_type(v_id_list, (int, np.integer))
        check_type(buffer, (int, np.integer))
        if buffer < 0:
            raise ValueError(f"Buffer {buffer} shouild not be negative.")
        check_type(expand, bool)
        if expand:
            buffer = 0
        buffer = [buffer] * 3

        voxel_list = [self._get_voxel(seg_id, i) for i in v_id_list]
        if len(voxel_list) == 1:  # edge case of 1 vertex
            bounds = Bbox(voxel_list[0] - buffer, voxel_list[0] + buffer + 1)
        else:
            voxel_list = np.array(voxel_list)
            lower = list(np.min(voxel_list, axis=0) - buffer)
            higher = list(np.max(voxel_list, axis=0) + buffer + 1)
            bounds = Bbox(lower, higher)
        if expand:
            bounds = bounds.expand_to_chunk_size(self.chunk_size)

        vox_in_img_list = np.array(voxel_list) - bounds.to_list()[:3]

        img = self.pull_bounds_img(bounds)
        return img, bounds, vox_in_img_list
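
A hedged usage sketch, assuming a session object sess constructed as in the sketch after Example #7; the segment and vertex ids are illustrative:

# expand=True snaps the bounding box to chunk boundaries (and forces buffer to 0)
img, bounds, vox_in_img_list = sess.pull_vertex_list(
    seg_id=2, v_id_list=[0, 1, 2], expand=True
)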
Example #23
    def pull_chunk(
        self,
        seg_id: int,
        v_id: int,
        radius: int = 0,
    ) -> Tuple[np.ndarray, Bbox, Tuple[int, int, int]]:
        """Pull a subvolume around a specified skeleton vertex according to chunk size.
        Each data set has a specified chunk size, which can be found by calling self.cv.info.

        Arguments:
            seg_id: ID of the segment to use, depends on data in s3.
            v_id: ID of the vertex to use, depends on the segment.
            radius: Radius of pulled volume around central chunk, in chunks.
                Optional, default is 0 (single chunk which contains the voxel).

        Returns:
            img: A chunk_size[0]*(2*radius+1) x chunk_size[1]*(2*radius+1) x chunk_size[2]*(2*radius+1) volume.
            bounds: Bounding box object which contains the bounds of the volume.
            vox_in_img: List of coordinates which locate the initial point in the volume.
        """
        check_type(seg_id, (int, np.integer))
        check_type(v_id, (int, np.integer))
        check_type(radius, (int, np.integer))
        if radius < 0:
            raise ValueError(f"Radius of {radius} should be nonnegative.")

        voxel = self._get_voxel(seg_id, v_id)
        bounds = Bbox(voxel, voxel).expand_to_chunk_size(self.chunk_size)
        seed = bounds.to_list()
        shape = [
            self.chunk_size[0] * radius,
            self.chunk_size[1] * radius,
            self.chunk_size[2] * radius,
        ]
        bounds = Bbox(np.subtract(seed[:3], shape), np.add(seed[3:], shape))
        img = self.pull_bounds_img(bounds)
        vox_in_img = voxel - np.array(bounds.to_list()[:3])
        return np.squeeze(np.array(img)), bounds, vox_in_img
Example #24
    def get_sub_neuron_paths(self, bounding_box, spacing=None, origin=None):
        """Returns sub-neuron with node coordinates bounded by start and end

        Arguments
        ----------
        bounding_box : tuple or list or None
            Defines a bounding box around a sub-region around the neuron. Length 2
            tuple/list. First element is the coordinate of one corner (inclusive)
            and second element is the coordinate of the opposite corner (exclusive).
            Both coordinates are numpy.array([x,y,z]) in voxel units.
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first.  Default is None.
        origin : :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])
        Returns
        -------
        paths : list
            List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
            units. Each array is one path.

        Example
        -------
        >>> bounding_box=[[1,2,4],[1,2,3]]

        >>> #swc input, no spacing and origin
        >>> swc_trace.get_sub_neuron_paths(bounding_box)
        array([], dtype=object)

        """

        check_type(bounding_box, (tuple, list))

        if len(bounding_box) != 2:
            raise ValueError("Bounding box must be length 2")
        check_type(spacing, (type(None), np.ndarray))
        if type(spacing) == np.ndarray:
            check_size(spacing)
        check_type(origin, (type(None), np.ndarray))
        if type(origin) == np.ndarray:
            check_size(origin)

        # if origin isn't specified but spacing is, set origin to np.array([0, 0, 0])
        if type(spacing) == np.ndarray and origin is None:
            origin = np.array([0, 0, 0])

        # voxel conversion option
        if type(spacing) == np.ndarray:
            df_voxel = self._df_in_voxel(self.df, spacing, origin)
            G = self._df_to_graph(df_voxel)

        # no voxel conversion option
        else:
            G = self._df_to_graph(self.df)

        G_sub = self._get_sub_neuron(G, bounding_box)

        paths = self._graph_to_paths(G_sub)

        return paths
Example #25
    def __init__(
        self,
        path,
        seg_id=None,
        mip=None,
        rounding=True,
        read_offset=False,
        fill_missing=True,
        use_https=False,
    ):
        self.path = path
        self.input_type = None
        self.df = None
        self.args = []
        self.seg_id = seg_id
        self.mip = mip
        self.rounding = rounding
        self.fill_missing = fill_missing
        self.use_https = use_https

        check_type(path, str)
        check_type(seg_id, (type(None), int))
        check_type(mip, (type(None), int))
        check_type(read_offset, bool)
        check_type(rounding, bool)
        if (seg_id == None and type(mip) == int) or (type(seg_id) == int
                                                     and mip == None):
            raise ValueError(
                "For 'swc' do not input mip or seg_id, and for 'skel', provide both mip and seg_id"
            )

        # first check if it is a skel
        if seg_id != None and mip != None:
            cv = CloudVolume(path,
                             mip=mip,
                             fill_missing=fill_missing,
                             use_https=use_https)
            skeleton = cv.skeleton.get(seg_id)
            if type(skeleton) is Skeleton:
                self.input_type = "skel"

        # else, check if it is a swc by checking if file exists/extension is .swc
        elif os.path.isfile(
                self.path) and os.path.splitext(path)[-1].lower() == ".swc":
            self.input_type = "swc"

        # if it is not a swc or skeleton, raise error
        if self.input_type != "swc" and self.input_type != "skel":
            raise ValueError("Did not input 'swc' filepath or 'skel' url")

        # next, convert to a dataframe
        if self.input_type == "swc" and read_offset == False:
            df, offset, color, cc, branch = self._read_swc(self.path)
            args = [offset, color, cc, branch]
            self.df = df
            self.args = args

        elif self.input_type == "swc" and read_offset == True:
            df, color, cc, branch = self._read_swc_offset(path)
            args = [None, color, cc, branch]
            self.df = df
            self.args = args

        elif self.input_type == "skel":
            df = self._read_s3(path, seg_id, mip, rounding)
            self.df = df
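
Two hedged construction sketches; the local swc path and the s3 skeleton parameters are illustrative placeholders:

# local .swc file: provide only the path, no seg_id or mip
swc_trace = NeuronTrace(path="data/example.swc")

# precomputed skeleton in s3: both seg_id and mip are required
s3_trace = NeuronTrace(
    path="s3://open-neurodata/brainlit/brain1_segments", seg_id=2, mip=0
)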
Example #26
def find_somas(volume: np.ndarray, res: list) -> Tuple[int, np.ndarray, np.ndarray]:
    r"""Find bright neuron somas in an input volume.

    This simple soma detector assumes that somas are brighter than the
    rest of the objects contained in the input volume.

    To detect somas, these steps are performed:

    #. **Check input volume shape.** This detector requires the `x` and `y` dimensions of the input volumes to be larger than `20` pixels.

    #. **Zoom volume.** We found that this simple soma detector works best when the input volume has size `160 x 160 x 50`. We use `ndimage.zoom <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.zoom.html>`_ to scale the input volume size to the desired shape.

    #. **Binarize volume.** We use `Otsu thresholding <https://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.threshold_otsu>`_ to binarize the image.

    #. **Erode the binarized image.** We erode the binarized image with a structuring element which size is directly proportional to the maximum zoom factor applied to the input volume.

    #. **Remove unreasonable connected components.** After erosion, we compute the equivalent diameter `d` of each connected component, and only keep those such that `5\mu m \leq d < 21 \mu m`.

    #. **Find relative centroids.** Finally, we compute the centroids of the remaining connected components. The centroids are in voxel units, relative to the input volume.

    Parameters
    ----------
    volume : numpy.ndarray
        The 3D image array to run the detector on.

    res : list
        A `1 x 3` list containing the resolution of each voxel in `nm`.

    Returns
    -------
    label : bool
        A boolean value indicating whether the detector found any somas in the input volume.

    rel_centroids : numpy.ndarray
        A `N x 3` array containing the relative voxel positions of the detected somas.

    out : numpy.ndarray
        A `160 x 160 x 50` array containing the detection mask.

    Examples
    --------
    >>> # download a volume
    >>> dir = "s3://open-neurodata/brainlit/brain1"
    >>> dir_segments = "s3://open-neurodata/brainlit/brain1_segments"
    >>> volume_keys = "4807349.0_3827990.0_2922565.75_4907349.0_3927990.0_3022565.75"
    >>> mip = 1
    >>> ngl_sess = NeuroglancerSession(
    >>>     mip=mip, url=dir, url_segments=dir_segments, use_https=False
    >>> )
    >>> res = ngl_sess.cv_segments.scales[ngl_sess.mip]["resolution"]
    >>> volume_coords = np.array(os.path.basename(volume_keys).split("_")).astype(float)
    >>> volume_vox_min = np.round(np.divide(volume_coords[:3], res)).astype(int)
    >>> volume_vox_max = np.round(np.divide(volume_coords[3:], res)).astype(int)
    >>> bbox = Bbox(volume_vox_min, volume_vox_max)
    >>> img = ngl_sess.pull_bounds_img(bbox)
    >>> # apply soma detector
    >>> label, rel_centroids, out = find_somas(img, res)
    """

    check_type(volume, np.ndarray)
    check_iterable_type(volume.flatten(), np.uint16)
    volume_dim = volume.ndim
    if volume_dim != 3:
        raise ValueError("Input volume must be three-dimensional")
    if volume.shape[0] < 20 or volume.shape[1] < 20:
        raise ValueError("Input volume is too small")

    check_type(res, list)
    check_iterable_type(res, (int, float))
    if len(res) != 3:
        raise ValueError("Resolution must be three-dimensional")
    if np.any([el == 0 for el in res]):
        raise ValueError("Resolution must be non-zero at every position")

    desired_size = np.array([160, 160, 50])
    zoom_factors = np.divide(desired_size, volume.shape)
    res = np.divide(res, zoom_factors)
    out = ndimage.zoom(volume, zoom=zoom_factors)
    out = out / np.max(out.flatten())
    # 1) binarize volume using Otsu's method
    t = filters.threshold_otsu(out)
    out = out > t
    # 2) erode with structuring element proportional to zoom factors
    selem_size = np.amax(np.ceil(zoom_factors)).astype(int)
    clean_selem = morphology.octahedron(selem_size)
    out = morphology.erosion(out, clean_selem)
    # 3) identify connected components
    out, num_labels = morphology.label(out, background=0, return_num=True)
    # 4) remove connected components with diameter not in reasonable range, find centroids of candidate regions
    properties = ["label", "equivalent_diameter"]
    props = measure.regionprops_table(out, properties=properties)
    df_props = pd.DataFrame(props)
    rel_centroids = []
    for _, row in df_props.iterrows():
        l = row["label"]
        d = row["equivalent_diameter"]
        dmu = d * np.mean(res[:1]) / 1000
        if dmu < 5 or dmu >= 21:
            out[out == l] = 0
            num_labels -= 1
        else:
            ids = np.where(out == l)
            centroid = np.round([np.median(u) for u in ids]).astype(int)
            centroid = np.divide(centroid, zoom_factors)
            rel_centroids.append(centroid)
    return num_labels > 0, np.array(rel_centroids), out
Example #27
def speed(
    x: np.ndarray,
    t: np.ndarray,
    c: np.ndarray,
    k: np.integer,
    aux_outputs: bool = False,
) -> np.ndarray:
    r"""Compute the speed of a B-Spline.

    The speed is the norm of the first derivative of the B-Spline.

    Arguments:
        x: A `1xL` array of parameter values where to evaluate the curve.
            It contains the parameter values where the speed of the B-Spline will
            be evaluated. It is required to be non-empty, one-dimensional, and
            real-valued.
        t: A `1xm` array representing the knots of the B-spline.
            It is required to be a non-empty, non-decreasing, and one-dimensional
            sequence of real-valued elements. For a B-Spline of degree `k`, at least
            `2k + 1` knots are required.
        c: A `dxn` array representing the coefficients/control points of the B-spline.
            Given `n` real-valued, `d`-dimensional points :math:`x_k = (x_k(1),...,x_k(d))`,
            `c` is the non-empty matrix whose columns are :math:`x_1^T,...,x_N^T`. For a
            B-Spline of order `k`, `n` cannot be less than `m-k-1`.
        k: A non-negative integer representing the degree of the B-spline.
        aux_outputs: Optional. If True, also return the first derivative of the
            B-Spline evaluated at `x`. Default is False.

    Returns:
        speed: A `1xL` array containing the speed of the B-Spline evaluated at `x`

    References:
    .. [1] Kouba, Parametric Equations.
        https://www.math.ucdavis.edu/~kouba/Math21BHWDIRECTORY/ArcLength.pdf
    """

    # convert arguments to desired type
    x = np.ascontiguousarray(x)
    t = np.ascontiguousarray(t)
    c = np.ascontiguousarray(c)
    k = operator.index(k)

    if k < 0:
        raise ValueError("The order of the spline must be non-negative")

    check_type(t, np.ndarray)
    t_dim = t.ndim
    if t_dim != 1:
        raise ValueError("t must be one-dimensional")
    if len(t) == 0:
        raise ValueError("t must be non-empty")
    check_iterable_type(t, (np.integer, np.floating))
    if (np.diff(t) < 0).any():
        raise ValueError("t must be a non-decreasing sequence")

    check_type(c, np.ndarray)
    c_dim = c.ndim
    if c_dim > 2:
        raise ValueError("c must be 2D max")
    if len(c.flatten()) == 0:
        raise ValueError("c must be non-empty")
    if c_dim == 1:
        check_iterable_type(c, (np.integer, np.floating))
        # expand dims so that we can cycle through a single dimension
        c = np.expand_dims(c, axis=0)
    if c_dim == 2:
        for d in c:
            check_iterable_type(d, (np.integer, np.floating))
    n_dim = len(c)

    check_type(x, np.ndarray)
    x_dim = x.ndim
    if x_dim != 1:
        raise ValueError("x must be one-dimensional")
    if len(x) == 0:
        raise ValueError("x must be non-empty")
    check_iterable_type(x, (np.integer, np.floating))
    L = len(x)

    # evaluate first and second derivatives
    # deriv, dderiv are (d, L) arrays
    deriv = np.empty((n_dim, L))
    for i, dim in enumerate(c):
        spl = BSpline(t, dim, k)
        deriv[i, :] = spl.derivative(nu=1)(x) if k - 1 >= 0 else np.zeros(L)
    # transpose deriv
    deriv = deriv.T

    speed = np.linalg.norm(deriv, axis=1)
    if aux_outputs == False:
        return speed
    else:
        return speed, deriv
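
A hedged usage sketch, assuming speed above is importable; scipy.interpolate.splprep is used only to produce a consistent set of knots, coefficients, and degree:

import numpy as np
from scipy.interpolate import splprep

# fit a cubic B-spline to a sampled 3D curve
u_fine = np.linspace(0, 2 * np.pi, 100)
x, y, z = np.cos(u_fine), np.sin(u_fine), u_fine
(t, c, k), u = splprep([x, y, z], k=3, s=0)   # knots, coefficients, degree
s = speed(u, t, np.asarray(c), k)
print(s.shape)  # (100,), one speed value per parameter value in u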
Example #28
def gabor_filter(
    input: np.ndarray,
    sigma: Union[float, List[float]],
    phi: Union[float, List[float]],
    frequency: float,
    offset: float = 0.0,
    output: Optional[Union[np.ndarray, np.dtype, None]] = None,
    mode: str = "reflect",
    cval: float = 0.0,
    truncate: float = 4.0,
) -> Tuple[np.ndarray, np.ndarray]:
    """Multidimensional Gabor filter. A gabor filter
    is an elementwise product between a Gaussian
    and a complex exponential.

    Parameters
    ----------
    input : array_like
        The input array.
    sigma : scalar or sequence of scalars
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    phi : scalar or sequence of scalars
        Angles specifying orientation of the periodic complex
        exponential. If the input is n-dimensional, then phi
        is a sequence of length n-1. Convention follows
        https://en.wikipedia.org/wiki/N-sphere#Spherical_coordinates.
    frequency : scalar
        Frequency of the complex exponential. Units are revolutions/voxels.
    offset : scalar
        Phase shift of the complex exponential. Units are radians.
    output : array or dtype, optional
        The array in which to place the output, or the dtype of the returned array.
        By default an array of the same dtype as input will be created. Only the real component will be saved
        if output is an array.
    mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional
        The mode parameter determines how the input array is extended beyond its boundaries.
        Default is ‘reflect’.
    cval : scalar, optional
        Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
    truncate : float
        Truncate the filter at this many standard deviations.
        Default is 4.0.

    Returns
    -------
    real, imaginary : arrays
        Returns real and imaginary responses, arrays of same
        shape as `input`.

    Notes
    -----
    The multidimensional filter is implemented by creating
    a gabor filter array, then using the convolve method.
    Also, sigma specifies the standard deviations of the
    Gaussian along the coordinate axes, and the Gaussian
    is not rotated. This is unlike
    skimage.filters.gabor, whose Gaussian is
    rotated with the complex exponential.
    The reasoning behind this design choice is that
    sigma can be more easily designed to deal with
    anisotropic voxels.

    Examples
    --------
    >>> from brainlit.preprocessing import gabor_filter
    >>> a = np.arange(50, step=2).reshape((5,5))
    >>> a
    array([[ 0,  2,  4,  6,  8],
           [10, 12, 14, 16, 18],
           [20, 22, 24, 26, 28],
           [30, 32, 34, 36, 38],
           [40, 42, 44, 46, 48]])
    >>> gabor_filter(a, sigma=1, phi=[0.0], frequency=0.1)
    (array([[ 3,  5,  6,  8,  9],
            [ 9, 10, 12, 13, 14],
            [16, 18, 19, 21, 22],
            [24, 25, 27, 28, 30],
            [29, 30, 32, 34, 35]]),
     array([[ 0,  0, -1,  0,  0],
            [ 0,  0, -1,  0,  0],
            [ 0,  0, -1,  0,  0],
            [ 0,  0, -1,  0,  0],
            [ 0,  0, -1,  0,  0]]))

    >>> from scipy import misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = gabor_filter(ascent, sigma=5, phi=[0.0], frequency=0.1)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result[0])
    >>> plt.show()
    """
    check_type(input, (list, np.ndarray))
    check_iterable_or_non_iterable_type(sigma, numerical)
    check_iterable_or_non_iterable_type(phi, numerical)
    check_type(frequency, numerical)
    check_type(offset, numerical)
    check_type(cval, numerical)
    check_type(truncate, numerical)

    input = np.asarray(input)

    # Checks that dimensions of inputs are correct
    sigmas = ndi._ni_support._normalize_sequence(sigma, input.ndim)
    phi = ndi._ni_support._normalize_sequence(phi, input.ndim - 1)

    limits = [np.ceil(truncate * sigma).astype(int) for sigma in sigmas]
    ranges = [range(-limit, limit + 1) for limit in limits]
    coords = np.meshgrid(*ranges, indexing="ij")
    filter_size = coords[0].shape
    coords = np.stack(coords, axis=-1)

    new_shape = np.ones(input.ndim)
    new_shape = np.append(new_shape, -1).astype(int)
    sigmas = np.reshape(sigmas, new_shape)

    g = np.zeros(filter_size, dtype=complex)
    g[:] = np.exp(-0.5 * np.sum(np.divide(coords, sigmas)**2, axis=-1))

    g /= (2 * np.pi)**(input.ndim / 2) * np.prod(sigmas)
    orientation = np.ones(input.ndim)
    for i, p in enumerate(phi):
        orientation[i + 1] = orientation[i] * np.sin(p)
        orientation[i] = orientation[i] * np.cos(p)
    orientation = np.flip(orientation)
    rotx = coords @ orientation
    g *= np.exp(1j * (2 * np.pi * frequency * rotx + offset))

    if isinstance(output, (type, np.dtype)):
        otype = output
    elif isinstance(output, str):
        otype = np.dtype(output).type
    else:
        otype = None

    output = ndi.convolve(input,
                          weights=np.real(g),
                          output=output,
                          mode=mode,
                          cval=cval)
    imag = ndi.convolve(input,
                        weights=np.imag(g),
                        output=otype,
                        mode=mode,
                        cval=cval)

    result = (output, imag)
    return result
Example #29
def torsion(
    x: np.ndarray,
    t: np.ndarray,
    c: np.ndarray,
    k: np.integer,
    aux_outputs: bool = False,
) -> np.ndarray:
    r"""Compute the torsion of a B-Spline.

    The torsion measures the failure of a curve, `r(u)`, to be planar.
    If the curvature `k` of a curve is not zero, then the torsion is defined as

    .. math::

        \tau = -n \cdot b',

    where `n` is the principal normal vector, and `b'` the derivative w.r.t. the
    arc length `s` of the binormal vector.

    The torsion can also be computed as

    .. math::
        \tau = \lvert r'(t), r''(t), r'''(t) \rvert / \lVert r'(t) \times r''(t) \rVert^2,

    where `r(t)` is the position vector as a function of the parameter `t`.

    Arguments:
        x: A `1xL` array of parameter values where to evaluate the curve.
            It contains the parameter values where the torsion of the B-Spline will
            be evaluated. It is required to be non-empty, one-dimensional, and
            real-valued.
        t: A `1xm` array representing the knots of the B-spline.
            It is required to be a non-empty, non-decreasing, and one-dimensional
            sequence of real-valued elements. For a B-Spline of degree `k`, at least
            `2k + 1` knots are required.
        c: A `dxn` array representing the coefficients/control points of the B-spline.
            Given `n` real-valued, `d`-dimensional points :math:`x_k = (x_k(1),...,x_k(d))`,
            `c` is the non-empty matrix whose columns are :math:`x_1^T,...,x_N^T`. For a
            B-Spline of order `k`, `n` cannot be less than `m-k-1`.
        k: A non-negative integer representing the degree of the B-spline.
        aux_outputs: Optional. If True, also return the first, second, and third
            derivatives of the B-Spline evaluated at `x`. Default is False.

    Returns:
        torsion: A `1xL` array containing the torsion of the B-Spline evaluated at `x`

    References:
    .. [1] Máté Attila, The Frenet–Serret formulas.
        http://www.sci.brooklyn.cuny.edu/~mate/misc/frenet_serret.pdf
    """

    # convert arguments to desired type
    x = np.ascontiguousarray(x)
    t = np.ascontiguousarray(t)
    c = np.ascontiguousarray(c)
    k = operator.index(k)

    if k < 0:
        raise ValueError("The order of the spline must be non-negative")

    check_type(t, np.ndarray)
    t_dim = t.ndim
    if t_dim != 1:
        raise ValueError("t must be one-dimensional")
    if len(t) == 0:
        raise ValueError("t must be non-empty")
    check_iterable_type(t, (np.integer, np.floating))
    if (np.diff(t) < 0).any():
        raise ValueError("t must be a non-decreasing sequence")

    check_type(c, np.ndarray)
    c_dim = c.ndim
    if c_dim > 2:
        raise ValueError("c must be 2D max")
    if len(c.flatten()) == 0:
        raise ValueError("c must be non-empty")
    if c_dim == 1:
        check_iterable_type(c, (np.integer, np.floating))
        # expand dims so that we can cycle through a single dimension
        c = np.expand_dims(c, axis=0)
    if c_dim == 2:
        for d in c:
            check_iterable_type(d, (np.integer, np.floating))
    n_dim = len(c)

    check_type(x, np.ndarray)
    x_dim = x.ndim
    if x_dim != 1:
        raise ValueError("x must be one-dimensional")
    if len(x) == 0:
        raise ValueError("x must be non-empty")
    check_iterable_type(x, (np.integer, np.floating))
    L = len(x)

    # evaluate first, second, and third derivatives
    # deriv, dderiv, ddderiv are (d, L) arrays
    deriv = np.empty((n_dim, L))
    dderiv = np.empty((n_dim, L))
    ddderiv = np.empty((n_dim, L))
    for i, dim in enumerate(c):
        spl = BSpline(t, dim, k)
        deriv[i, :] = spl.derivative(nu=1)(x) if k - 1 >= 0 else np.zeros(L)
        dderiv[i, :] = spl.derivative(nu=2)(x) if k - 2 >= 0 else np.zeros(L)
        ddderiv[i, :] = spl.derivative(nu=3)(x) if k - 3 >= 0 else np.zeros(L)
    # transpose derivs
    deriv = deriv.T
    dderiv = dderiv.T
    ddderiv = ddderiv.T

    cross = np.cross(deriv, dderiv)

    # Could be more efficient by only computing dot products of corresponding rows
    num = np.diag((cross @ ddderiv.T))
    denom = np.linalg.norm(cross, axis=1)**2

    torsion = np.nan_to_num(num / denom)

    if aux_outputs == True:
        return torsion, deriv, dderiv, ddderiv
    else:
        return torsion
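
A hedged usage sketch mirroring the one for speed, assuming torsion above is importable; a circular helix is used because its torsion is known analytically:

import numpy as np
from scipy.interpolate import splprep

# unit-radius helix with pitch 2*pi: analytic torsion is 1 / (1 + 1) = 0.5
theta = np.linspace(0, 4 * np.pi, 200)
(t, c, k), u = splprep([np.cos(theta), np.sin(theta), theta], k=3, s=0)
tau = torsion(u, t, np.asarray(c), k)
print(tau.mean())  # approximately 0.5, up to spline approximation error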
Example #30
    def get_bfs_subgraph(self,
                         node_id,
                         depth,
                         df=None,
                         spacing=None,
                         origin=None):
        """
        Creates a spanning subgraph from a seed node and parent graph using BFS.

        Arguments
        ----------
        node_id : int
            The id of the node to use as a seed.
            If df is not None this becomes the node index.
        depth : int
            The max depth for BFS to traverse in each direction.
        df : None, DataFrame (default = None)
            Dataframe storing indices.
            In some cases indexing by row number is preferred.
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first.  Default is None.
        origin : :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        G_sub : :class:`networkx.classes.digraph.DiGraph`
            Subgraph

        tree : DiGraph
            The tree returned by BFS.

        paths : list
            List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
            units. Each array is one path.

        Example
        -------
        >>> #swc input, specify node_id and depth
        >>> swc_trace.get_bfs_subgraph(node_id=11,depth=2)
        (<networkx.classes.digraph.DiGraph at 0x7f7f2ce65670>,
         <networkx.classes.digraph.DiGraph at 0x7f7f2ce65370>,
         array([array([[4727, 4440, 3849],
                       [4732, 4442, 3850],
                       [4739, 4455, 3849]]),
                array([[4732, 4442, 3850],
                       [4749, 4439, 3856]])], dtype=object))
        """

        check_type(node_id, (list, int))
        check_type(depth, int)
        check_type(df, (type(None), pd.core.frame.DataFrame))

        check_type(spacing, (type(None), np.ndarray))
        if type(spacing) == np.ndarray:
            check_size(spacing)
        check_type(origin, (type(None), np.ndarray))
        if type(origin) == np.ndarray:
            check_size(origin)

        # if origin isn't specified but spacing is, set origin to np.array([0, 0, 0])
        if type(spacing) == np.ndarray and origin is None:
            origin = np.array([0, 0, 0])

        # voxel conversion option
        if type(spacing) == np.ndarray:
            df_voxel = self._df_in_voxel(self.df, spacing, origin)
            G = self._df_to_graph(df_voxel)

        # no voxel conversion option
        else:
            G = self._df_to_graph(self.df)

        G_sub, tree = self._get_bfs_subgraph(G, node_id, depth, df)

        paths = self._graph_to_paths(G_sub)

        return G_sub, tree, paths