def run(
        self, stack: ImageStack,
    ) -> Tuple[IntensityTable, ConnectedComponentDecodingResult]:
        """decode pixels and combine them into spots using connected component labeling

        Parameters
        ----------
        stack : ImageStack
            ImageStack containing spots

        Returns
        -------
        IntensityTable :
            IntensityTable containing decoded spots
        ConnectedComponentDecodingResult :
            Results of connected component labeling

        """
        pixel_intensities = IntensityTable.from_image_stack(
            stack, crop_x=self.crop_x, crop_y=self.crop_y, crop_z=self.crop_z)
        decoded_intensities = self.codebook.metric_decode(
            pixel_intensities,
            max_distance=self.distance_threshold,
            min_intensity=self.magnitude_threshold,
            norm_order=self.norm_order,
            metric=self.metric
        )
        caf = CombineAdjacentFeatures(
            min_area=self.min_area,
            max_area=self.max_area,
            mask_filtered_features=True
        )
        decoded_spots, image_decoding_results = caf.run(intensities=decoded_intensities)

        return decoded_spots, image_decoding_results
def compute_magnitudes(stack, norm_order=2):
    """compute the vector norm of each pixel trace across channels and rounds"""
    pixel_intensities = IntensityTable.from_image_stack(stack)
    feature_traces = pixel_intensities.stack(traces=(Indices.CH.value,
                                                     Indices.ROUND.value))
    norm = np.linalg.norm(feature_traces.values, ord=norm_order, axis=1)

    return norm
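
A hedged usage sketch for the helper above: the per-pixel magnitudes can be used to choose the magnitude_threshold handed to PixelSpotDetector. The zero_norm_stack variable and the 90th-percentile cutoff are illustrative assumptions, not part of the original snippet.

import numpy as np

mags = compute_magnitudes(zero_norm_stack)     # zero_norm_stack: a normalized ImageStack (assumed)
magnitude_threshold = np.percentile(mags, 90)  # illustrative cutoff; tune for your experiment
print(f"suggested magnitude_threshold: {magnitude_threshold:.4f}")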
Example #3
    def test_run_pipeline(self):
        tempdir = exec.stages(self.stages, self.subdirs, keep_data=True)
        intensities = IntensityTable.load(
            os.path.join(tempdir, "results", self.spots_file))
        self.verify_results(intensities)

        if os.getenv("TEST_KEEP_DATA") is None:
            shutil.rmtree(tempdir)
Example #4
    def intensities(self, codebook=None) -> IntensityTable:
        if codebook is None:
            codebook = self.codebook()
        intensities = IntensityTable.synthetic_intensities(
            codebook, self.n_z, self.height, self.width, self.n_spots,
            self.mean_fluor_per_spot, self.mean_photons_per_fluor)
        assert intensities.dtype == np.float32 and intensities.max() <= 1
        return intensities
def test_synthetic_intensities_generates_correct_number_of_features(
        loaded_codebook):
    n_spots = 2
    intensities = IntensityTable.synthetic_intensities(loaded_codebook,
                                                       n_spots=n_spots)
    assert isinstance(intensities, IntensityTable)

    # shape should be (n_spots, n_ch, n_round), with channel and round dims matching the codebook
    assert intensities.shape == (n_spots, *loaded_codebook.shape[1:])
def test_empty_intensity_table():
    x = [1, 2]
    y = [2, 3]
    z = [1, 1]
    r = [1, 1]
    spot_attributes = pd.MultiIndex.from_arrays([x, y, z, r],
                                                names=('x', 'y', 'z', 'r'))
    empty = IntensityTable.empty_intensity_table(spot_attributes, 2, 2)
    assert empty.shape == (2, 2, 2)
    assert np.sum(empty.values) == 0
def test_synthetic_intensities_have_correct_number_of_on_features(
        loaded_codebook):
    n_spots = 2
    intensities = IntensityTable.synthetic_intensities(loaded_codebook,
                                                       n_spots=n_spots)
    on_features = np.sum(intensities.values != 0)
    # the number of "on" values in intensities should equal the mean number of "on" positions
    # per codeword, times the total number of spots in intensities
    assert on_features == loaded_codebook.sum(
        (Indices.CH, Indices.HYB)).values.mean() * n_spots
def test_reshaping_between_stack_and_intensities():
    """
    transform the pixels of an ImageStack into an IntensityTable and back again, then verify that
    the resulting ImageStack is the same as the original
    """
    np.random.seed(777)
    image = ImageStack.from_numpy_array(np.random.rand(1, 2, 3, 4, 5).astype(np.float32))
    pixel_intensities = IntensityTable.from_image_stack(image, 0, 0, 0)
    image_shape = (image.shape['z'], image.shape['y'], image.shape['x'])
    image_from_pixels = pixel_intensities_to_imagestack(pixel_intensities, image_shape)
    assert np.array_equal(image.numpy_array, image_from_pixels.numpy_array)
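
For intuition, a numpy-only sketch of the round trip this test exercises; the axis ordering below is an assumption chosen for illustration, while the real conversion is handled by IntensityTable.from_image_stack and pixel_intensities_to_imagestack.

import numpy as np

n_round, n_ch, nz, ny, nx = 1, 2, 3, 4, 5
stack = np.random.rand(n_round, n_ch, nz, ny, nx).astype(np.float32)

# (round, ch, z, y, x) -> (z*y*x pixel features, ch, round)
traces = stack.transpose(2, 3, 4, 1, 0).reshape(-1, n_ch, n_round)

# ... and back again, recovering the original volume exactly
restored = traces.reshape(nz, ny, nx, n_ch, n_round).transpose(4, 3, 0, 1, 2)
assert np.array_equal(stack, restored)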
Example #9
def measure_spot_intensities(
    data_image: ImageStack,
    spot_attributes: SpotAttributes,
    measurement_function: Callable[[Sequence], Number],
    radius_is_gyration: bool = False,
) -> IntensityTable:
    """given spots found from a reference image, find those spots across a data_image

    Parameters
    ----------
    data_image : ImageStack
        ImageStack containing multiple volumes for which spots' intensities must be calculated
    spot_attributes : SpotAttributes
        Locations and radii of spots
    measurement_function : Callable[[Sequence], Number]
        Function to apply over the spot volumes to identify the intensity (e.g. max, mean, ...)
    radius_is_gyration : bool
        if True, the radius corresponds to the radius of gyration, which is a function of spot
        intensity but is typically smaller than the sigma generated by blob_log. In this case,
        the spot's bounding box is rounded up instead of down when measuring intensity.
        (default False)

    Returns
    -------
    IntensityTable :
        3d tensor of (spot, channel, round) information for each coded spot

    """

    # determine the shape of the intensity table
    n_ch = data_image.shape[Indices.CH]
    n_round = data_image.shape[Indices.ROUND]

    # construct the empty intensity table
    intensity_table = IntensityTable.empty_intensity_table(
        spot_attributes=spot_attributes,
        n_ch=n_ch,
        n_round=n_round,
    )

    # fill the intensity table
    indices = product(range(n_ch), range(n_round))
    for c, r in indices:
        image, _ = data_image.get_slice({Indices.CH: c, Indices.ROUND: r})
        blob_intensities: pd.Series = measure_spot_intensity(
            image,
            spot_attributes,
            measurement_function,
            radius_is_gyration=radius_is_gyration)
        intensity_table[:, c, r] = blob_intensities

    return intensity_table
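
The per-tile helper measure_spot_intensity is not shown in this snippet; below is a minimal sketch of what such a helper could look like. It is an assumption for illustration, not the starfish implementation, and it presumes spot_attributes.data carries integer z, y, x columns and a radius column r.

import numpy as np
import pandas as pd

def measure_spot_intensity_sketch(image: np.ndarray, spots: pd.DataFrame, measurement_function) -> pd.Series:
    """apply measurement_function over a cube around each spot center in a (z, y, x) volume"""
    values = []
    for _, spot in spots.iterrows():
        r = int(np.ceil(spot['r']))
        z, y, x = int(spot['z']), int(spot['y']), int(spot['x'])
        cube = image[max(z - r, 0):z + r + 1,
                     max(y - r, 0):y + r + 1,
                     max(x - r, 0):x + r + 1]
        values.append(measurement_function(cube))
    return pd.Series(values)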
Example #10
def small_intensity_table():
    intensities = np.array([
        [[0, 1], [1, 0]],
        [[1, 0], [0, 1]],
        [[0, 0], [1, 1]],
    ])

    spot_attributes = dataframe_to_multiindex(
        pd.DataFrame(
            data={
                IntensityTable.SpotAttributes.X: [0, 1, 2],
                IntensityTable.SpotAttributes.Y: [3, 4, 5],
                IntensityTable.SpotAttributes.Z: [0, 0, 0],
                IntensityTable.SpotAttributes.RADIUS: [0.1, 0.2, 0.3]
            }))

    return IntensityTable.from_spot_data(intensities, spot_attributes)
Example #11
    def _cli(cls, args, print_help=False):
        """Runs the decoder component based on parsed arguments."""

        if args.decoder_algorithm_class is None or print_help:
            cls.decoder_group.print_help()
            cls.decoder_group.exit(status=2)

        instance = args.decoder_algorithm_class(**vars(args))

        # load intensities and codebook
        intensities = IntensityTable.load(args.input)
        codebook = Codebook.from_json(args.codebook)

        # decode and save output
        intensities = instance.decode(intensities, codebook)
        intensities.save(args.output)
Example #12
    def _measure_spot_intensities(
            self, stack: ImageStack,
            spot_attributes: pd.DataFrame) -> IntensityTable:

        n_ch = stack.shape[Indices.CH]
        n_hyb = stack.shape[Indices.HYB]
        spot_attribute_index = dataframe_to_multiindex(spot_attributes)
        intensity_table = IntensityTable.empty_intensity_table(
            spot_attribute_index, n_ch, n_hyb)

        indices = product(range(n_ch), range(n_hyb))
        for c, h in indices:
            image, _ = stack.get_slice({Indices.CH: c, Indices.HYB: h})
            blob_intensities: pd.Series = self._measure_blob_intensity(
                image, spot_attributes, self.measurement_function)
            intensity_table[:, c, h] = blob_intensities

        return intensity_table
Example #13
    def test_run_pipeline(self):
        tempdir = tempfile.mkdtemp()
        coverage_enabled = "STARFISH_COVERAGE" in os.environ

        def callback(interval):
            print(" ".join(stage[:2]), " ==> {} seconds".format(interval))

        try:
            for subdir in TestWithIssData.SUBDIRS:
                os.makedirs(
                    "{tempdir}".format(tempdir=os.path.join(tempdir, subdir)))
            for stage in TestWithIssData.STAGES:
                cmdline = [
                    element(tempdir=tempdir) if callable(element) else element
                    for element in stage
                ]
                if cmdline[0] == "starfish" and coverage_enabled:
                    coverage_cmdline = [
                        "coverage",
                        "run",
                        "-p",
                        "--source",
                        "starfish",
                        "-m",
                        "starfish",
                    ]
                    coverage_cmdline.extend(cmdline[1:])
                    cmdline = coverage_cmdline
                with clock.timeit(callback):
                    subprocess.check_call(cmdline)

            intensities = IntensityTable.load(
                os.path.join(tempdir, "results", "spots.nc"))
            genes, counts = np.unique(
                intensities.coords[Codebook.Constants.GENE.value],
                return_counts=True)
            gene_counts = pd.Series(counts, genes)
            assert gene_counts['ACTB_human'] > gene_counts['ACTB_mouse']

        finally:
            if os.getenv("TEST_ISS_KEEP_DATA") is None:
                shutil.rmtree(tempdir)
Example #14
    def _cli(cls, args, print_help=False):
        """Runs the gene_assignment component based on parsed arguments."""
        from starfish import munge

        if args.gene_assignment_algorithm_class is None or print_help:
            cls.gene_assignment_group.print_help()
            cls.gene_assignment_group.exit(status=2)

        with open(args.coordinates_geojson, "r") as fh:
            coordinates = json.load(fh)
        regions = munge.geojson_to_region(coordinates)

        print('Assigning genes to cells...')
        intensity_table = IntensityTable.load(args.intensities)

        instance = args.gene_assignment_algorithm_class(**vars(args))

        result = instance.assign_genes(intensity_table, regions)

        print("Writing | cell_id | spot_id to: {}".format(args.output))
        result.to_json(args.output, orient="records")
Example #15
def concatenate_spot_attributes_to_intensities(
    spot_attributes: Sequence[Tuple[SpotAttributes, Dict[Indices, int]]]
) -> IntensityTable:
    """
    Merge multiple spot attributes frames into a single IntensityTable without merging across
    channels and imaging rounds

    Parameters
    ----------
    spot_attributes : Sequence[Tuple[SpotAttributes, Dict[Indices, int]]]
        A sequence of SpotAttribute objects and the Indices (channel, round) that each object is
        associated with.

    Returns
    -------
    IntensityTable :
        concatenated input SpotAttributes, converted to an IntensityTable object

    """
    n_ch: int = max(inds[Indices.CH] for _, inds in spot_attributes) + 1
    n_round: int = max(inds[Indices.ROUND] for _, inds in spot_attributes) + 1

    all_spots = pd.concat([sa.data for sa, inds in spot_attributes])
    # this drop call ensures that only x, y, z, radius, and quality are passed to the IntensityTable
    features_coordinates = all_spots.drop(['spot_id', 'intensity'], axis=1)

    intensity_table = IntensityTable.empty_intensity_table(
        SpotAttributes(features_coordinates),
        n_ch,
        n_round,
    )

    i = 0
    for attrs, inds in spot_attributes:
        for _, row in attrs.data.iterrows():
            intensity_table[i, inds[Indices.CH],
                            inds[Indices.ROUND]] = row['intensity']
            i += 1

    return intensity_table
Example #16
def small_intensity_table():
    intensities = np.array([
        [[0, 1], [1, 0]],
        [[1, 0], [0, 1]],
        [[0, 0], [1, 1]],
        [[0.5, 0.5], [0.5, 0.5]],  # this one should fail decoding
        [[0.1, 0], [0, 0.1]],      # this one is a candidate for intensity filtering
    ])

    spot_attributes = pd.DataFrame(
        data={
            Indices.X.value: [0, 1, 2, 3, 4],
            Indices.Y.value: [3, 4, 5, 6, 7],
            Indices.Z.value: [0, 0, 0, 0, 0],
            Features.SPOT_RADIUS: [0.1, 2, 3, 2, 1]
        })

    return IntensityTable.from_spot_data(intensities, spot_attributes)
    def _calculate_mean_pixel_traces(
        label_image: np.ndarray,
        intensities: IntensityTable,
    ) -> IntensityTable:
        """
        For all pixels that contribute to a connected component, calculate the mean value for
        each (ch, round), producing an average "trace" of a feature across the imaging experiment

        Parameters
        ----------
        label_image : np.ndarray
            An image where all pixels of a connected component share the same integer ID
        intensities : IntensityTable
            decoded intensities

        Returns
        -------
        IntensityTable :
            an IntensityTable where the number of features equals the number of connected components
            and the intensities of each feature are its mean trace.

        """
        pixel_labels = label_image.reshape(-1)
        intensities['spot_id'] = (Features.AXIS, pixel_labels)
        mean_pixel_traces = intensities.groupby('spot_id').mean(Features.AXIS)
        mean_distances = intensities[Features.DISTANCE].groupby(
            'spot_id').mean(Features.AXIS)
        mean_pixel_traces[Features.DISTANCE] = ('spot_id',
                                                np.ravel(mean_distances))

        # the 0th pixel trace corresponds to background. If present, drop it.
        try:
            mean_pixel_traces = mean_pixel_traces.drop(0, dim='spot_id')
        except KeyError:
            pass

        return mean_pixel_traces
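
A minimal xarray sketch with made-up data of the groupby-mean trick used above: each pixel trace is tagged with a spot_id, and averaging within each id yields one mean trace per connected component (id 0, the background, can then be dropped).

import numpy as np
import xarray as xr

# three pixel traces of shape (ch=2, round=2); pixel 0 is background (spot_id 0)
traces = xr.DataArray(np.arange(12, dtype=float).reshape(3, 2, 2),
                      dims=('features', 'c', 'h'))
traces['spot_id'] = ('features', np.array([0, 1, 1]))

mean_traces = traces.groupby('spot_id').mean('features')
print(mean_traces.sel(spot_id=1))  # average (c, h) trace of the two pixels labeled 1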
Example #18
    def metric_decode(self,
                      intensities: IntensityTable,
                      max_distance: Number,
                      min_intensity: Number,
                      norm_order: int,
                      metric: str = 'euclidean') -> IntensityTable:
        """Assign the closest target by euclidean distance to each feature in an intensity table

        Normalizes both the codes and the features to be unit vectors and finds the closest code
        for each feature

        Parameters
        ----------
        intensities : IntensityTable
            features to be decoded
        max_distance : Number
            maximum distance between a feature and its closest code for which the coded target will
            be assigned.
        min_intensity : Number
            minimum intensity for a feature to receive a target annotation
        norm_order : int
            the scipy.linalg norm to apply to normalize codes and intensities
        metric : str
            the sklearn metric string to pass to NearestNeighbors

        See Also
        --------
        The available norms for this function can be found at the following link:
        https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html

        Returns
        -------
        IntensityTable :
            Intensity table containing normalized intensities, target assignments, distances to
            the nearest code, and the filtering status of each feature.

        """

        self._validate_decode_intensity_input_matches_codebook_shape(
            intensities)

        # normalize both the intensities and the codebook
        norm_intensities, norms = self._normalize_features(
            intensities, norm_order=norm_order)
        norm_codes, _ = self._normalize_features(self, norm_order=norm_order)

        metric_outputs, targets = self._approximate_nearest_code(
            norm_codes, norm_intensities, metric=metric)

        # only targets with low distances and high intensities should be retained
        passes_filters = np.logical_and(norms >= min_intensity,
                                        metric_outputs <= max_distance,
                                        dtype=bool)

        # set targets, distances, and filtering results
        norm_intensities[Features.TARGET] = (Features.AXIS, targets)
        norm_intensities[Features.DISTANCE] = (Features.AXIS, metric_outputs)
        norm_intensities[Features.PASSES_THRESHOLDS] = (Features.AXIS,
                                                        passes_filters)

        # norm_intensities is a DataArray, make it back into an IntensityTable
        return IntensityTable(norm_intensities)
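
A standalone sketch, with made-up arrays, of the normalize-then-query idea behind _approximate_nearest_code; the sklearn call below illustrates the technique rather than reproducing the exact starfish internals.

import numpy as np
from sklearn.neighbors import NearestNeighbors

codes = np.array([[1, 0, 0, 1],
                  [0, 1, 1, 0]], dtype=float)             # (n_codes, ch * round)
features = np.array([[0.9, 0.1, 0.2, 1.1],
                     [0.1, 1.0, 0.9, 0.0]], dtype=float)  # (n_features, ch * round)

def unit_normalize(x, order=2):
    norms = np.linalg.norm(x, ord=order, axis=1, keepdims=True)
    return x / norms, norms.squeeze()

norm_codes, _ = unit_normalize(codes)
norm_features, feature_norms = unit_normalize(features)

nn = NearestNeighbors(n_neighbors=1, metric='euclidean').fit(norm_codes)
distances, indices = nn.kneighbors(norm_features)
# distances play the role of metric_outputs; indices select the assigned targets;
# feature_norms are compared against min_intensity when filtering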
Example #19
    def decode_per_round_max(self,
                             intensities: IntensityTable) -> IntensityTable:
        """decode each feature by selecting the per-imaging-round max-valued channel

        Notes
        -----
        - If no code matches the per-round maximum for a feature, it will be assigned 'nan' instead
          of a target value
        - Numpy's argmax breaks ties by picking the first channel -- this can lead to
          unexpected results where some features with "tied" channels will decode, but others will
          be assigned 'nan'.

        Parameters
        ----------
        intensities : IntensityTable
            features to be decoded

        Returns
        -------
        IntensityTable :
            intensity table containing additional data variables for target assignments

        """
        def _view_row_as_element(array: np.ndarray) -> np.ndarray:
            """view an entire code as a single element

            This view allows vectors (codes) to be compared for equality without need for multiple
            comparisons by casting the data in each code to a structured dtype that registers as
            a single value

            Parameters
            ----------
            array : np.ndarray
                2-dimensional numpy array of shape (n_observations, (n_ch * n_round)) where
                observations may be either features or codes.

            Returns
            -------
            np.ndarray :
                1-dimensional vector of shape n_observations

            """
            nrows, ncols = array.shape
            dtype = {
                'names': ['f{}'.format(i) for i in range(ncols)],
                'formats': ncols * [array.dtype]
            }
            return array.view(dtype)

        self._validate_decode_intensity_input_matches_codebook_shape(
            intensities)

        max_channels = intensities.argmax(Indices.CH.value)
        codes = self.argmax(Indices.CH.value)

        # TODO ambrosejcarr, dganguli: explore this quality score further
        # calculate distance scores by evaluating the fraction of signal in each round that is
        # found in the non-maximal channels.
        max_intensities = intensities.max(Indices.CH.value)
        round_intensities = intensities.sum(Indices.CH.value)
        distance = 1 - (max_intensities / round_intensities).mean(
            Indices.ROUND.value)

        a = _view_row_as_element(codes.values.reshape(self.shape[0], -1))
        b = _view_row_as_element(
            max_channels.values.reshape(intensities.shape[0], -1))

        targets = np.full(intensities.shape[0],
                          fill_value=np.nan,
                          dtype=object)

        # decode the intensities
        for i in np.arange(codes.shape[0]):
            targets[np.where(a[i] == b)[0]] = codes[Features.TARGET][i]

        # a code passes filters if it decodes successfully
        passes_filters = ~pd.isnull(targets)

        intensities[Features.TARGET] = (Features.AXIS, targets.astype('U'))
        intensities[Features.DISTANCE] = (Features.AXIS, distance)
        intensities[Features.PASSES_THRESHOLDS] = (Features.AXIS,
                                                   passes_filters)

        return intensities
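
A standalone sketch with made-up codes and features of the structured-dtype trick that _view_row_as_element relies on: viewing each row as one compound element lets a whole code be matched against every feature with a single equality test.

import numpy as np

codes = np.array([[1, 0, 0, 1],
                  [0, 1, 1, 0]], dtype=np.float64)
features = np.array([[0, 1, 1, 0],
                     [1, 0, 0, 1],
                     [1, 1, 0, 0]], dtype=np.float64)

def rows_as_elements(array: np.ndarray) -> np.ndarray:
    n_cols = array.shape[1]
    dtype = {'names': ['f{}'.format(i) for i in range(n_cols)],
             'formats': n_cols * [array.dtype]}
    return array.view(dtype)

a = rows_as_elements(codes)     # shape (2, 1), one compound element per code
b = rows_as_elements(features)  # shape (3, 1), one compound element per feature
print(np.where(a[0] == b)[0])   # features matching code 0 -> [1]
print(np.where(a[1] == b)[0])   # features matching code 1 -> [0]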
Example #20
    def intensities(self, codebook=None) -> IntensityTable:
        if codebook is None:
            codebook = self.codebook()
        return IntensityTable.synthetic_intensities(
            codebook, self.n_z, self.height, self.width, self.n_spots,
            self.mean_fluor_per_spot, self.mean_photons_per_fluor)
Example #21
def test_imagestack_to_intensity_table():
    codebook, intensity_table, image = codebook_intensities_image_for_single_synthetic_spot()
    pixel_intensities = IntensityTable.from_image_stack(image)
    pixel_intensities = codebook.metric_decode(
        pixel_intensities, max_distance=0, min_intensity=1000, norm_order=2)
    assert isinstance(pixel_intensities, IntensityTable)
Example #22
    def decode_per_hyb_max(self,
                           intensities: IntensityTable) -> IntensityTable:
        """decode each feature by selecting the per-hybridization round max-valued channel

        Notes
        -----
        If no code matches the per-hybridization-round max of a feature, it will be assigned np.nan
        instead of a gene value

        Parameters
        ----------
        intensities : IntensityTable
            features to be decoded

        Returns
        -------
        IntensityTable :
            intensity table containing additional data variables for gene assignments

        """
        def _view_row_as_element(array: np.ndarray) -> np.ndarray:
            """view an entire code as a single element

            This view allows vectors (codes) to be compared for equality without need for multiple
            comparisons by casting the data in each code to a structured dtype that registers as
            a single value

            Parameters
            ----------
            array : np.ndarray
                2-dimensional numpy array of shape (n_observations, (n_ch * n_hyb)) where
                observations may be either features or codes.

            Returns
            -------
            np.ndarray :
                1-dimensional vector of shape n_observations

            """
            nrows, ncols = array.shape
            dtype = {
                'names': ['f{}'.format(i) for i in range(ncols)],
                'formats': ncols * [array.dtype]
            }
            return array.view(dtype)

        max_channels = intensities.argmax(Indices.CH.value)
        codes = self.argmax(Indices.CH.value)

        a = _view_row_as_element(codes.values.reshape(self.shape[0], -1))
        b = _view_row_as_element(
            max_channels.values.reshape(intensities.shape[0], -1))

        genes = np.empty(intensities.shape[0], dtype=object)
        genes.fill(np.nan)

        for i in np.arange(a.shape[0]):
            genes[np.where(a[i] == b)[0]] = codes['gene_name'][i]
        gene_index = pd.Index(genes.astype('U'))

        intensities[IntensityTable.Constants.GENE.value] = (
            IntensityTable.Constants.FEATURES.value, gene_index)

        return intensities
Example #23
    def decode_euclidean(self, intensities: IntensityTable) -> IntensityTable:
        """Assign the closest gene by euclidean distance to each feature in an intensity table

        Parameters
        ----------
        intensities : IntensityTable
            features to be decoded

        Returns
        -------
        IntensityTable :
            intensity table containing additional data variables for gene assignments and feature
            qualities

        """
        def _min_euclidean_distance(observation: xr.DataArray,
                                    codes: Codebook) -> np.ndarray:
            """find the code with the closest euclidean distance to observation

            Parameters
            ----------
            observation : xr.DataArray
                2-dimensional DataArray of shape (n_ch, n_hyb)
            codes :
                Codebook containing codes to compare to observation

            Returns
            -------
            np.ndarray :
                1-d vector containing the distance of each code to observation

            """
            squared_diff = (codes - observation)**2
            code_distances = np.sqrt(
                squared_diff.sum((Indices.CH, Indices.HYB)))
            # order of codes changes here (automated sorting on the reshaping?)
            return code_distances

        # normalize both the intensities and the codebook
        norm_intensities = intensities.groupby(
            IntensityTable.Constants.FEATURES.value).apply(
                lambda x: x / x.sum())
        norm_codes = self.groupby(
            Codebook.Constants.GENE.value).apply(lambda x: x / x.sum())

        # calculate pairwise euclidean distance between codes and features
        func = functools.partial(_min_euclidean_distance, codes=norm_codes)
        distances = norm_intensities.groupby(
            IntensityTable.Constants.FEATURES.value).apply(func)

        # calculate quality of each decoded spot
        qualities = 1 - distances.min(Codebook.Constants.GENE.value)
        qualities_index = pd.Index(qualities)

        # identify genes associated with closest codes
        closest_code_index = distances.argmin(Codebook.Constants.GENE.value)
        gene_ids = distances.indexes[Codebook.Constants.GENE.value].values[
            closest_code_index.values]
        gene_index = pd.Index(gene_ids)

        # set new values on the intensity table in-place
        intensities[IntensityTable.Constants.GENE.value] = (
            IntensityTable.Constants.FEATURES.value, gene_index)
        intensities[IntensityTable.Constants.QUALITY.value] = (
            IntensityTable.Constants.FEATURES.value, qualities_index)

        return intensities
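
A numpy-only sketch with made-up values of the decode_euclidean logic above: normalize codes and features, compute pairwise euclidean distances, assign each feature the gene of its closest code, and score quality as one minus the minimum distance.

import numpy as np

codes = np.array([[1.0, 0.0, 0.0, 1.0],
                  [0.0, 1.0, 1.0, 0.0]])             # (n_genes, ch * hyb)
features = np.array([[0.8, 0.1, 0.0, 0.9],
                     [0.1, 0.7, 0.9, 0.1]])          # (n_features, ch * hyb)

norm_codes = codes / codes.sum(axis=1, keepdims=True)
norm_features = features / features.sum(axis=1, keepdims=True)

# pairwise euclidean distances, shape (n_features, n_genes)
distances = np.sqrt(((norm_features[:, None, :] - norm_codes[None, :, :]) ** 2).sum(axis=2))
closest = distances.argmin(axis=1)   # index of the assigned gene for each feature
quality = 1 - distances.min(axis=1)  # quality score for each decoded spot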
##### visualization of rolonies
# EPY: END markdown

# EPY: START code
distance_threshold = min_dist

psd = SpotFinder.PixelSpotDetector(codebook=exp.codebook,
                                   metric='euclidean',
                                   distance_threshold=distance_threshold,
                                   magnitude_threshold=magnitude_threshold,
                                   min_area=area_threshold[0],
                                   max_area=area_threshold[1])

spot_intensities, results = psd.run(zero_norm_stack)
spot_intensities = IntensityTable(
    spot_intensities.where(spot_intensities[Features.PASSES_THRESHOLDS],
                           drop=True))
# EPY: END code

# EPY: START code
# exclude spots that don't meet our area thresholds
area_lookup = lambda x: 0 if x == 0 else results.region_properties[x - 1].area
vfunc = np.vectorize(area_lookup)
mask = np.squeeze(vfunc(results.label_image))
new_image = np.squeeze(results.decoded_image) * (mask > area_threshold[0]) * (
    mask < area_threshold[1])

plt.figure(figsize=(10, 10))
plt.imshow(new_image, cmap='nipy_spectral')
plt.axis('off')
plt.title('Coded rolonies')
    def run(
        self, intensities: IntensityTable
    ) -> Tuple[IntensityTable, ConnectedComponentDecodingResult]:
        """
        Execute the combine_adjacent_features method on an IntensityTable containing pixel
        intensities

        Parameters
        ----------
        intensities : IntensityTable
            Pixel intensities of an imaging experiment

        Returns
        -------
        IntensityTable :
            Table whose features comprise sets of adjacent pixels that decoded to the same target
        ConnectedComponentDecodingResult :
            NamedTuple containing:
                region_properties :
                    the properties of each connected component, in the same order as the
                    IntensityTable
                label_image : np.ndarray
                    An image where all pixels of a connected component share the same integer ID
                decoded_image : np.ndarray
                    Image whose pixels correspond to the targets that the given position in the
                    ImageStack decodes to.

        """

        # map target molecules to integers so they can be reshaped into an image that can
        # be subjected to a connected-component algorithm to find adjacent pixels with the
        # same targets
        targets = intensities[Features.TARGET].values
        target_map = TargetsMap(targets)

        # create the decoded_image
        decoded_image = self._intensities_to_decoded_image(
            intensities,
            target_map,
            self._mask_filtered,
        )

        # label the decoded image to extract connected component features
        label_image: np.ndarray = label(decoded_image,
                                        connectivity=self._connectivity)

        # calculate properties of each feature
        props: List = regionprops(np.squeeze(label_image))

        # calculate mean intensities across the pixels of each feature
        mean_pixel_traces = self._calculate_mean_pixel_traces(
            label_image,
            intensities,
        )

        # Create SpotAttributes and determine feature filtering outcomes
        spot_attributes, passes_filter = self._create_spot_attributes(
            props,
            decoded_image,
            target_map,
        )

        # augment the SpotAttributes with filtering results and distances from nearest codes
        spot_attributes.data[Features.DISTANCE] = mean_pixel_traces[
            Features.DISTANCE]
        spot_attributes.data[Features.PASSES_THRESHOLDS] = passes_filter

        # create new indexes for the output IntensityTable
        channel_index = mean_pixel_traces.indexes[Indices.CH]
        round_index = mean_pixel_traces.indexes[Indices.ROUND]
        coords = IntensityTable._build_xarray_coords(spot_attributes,
                                                     channel_index,
                                                     round_index)

        # create the output IntensityTable
        dims = (Features.AXIS, Indices.CH.value, Indices.ROUND.value)
        intensity_table = IntensityTable(data=mean_pixel_traces,
                                         coords=coords,
                                         dims=dims)

        # combine the various non-IntensityTable results into a NamedTuple before returning
        ccdr = ConnectedComponentDecodingResult(props, label_image,
                                                decoded_image)

        return intensity_table, ccdr
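
A minimal sketch using a hypothetical decoded_image of the connected-component step above: skimage's label merges touching pixels that share a decoded target value, and regionprops reports the properties of each resulting feature.

import numpy as np
from skimage.measure import label, regionprops

# each pixel holds the integer id of its decoded target; 0 means no target
decoded_image = np.array([[1, 1, 0, 2],
                          [1, 0, 0, 2],
                          [0, 0, 3, 0]])

label_image = label(decoded_image, connectivity=2)
for prop in regionprops(label_image):
    print(prop.label, prop.area, prop.centroid)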
Example #26
def synthetic_intensity_table(loaded_codebook) -> IntensityTable:
    return IntensityTable.synthetic_intensities(loaded_codebook, n_spots=2)
Example #27
def test_imagestack_to_intensity_table_no_noise(synthetic_spot_pass_through_stack):
    codebook, intensity_table, image = synthetic_spot_pass_through_stack
    pixel_intensities = IntensityTable.from_image_stack(image)
    pixel_intensities = codebook.metric_decode(
        pixel_intensities, max_distance=0, min_intensity=1000, norm_order=2)
    assert isinstance(pixel_intensities, IntensityTable)
Example #28
    def synthetic_spots(
        cls,
        intensities: IntensityTable,
        num_z: int,
        height: int,
        width: int,
        n_photons_background=1000,
        point_spread_function=(4, 2, 2),
        camera_detection_efficiency=0.25,
        background_electrons=1,
        graylevel: float = 37000.0 / 2**16,
        ad_conversion_bits=16,
    ) -> "ImageStack":
        """Generate a synthetic ImageStack from a set of Features stored in an IntensityTable

        Parameters
        ----------
        intensities : IntensityTable
            IntensityTable containing coordinates of fluorophores. Used to position and generate
            spots in the output ImageStack
        num_z : int
            Number of z-planes in the ImageStack
        height : int
            Height in pixels of the ImageStack
        width : int
            Width in pixels of the ImageStack
        n_photons_background : int
            Poisson rate for the number of background photons to add to each pixel of the image.
            Set this parameter to 0 to eliminate background.
            (default 1000)
        point_spread_function : Tuple[int]
            The width of the gaussian density by which photons spread around their light source.
            Set to zero to eliminate this effect (default (4, 2, 2))
        camera_detection_efficiency : float
            The efficiency of the camera to detect light. Set to 1 to remove this filter (default
            0.25)
        background_electrons : int
            Poisson rate for the number of spurious electrons detected per pixel during image
            capture by the camera (default 1)
        graylevel : float
            The number of shades of gray displayable by the synthetic camera. Larger numbers will
            produce higher resolution images (default 37000 / 2 ** 16)
        ad_conversion_bits : int
            The number of bits used during analog to digital conversion (default 16)

        Returns
        -------
        ImageStack :
            synthetic spots

        """
        # check some params
        if not 0 < camera_detection_efficiency <= 1:
            raise ValueError(
                f'invalid camera_detection_efficiency value: {camera_detection_efficiency}. '
                f'Must be in the interval (0, 1].')

        def select_uint_dtype(array):
            """choose appropriate dtype based on values of an array"""
            max_val = np.max(array)
            for dtype in (np.uint8, np.uint16, np.uint32):
                if max_val <= np.iinfo(dtype).max:
                    return array.astype(dtype)
            raise ValueError(
                'value exceeds dynamic range of largest skimage-supported type'
            )

        # make sure requested dimensions are large enough to support intensity values
        indices = zip((Indices.Z.value, Indices.Y.value, Indices.X.value),
                      (num_z, height, width))
        for index, requested_size in indices:
            required_size = intensities.coords[index].values.max()
            if required_size > requested_size:
                raise ValueError(
                    f'locations of intensities contained in table exceed the size of requested '
                    f'dimension {index}. Required size {required_size} > {requested_size}.'
                )

        # create an empty array of the correct size
        image = np.zeros(
            (intensities.sizes[Indices.ROUND.value],
             intensities.sizes[Indices.CH.value], num_z, height, width),
            dtype=np.uint32)

        # starfish uses float images, but the logic here requires uint. We cast, and will cast back
        # at the end of the function
        intensities.values = img_as_uint(intensities)

        for ch, round_ in product(*(range(s) for s in intensities.shape[1:])):
            spots = intensities[:, ch, round_]

            # numpy deprecated a specific float-casting path that xarray triggers here
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', FutureWarning)
                values = spots.where(spots, drop=True)

            image[round_, ch, values.z, values.y, values.x] = values

        intensities.values = img_as_float32(intensities)

        # add imaging noise
        image += np.random.poisson(n_photons_background,
                                   size=image.shape).astype(np.uint32)

        # blur image over coordinates, but not over round_/channels (dim 0, 1)
        sigma = (0, 0) + point_spread_function
        image = gaussian_filter(image, sigma=sigma, mode='nearest')

        image = image * camera_detection_efficiency

        image += np.random.normal(scale=background_electrons, size=image.shape)

        # mimic analog to digital conversion
        image = (image / graylevel).astype(int).clip(0, 2**ad_conversion_bits)

        # clip in case we've picked up some negative values
        image = np.clip(image, 0, a_max=None)

        # set the smallest int datatype that supports the data's intensity range
        image = select_uint_dtype(image)

        # convert to float for ImageStack
        with warnings.catch_warnings():
            # possible precision loss when casting from uint to float is acceptable
            warnings.simplefilter('ignore', UserWarning)
            image = img_as_float32(image)

        return cls.from_numpy_array(image)