def saveTable(table: DecodedIntensityTable, savename: str):
    """
    Reformats and saves a DecodedIntensityTable.
    """
    intensities = IntensityTable(
        table.where(table[Features.PASSES_THRESHOLDS], drop=True))
    traces = intensities.stack(traces=(Axes.ROUND.value, Axes.CH.value))
    # traces = table.stack(traces=(Axes.ROUND.value, Axes.CH.value))
    traces = traces.to_features_dataframe()
    traces.to_csv(savename)
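
A minimal usage sketch for the helper above, assuming `decoded` is a DecodedIntensityTable produced by an upstream decoding step such as codebook.decode_per_round_max (as in the pipeline example further down):

decoded = codebook.decode_per_round_max(spot_attributes)  # hypothetical upstream decoding step
saveTable(decoded, "decoded_spots.csv")  # writes only the spots that pass thresholds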
Example No. 2
def test_from_spot_attributes_must_have_aligned_dimensions_spot_attributes_and_data(
):
    """
    Number of features must match the number of SpotAttributes. Pass two attributes and three
    features and verify that a ValueError is raised.
    """
    spot_attributes = spot_attribute_factory(2)
    data = np.zeros(30).reshape(3, 5, 2)
    with pytest.raises(ValueError):
        IntensityTable.from_spot_data(data, spot_attributes)
Example No. 3
def test_from_spot_attributes_throws_type_error_when_passed_a_dataframe():
    """SpotAttributes should be passed instead."""
    # input has two spots
    not_spot_attributes = pd.DataFrame(
        data=np.array([[1, 1, 1, 1], [2, 2, 2, 1]]),
        columns=[Indices.Z, Indices.Y, Indices.X, Features.SPOT_RADIUS])

    with pytest.raises(TypeError):
        IntensityTable.empty_intensity_table(not_spot_attributes,
                                             n_ch=1,
                                             n_round=3)
Example No. 4
def decoded_intensity_table_factory() -> Tuple[IntensityTable, np.ndarray]:
    """
    Create an IntensityTable that has gene labels, including null labels. The data itself does not
    matter, so np.zeros is used.
    """
    data = np.zeros((1, 1, 2, 3, 3), dtype=np.float32)
    labels = np.array(
        [[[0, 1, 1],
          [0, 2, 2],
          [1, 1, 1]],
         [[0, 1, 1],
          [1, 1, 1],
          [0, 1, 2]]],
        dtype='<U3'
    )
    labels_with_nan = labels.copy()
    labels_with_nan[labels == '0'] = 'nan'

    # create an intensity table and add the labels
    image_stack = ImageStack.from_numpy(data)
    intensities = IntensityTable.from_image_stack(image_stack)
    intensities[Features.TARGET] = (Features.AXIS, np.ravel(labels_with_nan))

    # label the third column of this data as failing filters
    passes_filters = np.ones(data.shape, dtype=bool)
    passes_filters[:, :, :, :, -1] = 0
    intensities[Features.PASSES_THRESHOLDS] = (Features.AXIS, np.ravel(passes_filters))

    return intensities, labels_with_nan
Example No. 5
def create_intensity_table_with_coords(area: Area,
                                       n_spots: int = 10) -> IntensityTable:
    """
    Creates a 50x50 intensity table with physical coordinates within
    the given Area.

    Parameters
    ----------
    area: Area
        The area of physical space the IntensityTable should be defined over
    n_spots:
        Number of spots to add to the IntensityTable
    """
    codebook = factories.codebook_array_factory()
    it = IntensityTable.synthetic_intensities(codebook,
                                              num_z=1,
                                              height=50,
                                              width=50,
                                              n_spots=n_spots)
    # spread the spot coordinates evenly across the requested physical area
    it[Coordinates.X.value] = xr.DataArray(np.linspace(area.min_x, area.max_x,
                                                       n_spots),
                                           dims=Features.AXIS)
    it[Coordinates.Y.value] = xr.DataArray(np.linspace(area.min_y, area.max_y,
                                                       n_spots),
                                           dims=Features.AXIS)
    return it
Example No. 6
def compute_magnitudes(stack, norm_order=2):
    """Compute the norm of each pixel's intensity trace across all channels and rounds."""
    pixel_intensities = IntensityTable.from_image_stack(stack)
    feature_traces = pixel_intensities.stack(traces=(Axes.CH.value, Axes.ROUND.value))
    norm = np.linalg.norm(feature_traces.values, ord=norm_order, axis=1)

    return norm
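
A hedged sketch of how the returned magnitudes might be used: inspect their distribution and pick a magnitude threshold for pixel-based decoding. Here, filtered_imgs is assumed to be a filtered ImageStack (as in the pixel decoding example further down), and the percentile cutoff is illustrative, not prescribed:

mags = compute_magnitudes(filtered_imgs, norm_order=2)
magnitude_threshold = np.percentile(mags, 90)  # illustrative cutoff; tune per dataset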
Example No. 7
def labeled_intensities_factory(
) -> Tuple[IntensityTable, np.ndarray, np.ndarray]:
    """
    Create a decoded IntensityTable with distance scores, and a corresponding label_image and
    decoded_image.
    """
    data = np.array(
        [
            [
                [[0., 0.], [.1, .1]],  # ch 1
                [[.5, .5], [.2, .3]]
            ],
            [
                [[.1, .1], [0, 0]],  # ch 2, x & y are reversed
                [[.2, .3], [.5, .5]]
            ]
        ],
        dtype=np.float32)
    image_stack = ImageStack.from_numpy(data.reshape(1, 2, 2, 2, 2))
    intensity_table = IntensityTable.from_image_stack(image_stack)
    intensity_table[Features.DISTANCE] = (Features.AXIS,
                                          np.zeros(intensity_table.shape[0]))
    label_image = np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]])
    decoded_image = np.array([[[5, 4], [3, 2]], [[5, 4], [3, 2]]])

    # verify that the listed label image is what would be created by the function we use in the
    # code
    assert np.array_equal(label(decoded_image), label_image)

    return intensity_table, label_image, decoded_image
Example No. 8
    def intensities(self, codebook=None) -> IntensityTable:
        if codebook is None:
            codebook = self.codebook()
        intensities = IntensityTable.synthetic_intensities(
            codebook, self.n_z, self.height, self.width, self.n_spots,
            self.mean_fluor_per_spot, self.mean_photons_per_fluor)
        assert intensities.dtype == np.float32 and intensities.max() <= 1
        return intensities
Example No. 9
    def verify_results(self, tempdir: Path):
        intensities = IntensityTable.open_netcdf(
            os.fspath(tempdir / "decoded_spots.nc"))
        genes, counts = np.unique(intensities.coords[Features.TARGET],
                                  return_counts=True)
        gene_counts = pd.Series(counts, genes)
        assert gene_counts['ACTB'] == 9
        assert gene_counts['GAPDH'] == 9
Example No. 10
def processing_pipeline(
    experiment: starfish.Experiment,
    fov_name: str,
    n_processes: Optional[int] = None
) -> Tuple[starfish.ImageStack, starfish.IntensityTable]:
    """Process a single field of view of an experiment

    Parameters
    ----------
    experiment : starfish.Experiment
        starfish experiment containing fields of view to analyze
    fov_name : str
        name of the field of view to process
    n_processes : int
        number of processes to use when running the filters

    Returns
    -------
    starfish.ImageStack :
        the filtered primary ImageStack from the last image group processed
    starfish.IntensityTable :
        decoded IntensityTable containing spots matched to the genes they are hybridized against
    """

    print("Loading images...")
    primary_image = experiment[fov_name].get_image(FieldOfView.PRIMARY_IMAGES)
    all_intensities = list()
    codebook = experiment.codebook

    images = enumerate(experiment[fov_name].iterate_image_type(
        FieldOfView.PRIMARY_IMAGES))

    for image_number, primary_image in images:

        print(f"Filtering image {image_number}...")
        filter_kwargs = dict(in_place=True,
                             verbose=True,
                             n_processes=n_processes)
        print("Applying Clip...")
        clip1.run(primary_image, **filter_kwargs)
        print("Applying Bandpass...")
        bandpass.run(primary_image, **filter_kwargs)
        print("Applying Gaussian Low Pass...")
        glp.run(primary_image, **filter_kwargs)
        print("Applying Clip...")
        clip2.run(primary_image, **filter_kwargs)

        print("Calling spots...")
        spot_attributes = tlmpf.run(primary_image)
        all_intensities.append(spot_attributes)

    spot_attributes = IntensityTable.concatenate_intensity_tables(
        all_intensities)

    print("Decoding spots...")
    decoded = codebook.decode_per_round_max(spot_attributes)
    decoded = decoded[decoded["total_intensity"] > .025]

    print("Processing complete.")

    return primary_image, decoded
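
A hedged sketch of invoking the pipeline above. The experiment manifest path and field-of-view name are hypothetical, and the filters (clip1, bandpass, glp, clip2) and spot finder (tlmpf) are assumed to be configured elsewhere in the surrounding script:

import starfish

experiment = starfish.Experiment.from_json("experiment.json")  # hypothetical manifest path
filtered_image, decoded = processing_pipeline(experiment, fov_name="fov_000", n_processes=4)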
Example No. 11
def test_intensity_table_can_be_constructed_from_a_numpy_array_and_spot_attributes(
):
    """
    Verify that the IntensityTable can be created and that the resulting data matches the array
    it was constructed from.
    """
    spot_attributes = spot_attribute_factory(3)
    data = np.zeros(30).reshape(3, 5, 2)
    intensities = IntensityTable.from_spot_data(data, spot_attributes)

    assert intensities.shape == data.shape
    assert np.array_equal(intensities.values, data)
Example No. 12
def intensity_table_factory() -> IntensityTable:
    """IntensityTable with a single feature that was measured over 2 channels and 2 rounds."""

    intensities = np.array([[[0, 3], [4, 0]]], dtype=float)
    spot_attribute_data = pd.DataFrame(
        data=[0, 0, 0, 1],
        index=[Axes.ZPLANE, Axes.Y, Axes.X, Features.SPOT_RADIUS]).T
    spot_attributes = SpotAttributes(spot_attribute_data)

    intensity_table = IntensityTable.from_spot_data(intensities,
                                                    spot_attributes)
    return intensity_table
Example No. 13
def intensity_table_factory(data: np.ndarray = np.array([[[0, 3], [4, 0]]])) -> IntensityTable:
    """IntensityTable factory; by default, one feature measured over 2 channels and 2 rounds."""

    # generates spot attributes equal in size to the number of passed features.
    # each attribute has coordinates (z, y, x) equal to the feature index, and radius 1.
    spot_attributes_data = pd.DataFrame(
        data=np.array([[i, i, i, 1] for i in np.arange(data.shape[0])]),
        columns=[Axes.ZPLANE, Axes.Y, Axes.X, Features.SPOT_RADIUS]
    )

    spot_attributes = SpotAttributes(spot_attributes_data)
    intensity_table = IntensityTable.from_spot_data(data, spot_attributes)
    return intensity_table
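
A short usage sketch of the parametrized factory above, passing a two-feature array instead of relying on the single-feature default (the values are arbitrary):

data = np.array([[[0, 3], [4, 0]],
                 [[1, 0], [0, 4]]], dtype=float)
it = intensity_table_factory(data)
assert it.sizes[Features.AXIS] == 2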
Example No. 14
def synthetic_decoded_intensity_table(
    codebook,
    num_z: int = 12,
    height: int = 50,
    width: int = 40,
    n_spots: int = 10,
    mean_fluor_per_spot: int = 200,
    mean_photons_per_fluor: int = 50,
) -> DecodedIntensityTable:
    """
    Creates an IntensityTable with synthetic spots that correspond to valid
    codes in the provided codebook.

    Parameters
    ----------
    codebook : Codebook
        Starfish codebook object.
    num_z : int
        Number of z-planes to use when localizing spots.
    height : int
        y dimension of each synthetic plane.
    width : int
        x dimension of each synthetic plane.
    n_spots : int
        Number of spots to generate.
    mean_fluor_per_spot : int
         Mean number of fluorophores per spot.
    mean_photons_per_fluor : int
        Mean number of photons per fluorophore.

    Returns
    -------
    DecodedIntensityTable
    """

    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=num_z,
        height=height,
        width=width,
        n_spots=n_spots,
        mean_fluor_per_spot=mean_fluor_per_spot,
        mean_photons_per_fluor=mean_photons_per_fluor)
    targets = np.random.choice(codebook.coords[Features.TARGET],
                               size=n_spots,
                               replace=True)

    return DecodedIntensityTable.from_intensity_table(intensities,
                                                      targets=(Features.AXIS,
                                                               targets))
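
A hedged usage sketch, pairing the factory above with the codebook factory used in these examples; the sizes are illustrative:

codebook = factories.codebook_array_factory()
decoded = synthetic_decoded_intensity_table(codebook, num_z=1, height=50, width=50, n_spots=5)
assert decoded.sizes[Features.AXIS] == 5
assert set(decoded[Features.TARGET].values) <= set(codebook[Features.TARGET].values)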
Example No. 15
    def verify_results(self, intensities):
        assert intensities[Features.PASSES_THRESHOLDS].sum()

        spots_df = IntensityTable(
            intensities.where(intensities[Features.PASSES_THRESHOLDS],
                              drop=True)).to_features_dataframe()
        spots_df['area'] = np.pi * spots_df['radius']**2

        # verify number of spots detected
        spots_passing_filters = intensities[Features.PASSES_THRESHOLDS].sum()
        assert spots_passing_filters == 53  # TODO note, had to change this by 1

        # compare to benchmark data -- note that this particular part of the dataset
        # appears completely uncorrelated
        cnts_benchmark = pd.read_csv(
            'https://d2nhj9g34unfro.cloudfront.net/20181005/DARTFISH/fov_001/counts.csv'
        )

        min_dist = 0.6
        cnts_starfish = spots_df[spots_df.distance <= min_dist].groupby(
            'target').count()['area']
        cnts_starfish = cnts_starfish.reset_index(level=0)
        cnts_starfish.rename(columns={
            'target': 'gene',
            'area': 'cnt_starfish'
        },
                             inplace=True)

        # get top 5 genes and verify they are correct
        high_expression_genes = cnts_starfish.sort_values(
            'cnt_starfish', ascending=False).head(5)

        assert np.array_equal(high_expression_genes['cnt_starfish'].values,
                              [7, 3, 2, 2, 2])
        assert np.array_equal(high_expression_genes['gene'].values,
                              ['MBP', 'MOBP', 'ADCY8', 'TRIM66', 'SYT6'])

        # verify correlation is accurate for this subset of the image
        benchmark_comparison = pd.merge(cnts_benchmark,
                                        cnts_starfish,
                                        on='gene',
                                        how='left')
        benchmark_comparison.head(20)

        x = benchmark_comparison.dropna().cnt.values
        y = benchmark_comparison.dropna().cnt_starfish.values
        corrcoef = np.corrcoef(x, y)
        corrcoef = corrcoef[0, 1]

        assert np.round(corrcoef, 5) == 0.03028
Example No. 16
def test_intensity_table_concatenation():
    """create two IntensityTables and assert that they are being concatenated properly."""

    r, c, z, y, x = 3, 3, 2, 2, 5
    data = np.zeros(180, dtype=np.float32).reshape(r, c, z, y, x)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    intensities2 = intensities.copy()

    original_shape = intensities.shape

    expected_shape = list(original_shape)
    expected_shape[0] *= 2  # only features is concatenated
    assert np.array_equal(
        concatenate([intensities, intensities2]).shape, expected_shape)

    # slice out a single channel and round from both experiments, such that the data no longer
    # match across any dimension but the concatenation dimension. Each slice keeps all
    # 2 (z) * 2 (y) * 5 (x) = 20 features, so the result should have shape (40, 2, 2)
    i1 = intensities.where(np.logical_and(intensities.r == 0,
                                          intensities.c == 0),
                           drop=True)
    i2 = intensities.where(np.logical_and(intensities.r == 1,
                                          intensities.c == 1),
                           drop=True)
    expected_shape = (i1.shape[0] + i2.shape[0], 2, 2)
    result = concatenate([i1, i2])

    assert expected_shape == result.shape

    # slice a larger r value for the second array; there are still only two distinct values, so
    # the shape should be (40, 2, 2)
    i3 = intensities.where(np.logical_and(intensities.r == 2,
                                          intensities.c == 1),
                           drop=True)
    expected_shape = (i1.shape[0] + i3.shape[0], 2, 2)
    result = concatenate([i1, i3])

    assert expected_shape == result.shape

    # slice out z in addition to reduce the total feature number by 1/2
    i4 = intensities.where(np.logical_and(intensities.r == 0,
                                          intensities.z == 1),
                           drop=True)
    expected_shape = (i1.shape[0] + i4.shape[0], 3, 1)
    result = concatenate([i1, i4])

    assert expected_shape == result.shape
Example No. 17
def test_tranfering_physical_coords_to_intensity_table():
    stack_shape = OrderedDict([(Axes.ROUND, 3), (Axes.CH, 2), (Axes.ZPLANE, 1),
                               (Axes.Y, 50), (Axes.X, 40)])

    physical_coords = OrderedDict([(PhysicalCoordinateTypes.X_MIN, 1),
                                   (PhysicalCoordinateTypes.X_MAX, 2),
                                   (PhysicalCoordinateTypes.Y_MIN, 4),
                                   (PhysicalCoordinateTypes.Y_MAX, 6),
                                   (PhysicalCoordinateTypes.Z_MIN, 1),
                                   (PhysicalCoordinateTypes.Z_MAX, 3)])

    stack = factories.imagestack_with_coords_factory(stack_shape,
                                                     physical_coords)
    codebook = factories.codebook_array_factory()

    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=stack_shape[Axes.ZPLANE],
        height=stack_shape[Axes.Y],
        width=stack_shape[Axes.X],
        n_spots=NUMBER_SPOTS)

    intensities = intensity_table_coordinates.\
        transfer_physical_coords_from_imagestack_to_intensity_table(stack, intensities)

    # Assert that new coords were added
    xc = intensities.coords[Coordinates.X]
    yc = intensities.coords[Coordinates.Y]
    zc = intensities.coords[Coordinates.Z]
    assert xc.size == NUMBER_SPOTS
    assert yc.size == NUMBER_SPOTS
    assert zc.size == NUMBER_SPOTS

    # Assert that the physical coords align with their corresponding pixel coords
    for spot in xc.features:
        pixel_x = spot[Axes.X.value].data
        physical_x = stack.xarray[Coordinates.X.value][pixel_x]
        assert np.isclose(spot[Coordinates.X.value], physical_x)

    for spot in yc.features:
        pixel_y = spot[Axes.Y.value].data
        physical_y = stack.xarray[Coordinates.Y.value][pixel_y]
        assert np.isclose(spot[Coordinates.Y.value], physical_y)

    # Assert that zc value is middle of z range
    for spot in zc.features:
        z_plane = spot[Axes.ZPLANE.value].data
        physical_z = stack.xarray[Coordinates.Z.value][z_plane]
        assert np.isclose(spot[Coordinates.Z.value], physical_z)
Example No. 18
def test_tranfering_physical_coords_to_expression_matrix():
    stack_shape = OrderedDict([(Axes.ROUND, 3), (Axes.CH, 2), (Axes.ZPLANE, 1),
                               (Axes.Y, 50), (Axes.X, 40)])

    physical_coords = OrderedDict([(PhysicalCoordinateTypes.X_MIN, 1),
                                   (PhysicalCoordinateTypes.X_MAX, 2),
                                   (PhysicalCoordinateTypes.Y_MIN, 4),
                                   (PhysicalCoordinateTypes.Y_MAX, 6),
                                   (PhysicalCoordinateTypes.Z_MIN, 1),
                                   (PhysicalCoordinateTypes.Z_MAX, 3)])

    stack = factories.imagestack_with_coords_factory(stack_shape,
                                                     physical_coords)
    codebook = factories.codebook_array_factory()

    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=stack_shape[Axes.ZPLANE],
        height=stack_shape[Axes.Y],
        width=stack_shape[Axes.X],
        n_spots=NUMBER_SPOTS)

    intensities = intensity_table_coordinates. \
        transfer_physical_coords_from_imagestack_to_intensity_table(stack, intensities)

    # Check that an error is thrown before cell_id assignment
    try:
        intensities.to_expression_matrix()
    except KeyError as e:
        # Assert the KeyError is thrown with the right message
        assert e.args[0] == "IntensityTable must have 'cell_id' assignments for each cell before " \
                            "this function can be called. See starfish.TargetAssignment.Label."

    # mock out some cell_ids
    cell_ids = random.sample(range(1, 20), NUMBER_SPOTS)
    intensities[Features.CELL_ID] = (Features.AXIS, cell_ids)

    expression_matrix = intensities.to_expression_matrix()
    # Assert that coords were transferred
    xc = expression_matrix.coords[Coordinates.X]
    yc = expression_matrix.coords[Coordinates.Y]
    zc = expression_matrix.coords[Coordinates.Z]
    assert xc.size == len(set(cell_ids))
    assert yc.size == len(set(cell_ids))
    assert zc.size == len(set(cell_ids))
Example No. 19
def test_save_expression_matrix():

    codebook = codebook_array_factory()

    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=3,
        height=100,
        width=100,
        n_spots=10
    )
    # mock out some cell_ids
    cell_ids = random.sample(range(1, 20), NUMBER_SPOTS)
    intensities[Features.CELL_ID] = (Features.AXIS, cell_ids)

    expression_matrix = intensities.to_expression_matrix()

    # test all saving methods
    expression_matrix.save("expression")
Example No. 20
def test_intensity_table_serialization():
    """
    Test that an IntensityTable can be saved to disk, and that when it is reloaded, the data is
    unchanged
    """

    # create an IntensityTable
    data = np.zeros(100, dtype=np.float32).reshape(1, 5, 2, 2, 5)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    # dump it to disk
    tempdir = tempfile.mkdtemp()
    filename = os.path.join(tempdir, 'test.nc')
    intensities.save(filename)

    # verify the data has not changed
    loaded = intensities.load(filename)
    assert intensities.equals(loaded)
Example No. 21
def dummy_intensities() -> IntensityTable:

    codebook = test_utils.codebook_array_factory()
    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=10,
        height=10,
        width=10,
        n_spots=5,
    )

    intensities[Coordinates.Z.value] = (Features.AXIS, [0, 1, 0, 1, 0])
    intensities[Coordinates.Y.value] = (Features.AXIS, [10, 30, 50, 40, 20])
    intensities[Coordinates.X.value] = (Features.AXIS, [50.2, 30.2, 60.2, 40.2, 70.2])

    # remove target from dummy to test error messages
    del intensities[Features.TARGET]

    return intensities
Example No. 22
def test_intensity_table_can_be_created_from_spot_attributes():
    """
    This test creates an IntensityTable from spot attributes, and verifies that the size matches
    what was requested and that the values are all zero.
    """

    # input has two spots
    spot_attributes = SpotAttributes(
        pd.DataFrame(
            data=np.array([[1, 1, 1, 1], [2, 2, 2, 1]]),
            columns=[Indices.Z, Indices.Y, Indices.X, Features.SPOT_RADIUS]))

    intensities = IntensityTable.empty_intensity_table(spot_attributes,
                                                       n_ch=1,
                                                       n_round=3)

    assert intensities.sizes[Indices.CH] == 1
    assert intensities.sizes[Indices.ROUND] == 3
    assert intensities.sizes[Features.AXIS] == 2
    assert np.all(intensities.values == 0)
Example No. 23
def _load_data(fov_data, exp):
    """
    Load a field of view dataset 

    Parameters
    ----------
    fov_data : pd.DataFrame
            Table of file locations for the spot results and mask label image
            corresponding to the dataset to be viewed
    exp : Experiment
            Experiment file corresponding to the data analysis

    Returns
    -------
    im_max_proj : np.ndarray
            Image used for the spot detection
    points : List[Dict]
            List of dictionaries containing the points and target names for the
            points to be rendered
    mask_im : np.ndarray
            Label image for the segmentation mask 

    """
    # Get the RNAscope images
    im = exp[fov_data.fov_name].get_image("primary")
    im_max_proj = np.squeeze(im.max_proj(Axes.ZPLANE).xarray.values)

    # Get the spots
    spots_file = fov_data.spot_file
    it = IntensityTable.open_netcdf(spots_file)

    points = _get_points(it, exp)

    # Get the segmentation mask
    mask_file = fov_data.mask_file
    mask_im = io.imread(mask_file)

    return im_max_proj, points, mask_im
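
A hedged usage sketch for the loader above, assuming pandas is imported as pd, exp is a starfish Experiment loaded elsewhere, and the file paths and field-of-view name are hypothetical:

fov_table = pd.DataFrame([{"fov_name": "fov_000",
                           "spot_file": "decoded_spots.nc",
                           "mask_file": "mask.tiff"}])
im_max_proj, points, mask_im = _load_data(fov_table.iloc[0], exp)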
Example No. 24
def test_to_mermaid_dataframe():
    """
    Creates a basic IntensityTable from an ImageStack and verifies that it can be dumped to disk
    as a DataFrame which MERmaid can load. Does not explicitly load the DataFrame in MERmaid.

    Verifies that the save function throws an error when target assignments are not present, which
    are required by MERmaid.
    """
    r, c, z, y, x = 1, 5, 2, 2, 5
    data = np.zeros(100, dtype=np.float32).reshape(r, c, z, y, x)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    # without a target assignment, should raise RuntimeError.
    with pytest.raises(RuntimeError):
        with TemporaryDirectory() as dir_:
            intensities.save_mermaid(os.path.join(dir_, 'test.csv.gz'))

    # assign targets
    intensities[Features.TARGET] = (Features.AXIS, np.random.choice(list('ABCD'), size=20))
    intensities[Features.DISTANCE] = (Features.AXIS, np.random.rand(20))
    with TemporaryDirectory() as dir_:
        intensities.save_mermaid(os.path.join(dir_, 'test.csv.gz'))
Example No. 25
def test_synthetic_intensity_generation():
    """
    Create a 2-spot IntensityTable of pixel size (z=3, y=4, x=5) from a codebook with 3 channels
    and 2 rounds.

    Verify that the constructed synthetic IntensityTable conforms to those dimensions and, given
    a known random seed, that the output spots decode to match a target in the input Codebook.
    """
    # set seed to check that codebook is matched. This seed generates 2 instances of GENE_B
    np.random.seed(1)
    codebook = test_utils.codebook_array_factory()
    num_z, height, width = 3, 4, 5
    intensities = IntensityTable.synthetic_intensities(codebook,
                                                       num_z=num_z,
                                                       height=height,
                                                       width=width,
                                                       n_spots=2)

    # sizes should match codebook
    assert intensities.sizes[Axes.ROUND] == 2
    assert intensities.sizes[Axes.CH] == 3
    assert intensities.sizes[Features.AXIS] == 2

    # attributes should be bounded by the specified size
    assert np.all(intensities[Axes.ZPLANE.value] <= num_z)
    assert np.all(intensities[Axes.Y.value] <= height)
    assert np.all(intensities[Axes.X.value] <= width)

    # both codes should match GENE_B
    assert np.array_equal(
        np.where(intensities.values),
        [
            [0, 0, 1, 1],  # two each in feature 0 & 1
            [1, 2, 1, 2],  # one each in channel 1 & 2
            [1, 0, 1, 0]
        ],  # channel 1 matches round 1, channel 2 matches round zero
    )
Example No. 26
def test_intensity_table_can_be_constructed_from_an_imagestack():
    """
    ImageStack has enough information to create an IntensityTable without additional SpotAttributes.
    Each feature is a pixel, and therefore the SpotAttributes can be extracted from the relative
    locations.
    """
    r, c, z, y, x = 1, 5, 2, 2, 5
    data = np.zeros(100, dtype=np.float32).reshape(r, c, z, y, x)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    # there should be 100 features
    assert np.product(intensities.shape) == 100

    # the max features should be equal to the array extent (2, 2, 5) minus one, since indices
    # are being compared and python is zero based
    # import pdb; pdb.set_trace()
    assert np.max(intensities[Axes.ZPLANE.value].values) == z - 1
    assert np.max(intensities[Axes.Y.value].values) == y - 1
    assert np.max(intensities[Axes.X.value].values) == x - 1

    # the number of channels and rounds should match the ImageStack
    assert intensities.sizes[Axes.CH.value] == c
    assert intensities.sizes[Axes.ROUND.value] == r
Example No. 27
def test_take_max():
    """
    Create two overlapping IntensityTables with differing number of spots and verify that
    by concatenating them with the TAKE_MAX strategy we only include spots in the overlapping
    section from the IntensityTable that had the most.
    """
    it1 = create_intensity_table_with_coords(Area(min_x=0,
                                                  max_x=2,
                                                  min_y=0,
                                                  max_y=2),
                                             n_spots=10)
    it2 = create_intensity_table_with_coords(Area(min_x=1,
                                                  max_x=2,
                                                  min_y=1,
                                                  max_y=3),
                                             n_spots=20)

    concatenated = IntensityTable.concatanate_intensity_tables(
        [it1, it2], overlap_strategy=OverlapStrategy.TAKE_MAX)

    # The overlap covers half of the spots in each intensity table: 5 from it1 and 10 from it2.
    # It2 has more, so the concatenated table keeps all 20 spots from it2 plus the 6 spots from
    # it1 that fall outside the overlap (one lies on the border), for a total of 26 spots.
    assert concatenated.sizes[Features.AXIS] == 26
Example No. 28
tmp = tempfile.gettempdir()
iss_nc = os.path.join(tmp, "iss.nc")
merfish_nc = os.path.join(tmp, "merfish.nc")
dartfish_nc = os.path.join(tmp, "dartfish.nc")


def curl(dest_path, link):
    with open(dest_path, "wb") as fh:
        fh.write(requests.get(link).content)


curl(iss_nc, iss_link)
curl(merfish_nc, merfish_link)
curl(dartfish_nc, dartfish_link)

iss_intensity_table = IntensityTable.load(iss_nc)
merfish_intensity_table = IntensityTable.load(merfish_nc)
dartfish_intensity_table = IntensityTable.load(dartfish_nc)
# EPY: END code

# EPY: START code
datasets = [
    iss_intensity_table, merfish_intensity_table, dartfish_intensity_table
]
# EPY: END code

# EPY: START markdown
### Load Background Images
# EPY: END markdown

# EPY: START code
Example No. 29
plt.title('Set minimum distance threshold')

from starfish import IntensityTable

distance_threshold = min_dist

psd = DetectPixels.PixelSpotDecoder(codebook=experiment.codebook,
                                    metric='euclidean',
                                    distance_threshold=distance_threshold,
                                    magnitude_threshold=magnitude_threshold,
                                    min_area=area_threshold[0],
                                    max_area=area_threshold[1])

spot_intensities, results = psd.run(filtered_imgs)
spot_intensities = IntensityTable(
    spot_intensities.where(spot_intensities[Features.PASSES_THRESHOLDS],
                           drop=True))

###################################################################################################
# Here, we:
#
# 1. Pick a rolony that was successfully decoded to a gene.
# 2. Pull out the average pixel trace for that rolony.
# 3. Plot that pixel trace against the barcode of that gene.
#
# This lets us visually assess how closely decoded barcodes match their targets.

# reshape the spot intensity table into a RxC barcode vector
pixel_traces = spot_intensities.stack(traces=(Axes.ROUND.value, Axes.CH.value))

# extract dataframe from spot intensity table for indexing purposes
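
###################################################################################################
# The original listing is truncated here. What follows is a minimal sketch of the three steps
# listed above, reusing the variable names from this example; the choice of gene is illustrative.

traces_df = pixel_traces.to_features_dataframe()

# 1. pick a rolony that decoded to a real target
gene = traces_df.loc[traces_df[Features.TARGET] != 'nan', Features.TARGET].iloc[0]

# 2. average the pixel traces belonging to that target
gene_trace = pixel_traces.where(pixel_traces[Features.TARGET] == gene, drop=True).mean(Features.AXIS)

# 3. stack the matching codebook entry the same way and plot the two vectors side by side
barcode = experiment.codebook.sel(target=gene).stack(traces=(Axes.ROUND.value, Axes.CH.value))
plt.plot(gene_trace, label='observed trace')
plt.plot(barcode, label='codebook barcode')
plt.legend()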
Example No. 30
def test_tranfering_physical_coords_to_intensity_table():
    stack_shape = OrderedDict([(Axes.ROUND, 3), (Axes.CH, 2),
                               (Axes.ZPLANE, 1), (Axes.Y, 50), (Axes.X, 40)])

    physical_coords = OrderedDict([(PhysicalCoordinateTypes.X_MIN, 1),
                                   (PhysicalCoordinateTypes.X_MAX, 2),
                                   (PhysicalCoordinateTypes.Y_MIN, 4),
                                   (PhysicalCoordinateTypes.Y_MAX, 6),
                                   (PhysicalCoordinateTypes.Z_MIN, 1),
                                   (PhysicalCoordinateTypes.Z_MAX, 3)])

    stack = test_utils.imagestack_with_coords_factory(stack_shape, physical_coords)
    codebook = test_utils.codebook_array_factory()

    intensities = IntensityTable.synthetic_intensities(
        codebook,
        num_z=stack_shape[Axes.ZPLANE],
        height=stack_shape[Axes.Y],
        width=stack_shape[Axes.X],
        n_spots=NUMBER_SPOTS
    )

    intensities = intensity_table_coordinates.\
        transfer_physical_coords_from_imagestack_to_intensity_table(stack, intensities)

    # Assert that new coords were added
    xc = intensities.coords[Coordinates.X]
    yc = intensities.coords[Coordinates.Y]
    zc = intensities.coords[Coordinates.Z]
    assert xc.size == NUMBER_SPOTS
    assert yc.size == NUMBER_SPOTS
    assert zc.size == NUMBER_SPOTS

    physical_pixel_size_x = physical_coordinate_calculator._calculate_physical_pixel_size(
        coord_min=physical_coords[PhysicalCoordinateTypes.X_MIN],
        coord_max=physical_coords[PhysicalCoordinateTypes.X_MAX],
        num_pixels=stack_shape[Axes.X])

    physical_pixel_size_y = physical_coordinate_calculator._calculate_physical_pixel_size(
        coord_min=physical_coords[PhysicalCoordinateTypes.Y_MIN],
        coord_max=physical_coords[PhysicalCoordinateTypes.Y_MAX],
        num_pixels=stack_shape[Axes.Y])

    # Assert that the physical coords align with their corresponding pixel coords
    for spot in xc.features:
        pixel_x = spot[Axes.X.value].data
        physical_x = spot[Coordinates.X.value].data
        calculated_pixel = physical_cord_to_pixel_value(physical_x,
                                                        physical_pixel_size_x,
                                                        physical_coords[
                                                            PhysicalCoordinateTypes.X_MIN
                                                        ])
        assert np.isclose(pixel_x, calculated_pixel)

    for spot in yc.features:
        pixel_y = spot[Axes.Y.value].data
        physical_y = spot[Coordinates.Y.value].data
        calculated_pixel = physical_cord_to_pixel_value(physical_y,
                                                        physical_pixel_size_y,
                                                        physical_coords[
                                                            PhysicalCoordinateTypes.Y_MIN
                                                        ])
        assert np.isclose(pixel_y, calculated_pixel)

    # Assert that zc value is middle of z range
    for spot in zc.features:
        physical_z = spot[Coordinates.Z.value].data
        assert np.isclose(physical_coords[PhysicalCoordinateTypes.Z_MAX],
                          (physical_z * 2) - physical_coords[PhysicalCoordinateTypes.Z_MIN])