Example 1
def labeled_intensities_factory() -> Tuple[IntensityTable, np.ndarray, np.ndarray]:
    """
    Create a decoded IntensityTable with distance scores, and a corresponding label_image and
    decoded_image.
    """
    data = np.array(
        [
            [
                [[0., 0.], [.1, .1]],  # ch 1
                [[.5, .5], [.2, .3]]
            ],
            [
                [[.1, .1], [0, 0]],  # ch 2, x & y are reversed
                [[.2, .3], [.5, .5]]
            ]
        ],
        dtype=np.float32)
    image_stack = ImageStack.from_numpy_array(data.reshape(1, 2, 2, 2, 2))
    intensity_table = IntensityTable.from_image_stack(image_stack)
    intensity_table[Features.DISTANCE] = (Features.AXIS,
                                          np.zeros(intensity_table.shape[0]))
    label_image = np.array([[[1, 2], [3, 4]], [[1, 2], [3, 4]]])
    decoded_image = np.array([[[5, 4], [3, 2]], [[5, 4], [3, 2]]])

    # verify that the listed label image is what would be created by the function we use in the
    # code
    assert np.array_equal(label(decoded_image), label_image)

    return intensity_table, label_image, decoded_image
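A hypothetical usage sketch of the factory above (not part of the original source; it assumes it runs in the same module, where numpy is imported as np): the (r, c, z, y, x) = (1, 2, 2, 2, 2) array yields one feature per pixel.

intensity_table, label_image, decoded_image = labeled_intensities_factory()

# one feature per (z, y, x) pixel: 2 * 2 * 2 = 8, sitting on axis 0 of the IntensityTable
assert intensity_table.shape[0] == 8

# the hard-coded label image contains four connected components
assert set(np.unique(label_image)) == {1, 2, 3, 4}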
Example 2
def create_imagestack_with_magnitude_scale():
    """create an imagestack with increasing magnitudes"""
    data = np.linspace(0, 1, 11, dtype=np.float32)
    data = np.repeat(data[None, :], 2, axis=0)
    # reshape data into a 1-round, 2-channel image with a (1, 11, 1) volume in (z, y, x)
    data = data.reshape(1, 2, 1, 11, 1)
    imagestack = ImageStack.from_numpy_array(data)
    return imagestack
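A quick sanity check of this fixture (a sketch, not from the original source; it assumes the same module imports numpy as np and that ImageStack.xarray exposes the data in (round, ch, z, y, x) order, as in the deepcopy tests below): the per-pixel magnitude across the two channels ramps linearly from 0 to sqrt(2).

stack = create_imagestack_with_magnitude_scale()

# collapse the singleton round/z/x axes; the two rows are the two channels
channel_values = stack.xarray.values.reshape(2, 11)
magnitudes = np.linalg.norm(channel_values, axis=0)
assert np.allclose(magnitudes, np.linspace(0, 1, 11) * np.sqrt(2))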
Example 3
def test_imagestack_xarray_deepcopy(nitems: int=10) -> None:
    """
    Instantiate an :py:class:`ImageStack` and deepcopy its xarray directly.  This should work, but
    should emit a warning.
    """
    shape = (nitems, 3, 4, 5, 6)
    dtype = np.float32
    source = np.zeros(shape, dtype=dtype)
    imagestack = ImageStack.from_numpy_array(source)
    with warnings.catch_warnings(record=True) as warnings_:
        copy.deepcopy(imagestack.xarray)
        assert len(warnings_) == 1  # type: ignore
def traversing_code() -> ImageStack:
    """this code walks in a sequential direction, and should only be detectable from some anchors"""
    img = np.zeros((3, 2, 20, 50, 50), dtype=np.float32)

    # code 1
    img[0, 0, 5, 35, 35] = 10
    img[1, 1, 5, 32, 32] = 10
    img[2, 0, 5, 29, 29] = 10

    # blur points
    gaussian_filter(img, (0, 0, 0.5, 1.5, 1.5), output=img)

    return ImageStack.from_numpy_array(img)
def jitter_code() -> ImageStack:
    """this code has some minor jitter <= 3px at the most distant point"""
    img = np.zeros((3, 2, 20, 50, 50), dtype=np.float32)

    # code 1
    img[0, 0, 5, 35, 35] = 10
    img[1, 1, 5, 34, 35] = 10
    img[2, 0, 6, 35, 33] = 10

    # blur points
    gaussian_filter(img, (0, 0, 0.5, 1.5, 1.5), output=img)

    return ImageStack.from_numpy_array(img)
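A sketch of how these spot fixtures behave (hypothetical, not part of the original tests; it assumes (round, ch, z, y, x) xarray ordering and that from_numpy_array preserves the blurred float32 values): the Gaussian blur is symmetric, so the brightest pixel of each seeded (round, channel) volume stays at the seeded coordinate.

stack = jitter_code()
round0_ch0 = stack.xarray.values[0, 0]   # the (z, y, x) volume for round 0, channel 0
peak = np.unravel_index(np.argmax(round0_ch0), round0_ch0.shape)
assert peak == (5, 35, 35)               # the coordinate seeded above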
Example 6
def test_intensity_table_concatenation():
    """create two IntensityTables and assert that they are being concatenated properly."""

    r, c, z, y, x = 3, 3, 2, 2, 5
    data = np.zeros(180, dtype=np.float32).reshape(r, c, z, y, x)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    intensities2 = intensities.copy()

    original_shape = intensities.shape

    expected_shape = list(original_shape)
    expected_shape[0] *= 2  # only features is concatenated
    assert np.array_equal(
        concatenate([intensities, intensities2]).shape, expected_shape)

    # slice out a single round and channel from each table, so the data no longer match on any
    # dimension except the concatenation (features) dimension. Each slice keeps
    # z * y * x = 2 * 2 * 5 = 20 features, so concatenating the two yields shape (40, 2, 2) once
    # the disjoint r and c coordinates are unioned (see the xarray sketch after this example)
    i1 = intensities.where(np.logical_and(intensities.r == 0,
                                          intensities.c == 0),
                           drop=True)
    i2 = intensities.where(np.logical_and(intensities.r == 1,
                                          intensities.c == 1),
                           drop=True)
    expected_shape = (i1.shape[0] + i2.shape[0], 2, 2)
    result = concatenate([i1, i2])

    assert expected_shape == result.shape

    # slice a larger r value for the second array; there are still only two distinct r values
    # (0 and 2), so the shape should again be (40, 2, 2)
    i3 = intensities.where(np.logical_and(intensities.r == 2,
                                          intensities.c == 1),
                           drop=True)
    expected_shape = (i1.shape[0] + i3.shape[0], 2, 2)
    result = concatenate([i1, i3])

    assert expected_shape == result.shape

    # also slice on z, which halves the number of features in the second table
    i4 = intensities.where(np.logical_and(intensities.r == 0,
                                          intensities.z == 1),
                           drop=True)
    expected_shape = (i1.shape[0] + i4.shape[0], 3, 1)
    result = concatenate([i1, i4])

    assert expected_shape == result.shape
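The (40, 2, 2) arithmetic above follows from how xarray concatenates labelled arrays: the feature axes are stacked, while the non-concatenated coordinates are outer-joined and padded with NaN. A minimal standalone sketch in plain xarray (not the IntensityTable API):

import numpy as np
import xarray as xr

a = xr.DataArray(np.zeros((20, 1, 1)), dims=("features", "c", "r"),
                 coords={"c": [0], "r": [0]})
b = xr.DataArray(np.zeros((20, 1, 1)), dims=("features", "c", "r"),
                 coords={"c": [1], "r": [1]})

# 20 + 20 features; the disjoint c and r coords are unioned to {0, 1}
assert xr.concat([a, b], dim="features").shape == (40, 2, 2)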
def two_perfect_codes() -> ImageStack:
    """this code has no jitter"""
    img = np.zeros((3, 2, 20, 50, 50), dtype=np.float32)

    # code 1
    img[0, 0, 5, 20, 35] = 10
    img[1, 1, 5, 20, 35] = 10
    img[2, 0, 5, 20, 35] = 10

    # code 1, second spot
    img[0, 0, 5, 40, 45] = 10
    img[1, 1, 5, 40, 45] = 10
    img[2, 0, 5, 40, 45] = 10

    # blur points
    gaussian_filter(img, (0, 0, 0.5, 1.5, 1.5), output=img)

    return ImageStack.from_numpy_array(img)
Example 8
def test_imagestack_deepcopy(nitems: int=10) -> None:
    """
    Instantiate an :py:class:`ImageStack` and deepcopy it.  Worker processes reconstitute a numpy
    array from the buffer and attempt to write to it.  Writes in the worker processes should be
    visible in the parent process.
    """
    shape = (nitems, 3, 4, 5, 6)
    dtype = np.float32
    source = np.zeros(shape, dtype=dtype)
    imagestack = ImageStack.from_numpy_array(source)
    imagestack_copy = copy.deepcopy(imagestack)
    _start_process_to_test_shmem(
        array_holder=imagestack_copy._data._backing_mp_array,
        decoder=partial(_decode_imagestack_array_to_numpy_array, shape, dtype),
        nitems=nitems)
    for ix in range(nitems):
        assert (imagestack.xarray[ix] == 0).all()
        assert np.allclose(imagestack_copy.xarray[ix], ix)
Example 9
def test_intensity_table_serialization():
    """
    Test that an IntensityTable can be saved to disk, and that when it is reloaded, the data is
    unchanged
    """

    # create an IntensityTable
    data = np.zeros(100, dtype=np.float32).reshape(1, 5, 2, 2, 5)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    # dump it to disk
    tempdir = tempfile.mkdtemp()
    filename = os.path.join(tempdir, 'test.nc')
    intensities.save(filename)

    # verify the data has not changed
    loaded = intensities.load(filename)
    assert intensities.equals(loaded)
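The same round-trip can be written with a temporary directory that cleans itself up (a sketch, not part of the original test; it reuses the intensities table and the save/load calls shown above):

import os
import tempfile

with tempfile.TemporaryDirectory() as dir_:
    path = os.path.join(dir_, 'roundtrip.nc')
    intensities.save(path)                               # serialize to netCDF
    assert intensities.equals(intensities.load(path))    # reload and compare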
Example 10
def setup_linear_unmixing_test():
    """
        Create the image stack, coeff matrix, and reference result
        for the linear unmixing test

    """
    # Create image
    r, c, z, y, x = 2, 3, 6, 5, 4
    im = np.ones((r, c, z, y, x), dtype=np.float32)
    stack = ImageStack.from_numpy_array(im)

    # Create coefficients matrix
    coeff_mat = np.array([[1, 0, 0], [-0.25, 1, -0.25], [0, 0, 1]])

    # Create reference result
    ref_result = np.ones((r, c, z, y, x))
    ref_result[:, 1, ...] = 0.5 * np.ones((z, y, x))

    return stack, coeff_mat, ref_result
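A worked check of the 0.5 in channel 1 (hypothetical; it assumes each row of coeff_mat holds the weights for one output channel, which is consistent with the reference result above): for an all-ones image, channel 1 becomes -0.25 * 1 + 1 * 1 - 0.25 * 1 = 0.5, while channels 0 and 2 pass through unchanged.

import numpy as np

coeff_mat = np.array([[1, 0, 0], [-0.25, 1, -0.25], [0, 0, 1]])
one_pixel = np.ones(3, dtype=np.float32)   # the three channel values of a single pixel
assert np.allclose(coeff_mat @ one_pixel, [1.0, 0.5, 1.0])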
def multiple_possible_neighbors() -> ImageStack:
    """this image is intended to be tested with anchor_round in {0, 1}, last round has more spots"""
    img = np.zeros((3, 2, 20, 50, 50), dtype=np.float32)

    # round 1
    img[0, 0, 5, 20, 40] = 10
    img[0, 0, 5, 40, 20] = 10

    # round 2
    img[1, 1, 5, 20, 40] = 10
    img[1, 1, 5, 40, 20] = 10

    # round 3
    img[2, 0, 5, 20, 40] = 10
    img[2, 0, 5, 35, 35] = 10
    img[2, 0, 5, 40, 20] = 10

    # blur points
    gaussian_filter(img, (0, 0, 0.5, 1.5, 1.5), output=img)

    return ImageStack.from_numpy_array(img)
Example 12
def _create_dataset(pixel_dimensions: Tuple[int, int, int],
                    spot_coordinates: Sequence[Tuple[int, int, int]],
                    codebook: Codebook) -> ImageStack:
    """
    Creates an ImageStack containing one spot per codebook entry at spot_coordinates. The length of
    spot_coordinates must therefore match the number of codes in the Codebook.
    """
    assert len(spot_coordinates) == codebook.sizes[Features.TARGET]

    data_shape = (codebook.sizes[Axes.ROUND.value],
                  codebook.sizes[Axes.CH.value], *pixel_dimensions)
    imagestack_data = np.zeros(data_shape, dtype=np.float32)

    for f, (z, y, x) in enumerate(spot_coordinates):
        imagestack_data[:, :, z, y, x] = codebook[f].transpose(
            Axes.ROUND.value, Axes.CH.value)

    # blur with a small non-isotropic kernel. TODO: make the kernel smaller.
    imagestack_data = gaussian_filter(imagestack_data,
                                      sigma=(0, 0, 0.7, 1.5, 1.5))
    return ImageStack.from_numpy_array(imagestack_data)
Example 13
def test_to_mermaid_dataframe():
    """
    Creates a basic IntensityTable from an ImageStack and verifies that it can be dumped to disk
    as a DataFrame which MERmaid can load. Does not explicitly load the DataFrame in MERmaid.

    Verifies that the save function throws an error when target assignments are not present, which
    are required by MERmaid.
    """
    r, c, z, y, x = 1, 5, 2, 2, 5
    data = np.zeros(100, dtype=np.float32).reshape(r, c, z, y, x)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    # without a target assignment, should raise RuntimeError.
    with pytest.raises(RuntimeError):
        with TemporaryDirectory() as dir_:
            intensities.save_mermaid(os.path.join(dir_, 'test.csv.gz'))

    # assign targets
    intensities[Features.TARGET] = (Features.AXIS, np.random.choice(list('ABCD'), size=20))
    intensities[Features.DISTANCE] = (Features.AXIS, np.random.rand(20))
    with TemporaryDirectory() as dir_:
        intensities.save_mermaid(os.path.join(dir_, 'test.csv.gz'))
Example 14
def test_intensity_table_can_be_constructed_from_an_imagestack():
    """
    ImageStack has enough information to create an IntensityTable without additional SpotAttributes.
    Each feature is a pixel, and therefore the SpotAttributes can be extracted from the relative
    locations.
    """
    r, c, z, y, x = 1, 5, 2, 2, 5
    data = np.zeros(100, dtype=np.float32).reshape(r, c, z, y, x)
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)

    # there should be 100 features
    assert np.prod(intensities.shape) == 100

    # the maximum pixel coordinates should equal the array extents (2, 2, 5) minus one, since
    # zero-based indices are being compared
    assert np.max(intensities[Axes.ZPLANE.value].values) == z - 1
    assert np.max(intensities[Axes.Y.value].values) == y - 1
    assert np.max(intensities[Axes.X.value].values) == x - 1

    # the number of channels and rounds should match the ImageStack
    assert intensities.sizes[Axes.CH.value] == c
    assert intensities.sizes[Axes.ROUND.value] == r
def decoded_intensity_table_factory() -> Tuple[IntensityTable, np.ndarray]:
    """
    Create an IntensityTable that has gene labels, including null labels. The data doesn't matter,
    so np.zeros is used.
    """
    data = np.zeros((1, 1, 2, 3, 3), dtype=np.float32)
    labels = np.array(
        [[[0, 1, 1], [0, 2, 2], [1, 1, 1]], [[0, 1, 1], [1, 1, 1], [0, 1, 2]]],
        dtype='<U3')
    labels_with_nan = labels.copy()
    labels_with_nan[labels == '0'] = 'nan'

    # create an intensity table and add the labels
    image_stack = ImageStack.from_numpy_array(data)
    intensities = IntensityTable.from_image_stack(image_stack)
    intensities[Features.TARGET] = (Features.AXIS, np.ravel(labels_with_nan))

    # label the third column of this data as failing filters
    passes_filters = np.ones(data.shape, dtype=bool)
    passes_filters[:, :, :, :, -1] = 0
    intensities[Features.PASSES_THRESHOLDS] = (Features.AXIS,
                                               np.ravel(passes_filters))

    return intensities, labels_with_nan
Example 16
def generate_default_data():
    data = np.random.rand(2, 2, 2, 40, 50).astype(np.float32)
    return ImageStack.from_numpy_array(data)
Example 17
iss_dots = experiment.fov()['dots'].max_proj(Indices.CH, Indices.ROUND,
                                             Indices.Z)
# EPY: END code

# EPY: START code
experiment = starfish.data.MERFISH()
merfish_nuclei = experiment.fov()['nuclei'].max_proj(Indices.CH, Indices.ROUND,
                                                     Indices.Z)

# MERFISH doesn't have a dots image, and some of the channels are stronger than others.
# We can use the scale factors to get the right levels.
merfish_background = experiment.fov().primary_image.max_proj(
    Indices.CH, Indices.ROUND)
merfish_background = np.reshape(merfish_background,
                                (1, 1, *merfish_background.shape))
merfish_background = ImageStack.from_numpy_array(merfish_background)

from starfish.image import Filter

clip = Filter.Clip(p_max=99.7)
merfish_dots = clip.run(merfish_background)

merfish_dots = merfish_dots.max_proj(Indices.CH, Indices.ROUND, Indices.Z)
# EPY: END code

# EPY: START markdown
### Load Decoded Images
# EPY: END markdown

# EPY: START markdown
# Numpy load can't download files from S3 either.