Example 1
def test_select_and_adjust_segmentation_ceiling(
    data_dir,
    image,
    cell_index,
    cell_ceiling_adjustment,
    expected_image,
):
    """
    The example data used to test this function was generated with the original function
    and then stored with `aicsimageio.writers.OmeTiffWriter` after doing an
    `aicsimageio.transforms.transpose_to_dims` to transpose to "CZYX" as `OmeTiffWriter`
    requires data have the "YX" dimensions last. Additionally, metadata has been updated
    to the Channel name standards in the constants.py file.
    """
    # Get actual
    image = AICSImage(data_dir / image).get_image_data("CYXZ", S=0, T=0)
    actual_image = image_utils.select_and_adjust_segmentation_ceiling(
        image, cell_index, cell_ceiling_adjustment=cell_ceiling_adjustment
    )

    # Read expected
    expected_image = AICSImage(data_dir / expected_image)

    # Assert actual equals expected
    assert np.array_equiv(actual_image, expected_image.get_image_data("CYXZ", S=0, T=0))
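
The docstring above describes how the expected file was produced. A minimal sketch of that generation step, assuming the aicsimageio 3.x writer API used elsewhere in these examples; the paths and channel names are placeholders, not the project's real values.

from aicsimageio import AICSImage, transforms
from aicsimageio.writers import OmeTiffWriter

def write_expected_image(source_path, out_path, channel_names):
    # Read the source FOV in the same orientation the test uses
    data = AICSImage(source_path).get_image_data("CYXZ", S=0, T=0)
    # OmeTiffWriter needs "YX" as the trailing dimensions, so go to "CZYX"
    data = transforms.transpose_to_dims(data, given_dims="CYXZ", return_dims="CZYX")
    with OmeTiffWriter(out_path, overwrite_file=True) as writer:
        writer.save(data, channel_names=channel_names)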
Example 2
def get_reader(path: "PathLike",
               in_memory: bool) -> Optional["ReaderFunction"]:
    """
    Given a single path or list of paths, return the appropriate aicsimageio reader.
    """
    # Only support a single path
    if isinstance(path, list):
        print("AICSImageIO: Multi-file reading not yet supported.")
        return None

    # See if there is a supported reader for the file(s) provided
    try:
        # There is an assumption that the images are stackable and
        # I think it is also safe to assume that if stackable, they are of the same type
        # So only determine reader for the first one
        AICSImage.determine_reader(path)

        # The above line didn't error so we know we have a supported reader
        # Return a partial function with in_memory determined
        return partial(reader_function, in_memory=in_memory)

    # No supported reader, return None
    except exceptions.UnsupportedFileFormatError:
        print("AICSImageIO: Unsupported file format.")
        return None

    except Exception as e:
        print("AICSImageIO: exception occurred during reading...")
        print(e)
        print("If this issue looks like a problem with AICSImageIO, "
              "please file a bug report: "
              "https://github.com/AllenCellModeling/napari-aicsimageio")
        return None
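
The partial above defers all actual reading to reader_function, which is not shown here. A rough sketch of what such a function could look like under napari's reader contract (a list of (data, metadata, layer_type) tuples); the metadata choices are assumptions, not the plugin's actual implementation.

def reader_function(path: "PathLike", in_memory: bool) -> Optional[List["LayerData"]]:
    # Hypothetical sketch: read one file with aicsimageio and hand the
    # pixels to napari as a single image layer
    img = AICSImage(path)
    data = img.data if in_memory else img.dask_data

    # napari expects a list of (data, add_kwargs, layer_type) tuples
    return [(data, {"name": str(path)}, "image")]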
Example 3
    def open_path(self, path):

        if os.path.isfile(path):

            # remove existing layers from napari
            viewer.layers.select_all()
            viewer.layers.remove_selected()

            # get the metadata
            md, addmd = imf.get_metadata(path)

            # add the metadata and adapt the table display
            mdbrowser.update_metadata(md)
            mdbrowser.update_style()

            use_dask = checkbox.cbox.isChecked()
            print('Use Dask : ', use_dask)

            # get AICSImageIO object
            img = AICSImage(path)
            if use_dask:
                stack = img.dask_data
            else:
                stack = img.get_image_data()

            # add the image stack to the napari viewer
            show_image_napari(stack,
                              md,
                              blending='additive',
                              gamma=0.85,
                              rename_sliders=True)
Example 4
def open_image_stack(filepath, use_dask=False):
    """ Open a file using AICSImageIO and display it using napari

    :param filepath: filepath of the image
    :type filepath: str
    :param use_dask: use Dask Delayed reader, defaults to False
    :type use_dask: bool, optional
    """

    if os.path.isfile(filepath):

        # remove existing layers from napari
        viewer.layers.select_all()
        viewer.layers.remove_selected()

        # get the metadata
        metadata, add_metadata = imf.get_metadata(filepath)

        # add the metadata and adapt the table display
        mdbrowser.update_metadata(metadata)
        mdbrowser.update_style()

        # get AICSImageIO object
        img = AICSImage(filepath)

        if use_dask:
            stack = img.get_image_dask_data()
        else:
            stack = img.get_image_data()

        # add the image stack to the napari viewer
        show_image_napari(stack, metadata,
                          blending='additive',
                          gamma=0.85,
                          rename_sliders=True)
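
The two branches above differ only in when pixel data is read. A short illustration of the eager versus lazy access patterns on an AICSImage object (the file path is a placeholder):

img = AICSImage("example.ome.tif")

# Eager: a numpy array, all pixels read immediately
stack_np = img.get_image_data()

# Lazy: a dask array, chunks are read only when computed
stack_da = img.get_image_dask_data()
subset = stack_da[0, 0].compute()  # materializes just this slice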
Example 5
def get_img_slice(img_path_single, dim):
    '''Loads and transforms the image into the right format.'''
    img1 = AICSImage(img_path_single)
    img1 = img1.get_image_data("ZYX", C=0, S=0, T=0)  # returns a 3D ZYX numpy array
    img_temp = np.zeros((1, 1, dim, dim))
    img_temp[0] = img1
    return img_temp
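
A hypothetical call, only to show the resulting shape; it assumes the source file holds a single Z slice whose Y and X sizes both equal dim:

batch = get_img_slice("cell_slice.tif", dim=512)
print(batch.shape)  # (1, 1, 512, 512), ready to be used as a one-sample batch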
Example 6
def test_generate_thumbnail(data_dir, input_file, thumb_size, expected_thumb,
                            expected_original_size, expected_thumb_size):
    # Resolve the input file path
    input_file = data_dir / input_file

    # Mock the request
    url = f"https://example.com/{input_file}"
    responses.add(responses.GET,
                  url=url,
                  body=input_file.read_bytes(),
                  status=200)

    # Create the lambda request event
    event = _make_event({"url": url, "size": thumb_size})

    # Get the response
    response = lambda_handler(event, None)

    # Assert the request was handled with no errors
    assert response["statusCode"] == 200

    # Parse the body / the returned thumbnail
    body = json.loads(read_body(response))

    # Assert basic metadata was filled properly
    assert body["info"]["original_size"] == expected_original_size
    assert body["info"]["thumbnail_size"] == expected_thumb_size

    # Assert the produced image is the same as the expected
    actual = AICSImage(base64.b64decode(body['thumbnail'])).reader.data
    expected = AICSImage(data_dir / expected_thumb).reader.data
    assert np.array_equal(actual, expected)
Example 7
 def get_single_cell_images(self, row, return_stack=False):
     imgs = []
     channel_names = []
     imtypes = ["crop_raw", "crop_seg"]
     for imtype in imtypes:
         if imtype in row:
             path = Path(row[imtype])
             if not path.is_file():
                 path = self.control.get_staging() / f"loaddata/{row[imtype]}"
             reader = AICSImage(path)
             channel_names += reader.get_channel_names()
             img = reader.get_image_data('CZYX', S=0, T=0)
             imgs.append(img)
     try:
         name_dict = eval(row.name_dict)
         channel_names = []
         for imtype in imtypes:
             channel_names += name_dict[imtype]
     except Exception as ex:
         if not channel_names:
             raise ValueError(f"Channel names not found, {ex}")
     imgs = np.vstack(imgs)
     if return_stack:
         return imgs, channel_names
     imgs_dict = {}
     for imtype in imtypes:
         if imtype in row:
             for ch, img in zip(channel_names, imgs):
                 imgs_dict[ch] = img
     return imgs_dict
Example 8
    def open_path(self, path):

        if os.path.isfile(path):

            # remove existing layers from napari
            viewer.layers.select_all()
            viewer.layers.remove_selected()

            # get the metadata
            md, addmd = imf.get_metadata(path)

            # temporary workaround for slider / floating point issue
            # https://forum.image.sc/t/problem-with-dimension-slider-when-adding-array-as-new-layer-for-ome-tiff/39092/2?u=sebi06

            md['XScale'] = np.round(md['XScale'], 3)
            md['YScale'] = np.round(md['YScale'], 3)
            md['ZScale'] = np.round(md['ZScale'], 3)

            # get AICSImageIO object using the python wrapper for libCZI (if file is CZI)
            img = AICSImage(path)
            stack = img.get_image_data()

            # the pylibczi-based reader is only used for CZI files
            use_pylibczi = (md['ImageType'] == 'czi')

            add_napari(stack,
                       md,
                       blending='additive',
                       gamma=0.85,
                       verbose=True,
                       use_pylibczi=use_pylibczi,
                       rename_sliders=True)
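
The rounding above only matters because the scale values eventually drive napari's dimension sliders. A minimal sketch of how the rounded voxel sizes are typically handed to napari; it reuses md and stack from the snippet above, everything else is an assumption:

# one scale value per data axis; only Z/Y/X are anisotropic, so pad the rest with 1.0
scale = [1.0] * (stack.ndim - 3) + [md['ZScale'], md['YScale'], md['XScale']]
viewer.add_image(stack, blending='additive', gamma=0.85, scale=scale)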
Example 9
def load_single_image(args, fn, time_flag=False):

    if time_flag:
        img = fn[:, args.InputCh, :, :]
        img = img.astype(float)
        img = np.transpose(img, axes=(1, 0, 2, 3))
    else:
        data_reader = AICSImage(fn)
        if isinstance(args.InputCh, list):
            channel_list = args.InputCh
        else:
            channel_list = [args.InputCh]
        img = data_reader.get_image_data('CZYX', S=0, T=0, C=channel_list)

    # normalization
    if args.mode == 'train':
        for ch_idx in range(args.nchannel):
            # min-max normalize each channel and write the result back into img
            # (reassigning a local name alone would not modify img)
            struct_img = img[ch_idx, :, :, :]
            img[ch_idx, :, :, :] = (struct_img - struct_img.min()) / (
                struct_img.max() - struct_img.min()
            )
    elif not args.Normalization == 0:
        img = input_normalization(img, args)

    # rescale
    if len(args.ResizeRatio) > 0:
        img = zoom(
            img,
            (1, args.ResizeRatio[0], args.ResizeRatio[1], args.ResizeRatio[2]),
            order=1)

    return img
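
The per-channel normalization only works because the result is written back into img; rebinding the local name would leave img untouched. A tiny self-contained illustration of the difference:

import numpy as np

img = np.arange(8, dtype=float).reshape(2, 2, 2)

view = img[0]              # a view into img
view = view / view.max()   # rebinds the name to a new array; img is unchanged

img[0] = img[0] / img[0].max()  # writes through the slice; img is modified in place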
Example 10
    def _get_channels_from_path(self, image_path: str) -> List[Channel]:
        img = AICSImage(image_path)
        img.set_scene(0)

        channels = list()
        for index, name in enumerate(img.channel_names):
            channels.append(Channel(index, name))
        return channels
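
Channel is defined elsewhere in the plugin; for the snippet to be self-contained one could assume something as simple as the following (a guess, not the project's actual definition):

from dataclasses import dataclass

@dataclass
class Channel:
    index: int
    name: str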
Example 11
    def _generate_single_cell_features(
        row_index: int,
        row: pd.Series,
        cell_ceiling_adjustment: int,
        save_dir: Path,
        overwrite: bool,
    ) -> Union[SingleCellFeaturesResult, SingleCellFeaturesError]:
        # Don't use dask for image reading
        aicsimageio.use_dask(False)

        # Get the ultimate end save path for this cell
        save_path = save_dir / f"{row.CellId}.json"

        # Check skip
        if not overwrite and save_path.is_file():
            log.info(f"Skipping cell feature generation for Cell Id: {row.CellId}")
            return SingleCellFeaturesResult(row.CellId, save_path)

        # Overwrite or didn't exist
        log.info(f"Beginning cell feature generation for CellId: {row.CellId}")

        # Wrap errors for debugging later
        try:
            # Read the standardized FOV
            image = AICSImage(row.StandardizedFOVPath)

            # Preload image data
            image.data

            # Select and adjust cell shape ceiling for this cell
            adjusted = image_utils.select_and_adjust_segmentation_ceiling(
                image=image.get_image_data("CYXZ", S=0, T=0),
                cell_index=row.CellIndex,
                cell_ceiling_adjustment=cell_ceiling_adjustment,
            )

            # Crop the FOV to the segmentation portions
            cropped = image_utils.crop_raw_channels_with_segmentation(
                image=adjusted,
                channels=image.get_channel_names(),
            )

            # Generate features
            features = image_utils.get_features_from_image(cropped)

            # Save to JSON
            with open(save_path, "w") as write_out:
                json.dump(features, write_out)

            log.info(f"Completed cell feature generation for CellId: {row.CellId}")
            return SingleCellFeaturesResult(row.CellId, save_path)

        # Catch and return error
        except Exception as e:
            log.info(
                f"Failed cell feature generation for CellId: {row.CellId}. Error: {e}"
            )
            return SingleCellFeaturesError(row.CellId, str(e))
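
A hypothetical way this worker could be driven over a manifest, assuming it is reachable as a plain function and that the pandas DataFrame has CellId, CellIndex, and StandardizedFOVPath columns; the real pipeline likely distributes this step rather than looping serially:

results = []
for row_index, row in manifest.iterrows():
    results.append(
        _generate_single_cell_features(
            row_index=row_index,
            row=row,
            cell_ceiling_adjustment=7,                           # placeholder value
            save_dir=Path("local_staging/singlecellfeatures"),   # placeholder path
            overwrite=False,
        )
    )

errors = [r for r in results if isinstance(r, SingleCellFeaturesError)]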
Example 12
def load_img_Z(img_path, channel, t, divisor):
    img = AICSImage(img_path)
    img = img.get_image_data("CYX", S=0, T=t, Z=channel)
    # img = img.get_image_data("YX", S=0, T=0, C=0, Z=0)
    # print(img.shape, img.dtype)
    # with "CYX" data the shape is (C, Y, X)
    y_dim = img.shape[1]
    x_dim = img.shape[2]
    x_div = x_dim // divisor
    y_div = y_dim // divisor
    return img, x_div, y_div
Example 13
def test_known_dims(data, dims, expected_shape):
    img = AICSImage(data, known_dims=dims)
    assert img.data.shape == expected_shape
    assert img.size_x == expected_shape[5]
    assert img.size_y == expected_shape[4]
    assert img.size_z == expected_shape[3]
    assert img.size_c == expected_shape[2]
    assert img.size_t == expected_shape[1]
    assert img.size_s == expected_shape[0]
    assert img.size(dims) == data.shape
Example 14
def get_daskstack(aics_img: AICSImage) -> da.Array:

    stacks = []
    for scene in aics_img.scenes:
        aics_img.set_scene(scene)
        stacks.append(aics_img.dask_data)

    stacks = da.stack(stacks)

    return stacks
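
Because each scene stays a dask array, no pixel data is read until a slice is computed. A hedged usage example (the file path is a placeholder):

img = AICSImage("multi_scene.czi")
scenes = get_daskstack(img)

print(scenes.shape)                # lazy: (n_scenes, ...) without any pixel I/O
first_scene = scenes[0].compute()  # materializes only the first scene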
Example 15
def load_img_T(img_path, channel, z, divisor):
    img = AICSImage(img_path)
    img = img.get_image_data("CYX", S=0, Z=z,
                             T=channel)  # in my case channel is the Time
    # img = img.get_image_data("YX", S=0, T=0, C=0, Z=0)
    # print(img.shape, img.dtype)
    # with "CYX" data the shape is (C, Y, X)
    y_dim = img.shape[1]
    x_dim = img.shape[2]
    x_div = x_dim // divisor
    y_div = y_dim // divisor
    return img, x_div, y_div
Example 16
 def read_parameterized_intensity(self, index, return_intensity_names=False):
     path = f"parameterization/representations/{index}.tif"
     path = self.control.get_staging() / path
     if not path.is_file():
         raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
     code = AICSImage(path)
     intensity_names = code.get_channel_names()
     code = code.data.squeeze()
     if return_intensity_names:
         return code, intensity_names
     return code
Example 17
def load_img_Z(img_path, channel, t, divisor):
    img = AICSImage(img_path)
    img = img.get_image_data("ZYX", S=0, T=channel,
                             C=t)  ### because the image gets read in like that
    # img = img.get_image_data("YX", S=0, T=0, C=0, Z=0)
    # print(img.shape, img.dtype)
    # with "ZYX" data the shape is (Z, Y, X)
    y_dim = img.shape[1]
    x_dim = img.shape[2]
    x_div = x_dim // divisor
    y_div = y_dim // divisor
    return img, x_div, y_div
Example 18
def test_force_dims(data_shape, dims, expected):
    img = AICSImage(data=da.zeros(data_shape))
    img._reader._dims = dims
    assert img.data.shape == expected
    assert data_shape == img.get_image_data(out_orientation=dims).shape
    assert img.size_x == expected[5]
    assert img.size_y == expected[4]
    assert img.size_z == expected[3]
    assert img.size_c == expected[2]
    assert img.size_t == expected[1]
    assert img.size_s == expected[0]
    assert img.size(dims) == data_shape
Example 19
def read_and_contrast_image(
    image_path,
    image_dims="CYX",
    contrast_method="simple_quantile",
    contrast_kwargs=DEFAULT_CONTRAST_KWARGS,
    channels=DEFAULT_CHANNELS,
    channel_groups=DEFAULT_CHANNEL_GROUPS,
    verbose=False,
):
    r"""
    Load an image from a file path, return two lists: max projects per channel,
    and autocontrast versions of same.
    Args:
        image_path (str): location of input tiff image
        image_dims (str): input image dimension ordering, default="CYX"
        contrast_method (str): method for autocontrasting, default=="simple_quantile"
        contrast_kwargs (dict):, default=DEFAULT_CONTRAST_KWARGS
        channels (dict): {"name":index} map for input tiff, default=DEFAULT_CHANNELS
        channel_groups (dict): fluor/bf/seg grouping, default=DEFAULT_CHANNEL_GROUPS
        verbose (bool): print info while processing or not, default=False
    Returns:
        (Cmaxs, Cautos): tuple of two lists
            - unadjusted maxprojects per channel
            - stretched versions of same (for channels to be stretched, else unadjusted)
    """

    channel_types = {c: g for g, ms in channel_groups.items() for c in ms}

    # set which contrast function we're using
    contrast_fn = CONTRAST_METHOD[contrast_method]

    # set which contrast method gets applied to images vs segmentations
    contrast_fns = {
        grp: contrast_fn if grp != "seg" else img_as_ubyte_nowarn
        for grp, kwds in contrast_kwargs.items()
    }

    # read in all data for image and check that channel dim is correct length and all labeled
    im = AICSImage(image_path, known_dims=image_dims)
    assert dict(zip(im.dims, im.data.shape))["C"] == len(channels)

    # list of input max projections, one per channel
    Cmaxs = [im.get_image_data("YX", C=c) for c in sorted(channels.values())]

    # auto contrast each channel according to what type of image it is
    Cautos = [
        contrast_fns[channel_types[c_name]](
            Cmaxs[c_ind], **contrast_kwargs[channel_types[c_name]])
        for (c_name, c_ind) in channels.items()
    ]

    return Cmaxs, Cautos
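
A hedged usage sketch; thanks to the defaults above only a path is required, and the two returned lists are indexed per channel:

Cmaxs, Cautos = read_and_contrast_image("example_fov.tiff", verbose=True)
assert len(Cmaxs) == len(Cautos)  # one raw and one stretched 2D array per channel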
Example 20
def run_iteration(file: Path, save_path: Path) -> Path:
    # Read image
    img = AICSImage(file)

    # Select middle slice of structure channel
    data = img.get_image_data(
        "YX", S=0, T=0, C=img.get_channel_names().index("structure"), Z=img.size_z // 2,
    )

    # Write out image as png
    imwrite(save_path, data)

    return save_path
Example 21
    def execute(self, args):

        if not args.data_type.startswith('.'):
            args.data_type = '.' + args.data_type

        filenames = glob(args.raw_path + os.sep + '*' + args.data_type)
        filenames.sort()

        existing_files = glob(args.train_path + os.sep + 'img_*.ome.tif')
        print(len(existing_files))

        training_data_count = len(existing_files) // 3
        for _, fn in enumerate(filenames):

            training_data_count += 1

            # load raw
            reader = AICSImage(fn)
            struct_img = reader.get_image_data(
                "CZYX", S=0, T=0, C=[args.input_channel]
            ).astype(np.float32)
            struct_img = input_normalization(struct_img, args)

            # load seg
            seg_fn = args.seg_path + os.sep + os.path.basename(
                fn)[:-1 * len(args.data_type)] + '_struct_segmentation.tiff'
            reader = AICSImage(seg_fn)
            seg = reader.get_image_data("ZYX", S=0, T=0, C=0) > 0.01
            seg = seg.astype(np.uint8)
            seg[seg > 0] = 1

            # excluding mask
            cmap = np.ones(seg.shape, dtype=np.float32)
            mask_fn = args.mask_path + os.sep + os.path.basename(
                fn)[:-1 * len(args.data_type)] + '_mask.tiff'
            if os.path.isfile(mask_fn):
                reader = AICSImage(mask_fn)
                mask = reader.get_image_data("ZYX", S=0, T=0, C=0)
                cmap[mask == 0] = 0

            with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                               f'{training_data_count:03}' +
                               '.ome.tif') as writer:
                writer.save(struct_img)

            with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                               f'{training_data_count:03}' +
                               '_GT.ome.tif') as writer:
                writer.save(seg)

            with OmeTiffWriter(args.train_path + os.sep + 'img_' +
                               f'{training_data_count:03}' +
                               '_CM.ome.tif') as writer:
                writer.save(cmap)
Example 22
def test_physical_pixel_size(resources_dir, filename, expected_sizes):
    # Get filepath
    f = resources_dir / filename

    # Check that there are no open file pointers after init
    proc = Process()
    assert str(f) not in [f.path for f in proc.open_files()]

    # Check basics
    img = AICSImage(f)
    assert img.get_physical_pixel_size() == expected_sizes

    # Check that there are no open file pointers after basics
    assert str(f) not in [f.path for f in proc.open_files()]
Example 23
def test_generate_thumbnail(
        data_dir,
        input_file,
        params,
        expected_thumb,
        expected_original_size,
        expected_thumb_size,
        num_pages,
        status
):
    # don't actually modify the environment in tests
    with patch.object(index, 'set_pdf_env', return_value=None) as set_env:
        # Resolve the input file path
        input_file = data_dir / input_file
        # Mock the request
        url = f"https://example.com/{input_file}"
        responses.add(
            responses.GET,
            url=url,
            body=input_file.read_bytes(),
            status=200
        )
        # Create the lambda request event
        event = _make_event({"url": url, **params})
        # Get the response
        response = index.lambda_handler(event, None)
        # Assert the request was handled with the expected status
        assert response["statusCode"] == status, f"response: {response}"
        # Only check the body and expected image if it's a successful call
        if status != 200:
            return
        # Parse the body / the returned thumbnail
        body = json.loads(read_body(response))
        # Assert basic metadata was filled properly
        assert body["info"]["thumbnail_size"] == expected_thumb_size
        if expected_original_size:  # PDFs don't have an expected size
            assert body["info"]["original_size"] == expected_original_size
        if "countPages" in params:
            assert body["info"]["page_count"] == num_pages
        # Assert the produced image is the same as the expected
        if params.get('input') == 'pdf':
            actual = Image.open(BytesIO(base64.b64decode(body['thumbnail'])))
            expected = Image.open(data_dir / expected_thumb)
            actual_array = np.array(actual)
            expected_array = np.array(expected)
            assert set_env.call_count == 1
            assert actual_array.shape == expected_array.shape
            assert np.allclose(expected_array, actual_array, atol=15, rtol=0.1)
        else:
            actual = AICSImage(base64.b64decode(body['thumbnail'])).reader.data
            expected = AICSImage(data_dir / expected_thumb).reader.data
            assert np.array_equal(actual, expected)
Example 24
 def worker_full(file, df):
     df_file = df[df["file"] == file]
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=FutureWarning)
         image = AICSImage(file)
     for i, row in df_file.iterrows():
         image_ZYX = normalize(image, channel=row["channel_index"])
         with OmeTiffWriter(row["normalized_single_channel_image"],
                            overwrite_file=True) as writer:
             writer.save(
                 image_ZYX,
                 channel_names=row["channel_content"],
                 pixels_physical_size=image.get_physical_pixel_size(),
             )
Example 25
def test_channel_names(resources_dir, filename, expected_channel_names):
    # Get filepath
    f = resources_dir / filename

    # Check that there are no open file pointers after init
    proc = Process()
    assert str(f) not in [f.path for f in proc.open_files()]

    # Check basics
    img = AICSImage(f)
    assert img.get_channel_names() == expected_channel_names
    assert len(img.get_channel_names()) == img.size_c

    # Check that there are no open file pointers after basics
    assert str(f) not in [f.path for f in proc.open_files()]
Example 26
def test_known_dims(data, dims, expected_shape):
    # Check basics
    with Profiler() as prof:
        img = AICSImage(data, known_dims=dims)
        assert img.data.shape == expected_shape
        assert img.size_x == expected_shape[5]
        assert img.size_y == expected_shape[4]
        assert img.size_z == expected_shape[3]
        assert img.size_c == expected_shape[2]
        assert img.size_t == expected_shape[1]
        assert img.size_s == expected_shape[0]
        assert img.size(dims) == data.shape

        # Due to reshape and transpose there will be 2 tasks in the graph
        assert len(prof.results) == 2
Example 27
def combineFiles(files, out, channel_names=None):
    finalimage = None
    for f in files:
        ai = AICSImage(f)
        # ai.data is 6d.
        image = ai.data
        if image.dtype == numpy.float32:
            # normalizes data in range 0 - uint16max
            image = image.clip(min=0.0)
            image = image / image.max()
            image = 65535 * image
            # convert float to uint16
            image = image.astype(numpy.uint16)
        if finalimage is None:
            # start with an ndarray (not a list) so a single input file still works
            finalimage = numpy.array([image[0][0]])
        else:
            finalimage = numpy.append(finalimage, [image[0][0]], axis=1)
    print(finalimage.shape)
    finalimage = finalimage.transpose([0, 2, 1, 3, 4])
    with OmeTiffWriter(file_path=out, overwrite_file=True) as writer:
        writer.save(
            finalimage,
            channel_names=channel_names,
            pixels_physical_size=[0.108, 0.108, 0.290],
        )
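
A hypothetical call, assuming each input file contributes its channels in order and channel_names covers the combined stack:

combineFiles(
    ["raw.ome.tif", "segmentation.ome.tif"],                       # placeholder inputs
    "combined.ome.tif",                                            # placeholder output
    channel_names=["membrane", "dna", "membrane_seg", "dna_seg"],  # placeholder names
)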
Example 28
    def _get_channel_data_default(self, channel_index: int, layer: Layer):
        if len(layer.data.shape) >= 6:
            # Has scenes
            image_from_layer = [
                layer.data[i, :, :, :, :, :]
                for i in range(layer.data.shape[0])
            ]
        else:
            image_from_layer = layer.data

        img = AICSImage(image_from_layer)  # gives us a 6D image

        # use get_image_data() to parse out ZYX dimensions
        # segmenter requires 3D images.
        img.set_scene(0)
        return img.get_image_data("ZYX", T=0, C=channel_index)
Example 29
 def test_metadata(self):
     packing_list = [[0], [1, 2], [3, 4]]
     prefix = "atlas"
     # arrange
     with AICSImage("img/img40_1.ome.tif") as image:
         # act
         atlas = generate_texture_atlas(
             image, prefix=prefix, pack_order=packing_list
         )
     # assert
     metadata = atlas.get_metadata()
     self.assertTrue(
         all(
             key in metadata
             for key in (
                 "tile_width",
                 "tile_height",
                 "width",
                 "height",
                 "channels",
                 "channel_names",
                 "tiles",
                 "rows",
                 "cols",
                 "atlas_width",
                 "atlas_height",
                 "images",
             )
         )
     )
     self.assertTrue(len(metadata["channel_names"]) == metadata["channels"])
Example 30
def test_support_for_ndarray(arr):
    # Check basics
    with Profiler() as prof:
        actual_reader = AICSImage.determine_reader(arr)
        assert actual_reader == readers.ArrayLikeReader
        # Check that basic details don't require task computation
        assert len(prof.results) == 0