def run(self):
        self.logger.name = type(self).__name__
        self.logger.setLevel(self.args["log_level"])

        ophys_etl_commit_sha = os.environ.get("OPHYS_ETL_COMMIT_SHA",
                                              "local build")
        self.logger.info(f"OPHYS_ETL_COMMIT_SHA: {ophys_etl_commit_sha}")
        t0 = time.time()

        video_path = pathlib.Path(self.args["video_path"])
        exp_id = video_path.name.split("_")[0]

        roi_path = pathlib.Path(self.args["roi_path"])
        graph_path = pathlib.Path(self.args["graph_path"])

        proj = get_max_and_avg(video_path)
        self.logger.info("Calculated mean and max images...")
        avg_img = proj["avg"]
        max_img = proj["max"]
        corr_img = graph_to_img(graph_path)
        self.logger.info("Calculated correlation image...")

        quantiles = (self.args["low_quantile"], self.args["high_quantile"])
        q0, q1 = np.quantile(max_img, quantiles)
        max_img = normalize_array(array=max_img,
                                  lower_cutoff=q0,
                                  upper_cutoff=q1)

        q0, q1 = np.quantile(avg_img, quantiles)
        avg_img = normalize_array(array=avg_img,
                                  lower_cutoff=q0,
                                  upper_cutoff=q1)

        q0, q1 = np.quantile(corr_img, quantiles)
        corr_img = normalize_array(array=corr_img,
                                   lower_cutoff=q0,
                                   upper_cutoff=q1)
        self.logger.info("Normalized images...")

        with open(roi_path, "rb") as in_file:
            extract_roi_list = sanitize_extract_roi_list(json.load(in_file))

        selected_rois = self.args['selected_rois']
        if selected_rois is None:
            selected_rois = [roi['id'] for roi in extract_roi_list]
        selected_rois = set(selected_rois)

        self.logger.info("Creating and writing ROI artifacts...")
        for roi in extract_roi_list:
            if roi['id'] not in selected_rois:
                continue
            self._write_thumbnails(extract_roi=roi,
                                   max_img=max_img,
                                   avg_img=avg_img,
                                   corr_img=corr_img,
                                   exp_id=exp_id)

        self.logger.info(f"Created ROI artifacts in {time.time()-t0:.0f} "
                         "seconds.")
def projection_process(data: np.ndarray,
                       projection: str = "max") -> np.ndarray:
    """

    Parameters
    ----------
    data: np.ndarray
        nframes x nrows x ncols, uint16
    projection: str
        "max" or "avg"

    Returns
    -------
    proj: np.ndarray
        nrows x ncols, uint8

    """
    if projection == "max":
        proj = np.max(data, axis=0)
    elif projection == "avg":
        proj = np.mean(data, axis=0)
    else:
        raise ValueError("projection can be \"max\" or \"avg\" not "
                         f"{projection}")
    return normalize_array(proj)
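A minimal usage-style sketch of the projection-and-rescale pattern above, in plain numpy; the clip-and-rescale step is an assumption about normalize_array inferred from the call sites on this page, not from documentation:

import numpy as np

rng = np.random.default_rng(0)
video = rng.integers(0, 2**16, (100, 24, 24)).astype(np.uint16)

max_img = video.max(axis=0)    # projection == "max"
avg_img = video.mean(axis=0)   # projection == "avg"

# quantile cutoffs followed by a linear rescale to uint8; this mirrors
# what the calls to normalize_array above appear to do (an assumption
# inferred from usage)
q0, q1 = np.quantile(avg_img, (0.1, 0.999))
clipped = np.clip(avg_img, q0, q1)
avg_uint8 = np.round(255.0 * (clipped - q0) / (q1 - q0)).astype(np.uint8)
assert avg_uint8.dtype == np.uint8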
def surface_fixture(splitter_tmp_dir_fixture, roi_index_to_z_fixture):
    """
    Create the raw surface TIFF file as well as the
    expected TIFFs associated with individual experiments.

    Return a dict mapping
    'raw' -> the path to the raw surface TIFF

    'expected_{roi_index}' -> the path to the expected TIFF
    for an individual ROI
    """
    n_rois = len(roi_index_to_z_fixture)
    tmp_dir = splitter_tmp_dir_fixture
    raw_tiff_path = tempfile.mkstemp(dir=tmp_dir, suffix='_surface.tiff')[1]
    expected_path_list = []
    for ii in range(n_rois):
        expected_path = tmp_dir / f'expected_surface_{ii}.tiff'
        expected_path_list.append(expected_path)
    n_pages = 7

    data_list = []
    expected_img_list = []

    rng = np.random.default_rng(554433)
    for ii in range(n_rois):
        this_data = rng.integers(0, 100, (n_pages, 24, 24))
        this_expected = normalize_array(this_data.mean(axis=0))
        data_list.append(this_data)
        expected_img_list.append(this_expected)

    tiff_data = []
    for i_page in range(n_pages):
        for ii in range(n_rois):
            tiff_data.append(data_list[ii][i_page, :, :])

    tifffile.imsave(raw_tiff_path, tiff_data)
    for expected_path, expected_img in zip(expected_path_list,
                                           expected_img_list):
        tifffile.imsave(expected_path, expected_img)

    result = dict()
    result['raw'] = raw_tiff_path
    for ii in range(n_rois):
        str_path = str(expected_path_list[ii].resolve().absolute())
        result[f'expected_{ii}'] = str_path
    yield result

    for key in result:
        this_path = pathlib.Path(result[key])
        if this_path.is_file():
            this_path.unlink()
def depth_fixture(splitter_tmp_dir_fixture, image_metadata_fixture,
                  z_to_exp_id_fixture):
    """
    Create the raw depth TIFF file as well as the
    expected TIFFs associated with individual experiments.

    Return a dict mapping
    'raw' -> the path to the raw depth TIFF

    'expected_{exp_id}' -> the path to the expected TIFF
    for an individual experiment
    """
    rng = np.random.default_rng(334422)
    tmp_dir = splitter_tmp_dir_fixture
    z_list = image_metadata_fixture[0]['SI.hStackManager.zsAllActuators']
    z_to_data = dict()
    n_pages = 11
    for z_pair in z_list:
        z_pair = tuple(z_pair)
        z_to_data[z_pair] = dict()
        for zz in z_pair:
            data = rng.integers(zz, 2 * zz, (n_pages, 24, 24))
            z_to_data[z_pair][zz] = data
    tiff_data = []
    for i_page in range(n_pages):
        for z_pair in z_list:
            z_pair = tuple(z_pair)
            for zz in z_pair:
                tiff_data.append(z_to_data[z_pair][zz][i_page, :, :])
    raw_path = tempfile.mkstemp(dir=tmp_dir, suffix='_depth.tiff')[1]
    tifffile.imsave(raw_path, tiff_data)
    result = dict()
    result['raw'] = raw_path
    for z_pair in z_list:
        z_pair = tuple(z_pair)
        for zz in z_pair:
            exp_id = z_to_exp_id_fixture[z_pair][zz]
            expected_path = tmp_dir / f'expected_{exp_id}_depth.tiff'
            expected_data = np.mean(z_to_data[z_pair][zz], axis=0)
            expected_data = normalize_array(array=expected_data)
            tifffile.imsave(expected_path, expected_data)
            str_path = str(expected_path.resolve().absolute())
            result[f'expected_{exp_id}'] = str_path
    yield result

    for key in result:
        this_path = pathlib.Path(result[key])
        if this_path.is_file():
            this_path.unlink()
Example #5
    def run(self):

        img = median_filtered_max_projection_from_path(
            self.args['video_path'],
            self.args['input_frame_rate'],
            self.args['downsampled_frame_rate'],
            self.args['median_filter_kernel_size'],
            self.args['n_parallel_workers'],
            n_frames_at_once=self.args['n_frames_at_once'])

        with h5py.File(self.args['full_output_path'], 'w') as out_file:
            out_file.create_dataset('max_projection', data=img)

        img = PIL.Image.fromarray(normalize_array(img))
        img.save(self.args['image_path'])
def test_array_to_rgb(lower_cutoff, upper_cutoff):

    img = np.arange(144, dtype=float).reshape(12, 12)
    scaled = au.normalize_array(array=img,
                                lower_cutoff=lower_cutoff,
                                upper_cutoff=upper_cutoff)

    rgb = au.array_to_rgb(input_array=img,
                          lower_cutoff=lower_cutoff,
                          upper_cutoff=upper_cutoff)

    assert rgb.dtype == np.uint8
    assert rgb.shape == (12, 12, 3)
    for ic in range(3):
        np.testing.assert_array_equal(rgb[:, :, ic], scaled)
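Judging from the assertions above, array_to_rgb appears to stack the normalized grayscale image into three identical channels. A hedged pure-numpy sketch of that equivalence (the clip-and-rescale stand-in below is an assumption, not the library code):

import numpy as np

img = np.arange(144, dtype=float).reshape(12, 12)
lower_cutoff, upper_cutoff = 10.0, 130.0

# assumed normalize_array behavior: clip, rescale linearly, cast to uint8
scaled = np.round(
    255.0 * (np.clip(img, lower_cutoff, upper_cutoff) - lower_cutoff)
    / (upper_cutoff - lower_cutoff)).astype(np.uint8)

# stacking the scaled image into three channels reproduces the
# per-channel equality the test asserts
rgb = np.stack([scaled] * 3, axis=-1)
assert rgb.shape == (12, 12, 3) and rgb.dtype == np.uint8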
Example #7
def classifier2021_corr_png_fixture(classifier2021_tmpdir_fixture,
                                    classifier2021_corr_graph_fixture):

    tmpdir = classifier2021_tmpdir_fixture
    png_path = tempfile.mkstemp(dir=tmpdir, prefix='corr_png_',
                                suffix='.png')[1]

    img = graph_to_img(classifier2021_corr_graph_fixture,
                       attribute_name='filtered_hnc_Gaussian')

    img = normalize_array(img)
    img = PIL.Image.fromarray(img)
    img.save(png_path)
    png_path = pathlib.Path(png_path)
    yield png_path
    if png_path.is_file():
        png_path.unlink()
Example #8
def downsample_normalize(movie_path: Path, frame_rate: float, bin_size: float,
                         lower_quantile: float,
                         upper_quantile: float) -> np.ndarray:
    """reads in a movie (nframes x nrows x ncols), downsamples,
    creates an average projection, and normalizes according to
    quantiles in that projection.

    Parameters
    ----------
    movie_path: Path
        path to an h5 file, containing an (nframes x nrows x ncol) dataset
        named 'data'
    frame_rate: float
        frame rate of the movie specified by 'movie_path'
    bin_size: float
        desired duration in seconds of a downsampled bin, i.e. the reciprocal
        of the desired downsampled frame rate.
    lower_quantile: float
        arg supplied to `np.quantile()` to determine lower cutoff value from
        avg projection for normalization.
    upper_quantile: float
        arg supplied to `np.quantile()` to determine upper cutoff value from
        avg projection for normalization.

    Returns
    -------
    ds: np.ndarray
        a downsampled and normalized array

    Notes
    -----
    This strategy was satisfactory in the labeling app for maintaining
    consistent visibility.

    """
    ds = downsample_h5_video(movie_path,
                             input_fps=frame_rate,
                             output_fps=1.0 / bin_size)
    avg_projection = ds.mean(axis=0)
    lower_cutoff, upper_cutoff = np.quantile(avg_projection.flatten(),
                                             (lower_quantile, upper_quantile))
    ds = normalize_array(ds,
                         lower_cutoff=lower_cutoff,
                         upper_cutoff=upper_cutoff)
    return ds
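A minimal sketch of the same downsample-then-normalize strategy in plain numpy, with bin-averaging standing in for downsample_h5_video (the reshape assumes the frame count divides evenly into bins, which the real helper presumably does not require):

import numpy as np

rng = np.random.default_rng(1)
movie = rng.random((120, 16, 16))   # 120 frames at a nominal 30 Hz
frame_rate, bin_size = 30.0, 1.0    # 1-second bins -> 1 Hz output

# average consecutive frames into bins (stand-in for downsample_h5_video)
frames_per_bin = int(frame_rate * bin_size)
ds = movie.reshape(-1, frames_per_bin, 16, 16).mean(axis=1)

# quantile cutoffs from the average projection, then rescale to uint8
avg_projection = ds.mean(axis=0)
lower, upper = np.quantile(avg_projection, (0.1, 0.999))
ds = np.clip(ds, lower, upper)
ds = np.round(255.0 * (ds - lower) / (upper - lower)).astype(np.uint8)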
def test_thumbnail_from_path(tmpdir, example_unnormalized_rgb_video,
                             example_unnormalized_rgb_video_path, min_max,
                             quantiles, timesteps):
    """
    Test thumbnail_from_path by comparing output to result
    from thumbnail_from_array
    """

    if timesteps is None:
        n_t = example_unnormalized_rgb_video.shape[0]
    else:
        n_t = len(timesteps)

    sub_video = np.copy(example_unnormalized_rgb_video[:, 18:30, 14:29, :])

    if quantiles is not None:
        local_min_max = np.quantile(example_unnormalized_rgb_video, quantiles)
    else:
        local_min_max = min_max

    sub_video = normalize_array(sub_video,
                                lower_cutoff=local_min_max[0],
                                upper_cutoff=local_min_max[1])

    control_video = thumbnail_video_from_array(sub_video, (0, 0), (12, 15),
                                               tmp_dir=pathlib.Path(tmpdir),
                                               timesteps=timesteps)

    test_video = thumbnail_video_from_path(example_unnormalized_rgb_video_path,
                                           (18, 14), (12, 15),
                                           tmp_dir=pathlib.Path(tmpdir),
                                           min_max=min_max,
                                           quantiles=quantiles,
                                           timesteps=timesteps)

    assert test_video.origin == (18, 14)
    assert test_video.frame_shape == (12, 15)
    control_data = imageio.mimread(control_video.video_path)
    test_data = imageio.mimread(test_video.video_path)
    assert len(control_data) == len(test_data)
    assert len(test_data) == n_t
    for ii in range(len(control_data)):
        np.testing.assert_array_equal(control_data[ii], test_data[ii])
def create_correlation_projection(file_path: pathlib.Path) -> np.ndarray:
    """
    Parameters
    ----------
    file_path: pathlib.Path
        Path to correlation projection data (either pkl data
        containing a graph or png file containing an image)

    Returns
    -------
    correlation_projection: np.ndarray
        Scaled to np.uint8
    """
    if str(file_path).endswith('png'):
        correlation_img_data = np.array(PIL.Image.open(file_path, 'r'))
    else:
        correlation_img_data = graph_to_img(file_path)

    correlation_img_data = normalize_array(correlation_img_data)
    return correlation_img_data
def test_runner(tmpdir, video_data_fixture, video_path_fixture,
                n_frames_at_once):

    input_frame_rate = 6.0
    downsampled_frame_rate = 4.0
    median_kernel_size = 3
    n_processors = 3
    expected = median_filtered_max_projection_from_array(
        video_data_fixture, input_frame_rate, downsampled_frame_rate,
        median_kernel_size, n_processors)

    args = dict()
    args['video_path'] = video_path_fixture
    args['input_frame_rate'] = input_frame_rate
    args['downsampled_frame_rate'] = downsampled_frame_rate
    args['n_parallel_workers'] = n_processors
    args['median_filter_kernel_size'] = median_kernel_size
    args['n_frames_at_once'] = n_frames_at_once

    image_path = tempfile.mkstemp(dir=tmpdir, prefix='image_',
                                  suffix='.png')[1]

    full_path = tempfile.mkstemp(dir=tmpdir, prefix='data_', suffix='.h5')[1]

    args['image_path'] = image_path
    args['full_output_path'] = full_path

    runner = MedianFilteredMaxProjectionRunner(args=[], input_data=args)
    runner.run()

    assert pathlib.Path(image_path).is_file()
    assert pathlib.Path(full_path).is_file()
    with h5py.File(full_path, 'r') as in_file:
        actual = in_file['max_projection'][()]
    np.testing.assert_array_equal(actual, expected)

    expected = normalize_array(expected)
    actual = np.array(PIL.Image.open(image_path, 'r'))
    np.testing.assert_array_equal(expected, actual)
    def write_output_file(self, i_roi: int, z_value: Optional[float],
                          output_path: pathlib.Path) -> None:
        """
        Write the image created by averaging all of the TIFF
        pages associated with an (i_roi, z_value) pair to a TIFF
        file.

        Parameters
        ----------
        i_roi: int

        z_value: Optional[float]
            If None, will be detected automatically (assuming there
            is only one)

        output_path: pathlib.Path
            Path to file to be written

        Returns
        -------
        None
            Output is written to output_path
        """

        if output_path.suffix not in ('.tif', '.tiff'):
            msg = "expected .tiff output path; "
            msg += f"you specified {output_path.resolve().absolute()}"
            raise ValueError(msg)

        if z_value is None:
            z_value = self._get_z_value(i_roi=i_roi)
        data = np.array(self._get_pages(i_roi=i_roi, z_value=z_value))
        avg_img = np.mean(data, axis=0)
        avg_img = normalize_array(array=avg_img,
                                  lower_cutoff=None,
                                  upper_cutoff=None)
        tifffile.imsave(output_path, avg_img)
        return None
Example #13
def plot_rois_over_img(
        img: np.ndarray,
        roi_list: Union[List[OphysROI], List[Dict]],
        color: Union[Tuple[int, int, int],
                     Dict[int, Tuple[int, int, int]]],
        alpha: float = 0.5) -> np.ndarray:
    """
    Plot contours from a list of ROIs over a provided image

    Parameters
    ----------
    img: np.ndarray
        The image, either grayscale or RGB

    roi_list: Union[List[OphysROI], List[Dict]]
        A list of ROIs represented either as an OphysROI
        or an ExtractROI

    color: Union[Tuple[int, int, int],
                 Dict[int, Tuple[int, int, int]]]
        Either a tuple indicating a single RGB color for all ROIs
        or a dict mapping ROI ID to an RGB color (as a tuple of ints)

    alpha: float
        The blending weight of the contour color (0 leaves the
        image unchanged; 1 paints the contour at full opacity)

    Returns
    -------
    new_img: np.ndarray
        An RGB image with the ROIs overplotted (does not
        modify img in place)

    Notes
    -----
    Unless the background image is blank, the image is automatically
    scaled so that the full dynamic range of pixels is cast to the
    range [0, 255]. If you want to do something more clever with scaling
    in the image, you should do so before passing it to this method.

    If the image is blank (all pixels have the same value), then the
    image will be set to a blank black background (0, 0, 0).
    """
    if len(img.shape) > 3 or len(img.shape) < 2:
        msg = f"Cannot handle image with shape {img.shape}"
        raise ValueError(msg)
    elif len(img.shape) == 3:
        if img.shape[2] > 3:
            msg = f"Cannot handle image with shape {img.shape}"
            raise ValueError(msg)

    # detect if image is blank
    is_blank = _is_img_blank(img=img)

    if is_blank:
        # if the image is blank, just create an array of zeros
        img = np.zeros((img.shape[0],
                        img.shape[1],
                        3), dtype=np.uint8)

    elif len(img.shape) == 2:
        img = array_to_rgb(img)
    else:
        new_img = normalize_array(array=img)
        img = new_img

    new_img = add_list_of_roi_contours_to_img(
                img=img,
                roi_list=roi_list,
                color=color,
                alpha=alpha)
    return new_img
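The role of alpha is a per-channel convex combination of the contour color with the underlying pixel, the same arithmetic that the contour test near the bottom of this page checks. A small illustrative sketch:

import numpy as np

def blend_pixel(base_rgb, color_rgb, alpha):
    # round(alpha * color + (1 - alpha) * pixel), applied per channel
    base = np.asarray(base_rgb, dtype=float)
    color = np.asarray(color_rgb, dtype=float)
    return np.round(alpha * color + (1.0 - alpha) * base).astype(np.uint8)

print(blend_pixel((10, 20, 30), (0, 255, 0), 0.5))  # [  5 138  15]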
def _create_image_tiff(
    tmp_dir: pathlib.Path,
    z_value_list: List[int],
    n_rois: int,
    use_zs: bool = False,
    is_surface: bool = False
) -> Tuple[pathlib.Path,
           Dict[Tuple[int, int], np.ndarray],
           Dict[Tuple[int, int], List[np.ndarray]],
           dict]:
    """
    A fixture simulating a depth TIFF sampling n_rois ROIs,
    with len(z_value_list) // n_rois z values at each ROI.

    if use_zs == True, then use SI.hStackManager.zs
    in place of SI.hStackManager.zsAllActuators

    if is_surface == True, SI.hStackManager.zsAllActuators
    will look like [[z0, 0], [z1, 0], [z2, 0]...]

    Returns
    -------
    the path to the TIFF

    a dict mapping roi_id, z -> normalized average image

    a dict mapping roi_id, z -> expected tiff pages

    a dict that our mock of tifffile.read_scanimage_metadata needs
    to return
    """

    avg_img_lookup = dict()
    page_lookup = dict()
    tiff_pages = []
    n_pages = 5
    n_z_per_roi = len(z_value_list) // n_rois
    for i_page in range(n_pages):
        for i_z, z_value in enumerate(z_value_list):
            i_roi = i_z // n_z_per_roi
            if (i_roi, z_value) not in page_lookup:
                page_lookup[(i_roi, z_value)] = []
            value = i_roi + z_value + i_page * len(z_value_list)
            page = np.arange(value, value + 24 * 24).astype(np.uint16)
            page = page.reshape((24, 24))
            page[i_roi:i_roi + 4, i_roi:i_roi + 4] = 3
            page[i_z:i_z + 2, i_z:i_z + 2] = 0
            tiff_pages.append(page)
            page_lookup[(i_roi, z_value)].append(page)
    tmp_path = pathlib.Path(tempfile.mkstemp(dir=tmp_dir, suffix='.tiff')[1])
    tifffile.imsave(tmp_path, tiff_pages)

    tiff_pages = np.array(tiff_pages)

    for i_z, z_value in enumerate(z_value_list):
        sub_arr = tiff_pages[i_z::len(z_value_list), :, :]
        mean_img = np.mean(sub_arr, axis=0)
        roi_id = i_z // n_z_per_roi
        avg_img_lookup[(roi_id, z_value)] = normalize_array(mean_img)

    z_array = []
    if is_surface:
        for z_value in z_value_list:
            z_array.append([z_value, 0])
    else:
        for ii in range(0, len(z_value_list), 2):
            z0 = z_value_list[ii]
            z1 = z_value_list[1 + ii]
            z_array.append([z0, z1])

    metadata = []
    if use_zs:
        key_name = 'SI.hStackManager.zs'
    else:
        key_name = 'SI.hStackManager.zsAllActuators'
    metadata.append({key_name: z_array})

    roi_list = []
    for ii in range(0, len(z_value_list), n_z_per_roi):
        this_list = copy.deepcopy(list(z_value_list[ii:ii + n_z_per_roi]))
        this_list.sort()
        roi_list.append({'zs': this_list})

    roi_metadata = {'RoiGroups': {'imagingRoiGroup': {'rois': roi_list}}}

    metadata.append(roi_metadata)

    return (tmp_path, avg_img_lookup, page_lookup, metadata)
Example #15
def test_labeler_artifact_generator(
        tmp_path_factory, classifier2021_video_fixture,
        classifier2021_video_hash_fixture, suite2p_roi_fixture,
        suite2p_roi_hash_fixture, classifier2021_corr_graph_fixture,
        classifier2021_corr_graph_hash_fixture,
        classifier2021_corr_png_fixture, classifier2021_corr_png_hash_fixture,
        video_lower_quantile, video_upper_quantile, projection_lower_quantile,
        projection_upper_quantile, use_graph, with_motion_border):
    """
    Test that LabelerArtifactGenerator runs and produces expected output
    """

    tmpdir = tmp_path_factory.mktemp('full_artifact_generation')
    if with_motion_border:
        motion_path = pathlib.Path(
            tempfile.mkstemp(dir=tmpdir, suffix='.csv')[1])
        with open(motion_path, 'w') as out_file:
            out_file.write('x,y\n')
            out_file.write('5,6\n')
            out_file.write('14,-3\n')
        expected_motion_border = {
            'bottom': 6.0,
            'top': 3.0,
            'right_side': 14.0,
            'left_side': 0.0
        }

        motion_path = str(motion_path.resolve().absolute())

    else:
        motion_path = None
        expected_motion_border = {
            'top': 0,
            'bottom': 0,
            'left_side': 0,
            'right_side': 0
        }

    if use_graph:
        corr_fixture = classifier2021_corr_graph_fixture
        corr_hash = classifier2021_corr_graph_hash_fixture
    else:
        corr_fixture = classifier2021_corr_png_fixture
        corr_hash = classifier2021_corr_png_hash_fixture

    output_tuple = tempfile.mkstemp(dir=tmpdir,
                                    prefix='artifact_file_',
                                    suffix='.h5')

    # without this, got a "too many files open" error
    os.close(output_tuple[0])

    output_path = pathlib.Path(output_tuple[1])

    # because tempfile.mkstemp actually creates the file
    output_path.unlink()

    input_data = dict()
    input_data['video_path'] = str(classifier2021_video_fixture)
    input_data['roi_path'] = str(suite2p_roi_fixture)
    input_data['correlation_path'] = str(corr_fixture)
    input_data['artifact_path'] = str(output_path)
    input_data['clobber'] = False
    input_data['video_lower_quantile'] = video_lower_quantile
    input_data['video_upper_quantile'] = video_upper_quantile
    input_data['projection_lower_quantile'] = projection_lower_quantile
    input_data['projection_upper_quantile'] = projection_upper_quantile
    input_data['motion_border_path'] = motion_path

    generator = LabelerArtifactGenerator(input_data=input_data, args=[])
    generator.run()

    assert output_path.is_file()

    with h5py.File(output_path, 'r') as artifact_file:

        motion_border = json.loads(
            artifact_file['motion_border'][()].decode('utf-8'))
        assert motion_border == expected_motion_border
        # test that ROIs were written correctly
        with open(suite2p_roi_fixture, 'rb') as in_file:
            expected_rois = json.load(in_file)
        expected_rois = sanitize_extract_roi_list(expected_rois)

        artifact_rois = json.loads(artifact_file['rois'][()].decode('utf-8'))

        assert expected_rois == artifact_rois

        # test that all ROIs appear in color map
        color_map = json.loads(
            artifact_file['roi_color_map'][()].decode('utf-8'))
        assert len(color_map) == len(expected_rois)
        for roi in expected_rois:
            assert str(roi['id']) in color_map

        # test that traces were written correctly
        ophys_rois = [extract_roi_to_ophys_roi(roi) for roi in expected_rois]
        expected_traces = get_traces(classifier2021_video_fixture, ophys_rois)

        for roi_id in expected_traces:
            np.testing.assert_array_equal(
                expected_traces[roi_id], artifact_file[f'traces/{roi_id}'][()])

        # test that the scaled video data was written correctly
        assert artifact_file['video_data'].chunks is not None
        scaled_video = artifact_file['video_data'][()]

        with h5py.File(classifier2021_video_fixture, 'r') as raw_file:
            raw_video = raw_file['data'][()]
        raw_max = np.max(raw_video, axis=0)
        raw_avg = np.mean(raw_video, axis=0)

        mn, mx = np.quantile(raw_video,
                             (video_lower_quantile, video_upper_quantile))

        raw_video = np.where(raw_video > mn, raw_video, mn)
        raw_video = np.where(raw_video < mx, raw_video, mx)
        delta = mx - mn
        raw_video = raw_video - mn
        raw_video = raw_video.astype(float)
        raw_video = np.round(255.0 * raw_video / delta).astype(np.uint8)
        np.testing.assert_array_equal(raw_video, scaled_video)
        del raw_video
        del scaled_video

        # test that max and avg projection images were written correctly
        for raw_img, img_key in zip((raw_max, raw_avg),
                                    ('max_projection', 'avg_projection')):
            artifact_img = artifact_file[img_key][()]
            mn, mx = np.quantile(
                raw_img,
                (projection_lower_quantile, projection_upper_quantile))
            raw_img = np.where(raw_img > mn, raw_img, mn)
            raw_img = np.where(raw_img < mx, raw_img, mx)
            raw_img = raw_img.astype(float)
            np.testing.assert_array_equal(raw_img, artifact_img)

        artifact_corr = artifact_file['correlation_projection'][()]
        if use_graph:
            expected_corr = normalize_array(
                graph_to_img(corr_fixture,
                             attribute_name='filtered_hnc_Gaussian'))
        else:
            expected_corr = normalize_array(
                np.array(PIL.Image.open(corr_fixture, 'r')))

        np.testing.assert_array_equal(artifact_corr, expected_corr)

        metadata = json.loads(artifact_file['metadata'][()].decode('utf-8'))

    # test that metadata has the right contents
    assert metadata['video']['path'] == str(classifier2021_video_fixture)
    assert metadata['video']['hash'] == classifier2021_video_hash_fixture

    assert metadata['rois']['path'] == str(suite2p_roi_fixture)
    assert metadata['rois']['hash'] == suite2p_roi_hash_fixture

    assert metadata['correlation']['path'] == str(corr_fixture)
    assert metadata['correlation']['hash'] == corr_hash

    assert metadata['generator_args'] == input_data
    if with_motion_border:
        assert 'motion_csv' in metadata
    else:
        assert 'motion_csv' not in metadata

    tmpdir = pathlib.Path(tmpdir)
    path_list = [n for n in tmpdir.rglob('*')]
    for this_path in path_list:
        if this_path.is_file():
            try:
                this_path.unlink()
            except Exception:
                pass
def _read_and_scale_all_at_once(
        full_video_path: Path,
        origin: Tuple[int, int],
        frame_shape: Tuple[int, int],
        quantiles: Optional[Tuple[float, float]] = None,
        min_max: Optional[Tuple[float, float]] = None) -> np.ndarray:
    """
    Read in a video from an HDF5 file and scale it to np.uint8
    without chunking

    Parameters
    ----------
    full_video_path: pathlib.Path
        Path to the HDF5 file

    origin: Tuple[int, int]
        Origin of the desired field of view

    frame_shape: Tuple[int, int]
        Shape of the desired field of view

    quantiles: Optional[Tuple[float, float]]
        Quantiles of full video used for scale normalization
        (default: None)

    min_max: Optional[Tuple[float, float]]
        Minimum and maximum values used for scale normalization
        (default: None)

    Returns
    -------
    data: np.ndarray
        Video, cropped to the specified field of view and scaled
        to np.uint8 (i.e. dynamic range is [0, 255])

    Notes
    -----
    One and only one of quantiles, min_max must be specified. If
    both or neither are specified, a RuntimeError will be raised.
    """

    if quantiles is None and min_max is None:
        raise RuntimeError("must specify either quantiles or min_max "
                           "in _read_and_scale_all_at_once; both are None")
    if quantiles is not None and min_max is not None:
        raise RuntimeError("cannot specify both quantiles and min_max "
                           "in _read_and_scale_all_at_once")

    with h5py.File(full_video_path, 'r') as in_file:
        if quantiles is not None:
            data = in_file['data'][()]
            min_max = np.quantile(data, quantiles)
            data = data[:, origin[0]:origin[0] + frame_shape[0],
                        origin[1]:origin[1] + frame_shape[1]]
        else:
            data = in_file['data'][:, origin[0]:origin[0] + frame_shape[0],
                                   origin[1]:origin[1] + frame_shape[1]]

    if min_max[0] > min_max[1]:
        raise RuntimeError(f"min_max {min_max} in "
                           "_read_and_scale_all_at_once; "
                           "order seems to be reversed")

    data = normalize_array(data,
                           lower_cutoff=min_max[0],
                           upper_cutoff=min_max[1])
    return data
def _read_and_scale_by_chunks(full_video_path: Path,
                              origin: Tuple[int, int],
                              frame_shape: Tuple[int, int],
                              quantiles: Optional[Tuple[float, float]] = None,
                              min_max: Optional[Tuple[float, float]] = None,
                              time_chunk_size: int = 100) -> np.ndarray:
    """
    Read in a video from an HDF5 file and scale it to np.uint8
    one chunk at a time

    Parameters
    ----------
    full_video_path: pathlib.Path
        Path to the HDF5 file

    origin: Tuple[int, int]
        Origin of the desired field of view

    frame_shape: Tuple[int, int]
        Shape of the desired field of view

    quantiles: Optional[Tuple[float, float]]
        Quantiles of full video used for scale normalization
        (default: None)

    min_max: Optional[Tuple[float, float]]
        Minimum and maximum values used for scale normalization
        (default: None)

    time_chunk_size: int
        Number of time steps to process at once.
        (default: 100)

    Returns
    -------
    data: np.ndarray
        Video, cropped to the specified field of view and scaled
        to np.uint8 (i.e. dynamic range is [0, 255])

    Notes
    -----
    One and only one of quantiles, min_max must be specified. If
    both or neither are specified, a RuntimeError will be raised.
    """

    if quantiles is None and min_max is None:
        raise RuntimeError("must specify either quantiles or min_max "
                           "in _read_and_scale_by_chunk; both are None")
    if quantiles is not None and min_max is not None:
        raise RuntimeError("cannot specify both quantiles and min_max "
                           "in _read_and_scale_by_chunks")

    with h5py.File(full_video_path, 'r') as in_file:
        dataset = in_file['data']
        rowmin = origin[0]
        rowmax = min(dataset.shape[1], origin[0] + frame_shape[0])
        colmin = origin[1]
        colmax = min(dataset.shape[2], origin[1] + frame_shape[1])

        if quantiles is not None:

            # Note: we are trying to avoid carrying two copies
            # of the video in memory at any given time.
            # A few lines down, we create a full sized array
            # of uint8s that get populated from chunks of the
            # raw movie. If we kept the result of dataset[()]
            # around at this point, we would have two complete
            # movies in memory at once, which could get expensive.

            min_max = np.quantile(dataset[()], quantiles)

        if min_max[0] > min_max[1]:
            raise RuntimeError(f"min_max {min_max} in "
                               "_read_and_scale_by_chunks; "
                               "order seems to be reversed")

        nt = dataset.shape[0]
        final_output = np.zeros((nt, rowmax - rowmin, colmax - colmin),
                                dtype=np.uint8)

        for t0 in range(0, nt, time_chunk_size):
            t1 = min(t0 + time_chunk_size, nt)
            data_chunk = normalize_array(dataset[t0:t1, rowmin:rowmax,
                                                 colmin:colmax],
                                         lower_cutoff=min_max[0],
                                         upper_cutoff=min_max[1])

            final_output[t0:t1, :, :] = data_chunk

    return final_output
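A self-contained sketch of the chunked read-and-scale pattern, assuming only h5py and numpy; the file contents, cutoffs, and chunk size here are illustrative:

import os
import tempfile

import h5py
import numpy as np

# build a small movie on disk (closing the descriptor mkstemp returns,
# for the same "too many files open" reason noted elsewhere on this page)
fd, path = tempfile.mkstemp(suffix='.h5')
os.close(fd)
with h5py.File(path, 'w') as out_file:
    out_file.create_dataset('data', data=np.random.random((250, 8, 8)))

lower, upper, chunk = 0.1, 0.9, 100
with h5py.File(path, 'r') as in_file:
    dataset = in_file['data']
    scaled = np.zeros(dataset.shape, dtype=np.uint8)
    for t0 in range(0, dataset.shape[0], chunk):
        t1 = min(t0 + chunk, dataset.shape[0])
        # only one chunk of the raw movie is in memory at a time
        block = np.clip(dataset[t0:t1, :, :], lower, upper)
        scaled[t0:t1, :, :] = np.round(
            255.0 * (block - lower) / (upper - lower)).astype(np.uint8)
os.unlink(path)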
def test_thumbnail_from_roi_and_path(tmpdir, example_unnormalized_rgb_video,
                                     example_unnormalized_rgb_video_path,
                                     quantiles, min_max, roi_color, timesteps,
                                     padding, with_others):
    """
    Test _thumbnail_from_ROI_path by comparing output to result
    from _thumbnail_from_ROI_array
    """

    if min_max is not None and quantiles is not None:
        return
    if min_max is None and quantiles is None:
        return

    if timesteps is None:
        n_t = example_unnormalized_rgb_video.shape[0]
    else:
        n_t = len(timesteps)

    mask = np.zeros((12, 15), dtype=bool)
    mask[2:10, 3:13] = True

    x0 = 14
    y0 = 18
    width = 15
    height = 12

    roi = ExtractROI(x=x0,
                     width=width,
                     y=y0,
                     height=height,
                     mask=[list(i) for i in mask],
                     id=0)

    if with_others:
        other_roi = []
        ct = 0
        for dx, dy in product((1, 2), (-1, 0, 1)):
            ct += 1
            other_roi.append(
                ExtractROI(id=ct,
                           y=y0 + dy,
                           x=x0 + dx,
                           height=height,
                           width=width,
                           valid=True,
                           mask=[list(row) for row in mask]))
    else:
        other_roi = None

    if isinstance(roi_color, str):
        roi_color = dict()
        roi_color[0] = (255, 0, 0)
        if with_others:
            rng = np.random.default_rng(2823)
            for other in other_roi:
                color = rng.integers(0, 255, size=3)
                roi_color[other['id']] = tuple(color)

    h5_fname = example_unnormalized_rgb_video_path

    if quantiles is not None:
        local_min_max = np.quantile(example_unnormalized_rgb_video, quantiles)
    else:
        local_min_max = min_max

    normalized_video = normalize_array(np.copy(example_unnormalized_rgb_video),
                                       lower_cutoff=local_min_max[0],
                                       upper_cutoff=local_min_max[1])

    control_video = _thumbnail_video_from_ROI_array(
        normalized_video,
        roi,
        other_roi=other_roi,
        padding=padding,
        roi_color=roi_color,
        tmp_dir=pathlib.Path(tmpdir),
        timesteps=timesteps)

    test_video = _thumbnail_video_from_ROI_path(h5_fname,
                                                roi,
                                                other_roi=other_roi,
                                                padding=padding,
                                                roi_color=roi_color,
                                                tmp_dir=pathlib.Path(tmpdir),
                                                quantiles=quantiles,
                                                min_max=min_max,
                                                timesteps=timesteps)

    origin, fov = video_bounds_from_ROI(roi, normalized_video.shape[1:3],
                                        padding)

    assert test_video.origin == origin
    assert test_video.frame_shape == fov

    control_data = imageio.mimread(control_video.video_path)
    test_data = imageio.mimread(test_video.video_path)
    assert test_video.origin == control_video.origin
    assert test_video.frame_shape == control_video.frame_shape
    assert len(control_data) == len(test_data)
    assert len(test_data) == n_t
    for ii in range(len(control_data)):
        np.testing.assert_array_equal(control_data[ii], test_data[ii])
Example #19
def test_normalize_array(array, lower_cutoff, upper_cutoff, expected):
    normalized = au.normalize_array(array, lower_cutoff, upper_cutoff)
    np.testing.assert_array_equal(normalized, expected)
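For reference, a hedged stand-in for what au.normalize_array appears to compute, matching the clip/shift/scale arithmetic spelled out in test_labeler_artifact_generator above; the real implementation may differ in edge cases (e.g. a constant array, where the denominator below is zero):

import numpy as np

def normalize_array_sketch(array, lower_cutoff=None, upper_cutoff=None):
    # illustrative stand-in, not the library function
    lo = array.min() if lower_cutoff is None else lower_cutoff
    hi = array.max() if upper_cutoff is None else upper_cutoff
    clipped = np.clip(array.astype(float), lo, hi)
    return np.round(255.0 * (clipped - lo) / (hi - lo)).astype(np.uint8)

print(normalize_array_sketch(np.array([0.0, 0.5, 1.0])))  # [  0 128 255]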
Example #20
def test_plot_rois_over_img(extract_roi_list_fixture,
                            corrupted_extract_roi_list_fixture,
                            ophys_roi_list_fixture, color_map_fixture,
                            roi_list_choice, use_color_map, alpha, use_rgb,
                            use_float, blank_image):
    """
    Test that plot_rois_over_img produces an image with the expected
    contours drawn at the expected place in the expected colors
    """
    rng = np.random.default_rng(7612322)

    # choose a list of ROIs
    if roi_list_choice == 0:
        roi_list = extract_roi_list_fixture
    elif roi_list_choice == 1:
        roi_list = corrupted_extract_roi_list_fixture
    elif roi_list_choice == 2:
        roi_list = ophys_roi_list_fixture

    # either unique colors, or one color for all ROIs
    if use_color_map:
        color = color_map_fixture
    else:
        color = (0, 255, 0)

    # create an input image
    if use_float:
        if blank_image:
            img = 2000.0 * np.ones((20, 20), dtype=float)
            if use_rgb:
                img = np.stack([img, img, img]).transpose(1, 2, 0)
        else:
            if use_rgb:
                img = rng.random((20, 20, 3)) * 2111.0
            else:
                img = rng.random((20, 20)) * 2111.0
    else:
        if blank_image:
            img = 111 * np.ones((20, 20), dtype=np.uint8)
            if use_rgb:
                img = np.stack([img, img, img]).transpose(1, 2, 0)
        else:
            if use_rgb:
                img = rng.integers(0,
                                   np.iinfo(np.uint8).max, (20, 20, 3),
                                   dtype=np.uint8)
            else:
                img = rng.integers(0,
                                   np.iinfo(np.uint8).max, (20, 20),
                                   dtype=np.uint8)

    result = plot_rois_over_img(img=img,
                                roi_list=roi_list,
                                color=color,
                                alpha=alpha)

    # expected_image is what we would expect the image to be
    # without any ROIs plotted over it
    if blank_image:
        expected_image = np.zeros((20, 20, 3), dtype=np.uint8)
    else:
        expected_image = normalize_array(array=img,
                                         lower_cutoff=img.min(),
                                         upper_cutoff=img.max())

        if len(expected_image.shape) < 3:
            expected_image = np.stack(
                [expected_image, expected_image,
                 expected_image]).transpose(1, 2, 0)

    # make sure that the input img was not changed
    assert not np.array_equal(img, result)
    assert not np.array_equal(expected_image, result)

    # check the shape and dtype of the output
    assert result.shape == (img.shape[0], img.shape[1], 3)
    assert result.dtype == np.uint8

    # keep track of pixels that are not part of ROI contour
    not_roi_mask = np.ones((20, 20), dtype=bool)

    # check that ROI contour pixels were all set to correct color
    for roi in ophys_roi_list_fixture:
        not_roi_mask[roi.y0:roi.y0 + roi.height,
                     roi.x0:roi.x0 + roi.width][roi.contour_mask] = False

        if isinstance(color, tuple):
            this_color = color
        else:
            this_color = color[roi.roi_id]
        for ic in range(3):

            expected_channel = expected_image[roi.y0:roi.y0 + roi.height,
                                              roi.x0:roi.x0 + roi.width, ic]
            expected_channel = expected_channel[roi.contour_mask].flatten()
            expected_channel = np.round(alpha * this_color[ic] +
                                        (1.0 - alpha) * expected_channel)
            expected_channel = expected_channel.astype(np.uint8)

            channel = result[:, :, ic]
            actual_channel = channel[roi.y0:roi.y0 + roi.height,
                                     roi.x0:roi.x0 +
                                     roi.width][roi.contour_mask]
            actual_channel = actual_channel.flatten()
            np.testing.assert_array_equal(actual_channel, expected_channel)

    # check that pixels not in the ROI contour were all left untouched
    for ic in range(3):
        channel = result[:, :, ic]
        expected_channel = expected_image[:, :, ic]
        np.testing.assert_array_equal(channel[not_roi_mask],
                                      expected_channel[not_roi_mask])
def test_scale_to_uint8(input_array, expected_array):
    """
    Test normalize_array when cutoffs are not specified
    """
    actual = au.normalize_array(input_array)
    np.testing.assert_array_equal(actual, expected_array)