    def run(self):
        self.logger.name = type(self).__name__
        self.logger.setLevel(self.args["log_level"])

        ophys_etl_commit_sha = os.environ.get("OPHYS_ETL_COMMIT_SHA",
                                              "local build")
        self.logger.info(f"OPHYS_ETL_COMMIT_SHA: {ophys_etl_commit_sha}")
        t0 = time.time()

        video_path = pathlib.Path(self.args["video_path"])
        exp_id = video_path.name.split("_")[0]

        roi_path = pathlib.Path(self.args["roi_path"])
        graph_path = pathlib.Path(self.args["graph_path"])

        proj = get_max_and_avg(video_path)
        self.logger.info("Calculated mean and max images...")
        avg_img = proj["avg"]
        max_img = proj["max"]
        corr_img = graph_to_img(graph_path)
        self.logger.info("Calculated correlation image...")

        quantiles = (self.args["low_quantile"], self.args["high_quantile"])
        q0, q1 = np.quantile(max_img, quantiles)
        max_img = normalize_array(array=max_img,
                                  lower_cutoff=q0,
                                  upper_cutoff=q1)

        q0, q1 = np.quantile(avg_img, quantiles)
        avg_img = normalize_array(array=avg_img,
                                  lower_cutoff=q0,
                                  upper_cutoff=q1)

        q0, q1 = np.quantile(corr_img, quantiles)
        corr_img = normalize_array(array=corr_img,
                                   lower_cutoff=q0,
                                   upper_cutoff=q1)
        self.logger.info("Normalized images...")

        with open(roi_path, "rb") as in_file:
            extract_roi_list = sanitize_extract_roi_list(json.load(in_file))

        selected_rois = self.args['selected_rois']
        if selected_rois is None:
            selected_rois = [roi['id'] for roi in extract_roi_list]
        selected_rois = set(selected_rois)

        self.logger.info("Creating and writing ROI artifacts...")
        for roi in extract_roi_list:
            if roi['id'] not in selected_rois:
                continue
            self._write_thumbnails(extract_roi=roi,
                                   max_img=max_img,
                                   avg_img=avg_img,
                                   corr_img=corr_img,
                                   exp_id=exp_id)

        self.logger.info(f"Created ROI artifacts in {time.time()-t0:.0f} "
                         "seconds.")
Example #2
def add_list_of_roi_contours_to_img(
        img: np.ndarray,
        roi_list: Union[List[OphysROI], List[Dict]],
        color: Union[Tuple[int, int, int],
                     Dict[int, Tuple[int, int, int]]] = (255, 0, 0),
        alpha: float = 0.25) -> np.ndarray:
    """
    Add colored ROI contours to an image

    Parameters
    ----------
    img: np.ndarray
        RGB representation of image

    roi_list: Union[List[OphysROI], List[Dict]]
        list of ROIs to add to image

    color: Union[Tuple[int, int, int],
                 Dict[int, Tuple[int, int, int]]]
        Either a tuple representing an RGB color, or a dict
        mapping roi_id to tuples representing RGB colors
        (default = (255, 0, 0))

    alpha: float
        transparency factor to apply to ROI (default=0.25)

    Returns
    -------
    new_img: np.ndarray
        New image with ROI borders superimposed
    """

    new_img = np.copy(img)
    if len(roi_list) == 0:
        return new_img

    if not isinstance(roi_list[0], OphysROI):
        roi_list = sanitize_extract_roi_list(roi_list)
        roi_list = [extract_roi_to_ophys_roi(roi)
                    for roi in roi_list]

    if isinstance(color, tuple):
        color_map = {roi.roi_id: color for roi in roi_list}
    else:
        color_map = color

    for roi in roi_list:
        new_img = add_roi_contour_to_img(
                      new_img,
                      roi,
                      color_map[roi.roi_id],
                      alpha)

    return new_img
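
A short usage sketch, assuming the helpers used above (sanitize_extract_roi_list, extract_roi_to_ophys_roi, add_roi_contour_to_img) are importable from the same module; the toy ROI follows the sanitized ExtractROI layout exercised by the tests below:

import numpy as np

# black RGB background to draw on
background = np.zeros((512, 512, 3), dtype=np.uint8)

# one toy ROI in the sanitized ExtractROI layout
toy_roi = {'id': 1, 'x': 100, 'y': 200, 'width': 2, 'height': 2,
           'valid': True, 'mask': [[True, False], [True, True]]}

# superimpose the contour in green with the default transparency
img_with_rois = add_list_of_roi_contours_to_img(
    background, [toy_roi], color=(0, 255, 0), alpha=0.25)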
Example #3
def test_sanitize_extract_roi_list(suite2p_roi_fixture):

    with open(suite2p_roi_fixture, 'rb') as in_file:
        raw_roi_list = json.load(in_file)
    assert len(raw_roi_list) > 0
    extract_roi_list = sanitize_extract_roi_list(raw_roi_list)
    assert len(extract_roi_list) == len(raw_roi_list)
    for raw_roi, extract_roi in zip(raw_roi_list, extract_roi_list):

        assert 'mask_matrix' not in extract_roi
        assert 'valid_roi' not in extract_roi
        assert 'roi_id' not in extract_roi
        for e_key, r_key in (('id', 'roi_id'), ('mask', 'mask_matrix'),
                             ('valid', 'valid_roi'), ('x', 'x'), ('y', 'y'),
                             ('width', 'width'), ('height', 'height')):

            assert extract_roi[e_key] == raw_roi[r_key]
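
This test pins down the key renaming that sanitize_extract_roi_list performs. A minimal sketch of that behavior, assuming only the three legacy keys are renamed and every other field is passed through untouched (sanitize_sketch is a hypothetical stand-in, not the library implementation):

import copy
from typing import Dict, List

# legacy suite2p-style keys mapped to the sanitized ExtractROI keys
_KEY_MAP = {'roi_id': 'id', 'mask_matrix': 'mask', 'valid_roi': 'valid'}

def sanitize_sketch(roi_list: List[Dict]) -> List[Dict]:
    """Rename legacy keys in each ROI dict; leave all other keys alone."""
    clean_list = []
    for roi in roi_list:
        new_roi = copy.deepcopy(roi)
        for old_key, new_key in _KEY_MAP.items():
            if old_key in new_roi:
                new_roi[new_key] = new_roi.pop(old_key)
        clean_list.append(new_roi)
    return clean_list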
Example #4
def test_get_traces(classifier2021_video_fixture, suite2p_roi_fixture):

    video_path = classifier2021_video_fixture
    roi_path = suite2p_roi_fixture

    with open(roi_path, 'rb') as in_file:
        raw_roi_list = json.load(in_file)
    extract_roi_list = sanitize_extract_roi_list(raw_roi_list)
    ophys_roi_list = [
        extract_roi_to_ophys_roi(roi) for roi in extract_roi_list
    ]

    found_traces = get_traces(video_path, ophys_roi_list)

    assert len(found_traces) == len(extract_roi_list)

    with h5py.File(video_path, 'r') as in_file:
        video_data = in_file['data'][()]

    assert len(extract_roi_list) > 0
    for roi in extract_roi_list:
        assert roi['id'] in found_traces
        expected_trace = np.zeros(video_data.shape[0], dtype=float)
        npix = 0
        r0 = roi['y']
        c0 = roi['x']
        for ir in range(roi['height']):
            for ic in range(roi['width']):
                if not roi['mask'][ir][ic]:
                    continue
                npix += 1
                row = r0 + ir
                col = c0 + ic
                expected_trace += video_data[:, row, col]
        expected_trace = expected_trace / npix
        np.testing.assert_array_equal(expected_trace, found_traces[roi['id']])
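
The nested loop above spells out what get_traces is expected to return: for each ROI, the per-frame mean of the video pixels under its mask. A vectorized sketch of the same computation (mean_trace_for_roi is a hypothetical helper, not the library code):

import numpy as np

def mean_trace_for_roi(video_data: np.ndarray, roi: dict) -> np.ndarray:
    """Average the video over the ROI's masked pixels, frame by frame.

    video_data has shape (n_frames, n_rows, n_cols); roi follows the
    sanitized ExtractROI layout (x, y, width, height, mask).
    """
    mask = np.array(roi['mask'], dtype=bool)
    r0, c0 = roi['y'], roi['x']
    # cut out the ROI's bounding box, then average over the masked pixels
    cutout = video_data[:, r0:r0 + roi['height'], c0:c0 + roi['width']]
    return cutout[:, mask].mean(axis=1)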
Example #5
def test_sanitize_extract_roi_list(example_ophys_roi_list_fixture,
                                   munge_mapping, extra_field):

    extract_roi_list = [
        rois_utils.ophys_roi_to_extract_roi(roi)
        for roi in example_ophys_roi_list_fixture
    ]

    if extra_field:
        for ii, roi in enumerate(extract_roi_list):
            roi['nonsense'] = ii
        for roi in extract_roi_list:
            assert 'nonsense' in roi

    bad_roi_list = []
    for roi in extract_roi_list:
        new_roi = copy.deepcopy(roi)
        for k in munge_mapping:
            new_roi[munge_mapping[k]] = new_roi.pop(k)
        bad_roi_list.append(new_roi)

    assert bad_roi_list != extract_roi_list
    cleaned_roi_list = rois_utils.sanitize_extract_roi_list(bad_roi_list)
    assert cleaned_roi_list == extract_roi_list
Example #6
def test_labeler_artifact_generator(
        tmp_path_factory, classifier2021_video_fixture,
        classifier2021_video_hash_fixture, suite2p_roi_fixture,
        suite2p_roi_hash_fixture, classifier2021_corr_graph_fixture,
        classifier2021_corr_graph_hash_fixture,
        classifier2021_corr_png_fixture, classifier2021_corr_png_hash_fixture,
        video_lower_quantile, video_upper_quantile, projection_lower_quantile,
        projection_upper_quantile, use_graph, with_motion_border):
    """
    Test that LabelerArtifactGenerator runs and produces expected output
    """

    tmpdir = tmp_path_factory.mktemp('full_artifact_generation')
    if with_motion_border:
        motion_path = pathlib.Path(
            tempfile.mkstemp(dir=tmpdir, suffix='.csv')[1])
        with open(motion_path, 'w') as out_file:
            out_file.write('x,y\n')
            out_file.write('5,6\n')
            out_file.write('14,-3\n')
        expected_motion_border = {
            'bottom': 6.0,
            'top': 3.0,
            'right_side': 14.0,
            'left_side': 0.0
        }

        motion_path = str(motion_path.resolve().absolute())

    else:
        motion_path = None
        expected_motion_border = {
            'top': 0,
            'bottom': 0,
            'left_side': 0,
            'right_side': 0
        }

    if use_graph:
        corr_fixture = classifier2021_corr_graph_fixture
        corr_hash = classifier2021_corr_graph_hash_fixture
    else:
        corr_fixture = classifier2021_corr_png_fixture
        corr_hash = classifier2021_corr_png_hash_fixture

    output_tuple = tempfile.mkstemp(dir=tmpdir,
                                    prefix='artifact_file_',
                                    suffix='.h5')

    # without this, got a "too many files open" error
    os.close(output_tuple[0])

    output_path = pathlib.Path(output_tuple[1])

    # because tempfile.mkstemp actually creates the file
    output_path.unlink()

    input_data = dict()
    input_data['video_path'] = str(classifier2021_video_fixture)
    input_data['roi_path'] = str(suite2p_roi_fixture)
    input_data['correlation_path'] = str(corr_fixture)
    input_data['artifact_path'] = str(output_path)
    input_data['clobber'] = False
    input_data['video_lower_quantile'] = video_lower_quantile
    input_data['video_upper_quantile'] = video_upper_quantile
    input_data['projection_lower_quantile'] = projection_lower_quantile
    input_data['projection_upper_quantile'] = projection_upper_quantile
    input_data['motion_border_path'] = motion_path

    generator = LabelerArtifactGenerator(input_data=input_data, args=[])
    generator.run()

    assert output_path.is_file()

    with h5py.File(output_path, 'r') as artifact_file:

        motion_border = json.loads(
            artifact_file['motion_border'][()].decode('utf-8'))
        assert motion_border == expected_motion_border
        # test that ROIs were written correctly
        with open(suite2p_roi_fixture, 'rb') as in_file:
            expected_rois = json.load(in_file)
        expected_rois = sanitize_extract_roi_list(expected_rois)

        artifact_rois = json.loads(artifact_file['rois'][()].decode('utf-8'))

        assert expected_rois == artifact_rois

        # test that all ROIs appear in color map
        color_map = json.loads(
            artifact_file['roi_color_map'][()].decode('utf-8'))
        assert len(color_map) == len(expected_rois)
        for roi in expected_rois:
            assert str(roi['id']) in color_map

        # test that traces were written correctly
        ophys_rois = [extract_roi_to_ophys_roi(roi) for roi in expected_rois]
        expected_traces = get_traces(classifier2021_video_fixture, ophys_rois)

        for roi_id in expected_traces:
            np.testing.assert_array_equal(
                expected_traces[roi_id], artifact_file[f'traces/{roi_id}'][()])

        # test that the scaled video data was written correctly
        assert artifact_file['video_data'].chunks is not None
        scaled_video = artifact_file['video_data'][()]

        with h5py.File(classifier2021_video_fixture, 'r') as raw_file:
            raw_video = raw_file['data'][()]
        raw_max = np.max(raw_video, axis=0)
        raw_avg = np.mean(raw_video, axis=0)

        mn, mx = np.quantile(raw_video,
                             (video_lower_quantile, video_upper_quantile))

        raw_video = np.where(raw_video > mn, raw_video, mn)
        raw_video = np.where(raw_video < mx, raw_video, mx)
        delta = mx - mn
        raw_video = raw_video - mn
        raw_video = raw_video.astype(float)
        raw_video = np.round(255.0 * raw_video / delta).astype(np.uint8)
        np.testing.assert_array_equal(raw_video, scaled_video)
        del raw_video
        del scaled_video

        # test that max and avg projection images were written correctly
        for raw_img, img_key in zip((raw_max, raw_avg),
                                    ('max_projection', 'avg_projection')):
            artifact_img = artifact_file[img_key][()]
            mn, mx = np.quantile(
                raw_img,
                (projection_lower_quantile, projection_upper_quantile))
            raw_img = np.where(raw_img > mn, raw_img, mn)
            raw_img = np.where(raw_img < mx, raw_img, mx)
            raw_img = raw_img.astype(float)
            np.testing.assert_array_equal(raw_img, artifact_img)

        artifact_corr = artifact_file['correlation_projection'][()]
        if use_graph:
            expected_corr = normalize_array(
                graph_to_img(corr_fixture,
                             attribute_name='filtered_hnc_Gaussian'))
        else:
            expected_corr = normalize_array(
                np.array(PIL.Image.open(corr_fixture, 'r')))

        np.testing.assert_array_equal(artifact_corr, expected_corr)

        metadata = json.loads(artifact_file['metadata'][()].decode('utf-8'))

    # test that metadata has the right contents
    assert metadata['video']['path'] == str(classifier2021_video_fixture)
    assert metadata['video']['hash'] == classifier2021_video_hash_fixture

    assert metadata['rois']['path'] == str(suite2p_roi_fixture)
    assert metadata['rois']['hash'] == suite2p_roi_hash_fixture

    assert metadata['correlation']['path'] == str(corr_fixture)
    assert metadata['correlation']['hash'] == corr_hash

    assert metadata['generator_args'] == input_data
    if with_motion_border:
        assert 'motion_csv' in metadata
    else:
        assert 'motion_csv' not in metadata

    tmpdir = pathlib.Path(tmpdir)
    path_list = [n for n in tmpdir.rglob('*')]
    for this_path in path_list:
        if this_path.is_file():
            try:
                this_path.unlink()
            except Exception:
                pass
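
The motion-border fixture near the top of this test documents how the x/y shifts in the motion-correction CSV map onto border sides: a positive shift leaves wrapped, suspect pixels on the opposite edge. A hedged sketch of that relationship (motion_border_sketch is a hypothetical helper; the generator below relies on get_max_correction_from_file and motion_border_from_max_shift):

import pandas as pd

def motion_border_sketch(motion_csv_path: str) -> dict:
    """Turn the x/y shift columns of a motion CSV into a per-side border.

    Each side of the border is the largest shift toward it, floored at
    zero, matching the expected_motion_border values in the test above.
    """
    shifts = pd.read_csv(motion_csv_path)
    return {
        'right_side': max(0.0, float(shifts['x'].max())),
        'left_side': max(0.0, float(-shifts['x'].min())),
        'bottom': max(0.0, float(shifts['y'].max())),
        'top': max(0.0, float(-shifts['y'].min())),
    }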
Example #7
    def run(self):
        video_path = pathlib.Path(self.args['video_path'])
        correlation_path = pathlib.Path(self.args['correlation_path'])
        roi_path = pathlib.Path(self.args['roi_path'])
        if self.args['motion_border_path'] is not None:
            motion_border_path = pathlib.Path(self.args['motion_border_path'])
            max_shifts = get_max_correction_from_file(
                input_csv=motion_border_path)
            motion_border = motion_border_from_max_shift(max_shifts)
        else:
            motion_border_path = None
            motion_border = MotionBorder(left_side=0,
                                         right_side=0,
                                         top=0,
                                         bottom=0)

        output_path = pathlib.Path(self.args['artifact_path'])

        with open(roi_path, 'rb') as in_file:
            raw_rois = json.load(in_file)
        extract_roi_list = sanitize_extract_roi_list(raw_rois)
        ophys_roi_list = [
            extract_roi_to_ophys_roi(roi) for roi in extract_roi_list
        ]

        logger.info("read ROIs")

        trace_lookup = get_traces(video_path, ophys_roi_list)

        logger.info("wrote traces")

        metadata = create_metadata(input_args=self.args,
                                   video_path=video_path,
                                   roi_path=roi_path,
                                   correlation_path=correlation_path,
                                   motion_csv_path=motion_border_path)

        logger.info("hashed all input files")

        color_map = get_roi_color_map(ophys_roi_list)

        logger.info("computed color map")

        (avg_img_data, max_img_data) = create_max_and_avg_projections(
            video_path, self.args['projection_lower_quantile'],
            self.args['projection_upper_quantile'])

        logger.info("Created max and avg projection images")

        correlation_img_data = create_correlation_projection(correlation_path)

        logger.info("Created correlation image")

        scaled_video = read_and_scale(
            video_path=pathlib.Path(video_path),
            origin=(0, 0),
            frame_shape=max_img_data.shape,
            quantiles=(self.args['video_lower_quantile'],
                       self.args['video_upper_quantile']))

        logger.info("Created scaled video")

        # determine chunks in which to save the video data
        ntime = scaled_video.shape[0]
        nrows = scaled_video.shape[1]
        ncols = scaled_video.shape[2]
        video_chunks = (max(1, ntime // 10), max(1, nrows // 16),
                        max(1, ncols // 16))

        with h5py.File(output_path, 'a') as out_file:
            out_file.create_dataset(
                'metadata',
                data=json.dumps(metadata, sort_keys=True).encode('utf-8'))
            out_file.create_dataset(
                'rois', data=json.dumps(extract_roi_list).encode('utf-8'))
            out_file.create_dataset('roi_color_map',
                                    data=json.dumps(color_map).encode('utf-8'))
            out_file.create_dataset('max_projection', data=max_img_data)
            out_file.create_dataset('avg_projection', data=avg_img_data)
            out_file.create_dataset('correlation_projection',
                                    data=correlation_img_data)
            out_file.create_dataset('video_data',
                                    data=scaled_video,
                                    chunks=video_chunks)

            # note the transposition below;
            # if you shift up, the suspect pixels are those that wrap
            # on the bottom; if you shift right, the suspect pixels
            # are those that wrap on the right, etc.
            out_file.create_dataset(
                'motion_border',
                data=json.dumps(
                    {
                        'bottom': motion_border.bottom,
                        'top': motion_border.top,
                        'left_side': motion_border.left_side,
                        'right_side': motion_border.right_side
                    },
                    indent=2).encode('utf-8'))

            trace_group = out_file.create_group('traces')
            for roi_id in trace_lookup:
                trace_group.create_dataset(str(roi_id),
                                           data=trace_lookup[roi_id])

        logger.info("Wrote all artifacts")