Example #1
def test_read_and_scale(chunked_video_path, unchunked_video_path, to_use,
                        normalization, geometry):
    if to_use == 'chunked':
        video_path = chunked_video_path
    elif to_use == 'unchunked':
        video_path = unchunked_video_path
    else:
        raise RuntimeError(f'bad to_use value: {to_use}')

    with h5py.File(video_path, 'r') as in_file:
        full_data = in_file['data'][()]
        if normalization['quantiles'] is not None:
            min_max = np.quantile(full_data, normalization['quantiles'])
        else:
            min_max = normalization['min_max']

    if geometry['frame_shape'] is None:
        frame_shape = full_data.shape[1:3]
    else:
        frame_shape = geometry['frame_shape']

    r0 = geometry['origin'][0]
    r1 = r0 + frame_shape[0]
    c0 = geometry['origin'][1]
    c1 = c0 + frame_shape[1]
    full_data = full_data[:, r0:r1, c0:c1]
    expected = scale_video_to_uint8(full_data, min_max[0], min_max[1])

    actual = read_and_scale(video_path,
                            geometry['origin'],
                            frame_shape,
                            quantiles=normalization['quantiles'],
                            min_max=normalization['min_max'])

    np.testing.assert_array_equal(actual, expected)
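For reference, the expected array built above behaves like a clip-and-rescale: values are clipped to [min, max] and mapped linearly onto [0, 255]. A minimal sketch of that normalization, assuming this is what scale_video_to_uint8 does (the real implementation may differ in details such as rounding):

import numpy as np


def scale_to_uint8_sketch(data, min_val, max_val):
    # Clip to [min_val, max_val], then map linearly onto [0, 255]
    # (sketch only; the actual scale_video_to_uint8 may differ)
    clipped = np.clip(data.astype(float), min_val, max_val)
    scaled = np.round(255.0 * (clipped - min_val) / (max_val - min_val))
    return scaled.astype(np.uint8)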
Example #2
def test_read_and_scale_norm_exceptions(unchunked_video_path,
                                        chunked_video_path, quantiles, min_max,
                                        match_str, to_use):

    if to_use == 'chunked':
        video_path = chunked_video_path
    elif to_use == 'unchunked':
        video_path = unchunked_video_path

    with pytest.raises(RuntimeError, match=match_str):
        _ = read_and_scale(video_path,
                           origin=(0, 0),
                           frame_shape=(5, 5),
                           quantiles=quantiles,
                           min_max=min_max)
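The quantiles, min_max, match_str, and to_use arguments above are supplied by pytest parametrization; a hypothetical sketch of what that decoration might look like (the match strings below are placeholders, not the suite's actual error text):

import pytest


@pytest.mark.parametrize('to_use', ['chunked', 'unchunked'])
@pytest.mark.parametrize(
    'quantiles, min_max, match_str',
    [(None, None, 'specify'),               # neither normalization given
     ((0.1, 0.999), (0, 100), 'specify')])  # both normalizations given
def test_read_and_scale_norm_exceptions(unchunked_video_path,
                                        chunked_video_path, quantiles,
                                        min_max, match_str, to_use):
    ...  # body as in the example above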
Example #3
def _thumbnail_video_from_ROI_path(
        video_path: pathlib.Path,
        roi: ExtractROI,
        padding: int = 0,
        other_roi: Union[None, List[ExtractROI]] = None,
        roi_color: Union[None,
                         Tuple[int, int, int],
                         Dict[int, Tuple[int, int, int]]] = None,
        timesteps: Optional[np.ndarray] = None,
        file_path: Optional[pathlib.Path] = None,
        tmp_dir: Optional[pathlib.Path] = None,
        fps: int = 31,
        quality: int = 5,
        min_max: Optional[Tuple[numbers.Number, numbers.Number]] = None,
        quantiles: Optional[Tuple[numbers.Number, numbers.Number]] = None,
        ) -> ThumbnailVideo:
    """
    Get a thumbnail video from a HDF5 file path and an ROI

    Parameters
    ----------
    video_path: pathlib.Path
        path to HDF5 file storing video data.
        Shape of data is (n_times, nrows, ncols)

    roi: ExtractROI
        The ROI whose bounding box (plus padding) defines the
        thumbnail's field of view

    padding: int
        The number of pixels to be added to the FOV beyond
        the ROI bounds (if possible)
        (default = 0)

    other_roi: Union[None, List[ExtractROI]]
        Other ROIs to display in the thumbnail (default: None)

    roi_color: Union[None,
                     Tuple[int, int, int],
                     Dict[int, Tuple[int, int, int]]]
        RGB color in which to draw the ROI in the video;
        or a dict mapping ROI ID to the RGB color
        (if None, ROI is not drawn; default = None)

    timesteps: Optional[np.ndarray]
        Timesteps of full_video to be copied into the thumbnail
        (if None, use all timesteps; default=None)

    file_path: Optional[pathlib.Path]
        Where to write the thumbnail video (if None, tempfile
        will be used to create a path; default is None)

    tmp_dir: Optional[pathlib.Path]
        Directory where file will be written (ignored if file_path is
        not None). If None, tempfile will be used to create a temporary
        directory (default: None)

    fps: int
        frames per second (default: 31)

    quality: int
        Quality parameter passed to imageio.mimsave
        (maximum is 10; default is 5)

    min_max: Optional[Tuple[numbers.Number, numbers.Number]]
        If not None, the minimum and maximum values used to clip
        and normalize the movie brightness values (default: None).

    quantiles: Optional[Tuple[numbers.Number, numbers.Number]]
        If not None, the minimum and maximum quantiles used to
        clip and normalize the movie brightness values (default: None)

    Returns
    -------
    thumbnail: ThumbnailVideo

    Raises
    ------
    RuntimeError
       If both min_max and quantiles are None, or if both are
       not None (exactly one of min_max and quantiles must be
       specified)

    RuntimeError
        If min_max[0] > min_max[1]

    Notes
    -----
    This method will scale video data values to [0, 255]
    """

    if min_max is None and quantiles is None:
        raise RuntimeError("both min_max and quantiles are None "
                           "in _thumbnail_video_from_ROI_path; must "
                           "specify one")

    if min_max is not None and quantiles is not None:
        raise RuntimeError("both min_max and quantiles are not None "
                           "in _thumbnail_video_from_ROI_path; can only "
                           "specify one")

    with h5py.File(video_path, 'r') as in_file:
        img_shape = in_file['data'].shape

    # find bounds of thumbnail
    (origin,
     fov_shape) = video_bounds_from_ROI(roi,
                                        img_shape[1:3],
                                        padding)

    full_video = read_and_scale(video_path,
                                origin,
                                fov_shape,
                                quantiles=quantiles,
                                min_max=min_max)

    sub_video = get_rgb_sub_video(full_video,
                                  (0, 0),
                                  fov_shape,
                                  timesteps=timesteps)

    # if an ROI color has been specified, plot the ROI
    # contour over the video in the specified color
    roi_list = None
    if roi_color is not None:
        roi_list = [roi]
        if other_roi is not None:
            for roi2 in other_roi:
                if roi2['id'] != roi['id']:
                    roi_list.append(roi2)

    thumbnail = thumbnail_video_from_array(
                    sub_video,
                    (0, 0),
                    sub_video.shape[1:3],
                    timesteps=None,
                    file_path=file_path,
                    tmp_dir=tmp_dir,
                    fps=fps,
                    quality=quality,
                    origin_offset=origin,
                    roi_list=roi_list,
                    roi_color=roi_color)

    return thumbnail
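A minimal usage sketch for the helper above, assuming ExtractROI is a dict-like record with 'id', 'x', 'y', 'width', 'height', 'valid' and 'mask' fields (the exact schema comes from the surrounding package and may differ):

import pathlib

# hypothetical ROI; the field names are assumptions about ExtractROI
roi = {'id': 1, 'x': 10, 'y': 20, 'width': 8, 'height': 6,
       'valid': True,
       'mask': [[True] * 8 for _ in range(6)]}

thumbnail = _thumbnail_video_from_ROI_path(
                pathlib.Path('video.h5'),  # HDF5 file with a (n_times, nrows, ncols) 'data' dataset
                roi,
                padding=16,
                roi_color=(255, 0, 0),     # draw the ROI contour in red
                quantiles=(0.1, 0.999))    # exactly one of quantiles / min_max must be given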
Example #4
def thumbnail_video_from_path(
        full_video_path: pathlib.Path,
        origin: Tuple[int, int],
        frame_shape: Tuple[int, int],
        timesteps: Optional[np.ndarray] = None,
        file_path: Optional[pathlib.Path] = None,
        tmp_dir: Optional[pathlib.Path] = None,
        fps: int = 31,
        quality: int = 5,
        min_max: Optional[Tuple[numbers.Number, numbers.Number]] = None,
        quantiles: Optional[Tuple[numbers.Number, numbers.Number]] = None,
        origin_offset: Optional[Tuple[int, int]] = None,
        roi_list: Optional[List[ExtractROI]] = None,
        roi_color: Tuple[int, int, int] = (255, 0, 0)) -> ThumbnailVideo:
    """
    Create a ThumbnailVideo (mp4) from a path to an HDF5 file.
    Automatically converts the video data to an array of np.uint8

    Parameters
    ----------
    full_video_path: pathlib.Path
        Path to the h5 file

    origin: Tuple[int, int]
        (row_min, col_min) of the desired thumbnail

    frame_shape: Tuple[int, int]
        (n_rows, n_cols) of the desired thumbnail

    timesteps: Optional[np.ndarray]
        Array of timesteps. If None, keep all timesteps from
        full_video (default: None)

    file_path: Optional[pathlib.Path]
        Where to write the thumbnail video (if None, tempfile
        will be used to create a path; default is None)

    tmp_dir: Optional[pathlib.Path]
        Directory where file will be written (ignored if file_path is
        not None). If None, tempfile will be used to create a temporary
        directory (default: None)

    fps: int
        frames per second (default: 31)

    quality: int
        Parameter passed to imageio.mimsave controlling
        quality of video file produced (max is 10; default is 5)

    min_max: Optional[Tuple[numbers.Number, numbers.Number]]
        If not None, the minimum and maximum values used to clip
        and normalize the movie brightness values (default: None).

    quantiles: Optional[Tuple[numbers.Number, numbers.Number]]
        If not None, the minimum and maximum quantiles used to
        clip and normalize the movie brightness values (default: None)

    origin_offset: Optional[Tuple[int, int]]
        Offset values to be added to origin in container.
        *Should only be used by methods which call this method
        after pre-truncating the video in space; do NOT use this
        by hand*

    roi_list: Optional[List[ExtractROI]]
        If not None, list of ROIs whose contours are to be drawn
        in the thumbnail video (default: None)

    roi_color: Tuple[int, int, int]
        RGB color of ROIs to be drawn in the thumbnail video.
        (default (255, 0, 0))

    Returns
    -------
    ThumbnailVideo
        Containing the metadata about the written thumbnail video

    Raises
    ------
    RuntimeError
       If both min_max and quantiles are None, or if both are
       not None (exactly one of min_max and quantiles must be
       specified)

    RuntimeError
        If min_max[0] > min_max[1]
    """

    if min_max is None and quantiles is None:
        raise RuntimeError("both min_max and quantiles are None "
                           "in thumbnail_video_from_path; must "
                           "specify one")

    if min_max is not None and quantiles is not None:
        raise RuntimeError("both min_max and quantiles are are not None "
                           "in thumbnail_video_from_path; can only specify "
                           "one")

    data = read_and_scale(full_video_path,
                          origin,
                          frame_shape,
                          quantiles=quantiles,
                          min_max=min_max)

    # origin is set to (0,0) because, when we read in the
    # HDF5 file, we only read in the pixels we actually
    # wanted for the thumbnail
    thumbnail = thumbnail_video_from_array(
                    data,
                    (0, 0),
                    frame_shape,
                    timesteps=timesteps,
                    file_path=file_path,
                    tmp_dir=tmp_dir,
                    fps=fps,
                    quality=quality,
                    origin_offset=origin,
                    roi_list=roi_list,
                    roi_color=roi_color)
    return thumbnail
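A sketch of a direct call, assuming the HDF5 file stores its movie in a 'data' dataset shaped (n_times, nrows, ncols); the file name is a placeholder:

import pathlib
import numpy as np

# cut a 64x64 thumbnail whose upper-left corner is at row 100, column 200,
# keeping only the first 1000 frames and normalizing by quantiles
thumb = thumbnail_video_from_path(
            pathlib.Path('full_video.h5'),
            origin=(100, 200),
            frame_shape=(64, 64),
            timesteps=np.arange(1000),
            quantiles=(0.1, 0.999))
# thumb is a ThumbnailVideo describing the mp4 that was written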
Example #5
    def run(self):
        video_path = pathlib.Path(self.args['video_path'])
        correlation_path = pathlib.Path(self.args['correlation_path'])
        roi_path = pathlib.Path(self.args['roi_path'])
        if self.args['motion_border_path'] is not None:
            motion_border_path = pathlib.Path(self.args['motion_border_path'])
            max_shifts = get_max_correction_from_file(
                input_csv=motion_border_path)
            motion_border = motion_border_from_max_shift(max_shifts)
        else:
            motion_border_path = None
            motion_border = MotionBorder(left_side=0,
                                         right_side=0,
                                         top=0,
                                         bottom=0)

        output_path = pathlib.Path(self.args['artifact_path'])

        with open(roi_path, 'rb') as in_file:
            raw_rois = json.load(in_file)
        extract_roi_list = sanitize_extract_roi_list(raw_rois)
        ophys_roi_list = [
            extract_roi_to_ophys_roi(roi) for roi in extract_roi_list
        ]

        logger.info("read ROIs")

        trace_lookup = get_traces(video_path, ophys_roi_list)

        logger.info("wrote traces")

        metadata = create_metadata(input_args=self.args,
                                   video_path=video_path,
                                   roi_path=roi_path,
                                   correlation_path=correlation_path,
                                   motion_csv_path=motion_border_path)

        logger.info("hashed all input files")

        color_map = get_roi_color_map(ophys_roi_list)

        logger.info("computed color map")

        (avg_img_data, max_img_data) = create_max_and_avg_projections(
            video_path, self.args['projection_lower_quantile'],
            self.args['projection_upper_quantile'])

        logger.info("Created max and avg projection images")

        correlation_img_data = create_correlation_projection(correlation_path)

        logger.info("Created correlation image")

        scaled_video = read_and_scale(
            video_path=pathlib.Path(video_path),
            origin=(0, 0),
            frame_shape=max_img_data.shape,
            quantiles=(self.args['video_lower_quantile'],
                       self.args['video_upper_quantile']))

        logger.info("Created scaled video")

        # determine chunks in which to save the video data
        ntime = scaled_video.shape[0]
        nrows = scaled_video.shape[1]
        ncols = scaled_video.shape[2]
        video_chunks = (max(1, ntime // 10), max(1, nrows // 16),
                        max(1, ncols // 16))
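        # e.g. a (3000, 512, 512) movie gets (300, 32, 32) chunks, so a
        # single-frame or small-ROI read only touches the chunks that
        # overlap the selection rather than the whole dataset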

        with h5py.File(output_path, 'a') as out_file:
            out_file.create_dataset(
                'metadata',
                data=json.dumps(metadata, sort_keys=True).encode('utf-8'))
            out_file.create_dataset(
                'rois', data=json.dumps(extract_roi_list).encode('utf-8'))
            out_file.create_dataset('roi_color_map',
                                    data=json.dumps(color_map).encode('utf-8'))
            out_file.create_dataset('max_projection', data=max_img_data)
            out_file.create_dataset('avg_projection', data=avg_img_data)
            out_file.create_dataset('correlation_projection',
                                    data=correlation_img_data)
            out_file.create_dataset('video_data',
                                    data=scaled_video,
                                    chunks=video_chunks)

            # note the transposition below;
            # if you shift up, the suspect pixels are those that wrap
            # on the bottom; if you shift right, the suspect pixels
            # are those that wrap on the right, etc.
            out_file.create_dataset(
                'motion_border',
                data=json.dumps(
                    {
                        'bottom': motion_border.bottom,
                        'top': motion_border.top,
                        'left_side': motion_border.left_side,
                        'right_side': motion_border.right_side
                    },
                    indent=2).encode('utf-8'))

            trace_group = out_file.create_group('traces')
            for roi_id in trace_lookup:
                trace_group.create_dataset(str(roi_id),
                                           data=trace_lookup[roi_id])

        logger.info("Wrote all artifacts")