Code example #1
File: __init__.py Project: yarikoptic/AllenSDK
def get_synchronized_frame_times(session_sync_file: Path,
                                 sync_line_label_keys: Tuple[str, ...]) -> pd.Series:
    """Get experimental frame times from an experiment session sync file.

    Parameters
    ----------
    session_sync_file : Path
        Path to an ephys session sync file.
        The sync file contains rising/falling edges from a DAQ system, which
        indicate when certain events occur (so they can be related to
        each other).
    sync_line_label_keys : Tuple[str, ...]
        Line label keys to get times for. See class attributes of
        allensdk.brain_observatory.sync_dataset.Dataset for a listing of
        possible keys.

    Returns
    -------
    pd.Series
        An array of times at which the frames specified by the given sync
        line label keys were acquired.
    """
    sync_dataset = Dataset(str(session_sync_file))

    frame_times = sync_dataset.get_edges(
        "rising", sync_line_label_keys, units="seconds"
    )

    # Occasionally an extra set of frame times is acquired after the rest of
    # the signals. We detect and remove these.
    frame_times = trim_discontiguous_times(frame_times)

    return pd.Series(frame_times)
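
A minimal usage sketch for get_synchronized_frame_times, assuming the Dataset class is importable from allensdk.brain_observatory.sync_dataset (as referenced in the docstring) and using a hypothetical sync file path:

from pathlib import Path

from allensdk.brain_observatory.sync_dataset import Dataset  # module path per the docstring above

# Hypothetical session sync file.
sync_file = Path("/data/ephys_session/ephys_session_sync.h5")

# EYE_TRACKING_KEYS is one of the line label key tuples defined on Dataset;
# any tuple of labels present in the sync file could be passed instead.
frame_times = get_synchronized_frame_times(
    session_sync_file=sync_file,
    sync_line_label_keys=Dataset.EYE_TRACKING_KEYS,
)
print(f"{len(frame_times)} camera frames, first at {frame_times.iloc[0]:.3f} s")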
Code example #2
File: time_sync.py Project: vrhaynes/AllenSDK
    def __init__(self,
                 sync_file,
                 scanner=None,
                 dff_file=None,
                 stimulus_pkl=None,
                 eye_video=None,
                 behavior_video=None,
                 long_stim_threshold=LONG_STIM_THRESHOLD):
        self.scanner = scanner if scanner is not None else "SCIVIVO"
        self._dataset = Dataset(sync_file)
        self._keys = get_keys(self._dataset)
        self.long_stim_threshold = long_stim_threshold

        self._monitor_delay = None
        self._clipped_stim_ts_delta = None
        self._clipped_stim_timestamp_values = None

        if dff_file is not None:
            self.ophys_data_length = get_ophys_data_length(dff_file)
        else:
            self.ophys_data_length = None
        if stimulus_pkl is not None:
            self.stim_data_length = get_stim_data_length(stimulus_pkl)
        else:
            self.stim_data_length = None
        if eye_video is not None:
            self.eye_data_length = get_video_length(eye_video)
        else:
            self.eye_data_length = None
        if behavior_video is not None:
            self.behavior_data_length = get_video_length(behavior_video)
        else:
            self.behavior_data_length = None
Code example #3
File: __main__.py Project: vrhaynes/AllenSDK
def main(stimulus_pkl_path, sync_h5_path, output_path, wheel_radius,
         subject_position, use_median_duration, **kwargs):

    stim_file = pd.read_pickle(stimulus_pkl_path)
    sync_dataset = Dataset(sync_h5_path)

    # Why the rising edge? See Sweepstim.update in camstim. This method does:
    # 1. updates the stimuli
    # 2. updates the "items", causing a running speed sample to be acquired
    # 3. sets the vsync line high
    # 4. flips the buffer
    frame_times = sync_dataset.get_edges("rising",
                                         Dataset.FRAME_KEYS,
                                         units="seconds")

    # Occasionally an extra set of frame times is acquired after the rest of
    # the signals. We detect and remove these.
    frame_times = sync_utilities.trim_discontiguous_times(frame_times)
    num_raw_timestamps = len(frame_times)

    dx_deg = running_from_stim_file(stim_file, "dx", num_raw_timestamps)

    if num_raw_timestamps != len(dx_deg):
        raise ValueError(
            f"found {num_raw_timestamps} rising edges on the vsync line, "
            f"but only {len(dx_deg)} rotation samples")

    vsig = running_from_stim_file(stim_file, "vsig", num_raw_timestamps)
    vin = running_from_stim_file(stim_file, "vin", num_raw_timestamps)

    velocities = extract_running_speeds(
        frame_times=frame_times,
        dx_deg=dx_deg,
        vsig=vsig,
        vin=vin,
        wheel_radius=wheel_radius,
        subject_position=subject_position,
        use_median_duration=use_median_duration)

    raw_data = pd.DataFrame({
        "vsig": vsig,
        "vin": vin,
        "frame_time": frame_times,
        "dx": dx_deg
    })

    store = pd.HDFStore(output_path)
    store.put("running_speed", velocities)
    store.put("raw_data", raw_data)
    store.close()

    return {"output_path": output_path}
Code example #4
File: __init__.py Project: vrhaynes/AllenSDK
def get_ophys_frames(dataset: SyncDataset,
                     permissive: bool = False) -> Optional[np.ndarray]:
    """ Report the timestamps of each optical physiology video frame

    Parameters
    ----------
    dataset : describes experiment timing
    permissive : If True, None will be returned if timestamps are not found. If
        False, a KeyError will be raised

    Returns
    -------
    array of timestamps (floating point; seconds; relative to experiment start)
        or None. If None, no timestamps were found in this sync dataset.

    Notes
    -----
    Use the rising edge for Scientifica and the falling edge for Nikon:
    http://confluence.corp.alleninstitute.org/display/IT/Ophys+Time+Sync
    This function uses rising edges.

    """
    try:
        return dataset.get_edges("rising", "2p_vsync", "seconds")
    except KeyError:
        if not permissive:
            raise
        return None
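
A short sketch showing the permissive flag; the sync file path is hypothetical and SyncDataset is the sync dataset class referenced in the signature above:

dataset = SyncDataset("/data/ophys_session/sync.h5")  # hypothetical path

# permissive=True turns a missing '2p_vsync' line into a None return
# instead of a KeyError.
ophys_frame_times = get_ophys_frames(dataset, permissive=True)
if ophys_frame_times is None:
    print("no 2p_vsync line found in this sync file")
else:
    print(f"{len(ophys_frame_times)} ophys frame timestamps")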
Code example #5
File: __init__.py Project: vrhaynes/AllenSDK
def get_trigger(dataset: SyncDataset,
                permissive: bool = False) -> Optional[np.ndarray]:
    """ Returns (as a 1-element array) the time at which optical physiology 
    acquisition was started.

    Parameters
    ----------
    dataset : describes experiment timing
    permissive : If True, None will be returned if timestamps are not found. If 
        False, a KeyError will be raised

    Returns
    -------
    timestamps (floating point; seconds; relative to experiment start) 
        or None. If None, no timestamps were found in this sync dataset.

    Notes
    -----
    Ophys frame timestamps can be recorded before acquisition start when 
        experimenters are setting up the recording session. These do not 
        correspond to acquired ophys frames.

    """
    return dataset.get_edges("rising", ["2p_trigger", "acq_trigger"],
                             "seconds", permissive)
Code example #6
def build_full_NP_behavior_stim_table(behavior_pkl_path, mapping_pkl_path,
                                      replay_pkl_path, sync_path):

    pkl_files = []
    for pkl in [behavior_pkl_path, mapping_pkl_path, replay_pkl_path]:
        if isinstance(pkl, str):
            pkl = pd.read_pickle(pkl)
        pkl_files.append(pkl)

    behavior_pkl = pkl_files[0]
    mapping_pkl = pkl_files[1]
    replay_pkl = pkl_files[2]

    if isinstance(sync_path, str):
        sync_dataset = Dataset(sync_path)
    else:
        sync_dataset = sync_path

    frame_counts = []
    for p in [behavior_pkl, mapping_pkl, replay_pkl]:
        total_frames = len(p['intervalsms']) + 1 if 'intervalsms' in p \
                else len(p['items']['behavior']['intervalsms']) + 1
        frame_counts.append(total_frames)

    mapping_stim_file = CamStimOnePickleStimFile(mapping_pkl)

    frame_offsets = get_frame_offsets(sync_dataset, frame_counts)

    stim_table_behavior = generate_behavior_stim_table(
        behavior_pkl, sync_dataset, frame_offset=frame_offsets[0])
    stim_table_mapping = generate_mapping_stim_table(
        mapping_stim_file, sync_dataset, frame_offset=frame_offsets[1])
    stim_table_replay = generate_replay_stim_table(
        replay_pkl,
        sync_dataset,
        stim_table_behavior,
        frame_offset=frame_offsets[2])

    # Rearrange columns to make the table a bit more readable; the remaining columns are alphabetical.
    stim_table_behavior = sort_columns(stim_table_behavior, [
        'stimulus_block', 'active', 'stimulus_name', 'Start', 'End',
        'duration', 'start_frame', 'end_frame'
    ])

    stim_table_full = pd.concat(
        [stim_table_behavior, stim_table_mapping, stim_table_replay],
        sort=False)
    stim_table_full.loc[:, 'duration'] = stim_table_full[
        'End'] - stim_table_full['Start']
    stim_table_full.loc[stim_table_full['stimulus_name'].isnull(),
                        'stimulus_name'] = 'spontaneous'
    stim_table_full['presentation_index'] = np.arange(len(stim_table_full))

    return stim_table_full.set_index('presentation_index')
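
A usage sketch for the stimulus table builder above; the pickle and sync paths are placeholders:

stim_table = build_full_NP_behavior_stim_table(
    behavior_pkl_path="/data/np_session/behavior.pkl",  # hypothetical paths
    mapping_pkl_path="/data/np_session/mapping.pkl",
    replay_pkl_path="/data/np_session/replay.pkl",
    sync_path="/data/np_session/sync.h5",
)

# One row per stimulus presentation, indexed by presentation_index.
print(stim_table[["stimulus_block", "stimulus_name", "Start", "End",
                  "duration"]].head())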
Code example #7
File: __init__.py Project: wesley-jones/AllenSDK
def get_raw_stimulus_frames(
    dataset: SyncDataset, 
    permissive: bool = False
) -> Optional[np.ndarray]:
    """ Report the raw timestamps of each stimulus frame. This corresponds to 
    the time at which the psychopy window's flip method returned, but not 
    necessarily to the time at which the stimulus frame was displayed.

    Parameters
    ----------
    dataset : describes experiment timing
    permissive : If True, None will be returned if timestamps are not found. If 
        False, a KeyError will be raised

    Returns
    -------
    array of timestamps (floating point; seconds; relative to experiment start)
        or None. If None, no timestamps were found in this sync dataset.

    """
    try:
        return dataset.get_edges("falling", "stim_vsync", "seconds")
    except KeyError:
        if not permissive:
            raise
        return None
Code example #8
def get_synchronized_camera_frame_times(session_sync_file: Path) -> pd.Series:
    """Get eye tracking camera frame times from an experiment session sync file.

    Args:
        session_sync_file (Path): Path to an ephys session sync file.
            The sync file contains rising/falling edges from a DAQ system, which
            indicate when certain events occur (so they can be related to
            each other).

    Returns:
        pandas.Series: An array of times when frames for the eye tracking
            camera were acquired.
    """
    sync_dataset = Dataset(str(session_sync_file))

    frame_times = sync_dataset.get_edges("rising",
                                         Dataset.EYE_TRACKING_KEYS,
                                         units="seconds")

    # Occasionally an extra set of frame times is acquired after the rest of
    # the signals. We detect and remove these.
    frame_times = sync_utilities.trim_discontiguous_times(frame_times)

    return pd.Series(frame_times)
Code example #9
File: __init__.py Project: vrhaynes/AllenSDK
def get_lick_times(dataset: SyncDataset,
                   permissive: bool = False) -> Optional[np.ndarray]:
    """ Report the timestamps of each detected lick

    Parameters
    ----------
    dataset : describes experiment timing
    permissive : If True, None will be returned if timestamps are not found. If 
        False, a KeyError will be raised

    Returns
    -------
    array of timestamps (floating point; seconds; relative to experiment start) 
        or None. If None, no lick timestamps were found in this sync 
        dataset.

    """
    return dataset.get_edges("rising", ["lick_times", "lick_sensor"],
                             "seconds", permissive)
Code example #10
File: __init__.py Project: vrhaynes/AllenSDK
def get_eye_tracking(dataset: SyncDataset,
                     permissive: bool = False) -> Optional[np.ndarray]:
    """ Report the timestamps of each frame of the eye tracking video

    Parameters
    ----------
    dataset : describes experiment timing
    permissive : If True, None will be returned if timestamps are not found. If 
        False, a KeyError will be raised

    Returns
    -------
    array of timestamps (floating point; seconds; relative to experiment start) 
        or None. If None, no eye tracking timestamps were found in this sync 
        dataset.

    """
    return dataset.get_edges("rising", ["cam2_exposure", "eye_tracking"],
                             "seconds", permissive)
Code example #11
File: __init__.py Project: vrhaynes/AllenSDK
def get_stim_photodiode(dataset: SyncDataset,
                        permissive: bool = False) -> Optional[List[float]]:
    """ Report the timestamps of each detected sync square transition (both 
    black -> white and white -> black) in this experiment.

    Parameters
    ----------
    dataset : describes experiment timing
    permissive : If True, None will be returned if timestamps are not found. If 
        False, a KeyError will be raised

    Returns
    -------
    array of timestamps (floating point; seconds; relative to experiment start) 
        or None. If None, no photodiode timestamps were found in this sync 
        dataset.

    """
    return dataset.get_edges("all", ["stim_photodiode", "photodiode"],
                             "seconds", permissive)
Code example #12
def build_full_NP_behavior_stim_table(behavior_pkl_path, mapping_pkl_path,
                                      replay_pkl_path, sync_path):

    behavior_pkl = pd.read_pickle(behavior_pkl_path)
    mapping_pkl = pd.read_pickle(mapping_pkl_path)
    replay_pkl = pd.read_pickle(replay_pkl_path)

    frame_counts = []
    for p in [behavior_pkl, mapping_pkl, replay_pkl]:
        total_frames = len(p['intervalsms']) + 1 if 'intervalsms' in p \
                else len(p['items']['behavior']['intervalsms']) + 1
        frame_counts.append(total_frames)

    mapping_stim_file = CamStimOnePickleStimFile.factory(mapping_pkl_path)

    sync_dataset = Dataset(sync_path)
    frame_offsets = get_frame_offsets(sync_dataset, frame_counts)

    stim_table_behavior = generate_behavior_stim_table(
        behavior_pkl, sync_dataset, frame_offset=frame_offsets[0])
    stim_table_mapping = generate_mapping_stim_table(
        mapping_stim_file, sync_dataset, frame_offset=frame_offsets[1])
    stim_table_replay = generate_replay_stim_table(
        replay_pkl,
        sync_dataset,
        stim_table_behavior,
        frame_offset=frame_offsets[2])

    # Rearrange columns to make the table a bit more readable; the remaining columns are alphabetical.
    stim_table_behavior = sort_columns(stim_table_behavior, [
        'stimulus_block', 'active', 'stimulus_name', 'Start', 'End',
        'duration', 'start_frame', 'end_frame'
    ])

    stim_table_full = pd.concat(
        [stim_table_behavior, stim_table_mapping, stim_table_replay],
        sort=False)
    stim_table_full.loc[:, 'duration'] = stim_table_full[
        'End'] - stim_table_full['Start']

    return stim_table_full
Code example #13
File: time_sync.py Project: vrhaynes/AllenSDK
class OphysTimeAligner(object):
    def __init__(self,
                 sync_file,
                 scanner=None,
                 dff_file=None,
                 stimulus_pkl=None,
                 eye_video=None,
                 behavior_video=None,
                 long_stim_threshold=LONG_STIM_THRESHOLD):
        self.scanner = scanner if scanner is not None else "SCIVIVO"
        self._dataset = Dataset(sync_file)
        self._keys = get_keys(self._dataset)
        self.long_stim_threshold = long_stim_threshold

        self._monitor_delay = None
        self._clipped_stim_ts_delta = None
        self._clipped_stim_timestamp_values = None

        if dff_file is not None:
            self.ophys_data_length = get_ophys_data_length(dff_file)
        else:
            self.ophys_data_length = None
        if stimulus_pkl is not None:
            self.stim_data_length = get_stim_data_length(stimulus_pkl)
        else:
            self.stim_data_length = None
        if eye_video is not None:
            self.eye_data_length = get_video_length(eye_video)
        else:
            self.eye_data_length = None
        if behavior_video is not None:
            self.behavior_data_length = get_video_length(behavior_video)
        else:
            self.behavior_data_length = None

    @property
    def dataset(self):
        return self._dataset

    @property
    def ophys_timestamps(self):
        """Get the timestamps for the ophys data."""
        ophys_key = self._keys["2p"]
        if self.scanner == "SCIVIVO":
            # Scientifica data looks different than Nikon.
            # http://confluence.corp.alleninstitute.org/display/IT/Ophys+Time+Sync
            times = self.dataset.get_rising_edges(ophys_key, units="seconds")
        elif self.scanner == "NIKONA1RMP":
            # Nikon has a signal that indicates when it started writing to disk
            acquiring_key = self._keys["acquiring"]
            acquisition_start = self._dataset.get_rising_edges(
                acquiring_key, units="seconds")[0]
            ophys_times = self._dataset.get_falling_edges(ophys_key,
                                                          units="seconds")
            times = ophys_times[ophys_times >= acquisition_start]
        else:
            raise ValueError("Invalid scanner: {}".format(self.scanner))

        return times

    @property
    def corrected_ophys_timestamps(self):
        times = self.ophys_timestamps

        delta = 0
        if self.ophys_data_length is not None:
            if len(times) < self.ophys_data_length:
                raise ValueError(
                    "Got too few timestamps ({}) for ophys data length "
                    "({})".format(len(times), self.ophys_data_length))
            elif len(times) > self.ophys_data_length:
                logging.info(
                    "Ophys data of length %s has timestamps of "
                    "length %s, truncating timestamps", self.ophys_data_length,
                    len(times))
                delta = len(times) - self.ophys_data_length
                times = times[:-delta]
        else:
            logging.info("No data length provided for ophys stream")

        return times, delta

    @property
    def stim_timestamps(self):
        stim_key = self._keys["stimulus"]

        return self.dataset.get_falling_edges(stim_key, units="seconds")

    def _get_clipped_stim_timestamps(self):
        timestamps = self.stim_timestamps

        delta = 0
        if self.stim_data_length is not None and \
           self.stim_data_length < len(timestamps):
            stim_key = self._keys["stimulus"]
            rising = self.dataset.get_rising_edges(stim_key, units="seconds")

            # Some versions of camstim caused a spike when the DAQ is first
            # initialized. Remove it.
            if rising[1] - rising[0] > self.long_stim_threshold:
                logging.info("Initial DAQ spike detected from stimulus, "
                             "removing it")
                timestamps = timestamps[1:]

            delta = len(timestamps) - self.stim_data_length
            if delta != 0:
                logging.info(
                    "Stim data of length %s has timestamps of "
                    "length %s", self.stim_data_length, len(timestamps))
        elif self.stim_data_length is None:
            logging.info("No data length provided for stim stream")

        return timestamps, delta

    @property
    def clipped_stim_timestamps(self):
        """
        Return the stimulus timestamps with the erroneous initial spike
        removed (if relevant)

        Returns
        -------
        timestamps: np.ndarray
            An array of stimulus timestamps in seconds with the erroneous
            initial spike removed (the monitor delay is not yet applied)

        delta: int
            Difference between the length of timestamps
            and the number of frames reported in the stimulus
            pickle file, i.e.
            len(timestamps) - len(pkl_file['items']['behavior']['intervalsms'])
        """
        if self._clipped_stim_ts_delta is None:
            (self._clipped_stim_timestamp_values, self._clipped_stim_ts_delta
             ) = self._get_clipped_stim_timestamps()

        return (self._clipped_stim_timestamp_values,
                self._clipped_stim_ts_delta)

    def _get_monitor_delay(self):
        timestamps, delta = self.clipped_stim_timestamps
        photodiode_key = self._keys["photodiode"]
        delay = calculate_monitor_delay(self.dataset, timestamps,
                                        photodiode_key)
        return delay

    @property
    def monitor_delay(self):
        """
        The monitor delay (in seconds) associated with the session
        """
        if self._monitor_delay is None:
            self._monitor_delay = self._get_monitor_delay()
        return self._monitor_delay

    @property
    def corrected_stim_timestamps(self):
        """
        The stimulus timestamps corrected for monitor delay

        Returns
        -------
        timestamps: np.ndarray
            An array of stimulus timestamps in seconds with the monitor delay
            added

        delta: int
            Difference between the length of timestamps and
            the number of frames reported in the stimulus
            pickle file, i.e.
            len(timestamps) - len(pkl_file['items']['behavior']['intervalsms'])

        delay: float
            The monitor delay in seconds
        """
        timestamps, delta = self.clipped_stim_timestamps
        delay = self.monitor_delay

        return timestamps + delay, delta, delay

    @property
    def behavior_video_timestamps(self):
        key = self._keys["behavior_camera"]

        return self.dataset.get_falling_edges(key, units="seconds")

    @property
    def corrected_behavior_video_timestamps(self):
        return corrected_video_timestamps("Behavior video",
                                          self.behavior_video_timestamps,
                                          self.behavior_data_length)

    @property
    def eye_video_timestamps(self):
        key = self._keys["eye_camera"]

        return self.dataset.get_falling_edges(key, units="seconds")

    @property
    def corrected_eye_video_timestamps(self):
        return corrected_video_timestamps("Eye video",
                                          self.eye_video_timestamps,
                                          self.eye_data_length)
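
A hedged sketch of constructing and querying the aligner above; the input file paths are placeholders, and only sync_file is required:

aligner = OphysTimeAligner(
    sync_file="/data/ophys_session/sync.h5",          # hypothetical paths
    scanner="SCIVIVO",
    dff_file="/data/ophys_session/dff.h5",
    stimulus_pkl="/data/ophys_session/stimulus.pkl",
)

ophys_times, ophys_delta = aligner.corrected_ophys_timestamps
stim_times, stim_delta, delay = aligner.corrected_stim_timestamps
print(f"monitor delay: {delay:.4f} s; "
      f"{len(ophys_times)} ophys timestamps, {len(stim_times)} stim timestamps")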
Code example #14
class OphysTimeAligner(object):
    def __init__(self,
                 sync_file,
                 scanner=None,
                 dff_file=None,
                 stimulus_pkl=None,
                 eye_video=None,
                 behavior_video=None,
                 long_stim_threshold=LONG_STIM_THRESHOLD):
        self.scanner = scanner if scanner is not None else "SCIVIVO"
        self._dataset = Dataset(sync_file)
        self._keys = get_keys(self._dataset)
        self.long_stim_threshold = long_stim_threshold
        if dff_file is not None:
            self.ophys_data_length = get_ophys_data_length(dff_file)
        else:
            self.ophys_data_length = None
        if stimulus_pkl is not None:
            self.stim_data_length = get_stim_data_length(stimulus_pkl)
        else:
            self.stim_data_length = None
        if eye_video is not None:
            self.eye_data_length = get_video_length(eye_video)
        else:
            self.eye_data_length = None
        if behavior_video is not None:
            self.behavior_data_length = get_video_length(behavior_video)
        else:
            self.behavior_data_length = None

    @property
    def dataset(self):
        return self._dataset

    @property
    def ophys_timestamps(self):
        """Get the timestamps for the ophys data."""
        ophys_key = self._keys["2p"]
        if self.scanner == "SCIVIVO":
            # Scientifica data looks different than Nikon.
            # http://confluence.corp.alleninstitute.org/display/IT/Ophys+Time+Sync
            times = self.dataset.get_rising_edges(ophys_key, units="seconds")
        elif self.scanner == "NIKONA1RMP":
            # Nikon has a signal that indicates when it started writing to disk
            acquiring_key = self._keys["acquiring"]
            acquisition_start = self._dataset.get_rising_edges(
                acquiring_key, units="seconds")[0]
            ophys_times = self._dataset.get_falling_edges(ophys_key,
                                                          units="seconds")
            times = ophys_times[ophys_times >= acquisition_start]
        else:
            raise ValueError("Invalid scanner: {}".format(self.scanner))

        return times

    @property
    def corrected_ophys_timestamps(self):
        times = self.ophys_timestamps

        delta = 0
        if self.ophys_data_length is not None:
            if len(times) < self.ophys_data_length:
                raise ValueError(
                    "Got too few timestamps ({}) for ophys data length "
                    "({})".format(len(times), self.ophys_data_length))
            elif len(times) > self.ophys_data_length:
                logging.info(
                    "Ophys data of length %s has timestamps of "
                    "length %s, truncating timestamps", self.ophys_data_length,
                    len(times))
                delta = len(times) - self.ophys_data_length
                times = times[:-delta]
        else:
            logging.info("No data length provided for ophys stream")

        return times, delta

    @property
    def stim_timestamps(self):
        stim_key = self._keys["stimulus"]

        return self.dataset.get_falling_edges(stim_key, units="seconds")

    @property
    def corrected_stim_timestamps(self):
        timestamps = self.stim_timestamps

        delta = 0
        if self.stim_data_length is not None and \
           self.stim_data_length < len(timestamps):
            stim_key = self._keys["stimulus"]
            rising = self.dataset.get_rising_edges(stim_key, units="seconds")

            # Some versions of camstim caused a spike when the DAQ is first
            # initialized. Remove it.
            if rising[1] - rising[0] > self.long_stim_threshold:
                logging.info("Initial DAQ spike detected from stimulus, "
                             "removing it")
                timestamps = timestamps[1:]

            delta = len(timestamps) - self.stim_data_length
            if delta != 0:
                logging.info(
                    "Stim data of length %s has timestamps of "
                    "length %s", self.stim_data_length, len(timestamps))
        elif self.stim_data_length is None:
            logging.info("No data length provided for stim stream")

        photodiode_key = self._keys["photodiode"]
        delay = monitor_delay(self.dataset, timestamps, photodiode_key)

        return timestamps + delay, delta, delay

    @property
    def behavior_video_timestamps(self):
        key = self._keys["behavior_camera"]

        return self.dataset.get_falling_edges(key, units="seconds")

    @property
    def corrected_behavior_video_timestamps(self):
        return corrected_video_timestamps("Behavior video",
                                          self.behavior_video_timestamps,
                                          self.behavior_data_length)

    @property
    def eye_video_timestamps(self):
        key = self._keys["eye_camera"]

        return self.dataset.get_falling_edges(key, units="seconds")

    @property
    def corrected_eye_video_timestamps(self):
        return corrected_video_timestamps("Eye video",
                                          self.eye_video_timestamps,
                                          self.eye_data_length)
Code example #15
def get_sync_data(sync_path):

    sync_dataset = SyncDataset(sync_path)
    meta_data = sync_dataset.meta_data
    sample_freq = meta_data['ni_daq']['counter_output_freq']

    # use rising edge for Scientifica, falling edge for Nikon http://confluence.corp.alleninstitute.org/display/IT/Ophys+Time+Sync
    # 2P vsyncs
    vs2p_r = sync_dataset.get_rising_edges('2p_vsync')
    vs2p_f = sync_dataset.get_falling_edges(
        '2p_vsync'
    )  # new sync may be able to do units = 'sec', so conversion can be skipped
    frames_2p = vs2p_r / sample_freq
    vs2p_fsec = vs2p_f / sample_freq

    stimulus_times_no_monitor_delay = sync_dataset.get_falling_edges(
        'stim_vsync') / sample_freq

    if 'lick_times' in meta_data['line_labels']:
        lick_times = sync_dataset.get_rising_edges('lick_1') / sample_freq
    elif 'lick_sensor' in meta_data['line_labels']:
        lick_times = sync_dataset.get_rising_edges('lick_sensor') / sample_freq
    else:
        lick_times = None
    # Default to None when the corresponding line label is absent, so the
    # dictionary below can always be built without a NameError.
    if '2p_trigger' in meta_data['line_labels']:
        trigger = sync_dataset.get_rising_edges('2p_trigger') / sample_freq
    elif 'acq_trigger' in meta_data['line_labels']:
        trigger = sync_dataset.get_rising_edges('acq_trigger') / sample_freq
    else:
        trigger = None
    if 'stim_photodiode' in meta_data['line_labels']:
        a = sync_dataset.get_rising_edges('stim_photodiode') / sample_freq
        b = sync_dataset.get_falling_edges('stim_photodiode') / sample_freq
        stim_photodiode = sorted(list(a) + list(b))
    elif 'photodiode' in meta_data['line_labels']:
        a = sync_dataset.get_rising_edges('photodiode') / sample_freq
        b = sync_dataset.get_falling_edges('photodiode') / sample_freq
        stim_photodiode = sorted(list(a) + list(b))
    else:
        stim_photodiode = None
    if 'cam1_exposure' in meta_data['line_labels']:
        eye_tracking = sync_dataset.get_rising_edges(
            'cam1_exposure') / sample_freq
    elif 'eye_tracking' in meta_data['line_labels']:
        eye_tracking = sync_dataset.get_rising_edges(
            'eye_tracking') / sample_freq
    else:
        eye_tracking = None
    if 'cam2_exposure' in meta_data['line_labels']:
        behavior_monitoring = sync_dataset.get_rising_edges(
            'cam2_exposure') / sample_freq
    elif 'behavior_monitoring' in meta_data['line_labels']:
        behavior_monitoring = sync_dataset.get_rising_edges(
            'behavior_monitoring') / sample_freq
    else:
        behavior_monitoring = None

    sync_data = {
        'ophys_frames': frames_2p,
        'lick_times': lick_times,
        'ophys_trigger': trigger,
        'eye_tracking': eye_tracking,
        'behavior_monitoring': behavior_monitoring,
        'stim_photodiode': stim_photodiode,
        'stimulus_times_no_delay': stimulus_times_no_monitor_delay,
    }

    return sync_data
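
A short usage sketch for get_sync_data; the sync path is a placeholder and the dictionary keys match those built above:

sync_data = get_sync_data("/data/ophys_session/sync.h5")  # hypothetical path

print(f"{len(sync_data['ophys_frames'])} ophys frames, "
      f"{len(sync_data['stimulus_times_no_delay'])} stimulus vsyncs")
if sync_data['lick_times'] is not None:
    print(f"{len(sync_data['lick_times'])} licks detected")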