def compare_camera_timestamps_between_two_probes(sync_right, sync_left):
    """
    sync_left has no square signal
    """
    # using the probe 3a channel map:
    '''
    0: Arduino synchronization signal
    2: 150 Hz camera
    3: 30 Hz camera
    4: 60 Hz camera
    7: Bpod
    11: Frame2TTL
    12 & 13: Rotary Encoder
    15: Audio
    '''

    for cam_code in [2, 3, 4]:

        cam_times_left = ephys_fpga._get_sync_fronts(sync_left, cam_code)['times']
        cam_times_right = ephys_fpga._get_sync_fronts(sync_right, cam_code)['times']

        assert len(cam_times_left) == len(
            cam_times_right), "# time stamps don't match between probes"

        D = abs(np.array(cam_times_left) - np.array(cam_times_right))

        assert max(D) - min(D) < 0.005, \
            'cam_code %s; temporal jitter between probes is large!!' % cam_code

        print('Sync check for cam %s time stamps '
              'of left and right probe passed' % cam_code)

        print('mean = ', np.round(np.mean(D), 6),
              'sec ; std = ', np.round(np.std(D), 6),
              'sec ; max - min = ', np.round(max(D) - min(D), 6), 'sec')
Example #2
def version3B(ses_path, display=True, type=None, tol=2.5):
    """
    From a session path with _spikeglx_sync arrays extracted, locate ephys files for 3B
     probes and output one sync.timestamps.probeN.npy file per acquired probe. By convention
     the reference probe is the one with the most synchronisation pulses.
     Assumes the _spikeglx_sync datasets are already extracted from binary data.
    :param ses_path: session path
    :param type: interpolation type - linear, exact or smooth
    :return: qc_all (bool), list of output file paths
    """
    DEFAULT_TYPE = 'smooth'
    ephys_files = spikeglx.glob_ephys_files(ses_path, bin_exists=False)
    for ef in ephys_files:
        ef['sync'] = alf.io.load_object(ef.path,
                                        'sync',
                                        namespace='spikeglx',
                                        short_keys=True)
        ef['sync_map'] = get_ibl_sync_map(ef, '3B')
    nidq_file = [ef for ef in ephys_files if ef.get('nidq')]
    ephys_files = [ef for ef in ephys_files if not ef.get('nidq')]
    # should have at least 2 probes and only one nidq
    assert (len(nidq_file) == 1)
    nidq_file = nidq_file[0]
    sync_nidq = _get_sync_fronts(nidq_file.sync,
                                 nidq_file.sync_map['imec_sync'])

    qc_all = True
    out_files = []
    for ef in ephys_files:
        sync_probe = _get_sync_fronts(ef.sync, ef.sync_map['imec_sync'])
        sr = _get_sr(ef)
        try:
            assert (sync_nidq.times.size == sync_probe.times.size)
        except AssertionError:
            raise Neuropixel3BSyncFrontsNonMatching(f"{ses_path}")
        # if the qc of the diff finds anomalies, do not attempt to smooth the interp function
        qcdiff = _check_diff_3b(sync_probe)
        if not qcdiff:
            qc_all = False
            type_probe = type or 'exact'
        else:
            type_probe = type or DEFAULT_TYPE
        timestamps, qc = sync_probe_front_times(sync_probe.times,
                                                sync_nidq.times,
                                                sr,
                                                display=display,
                                                type=type_probe,
                                                tol=tol)
        qc_all &= qc
        out_files.extend(_save_timestamps_npy(ef, timestamps, sr))
    return qc_all, out_files
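A usage sketch, assuming the function is exposed as ibllib.ephys.sync_probes.version3B (validate_ttl_test further down calls it that way); the session path is hypothetical.

from pathlib import Path
from ibllib.ephys import sync_probes  # assumed module location

ses_path = Path('/data/subjects/SW01/2020-01-01/001')  # hypothetical session
qc_all, out_files = sync_probes.version3B(ses_path, display=False)
print('probe sync QC passed:', qc_all)
print('wrote:', [str(f) for f in out_files])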
Example #3
def _get_passive_spacers(session_path, sync=None, sync_map=None):
    """
    load and get spacer information, do corr to find spacer timestamps
    returns t_passive_starts, t_starts, t_ends
    """
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path,
                                                        bin_exists=False)
    meta = _load_passive_stim_meta()
    # t_end_ephys = passive.ephysCW_end(session_path=session_path)
    fttl = ephys_fpga._get_sync_fronts(sync, sync_map["frame2ttl"], tmin=None)
    spacer_template = (
        np.array(meta["VISUAL_STIM_0"]["ttl_frame_nums"], dtype=np.float32) /
        FRAME_FS)
    jitter = 3 / FRAME_FS  # allow for 3 screen refresh as jitter
    t_quiet = meta["VISUAL_STIM_0"]["delay_around"]
    spacer_times, _ = _get_spacer_times(spacer_template=spacer_template,
                                        jitter=jitter,
                                        ttl_signal=fttl["times"],
                                        t_quiet=t_quiet)

    # Check correct number of spacers found
    n_exp_spacer = np.sum(np.array(
        meta["STIM_ORDER"]) == 0)  # Hardcoded 0 for spacer
    if n_exp_spacer != np.size(spacer_times) / 2:
        raise ValueError(f"The number of expected spacer ({n_exp_spacer}) "
                         f"is different than the one found on the raw "
                         f"trace ({np.size(spacer_times)/2})")

    spacer_times = np.r_[spacer_times.flatten(), sync["times"][-1]]
    return spacer_times[0], spacer_times[1::2], spacer_times[2::2]
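A sketch of calling the spacer extraction directly; the unpacking follows the return order in the docstring, and the session path is hypothetical.

# Hypothetical call: sync and sync_map are loaded internally when omitted.
t_passive_start, t_starts, t_ends = _get_passive_spacers(
    '/data/subjects/SW01/2020-01-01/001')
print('passive protocol starts at %.2f s with %d stimulus periods'
      % (t_passive_start, len(t_starts)))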
def extract_camera_sync(sync, output_path=None, save=False, chmap=None):
    """
    Extract camera timestamps from the sync matrix
    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace
    :param output_path: where to save the data
    :param save: True/False
    :param chmap: dictionary containing channel indices. Default to constant.
    :return: dictionary containing camera timestamps
    """
    if chmap is None:
        chmap = {
            'trial_start': 0,
            'sample': 1,
            'delay': 2,
            'choice': 3,
            'outcome': 4,
            'opto': 5,
            'right_lever': 6,
            'imec': 7,
            'nosepoke': 22,
            'reward_pump': 21,
            'reward_port': 23,
            'camera': 16
        }

    output_path = Path(output_path)
    if not output_path.exists():
        output_path.mkdir()
    s = _get_sync_fronts(sync, chmap['camera'])
    np.save(output_path / '_Camera.times.npy', s.times[::2])
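A hypothetical call of the saving variant above; the sync object is loaded the same way the CameraQC loader further down does, and the output directory is created by the function if missing.

from ibllib.io.extractors import ephys_fpga  # assumed module location

session_path = '/data/subjects/SW01/2020-01-01/001'  # hypothetical session
sync, _ = ephys_fpga.get_main_probe_sync(session_path)
# writes _Camera.times.npy (rising edges only) into the output directory
extract_camera_sync(sync, output_path='/tmp/alf_out', save=True)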
def compare_bpod_json_with_fpga(sync_test_folder, sync, show_plots=SHOW_PLOTS):
    '''
    sr, sync=get_ephys_data(sync_test_folder)
    '''

    #  get the bpod signal from the jsonable file
    import json
    with open(sync_test_folder + '/bpod/_iblrig_taskData.raw.jsonable') as fid:
        out = json.load(fid)

    ups = out['Events timestamps']['BNC1High']

    assert len(ups) == 500, 'not all pulses detected in bpod!'

    # get the fpga signal from the sync object
    s3 = ephys_fpga._get_sync_fronts(sync, 0)['times'][::2]

    assert len(s3) == 500, 'not all fronts detected in fpga signal!'

    D = np.array(s3) - np.array(ups)

    offset_on = np.mean(D)
    jitter_on = np.std(D)
    ipi_bpod = np.abs(np.diff(ups))  # inter pulse interval = ipi
    ipi_fpga = np.abs(np.diff(s3))

    print('maximal bpod jitter in sec: ',
          np.round(np.max(ipi_bpod) - np.min(ipi_bpod), 6))

    print('maximal fpga jitter in sec: ',
          np.round(np.max(ipi_fpga) - np.min(ipi_fpga), 6))

    print('maximal bpod-fpga in sec: ',
          np.round(np.max(np.abs(D)) - np.min(np.abs(D)), 6))

    print('fpga and bpod signal offset in sec: ', np.round(offset_on, 6))

    print('std of fpga and bpod difference in sec: ', np.round(jitter_on, 6))

    if show_plots:

        plt.figure('wavefronts')
        # s3 holds only the up-front times here, so plot them as markers
        plt.plot(s3,
                 np.ones(len(s3)),
                 linestyle='',
                 marker='+',
                 label='fpga on')
        plt.plot(ups,
                 np.ones(len(ups)),
                 linestyle='',
                 marker='o',
                 label='bpod on')

        plt.legend()
        plt.show()

        plt.figure('histogram of wavefront differences, bpod and fpga')

        plt.hist(np.array(s3) - np.array(ups))
        plt.xlabel('error between fpga fronts and ephys fronts in sec')
        plt.show()
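A sketch of driving this check; get_ephys_data comes from the same test module (per the docstring above) and the folder path is hypothetical.

sync_test_folder = '/data/sync_tests/2020-01-01'  # hypothetical folder
sr, sync = get_ephys_data(sync_test_folder)
compare_bpod_json_with_fpga(sync_test_folder, sync, show_plots=False)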
Example #6
def extract_camera_sync(sync, chmap=None):
    """
    Extract camera timestamps from the sync matrix

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace
    :param chmap: dictionary containing channel indices. Default to constant.
    :return: dictionary containing camera timestamps
    """
    assert (chmap)
    sr = _get_sync_fronts(sync, chmap['right_camera'])
    sl = _get_sync_fronts(sync, chmap['left_camera'])
    sb = _get_sync_fronts(sync, chmap['body_camera'])
    return {
        'right': sr.times[::2],
        'left': sl.times[::2],
        'body': sb.times[::2]
    }
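A sketch of calling the dict-returning variant, reusing the channel map from get_main_probe_sync as the CameraQC loader further down does; the session path is hypothetical.

from ibllib.io.extractors import ephys_fpga  # assumed module location

session_path = '/data/subjects/SW01/2020-01-01/001'  # hypothetical session
sync, chmap = ephys_fpga.get_main_probe_sync(session_path)
cam_times = extract_camera_sync(sync, chmap=chmap)
print({side: times.size for side, times in cam_times.items()})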
Example #7
def extract_task_replay(
        session_path: str,
        sync: dict = None,
        sync_map: dict = None,
        treplay: np.ndarray = None) -> Tuple[pd.DataFrame, pd.DataFrame]:
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path,
                                                        bin_exists=False)

    if treplay is None:
        passivePeriods_df = extract_passive_periods(session_path,
                                                    sync=sync,
                                                    sync_map=sync_map)
        treplay = passivePeriods_df.taskReplay.values

    fttl = ephys_fpga._get_sync_fronts(sync,
                                       sync_map["frame2ttl"],
                                       tmin=treplay[0])
    passiveGabor_df = _extract_passiveGabor_df(fttl, session_path)

    bpod = ephys_fpga._get_sync_fronts(sync, sync_map["bpod"], tmin=treplay[0])
    passiveValve_intervals = _extract_passiveValve_intervals(bpod)

    audio = ephys_fpga._get_sync_fronts(sync,
                                        sync_map["audio"],
                                        tmin=treplay[0])
    passiveTone_intervals, passiveNoise_intervals = _extract_passiveAudio_intervals(
        audio)

    passiveStims_df = np.concatenate([
        passiveValve_intervals, passiveTone_intervals, passiveNoise_intervals
    ],
                                     axis=1)
    columns = [
        "valveOn", "valveOff", "toneOn", "toneOff", "noiseOn", "noiseOff"
    ]
    passiveStims_df = pd.DataFrame(passiveStims_df, columns=columns)
    return (
        passiveGabor_df,
        passiveStims_df,
    )  # _ibl_passiveGabor.table.csv, _ibl_passiveStims.times_table.csv
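A usage sketch: when sync and sync_map are omitted they are loaded from the session path internally (the path below is hypothetical).

# Hypothetical call: extracts the Gabor and stimulus tables for the replay period.
passiveGabor_df, passiveStims_df = extract_task_replay(
    '/data/subjects/SW01/2020-01-01/001')
print(passiveGabor_df.head())
print(passiveStims_df.head())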
Example #8
def plot_sync_channels(sync, sync_map, ax=None):
    # Plot all sync pulses
    if ax is None:
        f, ax = plt.subplots(1, 1)
    for i, device in enumerate(["frame2ttl", "audio", "bpod"]):
        sy = ephys_fpga._get_sync_fronts(
            sync, sync_map[device])  # , tmin=t_start_passive)
        squares(sy["times"],
                sy["polarities"],
                yrange=[0.1 + i, 0.9 + i],
                color="k",
                ax=ax)
Example #9
def version3B(ses_path, display=True, linear=False, tol=2.5):
    """
    From a session path with _spikeglx_sync arrays extracted, locate ephys files for 3B
     probes and output one sync.timestamps.probeN.npy file per acquired probe. By convention
     the reference probe is the one with the most synchronisation pulses.
     Assumes the _spikeglx_sync datasets are already extracted from binary data.
    :param ses_path: session path
    :return: qc_all - True if the synchronization QC passed for all probes
    """
    ephys_files = spikeglx.glob_ephys_files(ses_path)
    for ef in ephys_files:
        ef['sync'] = alf.io.load_object(ef.path,
                                        '_spikeglx_sync',
                                        short_keys=True)
        ef['sync_map'] = get_ibl_sync_map(ef, '3B')
    nidq_file = [ef for ef in ephys_files if ef.get('nidq')]
    ephys_files = [ef for ef in ephys_files if not ef.get('nidq')]
    nprobes = len(ephys_files)
    # should have at least 2 probes and only one nidq
    if nprobes <= 1:
        return True
    assert (len(nidq_file) == 1)
    nidq_file = nidq_file[0]
    sync_nidq = _get_sync_fronts(nidq_file.sync,
                                 nidq_file.sync_map['imec_sync'])

    qc_all = True
    for ef in ephys_files:
        sync_probe = _get_sync_fronts(ef.sync, ef.sync_map['imec_sync'])
        sr = _get_sr(ef)
        assert (sync_nidq.times.size == sync_probe.times.size)
        timestamps, qc = sync_probe_front_times(sync_probe.times,
                                                sync_nidq.times,
                                                sr,
                                                display=display,
                                                linear=linear,
                                                tol=tol)
        qc_all &= qc
        _save_timestamps_npy(ef, timestamps)
    return qc_all
Example #10
def extract_rfmapping(session_path: str,
                      sync: dict = None,
                      sync_map: dict = None,
                      trfm: np.ndarray = None) -> np.ndarray:
    meta = _load_passive_stim_meta()
    mkey = ("VISUAL_STIM_" + {v: k
                              for k, v in meta["VISUAL_STIMULI"].items()
                              }["receptive_field_mapping"])
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path,
                                                        bin_exists=False)
    if trfm is None:
        passivePeriods_df = extract_passive_periods(session_path,
                                                    sync=sync,
                                                    sync_map=sync_map)
        trfm = passivePeriods_df.RFM.values

    fttl = ephys_fpga._get_sync_fronts(sync,
                                       sync_map["frame2ttl"],
                                       tmin=trfm[0],
                                       tmax=trfm[1])

    RF_file = Path().joinpath(session_path, "raw_passive_data",
                              "_iblrig_RFMapStim.raw.bin")
    passiveRFM_frames, RF_ttl_trace = _reshape_RF(RF_file=RF_file,
                                                  meta_stim=meta[mkey])
    rf_id_up, rf_id_dw, RF_n_ttl_expected = _get_id_raisefall_from_analogttl(
        RF_ttl_trace)
    meta[mkey]["ttl_num"] = RF_n_ttl_expected
    rf_times_on_idx = np.where(np.diff(fttl["times"]) < 1)[0]
    rf_times_off_idx = rf_times_on_idx + 1
    RF_times = fttl["times"][np.sort(
        np.concatenate([rf_times_on_idx, rf_times_off_idx]))]
    RF_times_1 = RF_times[0::2]
    # Interpolate times for RF before outputting dataset
    passiveRFM_times = _interpolate_rf_mapping_stimulus(
        idxs_up=rf_id_up,
        idxs_dn=rf_id_dw,
        times=RF_times_1,
        Xq=np.arange(passiveRFM_frames.shape[0]),
        t_bin=1 / FRAME_FS,
    )

    return passiveRFM_times  # _ibl_passiveRFM.times.npy
Example #11
def compare_bpod_json_with_fpga(sync_test_folder, sync, show_plots=SHOW_PLOTS):
    '''
    sr, sync=get_ephys_data(sync_test_folder)
    '''

    #  get the bpod signal from the jsonable file
    import json
    with open(sync_test_folder + '/bpod/_iblrig_taskData.raw.jsonable') as fid:
        out = json.load(fid)

    ups = out['Events timestamps']['BNC1High']

    assert len(ups) == 500, 'not all pulses detected in bpod!'

    # get the fpga signal from the sync object
    s3 = ephys_fpga._get_sync_fronts(sync, 0)['times'][::2]

    assert len(s3) == 500, 'not all fronts detected in fpga signal!'

    IntervalDurationDifferences = np.diff(np.array(s3)) - np.diff(
        np.array(ups))
    R = max(abs(IntervalDurationDifferences))

    print('maximal interval duration difference, fpga - bpod, [sec]:', R)

    if show_plots:

        plt.figure('wavefronts')
        # s3 holds only the up-front times here, so plot them as markers
        plt.plot(s3,
                 np.ones(len(s3)),
                 linestyle='',
                 marker='+',
                 label='fpga on')
        plt.plot(ups,
                 np.ones(len(ups)),
                 linestyle='',
                 marker='o',
                 label='bpod on')

        plt.legend()
        plt.show()

        plt.figure('histogram of wavefront differences, bpod and fpga')

        plt.hist(np.array(s3) - np.array(ups))
        plt.xlabel('error between fpga fronts and ephys fronts in sec')
        plt.show()
Example #12
def validate_ttl_test(ses_path, display=False):
    """
    For a mock session on the Ephys Choice World task, check that the sync channels for all
    devices are properly connected and, if dual probes were used, perform a probe
    synchronization to check that all channels are recorded properly
    :param ses_path: session path
    :param display: show the probe synchronization plot if several probes
    :return: True if tests pass, errors otherwise
    """

    def _single_test(assertion, str_ok, str_ko):
        if assertion:
            _logger.info(str_ok)
            return True
        else:
            _logger.error(str_ko)
            return False

    EXPECTED_RATES_HZ = {'left_camera': 60, 'right_camera': 150, 'body_camera': 30}
    SYNC_RATE_HZ = 1
    MIN_TRIALS_NB = 6

    ok = True
    ses_path = Path(ses_path)
    if not ses_path.exists():
        return False
    rawsync, sync_map = fpga._get_main_probe_sync(ses_path)
    last_time = rawsync['times'][-1]

    # get upgoing fronts for each
    sync = Bunch({})
    for k in sync_map:
        fronts = fpga._get_sync_fronts(rawsync, sync_map[k])
        sync[k] = fronts['times'][fronts['polarities'] == 1]
    wheel = fpga.extract_wheel_sync(rawsync, chmap=sync_map, save=False)

    frame_rates = {'right_camera': np.round(1 / np.median(np.diff(sync.right_camera))),
                   'left_camera': np.round(1 / np.median(np.diff(sync.left_camera))),
                   'body_camera': np.round(1 / np.median(np.diff(sync.body_camera)))}

    # check the camera frame rates
    for lab in frame_rates:
        expect = EXPECTED_RATES_HZ[lab]
        ok &= _single_test(assertion=abs((1 - frame_rates[lab] / expect)) < 0.1,
                           str_ok=f'PASS: {lab} frame rate: {frame_rates[lab]} = {expect} Hz',
                           str_ko=f'FAILED: {lab} frame rate: {frame_rates[lab]} != {expect} Hz')

    # check that the wheel has a minimum rate of activity on both channels
    re_test = abs(1 - sync.rotary_encoder_1.size / sync.rotary_encoder_0.size) < 0.1
    re_test &= len(wheel['re_pos']) / last_time > 5
    ok &= _single_test(assertion=re_test,
                       str_ok="PASS: Rotary encoder", str_ko="FAILED: Rotary encoder")
    # check that the frame 2 ttls has a minimum rate of activity
    ok &= _single_test(assertion=len(sync.frame2ttl) / last_time > 0.2,
                       str_ok="PASS: Frame2TTL", str_ko="FAILED: Frame2TTL")
    # the audio has to have at least one event per trial
    ok &= _single_test(assertion=len(sync.bpod) > len(sync.audio) > MIN_TRIALS_NB,
                       str_ok="PASS: audio", str_ko="FAILED: audio")
    # the bpod has to have at least twice the amount of min trial pulses
    ok &= _single_test(assertion=len(sync.bpod) > MIN_TRIALS_NB * 2,
                       str_ok="PASS: Bpod", str_ko="FAILED: Bpod")
    try:
        # note: tried to depend as little as possible on the extraction code but for the valve...
        behaviour = fpga.extract_behaviour_sync(rawsync, save=False, chmap=sync_map)
        res = behaviour.valve_open.size > 1
    except AssertionError:
        res = False
    # check that the reward valve is actionned at least once
    ok &= _single_test(assertion=res,
                       str_ok="PASS: Valve open", str_ko="FAILED: Valve open not detected")
    _logger.info('ALL CHECKS PASSED !')

    # the imec sync is for 3B Probes only
    if sync.get('imec_sync') is not None:
        ok &= _single_test(assertion=np.all(1 - SYNC_RATE_HZ * np.diff(sync.imec_sync) < 0.1),
                           str_ok="PASS: imec sync", str_ko="FAILED: imec sync")

    # second step is to test that we can make the sync. Assertions are within the sync code
    if sync.get('imec_sync') is not None:
        sync_result = sync_probes.version3B(ses_path, display=display)
    else:
        sync_result = sync_probes.version3A(ses_path, display=display)

    ok &= _single_test(assertion=sync_result, str_ok="PASS: synchronisation",
                       str_ko="FAILED: probe synchronizations threshold exceeded")

    if not ok:
        raise ValueError('FAILED TTL test')
    return ok
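A sketch of invoking the mock-session test (hypothetical path); the function returns True on success and raises ValueError once any check fails.

ok = validate_ttl_test('/data/subjects/SW01/2020-01-01/001', display=False)
print('TTL test passed:', ok)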
Example #13
    def _extract(self,
                 sync=None,
                 chmap=None,
                 video_path=None,
                 display=False,
                 extrapolate_missing=True):
        """
        The raw timestamps are taken from the FPGA. These are the times of the camera's frame TTLs.
        If the pin state file exists, these timestamps are aligned to the video frames using the
        audio TTLs.  Frames missing from the embedded frame count are removed from the timestamps
        array.
        If the pin state file does not exist, the left and right camera timestamps may be aligned
        using the wheel data.
        :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace.
        :param chmap: dictionary containing channel indices. Default to constant.
        :param video_path: an optional path for fetching the number of frames.  If None,
        the video is loaded from the session path.  If an int is provided this is taken to be
        the total number of frames.
        :param display: if True, the audio and GPIO fronts are plotted.
        :param extrapolate_missing: if True, any missing timestamps at the beginning and end of
        the session are extrapolated based on the median frame rate, otherwise they will be NaNs.
        :return: a numpy array of camera timestamps
        """
        fpga_times = extract_camera_sync(sync=sync, chmap=chmap)
        count, (*_,
                gpio) = raw.load_embedded_frame_data(self.session_path,
                                                     self.label)

        if gpio is not None and gpio['indices'].size > 1:
            _logger.info('Aligning to audio TTLs')
            # Extract audio TTLs
            audio = _get_sync_fronts(sync, chmap['audio'])
            _, ts = raw.load_camera_ssv_times(self.session_path, self.label)
            """
            NB: Some of the audio TTLs occur very close together, and are therefore not
            reflected in the pin state.  This function removes those.  Also converts frame times to
            FPGA time.
            """
            gpio, audio, ts = groom_pin_state(gpio, audio, ts, display=display)
            """
            The length of the count and pin state are regularly longer than the length of
            the video file.  Here we assert that the video is either shorter or the same
            length as the arrays, and  we make an assumption that the missing frames are
            right at the end of the video.  We therefore simply shorten the arrays to match
            the length of the video.
            """
            if video_path is None:
                filename = f'_iblrig_{self.label}Camera.raw.mp4'
                video_path = self.session_path.joinpath(
                    'raw_video_data', filename)
            # Permit video_path to be an int (the total frame count) for development and debugging
            length = video_path if isinstance(
                video_path, int) else get_video_length(video_path)
            _logger.debug(f'Number of video frames = {length}')
            if count.size > length:
                count = count[:length]
            else:
                assert length == count.size, 'fewer counts than frames'
            raw_ts = fpga_times[self.label]
            timestamps = align_with_audio(
                raw_ts,
                audio,
                gpio,
                count,
                display=display,
                extrapolate_missing=extrapolate_missing)
        else:
            _logger.warning('Alignment by wheel data not yet implemented')
            timestamps = fpga_times[self.label]

        return timestamps
def extract_behaviour_sync(sync, output_path=None, save=False, chmap=None):
    """
    Extract wheel positions and times from sync fronts dictionary
    :param sync: dictionary 'times', 'polarities' of fronts detected on sync 
    trace for all 16 chans
    :param output_path: where to save the data
    :param save: True/False
    :param chmap: dictionary containing channel index. Default to constant.
    :return: trials dictionary
    """
    if chmap is None:
        chmap = {
            'trial_start': 0,
            'sample': 1,
            'delay': 2,
            'choice': 3,
            'outcome': 4,
            'opto': 5,
            'right_lever': 6,
            'imec': 7,
            'nosepoke': 22,
            'reward_pump': 21,
            'reward_port': 23,
            'camera': 16
        }

    # Get fronts
    trial = _get_sync_fronts(sync, chmap['trial_start'])
    sample = _get_sync_fronts(sync, chmap['sample'])
    delay = _get_sync_fronts(sync, chmap['delay'])
    choice = _get_sync_fronts(sync, chmap['choice'])
    outcome = _get_sync_fronts(sync, chmap['outcome'])
    opto = _get_sync_fronts(sync, chmap['opto'])
    right_lever = _get_sync_fronts(sync, chmap['right_lever'])
    nosepoke = _get_sync_fronts(sync, chmap['nosepoke'])
    reward_pump = _get_sync_fronts(sync, chmap['reward_pump'])
    reward_port = _get_sync_fronts(sync, chmap['reward_port'])

    # Fix for unfinished trials
    if np.count_nonzero(trial['times']) % 2 != 0:
        print('Warning: Unfinished trial, cutting last trial')
        new_end = trial['times'][-2]
        trial = cut_odd_events(new_end, trial)
        sample = cut_odd_events(new_end, sample)
        delay = cut_odd_events(new_end, delay)
        choice = cut_odd_events(new_end, choice)
        outcome = cut_odd_events(new_end, outcome)
        opto = cut_odd_events(new_end, opto)
        right_lever = cut_odd_events(new_end, right_lever)
        nosepoke = cut_odd_events(new_end, nosepoke)
        reward_pump = cut_odd_events(new_end, reward_pump)
        reward_port = cut_odd_events(new_end, reward_port)

    # Assertion QC
    assert np.count_nonzero(
        trial['times']) % 2 == 0, 'ERROR: Uneven trial fronts'

    # Divide into on and off fronts
    trial_on = trial['times'][::2]
    trial_off = trial['times'][1::2]
    sample_on = sample['times'][::2]
    sample_off = sample['times'][1::2]
    delay_on = delay['times'][::2]
    delay_off = delay['times'][1::2]
    choice_on = choice['times'][::2]
    choice_off = choice['times'][1::2]
    outcome_on = outcome['times'][::2]
    outcome_off = outcome['times'][1::2]
    opto_on = opto['times'][::2]
    opto_off = opto['times'][1::2]
    right_lever_on = right_lever['times'][::2]
    right_lever_off = right_lever['times'][1::2]
    nosepoke_on = nosepoke['times'][::2]
    nosepoke_off = nosepoke['times'][1::2]
    reward_pump_on = reward_pump['times'][::2]
    reward_pump_off = reward_pump['times'][1::2]

    # Calculate some trial variables
    trial_trial_on = np.empty(len(trial_on))
    trial_trial_off = np.empty(len(trial_on))
    trial_opto_trial = np.empty(len(trial_on))
    trial_sample_on = np.empty(len(trial_on))
    trial_sample_off = np.empty(len(trial_on))
    trial_delay_on = np.empty(len(trial_on))
    trial_delay_off = np.empty(len(trial_on))
    trial_choice_on = np.empty(len(trial_on))
    trial_choice_off = np.empty(len(trial_on))
    trial_right_lever_on = np.empty(len(trial_on))
    trial_right_lever_off = np.empty(len(trial_on))
    trial_reward_pump_on = np.empty(len(trial_on))
    trial_reward_pump_off = np.empty(len(trial_on))
    trial_completed = np.empty(len(trial_on))
    trial_correct = np.empty(len(trial_on))
    trial_outcome_first = np.empty(len(trial_on))
    trial_outcome_last = np.empty(len(trial_on))
    trial_nosepoke_first = np.empty(len(trial_on))
    trial_nosepoke_last = np.empty(len(trial_on))
    trial_opto_first = np.empty(len(trial_on))
    trial_opto_last = np.empty(len(trial_on))
    trial_completed[:] = np.nan
    trial_correct[:] = np.nan
    trial_outcome_first[:] = np.nan
    trial_outcome_last[:] = np.nan
    trial_nosepoke_first[:] = np.nan
    trial_nosepoke_last[:] = np.nan
    trial_opto_first[:] = np.nan
    trial_opto_last[:] = np.nan
    trial_trial_on[:] = np.nan
    trial_trial_side = ['' for x in range(len(trial_on))]
    trial_opto_event = ['' for x in range(len(trial_on))]
    trial_opto_trial[:] = np.nan
    trial_trial_off[:] = np.nan
    trial_sample_on[:] = np.nan
    trial_sample_off[:] = np.nan
    trial_delay_on[:] = np.nan
    trial_delay_off[:] = np.nan
    trial_choice_on[:] = np.nan
    trial_choice_off[:] = np.nan
    trial_right_lever_on[:] = np.nan
    trial_right_lever_off[:] = np.nan
    trial_reward_pump_on[:] = np.nan
    trial_reward_pump_off[:] = np.nan

    # Empty lists for variables with a variable number of events per trial
    trial_outcome_on = []
    trial_outcome_off = []
    trial_opto_on = []
    trial_opto_off = []
    trial_nosepoke_on = []
    trial_nosepoke_off = []

    # Fill trial vectors
    trial_trial_on = trial_on
    trial_trial_off = trial_off

    # assert len(trial_on) == len(trial_off) == len(sample_on) == len(sample_off), \
    #     'ERROR: Samples and trials dont match!'

    # Fill in trial vectors that require computation, giving 0.001 leeway for
    # some variables
    for t in range(len(trial_on)):

        #Variables that can only have one value per trial
        trial_sample_on[t] = sample_on[np.logical_and(
            sample_on >= trial_on[t], sample_on <= trial_off[t])] if any(
                np.logical_and(sample_on >= trial_on[t],
                               sample_on <= trial_off[t])) else np.nan

        trial_sample_off[t] = sample_off[np.logical_and(
            sample_off >= trial_on[t], sample_off <= trial_off[t])] if any(
                np.logical_and(sample_off >= trial_on[t],
                               sample_off <= trial_off[t])) else np.nan

        trial_delay_on[t] = delay_on[np.logical_and(
            delay_on >= trial_on[t], delay_on <= trial_off[t])] if any(
                np.logical_and(delay_on >= trial_on[t],
                               delay_on <= trial_off[t])) else np.nan

        trial_delay_off[t] = delay_off[np.logical_and(
            delay_off >= trial_on[t], delay_off <= trial_off[t])] if any(
                np.logical_and(delay_off >= trial_on[t],
                               delay_off <= trial_off[t])) else np.nan

        trial_choice_on[t] = choice_on[np.logical_and(
            choice_on >= trial_on[t], choice_on <= trial_off[t])] if any(
                np.logical_and(choice_on >= trial_on[t],
                               choice_on <= trial_off[t])) else np.nan

        trial_choice_off[t] = choice_off[np.logical_and(
            choice_off >= trial_on[t], choice_off <= trial_off[t])] if any(
                np.logical_and(choice_off >= trial_on[t],
                               choice_off <= trial_off[t])) else np.nan

        trial_reward_pump_on[t] = reward_pump_on[np.logical_and(
            reward_pump_on >= trial_on[t],
            reward_pump_on <= trial_off[t])] if any(
                np.logical_and(reward_pump_on >= trial_on[t],
                               reward_pump_on <= trial_off[t])) else np.nan

        trial_reward_pump_off[t] = reward_pump_off[np.logical_and(
            reward_pump_off >= trial_on[t],
            reward_pump_off <= trial_off[t])] if any(
                np.logical_and(reward_pump_off >= trial_on[t],
                               reward_pump_off <= trial_off[t])) else np.nan

        # Variables that can have more than one value per trial
        trial_outcome_on.append(outcome_on[np.logical_and(
            outcome_on >= trial_on[t], outcome_on <= trial_off[t])] if any(
                np.logical_and(outcome_on >= trial_on[t],
                               outcome_on <= trial_off[t])) else np.nan)

        trial_outcome_off.append(outcome_off[np.logical_and(
            outcome_off >= trial_on[t], outcome_off <= trial_off[t])] if any(
                np.logical_and(outcome_off >= trial_on[t],
                               outcome_off <= trial_off[t])) else np.nan)

        trial_opto_on.append(opto_on[np.logical_and(
            opto_on >= trial_on[t], opto_on <= trial_off[t])] if any(
                np.logical_and(opto_on >= trial_on[t], opto_on <= trial_off[t])
            ) else np.nan)

        trial_opto_off.append(opto_off[np.logical_and(
            opto_off >= trial_on[t], opto_off <= trial_off[t])] if any(
                np.logical_and(opto_off >= trial_on[t],
                               opto_off <= trial_off[t])) else np.nan)

        trial_nosepoke_on.append(nosepoke_on[np.logical_and(
            nosepoke_on >= trial_on[t], nosepoke_on <= trial_off[t])] if any(
                np.logical_and(nosepoke_on >= trial_on[t],
                               nosepoke_on <= trial_off[t])) else np.nan)

        trial_nosepoke_off.append(nosepoke_off[np.logical_and(
            nosepoke_off >= trial_on[t], nosepoke_off <= trial_off[t])] if any(
                np.logical_and(nosepoke_off >= trial_on[t],
                               nosepoke_off <= trial_off[t])) else np.nan)

        # Giving 1 ms leeway for syncing pulse error
        # Variables that require logic computation

        if math.isnan(trial_sample_on[t]):
            trial_trial_side[t] = ''
        else:
            trial_trial_side[t] = 'R' if any(
                np.logical_and(
                    right_lever_on >= trial_sample_on[t] - 0.001,
                    right_lever_on <= trial_sample_off[t] + 0.001)) else 'L'
        trial_opto_trial[t] = any(
            np.logical_and(opto_on >= trial_on[t],
                           opto_on <= trial_off[t]))
        if trial_opto_trial[t]:
            if any(
                    np.logical_and(opto_on >= trial_sample_on[t],
                                   opto_on <= trial_sample_off[t])):
                trial_opto_event[t] = 'S'
            if any(
                    np.logical_and(opto_on >= trial_delay_on[t],
                                   opto_on <= trial_delay_off[t])):
                trial_opto_event[t] = 'D'
            if any(
                    np.logical_and(opto_on >= trial_choice_on[t],
                                   opto_on <= trial_choice_off[t])):
                trial_opto_event[t] = 'C'

        # Calculate vector of completed trials
        trial_completed[t] = not math.isnan(trial_choice_on[t])

        # Calculates vector of correct trials
        trial_correct[t] = not math.isnan(trial_reward_pump_on[t])

        # Calculate vectors with the first and last of outcome, opto and nosepoke events
        trial_outcome_first[t] = min(trial_outcome_on[t]) if not np.mean(
            np.isnan(trial_outcome_on[t])) else np.nan
        trial_outcome_last[t] = max(trial_outcome_off[t]) if not np.mean(
            np.isnan(trial_outcome_on[t])) else np.nan
        trial_nosepoke_first[t] = min(trial_nosepoke_on[t]) if not np.mean(
            np.isnan(trial_nosepoke_on[t])) else np.nan
        trial_nosepoke_last[t] = max(trial_nosepoke_off[t]) if not np.mean(
            np.isnan(trial_nosepoke_on[t])) else np.nan
        trial_opto_first[t] = min(trial_opto_on[t]) if not np.mean(
            np.isnan(trial_opto_on[t])) else np.nan
        trial_opto_last[t] = max(trial_opto_off[t]) if not np.mean(
            np.isnan(trial_opto_on[t])) else np.nan

    if save:
        np.save(output_path + '/' + '_trial_on.npy', trial_trial_on)
        np.save(output_path + '/' + '_trial_off.npy', trial_trial_off)
        np.save(output_path + '/' + '_trial_sample_on.npy', trial_sample_on)
        np.save(output_path + '/' + '_trial_sample_off.npy', trial_sample_off)
        np.save(output_path + '/' + '_trial_delay_on.npy', trial_delay_on)
        np.save(output_path + '/' + '_trial_delay_off.npy', trial_delay_off)
        np.save(output_path + '/' + '_trial_choice_on.npy', trial_choice_on)
        np.save(output_path + '/' + '_trial_choice_off.npy', trial_choice_off)
        np.save(output_path + '/' + '_trial_reward_pump_on.npy',
                trial_reward_pump_on)
        np.save(output_path + '/' + '_trial_reward_pump_off.npy',
                trial_reward_pump_off)
        np.save(output_path + '/' + '_trial_outcome_on.npy', trial_outcome_on)
        np.save(output_path + '/' + '_trial_outcome_off.npy',
                trial_outcome_off)
        np.save(output_path + '/' + '_trial_opto_on.npy', trial_opto_on)
        np.save(output_path + '/' + '_trial_opto_off.npy', trial_opto_off)
        np.save(output_path + '/' + '_trial_nosepoke_on.npy',
                trial_nosepoke_on)
        np.save(output_path + '/' + '_trial_nosepoke_off.npy',
                trial_nosepoke_off)
        np.save(output_path + '/' + '_trial_trial_side.npy', trial_trial_side)
        np.save(output_path + '/' + '_trial_opto_trial.npy', trial_opto_trial)
        np.save(output_path + '/' + '_trial_completed.npy', trial_completed)
        np.save(output_path + '/' + '_trial_correct.npy', trial_correct)
        np.save(output_path + '/' + '_trial_outcome_first.npy',
                trial_outcome_first)
        np.save(output_path + '/' + '_trial_outcome_last.npy',
                trial_outcome_last)
        np.save(output_path + '/' + '_trial_nosepoke_first.npy',
                trial_nosepoke_first)
        np.save(output_path + '/' + '_trial_nosepoke_last.npy',
                trial_nosepoke_last)
        np.save(output_path + '/' + '_trial_opto_first.npy', trial_opto_first)
        np.save(output_path + '/' + '_trial_opto_last.npy', trial_opto_last)
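A hypothetical call of the extractor above, with the sync object loaded via get_main_probe_sync and the default channel map left in place.

from ibllib.io.extractors import ephys_fpga  # assumed module location

session_path = '/data/subjects/SW01/2020-01-01/001'  # hypothetical session
sync, _ = ephys_fpga.get_main_probe_sync(session_path)
# saves one .npy file per trial variable into an existing output directory
extract_behaviour_sync(sync, output_path='/tmp/alf_trials', save=True)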
Example #15
def evaluate_camera_sync(d, sync, show_plots=SHOW_PLOTS):

    # d=get_video_stamps_and_brightness(sync_test_folder)
    # sr, sync, rawdata, rawsync=get_ephys_data(sync_test_folder)

    # using the probe 3a channel map:
    '''
    0: Arduino synchronization signal
    2: 150 Hz camera
    3: 30 Hz camera
    4: 60 Hz camera
    7: Bpod
    11: Frame2TTL
    12 & 13: Rotary Encoder
    15: Audio
    '''
    y = {
        '_iblrig_bodyCamera.raw.avi': 3,
        '_iblrig_rightCamera.raw.avi': 4,
        '_iblrig_leftCamera.raw.avi': 2
    }

    s3 = ephys_fpga._get_sync_fronts(sync, 0)  # get arduino sync signal

    for vid in d:
        # threshold brightness time-series of the camera to have it in {-1,1}
        r3 = [1 if x > np.mean(d[vid][0]) else -1 for x in d[vid][0]]
        # fpga cam time stamps
        cam_times = ephys_fpga._get_sync_fronts(sync, y[vid])['times']

        # assuming frames are dropped at the end
        drops = len(cam_times) - len(r3) * 2

        # check if an extremely high number of frames is dropped at the end
        assert len(cam_times) >= len(r3), 'FPGA should be on before camera!'
        assert drops < 500, '%s frames dropped for %s!!!' % (drops, vid)

        # get fronts of video brightness square signal
        diffr3 = np.diff(r3)  # get signal jumps via differentiation
        # use the first len(r3) * 2 fronts; cam_times[:-drops] fails when drops == 0
        fronts_brightness = []
        for i in range(len(diffr3)):
            if diffr3[i] != 0:
                fronts_brightness.append(cam_times[:len(r3) * 2][0::2][i])

        # check if all 500 square pulses are detected
        assert len(fronts_brightness) == len(
            s3['times']), 'Not all square signals detected in %s!' % vid

        # temporal difference between fpga and brightness ups
        D = (np.array(fronts_brightness) - s3['times'])[::2]  # only up fronts

        assert len(D) == 500, \
            'not all 500 pulses were detected by fpga and brightness in %s!' % vid

        print('%s, wave front temporal difference, in sec: '
              'mean = %s, std = %s, max = %s' %
              (vid, np.round(np.mean(abs(D)), 4),
               np.round(np.std(abs(D)), 4), np.round(max(abs(D)), 4)))

        # check that the temporal jitter between fpga and brightness
        # wavefronts is below 200 ms
        assert max(abs(D)) < 0.200, \
            'Jitter between fpga and brightness fronts is large!!'

        if show_plots:

            plt.figure('wavefronts, ' + vid)
            ibllib.plots.squares(s3['times'],
                                 s3['polarities'],
                                 label='fpga square signal',
                                 marker='o')
            plt.plot(cam_times[:len(r3) * 2][0::2],
                     r3,
                     alpha=0.5,
                     label='thresholded video brightness',
                     linewidth=2,
                     marker='x')

            plt.legend()
            plt.title('wavefronts for fpga and brightness of %s' % vid)
            plt.show()

            plt.figure('histogram of front differences, %s' % vid)
            plt.title('histogram of temporal errors of fronts')
            plt.hist(D)
            plt.xlabel('error between fpga fronts and ephys fronts in sec')
            plt.show()
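The commented lines at the top of the function name the intended inputs; a sketch of the full pipeline using those helper names (the folder path is hypothetical).

sync_test_folder = '/data/sync_tests/2020-01-01'  # hypothetical folder
d = get_video_stamps_and_brightness(sync_test_folder)
sr, sync, rawdata, rawsync = get_ephys_data(sync_test_folder)
evaluate_camera_sync(d, sync, show_plots=False)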
Example #16
def event_extraction_and_comparison(sr, sync):

    # it took 8 min to run that for 6 min of data, all 300 ish channels
    # silent channels for Guido's set:
    # [36,75,112,151,188,227,264,303,317,340,379,384]

    # sr,sync=get_ephys_data(sync_test_folder)
    """
    this function first finds the times of square signal fronts in ephys and
    compares them to corresponding ones in the sync signal.
    Iteratively for small data chunks
    """

    _logger.info('starting event_extraction_and_comparison')
    period_duration = 30000  # in observations, 30 kHz
    BATCH_SIZE_SAMPLES = period_duration  # in observations, 30 kHz

    # if the data is needed as well, loop over the file
    # raw data contains raw ephys traces, while raw_sync contains the 16 sync
    # traces

    rawdata, _ = sr.read_samples(0, BATCH_SIZE_SAMPLES)
    _, chans = rawdata.shape

    chan_fronts = {}

    sync_up_fronts = ephys_fpga._get_sync_fronts(sync, 0)['times'][0::2]
    sync_up_fronts = np.array(sync_up_fronts) * sr.fs

    assert len(sync_up_fronts) == 500, 'not all 500 sync pulses were detected'

    for j in range(chans):
        chan_fronts[j] = {}
        chan_fronts[j]['ephys up fronts'] = []

    k = 0

    # assure there is exactly one pulse per cut segment

    for pulse in range(500):  # there are 500 square pulses

        first = int(sync_up_fronts[pulse] - period_duration / 2)
        last = int(first + period_duration / 2)

        if k % 100 == 0:
            print('segment %s of %s' % (k, 500))

        k += 1

        rawdata, rawsync = sr.read_samples(first, last)

        # get fronts for only one valid ephys channel
        obs, chans = rawdata.shape

        i = 0  # assume channel 0 is valid (to be generalized maybe)

        Mean = np.median(rawdata.T[i])
        Std = np.std(rawdata.T[i])

        ups = np.invert(rawdata.T[i] > Mean + 2 * Std)
        up_fronts = []

        # Activity front at least 10 samples long (empirical)

        up_fronts.append(first_occ_index(ups, 1) + first)

        chan_fronts[i]['ephys up fronts'].append(up_fronts)

    return chan_fronts, sync_up_fronts
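A sketch of summarizing the returned fronts against the FPGA sync fronts; sr and sync come from get_ephys_data per the comment in the function body, and the flatten matches the nested lists built above.

import numpy as np

sync_test_folder = '/data/sync_tests/2020-01-01'  # hypothetical folder
sr, sync = get_ephys_data(sync_test_folder)  # per the comment in the function
chan_fronts, sync_up_fronts = event_extraction_and_comparison(sr, sync)
ephys_fronts = np.array(chan_fronts[0]['ephys up fronts']).flatten()
# both are in samples; convert the mismatch to milliseconds via the sampling rate
err_ms = (ephys_fronts - np.array(sync_up_fronts)) / sr.fs * 1000
print('max |front error|: %.3f ms' % np.max(np.abs(err_ms)))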
Example #17
# load session fixtures
settings = rawio.load_settings(session_path)
ses_nb = settings['SESSION_ORDER'][settings['SESSION_IDX']]
path_fixtures = Path(ephys_fpga.__file__).parent.joinpath('ephys_sessions')
fixture = {'pcs': np.load(path_fixtures.joinpath(f'session_{ses_nb}_passive_pcs.npy')),
           'delays': np.load(path_fixtures.joinpath(f'session_{ses_nb}_passive_stimDelays.npy')),
           'ids': np.load(path_fixtures.joinpath(f'session_{ses_nb}_passive_stimIDs.npy'))}

# load general metadata
with open(path_fixtures.joinpath('passive_stim_meta.json'), 'r') as f:
    meta = json.load(f)
t_end_ephys = passive.ephysCW_end(session_path=session_path)
# load stimulus sequence
sync, sync_map = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
fpga_sync = ephys_fpga._get_sync_fronts(sync, sync_map['frame2ttl'])
fttl = ephys_fpga._get_sync_fronts(sync, sync_map['frame2ttl'], tmin=t_end_ephys)


def get_spacers():
    """
    load and get spacer information, do corr to find spacer timestamps
    returns t_passive_starts, t_starts, t_ends
    """
    spacer_template = np.array(meta['VISUAL_STIM_0']['ttl_frame_nums'],
                               dtype=np.float32) / FRAME_FS
    jitter = 3 / FRAME_FS  # allow for 3 screen refresh as jitter
    t_quiet = meta['VISUAL_STIM_0']['delay_around']
    spacer_times, _ = passive.get_spacer_times(spacer_template=spacer_template, jitter=jitter,
                                               ttl_signal=fttl['times'], t_quiet=t_quiet)
Example #18
def extract_replay_debug(
    session_path: str,
    sync: dict = None,
    sync_map: dict = None,
    treplay: np.ndarray = None,
    ax: plt.Axes = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    # Load sessions sync channels, map
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path,
                                                        bin_exists=False)

    if treplay is None:
        passivePeriods_df = extract_passive_periods(session_path,
                                                    sync=sync,
                                                    sync_map=sync_map)
        treplay = passivePeriods_df.taskReplay.values

    if ax is None:
        f, ax = plt.subplots(1, 1)

    f = ax.figure
    f.suptitle("/".join(str(session_path).split("/")[-5:]))
    plot_sync_channels(sync=sync, sync_map=sync_map, ax=ax)

    passivePeriods_df = extract_passive_periods(session_path,
                                                sync=sync,
                                                sync_map=sync_map)
    treplay = passivePeriods_df.taskReplay.values

    plot_passive_periods(passivePeriods_df, ax=ax)

    fttl = ephys_fpga._get_sync_fronts(sync,
                                       sync_map["frame2ttl"],
                                       tmin=treplay[0])
    passiveGabor_df = _extract_passiveGabor_df(fttl, session_path)
    plot_gabor_times(passiveGabor_df, ax=ax)

    bpod = ephys_fpga._get_sync_fronts(sync, sync_map["bpod"], tmin=treplay[0])
    passiveValve_intervals = _extract_passiveValve_intervals(bpod)
    plot_valve_times(passiveValve_intervals, ax=ax)

    audio = ephys_fpga._get_sync_fronts(sync,
                                        sync_map["audio"],
                                        tmin=treplay[0])
    passiveTone_intervals, passiveNoise_intervals = _extract_passiveAudio_intervals(
        audio)
    plot_audio_times(passiveTone_intervals, passiveNoise_intervals, ax=ax)

    passiveStims_df = np.concatenate([
        passiveValve_intervals, passiveTone_intervals, passiveNoise_intervals
    ],
                                     axis=1)
    columns = [
        "valveOn", "valveOff", "toneOn", "toneOff", "noiseOn", "noiseOff"
    ]
    passiveStims_df = pd.DataFrame(passiveStims_df, columns=columns)

    return (
        passiveGabor_df,
        passiveStims_df,
    )  # _ibl_passiveGabor.table.csv, _ibl_passiveStims.table.csv
Example #19
    def load_data(self,
                  download_data: bool = None,
                  extract_times: bool = False,
                  load_video: bool = True) -> None:
        """Extract the data from raw data files
        Extracts all the required task data from the raw data files.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param download_data: if True, any missing raw data is downloaded via ONE.
        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        if download_data is not None:
            self.download_data = download_data
        if self.eid and self.one and not isinstance(self.one, OneOffline):
            self._ensure_required_data()
        _log.info('Gathering data for QC')

        # Get frame count and pin state
        self.data['count'], self.data['pin_state'] = \
            raw.load_embedded_frame_data(self.session_path, self.side, raw=True)

        # Load the audio and raw FPGA times
        if self.type == 'ephys':
            sync, chmap = ephys_fpga.get_main_probe_sync(self.session_path)
            audio_ttls = ephys_fpga._get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.side]
        else:
            bpod_data = raw.load_data(self.session_path)
            _, audio_ttls = raw.load_bpod_fronts(self.session_path, bpod_data)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            self.data['timestamps'] = alfio.load_object(
                alf_path, f'{self.side}Camera')['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, labels=self.side)
            if self.type == 'ephys':
                kwargs = {**kwargs, 'sync': sync, 'chmap': chmap}  # noqa
            outputs, _ = extract_all(self.session_path,
                                     self.type,
                                     save=False,
                                     **kwargs)
            self.data['timestamps'] = outputs[f'{self.side}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            self.data['wheel'] = alfio.load_object(alf_path, 'wheel')
        except ALFObjectNotFound:
            # Extract from raw data
            if self.type == 'ephys':
                wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
            else:
                wheel_data = training_wheel.get_wheel_position(
                    self.session_path)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find short period of wheel motion for motion correlation.  For speed start with the
        # first 2 minutes (nearly always enough), extract wheel movements and pick one.
        # TODO Pick movement towards the end of the session (but not right at the end as some
        #  are extrapolated).  Make sure the movement isn't too long.
        if data_for_keys(
                wheel_keys,
                self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(
                self.data['wheel'])

        # Load Bonsai frame timestamps
        try:
            ssv_times = raw.load_camera_ssv_times(self.session_path, self.side)
            self.data['bonsai_times'], self.data['camera_times'] = ssv_times
        except AssertionError:
            _log.warning('No Bonsai video timestamps file found')

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()
Example #20
def compare_bpod_json_with_fpga(sync_test_folder, sync, show_plots=SHOW_PLOTS):
    '''
    sr, sync=get_ephys_data(sync_test_folder)
    '''

    #  get the bpod signal from the jsonable file
    import json
    with open(sync_test_folder + '/bpod/_iblrig_taskData.raw.jsonable') as fid:
        out = json.load(fid)

    ins = out['Events timestamps']['BNC1High']
    outs = out['Events timestamps']['BNC1Low']

    assert len(ins) == 500, 'not all pulses detected in bpod!'
    assert len(ins) == len(outs), 'not all fronts detected in bpod signal!'

    # get the fpga signal from the sync object
    s3 = ephys_fpga._get_sync_fronts(sync, 0)  # 3b channel map

    assert len(s3['times']) == 1000, 'not all fronts detected in fpga signal!'

    offset_on = np.mean(np.array(s3['times'][1::2]) -
                        np.array(outs))  # get delay
    offset_off = np.mean(np.array(s3['times'][0::2]) - np.array(ins))

    jitter_on = np.std(np.array(s3['times'][1::2]) -
                       np.array(outs))  # get jitter
    jitter_off = np.std(np.array(s3['times'][0::2]) - np.array(ins))

    inter_pulse_interval_bpod = np.abs(np.array(ins) - np.array(outs))
    inter_pulse_interval_fpga = np.abs(
        np.array(s3['times'][1::2]) - np.array(s3['times'][0::2]))

    print(
        'maximal bpod jitter in sec: ',
        np.round(
            np.max(inter_pulse_interval_bpod) -
            np.min(inter_pulse_interval_bpod), 6))
    print(
        'maximal fpga jitter in sec: ',
        np.round(
            np.max(inter_pulse_interval_fpga) -
            np.min(inter_pulse_interval_fpga), 6))
    print(
        'maximal bpod-fpga in sec: ',
        np.round(
            np.max(np.abs(np.array(s3['times'][1::2]) - np.array(outs))) -
            np.min(np.abs(np.array(s3['times'][1::2]) - np.array(outs))), 6))

    print(
        'The fpga 500 ms square signal and the bpod 500 ms square signal are offset',
        'by %s sec and the difference between them has std %s sec' %
        (np.round(np.mean([offset_on, offset_off]), 6),
         np.round(np.mean([jitter_on, jitter_off]), 6)))

    if show_plots:

        plt.figure('wavefronts')
        plt.plot(s3['times'], s3['polarities'], label='fpga')
        plt.plot(ins,
                 np.ones(len(ins)),
                 linestyle='',
                 marker='o',
                 label='bpod on')
        plt.plot(outs,
                 np.ones(len(outs)),
                 linestyle='',
                 marker='x',
                 label='bpod off')
        plt.legend()
        plt.show()

        plt.figure('histogram of wavefront differences, bpod and fpga')

        plt.hist(np.array(s3['times'][1::2]) - np.array(outs))
        plt.xlabel('error between fpga fronts and ephys fronts in sec')
        plt.show()