Example #1
def get_DLC(eid, video_type):
    '''Load DLC traces for a given session and video type.

    :param eid: A session eid
    :param video_type: string in 'left', 'right', 'body'
    :return: array of times and dict with dlc points
             as keys and x,y coordinates as values,
             for each frame id
    '''
    one = ONE()
    # make sure the DLC traces and camera timestamps are downloaded locally
    one.load(eid, dataset_types=['camera.dlc', 'camera.times'])
    alf_path = one.path_from_eid(eid) / 'alf'
    cam0 = alf.io.load_object(alf_path,
                              '%sCamera' % video_type,
                              namespace='ibl')
    Times = cam0['times']
    cam = cam0['dlc']
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])
    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])

    return Times, XYs
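
# Usage sketch (the eid below is illustrative; any session with DLC data works):
times, xys = get_DLC('6c6983ef-7383-4989-9183-32b1a300d17a', 'right')
x_frame_500 = xys['nose_tip'][0][500]  # NaN where DLC likelihood < 0.9
t_frame_500 = times[500]               # timestamp of frame 500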
Example #2
class TestOneOffline(unittest.TestCase):
    def setUp(self) -> None:
        # init: create a temp directory and copy the fixtures
        init_cache_file = Path(__file__).parent.joinpath(
            'fixtures', '.one_cache.parquet')

        # Create a temporary directory
        self.test_dir = tempfile.TemporaryDirectory()

        cache_dir = Path(self.test_dir.name)
        shutil.copyfile(init_cache_file,
                        cache_dir.joinpath(init_cache_file.name))

        # test the constructor
        self.one = ONE(offline=True)
        self.assertTrue(self.one._cache.shape[1] == 14)

        self.eid = 'cf264653-2deb-44cb-aa84-89b82507028a'

    def test_one_offline(self) -> None:
        # test the load with download_only=True so it returns only file paths
        self.one.list(self.eid)
        dtypes = [
            '_spikeglx_sync.channels', '_spikeglx_sync.polarities',
            '_spikeglx_sync.times', '_iblrig_taskData.raw',
            '_iblrig_taskSettings.raw', 'ephysData.raw.meta', 'camera.times',
            'ephysData.raw.wiring'
        ]
        self.one.load(self.eid,
                      dataset_types=dtypes,
                      dclass_output=False,
                      download_only=True,
                      offline=False)

    def test_path_eid(self):
        """Test `path_from_eid` and `eid_from_path` methods"""
        eid = 'cf264653-2deb-44cb-aa84-89b82507028a'
        # path from eid
        session_path = self.one.path_from_eid(eid)
        self.assertEqual(session_path.parts[-3:],
                         ('clns0730', '2018-08-24', '002'))
        # eid from path
        self.assertEqual(eid, self.one.eid_from_path(session_path))

    def tearDown(self) -> None:
        self.test_dir.cleanup()
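
# A minimal sketch for running the test case above with the standard unittest runner:
if __name__ == '__main__':
    unittest.main()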
Example #3
def get_DLC(eid, video_type):
    '''Load DLC traces for a given session and video type.

    :param eid: A session eid
    :param video_type: string in 'left', 'right', 'body'
    :return: array of times and dict with dlc points
             as keys and x,y coordinates as values,
             for each frame id

    Example:

    eid = '6c6983ef-7383-4989-9183-32b1a300d17a'
    video_type = 'right'

    Times, XYs = get_DLC(eid, video_type)

    # get for frame 500 the x coordinate of the nose
    # and the time stamp:

    x_frame_500 = XYs['nose_tip'][0][500]
    t_frame_500 = Times[500]
    '''

    one = ONE()
    alf_path = one.path_from_eid(eid) / 'alf'
    cam0 = alf.io.load_object(alf_path,
                              '%sCamera' % video_type,
                              namespace='ibl')
    Times = cam0['times']
    cam = cam0['dlc']
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])
    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])

    return Times, XYs
Example #4
File: one.py Project: k1o0/ibllib
def load_spike_sorting(eid, one=None, dataset_types=None):
    """
    From an eid, hits the Alyx database and downloads a standard default set of dataset types
    From a local session Path (pathlib.Path), loads a standard default set of dataset types
     to perform analysis:
        'clusters.channels',
        'clusters.depths',
        'clusters.metrics',
        'spikes.clusters',
        'spikes.times',
        'probes.description'
    :param eid: experiment UUID or pathlib.Path of the local session
    :param one:
    :param dataset_types: additional spikes/clusters objects to add to the standard default list
    :return: spikes, clusters (dict of bunch, 1 bunch per probe)
    """
    if isinstance(eid, Path):
        return _load_spike_sorting_local(eid)
    if not one:
        one = ONE()
    # This is a first draft, no safeguard, no error handling and a draft dataset list.
    session_path = one.path_from_eid(eid)
    if not session_path:
        print("no session path")
        return (None, None), 'no session path'

    dtypes_default = [
        'clusters.channels',
        'clusters.depths',
        'clusters.metrics',
        'spikes.clusters',
        'spikes.times',
        'probes.description'
    ]
    if dataset_types is None:
        dtypes = dtypes_default
    else:
        #  Append extra optional DS
        dtypes = list(set(dataset_types + dtypes_default))

    one.load(eid, dataset_types=dtypes, download_only=True)
    return _load_spike_sorting_local(session_path)
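
# Usage sketch: the function accepts either an eid string or a local session path
# (the path below is hypothetical and only illustrates the calling convention).
from pathlib import Path
spikes, clusters = load_spike_sorting(
    Path('/path/to/Subjects/clns0730/2018-08-24/002'))
for probe_label in spikes:
    print(probe_label, len(spikes[probe_label]['times']), 'spike times')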
Example #5
def download_raw_video(eid, cameras=None):
    """
    Downloads the raw video from FlatIron or cache dir.
    This allows you to download just one of the
    three videos
    :param cameras: the specific camera to load
    (i.e. 'left', 'right', or 'body') If None all
    three videos are downloaded.
    :return: the file path(s) of the raw videos
    """
    one = ONE()
    if cameras:
        cameras = [cameras] if isinstance(cameras, str) else cameras
        cam_files = ['_iblrig_{}Camera.raw.mp4'.format(cam) for cam in cameras]
        datasets = one._alyxClient.get('sessions/' +
                                       eid)['data_dataset_session_related']
        urls = [ds['data_url'] for ds in datasets if ds['name'] in cam_files]
        cache_dir = one.path_from_eid(eid).joinpath('raw_video_data')
        if not os.path.exists(str(cache_dir)):
            os.mkdir(str(cache_dir))
        else:  # Check if file already downloaded
            # cam_files = [fi[:-4] for fi in cam_files]  # Remove ext
            filenames = [
                f for f in os.listdir(str(cache_dir))
                if any([cam in f for cam in cam_files])
            ]
            if filenames:
                return [cache_dir.joinpath(file) for file in filenames]

        files = http_download_file_list(urls,
                                        username=one._par.HTTP_DATA_SERVER_LOGIN,
                                        password=one._par.HTTP_DATA_SERVER_PWD,
                                        cache_dir=str(cache_dir))

        return files

    else:
        return one.load(eid, ['_iblrig_Camera.raw'], download_only=True)
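
# Usage sketch (illustrative eid): fetch only the left camera video and get the
# local file path(s) back.
video_paths = download_raw_video('cf264653-2deb-44cb-aa84-89b82507028a', cameras='left')
print(video_paths)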
Example #6
def load_spike_sorting(eid, one=None, dataset_types=None):
    """
    From an eid, hits the Alyx database and downloads a standard set of dataset types to perform
    analysis.
    :param eid:
    :param dataset_types: additional spikes/clusters objects to add to the standard list
    :return:
    """
    if not one:
        one = ONE()
    # This is a first draft, no safeguard, no error handling and a draft dataset list.
    session_path = one.path_from_eid(eid)
    dtypes = [
        'clusters.channels',
        'clusters.depths',
        'clusters.metrics',
        'spikes.clusters',
        'spikes.times',
        'probes.description',
    ]
    if dataset_types:
        dtypes = list(set(dataset_types + dtypes))

    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    probes = alf.io.load_object(session_path.joinpath('alf'), 'probes')
    spikes = {}
    clusters = {}
    for i, _ in enumerate(probes['description']):
        probe_path = session_path.joinpath('alf',
                                           probes['description'][i]['label'])
        cluster = alf.io.load_object(probe_path, object='clusters')
        spike = alf.io.load_object(probe_path, object='spikes')
        label = probes['description'][i]['label']
        clusters[label] = cluster
        spikes[label] = spike

    return spikes, clusters
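
# Usage sketch (illustrative eid): spikes and clusters come back as one bunch per
# probe label, e.g. 'probe00' / 'probe01'.
spikes, clusters = load_spike_sorting('614e1937-4b24-4ad3-9055-c8253d089919')
for label, spk in spikes.items():
    print(label, spk['times'].size, 'spikes')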
Example #7
def get_ME(eid, video_type):

    #video_type = 'left'
    one = ONE()
    dataset_types = ['camera.ROIMotionEnergy', 'camera.times']
    a = one.list(eid, 'dataset-types')
    # for newer ibllib versions do [x['dataset_type'] for x in a]
    #    if not all([(u in [x['dataset_type'] for x in a]) for u in dataset_types]):
    #        print('not all data available')
    #        return

    one.load(eid, dataset_types=dataset_types)
    local_path = one.path_from_eid(eid)
    alf_path = local_path / 'alf'

    cam0 = alf.io.load_object(alf_path,
                              '%sCamera' % video_type,
                              namespace='ibl')

    ME = np.load(alf_path / f'{video_type}Camera.ROIMotionEnergy.npy')

    Times = cam0['times']

    return Times, ME
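
# Usage sketch (illustrative eid): the motion energy trace and the camera timestamps
# have one entry per video frame.
times, me = get_ME('6c6983ef-7383-4989-9183-32b1a300d17a', 'left')
print(times.shape, me.shape)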
Example #8
def get_dlc_XYs(eid, video_type):

    #video_type = 'left'
    one = ONE()
    dataset_types = ['camera.dlc', 'camera.times']
    a = one.list(eid, 'dataset-types')
    # for newer ibllib versions do [x['dataset_type'] for x in a]
    #    if not all([(u in [x['dataset_type'] for x in a]) for u in dataset_types]):
    #        print('not all data available')
    #        return

    one.load(eid, dataset_types=dataset_types)  #clobber=True # force download
    local_path = one.path_from_eid(eid)
    alf_path = local_path / 'alf'

    cam0 = alf.io.load_object(alf_path,
                              '%sCamera' % video_type,
                              namespace='ibl')

    Times = cam0['times']

    cam = cam0['dlc']
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])

    # Set values to nan if likelihood is too low # for pqt: .to_numpy()
    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])

    return Times, XYs
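
# Usage sketch (illustrative eid): XYs maps each DLC point name to a (2, n_frames)
# array of x/y coordinates, with low-likelihood frames set to NaN.
times, xys = get_dlc_XYs('6c6983ef-7383-4989-9183-32b1a300d17a', 'left')
print(sorted(xys.keys()))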
Example #9
_, tspi, xspi = overlay_spikes(eqc_butt, spikes, clusters, channels)
overlay_spikes(eqc_dest, spikes, clusters, channels)
overlay_spikes(eqc_ks2, spikes, clusters, channels)

# Do the driftmap
driftmap(spikes['times'], spikes['depths'], t_bin=0.1, d_bin=5, ax=axes[1])

##
import alf.io

eid = dsets[0]['session'][-36:]
tdsets = one.alyx.rest('datasets',
                       'list',
                       session=eid,
                       django='name__icontains,trials.')
one.download_datasets(tdsets)
trials = alf.io.load_object(one.path_from_eid(eid).joinpath('alf'), 'trials')

rewards = trials['feedback_times'][trials['feedbackType'] == 1]

##

rewards = trials['feedback_times'][trials['feedbackType'] == 1]

## do drift map
fig, ax = plt.subplots()
driftmap(spikes['times'], spikes['depths'], t_bin=0.1, d_bin=5, ax=ax)
from ibllib.plots import vertical_lines

vertical_lines(rewards, ymin=0, ymax=3800, ax=ax)
Example #10
one = ONE()

EID = '15f742e1-1043-45c9-9504-f1e8a53c1744'
REGION = 'SNr'
PROBE = 'probe01'
PRE_TIME = 0.6
POST_TIME = -0.1
DECODER = 'bayes-multinomial'
ITERATIONS = 1000
DATA_PATH, FIG_PATH, SAVE_PATH = paths()
FIG_PATH = join(FIG_PATH, 'Decoding', 'Sessions', DECODER)

# %%
# Load in data
spikes, clusters, channels = bbone.load_spike_sorting_with_channel(EID, aligned=True, one=one)
ses_path = one.path_from_eid(EID)
trials = alf.io.load_object(join(ses_path, 'alf'), 'trials')

# Get trial vectors
incl_trials = (trials.probabilityLeft == 0.8) | (trials.probabilityLeft == 0.2)
trial_times = trials.stimOn_times[incl_trials]
probability_left = trials.probabilityLeft[incl_trials]
trial_blocks = (trials.probabilityLeft[incl_trials] == 0.2).astype(int)

# Get clusters in this brain region
region_clusters = combine_layers_cortex(clusters[PROBE]['acronym'])
clusters_in_region = clusters[PROBE].metrics.cluster_id[region_clusters == REGION]

# Select spikes and clusters
spks_region = spikes[PROBE].times[np.isin(spikes[PROBE].clusters, clusters_in_region)]
clus_region = spikes[PROBE].clusters[np.isin(spikes[PROBE].clusters,
                                              clusters_in_region)]
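
# Hedged continuation sketch (not part of the original snippet): count spikes per trial
# in the [t - PRE_TIME, t + POST_TIME] window around stimulus onset with plain numpy,
# the kind of feature matrix a decoder such as DECODER would consume.
spike_counts = np.array([
    np.sum((spks_region >= t - PRE_TIME) & (spks_region <= t + POST_TIME))
    for t in trial_times
])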
Example #11
print("Will try to compare {} data sets".format(len(eids)))

probes = ["probe00", "probe00", "probe00", "probe00", "probe01", "probe01", "probe00", "probe01", "probe00", "probe01", "probe00", "probe01", "probe00", "probe00", "probe00", "probe00", "probe00", "probe00", "probe00"]
bad_eids = ['ee40aece-cffd-4edb-a4b6-155f158c666a', 'db4df448-e449-4a6f-a0e7-288711e7a75a', '0f25376f-2b78-4ddc-8c39-b6cdbe7bf5b9']
session_rank = [4, 1, 1, 1, 4, 3, 2, 3, 4, 4, 4, 1, 5, 2, 1, 3, 1, 5, 2]
ss = []
one = ONE()

assert len(eids) == len(probes) == len(session_rank)
metric_list = []
lab_list = []
for eid, probe, s in zip(eids, probes, session_rank):
    print(eid)
    if eid in bad_eids: continue
    session_path = one.path_from_eid(eid)
    if not session_path:
        print(session_path)
        print("no session path")
        continue

    _ = one.load(eid, dataset_types='clusters.metrics', download_only=True)

    try:
        _ = alf.io.load_object(session_path.joinpath('alf'), 'probes')
    except FileNotFoundError:
        print(session_path.joinpath('alf'))
        print("no probes")
        continue

    probe_path = session_path.joinpath('alf', probe)
Example #12
def plot_rms(eid, probe_label):

    # https://int-brain-lab.github.io/iblenv/notebooks_external/docs_get_rms_data.html

    plt.ion()

    # instantiate ONE
    one = ONE()

    # Specify subject, date and probe we are interested in
    #    subject = 'CSHL049'
    #    date = '2020-01-08'
    #    sess_no = 1
    #    probe_label = 'probe00'
    #    eid = one.search(subject=subject, date=date, number=sess_no)[0]

    # Specify the dataset types of interest
    dtypes = [
        '_iblqc_ephysTimeRms.rms', '_iblqc_ephysTimeRms.timestamps',
        'channels.rawInd', 'channels.localCoordinates'
    ]

    # Download the data and get paths to downloaded data
    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    ephys_path = one.path_from_eid(eid).joinpath('raw_ephys_data', probe_label)
    alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)

    session_name = '_'.join(str(ephys_path).split('/')[5:10])
    # Index of good recording channels along probe
    chn_inds = np.load(alf_path.joinpath('channels.rawInd.npy'))
    # Position of each recording channel along probe
    chn_pos = np.load(alf_path.joinpath('channels.localCoordinates.npy'))
    # Get range for y-axis
    depth_range = [np.min(chn_pos[:, 1]), np.max(chn_pos[:, 1])]

    # RMS data associated with AP band of data
    rms_ap = alf.io.load_object(ephys_path,
                                'ephysTimeRmsAP',
                                namespace='iblqc')
    rms_ap_data = 20 * np.log10(
        rms_ap['rms'][:, chn_inds] * 1e6)  # convert to dB relative to 1 uV

    #    # Median subtract to clean up the data
    #    median = np.mean(np.apply_along_axis(lambda x: np.median(x), 1, rms_ap_data))
    #    # Add back the median so that the actual values in uV remain correct
    #    rms_ap_data_median = np.apply_along_axis(lambda x: x - np.median(x), 1, rms_ap_data) + median

    # Get levels for colour bar and x-axis
    ap_levels = np.quantile(rms_ap_data, [0.1, 0.9])
    ap_time_range = [rms_ap['timestamps'][0], rms_ap['timestamps'][-1]]

    # RMS data associated with LFP band of data
    rms_lf = alf.io.load_object(ephys_path,
                                'ephysTimeRmsLF',
                                namespace='iblqc')
    rms_lf_data = rms_lf['rms'][:, chn_inds] * 1e6  # convert to uV
    # Median subtract to clean up the data
    #    median = np.mean(np.apply_along_axis(lambda x: np.median(x), 1, rms_lf_data))
    #    rms_lf_data_median = np.apply_along_axis(lambda x: x - np.median(x), 1, rms_lf_data) + median

    lf_levels = np.quantile(rms_lf_data, [0.1, 0.9])
    lf_time_range = [rms_lf['timestamps'][0], rms_lf['timestamps'][-1]]

    # Create figure
    fig, ax = plt.subplots(2, 1, figsize=(6, 8))
    # Plot the AP rms data
    ax0 = ax[0]
    #    rms_ap_plot = ax0.imshow(rms_ap_data.T, extent=np.r_[ap_time_range, depth_range],
    #                             cmap='plasma', vmin=ap_levels[0], vmax=ap_levels[1], origin='lower')
    rms_ap_plot = ax0.imshow(rms_ap_data.T,
                             extent=np.r_[ap_time_range, depth_range],
                             cmap='plasma',
                             vmin=0,
                             vmax=100,
                             origin='lower')

    cbar_ap = fig.colorbar(rms_ap_plot, ax=ax0)
    cbar_ap.set_label('AP RMS (uV)')
    ax0.set_xlabel('Time (s)')
    ax0.set_ylabel('Depth along probe (um)')
    ax0.set_title('RMS of AP band')

    # Plot the LFP rms data
    ax1 = ax[1]
    #    rms_lf_plot = ax1.imshow(rms_lf_data.T, extent=np.r_[lf_time_range, depth_range],
    #                             cmap='inferno', vmin=lf_levels[0], vmax=lf_levels[1], origin='lower')

    rms_lf_plot = ax1.imshow(rms_lf_data.T,
                             extent=np.r_[lf_time_range, depth_range],
                             cmap='inferno',
                             vmin=0,
                             vmax=1500,
                             origin='lower')
    cbar_lf = fig.colorbar(rms_lf_plot, ax=ax1)
    cbar_lf.set_label('LFP RMS (uV)')
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('Depth along probe (um)')
    ax1.set_title('RMS of LFP band')

    plt.suptitle('%s_%s \n %s' % (eid, probe_label, session_name))
    plt.savefig('/home/mic/saturation_analysis/rms_plots/%s_%s.png' %
                (eid, probe_label))
    plt.show()
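
# Usage sketch (illustrative eid and probe label): generates and saves the AP/LFP RMS figure.
plot_rms('614e1937-4b24-4ad3-9055-c8253d089919', 'probe00')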
Example #13
"""
# Author: Olivier Winter

import numpy as np

import ibllib.atlas as atlas
from oneibl.one import ONE
import brainbox.io.one as bbone
import alf.io

# === Parameters section (edit) ===
eid = '614e1937-4b24-4ad3-9055-c8253d089919'
probe_label = 'probe00'
# === Code (do not edit) ===
ba = atlas.AllenAtlas(25)
one = ONE(base_url="https://alyx.internationalbrainlab.org")
one.path_from_eid(eid)
traj = one.alyx.rest('trajectories',
                     'list',
                     session=eid,
                     provenance='Histology track',
                     probe=probe_label)[0]
channels = bbone.load_channel_locations(eid=eid, one=one, probe=probe_label)

picks = one.alyx.rest('insertions', 'read', id=traj['probe_insertion'])['json']
picks = np.array(picks['xyz_picks']) / 1e6
ins = atlas.Insertion.from_dict(traj)

cax = ba.plot_tilted_slice(xyz=picks, axis=1, volume='image')
cax.plot(picks[:, 0] * 1e6, picks[:, 2] * 1e6)
cax.plot(channels[probe_label].x * 1e6, channels[probe_label].z * 1e6, 'g*')
# Specify subject, date and probe we are interested in
subject = 'CSHL049'
date = '2020-01-08'
sess_no = 1
probe_label = 'probe00'
eid = one.search(subject=subject, date=date, number=sess_no)[0]

# Specify the dataset types of interest
dtypes = ['_iblqc_ephysSpectralDensity.freqs',
          '_iblqc_ephysSpectralDensity.power',
          'channels.rawInd',
          'channels.localCoordinates']

# Download the data and get paths to downloaded data
_ = one.load(eid, dataset_types=dtypes, download_only=True)
ephys_path = one.path_from_eid(eid).joinpath('raw_ephys_data', probe_label)
alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)

# Index of good recording channels along probe
chn_inds = np.load(alf_path.joinpath('channels.rawInd.npy'))
# Position of each recording channel along probe
chn_pos = np.load(alf_path.joinpath('channels.localCoordinates.npy'))
# Get range for y-axis
depth_range = [np.min(chn_pos[:, 1]), np.max(chn_pos[:, 1])]

# Load in power spectrum data
lfp_spectrum = alf.io.load_object(ephys_path, 'ephysSpectralDensityLF', namespace='iblqc')
lfp_freq = lfp_spectrum['freqs']
lfp_power = lfp_spectrum['power'][:, chn_inds]

# Define a frequency range of interest
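# (continuation sketch: the same steps appear in full in the plot_power_spectrum_lfp example further down)
freq_range = [0, 300]
freq_idx = np.where((lfp_freq >= freq_range[0]) & (lfp_freq < freq_range[1]))[0]

# Limit data to the frequency range of interest and convert to dB
lfp_spectrum_data = 10 * np.log(lfp_power[freq_idx, :])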
Example #15
def gen_metrics_labels(eid, probe_name):
    one = ONE()
    ses_path = one.path_from_eid(eid)
    alf_probe_dir = os.path.join(ses_path, 'alf', probe_name)
    ks_dir = alf_probe_dir
    spks_b = aio.load_object(alf_probe_dir, 'spikes')
    units_b = bb.processing.get_units_bunch(spks_b)
    units = list(units_b.amps.keys())
    lengths_samples = [len(v) for k, v in units_b.samples.items()]
    units_nonzeros = [i for i, d in enumerate(lengths_samples) if d > 0]
    n_units = len(units_nonzeros)  # only compute metrics for units with at least one sample

    #for cases where raw data is available locally:
    ephys_file_dir = os.path.join(ses_path, 'raw_ephys_data', probe_name)
    ephys_file = os.path.join(ses_path, 'raw_ephys_data', probe_name,
                              '_iblrig_ephysData.raw_g0_t0.imec.ap.cbin')
    #create params.py file
    params_file = os.path.join(ks_dir, 'params.py')
    if os.path.exists(ephys_file) and not os.path.exists(params_file):
        f = open(params_file, "w+")
        f.write('dat_path = ' + 'r"' + ephys_file + '"\n' +
                '''n_channels_dat = 385
        dtype = 'int16'
        offset = 0
        sample_rate = 30000
        hp_filtered = False
        uidx=0''')
        f.close()

    # Initialize metrics
    cum_amp_drift = np.full((n_units, ), np.nan)
    cum_depth_drift = np.full((n_units, ), np.nan)
    cv_amp = np.full((n_units, ), np.nan)
    cv_fr = np.full((n_units, ), np.nan)
    frac_isi_viol = np.full((n_units, ), np.nan)
    frac_missing_spks = np.full((n_units, ), np.nan)
    fp_estimate = np.full((n_units, ), np.nan)
    presence_ratio = np.full((n_units, ), np.nan)
    pres_ratio_std = np.full((n_units, ), np.nan)
    ptp_sigma = np.full((n_units, ), np.nan)

    units_missing_metrics = set()
    label = np.empty([len(units)])
    RefPViol = np.empty([len(units)])
    NoiseCutoff = np.empty([len(units)])
    MeanAmpTrue = np.empty([len(units)])

    for idx, unit in enumerate(units_nonzeros):
        if unit == units_nonzeros[0]:
            t0 = time.perf_counter()  # used for computation time estimate

        print('computing metrics for unit ' + str(unit) + '...')

        #load relevant data for unit
        ts = units_b['times'][str(unit)]
        amps = units_b['amps'][str(unit)]
        samples = units_b['samples'][str(unit)]
        depths = units_b['depths'][str(unit)]

        RefPViol[idx] = FP_RP(ts)
        NoiseCutoff[idx] = noise_cutoff(amps, quartile_length=.25)

        #create 'label' based on RPviol,NoiseCutoff, and MeanAmp
        if len(samples) > 50:  # only compute mean amplitude for units with more than 50 samples
            try:
                MeanAmpTrue[int(unit)] = peak_to_peak_amp(ephys_file,
                                                          samples,
                                                          nsamps=20)

                if (FP_RP(ts) and noise_cutoff(amps, quartile_length=.25) < 20
                        and MeanAmpTrue[int(unit)] > 50):
                    label[idx] = 1
                else:
                    label[idx] = 0
            except:
                if (FP_RP(ts)
                        and noise_cutoff(amps, quartile_length=.25) < 20):
                    label[idx] = 1
                else:
                    label[idx] = 0

        else:  #no ephys file, do not include true mean amps
            if (FP_RP(ts) and noise_cutoff(amps, quartile_length=.25) < 20):
                label[idx] = 1
            else:
                label[idx] = 0

        #now compute additional metrics that label does not depend on:

        # Cumulative drift of spike amplitudes, normalized by total number of spikes.
        try:
            cum_amp_drift[idx] = cum_drift(amps)
        except Exception as err:
            print(
                "Failed to compute 'cum_drift(amps)' for unit {}. Details: \n {}"
                .format(unit, err))
            units_missing_metrics.add(unit)

        # Cumulative drift of spike depths, normalized by total number of spikes.
        try:
            cum_depth_drift[idx] = cum_drift(depths)
        except Exception as err:
            print(
                "Failed to compute 'cum_drift(depths)' for unit {}. Details: \n {}"
                .format(unit, err))
            units_missing_metrics.add(unit)

        # Coefficient of variation of spike amplitudes.
        try:
            cv_amp[idx] = np.std(amps) / np.mean(amps)
        except Exception as err:
            print("Failed to compute 'cv_amp' for unit {}. Details: \n {}".
                  format(unit, err))
            units_missing_metrics.add(unit)

        # Coefficient of variation of computed instantaneous firing rate.
        try:
            fr = bb.singlecell.firing_rate(ts, hist_win=0.01, fr_win=0.25)
            cv_fr[idx] = np.std(fr) / np.mean(fr)
        except Exception as err:
            print(
                "Failed to compute 'cv_fr' for unit {}. Details: \n {}".format(
                    unit, err))
            units_missing_metrics.add(unit)

        # Fraction of isi violations.
        try:
            frac_isi_viol[idx], _, _ = isi_viol(ts, rp=0.002)
        except Exception as err:
            print(
                "Failed to compute 'frac_isi_viol' for unit {}. Details: \n {}"
                .format(unit, err))
            units_missing_metrics.add(unit)

        # Estimated fraction of missing spikes.
        try:
            frac_missing_spks[idx], _, _ = feat_cutoff(amps,
                                                       spks_per_bin=10,
                                                       sigma=4,
                                                       min_num_bins=50)
        except Exception as err:
            print(
                "Failed to compute 'frac_missing_spks' for unit {}. Details: \n {}"
                .format(unit, err))
            units_missing_metrics.add(unit)

        # Estimated fraction of false positives.
        try:
            fp_estimate[idx] = fp_est(ts, rp=0.002)
        except Exception as err:
            print("Failed to compute 'fp_est' for unit {}. Details: \n {}".
                  format(unit, err))
            units_missing_metrics.add(unit)

        # Presence ratio
        try:
            presence_ratio[idx], _ = pres_ratio(ts, hist_win=10)
        except Exception as err:
            print("Failed to compute 'pres_ratio' for unit {}. Details: \n {}".
                  format(unit, err))
            units_missing_metrics.add(unit)

        # Presence ratio over the standard deviation of spike counts in each bin
        try:
            pr, pr_bins = pres_ratio(ts, hist_win=10)
            pres_ratio_std[idx] = pr / np.std(pr_bins)
        except Exception as err:
            print(
                "Failed to compute 'pres_ratio_std' for unit {}. Details: \n {}"
                .format(unit, err))
            units_missing_metrics.add(unit)

    #append metrics to the current clusters.metrics
    metrics_read = pd.read_csv(Path(alf_probe_dir, 'clusters.metrics.csv'))

    if not 'label' in metrics_read.columns:
        try:
            label_df = pd.DataFrame(label)
            pd.DataFrame.insert(metrics_read, 1, 'label', label_df)
        except ValueError:
            pd.DataFrame.drop(metrics_read, columns='label')
            pd.DataFrame.insert(metrics_read, 1, 'label', label_df)
        except:
            print("Could not save 'label' to .csv.")

        try:
            df_cum_amp_drift = pd.DataFrame(cum_amp_drift.round(2))
            metrics_read['cum_amp_drift'] = df_cum_amp_drift
        except Exception as err:
            print("Could not save 'cum_amp_drift' to .csv. Details: \n {}".
                  format(err))

        try:
            df_cum_depth_drift = pd.DataFrame(cum_depth_drift.round(2))
            metrics_read['cum_depth_drift'] = df_cum_depth_drift
        except Exception as err:
            print("Could not save 'cum_depth_drift' to .tsv. Details: \n {}".
                  format(err))

        try:
            df_cv_amp = pd.DataFrame(cv_amp.round(2))
            metrics_read['cv_amp'] = df_cv_amp
        except Exception as err:
            print(
                "Could not save 'cv_amp' to .tsv. Details: \n {}".format(err))

        try:
            df_cv_fr = pd.DataFrame(cv_fr.round(2))
            metrics_read['cv_fr'] = df_cv_fr
        except Exception as err:
            print("Could not save 'cv_fr' to .tsv. Details: \n {}".format(err))

        try:
            df_frac_isi_viol = pd.DataFrame(frac_isi_viol.round(2))
            metrics_read['frac_isi_viol'] = df_frac_isi_viol
        except Exception as err:
            print("Could not save 'frac_isi_viol' to .tsv. Details: \n {}".
                  format(err))

        try:
            df_frac_missing_spks = pd.DataFrame(frac_missing_spks.round(2))
            metrics_read['frac_missing_spks'] = df_frac_missing_spks
        except Exception as err:
            print("Could not save 'frac_missing_spks' to .tsv. Details: \n {}".
                  format(err))

        try:
            df_fp_est = pd.DataFrame(fp_estimate.round(2))
            metrics_read['fp_est'] = df_fp_est
        except Exception as err:
            print(
                "Could not save 'fp_est' to .tsv. Details: \n {}".format(err))

        try:
            df_pres_ratio = pd.DataFrame(presence_ratio.round(2))
            metrics_read['pres_ratio'] = df_pres_ratio
        except Exception as err:
            print("Could not save 'pres_ratio' to .tsv. Details: \n {}".format(
                err))

        try:
            df_pres_ratio_std = pd.DataFrame(pres_ratio_std.round(2))
            metrics_read['pres_ratio_std'] = df_pres_ratio_std
        except Exception as err:
            print("Could not save 'pres_ratio_std' to .tsv. Details: \n {}".
                  format(err))

        try:
            df_refp_viol = pd.DataFrame(RefPViol)
            pd.DataFrame.insert(metrics_read, 2, 'refp_viol', df_refp_viol)
        except ValueError:
            pd.DataFrame.drop(metrics_read, columns='refp_viol')
            pd.DataFrame.insert(metrics_read, 2, 'refp_viol', df_refp_viol)
        except Exception as err:
            print("Could not save 'RefPViol' to .tsv. Details: \n {}".format(
                err))

        try:
            df_noise_cutoff = pd.DataFrame(NoiseCutoff)
            pd.DataFrame.insert(metrics_read, 3, 'noise_cutoff',
                                df_noise_cutoff)
        except ValueError:
            pd.DataFrame.drop(metrics_read, columns='noise_cutoff')
            pd.DataFrame.insert(metrics_read, 3, 'noise_cutoff',
                                df_noise_cutoff)
        except Exception as err:
            print(
                "Could not save 'NoiseCutoff' to .tsv. Details: \n {}".format(
                    err))

        try:
            df_mean_amp_true = pd.DataFrame(MeanAmpTrue)
            pd.DataFrame.insert(metrics_read, 4, 'mean_amp_true',
                                df_mean_amp_true)
        except ValueError:
            pd.DataFrame.drop(metrics_read, columns='mean_amp_true')
            pd.DataFrame.insert(metrics_read, 4, 'mean_amp_true',
                                df_mean_amp_true)
        except Exception as err:
            print("Could not save 'Mean Amp True' to .tsv. Details: \n {}".
                  format(err))

        #now add df to csv
        metrics_read.to_csv(Path(alf_probe_dir, 'clusters.metrics.csv'))
        print('Launching phy')
    else:
        print('Launching phy')

    try:
        numpass = int(sum(label))
        numpassRP = int(sum(RefPViol))
        numpassAC = int(sum(NoiseCutoff < 20))
        ntot = len(units)
        print("\n Number of units that pass: ", numpass)
        print("Number of units that pass RP threshold: ", numpassRP)
        print("Number of units that pass Amp Cutoff threshold: ", numpassAC)
        print("Number of total units: ", ntot)
    except Exception as err:
        print("Could not compute number of units that pass. Details \n {}".
              format(err))

    return metrics_read
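
# Usage sketch (illustrative eid and probe name): computes the extra quality metrics,
# appends them to clusters.metrics.csv and returns the resulting dataframe.
metrics = gen_metrics_labels('614e1937-4b24-4ad3-9055-c8253d089919', 'probe00')
print(metrics.columns.tolist())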
Example #16
def populate_dj_with_phy(probe_label,
                         eid=None,
                         subj=None,
                         date=None,
                         sess_no=None,
                         one=None):
    if one is None:
        one = ONE()

    if eid is None:
        eid = one.search(subject=subj, date=date, number=sess_no)[0]

    sess_path = one.path_from_eid(eid)
    alf_path = sess_path.joinpath('alf', probe_label)

    cluster_path = Path(alf_path, 'spikes.clusters.npy')
    template_path = Path(alf_path, 'spikes.templates.npy')

    # Compare spikes.clusters with spikes.templates to find which clusters have been merged
    phy_clusters = np.load(cluster_path)
    id_phy = np.unique(phy_clusters)
    orig_clusters = np.load(template_path)
    id_orig = np.unique(orig_clusters)

    uuid_list = alf.io.load_file_content(
        alf_path.joinpath('clusters.uuids.csv'))

    # First deal with merged clusters and make sure they have cluster uuids assigned
    # Find the original cluster ids that have been merged into a new cluster
    merged_idx = np.setdiff1d(id_orig, id_phy)

    # See if any clusters have been merged, if not skip to the next bit
    if np.any(merged_idx):
        # Make association between original cluster and new cluster id and save in dict
        merge_list = {}
        for m in merged_idx:
            idx = phy_clusters[np.where(orig_clusters == m)[0][0]]
            if idx in merge_list:
                merge_list[idx].append(m)
            else:
                merge_list[idx] = [m]

        # Create a dataframe from the dict
        merge_clust = pd.DataFrame(
            columns=['cluster_idx', 'merged_uuid', 'merged_idx'])
        for key, value in merge_list.items():
            value_uuid = uuid_list['uuids'][value]
            merge_clust = merge_clust.append(
                {
                    'cluster_idx': key,
                    'merged_uuid': tuple(value_uuid),
                    'merged_idx': tuple(value)
                },
                ignore_index=True)

        # Get the dj table that has previously stored merged clusters and store in frame
        merge = cluster_table.MergedClusters()
        merge_dj = pd.DataFrame(columns=['cluster_uuid', 'merged_uuid'])
        merge_dj['cluster_uuid'] = merge.fetch('cluster_uuid').astype(str)
        merge_dj['merged_uuid'] = tuple(map(tuple, merge.fetch('merged_uuid')))

        # Merge the two dataframe to see if any merge combinations already have a cluster_uuid
        merge_comb = pd.merge(merge_dj,
                              merge_clust,
                              on=['merged_uuid'],
                              how='outer')

        # Find the merged clusters that do not have a uuid assigned
        no_uuid = np.where(pd.isnull(merge_comb['cluster_uuid']))[0]

        # Assign new uuid to new merge pairs and add to the merge table
        for nid in no_uuid:
            new_uuid = str(uuid.uuid4())
            merge_comb['cluster_uuid'].iloc[nid] = new_uuid
            merge.insert1(dict(
                cluster_uuid=new_uuid,
                merged_uuid=merge_comb['merged_uuid'].iloc[nid]),
                          allow_direct_insert=True)

        # Add all the uuids to the cluster_uuid frame with index according to cluster id from phy
        for idx, c_uuid in zip(merge_comb['cluster_idx'].values,
                               merge_comb['cluster_uuid'].values):
            uuid_list.loc[idx] = c_uuid

        csv_path = Path(alf_path, 'merge_info.csv')
        merge_comb = merge_comb.reindex(columns=[
            'cluster_idx', 'cluster_uuid', 'merged_idx', 'merged_uuid'
        ])

        try:
            merge_comb.to_csv(csv_path, index=False)
        except Exception as err:
            print(err)
            print('Close merge_info.csv file and then relaunch script')
            sys.exit(1)
    else:
        print('No merges detected, continuing...')

    # Now populate datajoint with cluster labels
    user = one._par.ALYX_LOGIN
    current_date = datetime.now().replace(microsecond=0)

    try:
        cluster_group = alf.io.load_file_content(
            alf_path.joinpath('cluster_group.tsv'))
    except Exception as err:
        print(err)
        print('Could not find cluster group file output from phy')
        sys.exit(1)

    try:
        cluster_notes = alf.io.load_file_content(
            alf_path.joinpath('cluster_notes.tsv'))
        cluster_info = pd.merge(cluster_group,
                                cluster_notes,
                                on=['cluster_id'],
                                how='outer')
    except Exception as err:
        cluster_info = cluster_group
        cluster_info['notes'] = None

    cluster_info = cluster_info.where(cluster_info.notnull(), None)
    cluster_info['cluster_uuid'] = uuid_list['uuids'][
        cluster_info['cluster_id']].values

    # dj table that holds data
    cluster = cluster_table.ClusterLabel()

    # Find clusters that have already been labelled by user
    old_clust = cluster & cluster_info & {'user_name': user}

    dj_clust = pd.DataFrame()
    dj_clust['cluster_uuid'] = (old_clust.fetch('cluster_uuid')).astype(str)
    dj_clust['cluster_label'] = old_clust.fetch('cluster_label')

    # First find the new clusters to insert into datajoint
    idx_new = np.where(
        np.isin(cluster_info['cluster_uuid'],
                dj_clust['cluster_uuid'],
                invert=True))[0]
    cluster_uuid = cluster_info['cluster_uuid'][idx_new].values
    cluster_label = cluster_info['group'][idx_new].values
    cluster_note = cluster_info['notes'][idx_new].values

    if idx_new.size != 0:
        print('Populating dj with ' + str(idx_new.size) + ' new labels')
    else:
        print('No new labels to add')
    for iIter, (iClust, iLabel, iNote) in enumerate(
            zip(cluster_uuid, cluster_label, cluster_note)):
        cluster.insert1(dict(cluster_uuid=iClust,
                             user_name=user,
                             label_time=current_date,
                             cluster_label=iLabel,
                             cluster_note=iNote),
                        allow_direct_insert=True)
        print_progress(iIter, cluster_uuid.size, '', '')

    # Next look through clusters already on datajoint and check if any labels have
    # been changed
    comp_clust = pd.merge(cluster_info, dj_clust, on='cluster_uuid')
    idx_change = np.where(
        comp_clust['group'] != comp_clust['cluster_label'])[0]

    cluster_uuid = comp_clust['cluster_uuid'][idx_change].values
    cluster_label = comp_clust['group'][idx_change].values
    cluster_note = comp_clust['notes'][idx_change].values

    # Populate table
    if idx_change.size != 0:
        print('Replacing label of ' + str(idx_change.size) + ' clusters')
    else:
        print('No labels to change')
    for iIter, (iClust, iLabel, iNote) in enumerate(
            zip(cluster_uuid, cluster_label, cluster_note)):
        prev_clust = cluster & {'user_name': user} & {'cluster_uuid': iClust}
        cluster.insert1(dict(*prev_clust.proj(),
                             label_time=current_date,
                             cluster_label=iLabel,
                             cluster_note=iNote),
                        allow_direct_insert=True,
                        replace=True)
        print_progress(iIter, cluster_uuid.size, '', '')

    print('Upload to datajoint complete')
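
# Usage sketch: the probe label is required; the session can be given either as an eid
# or as subject/date/number (all values below are illustrative).
populate_dj_with_phy('probe00', subj='CSHL049', date='2020-01-08', sess_no=1)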
Example #17
def launch_phy(probe_name,
               eid=None,
               subj=None,
               date=None,
               sess_no=None,
               one=None):
    """
    Launch phy given an eid and probe name.

    TODO calculate metrics and save as .tsvs to include in GUI when launching?
    """

    # This is a first draft, no error handling and a draft dataset list.

    # Load data from probe #
    # -------------------- #

    if one is None:
        one = ONE()

    dtypes = [
        'spikes.times',
        'spikes.clusters',
        'spikes.amps',
        'spikes.templates',
        'spikes.samples',
        'spikes.depths',
        'templates.waveforms',
        'templates.waveformsChannels',
        'clusters.uuids',
        'clusters.metrics',
        'clusters.waveforms',
        'clusters.waveformsChannels',
        'clusters.depths',
        'clusters.amps',
        'clusters.channels',
        'channels.probes',
        'channels.rawInd',
        'channels.localCoordinates',
        # 'ephysData.raw.ap'
        '_phy_spikes_subset.waveforms',
        '_phy_spikes_subset.spikes',
        '_phy_spikes_subset.channels'
    ]

    if eid is None:
        eid = one.search(subject=subj, date=date, number=sess_no)[0]

    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    ses_path = one.path_from_eid(eid)
    alf_probe_dir = os.path.join(ses_path, 'alf', probe_name)
    ephys_file_dir = os.path.join(ses_path, 'raw_ephys_data', probe_name)
    raw_files = glob.glob(os.path.join(ephys_file_dir, '*ap.*bin'))
    raw_file = [raw_files[0]] if raw_files else None

    # TODO download ephys meta-data, and extract TemplateController input arg params

    # Launch phy #
    # -------------------- #
    add_default_handler('DEBUG', logging.getLogger("phy"))
    add_default_handler('DEBUG', logging.getLogger("phylib"))
    create_app()
    controller = TemplateController(
        dat_path=raw_file,
        dir_path=alf_probe_dir,
        dtype=np.int16,
        n_channels_dat=384,
        sample_rate=3e4,
        plugins=['IBLMetricsPlugin'],
        plugin_dirs=[Path(__file__).resolve().parent / 'plugins'])
    gui = controller.create_gui()
    gui.show()
    run_app()
    gui.close()
    controller.model.close()
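

# Usage sketch for launch_phy (illustrative values): downloads the spike-sorting
# datasets for the probe and opens them in the phy GUI.
launch_phy('probe00', subj='CSHL049', date='2020-01-08', sess_no=1)

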
def stream_save_labeled_frames(eid, video_type):
    '''
    For a given eid and camera type, stream
    sample frames, print DLC labels on them
    and save
    '''

    startTime = time.time()

    # eid = '5522ac4b-0e41-4c53-836a-aaa17e82b9eb'
    # video_type = 'left'

    n_frames = 5  # sample 5 random frames

    save_images_folder = '/home/mic/DLC_QC/example_frames/'
    one = ONE()
    info = '_'.join(
        np.array(str(one.path_from_eid(eid)).split('/'))[[5, 7, 8]])
    print(info, video_type)

    r = one.list(eid, 'dataset_types')

    dtypes_DLC = [
        '_ibl_rightCamera.times.npy', '_ibl_leftCamera.times.npy',
        '_ibl_bodyCamera.times.npy', '_iblrig_leftCamera.raw.mp4',
        '_iblrig_rightCamera.raw.mp4', '_iblrig_bodyCamera.raw.mp4',
        '_ibl_leftCamera.dlc.pqt', '_ibl_rightCamera.dlc.pqt',
        '_ibl_bodyCamera.dlc.pqt'
    ]

    dtype_names = [x['name'] for x in r]

    assert all([i in dtype_names
                for i in dtypes_DLC]), 'For this eid, not all data available'

    D = one.load(eid,
                 dataset_types=['camera.times', 'camera.dlc'],
                 dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent / 'alf'

    cam0 = alf.io.load_object(alf_path,
                              '%sCamera' % video_type,
                              namespace='ibl')

    Times = cam0['times']

    cam = cam0['dlc']
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])

    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])

    if video_type != 'body':
        d = list(points)
        d.remove('tube_top')
        d.remove('tube_bottom')
        points = np.array(d)

    # stream frames
    recs = [x for x in r
            if f'{video_type}Camera.raw.mp4' in x['name']][0]['file_records']
    video_path = [x['data_url'] for x in recs if x['data_url'] is not None][0]
    vid_meta = get_video_meta(video_path)

    frame_idx = sample(range(vid_meta['length']), n_frames)
    print('frame indices:', frame_idx)
    frames = get_video_frames_preload(video_path,
                                      frame_idx,
                                      mask=np.s_[:, :, 0])
    size = [vid_meta['width'], vid_meta['height']]
    #return XYs, frames

    x0 = 0
    x1 = size[0]
    y0 = 0
    y1 = size[1]
    if video_type == 'left':
        dot_s = 10  # [px] for painting DLC dots
    else:
        dot_s = 5

    # writing stuff on frames
    font = cv2.FONT_HERSHEY_SIMPLEX

    if video_type == 'left':
        bottomLeftCornerOfText = (20, 1000)
        fontScale = 4
    else:
        bottomLeftCornerOfText = (10, 500)
        fontScale = 2

    lineType = 2

    # assign a color to each DLC point (now: all points red)
    cmap = matplotlib.cm.get_cmap('Set1')
    CR = np.arange(len(points)) / len(points)

    block = np.ones((2 * dot_s, 2 * dot_s, 3))

    k = 0
    for frame in frames:

        gray = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # print session info
        fontColor = (255, 255, 255)
        cv2.putText(gray, info, bottomLeftCornerOfText, font, fontScale / 4,
                    fontColor, lineType)

        # print time
        Time = round(Times[frame_idx[k]], 3)
        a, b = bottomLeftCornerOfText
        bottomLeftCornerOfText0 = (int(a * 10 + b / 2), b)
        cv2.putText(gray, '  time: ' + str(Time), bottomLeftCornerOfText0,
                    font, fontScale / 2, fontColor, lineType)

        # print DLC dots
        ll = 0
        for point in points:

            # Put point color legend
            fontColor = (np.array([cmap(CR[ll])]) * 255)[0][:3]
            a, b = bottomLeftCornerOfText
            if video_type == 'right':
                bottomLeftCornerOfText2 = (a, a * 2 * (1 + ll))
            else:
                bottomLeftCornerOfText2 = (b, a * 2 * (1 + ll))
            fontScale2 = fontScale / 4
            cv2.putText(gray, point, bottomLeftCornerOfText2, font, fontScale2,
                        fontColor, lineType)

            X0 = XYs[point][0][frame_idx[k]]
            Y0 = XYs[point][1][frame_idx[k]]

            X = Y0
            Y = X0

            #print(point,X,Y)
            if not np.isnan(X) and not np.isnan(Y):
                try:
                    col = (np.array([cmap(CR[ll])]) * 255)[0][:3]
                    # col = np.array([0, 0, 255]) # all points red
                    X = X.astype(int)
                    Y = Y.astype(int)

                    uu = block * col
                    gray[X - dot_s:X + dot_s, Y - dot_s:Y + dot_s] = uu

                except Exception as e:
                    print('frame', frame_idx[k])
                    print(e)
            ll += 1

        gray = gray[y0:y1, x0:x1]
        # cv2.imshow('frame', gray)
        cv2.imwrite(f'{save_images_folder}{eid}_frame_{frame_idx[k]}.png',
                    gray)
        cv2.waitKey(1)
        k += 1

    print(f'{n_frames} frames done in', np.round(time.time() - startTime))
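
# Usage sketch (illustrative eid): writes n_frames labeled example frames as PNGs
# into save_images_folder.
stream_save_labeled_frames('5522ac4b-0e41-4c53-836a-aaa17e82b9eb', 'left')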
Example #19
def plot_power_spectrum_lfp(eid, probe_label):
    # instantiate ONE
    one = ONE()

    #    # Specify subject, date and probe we are interested in
    #    subject = 'CSHL049'
    #    date = '2020-01-08'
    #    sess_no = 1
    #    probe_label = 'probe00'
    #    eid = one.search(subject=subject, date=date, number=sess_no)[0]

    # Specify the dataset types of interest
    dtypes = [
        '_iblqc_ephysSpectralDensity.freqs',
        '_iblqc_ephysSpectralDensity.power', 'channels.rawInd',
        'channels.localCoordinates'
    ]

    # Download the data and get paths to downloaded data
    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    ephys_path = one.path_from_eid(eid).joinpath('raw_ephys_data', probe_label)
    alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)

    # Index of good recording channels along probe
    chn_inds = np.load(alf_path.joinpath('channels.rawInd.npy'))
    # Position of each recording channel along probe
    chn_pos = np.load(alf_path.joinpath('channels.localCoordinates.npy'))
    # Get range for y-axis
    depth_range = [np.min(chn_pos[:, 1]), np.max(chn_pos[:, 1])]

    # Load in power spectrum data
    lfp_spectrum = alf.io.load_object(ephys_path,
                                      'ephysSpectralDensityLF',
                                      namespace='iblqc')
    lfp_freq = lfp_spectrum['freqs']
    lfp_power = lfp_spectrum['power'][:, chn_inds]

    # Define a frequency range of interest
    freq_range = [0, 300]
    freq_idx = np.where((lfp_freq >= freq_range[0])
                        & (lfp_freq < freq_range[1]))[0]

    # Limit data to freq range of interest and also convert to dB
    lfp_spectrum_data = 10 * np.log(lfp_power[freq_idx, :])
    dB_levels = np.quantile(lfp_spectrum_data, [0.1, 0.9])

    # Create figure
    fig, ax = plt.subplots()
    # Plot the LFP spectral data
    spectrum_plot = ax.imshow(lfp_spectrum_data.T,
                              extent=np.r_[freq_range, depth_range],
                              cmap='viridis',
                              vmin=dB_levels[0],
                              vmax=dB_levels[1],
                              origin='lower',
                              aspect='auto')
    cbar = fig.colorbar(spectrum_plot, ax=ax)
    cbar.set_label('LFP power (dB)')
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Depth along probe (um)')
    #ax.set_title('Power Spectrum of LFP')

    #    plt.show()

    session_name = '_'.join(str(ephys_path).split('/')[5:10])
    plt.suptitle('%s_%s \n %s' % (eid, probe_label, session_name))
    plt.savefig('/home/mic/saturation_analysis/PSD_plots/%s_%s.png' %
                (eid, probe_label))
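
# Usage sketch (illustrative eid and probe label): saves the LFP power spectrum figure.
plot_power_spectrum_lfp('614e1937-4b24-4ad3-9055-c8253d089919', 'probe00')
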
COLORS = (sns.color_palette('colorblind', as_cmap=True)[0],
          sns.color_palette('colorblind', as_cmap=True)[3])

# Query session list
eids, probes = query_sessions(selection=INCL_SESSIONS)

results_df = pd.DataFrame()
for i in range(len(eids)):
    print('\nProcessing session %d of %d' % (i+1, len(eids)))

    # Load in data
    eid = eids[i]
    try:
        spikes, clusters, channels = bbone.load_spike_sorting_with_channel(
                                                                    eid, aligned=True, one=one)
        ses_path = one.path_from_eid(eid)
        trials = load_trials(eid)
    except Exception as error_message:
        print(error_message)
        continue

    # Check data integrity
    if check_trials(trials) is False:
        continue

    # Extract session data
    ses_info = one.get_details(eid)
    subject = ses_info['subject']
    date = ses_info['start_time'][:10]
    probes_to_use = probes[i]
Example #21
def plot_all(eid):
    matplotlib.rcParams.update({'font.size': 10})
    # report eid =  '4a45c8ba-db6f-4f11-9403-56e06a33dfa4'

    panels = {
        'plot_paw_on_image': plot_paw_on_image,
        'plot_wheel_position': plot_wheel_position,
        'paw_speed_PSTH': paw_speed_PSTH,
        'plot_licks': plot_licks,
        'lick_raster': lick_raster,
        'nose_speed_PSTH': nose_speed_PSTH,
        'pupil_diameter_PSTH': pupil_diameter_PSTH,
        'motion_energy_PSTH': motion_energy_PSTH
    }

    nrows = 2
    ncols = 4

    #plt.ioff()

    plt.figure(figsize=(15, 10))

    k = 1
    for panel in panels:
        plt.subplot(nrows, ncols, k)
        add_panel_letter(k)
        try:
            panels[panel](eid)
            #continue
        except:
            ax = plt.gca()
            plt.text(.5,
                     .5,
                     f'error in \n {panel}',
                     color='r',
                     fontweight='bold',
                     bbox=dict(facecolor='white', alpha=0.5),
                     fontsize=10,
                     transform=ax.transAxes)
        k += 1

    plt.tight_layout()

    # print QC outcome in title and DLC task version
    one = ONE()
    task = one.alyx.rest('tasks', 'list', session=eid, name='EphysDLC')[0]
    det = one.get_details(eid, True)['extended_qc']
    p = one.path_from_eid(eid)
    s1 = ' '.join([str(p).split('/')[i] for i in [4, 6, 7, 8]])

    dlc_qcs = [
        'time_trace_length_match', 'trace_all_nan', 'mean_in_bbox',
        'pupil_blocked', 'lick_detection'
    ]

    qcs = [
        'task', 'behavior', 'videoLeft', 'videoRight', 'videoBody', 'dlcLeft',
        'dlcRight', 'dlcBody'
    ]

    l = []
    for q in qcs:
        try:
            if det[q] == 'FAIL':

                if 'dlc' in q:
                    l.append('\n')
                    l.append(q + ':' + str(det[q]) + '-->')
                    video_type = q[3:]
                    for dlc_qc in dlc_qcs:
                        w = f'_dlc{video_type}_{dlc_qc}'
                        if not det[w]:
                            l.append(w + ':' + str(det[w]) + ',')
                else:
                    l.append(q + ':' + str(det[q]) + ',')
        except:
            continue

    s2 = ' '.join(l)

    plt.suptitle(s1 + ', DLC version: ' + str(task['version']) + ' \n ' + s2,
                 backgroundcolor='white',
                 fontsize=6)
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    #plt.tight_layout()
    plt.savefig(f'/home/mic/reproducible_dlc/overviewJune/{eid}.png')
    plt.close()
Example #22
dataset_types = [
    'trials.goCue_times', '_iblrig_taskData.raw',
    '_iblmic_audioSpectrogram.frequencies', '_iblmic_audioSpectrogram.power',
    '_iblmic_audioSpectrogram.times_mic'
]

eIDs = one.search(task_protocol='bias',
                  location='_iblrig_churchlandlab_ephys_0',
                  dataset_types=dataset_types)
# eIDs = '098bdac5-0e25-4f51-ae63-995be7fe81c7' # TEST EXAMPLE

is_plot = False

for i_eIDs in range(0, len(eIDs)):
    one.load(eIDs[i_eIDs], dataset_types=dataset_types, download_only=True)
    session_path = one.path_from_eid(eIDs[i_eIDs])
    c = raw_data_loaders.load_data(session_path)
    n_trial = len(c)

    # -- Get spectrogram
    TF = alf.io.load_object(session_path.joinpath('raw_behavior_data'),
                            'audioSpectrogram',
                            namespace='iblmic')

    # -- Detect goCue
    # Assume quietness before goCue is played -> use diff to detect onset
    indf = np.where(
        np.logical_and(TF['frequencies'] >= 4000,
                       TF['frequencies'] <= 6000))[1]

    sum_5k = np.sum(TF['power'][:, indf], axis=1)
from oneibl.one import ONE
import numpy as np
import matplotlib.pyplot as plt
from ibllib.atlas import BrainRegions

one = ONE()

# Specify subject, date and probe we are interested in
subject = 'CSHL049'
date = '2020-01-08'
sess_no = 1
probe_label = 'probe01'
eid = one.search(subject=subject, date=date, number=sess_no)[0]

_ = one.load(eid, dataset_types=['clusters.channels'], download_only=True)
alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)
cluster_chans = np.load(alf_path.joinpath('clusters.channels.npy'))

aligned_traj = one.alyx.rest('trajectories',
                             'list',
                             subject=subject,
                             session=eid,
                             probe=probe_label,
                             provenance='Ephys aligned histology track')

if len(aligned_traj) > 0:
    print('Getting channels for provenance ' + aligned_traj[0]['provenance'])

    channels = one.alyx.rest('channels',
                             'list',
                             trajectory_estimate=aligned_traj[0]['id'])
Example #24
# ---------------------------------------------
# 2. Get only spikes and clusters (without brain regions assigned to clusters)
#    data separately from channels
#    Use merger function to get channels information into clusters
#    Adding feature x, y from default
spikes, clusters = bbone.load_spike_sorting(eid, one=one)
channels = bbone.load_channel_locations(eid, one=one)
keys = ['x', 'y']
clusters_brain = bbone.merge_clusters_channels(clusters,
                                               channels,
                                               keys_to_add_extra=keys)
del spikes, clusters, clusters_brain, channels  # Delete for the purpose of the example

# ---------------------------------------------
# 3. I don't want to connect to ONE and I already know my session path
session_path = one.path_from_eid(eid)  # replace by your local path
spikes, clusters = bbone.load_spike_sorting(session_path, one=one)
# TODO offline loading of channel locations ? Probably by caching the queries.

# ---------------- WIP ---------------------

# TODO one.load_object(): return dict of bunch

# --- Download spikes data
# 1. either a specific subset of dataset types via the one command
# 2. either the whole spikes object via the one
'''
# Option 1 -- Download only subset of dataset in spike object
dataset_types = ['spikes.times',
                 'spikes.clusters']
one.load(eid, dataset_types=dataset_types)
Example #25
from ibllib.io import spikeglx
from oneibl.one import ONE

one = ONE()
# Download a dataset of interest
eid = one.search(subject='ZM_2240', date_range='2020-01-22')[0]

dtypes = [
    'ephysData.raw.nidq',  # change this to ephysData.raw.lf or ephysData.raw.ap for
    'ephysData.raw.ch',
    'ephysData.raw.meta'
]

_ = one.load(eid, dataset_types=dtypes, download_only=True)

# Get file paths of interest
raw_ephys_path = one.path_from_eid(eid).joinpath('raw_ephys_data')
efile = raw_ephys_path.joinpath('_spikeglx_ephysData_g0_t0.nidq.cbin')

# Read the files and get the data
# Enough to do analysis
sr = spikeglx.Reader(efile)

# Decompress the data
# Used by client code, e.g. Matlab for spike sorting
# Give new path output name
sr.decompress_file(keep_original=True,
                   overwrite=True)  # Keep the original file and overwrite any
# previously decompressed file
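
# Hedged follow-up sketch: assuming the Reader exposes the sampling rate as `sr.fs` and
# supports numpy-style slicing over (samples, channels), as recent ibllib versions do,
# a short chunk of the recording can be read directly:
chunk = sr[:int(0.1 * sr.fs), :]  # first 100 ms across all channels
print(chunk.shape)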