Example #1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 10:00:29 2021

@author: guido
"""

from iblvideo import run_session
from oneibl.one import ONE
one = ONE()

# session = one.search(task_protocol='_iblrig_NPH_tasks_trainingChoiceWorld')
session = one.search(task_protocol='_iblrig_tasks_opto_biasedChoiceWorld')

for eid in session:
    print(f'Processing session {eid}')
    status = run_session(eid,
                         machine='guido',
                         cams=['left'],
                         one=one,
                         frames=10000)
Example #2
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path

import alf.io
from oneibl.one import ONE


def scatter_raster(spikes, downsample_factor=20):
    """Scatter plot of spike times against depth, colour-coded by cluster."""
    uclusters = np.unique(spikes['clusters'])
    cols = ['c', 'b', 'g', 'y', 'k', 'r', 'm']
    cols_cat = (cols * int(len(uclusters) / len(cols) + 10))[:len(uclusters)]
    col_dict = dict(zip(uclusters, cols_cat))

    # downsample
    z = spikes['clusters'][::downsample_factor]
    x = spikes['times'][::downsample_factor]
    y = spikes['depths'][::downsample_factor]

    cols_int = [col_dict[c] for c in z]

    plt.scatter(x, y, marker='o', s=0.01, c=cols_int)

    plt.ylabel('depth [um]')
    plt.xlabel('time [sec]')
    plt.title('downsample factor: %s' % downsample_factor)


if __name__ == '__main__':

    one = ONE()
    eid = one.search(subject='ZM_2407', date='2019-11-05', number=3)
    D = one.load(eid[0], clobber=False, download_only=True)
    alf_path = Path(D.local_path[0]).parent

    spikes = alf.io.load_object(alf_path, 'spikes')
    scatter_raster(spikes)
Example #3
class RegistrationClient:
    def __init__(self, one=None):
        self.one = one
        if not one:
            self.one = ONE()
        self.dtypes = self.one.alyx.rest('dataset-types', 'list')
        self.file_extensions = [
            df['file_extension']
            for df in self.one.alyx.rest('data-formats', 'list')
        ]

    def create_sessions(self, root_data_folder, dry=False):
        # create a session for every create_me.flag file found under the root data folder
        flag_files = Path(root_data_folder).glob('**/create_me.flag')
        for flag_file in flag_files:
            if dry:
                print(flag_file)
                continue
            logger_.info('creating session for ' + str(flag_file.parent))
            # providing a false flag stops the registration after session creation
            status_str = self.register_session(flag_file.parent,
                                               file_list=False)
            if status_str:
                logger_.error(status_str)
            flag_file.unlink()

    def register_sync(self, root_data_folder, dry=False):
        flag_files = Path(root_data_folder).glob('**/register_me.flag')
        for flag_file in flag_files:
            if dry:
                print(flag_file)
                continue
            file_list = flags.read_flag_file(flag_file)
            logger_.info('registering ' + str(flag_file.parent))
            status_str = self.register_session(flag_file.parent,
                                               file_list=file_list)
            if status_str:
                error_message = str(flag_file.parent) + ' failed registration'
                error_message += '\n' + ' ' * 8 + status_str
                error_message += traceback.format_exc()
                logger_.error(error_message)
                err_file = flag_file.parent.joinpath('register_me.error')
                flag_file.replace(err_file)
                with open(err_file, 'w+') as f:
                    f.write(error_message)
                continue
            flags.write_flag_file(flag_file.parent.joinpath('flatiron.flag'),
                                  file_list=file_list)
            flag_file.unlink()
            if flag_file.parent.joinpath('create_me.flag').exists():
                flag_file.parent.joinpath('create_me.flag').unlink()
            logger_.info('registered' + '\n')

    @log2sessionfile
    def register_session(self, ses_path, file_list=True, repository_name=None):
        if isinstance(ses_path, str):
            ses_path = Path(ses_path)
        # read meta data from the rig for the session from the task settings file
        settings_json_file = [
            f for f in ses_path.glob('**/_iblrig_taskSettings.raw*.json')
        ]
        if not settings_json_file:
            logger_.error('could not find _iblrig_taskSettings.raw.json. Abort.')
            return
        else:
            settings_json_file = settings_json_file[0]
        md = _read_settings_json_compatibility_enforced(settings_json_file)
        # query alyx endpoints for subject, error if not found
        try:
            subject = self.one.alyx.rest(
                'subjects?nickname=' + md['SUBJECT_NAME'], 'list')[0]
        except IndexError:
            return 'Subject: ' + md[
                'SUBJECT_NAME'] + " doesn't exist in Alyx. ABORT."

        # look for a session from the same subject, same number on the same day
        _, session = self.one.search(subjects=subject['nickname'],
                                     date_range=md['SESSION_DATE'],
                                     number=md['SESSION_NUMBER'],
                                     details=True)
        try:
            user = self.one.alyx.rest('users', 'read', md["PYBPOD_CREATOR"][0])
        except Exception:
            return 'User: ' + md["PYBPOD_CREATOR"][0] + " doesn't exist in Alyx. ABORT"

        username = user['username'] if user else subject['responsible_user']

        # load the trials data to get information about session duration
        ses_data = raw.load_data(ses_path)
        ses_duration_secs = _get_session_duration(ses_path, ses_data)

        start_time = ibllib.time.isostr2date(md['SESSION_DATETIME'])
        end_time = start_time + datetime.timedelta(seconds=ses_duration_secs)

        # this is the generic relative path: subject/yyyy-mm-dd/NNN
        gen_rel_path = Path(subject['nickname'], md['SESSION_DATE'],
                            '{0:03d}'.format(int(md['SESSION_NUMBER'])))

        # check that the actual number of trials matches the labelled number of trials
        assert (len(ses_data) == ses_data[-1]['trial_num'])

        # task specific logic
        if 'habituationChoiceWorld' in md['PYBPOD_PROTOCOL']:
            n_correct_trials = 0
        else:
            n_correct_trials = ses_data[-1]['ntrials_correct']

        # if nothing found create a new session in Alyx
        if not session:
            ses_ = {
                'subject': subject['nickname'],
                'users': [username],
                'location': md['PYBPOD_BOARD'],
                'procedures': ['Behavior training/tasks'],
                'lab': subject['lab'],
                # 'project': project['name'],
                'type': 'Experiment',
                'task_protocol':
                md['PYBPOD_PROTOCOL'] + md['IBLRIG_VERSION_TAG'],
                'number': md['SESSION_NUMBER'],
                'start_time': ibllib.time.date2isostr(start_time),
                'end_time': ibllib.time.date2isostr(end_time),
                'n_correct_trials': n_correct_trials,
                'n_trials': ses_data[-1]['trial_num'],
                'json': json.dumps(md, indent=1),
            }
            session = self.one.alyx.rest('sessions', 'create', data=ses_)
            if md['SUBJECT_WEIGHT']:
                wei_ = {
                    'subject': subject['nickname'],
                    'date_time': ibllib.time.date2isostr(start_time),
                    'weight': md['SUBJECT_WEIGHT'],
                    'user': username
                }
                self.one.alyx.rest('weighings', 'create', wei_)
        else:
            session = session[0]

        logger_.info(session['url'] + ' ')
        # create associated water administration if not found
        if not session['wateradmin_session_related']:
            wa_ = {
                'subject': subject['nickname'],
                'date_time': ibllib.time.date2isostr(end_time),
                'water_administered': ses_data[-1]['water_delivered'] / 1000,
                'water_type': md['REWARD_TYPE'],
                'user': username,
                'session': session['url'][-36:],
                'adlib': False
            }
            self.one.alyx.rest('water-administrations', 'create', wa_)
        # at this point the session has been created. If create only, exit
        if not file_list:
            return
        # register all files that match the Alyx patterns; warn the user when non-matching files are encountered
        rename_files_compatibility(ses_path, md['IBLRIG_VERSION_TAG'])
        F = {}  # keys will be relative paths and contents will be file names
        for fn in ses_path.glob('**/*.*'):
            if fn.suffix in ['.flag', '.error', '.avi', '.log']:
                logger_.debug('Excluded: ' + str(fn))
                continue
            if not self._match_filename_dtypes(fn):
                logger_.warning('No matching dataset type for: ' + str(fn))
                continue
            if fn.suffix not in self.file_extensions:
                logger_.warning(
                    'No matching dataformat (ie. file extension) for: ' +
                    str(fn))
                continue
            if not _register_bool(fn.name, file_list):
                logger_.debug('Not in filelist: ' + str(fn))
                continue
            try:
                assert (str(gen_rel_path) in str(fn))
            except AssertionError:
                strerr = 'ALF folder mismatch: data is in wrong subject/date/number folder. \n'
                strerr += ' Expected ' + str(
                    gen_rel_path) + ' actual was ' + str(fn)
                return strerr
            # extract the relative path of the file
            rel_path = Path(str(fn)[str(fn).find(str(gen_rel_path)):]).parent
            if str(rel_path) not in F.keys():
                F[str(rel_path)] = [fn.name]
            else:
                F[str(rel_path)].append(fn.name)
            logger_.info('Registering ' + str(fn))

        for rpath in F:
            r_ = {
                'created_by': username,
                'path': rpath,
                'filenames': F[rpath],
            }
            self.one.alyx.post('/register-file', data=r_)

    def _match_filename_dtypes(self, full_file):
        import re
        patterns = [
            dt['filename_pattern'] for dt in self.dtypes
            if dt['filename_pattern']
        ]
        for pat in patterns:
            reg = pat.replace('.', r'\.').replace('_',
                                                  r'\_').replace('*', r'.+')
            if re.match(reg, Path(full_file).name, re.IGNORECASE):
                return True
        return False
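
# Note on _match_filename_dtypes above (illustrative, not from the original):
# a dataset-type pattern such as 'spikes.times*' is converted to the regex
# r'spikes\.times.+', which matches a file named 'spikes.times.npy'
# (case-insensitively, because of re.IGNORECASE).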
Example #4
## Init
from oneibl.one import ONE
from ibllib.misc import pprint
one = ONE(base_url='https://test.alyx.internationalbrainlab.org',
          username='******',
          password='******')

## Find an experiment
eid = one.search(users='olivier', date_range=['2018-08-24', '2018-08-24'])
pprint(eid)
one.search_terms()

## List dataset types for a session
eid = 'cf264653-2deb-44cb-aa84-89b82507028a'
one.list(eid)
## More Info about a session
d = one.list(eid, 'All')

## Get More Info about datasets
d = one.list(eid, details=True)
print(d)

## List #1
one.list(None, 'dataset-types')
one.list(None, 'users')
one.list(None, 'subjects')

## Load #1
dataset_types = [
    'clusters.templateWaveforms', 'clusters.probes', 'clusters.depths'
]
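# Hedged sketch of the load call that likely followed (one.load with
# dataset_types is the pattern used in the other examples here):
# clusters_data = one.load(eid, dataset_types=dataset_types)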
Example #5
if __name__ == '__main__':

    BIN_SIZE = 0.025  # seconds
    SMOOTH_SIZE = 0.025  # seconds; standard deviation of gaussian kernel
    PRE_TIME = 0.25  # seconds to plot before event onset
    POST_TIME = 1.0  # seconds to plot after event onset
    RESULTS_DIR = '/datadisk/scratch'

    # get the data from flatiron
    subject = 'KS004'
    date = '2019-09-25'
    number = 1
    one = ONE()
    eid = one.search(subject=subject,
                     date=date,
                     number=number,
                     task_protocol='ephysChoiceWorld')
    session_info = one.load(eid[0], clobber=False, download_only=True)
    session_path = get_session_path(session_info)
    alf_path = os.path.join(session_path, 'alf')

    # load objects
    spikes = ioalf.load_object(alf_path, 'spikes')
    clusters = ioalf.load_object(alf_path, 'clusters')
    trials = ioalf.load_object(alf_path, '_ibl_trials')

    # containers to store results
    align_events = ['stimOn', 'stimOff', 'feedback']
    cluster_ids = np.unique(
        spikes['clusters'])  # define subset of clusters to plot
    peth_means = {
Example #6
               extent=np.r_[times[[0, -1]], Clusters[[0, -1]]])

    plt.xlabel('Time (s)')
    plt.ylabel('Cluster #; ordered by depth')
    plt.show()

    # plt.savefig('/home/mic/Rasters/%s.svg' %(trial_number))
    # plt.close('all')
    plt.tight_layout()


if __name__ == '__main__':

    # get data
    one = ONE()
    eid = one.search(lab='wittenlab', date='2019-08-04')
    D = one.load(eid[0], clobber=False, download_only=True)
    alf_path = Path(D.local_path[0]).parent
    spikes = alf.io.load_object(alf_path, 'spikes')

    # bin activity
    T_BIN = 0.01  # [sec]
    R, times, Clusters = bincount2D(spikes['times'], spikes['clusters'], T_BIN)

    # Order activity by anatomical depth of neurons
    d = dict(zip(spikes['clusters'], spikes['depths']))
    y = sorted([[i, d[i]] for i in d])
    isort = np.argsort([x[1] for x in y])
    R = R[isort, :]

    # Check the number of clusters x number of time bins
Example #7
import matplotlib.pyplot as plt

from ibllib.io import raw_data_loaders
import alf.io
from oneibl.one import ONE

one = ONE()

dataset_types = [
    'trials.goCue_times', '_iblrig_taskData.raw',
    '_iblmic_audioSpectrogram.frequencies', '_iblmic_audioSpectrogram.power',
    '_iblmic_audioSpectrogram.times_mic'
]

eIDs = one.search(task_protocol='bias',
                  location='_iblrig_churchlandlab_ephys_0',
                  dataset_types=dataset_types)
# eIDs = '098bdac5-0e25-4f51-ae63-995be7fe81c7' # TEST EXAMPLE

is_plot = False

for i_eIDs in range(0, len(eIDs)):
    one.load(eIDs[i_eIDs], dataset_types=dataset_types, download_only=True)
    session_path = one.path_from_eid(eIDs[i_eIDs])
    c = raw_data_loaders.load_data(session_path)
    n_trial = len(c)

    # -- Get spectrogram
    TF = alf.io.load_object(session_path.joinpath('raw_behavior_data'),
                            'audioSpectrogram',
                            namespace='iblmic')
Example #8
Get list of subjects associated with the certification recording project.
TODO not finished: number of subjects per lab, number of recordings per lab
'''
# Author: Gaelle Chapuis

import numpy
from oneibl.one import ONE
one = ONE()  # base_url='https://dev.alyx.internationalbrainlab.org'

dataset_types = ['spikes.times', 'spikes.clusters']

# eid1, det1 = one.search(project='ibl_certif_neuropix_recording',
#                         dataset_types=dataset_types, details=True)

eid, det = one.search(task_protocol='ephys_certification',
                      dataset_types=dataset_types,
                      details=True)

sub = [p['subject'] for p in det]
# sub_unique = list(set(sub))

lab = [p['lab'] for p in det]
# lab_unique = list(set(lab))

# task = [p['task_protocol'] for p in det]
# task_unique = list(set(task))

# -- How many animals were used per lab
su, ind_su = numpy.unique(sub, return_index=True)
lab_arr = numpy.array(lab)
lu = lab_arr[ind_su]
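
# Hedged continuation for the TODO above (not part of the original example):
# count how many unique subjects each lab contributed.
from collections import Counter
subjects_per_lab = Counter(lu)
print(subjects_per_lab)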
Example #9
import numpy as np
from brainbox.population import decode
from sklearn.utils import shuffle
from oneibl.one import ONE
import brainbox.io.one as bbone

# %% Load in data
one = ONE()
eid = one.search(subject='ZM_2240', date_range=['2020-01-23', '2020-01-23'])
spikes, clusters = bbone.load_spike_sorting(eid[0], one=one)
trials = one.load_object(eid[0], 'trials')

# %% Only use units with KS2 label 'good' from probe00

spikes = spikes['probe00']
clusters = clusters['probe00']

clusters_to_use = clusters.metrics.ks2_label == 'good'
spikes.times = spikes.times[np.isin(
    spikes.clusters, clusters.metrics.cluster_id[clusters_to_use])]
spikes.clusters = spikes.clusters[np.isin(
    spikes.clusters, clusters.metrics.cluster_id[clusters_to_use])]
cluster_ids = clusters.metrics.cluster_id[clusters_to_use]

# %% Do decoding
print('Decoding whether the stimulus was on the left or the right..')

stim_times = trials.goCue_times
stim_sides = np.isnan(trials.contrastLeft).astype(int)

# Decode left vs right stimulus from a 1 second window after stimulus onset using default settings:
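# Hedged sketch of the decoding call that likely followed; the exact signature
# of brainbox.population.decode is an assumption here, so check your installed
# version before running:
# decode_result = decode(spikes.times, spikes.clusters, stim_times, stim_sides)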
Example #10
def xcorr(x, y, maxlags=None):
    """Cross-correlation between two 1D signals of the same length."""
    ns = len(x)
    if len(y) != ns:
        raise ValueError("x and y should have the same length.")
    maxlags = maxlags or ns - 1
    return np.correlate(x, y, mode='full')[ns - 1 - maxlags:ns + maxlags]
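
# Illustrative check of xcorr (not part of the original example): the
# autocorrelation of a non-negative random signal peaks at zero lag, which
# sits at index `maxlags` of the returned array.
# sig = np.random.rand(100)
# acorr = xcorr(sig, sig, maxlags=10)
# assert np.argmax(acorr) == 10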


T_BIN = 0.01  # seconds
CORR_LEN = 1  # seconds
CORR_BINS = int(CORR_LEN / T_BIN)  # bins

# get the data from flatiron and the current folder
one = ONE()
eid = one.search(subject='ZM_1150', date='2019-05-07', number=1)
D = one.load(eid[0], clobber=False, download_only=True)
session_path = Path(D.local_path[0]).parent

# load objects
spikes = ioalf.load_object(session_path, 'spikes')

# Get a Bunch instance.
b = one_to_bunch(spikes)

# Compute the firing rates.
rates = firing_rates(b.spike_times, b.spike_clusters, T_BIN)
# Note: I would rather just use spikes['times'] and spikes['clusters'] instead of going
# via a Bunch or DataFrame or similar...

# Compute the cross-correlation between the firing rate of two neurons.
Example #11
def launch_phy(probe_name, eid=None, subj=None, date=None, sess_no=None, one=None):
    """
    Launch phy given an eid and probe name.

    TODO calculate metrics and save as .tsvs to include in GUI when launching?
    """

    # This is a first draft, no error handling and a draft dataset list.

    # Load data from probe #
    # -------------------- #

    if one is None:
        one = ONE()

    dtypes = [
        'spikes.times',
        'spikes.clusters',
        'spikes.amps',
        'spikes.templates',
        'spikes.samples',
        'spikes.depths',
        'templates.waveforms',
        'templates.waveformsChannels',
        'clusters.uuids',
        'clusters.metrics',
        'clusters.waveforms',
        'clusters.waveformsChannels',
        'clusters.depths',
        'clusters.amps',
        'clusters.channels',
        'channels.probes',
        'channels.rawInd',
        'channels.localCoordinates',
        # 'ephysData.raw.ap'
        '_phy_spikes_subset.waveforms',
        '_phy_spikes_subset.spikes',
        '_phy_spikes_subset.channels'
    ]

    if eid is None:
        eid = one.search(subject=subj, date=date, number=sess_no)[0]

    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    ses_path = one.path_from_eid(eid)
    alf_probe_dir = os.path.join(ses_path, 'alf', probe_name)
    ephys_file_dir = os.path.join(ses_path, 'raw_ephys_data', probe_name)
    raw_files = glob.glob(os.path.join(ephys_file_dir, '*ap.*bin'))
    raw_file = [raw_files[0]] if raw_files else None

    # TODO download ephys meta-data, and extract TemplateController input arg params

    # Launch phy #
    # -------------------- #
    add_default_handler('DEBUG', logging.getLogger("phy"))
    add_default_handler('DEBUG', logging.getLogger("phylib"))
    create_app()
    controller = TemplateController(dat_path=raw_file, dir_path=alf_probe_dir, dtype=np.int16,
                                    n_channels_dat=384, sample_rate=3e4,
                                    plugins=['IBLMetricsPlugin'],
                                    plugin_dirs=[Path(__file__).resolve().parent])
    gui = controller.create_gui()
    gui.show()
    run_app()
    gui.close()
    controller.model.close()
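
# Hedged usage sketch (the subject, date and probe label below are placeholders,
# not from the original example):
# launch_phy('probe00', subj='KS022', date='2019-12-10', sess_no=1)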
Example #12
                    help='Date of session YYYY-MM-DD')
parser.add_argument('-n',
                    '--session_no',
                    default=False,
                    required=True,
                    help='Session Number')
parser.add_argument('-e',
                    '--ephys_data',
                    default=False,
                    required=False,
                    help='Set True to load ephys.bin data')
args = parser.parse_args()

one = ONE()
eid = one.search(subject=str(args.subject),
                 date=str(args.date),
                 number=args.session_no)[0]
data_path = one.load(eid, clobber=False, download_only=True)
print(
    f'Successfully loaded data for {args.subject} {args.date} session no. {args.session_no}'
)

if args.ephys_data:
    print('Loading ephys.bin data')
    ephys_types = [
        'ephysData.raw.ch', 'ephysData.raw.meta', 'ephysData.raw.ap',
        'ephysData.raw.lf'
    ]
    ephys_path = one.load(eid,
                          dataset_types=ephys_types,
                          clobber=False,
Example #13
        t0, t1 = intervals[j, :]
        # Count the number of spikes in the window, for each neuron.
        x = np.bincount(
            spike_clusters[intervals_idx[j, 0]:intervals_idx[j, 1]],
            minlength=cluster_ids.max() + 1)
        counts[:, j] = x[cluster_ids]
    return counts, cluster_ids


# %%
# Set path to save plots
DATA_PATH, FIG_PATH, _ = paths()
FIG_PATH = join(FIG_PATH, 'TimeConstant')

# Load in data
eids = one.search(subject=SUBJECT, date_range=DATE)
spikes, clusters, channels = bbone.load_spike_sorting_with_channel(eids[0],
                                                                   one=one)
trials = one.load_object(eids[0], 'trials')

# Only use single units
probe = 'probe' + PROBE
spikes[probe].times = spikes[probe].times[np.isin(
    spikes[probe].clusters, clusters[probe].metrics.cluster_id[
        clusters[probe].metrics.ks2_label == 'good'])]
spikes[probe].clusters = spikes[probe].clusters[np.isin(
    spikes[probe].clusters, clusters[probe].metrics.cluster_id[
        clusters[probe].metrics.ks2_label == 'good'])]

# Convert into seconds
BIN_SIZE_S = BIN_SIZE / 1000
Example #14
    :return: x and y numpy arrays
    """
    x = np.empty(0)
    y = np.empty(0)
    for key in list(dlc_dict.keys()):
        if key[-1] == 'x':
            x = np.append(x, dlc_dict[key][frame_number])
        if key[-1] == 'y':
            y = np.append(y, dlc_dict[key][frame_number])
    return x, y
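
# Illustration of the function above with made-up values: for
# dlc_dict = {'paw_x': np.array([1., 2.]), 'paw_y': np.array([3., 4.])} and
# frame_number = 0, it returns (array([1.]), array([3.])).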


# Query sessions with available DLC data using ONE
one = ONE()
dtypes = ['camera.dlc', 'camera.times']
eids, ses_info = one.search(dataset_types=dtypes, details=True)

# Loop over sessions
FRAME_NR = 5000
for i, eid in enumerate(eids):
    if np.mod(i+1, 5) == 0:
        print('Running session %d of %d' % (i+1, len(eids)))

    # Load in data
    d = one.load(eid, dataset_types=dtypes, download_only=True, dclass_output=True)
    ses_path = Path(d.local_path[0]).parent
    dlc_dict = alf.io.load_object(ses_path, '_ibl_leftCamera', short_keys=True)
    video_path = Path(ses_path.parent, 'raw_video_data', '_iblrig_leftCamera.raw.mp4')

    # Get DLC and video data
    if (np.isnan(dlc_dict['pupil_top_r_x'][0])) or (np.size(dlc_dict['pupil_top_r_x']) < 1000):
Example #15
class Alyx2NWBMetadata:
    def __init__(self, eid=None, one_obj=None, **one_search_kwargs):
        """
        Query the sessions, subject, lab tables of the Alyx database using the ONE api.
        Retrieve a mice experiment's metadata as well as the various data types (ONE format)
        created during the experiment like: Trials, Behavior, Electrophysiology(raw, spike sorted),
        Stimulus, Probes used.
        Parameters
        ----------
        eid: str
            uuid of IBL experiment
        one_obj: ONE()
            one object created after user authenticated connection to ALyx servers
        one_search_kwargs: dict
            various search terms to retrieve an eid of interest using the ONE api to query Alyx.
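        Examples
        --------
        Hypothetical usage (the search terms below are placeholders):
        >>> metadata = Alyx2NWBMetadata(one_obj=ONE(), subject='CSHL_003')
        >>> nwbfile_info = metadata.nwbfile_metadata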
        """
        if one_obj is None:
            self.one_obj = ONE()
        elif not isinstance(one_obj, OneAbstract):
            raise Exception('one_obj is not of ONE class')
        else:
            self.one_obj = one_obj
        if eid is None:
            eid = self.one_obj.search(**one_search_kwargs)
            if len(eid) > 1:
                print(f'number of EIDs found for your search query: {len(eid)}, '
                      f'generating metadata from the first')
                if input('continue? y/n') != 'y':
                    exit()
            self.eid = eid[0]
        elif isinstance(eid, list):
            self.eid = eid[0]
        else:
            self.eid = eid
        self.one_search_kwargs = one_search_kwargs
        self.schema = nwb_schema
        self.dataset_description_list = self._get_dataset_details()
        self.eid_session_info = self._retrieve_eid_endpoint()
        self.dataset_type_list = self._list_eid_metadata('dataset_type')
        self.users_list = self._list_eid_metadata('users')
        self.subjects_list = self._list_eid_metadata('subjects')
        self.labs_list = self._list_eid_metadata('labs')
        self.dataset_details, self.dataset_simple = self._dataset_type_parse()
        self._get_lab_table()
        self._get_subject_table()

    def _get_datetime(self, dtstr, format='%Y-%m-%dT%X'):
        if '.' in dtstr:
            dtstr = dtstr.split('.')[0]
        if len(dtstr) > 19:
            dtstr = dtstr[:19]
        if len(dtstr) == 10:
            format = '%Y-%m-%d'
        elif len(dtstr) == 19:
            if 'T' in dtstr:
                format = '%Y-%m-%dT%X'
            else:
                format = '%Y-%m-%d %X'
        try:
            return datetime.strptime(dtstr, format)
        except:
            raise Exception('could not convert to datetime')

    def _get_dataset_details(self):
        """
        Retrieve all dataset types currently in the Alyx database.
        The REST call returns a list of dicts with keys like id, name, created_by, description, etc.;
        only name and description are used here.
        Returns
        -------
        dict
            {<dataset-name>: <dataset_description>}
        """
        data_url_resp = self.one_obj.alyx.rest('dataset-types', 'list')
        return {i['name']: i['description'] for i in data_url_resp}

    def _list_eid_metadata(self, list_type):
        """
        Uses one's list method to get the types of <list_type> data from the given eid.
        Parameters
        ----------
        list_type: str
            one of strings from
            >>> ONE().search_terms()
        Returns
        -------
        list
        """
        return self.one_obj.list(self.eid, list_type)

    def _retrieve_eid_endpoint(self):
        """
        Get the REST response for the current session's URL. Contains all the session metadata as well as the current datasets etc.
        Returns
        -------
        list
            list of server responses.
        """
        return self.one_obj.alyx.rest('sessions/' + self.eid, 'list')

    def _get_lab_table(self):
        self.lab_table = self.one_obj.alyx.rest('labs', 'list')

    def _get_subject_table(self):
        self.subject_table = self.one_obj.alyx.rest(
            'subjects/' + self.eid_session_info['subject'], 'list')

    def _dataset_type_parse(self):
        """

        Returns
        -------
        tuple of dict
            dataset details, e.g.
            {<dataset object name> (e.g. spikes, clusters):
                [
                    {'name': <attribute, e.g. times or intervals>,
                     'description': <attribute description>},
                    ...
                ]
            }
            and a simplified dict mapping each object name to its list of attribute names.
        """
        split_list_objects = [i.split('.')[0] for i in self.dataset_type_list]
        split_list_attributes = [
            '.'.join(i.split('.')[1:]) for i in self.dataset_type_list
        ]
        dataset_description = [
            self.dataset_description_list[i] for i in self.dataset_type_list
        ]
        split_list_objects_dict_details = dict()
        split_list_objects_dict = dict()
        for obj in set(split_list_objects):
            split_list_objects_dict_details[obj] = []
            split_list_objects_dict[obj] = []
        for att_idx, attrs in enumerate(split_list_attributes):
            append_dict = {
                'name': attrs,
                'description': dataset_description[att_idx]
            }
            # 'extension': dataset_extension[att_idx] }
            split_list_objects_dict_details[
                split_list_objects[att_idx]].extend([append_dict])
            split_list_objects_dict[split_list_objects[att_idx]].extend(
                [attrs])
        dataset_type_list = split_list_objects_dict_details
        dataset_type_list_simple = split_list_objects_dict
        return dataset_type_list, dataset_type_list_simple

    @staticmethod
    def _unpack_dataset_details(dataset_details,
                                object_name,
                                custom_attrs=None,
                                match_str=' '):
        """
        Unpacks the dataset_details into:
        Parameters
        ----------
        dataset_details: dict
            self.dataset_details
        object_name: str
            eg: spikes, clusters, Ecephys
        custom_attrs: list
            attrs to unpack
        match_str: regex
            match string: attrs to exclude (like .times/.intervals etc)
        Returns
        -------
        datafiles: str
            ex: 'face.motionEnergy'
        datanames: str
            ex: 'motionEnergy'
        datadesc: str
            ex: <description string for motionEnergy>
        """
        cond = lambda x: re.match(match_str, x)
        datafiles_all = [
            object_name + '.' + ii['name']
            for ii in dataset_details[object_name] if not cond(ii['name'])
        ]
        datafiles_names_all = [
            ii['name'] for ii in dataset_details[object_name]
            if not cond(ii['name'])
        ]
        datafiles_desc_all = [
            ii['description'] for ii in dataset_details[object_name]
            if not cond(ii['name'])
        ]
        if custom_attrs:
            datafiles_inc = []
            datafiles_names_inc = []
            datafiles_desc_inc = []
            for attrs in custom_attrs:
                datafiles_inc.extend([
                    i for i in datafiles_all if i in object_name + '.' + attrs
                ])
                datafiles_names_inc.extend([
                    datafiles_names_all[j] for j, i in enumerate(datafiles_all)
                    if i in object_name + '.' + attrs
                ])
                datafiles_desc_inc.extend([
                    datafiles_desc_all[j] for j, i in enumerate(datafiles_all)
                    if i in object_name + '.' + attrs
                ])
        else:
            datafiles_inc = datafiles_all
            datafiles_names_inc = datafiles_names_all
            datafiles_desc_inc = datafiles_desc_all
        return datafiles_inc, datafiles_names_inc, datafiles_desc_inc

    def _initialize_container_dict(self, name=None, default_value=None):
        if default_value is None:
            default_value = dict()
        if name:
            return dict({name: default_value.copy()})
        else:
            return None

    def _get_all_object_names(self):
        return sorted(
            list(set([i.split('.')[0] for i in self.dataset_type_list])))

    def _get_current_object_names(self, obj_list):
        loop_list = []
        for j, k in enumerate(obj_list):
            loop_list.extend(
                [i for i in self._get_all_object_names() if k == i])
        return loop_list

    def _get_timeseries_object(self,
                               dataset_details,
                               object_name,
                               ts_name,
                               custom_attrs=None,
                               drop_attrs=None,
                               **kwargs):
        """

        Parameters
        ----------
        dataset_details: dict
            self.dataset_details
        object_name: str
            name of the object in the IBL data type
        ts_name: str
            the key name for the timeseries list
        custom_attrs: list
            Attributes to consider
        drop_attrs: list
            Attributes to drop
        kwargs
            additional keys/values to add to the default timeseries. For derivatives of TimeSeries

        Returns
        -------
        dict()
            {
                "time_series": [
                    {
                      "name": "face_motionEnergy",
                      "data": "face.motionEnergy",
                      "timestamps": "face.timestamps",
                      "description": "Features extracted from the video of the frontal aspect of the subject, including the subject\\'s face and forearms."
                    },
                    {
                      "name": "_ibl_lickPiezo_times",
                      "data": "_ibl_lickPiezo.raw",
                      "timestamps": "_ibl_lickPiezo.timestamps",
                      "description": "Voltage values from a thin-film piezo connected to the lick spout, so that values are proportional to deflection of the spout and licks can be detected as peaks of the signal."
                    }
                ]
            }
        """
        matchstr = r'.*time.*|.*interval.*'
        timeattr_name = [
            i['name'] for i in dataset_details[object_name]
            if re.match(matchstr, i['name'])
        ]
        dataset_details[object_name], _ = self._drop_attrs(
            dataset_details[object_name].copy(), drop_attrs)
        datafiles, datafiles_names, datafiles_desc = \
            self._unpack_dataset_details(dataset_details.copy(), object_name, custom_attrs, match_str=matchstr)
        if timeattr_name:
            datafiles_timedata, datafiles_time_name, datafiles_time_desc = \
                self._unpack_dataset_details(dataset_details.copy(), object_name, timeattr_name)
        elif not kwargs:  # if no timestamps info, let these fields be data
            return {ts_name: []}
        else:
            datafiles_timedata, datafiles_time_name, datafiles_time_desc = \
                datafiles, datafiles_names, datafiles_desc
        if not datafiles:
            if not kwargs:
                return {ts_name: []}
            # datafiles_names = datafiles_time_name
            # datafiles_desc = datafiles_time_desc
            # datafiles = ['None']
        timeseries_dict = {ts_name: [None] * len(datafiles)}
        for i, j in enumerate(datafiles):
            original = {
                'name': datafiles_names[i],
                'description': datafiles_desc[i],
                'timestamps': datafiles_timedata[0],
                'data': datafiles[i]
            }
            original.update(**kwargs)
            timeseries_dict[ts_name][i] = {
                k: v
                for k, v in original.items() if v is not None
            }
        return timeseries_dict

    @staticmethod
    def _attrnames_align(attrs_dict, custom_names):
        """
        The attributes that receive the custom names are reordered to be first in the list.
        Attributes that are not found are assigned description:'no_description'; this is later
        used (nwb_converter) as an identifier for non-existent data for the given eid.
        Parameters
        ----------
        attrs_dict:list
            list of dict(attr_name:'',attr_description:'')
        custom_names
            same as 'default_colnames_dict' in self._get_dynamictable_object
        Returns
        -------
        dict()
        """
        attrs_list = [i['name'] for i in attrs_dict]
        list_id_func_exclude = \
            lambda val, comp_list, comp_bool: [i for i, j in enumerate(comp_list) if comp_bool & (j == val)]
        cleanup = lambda x: [i[0] for i in x if i]
        if custom_names:
            custom_names_list = [i for i in list(custom_names.values())]
            custom_names_dict = []
            for i in range(len(custom_names_list)):
                custom_names_dict.extend([{
                    'name': custom_names_list[i],
                    'description': 'no_description'
                }])
            attr_list_include_idx = cleanup([
                list_id_func_exclude(i, attrs_list, True)
                for i in custom_names_list
            ])
            attr_list_exclude_idx = set(range(len(attrs_list))).difference(
                set(attr_list_include_idx))
            custom_names_list_include_idx = [
                i for i, j in enumerate(custom_names_list)
                if list_id_func_exclude(j, attrs_list, True)
            ]
            for ii, jj in enumerate(custom_names_list_include_idx):
                custom_names_dict[custom_names_list_include_idx[
                    ii]] = attrs_dict[attr_list_include_idx[ii]]
                custom_names_list[custom_names_list_include_idx[
                    ii]] = attrs_list[attr_list_include_idx[ii]]
            extend_dict = [attrs_dict[i] for i in attr_list_exclude_idx]
            extend_list = [attrs_list[i] for i in attr_list_exclude_idx]
            custom_names_dict.extend(extend_dict)
            custom_names_list.extend(extend_list)
            return custom_names_dict, custom_names_list
        else:
            out_dict = attrs_dict
            out_list = attrs_list
            return out_dict, out_list

    @staticmethod
    def _drop_attrs(dataset_details, drop_attrs, default_colnames_dict=None):
        """
        Used to remove given attributes of the IBL dataset.
        Parameters
        ----------
        dataset_details: list
            self.dataset_details['clusters']
            [
                {
                    'name': 'amps',
                    'description': description
                },
                {
                    'name': 'channels',
                    'description': description
                }
            ]
        drop_attrs: list
            list of str: attribute names to drop of the self.dataset_details dict
        default_colnames_dict
        Returns
        -------
        dataset_details: list
            list without dictionaries with 'name' as in drop_attrs
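
        Examples
        --------
        Illustration with made-up entries:
        >>> details = [{'name': 'amps', 'description': 'd'},
        ...            {'name': 'uuids', 'description': 'd'}]
        >>> Alyx2NWBMetadata._drop_attrs(details, ['uuids'])
        ([{'name': 'amps', 'description': 'd'}], None)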

        """
        attrs_list = [i['name'] for i in dataset_details]
        if default_colnames_dict is not None:
            default_colnames_dict_copy = default_colnames_dict.copy()
            for i, j in default_colnames_dict.items():
                if j not in attrs_list:
                    default_colnames_dict_copy.pop(i)
        else:
            default_colnames_dict_copy = default_colnames_dict
        if drop_attrs is None:
            return dataset_details, default_colnames_dict_copy
        elif default_colnames_dict is not None:
            for i, j in default_colnames_dict.items():
                if j in drop_attrs and j in attrs_list:
                    default_colnames_dict_copy.pop(i)
        dataset_details_return = [
            dataset_details[i] for i, j in enumerate(attrs_list)
            if j not in drop_attrs
        ]
        return dataset_details_return, default_colnames_dict_copy

    @staticmethod
    def _get_dynamictable_array(**kwargs):
        """
        Helper to dynamictable object method
        Parameters
        ----------
        kwargs
            keys and values that define the dictionary,
            both keys and values are lists where each index would slice all the keys/values and create a dict out of that

        Returns
        -------
        list
            list of dictionaries each with the keys and values from kwargs
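
        Examples
        --------
        A minimal illustration (values are placeholders, not IBL data):
        >>> Alyx2NWBMetadata._get_dynamictable_array(
        ...     name=['col1', 'col2'], data=['a.b', 'c.d'])
        [{'name': 'col1', 'data': 'a.b'}, {'name': 'col2', 'data': 'c.d'}]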

        """
        custom_keys = list(kwargs.keys())
        custom_data = list(kwargs.values())
        out_list = [None] * len(custom_data[0])
        for ii, jj in enumerate(custom_data[0]):
            out_list[ii] = dict().copy()
            for i, j in enumerate(custom_keys):
                out_list[ii][j] = custom_data[i][ii]
        return out_list

    def _get_dynamictable_object(self,
                                 dataset_details,
                                 object_name,
                                 dt_name,
                                 default_colnames_dict=None,
                                 custom_attrs=None,
                                 drop_attrs=None):
        """

        Parameters
        ----------
        dataset_details
            self.dataset_details for each eid
        object_name:str
            object from the IBL data types from which to create this table.
        dt_name:str
            custom name for the dynamic table. It's the key whose value is the dynamictable array
        default_colnames_dict:dict()
            keys are the custom names of the columns, corresponding values are the attributes which have to be renamed.
        custom_attrs:list
            list of attributes for the given IBL object in object_name to be considered, all others are ignored

        Returns
        -------
        outdict:dict()
            example output below:
            {'Trials':
                    [
                        {
                          "name": "column1 name",
                          "data": "column data uri (string)",
                          "description": "col1 description"
                        },
                        {
                           "name": "column2 name",
                          "data": "column data uri (string)",
                          "description": "col2 description"
                        }
                    ]
                }
        """
        dataset_details[object_name], default_colnames_dict = self._drop_attrs(
            dataset_details[object_name].copy(), drop_attrs,
            default_colnames_dict)
        dataset_details[object_name], _ = self._attrnames_align(
            dataset_details[object_name].copy(), default_colnames_dict)
        if not default_colnames_dict:
            default_colnames = []
        else:
            default_colnames = list(default_colnames_dict.keys())
        custom_columns_datafilename, custom_columns_name, custom_columns_description = \
            self._unpack_dataset_details(dataset_details.copy(), object_name, custom_attrs)
        custom_columns_name[:len(default_colnames)] = default_colnames
        in_list = self._get_dynamictable_array(
            name=custom_columns_name,
            data=custom_columns_datafilename,
            description=custom_columns_description)
        outdict = {dt_name: in_list}
        return outdict

    @property
    def eid_metadata(self):
        return dict(eid=self.eid)

    @property
    def probe_metadata(self):
        probes_metadata_dict = self._initialize_container_dict(
            'Probes', default_value=[])
        probe_list = self.eid_session_info['probe_insertion']
        probe_dict_keys = ['id', 'model', 'name', 'trajectory_estimate']
        input_dict = dict()
        for key in probe_dict_keys:
            if key == 'trajectory_estimate':
                input_dict.update({
                    key:
                    [[json.dumps(l) for l in probe_list[i].get(key, ["None"])]
                     for i in range(len(probe_list))]
                })
            else:
                input_dict.update({
                    key: [
                        probe_list[i].get(key, "None")
                        for i in range(len(probe_list))
                    ]
                })
        probes_metadata_dict['Probes'].extend(
            self._get_dynamictable_array(**input_dict))
        return probes_metadata_dict

    @property
    def nwbfile_metadata(self):
        nwbfile_metadata_dict = self._initialize_container_dict('NWBFile')
        nwbfile_metadata_dict['NWBFile'].update(
            session_start_time=self._get_datetime(
                self.eid_session_info['start_time']),
            keywords=[
                ','.join(self.eid_session_info['users']),
                self.eid_session_info['lab'], 'IBL'
            ],
            experiment_description=self.eid_session_info['project'],
            session_id=self.eid,
            experimenter=self.eid_session_info['users'],
            identifier=self.eid,
            institution=[
                i['institution'] for i in self.lab_table
                if i['name'] == self.eid_session_info['lab']
            ][0],
            lab=self.eid_session_info['lab'],
            protocol=self.eid_session_info['task_protocol'],
            surgery='none',
            notes=', '.join([
                f"User:{i['user']}{i['text']}"
                for i in self.eid_session_info['notes']
            ]),
            session_description=','.join(self.eid_session_info['procedures']))
        return nwbfile_metadata_dict

    @property
    def sessions_metadata(self):
        sessions_metadata_dict = self._initialize_container_dict(
            'IBLSessionsData')
        custom_fields = [
            'location', 'project', 'type', 'number', 'end_time',
            'parent_session', 'url', 'qc'
        ]
        sessions_metadata_dict['IBLSessionsData'] = {
            i: str(self.eid_session_info[i])
            if i not in ['procedures', 'number'] else self.eid_session_info[i]
            for i in custom_fields
        }
        sessions_metadata_dict['IBLSessionsData']['extended_qc'] = json.dumps(
            self.eid_session_info['extended_qc'])
        sessions_metadata_dict['IBLSessionsData']['json'] = json.dumps(
            self.eid_session_info['json'])
        sessions_metadata_dict['IBLSessionsData']['wateradmin_session_related'] = \
            [json.dumps(i) for i in self.eid_session_info['wateradmin_session_related']]\
                if len(self.eid_session_info['wateradmin_session_related']) > 0 else ['None']
        sessions_metadata_dict['IBLSessionsData']['notes'] = \
            [json.dumps(i) for i in self.eid_session_info['notes']]\
                if len(self.eid_session_info['notes']) > 0 else ['None']
        return sessions_metadata_dict

    @property
    def subject_metadata(self):
        subject_metadata_dict = self._initialize_container_dict('IBLSubject')
        sub_table_dict = deepcopy(self.subject_table)
        if sub_table_dict:
            subject_metadata_dict['IBLSubject'] = dict(
                age=f'P{sub_table_dict.pop("age_weeks")}W',
                subject_id=sub_table_dict.pop('id'),
                description=sub_table_dict.pop('description'),
                genotype=','.join(sub_table_dict.pop('genotype')),
                sex=sub_table_dict.pop('sex'),
                species=sub_table_dict.pop('species'),
                weight=str(sub_table_dict.pop('reference_weight')),
                date_of_birth=self._get_datetime(
                    sub_table_dict.pop('birth_date')),
                **sub_table_dict)
            water_admin_data = [json.dumps(i) for i in subject_metadata_dict['IBLSubject']['water_administrations']]\
                    if len(subject_metadata_dict['IBLSubject']['water_administrations']) > 0 else ['None']
            subject_metadata_dict['IBLSubject'].update(
                weighings=[
                    json.dumps(i)
                    for i in subject_metadata_dict['IBLSubject']['weighings']
                ],
                water_administrations=water_admin_data)
            temp_metadatadict = deepcopy(subject_metadata_dict['IBLSubject'])
            for key, val in temp_metadatadict.items():
                if isinstance(val, list) and len(val) == 0:
                    _ = subject_metadata_dict['IBLSubject'].pop(key)
        return subject_metadata_dict

    @property
    def surgery_metadata(self):  # currently not exposed by api
        return dict()

    @property
    def behavior_metadata(self):
        behavior_metadata_dict = self._initialize_container_dict('Behavior')
        behavior_objects = [
            'wheel', 'wheelMoves', 'licks', 'lickPiezo', 'face', 'eye',
            'camera'
        ]
        current_behavior_objects = self._get_current_object_names(
            behavior_objects)
        for object_name in current_behavior_objects:
            if 'wheel' == object_name:
                behavior_metadata_dict['Behavior']['BehavioralTimeSeries'] = \
                    self._get_timeseries_object(self.dataset_details.copy(), object_name, 'time_series')
            if 'wheelMoves' in object_name:
                behavior_metadata_dict['Behavior']['BehavioralEpochs'] = \
                    self._get_timeseries_object(self.dataset_details.copy(), object_name, 'time_intervals')
            if 'lickPiezo' in object_name:
                behavior_metadata_dict['Behavior']['BehavioralTimeSeries'][
                    'time_series'].extend(
                        self._get_timeseries_object(
                            self.dataset_details.copy(), object_name,
                            'time_series')['time_series'])
            if 'licks' in object_name:
                behavior_metadata_dict['Behavior']['BehavioralEvents'] = \
                    self._get_timeseries_object(self.dataset_details.copy(), object_name, 'time_series')
            if 'face' in object_name:
                behavior_metadata_dict['Behavior']['BehavioralTimeSeries'][
                    'time_series'].extend(
                        self._get_timeseries_object(
                            self.dataset_details.copy(), object_name,
                            'time_series')['time_series'])
            if 'eye' in object_name:
                behavior_metadata_dict['Behavior']['PupilTracking'] = \
                    self._get_timeseries_object(self.dataset_details.copy(), object_name, 'time_series')
            if 'camera' in object_name:
                behavior_metadata_dict['Behavior']['Position'] = \
                    self._get_timeseries_object(self.dataset_details.copy(), object_name, 'spatial_series', name='camera_dlc')
                if len(behavior_metadata_dict['Behavior']['Position']['spatial_series']) > 0 and \
                        behavior_metadata_dict['Behavior']['Position']['spatial_series'][0]['data']== \
                        behavior_metadata_dict['Behavior']['Position']['spatial_series'][0]['timestamps']:
                    behavior_metadata_dict['Behavior']['Position'][
                        'spatial_series'][0][
                            'timestamps'] = '_iblrig_Camera.timestamps'
        return behavior_metadata_dict

    @property
    def trials_metadata(self):
        trials_metadata_dict = self._initialize_container_dict('Trials')
        trials_objects = ['trials']
        current_trial_objects = self._get_current_object_names(trials_objects)
        for object_name in current_trial_objects:
            if 'trial' in object_name:
                trials_metadata_dict = self._get_dynamictable_object(
                    self.dataset_details.copy(),
                    'trials',
                    'Trials',
                    default_colnames_dict=dict(start_time='intervals',
                                               stop_time='intervals'))
        return trials_metadata_dict

    @property
    def stimulus_metadata(self):
        stimulus_objects = [
            'sparseNoise', 'passiveBeeps', 'passiveValveClick',
            'passiveVisual', 'passiveWhiteNoise'
        ]
        stimulus_metadata_dict = self._initialize_container_dict('Stimulus')
        current_stimulus_objects = self._get_current_object_names(
            stimulus_objects)
        for object_name in current_stimulus_objects:
            if 'sparseNoise' in object_name:
                stimulus_metadata_dict['Stimulus'] = \
                    self._get_timeseries_object(self.dataset_details.copy(), object_name, 'time_series')
            if 'passiveBeeps' in object_name:
                stimulus_metadata_dict['Stimulus']['time_series'].extend(
                    self._get_timeseries_object(self.dataset_details.copy(),
                                                object_name,
                                                'time_series')['time_series'])
            if 'passiveValveClick' in object_name:
                stimulus_metadata_dict['Stimulus']['time_series'].extend(
                    self._get_timeseries_object(self.dataset_details.copy(),
                                                object_name,
                                                'time_series')['time_series'])
            if 'passiveVisual' in object_name:
                stimulus_metadata_dict['Stimulus']['time_series'].extend(
                    self._get_timeseries_object(self.dataset_details.copy(),
                                                object_name,
                                                'time_series')['time_series'])
            if 'passiveWhiteNoise' in object_name:
                stimulus_metadata_dict['Stimulus']['time_series'].extend(
                    self._get_timeseries_object(self.dataset_details.copy(),
                                                object_name,
                                                'time_series')['time_series'])
        return stimulus_metadata_dict

    @property
    def device_metadata(self):
        device_metadata_dict = self._initialize_container_dict(
            'Device', default_value=[])
        device_metadata_dict['Device'].extend(
            self._get_dynamictable_array(name=['NeuroPixels probe'],
                                         description=['NeuroPixels probe']))
        return device_metadata_dict

    @property
    def units_metadata(self):
        units_objects = ['clusters', 'spikes']
        metrics_columns = [
            'cluster_id', 'cluster_id.1', 'num_spikes', 'firing_rate',
            'presence_ratio', 'presence_ratio_std', 'isi_viol',
            'amplitude_cutoff', 'amplitude_std', 'epoch_name',
            'ks2_contamination_pct', 'ks2_label'
        ]

        units_metadata_dict = self._initialize_container_dict(
            'Units', default_value=list())
        current_units_objects = self._get_current_object_names(units_objects)
        for object_name in current_units_objects:
            if 'clusters' in object_name:
                units_metadata_dict = \
                    self._get_dynamictable_object(self.dataset_details.copy(), 'clusters', 'Units',
                                                  default_colnames_dict=dict(location='brainAcronyms',
                                                                             waveform_mean='waveforms',
                                                                             electrodes='channels',
                                                                             electrode_group='probes',
                                                                             ),
                                                  drop_attrs=['uuids', 'metrics'])
                units_metadata_dict['Units'].extend(
                    self._get_dynamictable_array(
                        name=['obs_intervals', 'spike_times'],
                        data=[
                            'trials.intervals', 'spikes.clusters,spikes.times'
                        ],
                        description=[
                            'time intervals of each cluster',
                            'spike times of cluster'
                        ]))
                units_metadata_dict['Units'].extend(
                    self._get_dynamictable_array(
                        name=metrics_columns,
                        data=['clusters.metrics'] * len(metrics_columns),
                        description=['metrics_table columns data'] *
                        len(metrics_columns)))
        return units_metadata_dict

    @property
    def electrodegroup_metadata(self):
        electrodes_group_metadata_dict = self._initialize_container_dict(
            'ElectrodeGroup', default_value=[])
        for ii in range(len(self.probe_metadata['Probes'])):
            try:
                location_str = self.probe_metadata['Probes'][ii][
                    'trajectory_estimate'][0]['coordinate_system']
            except:
                location_str = 'None'
            electrodes_group_metadata_dict['ElectrodeGroup'].extend(
                self._get_dynamictable_array(
                    name=[self.probe_metadata['Probes'][ii]['name']],
                    description=[
                        'model {}'.format(
                            self.probe_metadata['Probes'][ii]['model'])
                    ],
                    device=[self.device_metadata['Device'][0]['name']],
                    location=[
                        'Mouse CoordinateSystem:{}'.format(location_str)
                    ]))
        return electrodes_group_metadata_dict

    @property
    def electrodetable_metadata(self):
        electrodes_objects = ['channels']
        electrodes_table_metadata_dict = self._initialize_container_dict(
            'ElectrodeTable')
        current_electrodes_objects = self._get_current_object_names(
            electrodes_objects)
        for i in current_electrodes_objects:
            electrodes_table_metadata_dict = self._get_dynamictable_object(
                self.dataset_details.copy(),
                'channels',
                'ElectrodeTable',
                default_colnames_dict=dict(group='probes',
                                           x='localCoordinates',
                                           y='localCoordinates'))
        return electrodes_table_metadata_dict

    @property
    def ecephys_metadata(self):
        ecephys_objects = [
            'templates', '_iblqc_ephysTimeRms', '_iblqc_ephysSpectralDensity'
        ]
        container_object_names = [
            'SpikeEventSeries', 'ElectricalSeries', 'Spectrum'
        ]
        custom_attrs_objects = [['waveforms'], ['rms'], ['power']]
        ecephys_container = self._initialize_container_dict('Ecephys')
        kwargs = dict()
        for i, j, k in zip(ecephys_objects, container_object_names,
                           custom_attrs_objects):
            current_ecephys_objects = self._get_current_object_names([i])
            if current_ecephys_objects:
                if j == 'Spectrum':
                    kwargs = dict(
                        name=i,
                        power='_iblqc_ephysSpectralDensity.power',
                        frequencies='_iblqc_ephysSpectralDensity.freqs',
                        timestamps=None)
                ecephys_container['Ecephys'].update(
                    self._get_timeseries_object(self.dataset_details.copy(),
                                                i,
                                                j,
                                                custom_attrs=k,
                                                **kwargs))
            else:
                warnings.warn(f'could not find {i} data in eid {self.eid}')
        return ecephys_container

    @property
    def acquisition_metadata(self):
        acquisition_objects = [
            'ephysData', '_iblrig_Camera', '_iblmic_audioSpectrogram'
        ]
        container_name_objects = [
            'ElectricalSeries', 'ImageSeries', 'DecompositionSeries'
        ]
        custom_attrs_objects = [['raw.nidq', 'raw.ap', 'raw.lf'], ['raw'],
                                ['power']]
        acquisition_container = self._initialize_container_dict('Acquisition')
        current_acquisition_objects = self._get_current_object_names(
            acquisition_objects)
        idx = [
            no for no, i in enumerate(acquisition_objects)
            if i in current_acquisition_objects
        ]
        current_container_name_objects = [
            container_name_objects[i] for i in idx
        ]
        current_custom_attrs_objects = [custom_attrs_objects[i] for i in idx]
        kwargs = dict()
        for i, j, k in zip(current_acquisition_objects,
                           current_container_name_objects,
                           current_custom_attrs_objects):
            if j == 'DecompositionSeries':
                kwargs = dict(name=i,
                              metric='power',
                              bands='_iblmic_audioSpectrogram.frequencies')
            acquisition_container['Acquisition'].update(
                self._get_timeseries_object(self.dataset_details.copy(),
                                            i,
                                            j,
                                            custom_attrs=k,
                                            **kwargs))
        return acquisition_container

    @property
    def ophys_metadata(self):
        raise NotImplementedError

    @property
    def icephys_metadata(self):
        raise NotImplementedError

    @property
    def scratch_metadata(self):
        # this can be used to add further details about the subject, lab, etc.
        raise NotImplementedError

    @property
    def complete_metadata(self):
        metafile_dict = {
            **self.eid_metadata,
            **self.probe_metadata,
            **self.nwbfile_metadata,
            **self.sessions_metadata,
            **self.subject_metadata,
            **self.behavior_metadata,
            **self.trials_metadata,
            **self.stimulus_metadata,
            **self.units_metadata,
            **self.electrodetable_metadata, 'Ecephys': {
                **self.ecephys_metadata,
                **self.device_metadata,
                **self.electrodegroup_metadata,
            },
            'Ophys': dict(),
            'Icephys': dict(),
            **self.acquisition_metadata
        }
        return metafile_dict

    def write_metadata(self, fileloc, savetype=None):
        if savetype is not None:
            if Path(fileloc).suffix != savetype:
                raise ValueError(f'{fileloc} should be of type {savetype}')
        else:
            savetype = Path(fileloc).suffix
        full_metadata = self.complete_metadata
        if savetype == '.json':
            full_metadata['NWBFile']['session_start_time'] = datetime.strftime(
                full_metadata['NWBFile']['session_start_time'], '%Y-%m-%dT%X')
            full_metadata['IBLSubject']['date_of_birth'] = datetime.strftime(
                full_metadata['IBLSubject']['date_of_birth'], '%Y-%m-%dT%X')
            with open(fileloc, 'w') as f:
                json.dump(full_metadata, f, indent=2)
        elif savetype in ['.yaml', '.yml']:
            with open(fileloc, 'w') as f:
                yaml.dump(full_metadata, f, default_flow_style=False)
        print(f'data written in {fileloc}')
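
# A minimal usage sketch for write_metadata above; `converter` stands in for an
# instance of the metadata class built earlier in this file (hypothetical name),
# and the output file names are placeholders.
#   converter.write_metadata('metafile.json')   # dates serialised to ISO strings
#   converter.write_metadata('metafile.yaml')   # dumped via yaml.dump
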
# import and signature reconstructed from usage (hypothetical name `butter_filter`)
from scipy import signal


def butter_filter(data, cutoff, fs, order, ftype):
    nyq = 0.5 * fs
    normal_cutoff = cutoff / nyq
    b, a = signal.butter(order, normal_cutoff, btype=ftype, analog=False)
    y = signal.filtfilt(b, a, data)
    return y
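

# A minimal usage sketch for the reconstructed `butter_filter` above; the
# sampling rate, cutoff and trace below are illustration values only.
import numpy as np

fs_example = 60.0                               # e.g. a 60 Hz camera trace
t = np.arange(0, 10, 1 / fs_example)
noisy_trace = np.sin(2 * np.pi * 0.5 * t) + 0.2 * np.random.randn(t.size)
smooth_trace = butter_filter(noisy_trace, cutoff=2, fs=fs_example, order=3, ftype='low')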


from oneibl.one import ONE
import numpy as np
import pandas as pd

FIG_PATH = '/home/guido/Figures/DLC/'

# Query sessions with available DLC data using ONE
one = ONE()
dtypes = [
    'camera.dlc', 'camera.times', 'trials.feedback_times',
    'trials.feedbackType', 'trials.stimOn_times', 'trials.choice'
]
eids = one.search(dataset_types=dtypes)

# Initialize dataframes
pupil_stim_on = pd.DataFrame(columns=['eid', 'timepoint', 'trace'])
pupil_reward = pd.DataFrame(columns=['eid', 'timepoint', 'trace'])
pupil_no_reward = pd.DataFrame(columns=['eid', 'timepoint', 'trace'])
paw_left = pd.DataFrame(columns=['eid', 'timepoint', 'trace'])
paw_right = pd.DataFrame(columns=['eid', 'timepoint', 'trace'])
tongue_reward = pd.DataFrame(columns=['eid', 'timepoint', 'trace'])
tongue_no_reward = pd.DataFrame(columns=['eid', 'timepoint', 'trace'])

# Loop over sessions
for i, eid in enumerate(eids):
    if np.mod(i + 1, 5) == 0:
        print('Processing DLC data of session %d of %d' % (i + 1, len(eids)))
from warnings import warn
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
import scipy.stats as stats
import pandas as pd
from oneibl.one import ONE
import alf.io as aio
import brainbox as bb
from ibllib.io.spikeglx import glob_ephys_files

# Set eid and probe name #
# ------------------------#
one = ONE()
eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)[0]
probe = 'probe_right'  # *Note: new probe naming convention is 'probe00', 'probe01', etc.

# Get important directories from `eid` #
# --------------------------------------#
session_path = one.path_from_eid(eid)
alf_dir = Path.joinpath(session_path, 'alf')
alf_probe_dir = Path.joinpath(alf_dir, probe)
ephys_files = glob_ephys_files(session_path)
ephys_file_path = None
if ephys_files:
    ephys_file_path = ephys_files[0]['ap']
else:
    warn(
        'Ephys file not found! Some of the examples in this script require an ephys file.'
    )
from os.path import join

import pandas as pd
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

from functions_5HT import paths, plot_settings, one_session_path
from oneibl.one import ONE
one = ONE()

# Settings
PRE_TIME = 1
POST_TIME = -0.5
MIN_TRIALS = 200
MIN_NEURONS = 20
DATA_PATH, FIG_PATH, SAVE_PATH = paths()
FIG_PATH = join(FIG_PATH, 'WholeBrain')

# Get list of recordings
eids, ses_info = one.search(user='******',
                            dataset_types='spikes.times',
                            details=True)

lda_result = pd.DataFrame()
for i, eid in enumerate(eids):

    # Load in data
    print('Processing session %d of %d' % (i + 1, len(eids)))
    session_path = one_session_path(eid)
    trials = one.load_object(eid, 'trials')
    probes = one.load_object(eid, 'probes', download_only=False)
    if (not hasattr(trials, 'stimOn_times') or
        (trials.stimOn_times.shape[0] != trials.probabilityLeft.shape[0])
            or (not hasattr(probes, 'trajectory'))):
        print('Invalid data, skipping recording')
        continue
Example #19
0
# Settings
MIN_CONTRAST = 0.1
ALPHA = 0.05


def one_session_path(eid):
    ses = one.alyx.rest('sessions', 'read', id=eid)
    return Path(one._par.CACHE_DIR, ses['lab'], 'Subjects', ses['subject'],
                ses['start_time'][:10],
                str(ses['number']).zfill(3))


# Get list of recordings
eids, ses_info = one.search(dataset_types='spikes.times',
                            task_protocol='_iblrig_tasks_ephysChoiceWorld',
                            details=True)

# Set path to save plots
DATA_PATH, FIG_PATH, SAVE_PATH = paths()
FIG_PATH = join(FIG_PATH, 'WholeBrain')

resp = pd.DataFrame()
for i, eid in enumerate(eids):

    # Load in data
    print('Processing session %d of %d' % (i + 1, len(eids)))
    session_path = one_session_path(eid)
    trials = one.load_object(eid, 'trials')
    probes = one.load_object(eid, 'probes', download_only=False)
    if ((not hasattr(trials, 'stimOn_times'))
Example #20
0
"""
Quick search through the Alyx database to see all mice/sessions
ever used in training.
"""
# Author: Gaelle Chapuis
import numpy
from oneibl.one import ONE

one = ONE()

eIDs, ses = one.search(task_protocol='world',
                       project='ibl_neuropixel_brainwide_01',
                       details=True)

sub = [p['subject'] for p in ses]
su, ind_su = numpy.unique(sub, return_index=True)

print(f'N subjects: {len(su)} - N sessions: {len(eIDs)}')
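
# A small follow-up sketch (not part of the original snippet): count how many
# sessions each subject contributed, reusing the `sub` list built above.
su, n_ses = numpy.unique(sub, return_counts=True)
for subject_name, n in zip(su, n_ses):
    print(f'{subject_name}: {n} sessions')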
the terminal. In this case no plot is generated.
"""

from oneibl.one import ONE
import numpy as np
import matplotlib.pyplot as plt
from ibllib.atlas import BrainRegions

one = ONE()

# Specify subject, date and probe we are interested in
subject = 'CSHL049'
date = '2020-01-08'
sess_no = 1
probe_label = 'probe01'
eid = one.search(subject=subject, date=date, number=sess_no)[0]

_ = one.load(eid, dataset_types=['clusters.channels'], download_only=True)
alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)
cluster_chans = np.load(alf_path.joinpath('clusters.channels.npy'))

aligned_traj = one.alyx.rest('trajectories',
                             'list',
                             subject=subject,
                             session=eid,
                             probe=probe_label,
                             provenance='Ephys aligned histology track')

if len(aligned_traj) > 0:
    print('Getting channels for provenance ' + aligned_traj[0]['provenance'])
Example #22
0
"""
Get spikes, clusters and channels data
========================================
Downloads and loads in spikes, clusters and channels data for a given session. Data is returned as a dict keyed by probe label.

"""

from oneibl.one import ONE
import brainbox.io.one as bbone
one = ONE()

# Find eid of interest
eid = one.search(subject='CSH_ZAD_001', date='2020-01-14')[0]

##################################################################################################
# Example 1:
# Download spikes, clusters and channels data for all available probes for this session.
# The data for each probe is returned as a dict
spikes, clusters, channels = bbone.load_spike_sorting_with_channel(eid,
                                                                   one=one)
print(spikes.keys())
print(spikes['probe00'].keys())
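
# For instance, the spike times and cluster assignments of a single probe can
# then be pulled out of the returned dicts (standard ALF attribute names assumed):
probe00_spike_times = spikes['probe00']['times']
probe00_spike_clusters = spikes['probe00']['clusters']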

##################################################################################################
# Example 2:
# Download spikes, clusters and channels data for a single probe
spikes, clusters, channels = bbone.load_spike_sorting_with_channel(
    eid, one=one, probe='probe00')
print(spikes.keys())

##################################################################################################
Example #23
0

if __name__ == '__main__':

    from pathlib import Path
    from oneibl.one import ONE
    import alf.io as ioalf

    # user options
    BINSIZE = 0.05  # sec
    LAGS = 4  # number of bins for calculating RF
    METHOD = 'corr'  # 'corr' | 'sta'

    # get the data from flatiron and the current folder
    one = ONE()
    eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)
    D = one.load(eid[0], clobber=False, download_only=True)
    session_path = Path(D.local_path[0]).parent

    # load objects
    spikes = ioalf.load_object(session_path, 'spikes')
    rfmap = ioalf.load_object(session_path, '_iblcertif_.rfmap')
    rf_stim_times = rfmap['rfmap.times.00']
    rf_stim = rfmap['rfmap.stims.00'].astype('float')

    # compute receptive fields
    if METHOD == 'sta':
        # method in Durand et al 2016; ~9 min for 700 units on a single cpu core
        print('computing receptive fields...', end='')
        rfs = compute_rfs(
            spikes.times, spikes.clusters, rf_stim_times, rf_stim, lags=LAGS, binsize=BINSIZE)
Example #24
0
from brainbox.io.one import load_spike_sorting, load_channel_locations
from oneibl.one import ONE

one = ONE(base_url="https://dev.alyx.internationalbrainlab.org")
eids = one.search(subject='ZM_2407', task_protocol='ephys')

channels = load_channel_locations(eids[0], one=one)
spikes, clusters = load_spike_sorting(eids[0], one=one)
Example #25
0
from oneibl.one import ONE
one = ONE()

## Load #1
dataset_types = [
    'clusters.templateWaveforms', 'clusters.probes', 'clusters.depths'
]
eid = '86e27228-8708-48d8-96ed-9aa61ab951db'
wf, pr, d = one.load(eid, dataset_types=dataset_types)

## Load #2
my_data = one.load(eid, dataset_types=dataset_types, dclass_output=True)
from ibllib.misc import pprint
pprint(my_data.local_path)
pprint(my_data.dataset_type)

## Load everything
eid, ses_info = one.search(subject='flowers', details=True)
my_data = one.load(eid[0])
pprint(my_data.dataset_type)

## Load
eid = '86e27228-8708-48d8-96ed-9aa61ab951db'
dataset_types = [
    'clusters.probes', 'thisDataset.IveJustMadeUp', 'clusters.depths'
]
t, empty, cl = one.load(eid, dataset_types=dataset_types)

## List #1
one.ls_dataset_types()
one.ls_users()
one.ls_subjects()
Example #26
0
import matplotlib.pyplot as plt
import pandas as pd

from oneibl.one import ONE
from ibllib.time import isostr2date

# import sys
# sys.path.extend('/home/owinter/PycharmProjects/WGs/BehaviourAnaysis/python')
from load_mouse_data import get_behavior
from behavior_plots import plot_psychometric

one = ONE()
# https://alyx.internationalbrainlab.org/admin/actions/session/e752b02d-b54d-4373-b51e-0b31be5f8ee5/change/
# first get the subject information
subject_details = one.alyx.rest('subjects', 'read', 'IBL_14')

# plot the weight curve
# https://alyx.internationalbrainlab.org/admin-actions/water-history/37c8f897-cbcc-4743-bad6-764ccbbfb190
wei = pd.DataFrame(subject_details['weighings'])
wei['date_time'] = wei['date_time'].apply(isostr2date)
wei.sort_values('date_time', inplace=True)
plt.plot(wei.date_time, wei.weight)

# now let's get some session information
ses_ids = one.search(subjects='IBL_14', date_range='2018-11-27')
print(one.list(ses_ids[0]))
df = get_behavior('IBL_14', date_range='2018-11-27')
plt.figure()
plot_psychometric(df, ax=plt.axes(), color="orange")
Example #27
0
'''
Download raw ephys datasets for all probes in a single session via ONE.
(example written for the LFP, but the download can be done for AP
files similarly by replacing 'lf' with 'ap')
'''
# Author: Olivier, Gaelle

from ibllib.io import spikeglx
from oneibl.one import ONE

# === Option 1 === Download a dataset of interest
one = ONE()

# Get a specific session eID
eid = one.search(subject='ZM_2240', date_range='2020-01-22')[0]

# Define and load dataset types of interest
dtypes = ['ephysData.raw.lf',  # lf : LFP
          'ephysData.raw.meta',
          'ephysData.raw.ch',
          'ephysData.raw.sync']  # Used for synchronisation
one.load(eid, dataset_types=dtypes, download_only=True)

# Get the files information
session_path = one.path_from_eid(eid)
efiles = [ef for ef in spikeglx.glob_ephys_files(session_path, bin_exists=False) if
          ef.get('lf', None)]
efile = efiles[0]['lf']  # Example: access to the first file
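
# As noted in the header docstring, the AP band can be downloaded the same way
# by swapping 'lf' for 'ap' (sketch under that assumption):
dtypes_ap = ['ephysData.raw.ap',
             'ephysData.raw.meta',
             'ephysData.raw.ch',
             'ephysData.raw.sync']
one.load(eid, dataset_types=dtypes_ap, download_only=True)
efiles_ap = [ef for ef in spikeglx.glob_ephys_files(session_path, bin_exists=False) if
             ef.get('ap', None)]
efile_ap = efiles_ap[0]['ap']  # access to the first AP file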
Example #28
0
from v1_protocol import plot as v1_plot
import alf.io as aio
import brainbox as bb
from oneibl.one import ONE

# The examples below can be run independently.

# Example 1: For 'ZM_2104/2019-09-19/001' 'probe_right', generate all 4 figures (grating response
# summary, grating response selected, unit metrics summary, and unit metrics selected) using
# default parameters. For the summary figures, use all units, and for the selected figures, use
# 4 randomly chosen units.
# -------------------------------------------------------------------------------------------------

# Set the eid as `eid` and probe name as `probe` - these two input args are required for running
# `gen_figures`
one = ONE()
eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)[0]
probe = 'probe_right'

# Get paths to the required dataset_types. If required dataset_types are not already downloaded,
# download them.
dtypes = [
    'clusters.amps', 'clusters.channels', 'clusters.depths',
    'clusters.metrics', 'clusters.peakToTrough', 'clusters.uuids',
    'clusters.waveforms', 'clusters.waveformsChannels', 'spikes.amps',
    'spikes.clusters', 'spikes.depths', 'spikes.samples', 'spikes.templates',
    'spikes.times', 'ephysData.raw.meta', '_spikeglx_sync.channels',
    '_spikeglx_sync.polarities', '_spikeglx_sync.times',
    '_iblrig_RFMapStim.raw', '_iblrig_taskSettings.raw',
    '_iblrig_codeFiles.raw'
]
d_paths = one.load(eid,
Example #29
0
class RegistrationClient:
    """
    Object that keeps the ONE instance and provides method to create sessions and register data.
    """
    def __init__(self, one=None):
        self.one = one
        if not one:
            self.one = ONE()
        self.dtypes = self.one.alyx.rest('dataset-types', 'list')
        self.registration_patterns = [
            dt['filename_pattern'] for dt in self.dtypes
            if dt['filename_pattern']
        ]
        self.file_extensions = [
            df['file_extension']
            for df in self.one.alyx.rest('data-formats', 'list')
        ]

    def create_sessions(self,
                        root_data_folder,
                        glob_pattern='**/create_me.flag',
                        dry=False):
        """
        Create sessions looking recursively for flag files

        :param root_data_folder: folder to look for create_me.flag
        :param dry: bool. Dry run if True
        :param glob_pattern: glob pattern used to look for the flag files
        :return: None
        """
        # materialise the glob so the parent folders can still be returned after the loop
        flag_files = list(Path(root_data_folder).glob(glob_pattern))
        for flag_file in flag_files:
            if dry:
                print(flag_file)
                continue
            _logger.info('creating session for ' + str(flag_file.parent))
            # providing a false flag stops the registration after session creation
            self.create_session(flag_file.parent)
            flag_file.unlink()
        return [ff.parent for ff in flag_files]

    def create_session(self, session_path):
        """
        Create the session in Alyx without registering any files.
        """
        return self.register_session(session_path, file_list=False)

    def register_sync(self, root_data_folder, dry=False):
        """
        Register sessions looking recursively for flag files

        :param root_data_folder: folder to look for register_me.flag
        :param dry: bool. Dry run if True
        :return:
        """
        flag_files = Path(root_data_folder).glob('**/register_me.flag')
        for flag_file in flag_files:
            if dry:
                continue
            file_list = flags.read_flag_file(flag_file)
            _logger.info('registering ' + str(flag_file.parent))
            self.register_session(flag_file.parent, file_list=file_list)
            flags.write_flag_file(flag_file.parent.joinpath('flatiron.flag'),
                                  file_list=file_list)
            flag_file.unlink()
            if flag_file.parent.joinpath('create_me.flag').exists():
                flag_file.parent.joinpath('create_me.flag').unlink()
            _logger.info('registered' + '\n')

    def register_session(self, ses_path, file_list=True):
        """
        Register session in Alyx

        :param ses_path: path to the session
        :param file_list: bool. If False, only create the session and skip file registration
        :return: Status string on error
        """
        if isinstance(ses_path, str):
            ses_path = Path(ses_path)
        # read meta data from the rig for the session from the task settings file
        settings_json_file = list(
            ses_path.glob('**/_mflab_taskSettings.raw*.json'))
        if not settings_json_file:
            _logger.error('could not find _mflab_taskSettings.raw.json. Abort.')
            return 'could not find _mflab_taskSettings.raw.json. Abort.'
        settings_json_file = settings_json_file[0]
        md = _read_settings_json_compatibility_enforced(settings_json_file)
        # query alyx endpoints for subject, error if not found
        try:
            subject = self.one.alyx.rest(
                'subjects?nickname=' + md['SUBJECT_NAME'], 'list')[0]
        except IndexError:
            _logger.error(
                f"Subject: {md['SUBJECT_NAME']} doesn't exist in Alyx. ABORT.")
            raise ibllib.exceptions.AlyxSubjectNotFound(md['SUBJECT_NAME'])

        # look for a session from the same subject, same number on the same day
        session_id, session = self.one.search(subjects=subject['nickname'],
                                              date_range=md['SESSION_DATE'],
                                              number=md['SESSION_NUMBER'],
                                              details=True)
        print('existing session (if any):', session_id, session)
        try:
            user = self.one.alyx.rest('users',
                                      'read',
                                      id=md["PROTOCOL_CREATOR"][0])
        except Exception as e:
            _logger.error(
                f"User: {md['PROTOCOL_CREATOR'][0]} doesn't exist in Alyx. ABORT"
            )
            raise e

        username = user['username'] if user else subject['responsible_user']

        # load the trials data to get information about session duration and performance
        ses_data = raw.load_data(ses_path)
        start_time, end_time = _get_session_times(ses_path, md, ses_data)
        n_trials, n_correct_trials = _get_session_performance(md, ses_data)

        # this is the generic relative path: subject/yyyy-mm-dd/NNN
        gen_rel_path = Path(subject['nickname'], md['SESSION_DATE'],
                            '{0:03d}'.format(int(md['SESSION_NUMBER'])))

        # if nothing found create a new session in Alyx
        task_protocol = md['PROTOCOL'] + md['VERSION_TAG']
        dset_types = md['DATASET_TYPES']
        alyx_procedure = _alyx_procedure_from_task(task_protocol)
        if not session:
            ses_ = {
                'subject': subject['nickname'],
                'users': [username],
                'procedures':
                [] if alyx_procedure is None else [alyx_procedure],
                'lab': subject['lab'],
                'project': md['PROJECT'],
                'type': 'Experiment',
                'task_protocol': task_protocol,
                'number': md['SESSION_NUMBER'],
                'start_time': ibllib.time.date2isostr(start_time),
                'end_time':
                ibllib.time.date2isostr(end_time) if end_time else None,
                'n_correct_trials': n_correct_trials,
                'n_trials': n_trials,
                'data_dataset_session_related': md['DATASET_TYPES'],
                'dset_types': md['DATASET_TYPES'],
                'json': md,
            }

            session = self.one.alyx.rest('sessions', 'create', data=ses_)

            if 'SUBJECT_WEIGHT' in md:
                wei_ = {
                    'subject': subject['nickname'],
                    'date_time': ibllib.time.date2isostr(start_time),
                    'weight': md['SUBJECT_WEIGHT'],
                    'user': username
                }
                self.one.alyx.rest('weighings', 'create', data=wei_)

        else:  # TODO: if the session exists and has no json, partial_update it
            print('session exists, updating it')
            ses_ = {
                'subject': subject['nickname'],
                'users': [username],
                'procedures':
                [] if alyx_procedure is None else [alyx_procedure],
                'lab': subject['lab'],
                'project': md['PROJECT'],
                'type': 'Experiment',
                'task_protocol': task_protocol,
                'number': md['SESSION_NUMBER'],
                'start_time': ibllib.time.date2isostr(start_time),
                'end_time':
                ibllib.time.date2isostr(end_time) if end_time else None,
                'n_correct_trials': n_correct_trials,
                'n_trials': n_trials,
                'data_dataset_session_related': md['DATASET_TYPES'],
                'dset_types': md['DATASET_TYPES'],
                'json': md,
            }
            print('ses', ses_)
            #session = self.one.alyx.rest('sessions', 'read', id=session_id[0])
            print('session_id', session_id[0])
            session = self.one.alyx.rest('sessions',
                                         'partial_update',
                                         id=session_id[0],
                                         data=ses_)
            #can try update as well

        _logger.info(session['url'] + ' ')
        # create associated water administration if not found
        if not session['wateradmin_session_related'] and ses_data:
            wa_ = {
                'subject': subject['nickname'],
                'date_time': ibllib.time.date2isostr(end_time),
                'water_administered': ses_data[-1]['water_delivered'] / 1000,
                'water_type': md.get('REWARD_TYPE') or 'Water',
                'user': username,
                'session': session['url'][-36:],
                'adlib': False
            }
            self.one.alyx.rest('water-administrations', 'create', data=wa_)
        # at this point the session has been created. If create only, exit
        if not file_list:
            return
        # register all files that match the Alyx patterns, warn user when files are encountered
        rename_files_compatibility(ses_path, md['VERSION_TAG'])
        F = []  # relative paths of the files to register
        md5s = []
        file_sizes = []
        for fn in _glob_session(ses_path):
            if fn.suffix in EXCLUDED_EXTENSIONS:
                _logger.debug('Excluded: ' + str(fn))
                continue
            if not _check_filename_for_registration(
                    fn, self.registration_patterns):
                _logger.warning('No matching dataset type for: ' + str(fn))
                continue
            if fn.suffix not in self.file_extensions:
                _logger.warning(
                    'No matching dataformat (ie. file extension) for: ' +
                    str(fn))
                continue
            if not _register_bool(fn.name, file_list):
                _logger.debug('Not in filelist: ' + str(fn))
                continue
            try:
                assert (str(gen_rel_path) in str(fn))
            except AssertionError as e:
                strerr = 'ALF folder mismatch: data is in wrong subject/date/number folder. \n'
                strerr += ' Expected ' + str(
                    gen_rel_path) + ' actual was ' + str(fn)
                _logger.error(strerr)
                raise e
            # extract the relative path of the file
            rel_path = Path(str(fn)[str(fn).find(str(gen_rel_path)):])
            F.append(str(rel_path.relative_to(gen_rel_path)))
            file_sizes.append(fn.stat().st_size)
            md5s.append(
                hashfile.md5(fn) if fn.stat().st_size < 1024**3 else None)
            _logger.info('Registering ' + str(fn))

        r_ = {
            'created_by': username,
            'path': str(gen_rel_path),
            'filenames': F,
            'hashes': md5s,
            'filesizes': file_sizes,
            'versions': [version.ibllib() for _ in F]
        }
        self.one.alyx.post('/register-file', data=r_)
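

# A minimal usage sketch for the client above; the data folder is a placeholder
# path and assumes an IBL-style Subjects tree containing create_me.flag /
# register_me.flag files, with Alyx credentials already configured for ONE.
if __name__ == '__main__':
    client = RegistrationClient()
    client.create_sessions('/path/to/Subjects', dry=True)  # list sessions that would be created
    client.register_sync('/path/to/Subjects')  # register sessions flagged with register_me.flag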
Example #30
0
if __name__ == '__main__':

    # Path, np and ONE are needed below; imports reconstructed from usage
    from pathlib import Path
    import numpy as np
    from oneibl.one import ONE
    import alf.io as ioalf

    BIN_SIZE = 0.025  # seconds
    SMOOTH_SIZE = 0.025  # seconds; standard deviation of gaussian kernel
    PCA_DIMS = 20
    CCA_DIMS = PCA_DIMS
    N_SPLITS = 5
    RNG_SEED = 0

    # get the data from flatiron
    subject = 'KS005'
    date = '2019-08-30'
    number = 1

    one = ONE()
    eid = one.search(subject=subject, date=date, number=number)
    D = one.load(eid[0], download_only=True)
    session_path = Path(D.local_path[0]).parent

    spikes = ioalf.load_object(session_path, 'spikes')
    clusters = ioalf.load_object(session_path, 'clusters')
    # channels = ioalf.load_object(session_path, 'channels')
    trials = ioalf.load_object(session_path, '_ibl_trials')

    # bin spikes and get trial IDs associated with them
    binned_spikes, binned_trialIDs, _ = bin_spikes_trials(spikes,
                                                          trials,
                                                          bin_size=0.01)

    # define areas
    brain_areas = np.unique(clusters.brainAcronyms)