Example #1
    def make(self, key):

        eID = str((acquisition.Session & key).fetch1('session_uuid'))

        key['passive_beep_times'], key['passive_valve_click_times'], \
            key['passive_white_noise_times'] = \
            ONE().load(eID, dataset_types=['_ibl_passiveBeeps.times',
                                           '_ibl_passiveValveClick.times',
                                           '_ibl_passiveWhiteNoise.times'])

        self.insert1(key)
Example #2
    def __init__(self, eid, one=None, bpod_ntrials=None, lazy=False):
        self.one = one or ONE()
        self.eid = eid
        self.details = self.one.get_details(self.eid, full=True)
        self.bpod_ntrials = bpod_ntrials or self.details["n_trials"]

        self.metrics = None
        self.passed = None

        if not lazy:
            self.compute()
Example #3
def criteria_opto_eids(eids,
                       max_lapse=0.2,
                       max_bias=0.3,
                       min_trials=200,
                       one=None):
    if one is None:
        one = ONE()
    use_eids = []
    for j, eid in enumerate(eids):
        try:
            trials = load_trials(eid, laser_stimulation=True)
            lapse_l = 1 - (
                np.sum(trials.loc[trials['signed_contrast'] == -1,
                                  'choice'] == 1) /
                trials.loc[trials['signed_contrast'] == -1, 'choice'].shape[0])
            lapse_r = 1 - (
                np.sum(trials.loc[trials['signed_contrast'] == 1,
                                  'choice'] == -1) /
                trials.loc[trials['signed_contrast'] == 1, 'choice'].shape[0])
            bias = np.abs(0.5 -
                          (np.sum(trials.loc[trials['signed_contrast'] == 0,
                                             'choice'] == 1) /
                           np.shape(trials.loc[trials['signed_contrast'] == 0,
                                               'choice'] == 1)[0]))
            details = one.get_details(eid)
            if ((lapse_l < max_lapse) & (lapse_r < max_lapse) &
                (trials.shape[0] > min_trials)
                    & (bias < max_bias) &
                ('laser_stimulation' in trials.columns)):
                use_eids.append(eid)
            elif 'laser_stimulation' not in trials.columns:
                print('No laser_stimulation data for %s %s' %
                      (details['subject'], details['start_time'][:10]))
            else:
                print(
                    '%s %s excluded (n_trials: %d, lapse_l: %.2f, lapse_r: %.2f, bias: %.2f)'
                    % (details['subject'], details['start_time'][:10],
                       trials.shape[0], lapse_l, lapse_r, bias))
        except Exception:
            print('Could not load session %s' % eid)
    return use_eids
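
A minimal usage sketch for the filter above (eids reused from Example #6; assumes ONE credentials are configured and that load_trials is importable from the same module):

from oneibl.one import ONE

one = ONE()
candidate_eids = ['cf264653-2deb-44cb-aa84-89b82507028a',
                  '4e0b3320-47b7-416e-b842-c34dc9004cf8']
# keep only opto sessions that pass the lapse, bias and trial-count criteria
use_eids = criteria_opto_eids(candidate_eids, max_lapse=0.2, max_bias=0.3,
                              min_trials=200, one=one)
print('%d of %d sessions pass' % (len(use_eids), len(candidate_eids)))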
Example #4
    def make(self, key):

        eID = (acquisition.Session & key).fetch1('session_uuid')

        extra_rewards_times = \
            ONE().load(eID, dataset_types=['_ibl_extraRewards.times'])

        key['extra_rewards_times'] = extra_rewards_times

        self.insert1(key)

        logger.info('Populated an ExtraRewards tuple for subject {subject_nickname} in session started at {session_start_time}'.format(**key))
Example #5
    def test_sync_extract(self):
        session_path = self.session_path
        dry = False
        ibllib.io.flags.create_other_flags(session_path,
                                           'extract_ephys.flag',
                                           force=True)
        iblrig_pipeline.extract_ephys(session_path, dry=dry)

        one = ONE(base_url='https://test.alyx.internationalbrainlab.org',
                  username='******',
                  password='******')
        iblrig_pipeline.register(session_path, one=one)
Example #6
    def setUp(self):
        # Init connection to the database
        one = ONE(base_url='https://test.alyx.internationalbrainlab.org',
                  username='******',
                  password='******')
        eids = [
            'cf264653-2deb-44cb-aa84-89b82507028a',
            '4e0b3320-47b7-416e-b842-c34dc9004cf8'
        ]
        self.eid = eids[0]
        self.eid2 = eids[1]
        self.One = one
Example #7
def get_one_data(tmp_path):
    eid_temp = 'da188f2c-553c-4e04-879b-c9ea2d1b9a93'
    metadata_converter = Alyx2NWBMetadata(eid=eid_temp, one_obj=ONE())
    nwbsaveloc = str(tmp_path/'test.nwb')
    converter_nwb = Alyx2NWBConverter(
        metadata_obj=metadata_converter,
        saveloc=nwbsaveloc,
        save_raw=False,
        save_camera_raw=False)
    converter_nwb.run_conversion()
    converter_nwb.write_nwb()
    yield nwbsaveloc
Example #8
def get_all_sess_with_ME():
    one = ONE()
    # get all bwm sessions with dlc
    all_sess = one.alyx.rest('sessions',
                             'list',
                             project='ibl_neuropixel_brainwide_01',
                             task_protocol="ephys",
                             dataset_types='camera.ROIMotionEnergy')

    eids = [s['url'].split('/')[-1] for s in all_sess]

    return eids
Example #9
def get_micro_manipulator_data(subject, one=None, force_extract=False):
    """
    Looks for all ephys sessions for a given subject and gets the probe micro-manipulator
    trajectories.
    If the probes ALF object is not on flat-iron, attempts to perform the extraction from
    meta-data and the task settings file.
    """
    if not one:
        one = ONE()

    eids, sessions = one.search(subject=subject,
                                task_protocol='ephys',
                                details=True)
    dtypes = [
        'probes.description',
        'probes.trajectory',
    ]
    probes = alf.io.AlfBunch({})
    for ses in sessions:
        sess_path = Path(ses['local_path'])
        probe = None
        if not force_extract:
            probe = one.load_object(ses['url'], 'probes')
        if not probe:
            _logger.warning(f"Re-extraction probe info for {sess_path}")
            dtypes = ['_iblrig_taskSettings.raw', 'ephysData.raw.meta']
            raw_files = one.load(ses['url'],
                                 dataset_types=dtypes,
                                 download_only=True)
            if all([rf is None for rf in raw_files]):
                _logger.warning(
                    f"no raw settings file nor ephys data found for"
                    f" {ses['local_path']}. Skipping this session.")
                continue
            extract_probes(sess_path, bin_exists=False)
            probe = alf.io.load_object(sess_path.joinpath('alf'), 'probes')
        one.load(ses['url'],
                 dataset_types='channels.localCoordinates',
                 download_only=True)
        # get for each insertion the sites local mapping: if not found assumes checkerboard pattern
        probe['sites_coordinates'] = []
        for prb in probe.description:
            chfile = Path(ses['local_path']).joinpath(
                'alf', prb['label'], 'channels.localCoordinates.npy')
            if chfile.exists():
                probe['sites_coordinates'].append(np.load(chfile))
            else:
                _logger.warning(
                    f"no channels.localCoordinates found for {ses['local_path']}."
                    f" Assuming checkerboard pattern")
                probe['sites_coordinates'].append(SITES_COORDINATES)
        # put the session information in there
        probe['session'] = [ses] * len(probe.description)
        probes = probes.append(probe)
    return probes
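
A usage sketch (the subject nickname is hypothetical; assumes an Alyx connection and that the function above is importable):

from oneibl.one import ONE

one = ONE()
probes = get_micro_manipulator_data('ZM_1150', one=one)  # hypothetical subject
# entries of probes.description, probes.sites_coordinates and probes.session
# are aligned, one per probe insertion
for desc, ses in zip(probes['description'], probes['session']):
    print(ses['subject'], ses['start_time'][:10], desc['label'])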
Example #10
def load_wheel_reaction_times(eid, one=None):
    """
    Return the calculated reaction times for session.  Reaction times are defined as the time
    between the go cue (onset tone) and the onset of the first substantial wheel movement.   A
    movement is considered sufficiently large if its peak amplitude is at least 1/3rd of the
    distance to threshold (~0.1 radians).

    Negative times mean the onset of the movement occurred before the go cue.  NaNs may occur
    if there was no detected movement within the period, or when the goCue_times or
    feedback_times are NaN.

    Parameters
    ----------
    eid : str
        Session UUID
    one : oneibl.ONE
        An instance of ONE for loading data.  If None a new one is instantiated using the defaults.

    Returns
    ----------
    array-like
        reaction times
    """
    if one is None:
        one = ONE()

    trials = one.load_object(eid, 'trials')
    # If already extracted, load and return
    if trials and 'firstMovement_times' in trials:
        return trials['firstMovement_times'] - trials['goCue_times']
    # Otherwise load wheelMoves object and calculate
    moves = one.load_object(eid, 'wheelMoves')
    # Re-extract wheel moves if necessary
    if not moves or 'peakAmplitude' not in moves:
        wheel = one.load_object(eid, 'wheel')
        moves = extract_wheel_moves(wheel['timestamps'], wheel['position'])
    assert trials and moves, 'unable to load trials and wheelMoves data'
    firstMove_times, is_final_movement, ids = extract_first_movement_times(
        moves, trials)
    return firstMove_times - trials['goCue_times']
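
A short usage sketch (eid reused from Example #6; assumes default ONE credentials):

import numpy as np
from oneibl.one import ONE

one = ONE()
eid = 'cf264653-2deb-44cb-aa84-89b82507028a'
rt = load_wheel_reaction_times(eid, one=one)
# negative values: movement started before the go cue; NaNs: no movement detected
print('median reaction time: %.3f s' % np.nanmedian(rt))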
Example #11
    def make(self, key):

        passive_trial_key = key.copy()
        eID = (acquisition.Session & key).fetch1('session_uuid')

        passive_visual_stim_contrast_left, passive_visual_stim_contrast_right, \
            passive_visual_stim_times = \
            ONE().load(eID, dataset_types=['_ibl_passiveVisual.contrastLeft',
                                           '_ibl_passiveVisual.contrastRight',
                                           '_ibl_passiveVisual.times'])

        assert len(
            np.unique(
                np.array([
                    len(passive_visual_stim_contrast_left),
                    len(passive_visual_stim_contrast_right),
                    len(passive_visual_stim_times)
                ]))
        ) == 1, 'Loaded passive visual files do not have the same length'

        key['passive_trials_total_num'] = len(passive_visual_stim_times)
        key['passive_trials_start_time'] = float(passive_visual_stim_times[0])
        key['passive_trials_end_time'] = float(passive_visual_stim_times[-1])

        self.insert1(key)

        for idx_trial in range(len(passive_visual_stim_times)):

            if np.isnan(passive_visual_stim_contrast_left[idx_trial]):
                passive_stim_contrast_left = 0
            else:
                passive_stim_contrast_left = passive_visual_stim_contrast_left[
                    idx_trial]

            if np.isnan(passive_visual_stim_contrast_right[idx_trial]):
                passive_stim_contrast_right = 0
            else:
                passive_stim_contrast_right = passive_visual_stim_contrast_right[
                    idx_trial]

            passive_trial_key['passive_trial_id'] = idx_trial + 1
            passive_trial_key['passive_trial_stim_on_time'] = float(
                passive_visual_stim_times[idx_trial])
            passive_trial_key['passive_trial_stim_contrast_left'] = float(
                passive_stim_contrast_left)
            passive_trial_key['passive_trial_stim_contrast_right'] = float(
                passive_stim_contrast_right)

            self.PassiveTrial().insert1(passive_trial_key)

        logger.info(
            'Populated a PassiveTrialSet tuple, all Trial tuples and Excluded Trial tuples for subject {subject_nickname} in session started at {session_start_time}'
            .format(**key))
Example #12
def job_creator(root_path,
                one=None,
                dry=False,
                rerun=False,
                max_md5_size=None):
    """
    Server function that will look for creation flags and for each:
    1) create the sessions on Alyx
    2) register the corresponding raw data files on Alyx
    3) create the tasks to be run on Alyx
    :param root_path: main path containing sessions or session path
    :param one
    :param dry
    :param rerun
    :param max_md5_size
    :return:
    """
    if not one:
        one = ONE()
    rc = registration.RegistrationClient(one=one)
    flag_files = list(Path(root_path).glob('**/extract_me.flag'))
    flag_files += list(Path(root_path).glob('**/extract_ephys.flag'))
    all_datasets = []
    for flag_file in flag_files:
        session_path = flag_file.parent
        _logger.info(f'creating session for {session_path}')
        if dry:
            continue
        # providing a false flag stops the registration after session creation
        rc.create_session(session_path)
        flag_file.unlink()
        files, dsets = registration.register_session_raw_data(
            session_path, one=one, max_md5_size=max_md5_size)
        if dsets is not None:
            all_datasets.extend(dsets)
        session_type = rawio.get_session_extractor_type(session_path)
        if session_type in ['biased', 'habituation', 'training']:
            pipe = training_preprocessing.TrainingExtractionPipeline(
                session_path, one=one)
        elif session_type in ['ephys']:
            pipe = ephys_preprocessing.EphysExtractionPipeline(session_path,
                                                               one=one)
        else:
            _logger.info(
                f"Session type {session_type} has no matching extractor {session_path}"
            )
            continue  # no pipeline to create tasks for
        if rerun:
            rerun__status__in = '__all__'
        else:
            rerun__status__in = ['Waiting']
        pipe.create_alyx_tasks(rerun__status__in=rerun__status__in)
    return all_datasets
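
A usage sketch (the subjects path is hypothetical; a dry run lists the sessions that would be created without touching Alyx):

from oneibl.one import ONE

one = ONE()
job_creator('/mnt/s0/Data/Subjects', one=one, dry=True)   # inspect first
dsets = job_creator('/mnt/s0/Data/Subjects', one=one)     # then register for real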
Example #13
def get_latest_session_eid(subject_nickname, one=None):
    """Return the eID of the latest session for Subject that has data on
    Flatiron"""
    one = one or ONE()
    last_session = one.search(
        subject=subject_nickname,
        dataset_types=["_iblrig_taskData.raw", "_iblrig_taskSettings.raw"],
        limit=1,
    )
    if last_session:
        return last_session[0]
    else:
        return None
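
A usage sketch (subject nickname reused from the Example #16 docstring):

from oneibl.one import ONE

one = ONE()
eid = get_latest_session_eid('KS005', one=one)
if eid is None:
    print('no session with task data on Flatiron for this subject')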
Example #14
    def make(self, key):
        datasets = (data.FileRecord & key & 'repo_name LIKE "flatiron_%"' & {
            'exists': 1
        }).fetch('dataset_name')
        is_complete = bool(
            np.all([req_ds in datasets for req_ds in self.required_datasets]))
        if is_complete is True:
            if '_ibl_trials.stimOn_times.npy' not in datasets:
                key['stim_on_times_status'] = 'Missing'
            else:
                eID = str((acquisition.Session & key).fetch1('session_uuid'))
                lab_name = (subject.SubjectLab & key).fetch1('lab_name')
                if lab_name == 'wittenlab':
                    stimOn_times = np.squeeze(ONE().load(
                        eID,
                        dataset_types='_ibl_trials.stimOn_times',
                        clobber=True))
                else:
                    stimOn_times = ONE().load(
                        eID, dataset_types='_ibl_trials.stimOn_times')

                if np.all(np.isnan(stimOn_times)):
                    key['stim_on_times_status'] = 'Missing'
                elif np.any(np.isnan(stimOn_times)):
                    key['stim_on_times_status'] = 'Partial'
                else:
                    key['stim_on_times_status'] = 'Complete'

            if '_ibl_trials.repNum.npy' not in datasets:
                key['rep_num_status'] = 'Missing'
            else:
                key['rep_num_status'] = 'Complete'

            if '_ibl_trials.included.npy' not in datasets:
                key['included_status'] = 'Missing'
            else:
                key['included_status'] = 'Complete'

            self.insert1(key)
Example #15
def download_bpodqc_raw_data(eid, one=None):
    one = one or ONE()
    dstypes = [
        "_iblrig_taskData.raw",
        "_iblrig_taskSettings.raw",
        "_iblrig_encoderPositions.raw",
        "_iblrig_encoderEvents.raw",
        "_iblrig_stimPositionScreen.raw",
        "_iblrig_syncSquareUpdate.raw",
        "_iblrig_encoderTrialInfo.raw",
        "_iblrig_ambientSensorData.raw",
    ]
    one.load(eid, dataset_types=dstypes, download_only=True)
Example #16
def ref2path(ref: Union[str, Mapping, Iter], one=None, offline: bool = False) -> Union[Path, List]:
    """
    Convert one or more experiment references to session path(s)
    :param ref: One or more objects with keys ('subject', 'date', 'sequence'), or strings with the
    form yyyy-mm-dd_n_subject
    :param one: An instance of ONE
    :param offline: Return path without connecting to database (unimplemented)
    :return: a Path object for the experiment session

    Examples:
    >>> base = 'https://test.alyx.internationalbrainlab.org'
    >>> one = ONE(username='******', password='******', base_url=base)
    Connected to...
    >>> ref = {'subject': 'flowers', 'date': datetime(2018, 7, 13).date(), 'sequence': 1}
    >>> ref2path(ref, one=one)
    WindowsPath('E:/FlatIron/zadorlab/Subjects/flowers/2018-07-13/001')
    >>> ref2path(['2018-07-13_1_flowers', '2019-04-11_1_KS005'], one=one)
    [WindowsPath('E:/FlatIron/zadorlab/Subjects/flowers/2018-07-13/001'),
     WindowsPath('E:/FlatIron/cortexlab/Subjects/KS005/2019-04-11/001')]
    """
    if not one:
        one = ONE()
    if offline:
        raise NotImplementedError  # Requires lab name :(
        # root = Path(one._get_cache_dir(None))
        # path = root / ref.subject / str(ref.date) / ('%03d' % ref.sequence)
    else:
        ref = ref2dict(ref, parse=False)
        eid, (d,) = one.search(
            subjects=ref['subject'],
            date_range=(str(ref['date']), str(ref['date'])),
            number=ref['sequence'],
            details=True)
        path = d.get('local_path')
        if not path:
            root = Path(one._get_cache_dir(None)) / 'Subjects' / d['lab']
            return root / d['subject'] / d['start_time'][:10] / ('%03d' % d['number'])
        else:
            return Path(path)
Example #17
def download_raw_video(eid, cameras=None):
    """
    Downloads the raw video from FlatIron or the cache dir.
    This allows you to download just one of the three videos.
    :param cameras: the specific camera(s) to load
        (i.e. 'left', 'right', or 'body'). If None, all three videos are downloaded.
    :return: the file path(s) of the raw videos
    """
    one = ONE()
    if cameras:
        cameras = [cameras] if isinstance(cameras, str) else cameras
        cam_files = ['_iblrig_{}Camera.raw.mp4'.format(cam) for cam in cameras]
        datasets = one._alyxClient.get('sessions/' +
                                       eid)['data_dataset_session_related']
        urls = [ds['data_url'] for ds in datasets if ds['name'] in cam_files]
        cache_dir = one.path_from_eid(eid).joinpath('raw_video_data')
        if not os.path.exists(str(cache_dir)):
            os.mkdir(str(cache_dir))
        else:  # Check if file already downloaded
            # cam_files = [fi[:-4] for fi in cam_files]  # Remove ext
            filenames = [
                f for f in os.listdir(str(cache_dir))
                if any([cam in f for cam in cam_files])
            ]
            if filenames:
                return [cache_dir.joinpath(file) for file in filenames]

        return http_download_file_list(urls,
                                       username=one._par.HTTP_DATA_SERVER_LOGIN,
                                       password=one._par.HTTP_DATA_SERVER_PWD,
                                       cache_dir=str(cache_dir))

    else:
        return one.load(eid, ['_iblrig_Camera.raw'], download_only=True)
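
A usage sketch (eid reused from Example #6); cameras may be a single name or a list, and the cached or downloaded local file path(s) are returned per the docstring:

# download only the left camera video
paths = download_raw_video('cf264653-2deb-44cb-aa84-89b82507028a', cameras='left')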
Example #18
def load_spike_sorting(eid, one=None, dataset_types=None):
    """
    From an eid, hits the Alyx database and downloads a standard default set of dataset types
    From a local session Path (pathlib.Path), loads a standard default set of dataset types
     to perform analysis:
        'clusters.channels',
        'clusters.depths',
        'clusters.metrics',
        'spikes.clusters',
        'spikes.times',
        'probes.description'
    :param eid: experiment UUID or pathlib.Path of the local session
    :param one:
    :param dataset_types: additional spikes/clusters objects to add to the standard default list
    :return: spikes, clusters (dict of bunch, 1 bunch per probe)
    """
    if isinstance(eid, Path):
        return _load_spike_sorting_local(eid)
    if not one:
        one = ONE()
    # This is a first draft, no safeguard, no error handling and a draft dataset list.
    session_path = one.path_from_eid(eid)
    if not session_path:
        print("no session path")
        return (None, None), 'no session path'

    dtypes_default = [
        'clusters.channels', 'clusters.depths', 'clusters.metrics',
        'spikes.clusters', 'spikes.times', 'probes.description'
    ]
    if dataset_types is None:
        dtypes = dtypes_default
    else:
        #  Append extra optional DS
        dtypes = list(set(dataset_types + dtypes_default))

    one.load(eid, dataset_types=dtypes, download_only=True)
    return _load_spike_sorting_local(session_path)
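
A usage sketch of the happy path (eid reused from Example #6; one bunch per probe label):

from oneibl.one import ONE

one = ONE()
spikes, clusters = load_spike_sorting('cf264653-2deb-44cb-aa84-89b82507028a',
                                      one=one)
for label in spikes:
    print(label, spikes[label]['times'].size, 'spikes,',
          clusters[label]['depths'].size, 'clusters')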
Example #19
def purge_local_data(local_folder, file_name, lab=None, dry=False):
    # Figure out datasetType from file_name or file path
    file_name = Path(file_name).name
    alf_parts = file_name.split('.')
    dstype = '.'.join(alf_parts[:2])
    print(f'Looking for file <{file_name}> in folder <{local_folder}>')
    # Get all paths for file_name in local folder
    local_folder = Path(local_folder)
    files = list(local_folder.rglob(f'*{file_name}'))
    print(f'Found {len(files)} files')
    print(f'Checking on Flatiron for datasetType: {dstype}...')
    # Get all sessions and details from Alyx that have the dstype
    one = ONE()
    if lab is None:
        eid, det = one.search(dataset_types=[dstype], details=True)
    else:
        eid, det = one.search(dataset_types=[dstype], lab=lab, details=True)
    urls = []
    for d in det:
        urls.extend([x['data_url'] for x in d['data_dataset_session_related']
                     if x['dataset_type'] == dstype])
    # Remove None answers when session is registered but dstype not there yet
    urls = [u for u in urls if u is not None]
    print(f'Found files on Flatiron: {len(urls)}')
    to_remove = []
    for f in files:
        sess_name = session_name(f)
        for u in urls:
            if sess_name in u:
                to_remove.append(f)
    print(f'Local files to remove: {len(to_remove)}')
    for f in to_remove:
        print(f)
        if dry:
            continue
        else:
            f.unlink()
    return
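
A usage sketch (folder and lab are hypothetical; dry=True only prints the local files that already exist on Flatiron, without deleting anything):

purge_local_data('/mnt/s0/Data/Subjects', '_iblrig_taskData.raw.jsonable',
                 lab='zadorlab', dry=True)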
Example #20
def tasks_runner(subjects_path,
                 tasks_dict,
                 one=None,
                 dry=False,
                 count=5,
                 time_out=None,
                 **kwargs):
    """
    Function to run a list of tasks (task dictionary from Alyx query) on a local server
    :param subjects_path:
    :param tasks_dict:
    :param one:
    :param dry:
    :param count: maximum number of tasks to run
    :param time_out: between each task, if time elapsed is greater than time out, returns (seconds)
    :param kwargs:
    :return: list of dataset dictionaries
    """
    if one is None:
        one = ONE()
    import time
    tstart = time.time()
    c = 0
    last_session = None
    all_datasets = []
    for tdict in tasks_dict:
        # if the count is reached or if the time_out has been elapsed, break the loop and return
        if c >= count or (time_out and time.time() - tstart > time_out):
            break
        # reconstruct the session local path. As many jobs belong to the same session
        # cache the result
        if last_session != tdict['session']:
            ses = one.alyx.rest('sessions',
                                'list',
                                django=f"pk,{tdict['session']}")[0]
            session_path = Path(subjects_path).joinpath(
                Path(ses['subject'], ses['start_time'][:10],
                     str(ses['number']).zfill(3)))
            last_session = tdict['session']
        if dry:
            print(session_path, tdict['name'])
        else:
            task, dsets = tasks.run_alyx_task(tdict=tdict,
                                              session_path=session_path,
                                              one=one,
                                              **kwargs)
            if dsets:
                all_datasets.extend(dsets)
                c += 1
    return all_datasets
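
A usage sketch (the tasks query is an assumption about the Alyx REST endpoint; the subjects path is hypothetical):

from oneibl.one import ONE

one = ONE()
waiting = one.alyx.rest('tasks', 'list', status='Waiting')  # assumed endpoint/filter
dsets = tasks_runner('/mnt/s0/Data/Subjects', waiting, one=one, count=5, dry=True)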
Example #21
    def make(self, key):

        eID = (acquisition.Session & key).fetch1('session_uuid')

        sparse_noise_positions, sparse_noise_times = \
            ONE().load(eID, dataset_types=['_ibl_sparseNoise.positions', '_ns_sparseNoise.times'])

        assert len(np.unique(np.array([len(sparse_noise_positions), len(sparse_noise_times)]))) == 1, 'Loaded sparse noise files do not have the same length'

        key['sparse_noise_x_pos'] = sparse_noise_positions[:, 0]
        key['sparse_noise_y_pos'] = sparse_noise_positions[:, 1]
        key['sparse_noise_times'] = sparse_noise_times
        self.insert1(key)
        logger.info('Populated a SparseNoise tuple for subject {subject_nickname} in session started at {session_start_time}'.format(**key))
Example #22
def sessions_with_region(acronym, one=None):
    if one is None:
        one = ONE()
    query_str = f'channels__brain_region__acronym__icontains,{acronym},' \
                'probe_insertion__session__project__name__icontains,ibl_neuropixel_brainwide_01,' \
                'probe_insertion__session__qc__lt,50,' \
                '~probe_insertion__json__qc,CRITICAL'
    traj = one.alyx.rest('trajectories',
                         'list',
                         provenance='Ephys aligned histology track',
                         django=query_str)
    eids = np.array([i['session']['id'] for i in traj])
    probes = np.array([i['probe_name'] for i in traj])
    return eids, probes
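
A usage sketch (the brain region acronym is an example; returns paired session eids and probe names):

from oneibl.one import ONE

one = ONE()
eids, probes = sessions_with_region('CA1', one=one)
for eid, probe in zip(eids, probes):
    print(eid, probe)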
Example #23
def create_alyx_probe_insertions(session_path: str,
                                 force: bool = False,
                                 one: object = None,
                                 model: str = None,
                                 labels: list = None):
    if one is None:
        one = ONE()
    if is_uuid_string(session_path):
        eid = session_path
    else:
        eid = one.eid_from_path(session_path)
    if eid is None:
        print(
            'Session not found on Alyx: please create session before creating insertions'
        )
        return
    if model is None:
        probe_model = spikeglx.get_neuropixel_version_from_folder(session_path)
        pmodel = '3B2' if probe_model == '3B' else probe_model
    else:
        pmodel = model
    raw_ephys_data_path = Path(session_path) / 'raw_ephys_data'
    if labels is None:
        probe_labels = [
            x.name for x in Path(raw_ephys_data_path).glob('*')
            if x.is_dir() and ('00' in x.name or '01' in x.name)
        ]
    else:
        probe_labels = labels
    # create the dictionary
    for plabel in probe_labels:
        insdict = {'session': eid, 'name': plabel, 'model': pmodel}
        # search for the corresponding insertion in Alyx
        alyx_insertion = one.alyx.rest('insertions',
                                       'list',
                                       session=insdict['session'],
                                       name=insdict['name'])
        # if it doesn't exist, create it
        if len(alyx_insertion) == 0:
            alyx_insertion = one.alyx.rest('insertions',
                                           'create',
                                           data=insdict)
        else:
            iid = alyx_insertion[0]['id']
            if force:
                alyx_insertion = one.alyx.rest('insertions',
                                               'update',
                                               id=iid,
                                               data=insdict)
            else:
                alyx_insertion = alyx_insertion[0]
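
A usage sketch (session path composed from the Example #16 docstring; model and labels are optional overrides for what would otherwise be inferred from the raw ephys folder):

from oneibl.one import ONE

create_alyx_probe_insertions('/mnt/s0/Data/Subjects/KS005/2019-04-11/001',
                             one=ONE(), model='3B2',
                             labels=['probe00', 'probe01'])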
Example #24
def job_creator(root_path, one=None, dry=False, rerun=False, max_md5_size=None):
    """
    Server function that will look for creation flags and for each:
    1) create the sessions on Alyx
    2) register the corresponding raw data files on Alyx
    3) create the tasks to be run on Alyx
    :param root_path: main path containing sessions or session path
    :param one
    :param dry
    :param rerun
    :param max_md5_size
    :return:
    """
    if not one:
        one = ONE()
    rc = registration.RegistrationClient(one=one)
    flag_files = list(Path(root_path).glob('**/raw_session.flag'))
    all_datasets = []
    for flag_file in flag_files:
        session_path = flag_file.parent
        _logger.info(f'creating session for {session_path}')
        if dry:
            continue
        # if the subject doesn't exist in the database, skip
        try:
            rc.create_session(session_path)
            files, dsets = registration.register_session_raw_data(
                session_path, one=one, max_md5_size=max_md5_size)
        except BaseException:
            _logger.error(traceback.format_exc())
            _logger.info(f"Creating session / registering raw datasets {session_path} errored")
            continue
        if dsets is not None:
            all_datasets.extend(dsets)
        session_type = get_session_extractor_type(session_path)
        if session_type in ['biased', 'habituation', 'training']:
            pipe = training_preprocessing.TrainingExtractionPipeline(session_path, one=one)
        # only start extracting ephys on a raw_session.flag
        elif session_type in ['ephys'] and flag_file.name == 'raw_session.flag':
            pipe = ephys_preprocessing.EphysExtractionPipeline(session_path, one=one)
        else:
            _logger.info(f"Session type {session_type} has no matching extractor {session_path}")
            continue  # no pipeline to create tasks for
        if rerun:
            rerun__status__in = '__all__'
        else:
            rerun__status__in = ['Waiting']
        pipe.create_alyx_tasks(rerun__status__in=rerun__status__in)
        flag_file.unlink()
    return all_datasets
Example #25
def load_spike_sorting(eid, one=None, dataset_types=None):
    """
    From an eid, hits the Alyx database and downloads a standard set of dataset types to perform
    analysis.
    :param eid:
    :param one: an instance of ONE; if None, a new one is instantiated
    :param dataset_types: additional spikes/clusters objects to add to the standard list
    :return:
    """
    if not one:
        one = ONE()
    # This is a first draft, no safeguard, no error handling and a draft dataset list.
    session_path = one.path_from_eid(eid)
    dtypes = [
        'clusters.channels',
        'clusters.depths',
        'clusters.metrics',
        'spikes.clusters',
        'spikes.times',
        'probes.description',
    ]
    if dataset_types:
        dtypes = list(set(dataset_types + dtypes))

    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    probes = alf.io.load_object(session_path.joinpath('alf'), 'probes')
    spikes = {}
    clusters = {}
    for i, _ in enumerate(probes['description']):
        probe_path = session_path.joinpath('alf',
                                           probes['description'][i]['label'])
        cluster = alf.io.load_object(probe_path, object='clusters')
        spike = alf.io.load_object(probe_path, object='spikes')
        label = probes['description'][i]['label']
        clusters[label] = cluster
        spikes[label] = spike

    return spikes, clusters
Example #26
def get_example_images(eid):

    eids = get_repeated_sites()
    #    eid = eids[23]
    #    video_type = 'body'

    #eids = ['15f742e1-1043-45c9-9504-f1e8a53c1744']
    eids = ['4a45c8ba-db6f-4f11-9403-56e06a33dfa4']
    frts = {'body': 30, 'left': 60, 'right': 150}

    one = ONE()

    #for eid in eids:
    for video_type in frts:

        frame_idx = [20 * 60 * frts[video_type]]
        try:

            r = one.list(eid, 'dataset_types')
            recs = [
                x for x in r if f'{video_type}Camera.raw.mp4' in x['name']
            ][0]['file_records']
            video_path = [
                x['data_url'] for x in recs if x['data_url'] is not None
            ][0]

            frames = get_video_frames_preload(video_path,
                                              frame_idx,
                                              mask=np.s_[:, :, 0])
            np.save(
                '/home/mic/reproducible_dlc/example_images/'
                f'{eid}_{video_type}.npy', frames)
            print(eid, video_type, 'done')
        except Exception:
            print(eid, video_type, 'error')
            continue
Example #27
def get_repeated_sites():
    one = ONE()
    STR_QUERY = 'probe_insertion__session__project__name__icontains,ibl_neuropixel_brainwide_01,' \
                'probe_insertion__session__qc__lt,50,' \
                '~probe_insertion__json__qc,CRITICAL,' \
                'probe_insertion__session__n_trials__gte,400'
    all_sess = one.alyx.rest('trajectories',
                             'list',
                             provenance='Planned',
                             x=-2243,
                             y=-2000,
                             theta=15,
                             django=STR_QUERY)
    eids = [s['session']['id'] for s in all_sess]

    return eids
Example #28
    def make(self, key):
        trial_key = key.copy()
        eID = str((acquisition.Session & key).fetch1('session_uuid'))
        asd = ONE().load(eID, dataset_types='_iblrig_ambientSensorData.raw')

        if not len(TrialSet.Trial & key) == len(asd[0]):
            print(
                'Size of ambient sensor data does not match the trial number')
            return

        for idx_trial, asd_trial in enumerate(asd[0]):
            trial_key['trial_id'] = idx_trial + 1
            trial_key['temperature_c'] = asd_trial['Temperature_C'][0]
            trial_key['air_pressure_mb'] = asd_trial['AirPressure_mb'][0]
            trial_key['relative_humidity'] = asd_trial['RelativeHumidity'][0]
            self.insert1(trial_key)
Example #29
def list_current_sessions(one=None):
    """
    Get the set of session eids used in integration tests.  When writing new tests, this can be
    a useful way of choosing which sessions to use.

    :param one: An ONE object for fetching session eid from path
    :return: Set of integration session eids
    """
    def not_null(itr):
        return filter(lambda x: x is not None, itr)

    one = one or ONE()
    root = IntegrationTest.default_data_root()
    folders = set(alf.folders.session_path(x[0]) for x in os.walk(root))
    eids = not_null(one.eid_from_path(x) for x in not_null(folders))
    return set(eids)
Example #30
def plot_saturation():
    '''
    Plot, per session, the number of saturated 200 ms segments
    (segments that are quiet in terms of spikes)
    '''

    plt.ion()

    results_folder = '/home/mic/saturation_scan/'
    t = list(os.walk(results_folder))[0][-1]

    sess_info = []
    sat_segs = []
    for ii in t:
        try:
            a = np.load(results_folder + ii)
        except Exception:
            print("couldn't load %s" % ii)
            continue
        sess_info.append(a[:, 0])
        sat_segs.append(a[:, 1])

    flat_sess_info = [item for sublist in sess_info for item in sublist]
    flat_sat_segs = [int(item) for sublist in sat_segs for item in sublist]

    maxes = np.where(np.array(flat_sat_segs) > 10)

    height = np.array(flat_sat_segs)[maxes]  #flat_sat_segs
    bars = np.array(flat_sess_info)[maxes]

    one = ONE()

    #flat_sess_info
    y_pos = np.arange(len(bars))

    # Create horizontal bars
    plt.barh(y_pos, height)

    # Create names on the y-axis
    plt.yticks(y_pos, bars, fontsize=10)
    plt.xlabel('number of saturated 200 ms segments')
    plt.title(
        'sessions with histology that meet behavior criterion for the BWM')

    sess_info = bars
    seg_info = height

    return seg_info, sess_info