Example no. 1
    def path_from_eid(self, eid: str) -> Optional[Listable(Path)]:
        """
        From an experiment id or a list of experiment ids, get the local cache path(s)
        :param eid: eid (UUID) or list of UUIDs
        :return: a local session Path, or a list of Paths
        """
        # If eid is a list of eIDs recurse through list and return the results
        if isinstance(eid, list):
            path_list = []
            for p in eid:
                path_list.append(self.path_from_eid(p))
            return path_list
        # If not valid return None
        if not alfio.is_uuid_string(eid):
            print(f'{eid} is not a valid eID/UUID string')
            return
        if self._cache.size == 0:
            return

        # load path from cache
        ic = find_first_2d(self._cache[['eid_0', 'eid_1']].to_numpy(),
                           parquet.str2np(eid))
        if ic is not None:
            ses = self._cache.iloc[ic]
            return Path(self._par.CACHE_DIR).joinpath(
                ses['lab'], 'Subjects', ses['subject'],
                ses['start_time'].isoformat()[:10],
                str(ses['number']).zfill(3))
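
A minimal usage sketch for the method above; the ONE import path and the eid value are assumptions for illustration:

    from oneibl.one import ONE  # import path assumed from the ibllib-era layout

    one = ONE()
    eid = '4e0b3320-47b7-416e-b842-c34dc9004cf8'  # hypothetical UUID
    session_path = one.path_from_eid(eid)         # a single Path, or None if not cached
    paths = one.path_from_eid([eid, eid])         # a list of Paths when given a list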
Example no. 2
 def get_details(self, eid: str, full: bool = False):
     """ Returns details of eid like from one.search, optional return full
     session details.
     """
     # If eid is a list of eIDs recurse through list and return the results
     if isinstance(eid, list):
         details_list = []
         for p in eid:
             details_list.append(self.get_details(p, full=full))
         return details_list
     # If not valid return None
     if not alfio.is_uuid_string(eid):
         print(f'{eid} is not a valid eID/UUID string')
         return
     # load all details
     dets = self.alyx.rest("sessions", "read", eid)
     if full:
         return dets
     # If full is not requested, return the standard output, as from one.search
     det_fields = [
         "subject", "start_time", "number", "lab", "project", "url",
         "task_protocol", "local_path"
     ]
     out = {k: v for k, v in dets.items() if k in det_fields}
     out.update({'local_path': self.path_from_eid(eid)})
     return out
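
A hedged usage sketch, reusing the hypothetical one instance and placeholder eid from the sketch above:

    details = one.get_details(eid)          # dict with the search-style fields
    full = one.get_details(eid, full=True)  # complete Alyx session record
    many = one.get_details([eid, eid])      # recurses over lists of eids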
Example no. 3
    def to_eid(self,
               id: Listable(Union[str, Path, UUID, dict]) = None,
               cache_dir: Optional[Union[str, Path]] = None) -> Listable(str):
        if isinstance(id, (list, tuple)):  # Recurse
            return [self.to_eid(i, cache_dir) for i in id]
        if isinstance(id, UUID):
            return str(id)
        # elif is_exp_ref(id):
        #     return ref2eid(id, one=self)
        elif isinstance(id, dict):
            assert {'subject', 'number', 'start_time', 'lab'}.issubset(id)
            root = Path(self._get_cache_dir(cache_dir))
            id = root.joinpath(id['lab'], 'Subjects', id['subject'],
                               id['start_time'][:10], ('%03d' % id['number']))

        if alfio.is_session_path(id):
            return self.eid_from_path(id)
        elif isinstance(id, str):
            if len(id) > 36:
                id = id[-36:]
            if not alfio.is_uuid_string(id):
                raise ValueError('Invalid experiment ID')
            else:
                return id
        else:
            raise ValueError('Unrecognized experiment ID')
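
to_eid normalises several input flavours to a plain UUID string; a sketch under the same assumptions (the subject, lab, dates and URL host are placeholders, and the dict keys mirror the assert in the code):

    from uuid import UUID

    one.to_eid(UUID('4e0b3320-47b7-416e-b842-c34dc9004cf8'))  # UUID -> str
    one.to_eid('https://alyx.example.org/sessions/'
               '4e0b3320-47b7-416e-b842-c34dc9004cf8')        # only the trailing 36 chars are kept
    one.to_eid({'subject': 'ZM_1150', 'number': 1, 'lab': 'mainenlab',
                'start_time': '2019-12-10T12:00:00'})         # details dict -> session path -> eid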
Example no. 4
    def path_from_eid(self,
                      eid: str,
                      use_cache: bool = True) -> Listable(Path):
        """
        From an experiment id or a list of experiment ids, get the local cache path(s)
        :param eid: eid (UUID) or list of UUIDs
        :param use_cache: if set to False, will force a database connection
        :return: a local session Path, or a list of Paths
        """
        # If eid is a list of eIDs recurse through list and return the results
        if isinstance(eid, list):
            path_list = []
            for p in eid:
            path_list.append(self.path_from_eid(p, use_cache=use_cache))
            return path_list
        # If not valid return None
        if not alfio.is_uuid_string(eid):
            print(f'{eid} is not a valid eID/UUID string')
            return

        # first try avoid hitting the database
        if self._cache.size > 0 and use_cache:
            cache_path = super().path_from_eid(eid)
            if cache_path:
                return cache_path

        # if it wasn't successful, query Alyx
        ses = self.alyx.rest('sessions', 'list', django=f'pk,{eid}')
        if len(ses) == 0:
            return None
        else:
            return Path(self._par.CACHE_DIR).joinpath(
                ses[0]['lab'], 'Subjects', ses[0]['subject'],
                ses[0]['start_time'][:10],
                str(ses[0]['number']).zfill(3))
Example no. 5
 def _set_eid_or_path(self, session_path_or_eid):
     """Overloading base: session path not supported"""
     if is_uuid_string(str(session_path_or_eid)):
         self.eid = session_path_or_eid
         # Try to set session_path if data is found locally
         self.session_path = self.one.path_from_eid(self.eid)
     else:
         self.log.error("Cannot run ONE QC: an experiment uuid is required")
         raise ValueError("'session' must be a valid session uuid")
Example no. 6
 def _load(self, eid, dataset_types=None, dclass_output=False, dry_run=False, cache_dir=None,
           download_only=False, clobber=False, offline=False, keep_uuid=False):
     """
     From a session ID and dataset types, queries the Alyx database, downloads the data
     from Globus, and loads it into numpy arrays. Single session only.
     """
     # if the input is a UUID, prepend the start of the URL to it
     cache_dir = self._get_cache_dir(cache_dir)
     if is_uuid_string(eid):
         eid = '/sessions/' + eid
     eid_str = eid[-36:]
     # get session json information as a dictionary from the alyx API
     try:
         ses = self.alyx.get('/sessions/' + eid_str)
     except requests.HTTPError:
         raise requests.HTTPError('Session ' + eid_str + ' does not exist')
     # ses = ses[0]
     # if no dataset_type is provided:
     # a) force the output to be a dictionary that provides context to the data
     # b) download all types that have a data url specified within the alf folder
     dataset_types = [dataset_types] if isinstance(dataset_types, str) else dataset_types
     if not dataset_types or dataset_types == ['__all__']:
         dclass_output = True
     dc = SessionDataInfo.from_session_details(ses, dataset_types=dataset_types, eid=eid_str)
     # loop over each dataset and download if necessary
     for ind in range(len(dc)):
         if dc.url[ind] and not dry_run:
             relpath = PurePath(dc.url[ind].replace(self._par.HTTP_DATA_SERVER, '.')).parents[0]
             cache_dir_file = PurePath(cache_dir, relpath)
             Path(cache_dir_file).mkdir(parents=True, exist_ok=True)
             dc.local_path[ind] = self._download_file(
                 dc.url[ind], str(cache_dir_file), clobber=clobber, offline=offline,
                 keep_uuid=keep_uuid, file_size=dc.file_size[ind], hash=dc.hash[ind])
     # load the files content in variables if requested
     if not download_only:
         for ind, fil in enumerate(dc.local_path):
             dc.data[ind] = load_file_content(fil)
     # parse output arguments
     if dclass_output:
         return dc
     # if required, parse the output as a list that matches dataset_types requested
     list_out = []
     for dt in dataset_types:
         if dt not in dc.dataset_type:
             _logger.warning('dataset ' + dt + ' not found for session: ' + eid_str)
             list_out.append(None)
             continue
         for i, x in enumerate(dc.dataset_type):
             if dt == x:
                 if dc.data[i] is not None:
                     list_out.append(dc.data[i])
                 else:
                     list_out.append(dc.local_path[i])
     return list_out
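
The private _load above presumably backs the public load entry point; a hedged sketch of a typical call, reusing the hypothetical one and eid from the earlier sketches (the dataset type name is illustrative, and the return shapes follow the list_out logic above):

    # One element per requested dataset type; the data if loaded, else the local path
    pos, = one.load(eid, dataset_types=['wheel.position'])
    # dclass_output=True (or dataset_types='__all__') returns the SessionDataInfo dataclass
    dc = one.load(eid, dataset_types='__all__', download_only=True)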
Example no. 7
    def __init__(self, one=None, eid=None, lazy=True):
        self.one = one or ONE()
        self.eid = eid if is_uuid_string(eid) else None

        self.bpodqc = None
        self.oneqc = None
        self.frame = None

        if not lazy:
            self.compute_all_qc()
            self.build_extended_qc_frame()
Example no. 8
    def wrapper(eid, *args, **kwargs):
        if eid is None:
            log.warning("Input eid or session_path is None")
            return
        # Check if first arg is path or eid
        if is_uuid_string(str(eid)):
            session_path = one.path_from_eid(eid)
        else:
            session_path = Path(eid)
        if dl:
            _dl_raw_behavior(session_path, full=full, dry=dry, force=force)

        return func(session_path, *args, **kwargs)
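
The wrapper above is the inner function of a decorator; a minimal sketch of the enclosing factory it implies (the factory shape and the name uuid_to_path are assumptions; log, one, is_uuid_string and _dl_raw_behavior are the module-level names used in the snippet):

    def uuid_to_path(dl=False, full=False, dry=False, force=False):
        # Hypothetical decorator factory wrapping the logic shown above
        def decorator(func):
            def wrapper(eid, *args, **kwargs):
                if eid is None:
                    log.warning("Input eid or session_path is None")
                    return
                # Resolve an eid to a local session path, or treat the input as a path
                if is_uuid_string(str(eid)):
                    session_path = one.path_from_eid(eid)
                else:
                    session_path = Path(eid)
                if dl:
                    _dl_raw_behavior(session_path, full=full, dry=dry, force=force)
                return func(session_path, *args, **kwargs)
            return wrapper
        return decorator

    @uuid_to_path(dl=True)
    def extract_trials(session_path):  # hypothetical consumer
        ...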
Example no. 9
 def _set_eid_or_path(self, session_path_or_eid):
      if session_path_or_eid is None:
          log.error(
              "Cannot run BpodQC: please provide a valid session path or eid")
          return
     if is_uuid_string(str(session_path_or_eid)):
         self.eid = session_path_or_eid
          # Try to set session_path if data is found locally
         self.session_path = self.one.path_from_eid(self.eid)
     elif is_session_path(session_path_or_eid):
         self.session_path = session_path_or_eid
     else:
          log.error(
              "Cannot run BpodQC: please provide a valid session path or eid")
Example no. 10
 def _check_inputs(self, endpoint: str, uuid: str) -> None:
      # make sure the queried endpoint exists; if not, raise an informative error
      if endpoint not in self._rest_schemes:
          av = [k for k in self._rest_schemes if not k.startswith('_') and k]
         raise ValueError('REST endpoint "' + endpoint +
                          '" does not exist. Available ' +
                          'endpoints are \n       ' + '\n       '.join(av))
     # make sure the uuid is a valid UUID4
      if not is_uuid_string(uuid):
         raise ValueError(f"{uuid} is not a valid uuid")
     return
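
A sketch of the guard in action, on a hypothetical client object exposing the method above:

    try:
        client._check_inputs('sesions', '4e0b3320-47b7-416e-b842-c34dc9004cf8')
    except ValueError as e:
        print(e)  # lists the available REST endpoints
    try:
        client._check_inputs('sessions', 'not-a-uuid')
    except ValueError as e:
        print(e)  # "not-a-uuid is not a valid uuid"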
Example no. 11
def create_alyx_probe_insertions(session_path: str,
                                 force: bool = False,
                                 one: object = None,
                                 model: str = None,
                                 labels: list = None):
    if one is None:
        one = ONE()
    if is_uuid_string(session_path):
        eid = session_path
    else:
        eid = one.eid_from_path(session_path)
    if eid is None:
        print(
            'Session not found on Alyx: please create session before creating insertions'
        )
        return
    if model is None:
        probe_model = spikeglx.get_neuropixel_version_from_folder(session_path)
        pmodel = '3B2' if probe_model == '3B' else probe_model
    else:
        pmodel = model
    raw_ephys_data_path = Path(session_path) / 'raw_ephys_data'
    if labels is None:
        probe_labels = [
            x.name for x in Path(raw_ephys_data_path).glob('*')
            if x.is_dir() and ('00' in x.name or '01' in x.name)
        ]
    else:
        probe_labels = labels
    # create the dictionary
    for plabel in probe_labels:
        insdict = {'session': eid, 'name': plabel, 'model': pmodel}
        # search for the corresponding insertion in Alyx
        alyx_insertion = one.alyx.rest('insertions',
                                       'list',
                                       session=insdict['session'],
                                       name=insdict['name'])
        # if it doesn't exist, create it
        if len(alyx_insertion) == 0:
            alyx_insertion = one.alyx.rest('insertions',
                                           'create',
                                           data=insdict)
        else:
            iid = alyx_insertion[0]['id']
            if force:
                alyx_insertion = one.alyx.rest('insertions',
                                               'update',
                                               id=iid,
                                               data=insdict)
            else:
                alyx_insertion = alyx_insertion[0]
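
A hedged call sketch; the session path is a placeholder following the lab/Subjects/subject/date/number layout used elsewhere in this section:

    one = ONE()
    create_alyx_probe_insertions(
        '/data/mainenlab/Subjects/ZM_1150/2019-12-10/001',  # placeholder path
        one=one, model='3B2', labels=['probe00', 'probe01'], force=False)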
Example no. 12
 def _load(self,
           eid,
           dataset_types=None,
           dclass_output=False,
           download_only=False,
           offline=False,
           **kwargs):
     """
      From a session ID and dataset types, queries the Alyx database, downloads the data
      from Globus, and loads it into numpy arrays. Single session only.
     """
     if alfio.is_uuid_string(eid):
         eid = '/sessions/' + eid
     eid_str = eid[-36:]
     # if no dataset_type is provided:
     # a) force the output to be a dictionary that provides context to the data
      # b) download all types that have a data url specified within the alf folder
     dataset_types = [dataset_types] if isinstance(dataset_types,
                                                   str) else dataset_types
     if not dataset_types or dataset_types == ['__all__']:
         dclass_output = True
     if offline:
         dc = self._make_dataclass_offline(eid_str, dataset_types, **kwargs)
     else:
         dc = self._make_dataclass(eid_str, dataset_types, **kwargs)
     # load the files content in variables if requested
     if not download_only:
         for ind, fil in enumerate(dc.local_path):
             dc.data[ind] = alfio.load_file_content(fil)
     # parse output arguments
     if dclass_output:
         return dc
     # if required, parse the output as a list that matches dataset_types requested
     list_out = []
     for dt in dataset_types:
         if dt not in dc.dataset_type:
             _logger.warning('dataset ' + dt + ' not found for session: ' +
                             eid_str)
             list_out.append(None)
             continue
          for i, x in enumerate(dc.dataset_type):
             if dt == x:
                 if dc.data[i] is not None:
                     list_out.append(dc.data[i])
                 else:
                     list_out.append(dc.local_path[i])
     return list_out
Example no. 13
def _to_eid(invar):
    """ get eid from: details, path, or lists of details or paths
    """
    outvar = []
    if isinstance(invar, (list, tuple)):
        for i in invar:
            outvar.append(_to_eid(i))
        return outvar
    elif isinstance(invar, dict) and is_details_dict(invar):
        return invar["url"][-36:]
    elif isinstance(invar, str) and is_session_path(invar):
        return one.eid_from_path(invar)
    elif isinstance(invar, str) and is_uuid_string(invar):
        return invar
    else:
        log.warning("Unknown input type: please input a valid path or details object")
        return
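
A sketch of the accepted input flavours (all values are placeholders):

    _to_eid('4e0b3320-47b7-416e-b842-c34dc9004cf8')             # uuid string, returned as-is
    _to_eid('/data/mainenlab/Subjects/ZM_1150/2019-12-10/001')  # session path, resolved via one
    details = {'url': 'https://alyx.example.org/sessions/'
                      '4e0b3320-47b7-416e-b842-c34dc9004cf8'}   # assuming is_details_dict accepts it
    _to_eid(details)                                            # last 36 characters of the url
    _to_eid([details, details])                                 # lists and tuples recurse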
Example no. 14
 def _set_eid_or_path(self, session_path_or_eid):
     """Parse a given eID or session path
     If a session UUID is given, resolves and stores the local path and vice versa
     :param session_path_or_eid: A session eid or path
     :return:
     """
     self.eid = None
     if is_uuid_string(str(session_path_or_eid)):
         self.eid = session_path_or_eid
         # Try to set session_path if data is found locally
         self.session_path = self.one.path_from_eid(self.eid)
     elif is_session_path(session_path_or_eid):
         self.session_path = Path(session_path_or_eid)
         if self.one is not None:
             self.eid = self.one.eid_from_path(self.session_path)
             if not self.eid:
                 self.log.warning('Failed to determine eID from session path')
     else:
         self.log.error('Cannot run QC: an experiment uuid or session path is required')
         raise ValueError("'session' must be a valid session path or uuid")
Example no. 15
    def path_from_eid(self, eid: str, grep_str=None) -> Path:
        # If eid is a list of eIDs recurse through list and return the results
        if isinstance(eid, list):
            path_list = []
            for p in eid:
                path_list.append(self.path_from_eid(p, grep_str=grep_str))
            return path_list
        # If not valid return None
        if not is_uuid_string(eid):
            print(f'{eid} is not a valid eID/UUID string')
            return
        # Load data, if no data present on disk return None
        data = self._load(eid, download_only=True, offline=True)
        if not data.local_path:
            return None
        # If user defined a grep list of specific files return paths to files
        if grep_str is not None:
            files = [x for x in data.local_path if grep_str in str(x)]
            return files
        # Otherwise return the session path of the first file found
        session_path = get_session_path(data.local_path[0])

        return session_path
Example no. 16
    def path_from_eid(self, eid: str, use_cache=True) -> Path:
        """
        From an experiment id or a list of experiment ids, get the local cache path(s)
        :param eid: eid (UUID) or list of UUIDs
        :param use_cache: if set to False, will force a database connection
        :return: a local session Path, or a list of Paths
        """
        # If eid is a list of eIDs recurse through list and return the results
        if isinstance(eid, list):
            path_list = []
            for p in eid:
            path_list.append(self.path_from_eid(p, use_cache=use_cache))
            return path_list
        # If not valid return None
        if not is_uuid_string(eid):
            print(f'{eid} is not a valid eID/UUID string')
            return

        # first try avoid hitting the database
        if self._cache.size > 0 and use_cache:
            ic = parquet.find_first_2d(
                self._cache[['eid_0', 'eid_1']].to_numpy(), parquet.str2np(eid))
            if ic is not None:
                ses = self._cache.iloc[ic]
                return Path(self._par.CACHE_DIR).joinpath(
                    ses['lab'], 'Subjects', ses['subject'], ses['start_time'].isoformat()[:10],
                    str(ses['number']).zfill(3))

        # if it wasn't successful, query Alyx
        ses = self.alyx.rest('sessions', 'list', django=f'pk,{eid}')
        if len(ses) == 0:
            return None
        else:
            return Path(self._par.CACHE_DIR).joinpath(
                ses[0]['lab'], 'Subjects', ses[0]['subject'], ses[0]['start_time'][:10],
                str(ses[0]['number']).zfill(3))
Example no. 17
 def _load(self,
           eid,
           dataset_types=None,
           dclass_output=False,
           dry_run=False,
           cache_dir=None,
           download_only=False,
           clobber=False,
           offline=False,
           keep_uuid=False):
     """
      From a session ID and dataset types, queries the Alyx database, downloads the data
      from Globus, and loads it into numpy arrays. Single session only.
     """
      # if the input is a UUID, prepend the start of the URL to it
     cache_dir = self._get_cache_dir(cache_dir)
     if is_uuid_string(eid):
         eid = '/sessions/' + eid
     eid_str = eid[-36:]
     # get session json information as a dictionary from the alyx API
     try:
         ses = self.alyx.get('/sessions/' + eid_str)
     except requests.HTTPError:
         raise requests.HTTPError('Session ' + eid_str + ' does not exist')
     # ses = ses[0]
     # if no dataset_type is provided:
     # a) force the output to be a dictionary that provides context to the data
      # b) download all types that have a data url specified within the alf folder
     dataset_types = [dataset_types] if isinstance(dataset_types,
                                                   str) else dataset_types
     if not dataset_types or dataset_types == ['__all__']:
         dclass_output = True
     # this performs the filtering
     dc = SessionDataInfo.from_session_details(ses,
                                               dataset_types=dataset_types,
                                               eid=eid_str)
     # loop over each dataset and download if necessary
     with concurrent.futures.ThreadPoolExecutor(
             max_workers=NTHREADS) as executor:
         futures = []
         for ind in range(len(dc)):
             if dc.url[ind] is None or dry_run:
                 futures.append(None)
             else:
                 futures.append(
                     executor.submit(self.download_dataset,
                                     dc.url[ind],
                                     cache_dir=cache_dir,
                                     clobber=clobber,
                                     offline=offline,
                                     keep_uuid=keep_uuid,
                                     file_size=dc.file_size[ind],
                                     hash=dc.hash[ind]))
         concurrent.futures.wait(
             list(filter(lambda x: x is not None, futures)))
         for ind, future in enumerate(futures):
             if future is None:
                 continue
             dc.local_path[ind] = future.result()
     # load the files content in variables if requested
     if not download_only:
         for ind, fil in enumerate(dc.local_path):
             dc.data[ind] = load_file_content(fil)
     # parse output arguments
     if dclass_output:
         return dc
     # if required, parse the output as a list that matches dataset_types requested
     list_out = []
     for dt in dataset_types:
         if dt not in dc.dataset_type:
             _logger.warning('dataset ' + dt + ' not found for session: ' +
                             eid_str)
             list_out.append(None)
             continue
          for i, x in enumerate(dc.dataset_type):
             if dt == x:
                 if dc.data[i] is not None:
                     list_out.append(dc.data[i])
                 else:
                     list_out.append(dc.local_path[i])
     return list_out
Example no. 18
    def __init__(self,
                 eid=None,
                 trial=None,
                 camera='left',
                 dlc_features=None,
                 quick_load=True,
                 t_win=3,
                 one=None,
                 start=True):
        """
        Plot the wheel trace alongside the video frames.  Below is a list of key bindings:
        :key n: plot movements of next trial
        :key p: plot movements of previous trial
        :key r: plot movements of a random trial
        :key t: prompt for a trial number to plot
        :key l: toggle between legend for wheel and trial events
        :key space: pause/play frames
        :key left: move to previous frame
        :key right: move to next frame

        :param eid: uuid of experiment session to load
        :param trial: the trial id to plot
        :param camera: the camera position to load, options: 'left' (default), 'right', 'body'
        :param dlc_features: tuple of DLC features to overlay onto the frames
        :param quick_load: when true, move onset detection is performed on individual trials
        instead of the entire session
        :param t_win: the window in seconds over which to plot the wheel trace
        :param start: if False, the Viewer must be started by calling the `run` method
        :return: Viewer object
        """
        self._logger = logging.getLogger('ibllib')

        self.t_win = t_win  # Time window of wheel plot
        self.one = one or ONE()
        self.quick_load = quick_load

        # Input validation
        if camera not in ['left', 'right', 'body']:
            raise ValueError(
                "camera must be one of 'left', 'right', or 'body'")

        # If None, randomly pick a session to load
        if not eid:
            self._logger.info('Finding random session')
            eids = self.find_sessions(dlc=dlc_features is not None)
            eid = random.choice(eids)
            ref = eid2ref(eid, as_dict=False, one=self.one)
            self._logger.info('using session %s (%s)', eid, ref)
        elif not is_uuid_string(eid):
            raise ValueError(f'"{eid}" is not a valid session uuid')

        # Store complete session data: trials, timestamps, etc.
        ref = eid2ref(eid, one=self.one, parse=False)
        self._session_data = {'eid': eid, 'ref': ref, 'dlc': None}
        self._plot_data = {}  # Holds data specific to current plot, namely data for single trial

        # Download the DLC data if required
        if dlc_features:
            self._session_data['dlc'] = self.get_dlc(dlc_features,
                                                     camera=camera)

        # These are for the dict returned by ONE
        trial_data = self.get_trial_data('ONE')
        total_trials = trial_data['intervals'].shape[0]
        trial = random.randint(0, total_trials) if not trial else trial
        self._session_data['total_trials'] = total_trials
        self._session_data['trials'] = trial_data

        # Check for local first movement times
        first_moves = self.one.path_from_eid(
            eid) / 'alf' / '_ibl_trials.firstMovement_times.npy'
        if first_moves.exists() and 'firstMovement_times' not in trial_data:
            # Load file if exists locally
            self._session_data['trials']['firstMovement_times'] = np.load(
                first_moves)

        # Download the raw video for left camera only
        self.video_path, = self.download_raw_video(camera)
        cam_ts = self.one.load(self._session_data['eid'], ['camera.times'],
                               dclass_output=True)
        cam_ts, = [
            ts for ts, url in zip(cam_ts.data, cam_ts.url) if camera in url
        ]
        # _, cam_ts, _ = one.load(eid, ['camera.times'])  # leftCamera is in the middle of the list
        Fs = 1 / np.diff(
            cam_ts).mean()  # Approx. frequency of camera timestamps
        # Verify video frames and timestamps agree
        _, fps, count = get_video_frames_preload(self.video_path, [])

        if count != cam_ts.size:
            assert count <= cam_ts.size, 'fewer camera timestamps than frames'
            msg = 'number of timestamps does not match number video file frames: '
            self._logger.warning(msg + '%i more timestamps than frames',
                                 cam_ts.size - count)

        assert Fs - fps < 1, 'camera timestamps do not match reported frame rate'
        self._logger.info("Frame rate = %.0fHz", fps)
        # cam_ts = cam_ts[-count:]  # Remove extraneous timestamps
        self._session_data['camera_ts'] = cam_ts

        # Load wheel data
        self._session_data['wheel'] = self.one.load_object(
            self._session_data['eid'], 'wheel')
        if 'firstMovement_times' in self._session_data['trials']:
            pos, t = wh.interpolate_position(
                self._session_data['wheel']['timestamps'],
                self._session_data['wheel']['position'],
                freq=1000)

        # Plot the first frame in the upper subplot
        fig, axes = plt.subplots(nrows=2)
        fig.canvas.mpl_disconnect(
            fig.canvas.manager.key_press_handler_id)  # Disable defaults
        fig.canvas.mpl_connect(
            'key_press_event',
            self.process_key)  # Connect our own key press fn

        self._plot_data['figure'] = fig
        self._plot_data['axes'] = axes
        self._trial_num = trial

        self.anim = animation.FuncAnimation(fig,
                                            self.animate,
                                            init_func=self.init_plot,
                                            frames=cycle(range(60)),
                                            interval=20,
                                            blit=False,
                                            repeat=True,
                                            cache_frame_data=False)
        self.anim.running = False
        self.trial_num = trial  # Set trial and prepare plot/frame data
        if start:
            self.run()
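
A hedged instantiation sketch (the class name Viewer comes from the docstring; the eid is a placeholder):

    # Build the viewer lazily, then start the animation explicitly
    v = Viewer(eid='4e0b3320-47b7-416e-b842-c34dc9004cf8',
               trial=5, camera='left', t_win=3, start=False)
    v.run()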
Example no. 19
def create_alyx_probe_insertions(
    session_path: str,
    force: bool = False,
    one: object = None,
    model: str = None,
    labels: list = None,
):
    if one is None:
        one = ONE()
    if is_uuid_string(session_path):
        eid = session_path
    else:
        eid = one.eid_from_path(session_path)
    if eid is None:
        print(
            "Session not found on Alyx: please create session before creating insertions"
        )
        return
    if model is None:
        probe_model = spikeglx.get_neuropixel_version_from_folder(session_path)
        pmodel = "3B2" if probe_model == "3B" else probe_model
    else:
        pmodel = model
    raw_ephys_data_path = Path(session_path) / "raw_ephys_data"
    if labels is None:
        probe_labels = [
            x.name for x in Path(raw_ephys_data_path).glob("*")
            if x.is_dir() and ("00" in x.name or "01" in x.name)
        ]
    else:
        probe_labels = labels

    # create the qc fields in the json field
    qc_dict = {}
    qc_dict.update({"qc": "NOT_SET"})
    qc_dict.update({"extended_qc": {}})

    # create the dictionary
    for plabel in probe_labels:
        insdict = {
            "session": eid,
            "name": plabel,
            "model": pmodel,
            "json": qc_dict
        }
        # search for the corresponding insertion in Alyx
        alyx_insertion = one.alyx.rest("insertions",
                                       "list",
                                       session=insdict["session"],
                                       name=insdict["name"])
        # if it doesn't exist, create it
        if len(alyx_insertion) == 0:
            alyx_insertion = one.alyx.rest("insertions",
                                           "create",
                                           data=insdict)
        else:
            iid = alyx_insertion[0]["id"]
            if force:
                alyx_insertion = one.alyx.rest("insertions",
                                               "update",
                                               id=iid,
                                               data=insdict)
            else:
                alyx_insertion = alyx_insertion[0]