Example 1
    def load_object(self,
                    eid: Union[str, Path, UUID],
                    obj: str,
                    collection: Optional[str] = 'alf',
                    download_only: bool = False,
                    **kwargs) -> Union[alfio.AlfBunch, List[Path]]:
        """
        Load all attributes of an ALF object from a Session ID and an object name.

        :param eid: Experiment session identifier; may be a UUID, URL, experiment reference string,
        details dict or Path
        :param obj: The ALF object to load.  Supports asterisks as wildcards.
        :param collection:  The collection to which the object belongs, e.g. 'alf/probe01'.
        Supports asterisks as wildcards.
        :param download_only: When true, the data are downloaded and the file paths are returned
        :param kwargs: Optional filters for the ALF objects, including namespace and timescale
        :return: An ALF bunch, or a list of Path objects if download_only is True

        Examples:
        load_object(eid, '*moves')
        load_object(eid, 'trials')
        load_object(eid, 'spikes', collection='*probe01')
        """
        # Filter server-side by collection and dataset name
        search_str = 'name__regex,' + obj.replace('*', '.*')
        if collection and collection != 'all':
            search_str += ',collection__regex,' + collection.replace('*', '.*')
        results = self.alyx.rest('datasets', 'list', session=eid, django=search_str)
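        # fnmatch.translate converts the glob pattern into an equivalent regex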
        pattern = re.compile(fnmatch.translate(obj))

        # Further refine by matching object part of ALF datasets
        def match(r):
            return is_valid(r['name']) and pattern.match(alf_parts(r['name'])[1])

        # Get the set of object names from the matching datasets
        returned_obj = {alf_parts(x['name'])[1] for x in results if match(x)}

        # Validate result before loading
        if len(returned_obj) > 1:
            raise ALFMultipleObjectsFound('The following matching objects were found: ' +
                                          ', '.join(returned_obj))
        elif len(returned_obj) == 0:
            raise ALFObjectNotFound(f'ALF object "{obj}" not found on Alyx')
        collection_set = {x['collection'] for x in results if match(x)}
        if len(collection_set) > 1:
            raise ALFMultipleCollectionsFound('Matching object belongs to multiple collections: ' +
                                              ', '.join(collection_set))

        # Download and optionally load the datasets
        out_files = self.download_datasets(x for x in results if match(x))
        assert not any(x is None for x in out_files), 'failed to download object'
        if download_only:
            return out_files
        else:
            return alfio.load_object(out_files[0].parent, obj, **kwargs)
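
A minimal usage sketch (not from the source): it assumes a configured ONE instance from the oneibl package and an existing session; the subject and date passed to one.search are illustrative placeholders.

    from oneibl.one import ONE

    one = ONE()
    # Look up a session ID; subject and date here are hypothetical values
    eid = one.search(subject='KS023', date_range='2019-12-10')[0]
    # Load every attribute of the trials object as an AlfBunch
    trials = one.load_object(eid, 'trials')
    # Download the spikes object from one probe, returning only the file paths
    paths = one.load_object(eid, 'spikes', collection='alf/probe01',
                            download_only=True)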
Example 2
    def load_dataset(self,
                     eid: Union[str, Path, UUID],
                     dataset: str,
                     collection: Optional[str] = None,
                     download_only: bool = False) -> Any:
        """
        Load a single dataset from a Session ID and a dataset name.

        :param eid: Experiment session identifier; may be a UUID, URL, experiment reference string,
        details dict or Path
        :param dataset: The ALF dataset to load.  Supports asterisks as wildcards.
        :param collection:  The collection to which the object belongs, e.g. 'alf/probe01'.
        For IBL this is the relative path of the file from the session root.
        Supports asterisks as wildcards.
        :param download_only: When true, the data are downloaded and the file path is returned
        :return: Dataset content, or a Path object if download_only is True

        Examples:
            intervals = one.load_dataset(eid, '_ibl_trials.intervals.npy')
            intervals = one.load_dataset(eid, '*trials.intervals*')
            filepath = one.load_dataset(eid, '_ibl_trials.intervals.npy', download_only=True)
            spikes = one.load_dataset(eid, 'spikes.times.npy', collection='alf/probe01')
        """
        search_str = 'name__regex,' + dataset.replace('.', r'\.').replace('*', '.*')
        if collection:
            search_str += ',collection__regex,' + collection.replace('*', '.*')
        results = self.alyx.rest('datasets',
                                 'list',
                                 session=eid,
                                 django=search_str,
                                 exists=True)

        # Check that the matching datasets all belong to the same collection
        collection_set = {x['collection'] for x in results}
        if len(collection_set) > 1:
            raise ALFMultipleCollectionsFound(
                'Matching dataset belongs to multiple collections: ' +
                ', '.join(collection_set))
        if len(results) > 1:
            raise ALFMultipleObjectsFound(
                'The following matching datasets were found: ' +
                ', '.join(x['name'] for x in results))
        if len(results) == 0:
            raise ALFObjectNotFound(f'Dataset "{dataset}" not found on Alyx')

        filename = self.download_dataset(results[0])
        assert filename is not None, 'failed to download dataset'

        return filename if download_only else alfio.load_file_content(filename)
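
A corresponding sketch for load_dataset, under the same assumptions as above; because a name matching several collections raises ALFMultipleCollectionsFound, the collection argument is used to disambiguate (the probe collection name is illustrative):

    # Load one dataset directly as its file content (here a NumPy array)
    intervals = one.load_dataset(eid, '_ibl_trials.intervals.npy')
    # Narrow by collection when the same dataset exists under several probes
    times = one.load_dataset(eid, 'spikes.times.npy', collection='alf/probe00')
    # Fetch only the local path, e.g. to hand to another tool
    filepath = one.load_dataset(eid, '_ibl_trials.intervals.npy',
                                download_only=True)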