Example #1
    def __init__(self, layout):

        self.layout = layout
        self.session = layout.session
        self.validate = layout.validate
        self.root = layout.root
        self.config_filename = layout.config_filename
        self.validator = BIDSValidator(index_associated=True)
        # Create copies of list attributes we'll modify during indexing
        self.config = list(layout.config.values())
        self.include_patterns = list(layout.force_index)
        self.exclude_patterns = list(layout.ignore)
Example #2
    def __init__(self, validate=True, ignore=None, force_index=None,
                 index_metadata=True, config_filename='layout_config.json',
                 **filters):
        self.validate = validate
        self.ignore = ignore
        self.force_index = force_index
        self.index_metadata = index_metadata
        self.config_filename = config_filename
        self.filters = filters
        self.validator = BIDSValidator(index_associated=True)

        # Layout-dependent attributes to be set in __call__()
        self._layout = None
        self._config = None
        self._include_patterns = None
        self._exclude_patterns = None
Example #3
def Validate(src: Union[Text, PathLike]) -> bool:
    """
    Returns True if file path adheres to BIDS.

    Main method of the validator. Uses other class methods to check
    different aspects of the file path.

    Args:
        src: str or PathLike
            Path pointing to a file or directory within a BIDS dataset.
    Note:
        Path ``src`` is automatically rendered relative
        to the root of the BIDS dataset before validation.
    """
    _src = RelativeToRoot(str(src))
    return BIDSValidator().is_bids(str(_src))
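
A minimal usage sketch for the wrapper above, assuming hypothetical file paths; BIDSValidator.is_bids() expects a path relative to the dataset root, written with a leading forward slash:

from bids_validator import BIDSValidator

validator = BIDSValidator()
# A path that follows the BIDS naming scheme (hypothetical example)
print(validator.is_bids('/sub-01/anat/sub-01_T1w.nii.gz'))   # expected: True
# A path missing the required sub-<label> entity (hypothetical example)
print(validator.is_bids('/sub-01/anat/T1w.nii.gz'))          # expected: False
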
Example #4
    def __init__(self, bids_directory):
        print('- Validate: init started.')
        file_paths = []
        result = []
        validator = BIDSValidator()
        for path, dirs, files in os.walk(bids_directory):
            for filename in files:
                if filename == '.bidsignore':
                    continue

                if filename.endswith('_annotations.tsv'):
                    continue

                if filename.endswith('_annotations.json'):
                    continue

                temp = os.path.join(path, filename)
                file_paths.append(temp[len(bids_directory):len(temp)])
                result.append(
                    validator.is_bids(temp[len(bids_directory):len(temp)]))
                # print(validator.is_bids(temp[len(bids_directory):len(temp)]))

        self.set_file_paths(file_paths)
        self.set_result(result)
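Example #5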
if not args.skip_bids_validator:
    #process = run_cmd('bids-validator %s' % args.bids_dir)
    #logging.info(process.stdout)
    #if process.returncode != 0:
    #    logging.error('BIDS input dataset did not pass BIDS validator. Datasets can be validated online '
    #                    'using the BIDS Validator (http://incf.github.io/bids-validator/).\nUse the '
    #                    '--skip_bids_validator argument to run the detection without prior BIDS validation.')
    #    exit(1)
    bids_error = False
    for dir_, d, files in os.walk(args.bids_dir):
        for file in files:
            rel_file = os.path.relpath(dir_, args.bids_dir)
            if rel_file[0] == '.':
                rel_file = rel_file[1:]
            rel_file = os.path.join(rel_file, file)
            if not BIDSValidator().is_bids('/' + rel_file):
                logging.error('Invalid BIDS-file: ' + rel_file)
                bids_error = True
    if bids_error:
        logging.error('BIDS input dataset did not pass BIDS validator. Datasets can be validated online '
                        'using the BIDS Validator (http://incf.github.io/bids-validator/).\nUse the '
                        '--skip_bids_validator argument to run the detection without prior BIDS validation.')
        exit(1)


#
# configure
#
config = default_config()

#  read the configuration file (if passed)
Example #6
    def __init__(self,
                 root,
                 validate=True,
                 index_associated=True,
                 absolute_paths=True,
                 derivatives=False,
                 config=None,
                 sources=None,
                 ignore=None,
                 force_index=None,
                 config_filename='layout_config.json',
                 regex_search=False):

        self.root = root
        self._validator = BIDSValidator(index_associated=index_associated)
        self.validate = validate
        self.absolute_paths = absolute_paths
        self.derivatives = {}
        self.sources = sources
        self.regex_search = regex_search
        self.metadata_index = MetadataIndex(self)
        self.config_filename = config_filename
        self.files = {}
        self.nodes = []
        self.entities = {}
        self.ignore = [
            os.path.abspath(os.path.join(self.root, patt)) if isinstance(
                patt, six.string_types) else patt
            for patt in listify(ignore or [])
        ]
        self.force_index = [
            os.path.abspath(os.path.join(self.root, patt)) if isinstance(
                patt, six.string_types) else patt
            for patt in listify(force_index or [])
        ]

        # Do basic BIDS validation on root directory
        self._validate_root()

        # Initialize the BIDS validator and examine ignore/force_index args
        self._setup_file_validator()

        # Set up configs
        if config is None:
            config = 'bids'
        config = [Config.load(c) for c in listify(config)]
        self.config = {c.name: c for c in config}
        self.root_node = BIDSRootNode(self.root, config, self)

        # Consolidate entities into master list. Note: no conflicts occur b/c
        # multiple entries with the same name all point to the same instance.
        for n in self.nodes:
            self.entities.update(n.available_entities)

        # Add derivatives if any are found
        if derivatives:
            if derivatives is True:
                derivatives = os.path.join(root, 'derivatives')
            self.add_derivatives(derivatives,
                                 validate=validate,
                                 index_associated=index_associated,
                                 absolute_paths=absolute_paths,
                                 derivatives=None,
                                 config=None,
                                 sources=self,
                                 ignore=ignore,
                                 force_index=force_index)
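Example #7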
def bids_acquisition_download(data_root_path='',
                              dataset_name=None,
                              force_download=False,
                              behav_path='exp_info/recorded_events',
                              copy_events='n',
                              deface=False,
                              dry_run=False):
    """Automatically download files from neurospin server to a BIDS dataset.

    Download-database is based on NeuroSpin server conventions.
    Options are 'prisma', 'trio' and custom path.
    Prisma db_path = '/neurospin/acquisition/database/Prisma_fit'
    Trio db_path = '/neurospin/acquisition/database/TrioTim'

    The BIDS dataset is created before download if necessary, with some
    empty mandatory files (such as README) to be filled in, in case they
    do not exist.

    The download depends on the file '[sub-*_][ses-*_]download.csv' contained
    in the folder 'exp_info'.

    The NIP and acquisition date of the subjects are taken automatically from
    the exp_info/participants.tsv file, which follows the BIDS standard. The
    file is copied into the dataset folder without the NIP column for privacy.

    Possible exceptions:
    1) exp_info directory not found
    2) participants.tsv not found
    3) download files not found
    4) Acquisition directory in neurospin server not found
    5) There is more than one acquisition directory (ask the operator for
    extra digits for the NIP; the NIP then looks like xxxxxxxx-ssss)
    6) Event file corresponding to downloaded bold.nii not found
    """

    ### CHECK PATHS AND FILES

    # exp_info path, where participants.tsv is located
    # print(data_root_path)
    exp_info_path = os.path.join(data_root_path, 'exp_info')
    if not os.path.exists(exp_info_path):
        raise Exception('exp_info directory not found')
    if not os.path.isfile(os.path.join(exp_info_path, 'participants.tsv')):
        raise Exception('exp_info/participants.tsv not found')

    # Determine target path with the name of dataset
    dataset_name, target_root_path = get_bids_default_path(
        data_root_path, dataset_name)

    # Create dataset directories and files if necessary
    bids_init_dataset(data_root_path, dataset_name)

    # Manage the report and download information
    download_report = ('download_report_' +
                       time.strftime("%d-%b-%Y-%H:%M:%S", time.gmtime()) +
                       '.csv')
    report_path = os.path.join(data_root_path, 'report')
    if not os.path.exists(report_path):
        os.makedirs(report_path)
    download_report = open(os.path.join(report_path, download_report), 'w')
    #report_line = '%s,%s,%s\n' % ('subject_id', 'session_id', 'download_file')
    #download_report.write(report_line)
    list_imported = []
    list_already_imported = []
    list_warning = []

    # Create a dataFrame to store participant information
    #df_participant = pd.DataFrame()
    #Dict for info participant
    #list_all_participants = {}
    dic_info_participants = OrderedDict()

    # List for the batch file for the dcm2niibatch command
    infiles_dcm2nii = []

    # List for data to deface
    files_for_pydeface = []

    #Dict of descriptors to be added
    dict_descriptors = {}

    ### GATHER INFORMATION TO DOWNLOAD

    # Download command for each subject/session
    # one line has the following information
    # participant_id / NIP / infos_participant / session_label / acq_date / location / to_import
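    # A hedged, hypothetical illustration of the two structured columns above:
    #   infos_participant -> {"age": 25, "sex": "M"}   (parsed with json.loads below)
    #   to_import         -> [('2', 'anat', 'T1w'),
    #                         ('4', 'func', 'task-rest_run-01_bold')]   (parsed with literal_eval)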

    # Read the participants.tsv file for getting subjects/sessions to download
    pop = pd.read_csv(os.path.join(exp_info_path, 'participants.tsv'),
                      dtype=str,
                      sep='\t',
                      index_col=False)

    #print(df_participant)

    for row_idx, subject_info in pop.iterrows():
        # Fill the participant information for participants.tsv
        if subject_info[0] in dic_info_participants:
            existing_items = dic_info_participants[subject_info[0]]
            dico_add = {}
            info_participant = json.loads(subject_info['infos_participant'])
            for k, v in info_participant.items():
                if k not in existing_items:
                    dico_add[k] = v
            # merge dictionaries
            existing_items.update(dico_add)
            dic_info_participants[subject_info[0]] = existing_items
        else:
            dic_info_participants[subject_info[0]] = json.loads(
                subject_info['infos_participant'])

        # Determine path to files in NeuroSpin server
        download_database = subject_info['location']
        if download_database in NEUROSPIN_DATABASES:
            db_path = NEUROSPIN_DATABASES[download_database]
        else:
            db_path = download_database

        # The first column gives either the participant_label or participant_id
        subject_id = subject_info[0]

        # sub_path = target_root_path + subject_id + ses_path
        # Manage the optional filters
        # optional_filters = [('sub', subject_id)]
        # if session_id is not None:
        #  optional_filters += [('ses', session_id)]
        session_id = None
        if 'session_label' in subject_info.index:
            if not pd.isna(subject_info['session_label']):
                session_id = subject_info['session_label']
        if session_id is None:
            ses_path = ''
        else:
            ses_path = 'ses-' + session_id
        try:
            int(subject_id)
            subject_id = 'sub-{0}'.format(subject_id)
        except ValueError:
            if 'sub-' not in subject_id:
                print('****  BIDS IMPORTATION WARNING: SUBJECT ID PROBABLY '
                      'NOT CONFORMANT')
        sub_path = os.path.join(target_root_path, subject_id, ses_path)
        if not os.path.exists(sub_path):
            os.makedirs(sub_path)

        # Avoid redownloading subjects/sessions
        if not force_download:
            check_file = os.path.join(sub_path, 'downloaded')
            if os.path.isfile(check_file):
                continue

        # DATE has to be transformed from BIDS to NeuroSpin server standard
        # NeuroSpin standard is yyyymmdd -> Bids standard is YYYY-MM-DD
        acq_date = subject_info['acq_date'].replace('-', '').replace('\n', '')

        #acq_label
        acq_label = subject_info['acq_label']

        #dir_label
        #dir_label = subject_info['dir_label']

        # nip number
        nip = subject_info['NIP']

        # Get appropriate download file. As specific as possible
        # specs_path = file_manager_default_file(exp_info_path,
        #                                optional_filters, 'download',
        #                               file_type='tsv',
        #                               allow_other_fields=False)
        #report_line = '%s,%s,%s\n' % (subject_id, session_id, specs_path)
        #download_report.write(report_line)

        #specs = pd.read_csv(specs_path, dtype=str, sep='\t', index_col=False)

        # Retrieve the list of sequences to import.
        # Each tuple is configured as (file_to_import, acq_folder, acq_name):
        # value[0] : sequence number
        # value[1] : modality
        # value[2] : part of the file name
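        # Hypothetical example of one parsed entry:
        #   value = ('2', 'func', 'task-rest_run-01_bold')
        #   value[0] -> scanner sequence number, zero-padded to six digits below
        #   value[1] -> target modality folder ('anat', 'func', 'fmap' or 'meg')
        #   value[2] -> entity string searched for task-/run-/dir-, ending with the suffix ('bold')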
        print("Scans for ", subject_info['NIP'])
        print(subject_info['to_import'])
        seqs_to_retrieve = literal_eval(subject_info['to_import'])
        # Wrap in a list if there is only one sequence; otherwise each value
        # would be iterated over as a str and not as a tuple.
        if isinstance(seqs_to_retrieve[0], str):
            seqs_to_retrieve = [seqs_to_retrieve]

        # download data, store information in batch files for anat/fmri
        # download data for meg data
        for value in seqs_to_retrieve:
            #print(seqs_to_retrieve)
            def get_value(key, text):
                m = re.search(key + '-(.+?)_', text)
                if m:
                    return m.group(1)
                else:
                    return None

            run_task = get_value('task', value[2])
            run_id = get_value('run', value[2])
            run_dir = get_value('dir', value[2])
            run_session = session_id

            tag = value[2].split('_')[-1]

            target_path = os.path.join(sub_path, value[1])
            if not os.path.exists(target_path):
                os.makedirs(target_path)

            # MEG CASE
            if value[1] == 'meg':
                # Create subject path if necessary
                meg_path = os.path.join(sub_path, 'meg')
                if not os.path.exists(meg_path):
                    os.makedirs(meg_path)

                # Create the sub-emptyroom
                #sub-emptyroom_path = os.path.join(data_root_path, 'sub_emptyroom')
                #if not os.path.exists(sub-emptyroom_path):
                #    os.makedirs(sub-emptyroom_path)

                meg_file = os.path.join(db_path, nip, acq_date, value[0])
                print(meg_file)
                filename = get_bids_file_descriptor(subject_id,
                                                    task_id=run_task,
                                                    run_id=run_id,
                                                    run_dir=run_dir,
                                                    session_id=run_session,
                                                    file_tag=tag,
                                                    acq_label=acq_label,
                                                    file_type='tif')
                #output_path = os.path.join(target_path, filename)
                #print(output_path)
                #shutil.copyfile(meg_file, output_path)
                raw = mne.io.read_raw_fif(meg_file, allow_maxshield=True)

                write_raw_bids(raw, filename, target_path, overwrite=True)
                # add event
                # create json file
                #copy the subject emptyroom

            # ANAT and FUNC case
            elif value[1] in ('anat', 'func', 'fmap'):
                download = True
                dicom_paths = []
                path_file_glob = ""
                nip_dirs = glob.glob(
                    os.path.join(db_path, str(acq_date),
                                 str(nip) + '*'))
                #print(os.path.join(db_path, str(acq_date), str(nip) + '*'))
                if len(nip_dirs) < 1:
                    list_warning.append(
                        f"\n WARNING: No directory found for given NIP {nip} and SESSION {session_id}"
                    )
                    #print(message)
                    #download_report.write(message)
                    download = False
                elif len(nip_dirs) > 1:
                    list_warning.append(
                        f"\n  WARNING: Multiple paths for given NIP {nip} "
                        f"SESSION {session_id} - please mention the session "
                        "of the subject for this date; 2 sessions for the "
                        "same subject on the same day are possible")
                    #print(message)
                    #download_report.write(message)
                    download = False
                else:
                    path_file_glob = os.path.join(
                        nip_dirs[0], '{0:06d}_*'.format(int(value[0])))
                    #print(path_file_glob)
                    dicom_paths = glob.glob(path_file_glob)

                if not dicom_paths and download:
                    list_warning.append("\n WARNING: file not found " +
                                        path_file_glob)
                    #print(message)
                    #download_report.write(message)
                elif download:
                    dicom_path = dicom_paths[0]
                    list_imported.append("\n IMPORTATION OF " + dicom_path)
                    #print(message)
                    #download_report.write(message)
                    # Expecting a file name as in the BIDS specification (page 10)
                    filename = get_bids_file_descriptor(subject_id,
                                                        task_id=run_task,
                                                        run_id=run_id,
                                                        run_dir=run_dir,
                                                        session_id=run_session,
                                                        file_tag=tag,
                                                        acq_label=acq_label,
                                                        file_type='nii')

                    if value[1] == 'anat' and deface:
                        print("\n Deface with pydeface")
                        files_for_pydeface.append(
                            os.path.join(target_path, filename))

                    # append list for preparing the batch importation
                    file_to_convert = {
                        'in_dir': dicom_path,
                        'out_dir': target_path,
                        'filename': os.path.splitext(filename)[0]
                    }
                    is_file_to_import = os.path.join(
                        os.getcwd(), target_path, filename)

                    if os.path.isfile(is_file_to_import):
                        list_already_imported.append(
                            f" ALREADY IMPORTED: {is_file_to_import}")
                    else:
                        infiles_dcm2nii.append(file_to_convert)

                    # Add descriptor into the json file
                    if run_task:
                        filename_json = os.path.join(target_path,
                                                     filename[:-3] + 'json')
                        dict_descriptors.update(
                            {filename_json: {
                                'TaskName': run_task
                            }})

                    if len(value) == 4:
                        #print('value[3]', value[3] )
                        filename_json = os.path.join(target_path,
                                                     filename[:-3] + 'json')
                        dict_descriptors.update({filename_json: value[3]})

        # Importation and conversion of DICOM files
        dcm2nii_batch = dict(Options=dict(isGz='false',
                                          isFlipY='false',
                                          isVerbose='false',
                                          isCreateBIDS='true',
                                          isOnlySingleFile='false'),
                             Files=infiles_dcm2nii)

    dcm2nii_batch_file = os.path.join(exp_info_path, 'batch_dcm2nii.yaml')
    with open(dcm2nii_batch_file, 'w') as f:
        yaml.dump(dcm2nii_batch, f)

    print(
        "\n------------------------------------------------------------------------------------"
    )
    print(
        "-------------------    SUMMARY OF IMPORTATION   --------------------------------------"
    )
    print(
        "--------------------------------------------------------------------------------------\n"
    )
    for i in list_already_imported:
        print(i)
        download_report.write(i)
    print(
        "\n------------------------------------------------------------------------------------"
    )
    for i in list_imported:
        print(i)
        download_report.write(i)
    print(
        "\n------------------------------------------------------------------------------------"
    )
    for i in list_warning:
        print(i)
        download_report.write(i)
    print(
        "\n------------------------------------------------------------------------------------"
    )
    print(
        "------------------------------------------------------------------------------------\n"
    )
    download_report.close()

    if dry_run:
        print("\n NO IMPORTATION, DRY-RUN OPTION IS TRUE \n")
    else:
        print('\n')
        cmd = "dcm2niibatch %s" % (dcm2nii_batch_file)
        subprocess.call(cmd, shell=True)

        # Loop to check whether downloads are OK and create the 'downloaded' marker files
        #    done_file = open(os.path.join(sub_path, 'downloaded'), 'w')
        #    done_file.close()

        #Data to deface
        #print(files_for_pydeface)
        if files_for_pydeface:
            try:
                template = (
                    "/neurospin/unicog/protocols/IRMf/Unicogfmri/BIDS/"
                    "unicog-dev/bids/template_deface/mean_reg2mean.nii.gz")
                facemask = ("/neurospin/unicog/protocols/IRMf/Unicogfmri/BIDS/"
                            "unicog-dev/bids/template_deface/facemask.nii.gz")
            except:
                template = resource_filename(
                    Requirement.parse("unicog"),
                    "bids/template_deface/mean_reg2mean.nii.gz")
                facemask = resource_filename(
                    Requirement.parse("unicog"),
                    "bids/template_deface/facemask.nii.gz")
            os.environ['FSLDIR'] = "/i2bm/local/fsl/bin/"
            os.environ['FSLOUTPUTTYPE'] = "NIFTI_PAIR"
            os.environ[
                'PATH'] = os.environ['FSLDIR'] + ":" + os.environ['PATH']

            for file_to_deface in files_for_pydeface:
                print(f"\nDeface with pydeface {file_to_deface}")
                pdu.deface_image(infile=file_to_deface,
                                 outfile=file_to_deface,
                                 facemask=facemask,
                                 template=template,
                                 force=True)

        # Create participants.tsv in dataset folder (take out NIP column)
        participants_path = os.path.join(target_root_path, 'participants.tsv')
        df_participant = pd.DataFrame.from_dict(dic_info_participants,
                                                orient="index")
        df_participant.to_csv(participants_path, sep='\t')

        if dict_descriptors:
            #print(dict_descriptors)
            # Adding a new key value pair in a json file such as taskname
            for k, v in dict_descriptors.items():
                with open(k, 'r+') as json_file:
                    temp_json = json.load(json_file)
                    temp_json.update(v)
                    json_file.seek(0)
                    json.dump(temp_json, json_file)
                    json_file.truncate()

        # Copy recorded event files
        if copy_events == "y":
            bids_copy_events(behav_path, data_root_path, dataset_name)

        #Validate paths with BIDSValidator
        #see also http://bids-standard.github.io/bids-validator/
        validation_bids = yes_no(
            '\nDo you want to use a bids validator? (y/n)')
        if validation_bids:
            bids_validation_report = os.path.join(
                report_path, "report_bids_valisation.txt")
            if shutil.which('bids-validator'):
                cmd = f"bids-validator {target_root_path} > {bids_validation_report}"
                subprocess.call(cmd, shell=True)
                cmd = f"cat < {bids_validation_report}"
                subprocess.call(cmd, shell=True)
                print(
                    f'\n\nSee the summary of the bids validator at {bids_validation_report}'
                )
            else:
                validator = BIDSValidator()
                os.chdir(target_root_path)
                for file_to_test in Path('.').glob('./**/*'):
                    if file_to_test.is_file():
                        file_to_test = '/' + str(file_to_test)
                        print(
                            '\nTest the following name of file : {name} with BIDSValidator'
                            .format(name=file_to_test))
                        print(validator.is_bids(file_to_test))

    print('\n')
Example #8
def _setup_main_eeg():
    """
    We copy an explicit example from MNE-BIDS and modify small details.

    Specifically, we will follow these steps:

    1. Download the repository and use the data in the example directory:
        data/
            bids_layout/
                sourcedata/
                derivatives/
                sub-XXX/
                sub-XXY/
                ...

    2. Load the source raw data, extract information, preprocess certain things
    and save in a new BIDS directory

    3. Check the result and compare it with the standard
    """
    ###############################################################################
    # Step 1: Prepare the data
    # -------------------------
    #
    # First, we need some data to work with. We will use some sample simulated scalp and
    # iEEG data. For each subject, there are "seizure" events. For the present example, we will
    # show how to format the data for two modalities to comply with the Brain Imaging Data Structure
    # (`BIDS <http://bids.neuroimaging.io/>`_).
    #
    # The data are in the `European Data Format <https://www.edfplus.info/>`_
    # '.edf', which is good for us because next to the BrainVision format, EDF is
    # one of the recommended file formats for EEG BIDS. However, apart from the
    # data format, we need to build a directory structure and supply meta data
    # files to properly *bidsify* this data.
    #
    # Conveniently, there is already a data loading function available with
    # MNE-Python:

    DATADIR = os.getcwd()
    bids_root = os.path.join(DATADIR, "./data/bids_layout/")
    RUN_IEEG = False  # either run scalp, or iEEG
    line_freq = (
        60  # user should set the line frequency, since MNE-BIDS defaults to 50 Hz
    )
    test_subjectid = "0001"
    test_sessionid = "seizure"
    test_task = "monitor"
    authors = ["Adam Li", "Patrick Myers"]

    if RUN_IEEG:
        edf_fpaths = [
            os.path.join(bids_root, "sourcedata", "ieeg_ecog_test.edf")
        ]
        modality = "ecog"
    else:
        edf_fpath1 = os.path.join(bids_root, "sourcedata", "scalp_test.edf")
        edf_fpath2 = os.path.join(bids_root, "sourcedata", "scalp_test_2.edf")
        edf_fpaths = [edf_fpath1, edf_fpath2]
        modality = "eeg"
    ###############################################################################
    # Let's see whether the data has been downloaded using a quick visualization
    # of the directory tree.

    data_dir = os.path.join(bids_root, "sourcedata")
    print_dir_tree(data_dir)

    ###############################################################################
    # Step 2: Formatting as BIDS
    # --------------------------
    #
    # Let's start by formatting a single subject. We are reading the data using
    # MNE-Python's io module and the :func:`read_raw_edf` function. Note that we
    # must use `preload=False`, the default in MNE-Python. It prevents the data
    # from being loaded and modified when converting to BIDS.
    #
    # Note that kind and acquisition currently almost stand for the same thing.
    # Please read the BIDS docs to get acquainted.

    # create the BIDS directory structure
    if not os.path.exists(bids_root):
        print("Making bids root directory.")
        make_bids_folders(
            output_path=bids_root,
            session=test_sessionid,
            subject=test_subjectid,
            kind=modality,
        )

    for i, edf_fpath in enumerate(edf_fpaths):
        """ Write data file into BIDS format """
        test_runid = i

        # add a bids run
        bids_basename = make_bids_basename(
            subject=test_subjectid,
            session=test_sessionid,
            task=test_task,
            run=test_runid,
            acquisition=modality,
        )
        print("Loading filepath: ", edf_fpath)
        print("Writing to bidsroot: ", bids_root)
        print("Bids basenmae; ", bids_basename)
        # call bidsbuilder pipeline
        bids_deriv_root = BidsConverter.convert_to_bids(
            edf_fpath=edf_fpath,
            bids_root=bids_root,
            bids_basename=bids_basename,
            line_freq=line_freq,
            overwrite=True,
        )

    # currently write_raw_bids overwrites make_dataset_description
    # TODO: put this on the top when PR gets merged.
    make_dataset_description(os.path.join(bids_root),
                             name="test_bids_dataset",
                             authors=authors)

    ###############################################################################
    # What does our fresh BIDS directory look like?
    print_dir_tree(bids_root)

    ###############################################################################
    # Step 3: Check and compare and read in the data
    # ------------------------------------------------------------
    # Now we have written our BIDS directory.

    if modality in ["ecog", "seeg"]:
        kind = "ieeg"
    elif modality == "eeg":
        kind = "eeg"
    bids_fname = bids_basename + f"_{kind}.edf"

    print("Trying to read from: ", bids_fname)

    # use MNE-BIDS function to read in the data
    raw = read_raw_bids(bids_fname, bids_root)

    print("Read successfully using MNE-BIDS")

    # use BidsRun object, which just simply adds additional functionality
    # bidsrun = BidsRun(tmp_bids_root, bids_fname)
    # raw = bidsrun.load_data()

    print(raw)

    ###############################################################################
    # Step 4: Run BIDS-Validate
    # ------------------------------------------------------------
    # Now we have written our BIDS directory.
    # save a fif copy and reload it
    # TODO: re-check when pybids is updated.
    # currently, use the https://bids-standard.github.io/bids-validator/ and see that it is verified
    params = _parse_bids_filename(bids_basename, True)
    print(raw.info)
    fif_data_path = make_bids_folders(
        subject=params["sub"],
        session=params["ses"],
        kind=kind,
        output_path=bids_root,
        overwrite=False,
        verbose=True,
    )
    rel_bids_root = f"/sub-0001/ses-seizure/{kind}/"
    path = os.path.join(rel_bids_root, bids_fname)
    is_valid = BIDSValidator().is_bids(path)

    print(BIDSValidator().is_top_level(path))
    print(BIDSValidator().is_associated_data(path))
    print(BIDSValidator().is_session_level(path))
    print(BIDSValidator().is_subject_level(path))
    print(BIDSValidator().is_phenotypic(path))
    print(BIDSValidator().is_file(path))

    print("checked filepath: ", os.path.join(rel_bids_root, bids_fname))
    print(is_valid)
Example #9
###############################################################################
# Step 4: Run BIDS-Validate
# ------------------------------------------------------------
# Now we have written our BIDS directory.
# save a fif copy and reload it
# TODO: re-check when pybids is updated.
# currently, use the https://bids-standard.github.io/bids-validator/ and see that it is verified
params = _parse_bids_filename(bids_basename, True)
print(raw.info)
fif_data_path = make_bids_folders(
    subject=params["sub"],
    session=params["ses"],
    kind=kind,
    bids_root=bids_root,
    overwrite=False,
    verbose=True,
)
rel_bids_root = f"/sub-0001/ses-seizure/{kind}/"
path = os.path.join(rel_bids_root, bids_fname)
is_valid = BIDSValidator().is_bids(path)

print(BIDSValidator().is_top_level(path))
print(BIDSValidator().is_associated_data(path))
print(BIDSValidator().is_session_level(path))
print(BIDSValidator().is_subject_level(path))
print(BIDSValidator().is_phenotypic(path))
print(BIDSValidator().is_file(path))

print("checked filepath: ", os.path.join(rel_bids_root, bids_fname))
print(is_valid)
Example #10
def test_is_bids(fname):
    """Test that is_bids returns true for each file in a valid BIDS dataset."""
    validator = BIDSValidator()
    assert validator.is_bids(fname)
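
The fname argument above is presumably supplied through a pytest fixture or parametrization; a minimal sketch, assuming hypothetical root-relative paths (BIDSValidator.is_bids() expects a leading forward slash):

import pytest
from bids_validator import BIDSValidator


@pytest.mark.parametrize("fname", [
    "/dataset_description.json",
    "/sub-01/anat/sub-01_T1w.nii.gz",
])
def test_is_bids(fname):
    """Test that is_bids returns true for each file in a valid BIDS dataset."""
    validator = BIDSValidator()
    assert validator.is_bids(fname)
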
Example #11
def testvalidator():
    return BIDSValidator()
Example #12
    def __init__(self, path):
        self.path = path
        self.subjects = [
            op.basename(s).split('-')[1]
            for s in sorted(glob(op.join(self.path, 'sub-*'))) if op.isdir(s)
        ]

        self.submessage = f"Found {len(self.subjects)} participant(s) {self.subjects}"

        sessions = []
        sessmessage = []
        for this_sub in self.subjects:
            these_ses = [
                op.basename(s).split('-')[1] for s in sorted(
                    glob(op.join(self.path, f'sub-{this_sub}', 'ses-*')))
                if op.isdir(s)
            ]

            sessmessage.append(
                f"Found {len(these_ses)} session(s) for sub-{this_sub} {these_ses}"
            )
            sessions.append(these_ses)

        self.sessions = sessions
        self.sessmessage = sessmessage

        tasks = []
        taskmessage = []
        for this_sub, these_ses in zip(self.subjects, self.sessions):
            these_task = []
            for this_ses in these_ses:
                if this_ses is None:
                    tmp = glob(
                        op.join(self.path, f'sub-{this_sub}', 'func',
                                f"*{'.nii'}*"))
                else:
                    tmp = glob(
                        op.join(self.path, f'sub-{this_sub}',
                                f'ses-{this_ses}', 'func', f"*{'.nii'}*"))

                these_ses_task = list(
                    set([
                        op.basename(f).split('task-')[1].split('_')[0]
                        for f in tmp
                    ]))

                nullstring = "" if this_ses is None else f"and ses-{this_ses}"

                taskmessage.append(
                    f"Found {len(these_ses_task)} task(s) for sub-{this_sub} {nullstring} {these_ses_task}"
                )
                these_task.append(these_ses_task)

            self.taskmessage = taskmessage
            tasks.append(these_task)
        self.tasks = tasks

        sessions = []
        for this_sub in self.subjects:
            these_ses = [
                op.basename(s).split('-')[1] for s in sorted(
                    glob(op.join(self.path, f'sub-{this_sub}', 'ses-*')))
                if op.isdir(s)
            ]

        # Check BIDS validity
        invalid = []
        validator = BIDSValidator()
        for path, subdirs, files in os.walk(self.path):
            for file in files:
                rel_path = os.path.relpath(path, self.path)
                if not validator.is_bids(os.path.join(rel_path, file)):
                    invalid.append(os.path.join(rel_path, file))
        self.bids_invalid = invalid

        if invalid:
            warnings.warn(
                "One or more files do not conform to the BIDS standard. "
                "See self.bids_invalid for a list of files."
            )