Example #1
def main():

    # Generate a json.product to display messages on Brainlife UI
    dict_json_product = {'brainlife': []}
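    # Each message appended to this dict has the form
    # {'type': 'warning' | 'info' | 'success', 'msg': '...'}; the dict is written
    # to product.json at the end so the Brainlife UI can display the messages.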

    # Load inputs from config.json
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Read the files
    data_file = config.pop('fif')
    raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)

    # Convert empty string values to None
    config = helper.convert_parameters_to_None(config)

    # Check if param_extended_proj parameter is empty
    if config['param_extended_proj'] == '[]':
        config['param_extended_proj'] = []

    # Read and save optional files
    config, cross_talk_file, calibration_file, events_file, head_pos_file, channels_file, destination = helper.read_optional_files(
        config, 'out_dir_maxwell_filter')

    # Raise a value error if the user provides both the destination file and the destination parameter
    if config['param_destination'] is not None and destination is not None:
        value_error_message = f"You can't provide both a destination file and a " \
                              f"destination parameter. One of them must be None."
        raise ValueError(value_error_message)

    # Channels.tsv must be BIDS compliant
    if channels_file is not None:
        user_warning_message_channels = f'The channels file provided must be ' \
                                        f'BIDS compliant and the column "status" must be present. '
        warnings.warn(user_warning_message_channels)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})
        # Update raw.info['bads'] with info contained in channels.tsv
        raw, user_warning_message_channels = helper.update_data_info_bads(raw, channels_file)
        if user_warning_message_channels is not None:
            warnings.warn(user_warning_message_channels)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})

    ## Convert parameters ##

    # Deal with param_origin parameter #
    # Convert origin parameter into array when the app is run locally
    if isinstance(config['param_origin'], list):
        config['param_origin'] = np.array(config['param_origin'])

    # Convert origin parameter into array when the app is run on BL
    if isinstance(config['param_origin'],
                  str) and config['param_origin'] != "auto":
        param_origin = list(map(float, config['param_origin'].split(', ')))
        config['param_origin'] = np.array(param_origin)
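    # e.g. "0.0, 0.0, 0.04" -> np.array([0.0, 0.0, 0.04])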

    # Raise an error if param_origin is not an array of three elements
    if isinstance(config['param_origin'], np.ndarray) and config['param_origin'].shape[0] != 3:
        value_error_message = f"Origin parameter must contain three elements."
        raise ValueError(value_error_message)

    # Deal with param_destination parameter #
    if config['param_destination'] is not None:
        destination = config['param_destination']
        report_param_destination = destination

        # Convert destination parameter into array when the app is run locally
        if isinstance(destination, list):
            destination = np.array(destination)

        # Convert destination parameter into array when the app is run on BL
        if isinstance(destination, str):
            destination = np.array(list(map(float, destination.split(', '))))

        # Raise an error if param destination is not an array of shape 3
        if isinstance(destination, np.ndarray) and destination.shape[0] != 3:
            value_error_message = f"Destination parameter must contain three elements."
            raise ValueError(value_error_message)
    else:
        report_param_destination = None

    # Deal with param_mag_scale parameter #
    # Convert param_mag_scale parameter into float when the app is run on BL
    if isinstance(config['param_mag_scale'],
                  str) and config['param_mag_scale'] != "auto":
        config['param_mag_scale'] = float(config['param_mag_scale'])

    # Deal with skip_by_annotation parameter #
    # Convert param_skip_by_annotation into a list of strings when the app runs on BL
    skip_by_an = config['param_skip_by_annotation']
    if skip_by_an == "[]":
        skip_by_an = []
    elif isinstance(skip_by_an,
                    str) and skip_by_an.find("[") != -1 and skip_by_an != "[]":
        skip_by_an = skip_by_an.replace('[', '')
        skip_by_an = skip_by_an.replace(']', '')
        skip_by_an = list(map(str, skip_by_an.split(', ')))
    config['param_skip_by_annotation'] = skip_by_an
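    # e.g. "[edge, bad_acq_skip]" -> ['edge', 'bad_acq_skip']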

    # Display a warning if no channels are marked as bad
    if not raw.info['bads']:
        user_warning_message = f'No channels are marked as bad. ' \
                               f'Make sure to check (automatically or visually) for bad channels before ' \
                               f'applying Maxwell Filtering.'
        warnings.warn(user_warning_message)
        dict_json_product['brainlife'].append({
            'type': 'warning',
            'msg': user_warning_message
        })

    ## Define kwargs ##

    # Delete key-value pairs from config.json when this app is executed on Brainlife
    config = helper.define_kwargs(config)

    # Delete the param_destination key
    del config['param_destination']

    # Define kwargs
    kwargs = config

    # Keep bad channels in memory
    bad_channels = raw.info['bads']

    # Apply Maxwell Filter
    raw_maxwell_filter = apply_maxwell_filter(raw, calibration_file,
                                              cross_talk_file, head_pos_file,
                                              destination, **kwargs)

    # Update channels.tsv if it exists: bad channels were interpolated by Maxwell filtering, so mark them as good
    if channels_file is not None:
        df_channels = pd.read_csv(channels_file, sep='\t')
        for bad in bad_channels:
            index_bad_channel = df_channels[df_channels['name'] == bad].index
            df_channels.loc[index_bad_channel, 'status'] = 'good'

        # Save channels.tsv
        df_channels.to_csv('out_dir_maxwell_filter/channels.tsv',
                           sep='\t',
                           index=False)

    # Write a success message in product.json
    dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Maxwell Filter was applied successfully.'})

    # Compute SNR on magnetometers
    # snr_before_mag = _compute_snr(raw, meg_channels_type='mag')
    # snr_after_mag = _compute_snr(raw_maxwell_filter, meg_channels_type='mag')

    # Compute SNR on gradiometers
    # snr_before_grad = _compute_snr(raw, meg_channels_type='grad')
    # snr_after_grad = _compute_snr(raw_maxwell_filter, meg_channels_type='grad')

    ## Generate html report ##

    # Get messages to add to the html report
    report_calibration_file, report_cross_talk_file, report_head_pos_file, report_destination_file = helper.message_optional_files_in_reports(
        calibration_file, cross_talk_file, head_pos_file, destination)

    # Generate report
    _generate_report(data_file, raw, raw_maxwell_filter, bad_channels,
                     report_cross_talk_file, report_calibration_file,
                     report_head_pos_file, report_destination_file,
                     report_param_destination, **kwargs)

    # Save the dict_json_product in a json file
    with open('product.json', 'w') as outfile:
        json.dump(dict_json_product, outfile)
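
# For reference, the product.json written above ends up shaped like:
# {"brainlife": [{"type": "warning", "msg": "..."},
#                {"type": "success", "msg": "Maxwell Filter was applied successfully."}]}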
Example #2
def main():

    # Generate a json.product to display messages on Brainlife UI
    dict_json_product = {'brainlife': []}

    # Load inputs from config.json
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Read the MEG file 
    data_file = config.pop('fif')
    raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)

    # Read and save optional files
    config, cross_talk_file, calibration_file, events_file, head_pos_file, channels_file, destination = helper.read_optional_files(config, 'out_dir_make_epochs')
    
    # Convert empty string values to None
    config = helper.convert_parameters_to_None(config)

    # Channels.tsv must be BIDS compliant
    if channels_file is not None:
        user_warning_message_channels = f'The channels file provided must be ' \
                                        f'BIDS compliant and the column "status" must be present. ' 
        warnings.warn(user_warning_message_channels)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})

        raw, user_warning_message_channels = helper.update_data_info_bads(raw, channels_file)
        if user_warning_message_channels is not None: 
            warnings.warn(user_warning_message_channels)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})


    ## Extract the matrix of events ##

    # Test if an events file is given
    if events_file is None:
        value_error_message = f'You need to provide events.tsv to make epochs. ' \
                              f'Please use the app-get-events to create such a file.'
        # Raise exception
        raise ValueError(value_error_message)

    # Warning: events file must be BIDS compliant
    user_warning_message_events = f'The events file provided must be ' \
                                  f'BIDS compliant.'
    warnings.warn(user_warning_message_events)
    dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_events})

    ############### TO BE TESTED ON NO RESTING STATE DATA
    # Compute the events matrix #
    df_events = pd.read_csv(events_file, sep='\t')

    # Extract relevant info from df_events
    samples = df_events['sample'].values
    event_id = df_events['value'].values

    # Compute the values for the events matrix
    events_time_in_sample = [raw.first_samp + sample for sample in samples]
    values_of_trigger_channels = [0] * len(events_time_in_sample)

    # Create a dataframe
    df_events_matrix = pd.DataFrame([events_time_in_sample, values_of_trigger_channels, event_id])
    df_events_matrix = df_events_matrix.transpose()

    # Convert dataframe to numpy array
    events_matrix = df_events_matrix.to_numpy()
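    # The resulting events_matrix follows MNE's (n_events, 3) convention:
    # [sample index, previous trigger value, event id],
    # e.g. array([[26520, 0, 5], [27984, 0, 7]])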


    ## Convert parameters ## 

    # Deal with param_baseline parameter # 
    # Convert baseline parameter into tuple when the app runs locally
    if isinstance(config['param_baseline'], list):
        config['param_baseline'] = tuple(config['param_baseline'])

    # Convert baseline parameter into tuple when the App runs on BL
    if isinstance(config['param_baseline'], str):
        param_baseline = list(map(str, config['param_baseline'].split(', ')))
        param_baseline = [None if i == 'None' else i for i in param_baseline]
        param_baseline = [float(i) if isinstance(i, str) else i for i in param_baseline]
        config['param_baseline'] = tuple(param_baseline)
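    # e.g. "None, 0" -> (None, 0.0) and "-0.2, 0" -> (-0.2, 0.0)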

    # Deal with param_proj parameter #
    # Convert string into boolean when app runs on BL
    if config['param_proj'] == "True":
        config['param_proj'] = True
    elif config['param_proj'] == "False":
        config['param_proj'] = False

    # Deal with param_picks_by_channel_indices parameter #
    # Convert it into a slice when the App is run locally and on BL
    picks = config['param_picks_by_channel_indices']
    if isinstance(picks, str) and picks.find(",") != -1 and picks.find("[") == -1:
        picks = list(map(int, picks.split(', ')))
        if len(picks) == 2:
            config['param_picks_by_channel_indices'] = slice(picks[0], picks[1])
        elif len(picks) == 3:
            config['param_picks_by_channel_indices'] = slice(picks[0], picks[1], picks[2])
        else:
            value_error_message = f"If you want to select channels using a slice, you must give two or three elements."
            raise ValueError(value_error_message)

    # Convert it into a list of integers when the App is run on BL
    if isinstance(picks, str) and picks.find(",") != -1 and picks.find("[") != -1:
        picks = picks.replace('[', '')
        picks = picks.replace(']', '')
        config['param_picks_by_channel_indices'] = list(map(int, picks.split(', ')))
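    # e.g. "0, 10" -> slice(0, 10); "0, 10, 2" -> slice(0, 10, 2); "[0, 1, 2]" -> [0, 1, 2]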

    # Deal with param_picks_by_channel_types_or_names parameter #
    # Convert it into a list of str when the App is run on BL
    picks = config['param_picks_by_channel_types_or_names']
    if isinstance(picks, str) and picks.find("[") != -1:
        picks = picks.replace('[', '')
        picks = picks.replace(']', '')
        config['param_picks_by_channel_types_or_names'] = list(map(str, picks.split(', ')))

    # Deal with event id #    
    # Convert it into a list of int or an int When it is run on BL
    if config['param_event_id'] is not None:
        if config['param_event_id'].find("[") != -1: 
            config['param_event_id'] = config['param_event_id'].replace('[', '')
            config['param_event_id'] = config['param_event_id'].replace(']', '')
            config['param_event_id'] = list(map(int, config['param_event_id'].split(', ')))  
        else:
            config['param_event_id'] = int(config['param_event_id']) 
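    # e.g. "[1, 2]" -> [1, 2]; "5" -> 5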

    # Deal with param metadata #
    # Convert it into a pd.DataFrame when the App runs locally
    if isinstance(config['param_metadata'], str):
        config['param_metadata'] = pd.read_csv(config['param_metadata'])
    elif isinstance(config['param_metadata'], dict):
        config['param_metadata'] = pd.DataFrame(list(config['param_metadata'].items())) 

    # Deal with param detrend #
    # Convert it into an int if not None
    if isinstance(config['param_detrend'], str):
        config['param_detrend'] = int(config['param_detrend'])

    # Delete key-value pairs from config.json when this app is executed on Brainlife
    kwargs = helper.define_kwargs(config)

    # Epoch data
    epoched_data = make_epochs(raw, events_matrix, **kwargs)

    # Success message in product.json    
    dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Data was successfully epoched.'})

    # Save the dict_json_product in a json file
    with open('product.json', 'w') as outfile:
        json.dump(dict_json_product, outfile)
Example #3
def main():

    # Generate a json.product to display messages on Brainlife UI
    dict_json_product = {'brainlife': []}

    # Load inputs from config.json
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Read the raw file
    data_file = config.pop('fif')
    raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)

    # Read and save optional files
    config, cross_talk_file, calibration_file, events_file, head_pos_file, channels_file, destination = helper.read_optional_files(config, 'out_dir_bad_channels')

    # Convert empty string values to None
    config = helper.convert_parameters_to_None(config)

    # Channels.tsv must be BIDS compliant
    if channels_file is not None:
        user_warning_message_channels = f'The channels file provided must be ' \
                                        f'BIDS compliant and the column "status" must be present. ' 
        warnings.warn(user_warning_message_channels)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})
        # Update raw.info['bads'] with info contained in channels.tsv
        raw, user_warning_message_channels = helper.update_data_info_bads(raw, channels_file)
        if user_warning_message_channels is not None: 
            warnings.warn(user_warning_message_channels)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})
    else:
        # Create a BIDSPath
        bids_path = BIDSPath(subject='subject',
                             session=None,
                             task='task',
                             run='01',
                             acquisition=None,
                             processing=None,
                             recording=None,
                             space=None,
                             suffix=None,
                             datatype='meg',
                             root='bids')
        # Write BIDS to create channels.tsv BIDS compliant
        write_raw_bids(raw, bids_path, overwrite=True)
        # Extract channels.tsv from bids path
        channels_file = 'bids/sub-subject/meg/sub-subject_task-task_run-01_channels.tsv'


    # Check if param_extended_proj parameter is an empty list string
    if config['param_extended_proj'] == '[]':
        config['param_extended_proj'] = [] # required to run a pipeline on BL

    # Display a warning if h_freq is None
    if config['param_h_freq'] is None:
        user_warning_message = f'No low-pass filter will be applied to the data. ' \
                               f'Make sure line noise and cHPI artifacts were removed before finding ' \
                               f'bad channels.'
        warnings.warn(user_warning_message)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message})

    # Check if config['param_return_scores'] is True   
    if config['param_return_scores'] is not True:
        value_error_message = f'param_return_scores must be True.'
        raise ValueError(value_error_message) 

    
    ## Convert parameters ##   

    # Deal with param_origin parameter #
    # Convert origin parameter into array when the app is run locally
    if isinstance(config['param_origin'], list):
        config['param_origin'] = np.array(config['param_origin'])

    # Convert origin parameter into array when the app is run on BL
    if isinstance(config['param_origin'], str) and config['param_origin'] != "auto":
        param_origin = list(map(float, config['param_origin'].split(', ')))
        config['param_origin'] = np.array(param_origin)

    # Raise an error if param_origin is not an array of three elements
    if isinstance(config['param_origin'], np.ndarray) and config['param_origin'].shape[0] != 3:
        value_error_message = f"Origin parameter must contain three elements."
        raise ValueError(value_error_message)

    # Deal with param_mag_scale parameter #
    # Convert param_mag_scale into a float when not "auto" when the app runs on BL
    if isinstance(config['param_mag_scale'], str) and config['param_mag_scale'] != "auto":
        config['param_mag_scale'] = float(config['param_mag_scale'])

    # Deal with skip_by_annotation parameter #
    # Convert param_skip_by_annotation into a list of strings when the app runs on BL
    skip_by_an = config['param_skip_by_annotation']
    if skip_by_an == "[]":
        skip_by_an = []
    elif isinstance(skip_by_an, str) and skip_by_an.find("[") != -1 and skip_by_an != "[]": 
        skip_by_an = skip_by_an.replace('[', '')
        skip_by_an = skip_by_an.replace(']', '')
        skip_by_an = list(map(str, skip_by_an.split(', ')))         
    config['param_skip_by_annotation'] = skip_by_an 


    # Delete key-value pairs from config.json when this app is executed on Brainlife
    kwargs = helper.define_kwargs(config)

    # Detect bad channels
    raw_copy = raw.copy()
    raw_bad_channels, auto_noisy_chs, auto_flat_chs, auto_scores = find_bad_channels(raw_copy, cross_talk_file,
                                                                                     calibration_file,
                                                                                     head_pos_file, 
                                                                                     **kwargs)
    # Delete the copy 
    del raw_copy


    ## Update channels.tsv with bad channels ##

    # Read it as a dataframe
    df_channels = pd.read_csv(channels_file, sep='\t')

    # Update df_channels with bad channels
    bads = raw_bad_channels.info['bads']  
    for bad in bads:
        index_bad_channel = df_channels[df_channels['name'] == bad].index
        df_channels.loc[index_bad_channel, 'status'] = 'bad'

    # Save channels.tsv
    df_channels.to_csv('out_dir_bad_channels/channels.tsv', sep='\t', index=False)


    # Write a success message in product.json
    dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Bad channels were successfully detected and written in channels.tsv.'})

    # Write an info message in product.json
    dict_json_product['brainlife'].append({'type': 'info', 'msg': f'This algorithm is not fully reliable. '
                                                                  f"Don't hesitate to check all of the "
                                                                  f"signals visually "
                                                                  f"before performing another preprocessing step."})

    
    ## Generate html report ##

    # Get messages to add to the html report
    report_calibration_file, report_cross_talk_file, report_head_pos_file, report_destination = helper.message_optional_files_in_reports(calibration_file, cross_talk_file, head_pos_file, destination)

    # Generate html report 
    _generate_report(raw, raw_bad_channels, auto_scores, auto_noisy_chs, auto_flat_chs, data_file, 
                     report_cross_talk_file, report_calibration_file, report_head_pos_file, **kwargs)

    
    # Save the dict_json_product in a json file
    with open('product.json', 'w') as outfile:
        json.dump(dict_json_product, outfile)
Example #4
def main():

    # Generate a json.product to display messages on Brainlife UI
    dict_json_product = {'brainlife': []}

    # Load inputs from config.json
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Read the files
    data_file = config.pop('fif')
    if config['param_epoched_data'] is False:
        data = mne.io.read_raw_fif(data_file, allow_maxshield=True)
    else:
        data = mne.read_epochs(data_file)

    # Read and save optional files
    config, cross_talk_file, calibration_file, events_file, head_pos_file, channels_file, destination = helper.read_optional_files(
        config, 'out_dir_resampling')

    # Convert empty string values to None
    config = helper.convert_parameters_to_None(config)

    # Channels.tsv must be BIDS compliant
    if channels_file is not None:
        user_warning_message_channels = f'The channels file provided must be ' \
                                        f'BIDS compliant and the column "status" must be present. '
        warnings.warn(user_warning_message_channels)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})
        # Update data.info['bads'] with info contained in channels.tsv
        data, user_warning_message_channels = helper.update_data_info_bads(data, channels_file)
        if user_warning_message_channels is not None:
            warnings.warn(user_warning_message_channels)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})

    # Extract the matrix of events #
    if config['param_epoched_data'] is False:
        if events_file is not None:
            # Warning: events file must be BIDS compliant
            user_warning_message_events = f'The events file provided must be ' \
                                          f'BIDS compliant.'
            warnings.warn(user_warning_message_events)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_events})
            ############### TO BE TESTED ON NO RESTING STATE DATA
            # Compute the events matrix #
            df_events = pd.read_csv(events_file, sep='\t')

            # Extract relevant info from df_events
            samples = df_events['sample'].values
            event_id = df_events['value'].values

            # Compute the values for the events matrix
            events_time_in_sample = [data.first_samp + sample for sample in samples]
            values_of_trigger_channels = [0] * len(events_time_in_sample)

            # Create a dataframe
            df_events_matrix = pd.DataFrame([events_time_in_sample, values_of_trigger_channels, event_id])
            df_events_matrix = df_events_matrix.transpose()

            # Convert dataframe to numpy array
            events_matrix = df_events_matrix.to_numpy()
        else:
            events_matrix = None
    else:
        events_matrix = None

    # Info message about resampling if applied
    if config['param_epoched_data'] is False:
        dict_json_product['brainlife'].append({
            'type': 'info',
            'msg': f'Data was resampled at {config["param_sfreq"]}Hz. '
                   f'Please bear in mind that it is generally recommended '
                   f'not to epoch downsampled data, but instead epoch '
                   f'and then downsample.'
        })

    # Comment about resampling
    comments_resample_freq = f'{config["param_sfreq"]}Hz'

    # Check if the user will save an empty events file
    if events_file is None and config['param_save_jointly_resampled_events'] is True:
        value_error_message = f'You cannot save an empty events file. ' \
                              f"If you don't have an events file, please set " \
                              f"'param_save_jointly_resampled_events' to False."
        # Raise exception
        raise ValueError(value_error_message)

    ## Convert parameters ##

    # Deal with param_npad parameter #
    # Convert param_npad into int if not "auto" when the App is run on BL
    if config['param_npad'] != "auto":
        config['param_npad'] = int(config['param_npad'])

    # Deal with param_n_jobs parameter #
    # Convert n jobs into int when the App is run on BL
    if config['param_n_jobs'] != 'cuda':
        config['param_n_jobs'] = int(config['param_n_jobs'])

    # Deal with stim picks parameter #
    # Convert stim picks into a list of int when the App is run on BL
    if isinstance(config['param_stim_picks'], str):
        config['param_stim_picks'] = config['param_stim_picks'].replace('[', '')
        config['param_stim_picks'] = config['param_stim_picks'].replace(']', '')
        config['param_stim_picks'] = list(map(int, config['param_stim_picks'].split(', ')))

    # Keep bad channels in memory
    bad_channels = data.info['bads']

    # Define the type of data
    data = data.pick(picks=config['param_pick_type'])

    # Delete key-value pairs from config.json when this app is executed on Brainlife
    del config['param_pick_type']
    kwargs = helper.define_kwargs(config)

    # Apply resampling
    data_copy = data.copy()
    data_resampled, events_resampled = resampling(data_copy, events_matrix,
                                                  **kwargs)
    del data_copy

    ## Create a BIDS compliant events file if events exist ##
    if events_resampled is not None and config['param_epoched_data'] is False:
        # Create a BIDSPath
        bids_path = BIDSPath(subject='subject',
                             session=None,
                             task='task',
                             run='01',
                             acquisition=None,
                             processing=None,
                             recording=None,
                             space=None,
                             suffix=None,
                             datatype='meg',
                             root='bids')

        # Extract event_id value #
        # to be tested when events are extracted from data
        # The third column of events corresponds to the value column of BIDS events.tsv
        event_id_value = list(events_resampled[:, 2])
        id_values_occurrences = Counter(event_id_value)  # number of different events
        id_values_occurrences = list(id_values_occurrences.keys())
        # For the trial_type column of BIDS events.tsv
        trials_type = [f"events_{i}" for i in range(1, len(id_values_occurrences) + 1)]
        dict_event_id = dict((k, v) for k, v in zip(trials_type, id_values_occurrences))
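        # e.g. event values [5, 5, 7] -> dict_event_id == {'events_1': 5, 'events_2': 7}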

        # Write BIDS to create events.tsv BIDS compliant
        write_raw_bids(data,
                       bids_path,
                       events_data=events_resampled,
                       event_id=dict_event_id,
                       overwrite=True)

        # Extract events.tsv from bids path
        events_file = 'bids/sub-subject/meg/sub-subject_task-task_run-01_events.tsv'

        # Copy events.tsv in outdir
        shutil.copy2(events_file, 'out_dir_resampling/events.tsv')

        # Info message in product.json
        dict_json_product['brainlife'].append({'type': 'info', 'msg': 'Jointly resampled events are saved in events.tsv.'})

    # Success message in product.json
    dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Data was successfully resampled.'})

    # Save the dict_json_product in a json file
    with open('product.json', 'w') as outfile:
        json.dump(dict_json_product, outfile)
Example #5
def main():

    # Generate a json.product to display messages on Brainlife UI
    dict_json_product = {'brainlife': []}

    # Load inputs from config.json
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Read the files
    data_file = config.pop('fif')
    raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)

    # Read and save optional files
    config, cross_talk_file, calibration_file, events_file, head_pos_file, channels_file, destination = helper.read_optional_files(
        config, 'out_dir_notch_filter')

    # Convert empty string values to None
    config = helper.convert_parameters_to_None(config)

    # Raise an error if no start frequency is given while method is not 'spectrum_fit'
    if config['param_freqs_specific_or_start'] is None and config['param_method'] != 'spectrum_fit':
        value_error_message = f'This frequency can only be None when method is spectrum_fit.'
        # Raise exception
        raise ValueError(value_error_message)

    ## Convert parameters ##

    # Deal with param_picks_by_channel_indices parameter #
    # Convert it into a slice when the App is run locally and on BL
    picks = config['param_picks_by_channel_indices']
    if isinstance(picks, str) and picks.find(",") != -1 and picks.find("[") == -1:
        picks = list(map(int, picks.split(', ')))
        if len(picks) == 2:
            config['param_picks_by_channel_indices'] = slice(picks[0], picks[1])
        elif len(picks) == 3:
            config['param_picks_by_channel_indices'] = slice(picks[0], picks[1], picks[2])
        else:
            value_error_message = f"If you want to select channels using a slice, you must give two or three elements."
            raise ValueError(value_error_message)

    # Convert it into a list of int when the app is run on BL
    if isinstance(picks, str) and picks.find(",") != -1 and picks.find("[") != -1:
        picks = picks.replace('[', '')
        picks = picks.replace(']', '')
        config['param_picks_by_channel_indices'] = list(map(int, picks.split(', ')))

    # Deal with param_picks_by_channel_types_or_names parameter #
    # Convert it into a list of str when the App is run on BL
    picks = config['param_picks_by_channel_types_or_names']
    if isinstance(picks, str) and picks.find("[") != -1:
        picks = picks.replace('[', '')
        picks = picks.replace(']', '')
        config['param_picks_by_channel_types_or_names'] = list(map(str, picks.split(', ')))

    # Deal with filter_length parameter #
    # Convert it into int if not "auto" when the App runs on BL
    if config['param_filter_length'] != "auto" and config[
            'param_filter_length'].find("s") == -1:
        config['param_filter_length'] = int(config['param_filter_length'])

    # Deal with param_notch_widths parameter #
    # Convert notch widths parameter into array when the app is run locally
    if isinstance(config['param_notch_widths'], list):
        config['param_notch_widths'] = np.array(config['param_notch_widths'])

    # Convert notch widths parameter into array when the app is run on BL
    if isinstance(config['param_notch_widths'], str):
        config['param_notch_widths'] = list(
            map(float, config['param_notch_widths'].split(', ')))
        if len(config['param_notch_widths']) == 1:
            config['param_notch_widths'] = float(
                config['param_notch_widths'][0])
        else:
            config['param_notch_widths'] = np.array(
                config['param_notch_widths'])
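    # e.g. "2.0" -> 2.0 ; "1.0, 2.0, 1.0" -> np.array([1.0, 2.0, 1.0])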

    # Deal with param_n_jobs parameter #
    # Convert it into an int if not "cuda" when the App is run on BL
    if config['param_n_jobs'] != 'cuda':
        config['param_n_jobs'] = int(config['param_n_jobs'])

    # Comments messages about filtering #
    if config['param_freqs_specific_or_start'] is not None and config[
            'param_freqs_end'] is None:
        comments_notch = f"{config['param_freqs_specific_or_start']}Hz"
    elif config['param_freqs_specific_or_start'] is not None and config[
            'param_freqs_end'] is not None:
        comments_notch = f"Between {config['param_freqs_specific_or_start']} and {config['param_freqs_end']}Hz"
        if config['param_freqs_step'] is not None:
            comments_notch = f"Between {config['param_freqs_specific_or_start']} and " \
                             f"{config['param_freqs_end']}Hz every {config['param_freqs_step']}Hz"

    # Channels.tsv must be BIDS compliant
    if channels_file is not None:
        user_warning_message_channels = f'The channels file provided must be ' \
                                        f'BIDS compliant and the column "status" must be present. '
        warnings.warn(user_warning_message_channels)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})
        # Update raw.info['bads'] with info contained in channels.tsv
        raw, user_warning_message_channels = helper.update_data_info_bads(raw, channels_file)
        if user_warning_message_channels is not None:
            warnings.warn(user_warning_message_channels)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})

    # Keep bad channels in memory #
    bad_channels = raw.info['bads']

    # Delete key-value pairs from config.json when this app is executed on Brainlife
    kwargs = helper.define_kwargs(config)

    # Apply temporal filtering
    raw_copy = raw.copy()
    raw_notch_filtered = notch_filter(raw_copy, **kwargs)
    del raw_copy

    # Success message in product.json
    dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Notch filter was applied successfully.'})

    # Compute SNR
    # snr_before = _compute_snr(raw)
    # snr_after = _compute_snr(raw_filtered)

    # Generate a report
    _generate_report(data_file, raw, raw_notch_filtered, bad_channels,
                     comments_notch, **kwargs)

    # Save the dict_json_product in a json file
    with open('product.json', 'w') as outfile:
        json.dump(dict_json_product, outfile)
Example #6
def main():

    # Generate a json.product to display messages on Brainlife UI
    dict_json_product = {'brainlife': []}

    # Load inputs from config.json
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Read the MEG file
    data_file = config.pop('fif')
    raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)

    # Read and save optional files
    config, cross_talk_file, calibration_file, events_file, head_pos_file, channels_file, destination = helper.read_optional_files(
        config, 'out_dir_get_events')

    # Convert empty string values to None
    config = helper.convert_parameters_to_None(config)

    # Channels.tsv must be BIDS compliant
    if channels_file is not None:
        user_warning_message_channels = f'The channels file provided must be ' \
                                        f'BIDS compliant and the column "status" must be present. '
        warnings.warn(user_warning_message_channels)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})
        # Update raw.info['bads'] with info contained in channels.tsv
        raw, user_warning_message_channels = helper.update_data_info_bads(raw, channels_file)
        if user_warning_message_channels is not None:
            warnings.warn(user_warning_message_channels)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})

    # Events file
    if events_file is not None:
        user_warning_message_events = f'The events.tsv file provided will be ' \
                                      f'overwritten with the new events obtained by this App.'
        warnings.warn(user_warning_message_events)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_events})

    ## Convert parameters ##

    # Deal with stim channels #
    # Convert it into a list of strings when the App is run on BL
    stim_channels = config['param_find_events_stim_channels']
    if isinstance(stim_channels, str) and stim_channels.find("[") != -1:
        stim_channels = stim_channels.replace('[', '')
        stim_channels = stim_channels.replace(']', '')
        config['param_find_events_stim_channels'] = list(map(str, stim_channels.split(', ')))

    # Deal with param consecutive #
    # Convert it into a bool if necessary
    if config['param_find_events_consecutive'] == "True":
        config['param_find_events_consecutive'] = True
    elif config['param_find_events_consecutive'] == "False":
        config['param_find_events_consecutive'] = False

    # Test if the data contains events
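    # (raw.info['events'] holds event info stored in the FIF file by the
    # acquisition system, not events computed from a stim channel.)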
    if raw.info['events'] and config['param_make_events'] is True:
        user_warning_message = f'Events already exist in this raw file. ' \
                               f'You are going to create an events.tsv file with events ' \
                               f'different from those contained in the raw file.'
        warnings.warn(user_warning_message)
        dict_json_product['brainlife'].append({
            'type': 'warning',
            'msg': user_warning_message
        })
    elif not raw.info['events'] and config['param_make_events'] is False:
        error_value_message = f"You can't extract events from this raw file because it doesn't contain " \
                              f"any events. Please set param_make_events to 'True' so that fixed " \
                              f"length events will be created."
        raise ValueError(error_value_message)

    # Delete key-value pairs from config.json when this app is executed on Brainlife
    kwargs = helper.define_kwargs(config)

    # Create or extract events
    events = get_events(raw, **kwargs)

    ## Create BIDS compliant events file ##

    # Create a BIDSPath
    bids_path = BIDSPath(subject='subject',
                         session=None,
                         task='task',
                         run='01',
                         acquisition=None,
                         processing=None,
                         recording=None,
                         space=None,
                         suffix=None,
                         datatype='meg',
                         root='bids')

    # Extract event_id value #
    # When fixed length events were created
    if config['param_make_events'] is True:
        dict_event_id = {'event': config['param_make_events_id']}
    # When existing events were extracted
    else:  # to be tested
        # The third column of events corresponds to the value column of BIDS events.tsv
        event_id_value = list(events[:, 2])
        id_values_occurrences = Counter(event_id_value)  # number of different events
        id_values_occurrences = list(id_values_occurrences.keys())
        # For the trial_type column of BIDS events.tsv
        trials_type = [f"events_{i}" for i in range(1, len(id_values_occurrences) + 1)]
        dict_event_id = dict((k, v) for k, v in zip(trials_type, id_values_occurrences))

    # Write BIDS to create events.tsv BIDS compliant
    write_raw_bids(raw,
                   bids_path,
                   events_data=events,
                   event_id=dict_event_id,
                   overwrite=True)

    # Extract events.tsv from bids path
    events_file = 'bids/sub-subject/meg/sub-subject_task-task_run-01_events.tsv'

    # Copy events.tsv in outdir
    shutil.copy2(events_file, 'out_dir_get_events/events.tsv')

    # Success message in product.json
    if config['param_make_events'] is True:
        dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Events were successfully created.'})
    else:
        dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Events were successfully extracted.'})

    # Save the dict_json_product in a json file
    with open('product.json', 'w') as outfile:
        json.dump(dict_json_product, outfile)
Example #7
def main():

    # Generate a json.product to display messages on Brainlife UI
    dict_json_product = {'brainlife': []}

    # Load inputs from config.json
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Read the files
    data_file = config.pop('fif')
    if config['param_epoched_data'] is False:
        data = mne.io.read_raw_fif(data_file, allow_maxshield=True)
    else:
        data = mne.read_epochs(data_file)

    # Read and save optional files
    config, cross_talk_file, calibration_file, events_file, head_pos_file, channels_file, destination = helper.read_optional_files(
        config, 'out_dir_temporal_filtering')

    # Convert empty string values to None
    config = helper.convert_parameters_to_None(config)

    ## Convert parameters ##

    # Deal with param_picks_by_channel_indices parameter #
    # Convert param_picks_by_channel_indices into a slice when the App is run locally and on BL
    picks = config['param_picks_by_channel_indices']
    if isinstance(picks, str) and picks.find(",") != -1 and picks.find("[") == -1:
        picks = list(map(int, picks.split(', ')))
        if len(picks) == 2:
            config['param_picks_by_channel_indices'] = slice(picks[0], picks[1])
        elif len(picks) == 3:
            config['param_picks_by_channel_indices'] = slice(picks[0], picks[1], picks[2])
        else:
            value_error_message = f"If you want to select channels using a slice, you must give two or three elements."
            raise ValueError(value_error_message)

    # Convert param_picks_by_channel_indices into a list of integers when the App is run on BL
    if isinstance(picks, str) and picks.find(",") != -1 and picks.find("[") != -1:
        picks = picks.replace('[', '')
        picks = picks.replace(']', '')
        config['param_picks_by_channel_indices'] = list(map(int, picks.split(', ')))

    # Deal with param_picks_by_channel_types_or_names parameter #
    # Convert param_picks_by_channel_types_or_names into a list of strings when the App is run on BL
    picks = config['param_picks_by_channel_types_or_names']
    if isinstance(picks, str) and picks.find("[") != -1:
        picks = picks.replace('[', '')
        picks = picks.replace(']', '')
        config['param_picks_by_channel_types_or_names'] = list(map(str, picks.split(', ')))

    # Deal with filter_length parameter on BL #
    # Convert param_filter_length into int if not auto and not a length in time when the App is run on BL
    if config['param_filter_length'] != "auto" and config[
            'param_filter_length'].find("s") == -1:
        config['param_filter_length'] = int(config['param_filter_length'])

    # Deal with param_l_trans_bandwidth parameter on BL #
    # Convert param_l_trans_bandwidth into a float if not auto when the App is run on BL
    if isinstance(config['param_l_trans_bandwidth'],
                  str) and config['param_l_trans_bandwidth'] != "auto":
        config['param_l_trans_bandwidth'] = float(
            config['param_l_trans_bandwidth'])

    # Deal with param_h_trans_bandwidth parameter on BL #
    # Convert param_h_trans_bandwidth into a float if not auto when the App is run on BL
    if isinstance(config['param_h_trans_bandwidth'],
                  str) and config['param_h_trans_bandwidth'] != "auto":
        config['param_h_trans_bandwidth'] = float(
            config['param_h_trans_bandwidth'])

    # Deal with param_n_jobs parameter on BL #
    # Convert param n jobs into an int if not cuda
    if config['param_n_jobs'] != 'cuda':
        config['param_n_jobs'] = int(config['param_n_jobs'])

    # Deal with skip_by_annotation parameter #
    # Convert param_skip_by_annotation into a list of strings when the app runs on BL
    skip_by_an = config['param_skip_by_annotation']
    if skip_by_an == "[]":
        skip_by_an = []
    elif isinstance(skip_by_an,
                    str) and skip_by_an.find("[") != -1 and skip_by_an != "[]":
        skip_by_an = skip_by_an.replace('[', '')
        skip_by_an = skip_by_an.replace(']', '')
        skip_by_an = list(map(str, skip_by_an.split(', ')))
    config['param_skip_by_annotation'] = skip_by_an

    ## Info message about filtering ##

    # Band pass filter
    if config['param_l_freq'] is not None and config[
            'param_h_freq'] is not None:
        comments_about_filtering = f'Data was filtered between ' \
                                   f'{config["param_l_freq"]} ' \
                                   f'and {config["param_h_freq"]}Hz.'
        dict_json_product['brainlife'].append({
            'type': 'info',
            'msg': comments_about_filtering
        })
        filter_type = "band-pass"

    # Lowpass filter
    elif config['param_l_freq'] is None and config['param_h_freq'] is not None:
        comments_about_filtering = f'Lowpass filter was applied at {config["param_h_freq"]}Hz.'
        dict_json_product['brainlife'].append({
            'type': 'info',
            'msg': comments_about_filtering
        })
        filter_type = "low-pass"

    # Highpass filter
    elif config['param_l_freq'] is not None and config['param_h_freq'] is None:
        comments_about_filtering = f'Highpass filter was applied at {config["param_l_freq"]}Hz.'
        dict_json_product['brainlife'].append({
            'type': 'info',
            'msg': comments_about_filtering
        })
        filter_type = "high-pass"

    # Raise an exception if both param_filter_l_freq and param_filter_h_freq are None
    elif config['param_l_freq'] is None and config["param_h_freq"] is None:
        value_error_message = f'You must specify a value for param_l_freq or param_h_freq, ' \
                              f"they can't both be set to None."
        # Raise exception
        raise ValueError(value_error_message)

    # Channels.tsv must be BIDS compliant
    if channels_file is not None:
        user_warning_message_channels = f'The channels file provided must be ' \
                                        f'BIDS compliant and the column "status" must be present. '
        warnings.warn(user_warning_message_channels)
        dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})
        # Update data.info['bads'] with info contained in channels.tsv
        data, user_warning_message_channels = helper.update_data_info_bads(data, channels_file)
        if user_warning_message_channels is not None:
            warnings.warn(user_warning_message_channels)
            dict_json_product['brainlife'].append({'type': 'warning', 'msg': user_warning_message_channels})

    # Keep bad channels in memory
    bad_channels = data.info['bads']

    # Delete key-value pairs from config.json when this app is executed on Brainlife
    kwargs = helper.define_kwargs(config)

    # Apply temporal filtering
    data_copy = data.copy()
    data_filtered = temporal_filtering(data_copy, **kwargs)
    del data_copy

    # Success message in product.json
    dict_json_product['brainlife'].append({'type': 'success', 'msg': 'Filtering was applied successfully.'})

    # Compute SNR
    #snr_before = _compute_snr(data)
    #snr_after = _compute_snr(data_filtered)

    # Generate a report
    _generate_report(data_file, data, data_filtered, bad_channels,
                     comments_about_filtering, **kwargs)

    # Save the dict_json_product in a json file
    with open('product.json', 'w') as outfile:
        json.dump(dict_json_product, outfile)
Example #8
import mne
import json
from brainlife_apps_helper import helper
from mne_bids import BIDSPath, read_raw_bids

# Load inputs from config.json
with open('config.json') as config_json:
    config = json.load(config_json)

# Read ctf
data_file = config.pop('fif')
#events = mne.read_events(data_file)
#raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)

# Convert empty string values to None
config = helper.convert_parameters_to_None(config)

# Save ctf file
# raw.save('out_dir/test.fif', overwrite=True)

# Read the raw file
# data_file = config.pop('fif')
# raw = mne.io.read_raw_fif(data_file, allow_maxshield=True)

# print(raw.pick_types(eeg=True))
# print(raw.info['ch_names'])

# print(raw.info['chs'])

# # Convert all "" into None when the App runs on BL
# tmp = dict((k, None) for k, v in config.items() if v == "")
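# A full conversion would then merge these back into config; as a sketch (the
# real helper.convert_parameters_to_None may differ):
# config = {k: (None if v == "" else v) for k, v in config.items()}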