Example #1
def get_model_file(setname, type, steps=None):
    # Build the model file name, appending the step count when given.
    # (The original also assigned filename = type + ".model" first, but
    # that value was unconditionally overwritten, so it is dropped here.)
    if steps is None:
        filename = type
    else:
        filename = "%s-%s" % (type, str(steps))
    return os.path.join(utils.set_directory(setname), filename + ".model")
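A quick usage sketch (the arguments are hypothetical; utils.set_directory is assumed to return the directory for the given set name):

# Yields "<set dir>/word2vec-5000.model"; without steps it would be
# "<set dir>/word2vec.model".
model_path = get_model_file('train', 'word2vec', steps=5000)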
Example #2

            };

            var tf = new TableFilter('results', filtersConfig);
            tf.init();

            for (div of document.getElementsByClassName("div_checklist")) {
                div.style.height = "100px";  // CSS lengths need a unit
            }
        </script>
    </body>
</html>
'''

html_table = ''

set_directory('html/dics')

image_folder = 'dics'
image_path = op.join('html', image_folder)
set_directory(image_path)

for i, setting in enumerate(dics_settings):
    # construct query
    setting = tuple(['none' if s is None else s for s in setting])

    reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, real_filter, use_noise_cov, reduce_rank = setting
    q = (
        f"reg=={reg:.2f} and sensor_type=='{sensor_type}' and pick_ori=='{pick_ori}' and inversion=='{inversion}' and real_filter=={real_filter} and "
        f"weight_norm=='{weight_norm}' and normalize_fwd=={normalize_fwd} and use_noise_cov=={use_noise_cov} and reduce_rank=={reduce_rank}"
    )
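The dics_settings grid is not defined in this fragment; presumably it enumerates every combination of the parameters unpacked above, e.g. via itertools.product. A sketch with made-up value lists (the real grids live elsewhere in the project):

from itertools import product

# Hypothetical parameter grids, in the unpacking order used above.
regs = [0.05]
sensor_types = ['grad', 'mag']
pick_oris = ['none', 'max-power']
inversions = ['single', 'matrix']
weight_norms = ['none', 'unit-noise-gain']
normalize_fwds = [True, False]
real_filters = [True, False]
use_noise_covs = [True, False]
reduce_ranks = [True, False]

dics_settings = list(product(regs, sensor_types, pick_oris, inversions,
                             weight_norms, normalize_fwds, real_filters,
                             use_noise_covs, reduce_ranks))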
Example #3

def create_epochs(raw,
                  title='Simulated evoked',
                  fn_simulated_epochs=None,
                  fn_report_h5=None):
    """
    Create epochs from the raw object. The epochs span config.tmin to
    config.tmax, with the baseline running from the epoch start to 0 s.

    Parameters:
    -----------
    raw : instance of Raw
        Simulated raw file.
    title : str
        Title of the evoked figure in the report.
    fn_simulated_epochs : None | string
        Path where the epochs file is to be saved. If None the file is not saved.
    fn_report_h5 : None | string
        Path where the .h5 file for the report is to be saved.

    Returns:
    --------
    epochs : instance of Epochs
        Epochs created from the simulated raw.
    """
    sfreq = raw.info['sfreq']
    trial_length = int((config.tmax - config.tmin) * sfreq)
    # One event per trial; MNE event columns are [sample, previous
    # trigger value, event id].
    events = np.hstack((
        (np.arange(config.n_trials) * trial_length)[:, np.newaxis] -
        int(config.tmin * sfreq),
        np.zeros((config.n_trials, 1)),
        np.ones((config.n_trials, 1)),
    )).astype(int)

    # Shift the epoch window inward by one sample to avoid edge artifacts
    epochs = mne.Epochs(raw=raw,
                        events=events,
                        event_id=1,
                        tmin=config.tmin + 1 / sfreq,
                        tmax=config.tmax - 1 / sfreq,
                        baseline=(None, 0),
                        preload=True)

    ###############################################################################
    # Save everything
    ###############################################################################

    if fn_simulated_epochs is not None:
        set_directory(op.dirname(fn_simulated_epochs))
        epochs.save(fn_simulated_epochs, overwrite=True)

    # Plot the simulated epochs in the report
    if fn_report_h5 is not None:
        set_directory(op.dirname(fn_report_h5))
        fn_report_html = fn_report_h5.rsplit('.h5')[0] + '.html'

        with mne.open_report(fn_report_h5) as report:

            fig = epochs.average().plot_joint(picks='mag', show=False)
            report.add_figs_to_section(fig,
                                       title,
                                       section='Sensor-level',
                                       replace=True)
            report.save(fn_report_html, overwrite=True, open_browser=False)

    return epochs
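A minimal usage sketch (file names hypothetical; config is assumed to provide tmin, tmax and n_trials exactly as inside the function):

epochs = create_epochs(raw,
                       title='Simulated evoked',
                       fn_simulated_epochs='epochs/sim-epo.fif',
                       fn_report_h5='reports/sim.h5')
evoked = epochs.average()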
Example #4

def simulate_raw(info,
                 fwd_disc_true,
                 signal_vertex,
                 signal_freq,
                 n_trials,
                 noise_multiplier,
                 random_state,
                 n_noise_dipoles,
                 er_raw,
                 fn_stc_signal=None,
                 fn_simulated_raw=None,
                 fn_report_h5=None):
    """
    Simulate a raw recording containing a single signal dipole
    oscillating at signal_freq. Noise dipoles are placed randomly
    in the whole cortex.

    Parameters:
    -----------
    info : instance of Info | instance of Raw
        The channel information to use for simulation.
    fwd_disc_true : instance of mne.Forward
        The forward operator for the discrete source space created with
        the true transformation file.
    signal_vertex : int
        The vertex where signal dipole is placed.
    signal_freq : float
        The frequency of the signal.
    n_trials : int
        Number of trials to create.
    noise_multiplier : float
        Multiplier for the noise dipoles. For noise_multiplier equal to one
        the signal and noise dipoles have the same magnitude.
    random_state : None | int | instance of RandomState
        If random_state is an int, it will be used as a seed for RandomState.
        If None, the seed will be obtained from the operating system (see
        RandomState for details). Default is None.
    n_noise_dipoles : int
        The number of noise dipoles to place within the volume.
    er_raw : instance of Raw
        Empty room measurement to be used as sensor noise.
    fn_stc_signal : None | string
        Path where the signal source time courses are to be saved. If None the file is not saved.
    fn_simulated_raw : None | string
        Path where the raw data is to be saved. If None the file is not saved.
    fn_report_h5 : None | string
        Path where the .h5 file for the report is to be saved.

    Returns:
    --------
    raw : instance of Raw
        Simulated raw file.
    stc_signal : instance of SourceEstimate
        Source time courses of the signal.
    """

    sfreq = info['sfreq']
    trial_length = int((config.tmax - config.tmin) * sfreq)
    times = np.arange(trial_length) / sfreq + config.tmin

    ###############################################################################
    # Simulate a single signal dipole source as signal
    ###############################################################################

    # TODO: I think a discrete source space was used because mne.simulate_raw did not take volume source spaces -> test
    src = fwd_disc_true['src']
    signal_vert = src[0]['vertno'][signal_vertex]
    data = np.asarray([generate_signal(times, freq=signal_freq)])
    vertices = np.array([signal_vert])
    stc_signal = mne.VolSourceEstimate(data=data,
                                       vertices=[vertices],
                                       tmin=times[0],
                                       tstep=np.diff(times[:2])[0],
                                       subject='sample')
    if fn_stc_signal is not None:
        set_directory(op.dirname(fn_stc_signal))
        stc_signal.save(fn_stc_signal)

    ###############################################################################
    # Create trials of simulated data
    ###############################################################################

    # select n_noise_dipoles entries from rr and their corresponding entries from nn
    raw_list = []

    for i in range(n_trials):
        # Simulate random noise dipoles
        stc_noise = simulate_sparse_stc(src,
                                        n_noise_dipoles,
                                        times,
                                        data_fun=generate_random,
                                        random_state=random_state,
                                        labels=None)

        # Project to sensor space
        stc = add_volume_stcs(stc_signal, noise_multiplier * stc_noise)

        raw = simulate_raw_mne(info,
                               stc,
                               trans=None,
                               src=None,
                               bem=None,
                               forward=fwd_disc_true)

        raw_list.append(raw)
        print('%02d/%02d' % (i + 1, n_trials))

    raw = mne.concatenate_raws(raw_list)

    # Use empty room noise as sensor noise
    raw_picks = mne.pick_types(raw.info, meg=True, eeg=False)
    er_raw_picks = mne.pick_types(er_raw.info, meg=True, eeg=False)
    raw._data[raw_picks] += er_raw._data[er_raw_picks, :len(raw.times)]

    ###############################################################################
    # Save everything
    ###############################################################################

    if fn_simulated_raw is not None:
        set_directory(op.dirname(fn_simulated_raw))
        raw.save(fn_simulated_raw, overwrite=True)

    # Plot the simulated raw data in the report
    if fn_report_h5 is not None:
        from matplotlib import pyplot as plt
        set_directory(op.dirname(fn_report_h5))
        fn_report_html = fn_report_h5.rsplit('.h5')[0] + '.html'

        now = datetime.now()
        with mne.open_report(fn_report_h5) as report:
            fig = plt.figure()
            plt.plot(times, generate_signal(times, freq=signal_freq))
            plt.xlabel('Time (s)')

            ax = fig.axes[0]
            add_text_next_to_xlabel(fig, ax,
                                    now.strftime('%m/%d/%Y, %H:%M:%S'))

            report.add_figs_to_section(fig,
                                       'Signal time course',
                                       section='Sensor-level',
                                       replace=True)

            fig = raw.plot()

            # axis 1 contains the xlabel
            ax = fig.axes[1]
            add_text_next_to_xlabel(fig, ax,
                                    now.strftime('%m/%d/%Y, %H:%M:%S'))

            report.add_figs_to_section(fig,
                                       'Simulated raw',
                                       section='Sensor-level',
                                       replace=True)
            report.save(fn_report_html, overwrite=True, open_browser=False)

    # Clear annotations left over from concatenating the trial-wise raws
    raw.set_annotations(mne.Annotations([], [], []))
    return raw, stc_signal
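A sketch of a typical call (file names and parameter values hypothetical; info comes from a real measurement and er_raw from an empty-room recording):

raw_sim, stc_signal = simulate_raw(info,
                                   fwd_disc_true,
                                   signal_vertex=2000,
                                   signal_freq=10.,
                                   n_trials=50,
                                   noise_multiplier=1.,
                                   random_state=42,
                                   n_noise_dipoles=50,
                                   er_raw=er_raw,
                                   fn_simulated_raw='data/sim-raw.fif')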
Example #5
    parser.add_argument('--n_folds', type=int, default=3)
    parser.add_argument('--preprocess', type=bool, default=True)
    parser.add_argument('--stain_norm', type=bool, default=True)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--ckpt_dir', type=str, default='/data/volume/model/')
    parser.add_argument('--model_name', type=str, default='fpn_model/')
    args = parser.parse_args()

    TRAIN_DIR, LABEL_PATH = '/data/train', '/data/train/label.csv'
    CKPT_DIR, MODEL_NAME = args.ckpt_dir, args.model_name

    random.seed(args.seed)
    np.random.seed(args.seed)

    # check isdir
    set_directory(CKPT_DIR, MODEL_NAME)

    # preprocessing
    if args.preprocess:
        preprocess = Preprocess(
            patch_size=args.patch_size,
            is_norm=args.stain_norm,
            target_norm_path='./preprocess/target_norm.png',
            mode='train',
            server='kakao')
        preprocess.save_patches()
    else:
        print('Already Preprocessed.')

    # set dataset
    patch_loader = PatchLoader(n_kfold=args.n_folds,
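One caveat in this snippet: argparse's type=bool does not parse strings — any non-empty value, including "False", is truthy, so --preprocess False still yields True. A common workaround (a sketch, not part of the original code) is an explicit converter:

def str2bool(v):
    # argparse calls this with the raw string from the command line.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')

parser.add_argument('--preprocess', type=str2bool, default=True)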
Example #6
            }
        </script>
    </body>
</html>
'''

html_table = ''

###############################################################################
# Set up directories
###############################################################################

img_folder = op.join('somato', 'dip_vs_lcmv')
html_path = 'html'
image_path = op.join(html_path, img_folder)
set_directory(image_path)

###############################################################################
# Compute LCMV solution and plot stc at dipole location
###############################################################################

dists = []
focs = []
ori_errors = []

for ii, setting in enumerate(lcmv_settings):

    reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, use_noise_cov, reduce_rank = setting
    try:

        if sensor_type == 'grad':
Example #7
def train_mnli_gradient_reversal(**kwargs):
    log_dir = set_directory(name=kwargs['type'], type_net=kwargs['type'])
    writer = SummaryWriter(log_dir)

    train, dev_matched_train, test, dev_matched_test, dev_mismatched_test, vocab = prepare_mnli_split(root='datasets/data',
                                                                                                      urls=['https://www.nyu.edu/projects/bowman/multinli/multinli_1.0.zip'],
                                                                                                      dir='MultiNLI',
                                                                                                      name='MultiNLI',
                                                                                                      data_path='datasets/data/MultiNLI/multinli_1.0',
                                                                                                      train_genres=[['government', 'telephone', 'slate', 'travel']],
                                                                                                      test_genres=[['fiction']],
                                                                                                      max_len=60)

    weight_matrix = prepare_glove(glove_path="datasets/GloVe/glove.840B.300d.txt",
                                  vocab=vocab)

    train_loader = DataLoader(
        MultiNLIDataset(dataset=train[0]),
        batch_size=kwargs['batch_size'],
        shuffle=True,
        num_workers=1,
        pin_memory=torch.cuda.is_available())

    test_loader = DataLoader(
        MultiNLIDataset(dataset=test[0]),
        batch_size=kwargs['batch_size'],
        shuffle=True,
        num_workers=1,
        pin_memory=torch.cuda.is_available())

    val_loader = DataLoader(
        MultiNLIDataset(dataset=dev_matched_train[0]),
        batch_size=kwargs['batch_size'],
        shuffle=True,
        num_workers=1,
        pin_memory=torch.cuda.is_available())

    model = construct_model_r(model_type=kwargs['type'],
                              weight_matrix=weight_matrix)

    num_parameters = sum([p.data.nelement() for p in model.parameters()])
    print(f'Number of model parameters: {num_parameters}')

    if torch.cuda.is_available():
        torch.cuda.set_device(kwargs['device'])

    if torch.cuda.is_available():
        model = model.cuda()
        loss_function = torch.nn.CrossEntropyLoss().cuda()
    else:
        loss_function = torch.nn.CrossEntropyLoss()

    optimizer = construct_optimizer(optimizer=kwargs['optim'],
                                    model=model,
                                    lr=kwargs['lr'])

    cudnn.benchmark = True

    total_steps = 0

    for epoch in tqdm(range(kwargs['epochs'])):
        total_steps = train_single_epoch_with_gradient_reversal(train_loader=train_loader,
                                                                val_loader=test_loader,
                                                                model=model,
                                                                criterion=loss_function,
                                                                optimizer=optimizer,
                                                                epoch=epoch,
                                                                alpha=1e-2,
                                                                total_steps=total_steps,
                                                                print_freq=kwargs['print_freq'],
                                                                writer=writer)

        validate(val_loader=val_loader,
                 model=model,
                 criterion=loss_function,
                 epoch=epoch,
                 print_freq=kwargs['print_freq'],
                 writer=writer)

    print('Zero Shot Performance')

    train_loader = DataLoader(
        MultiNLIDataset(dataset=test[0]),
        batch_size=kwargs['batch_size'],
        shuffle=True,
        num_workers=1,
        pin_memory=torch.cuda.is_available())

    val_loader = [DataLoader(
        MultiNLIDataset(dataset=dataset[0]),
        batch_size=kwargs['batch_size'],
        shuffle=True,
        num_workers=1,
        pin_memory=torch.cuda.is_available()) for dataset in [dev_matched_test, dev_mismatched_test]]

    validate(val_loader=train_loader,
             model=model,
             criterion=loss_function,
             epoch=epoch,
             print_freq=kwargs['print_freq'],
             writer=writer)

    for loader in val_loader:
        validate(val_loader=loader,
                 model=model,
                 criterion=loss_function,
                 epoch=epoch,
                 print_freq=kwargs['print_freq'],
                 writer=writer)
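The gradient reversal layer itself is defined elsewhere (inside construct_model_r, presumably). A minimal PyTorch sketch of the standard construction — identity on the forward pass, gradient negated and scaled on the backward pass, matching the alpha=1e-2 passed to the training loop above:

import torch

class GradientReversal(torch.autograd.Function):
    # Forward: identity. Backward: multiply the incoming gradient by
    # -alpha, so the layers below are trained to maximize the loss above.

    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # The second return value is the gradient w.r.t. alpha (not needed).
        return grad_output.neg() * ctx.alpha, None

# Usage inside a model's forward():
#   reversed_features = GradientReversal.apply(features, alpha)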
Example #8
def train_mnli(**kwargs):
    log_dir = set_directory(name=kwargs['type'], type_net=kwargs['type'])
    writer = SummaryWriter(log_dir)

    train, dev_matched, dev_mismatched, vocab = prepare_mnli(root='datasets/data',
                                                             urls=['https://www.nyu.edu/projects/bowman/multinli/multinli_1.0.zip'],
                                                             dir='MultiNLI',
                                                             name='MultiNLI',
                                                             data_path='datasets/data/MultiNLI/multinli_1.0',
                                                             max_len=60)

    weight_matrix = prepare_glove(glove_path="datasets/GloVe/glove.840B.300d.txt",
                                  vocab=vocab)

    train_loader = DataLoader(
        MultiNLIDataset(dataset=train),
        batch_size=kwargs['batch_size'],
        shuffle=True,
        num_workers=1,
        pin_memory=torch.cuda.is_available())

    val_loader = [DataLoader(
        MultiNLIDataset(dataset=loader),
        batch_size=kwargs['batch_size'],
        shuffle=True,
        num_workers=1,
        pin_memory=torch.cuda.is_available()) for loader in [dev_matched, dev_mismatched]]

    model = construct_model(model_type=kwargs['type'],
                            weight_matrix=weight_matrix)

    num_parameters = sum([p.data.nelement() for p in model.parameters()])
    print(f'Number of model parameters: {num_parameters}')

    if torch.cuda.is_available():
        torch.cuda.set_device(kwargs['device'])

    if torch.cuda.is_available():
        model = model.cuda()
        loss_function = torch.nn.CrossEntropyLoss().cuda()
    else:
        loss_function = torch.nn.CrossEntropyLoss()

    optimizer = construct_optimizer(optimizer=kwargs['optim'],
                                    model=model,
                                    lr=kwargs['lr'])

    total_steps = 0

    cudnn.benchmark = True

    for epoch in tqdm(range(kwargs['epochs'])):
        total_steps = train_single_epoch(train_loader=train_loader,
                                         model=model,
                                         criterion=loss_function,
                                         optimizer=optimizer,
                                         epoch=epoch,
                                         total_steps=total_steps,
                                         print_freq=kwargs['print_freq'],
                                         writer=writer)

        for loader in val_loader:
            validate(val_loader=loader,
                     model=model,
                     criterion=loss_function,
                     epoch=epoch,
                     print_freq=kwargs['print_freq'],
                     writer=writer)
Example #9
        <td><input type="text" onkeyup="filter(0, this)" placeholder="reg"></td>
        <td><input type="text" onkeyup="filter(1, this)" placeholder="sensor type"></td>
        <td><input type="text" onkeyup="filter(2, this)" placeholder="pick_ori"></td>
        <td><input type="text" onkeyup="filter(3, this)" placeholder="weight_norm"></td>
        <td><input type="text" onkeyup="filter(4, this)" placeholder="use_noise_doc"></td>
        <td><input type="text" onkeyup="filter(5, this)" placeholder="depth"></td>
        <td></td>
        <td></td>
    </tr>
'''

html_footer = '</table></body>'

html_table = ''

set_directory('html/lcmv')

for i, setting in enumerate(settings):
    # construct query
    q = ("reg==%.1f and sensor_type=='%s' and pick_ori=='%s' and "
         "weight_norm=='%s' and use_noise_cov==%s and depth==%s" % setting)

    print(q)

    sel = lcmv.query(q).dropna()

    if len(sel) < 1000:
        continue

    reg, sensor_type, pick_ori, weight_norm, use_noise_cov, depth = setting
Example #10
        <td><input type="text" onkeyup="filter(1, this)" placeholder="sensor type"></td>
        <td><input type="text" onkeyup="filter(2, this)" placeholder="pick_ori"></td>
        <td><input type="text" onkeyup="filter(3, this)" placeholder="inversion"></td>
        <td><input type="text" onkeyup="filter(4, this)" placeholder="weight_norm"></td>
        <td><input type="text" onkeyup="filter(5, this)" placeholder="normalize_fwd"></td>
        <td><input type="text" onkeyup="filter(6, this)" placeholder="real_filter"></td>
        <td></td>
        <td></td>
    </tr>
'''

html_footer = '</table></body>'

html_table = ''

set_directory('html/dics')

for i, setting in enumerate(settings):
    # construct query
    q = (
        "reg==%.1f and sensor_type=='%s' and pick_ori=='%s' and "
        "inversion=='%s' and weight_norm=='%s' and normalize_fwd==%s and real_filter==%s"
        % setting)

    print(q)

    sel = dics.query(q).dropna()

    if len(sel) < 1000:
        continue
Example #11
def chop_and_apply_ica(raw_filt_fname, ica_cfg):
    """
    Read the raw file, chop it into smaller segments and run ICA on the
    chops. Save the ICA objects plus the cleaned raw chops, and plot an
    overview of the artifact rejection.

    Parameters:
    -----------
    raw_filt_fname : str
        The filtered raw file to clean.
    ica_cfg : dict
        Dict containing the ica specific settings from the config file.

    Returns:
    --------
    clean_filtered : instance of mne.io.Raw
        Cleaned, filtered raw object.
    clean_unfiltered : instance of mne.io.Raw | None
        Cleaned, unfiltered raw object or None if ICA is not to be
        applied to unfiltered data.
    """

    raw_chop_clean_filtered_list = []
    raw_chop_clean_unfiltered_list = []

    print('Running chop_and_apply_ica on ', raw_filt_fname)

    ###########################################################################
    # load settings from ica config
    ###########################################################################
    chop_length = ica_cfg['chop_length']
    ecg_ch = ica_cfg['ecg_ch']
    eog_hor = ica_cfg['eog_hor_ch']
    eog_ver = ica_cfg['eog_ver_ch']

    flow_ecg = ica_cfg['flow_ecg']
    fhigh_ecg = ica_cfg['fhigh_ecg']
    flow_eog = ica_cfg['flow_eog']
    fhigh_eog = ica_cfg['fhigh_eog']

    ecg_thresh = ica_cfg['ecg_thresh']
    eog_thresh = ica_cfg['eog_thresh']
    use_jumeg = ica_cfg['use_jumeg']
    random_state = ica_cfg['random_state']
    unfiltered = ica_cfg['unfiltered']

    reject = ica_cfg['reject']
    exclude = ica_cfg['exclude']
    save = ica_cfg['save']

    # start cleaning

    raw_filt = mne.io.Raw(raw_filt_fname, preload=True, verbose=True)

    if unfiltered:
        raw_unfilt_fname = raw_filt_fname.replace(',fibp', '')
        raw_unfilt = mne.io.Raw(raw_unfilt_fname, preload=True, verbose=True)

    picks = mne.pick_types(raw_filt.info, meg=True, exclude=exclude)

    # you might want to determine the chop time in a more sophisticated way
    # to avoid accidentally chopping in the middle of a trial
    chop_times = determine_chop_times_every_x_s(raw_filt.n_times /
                                                raw_filt.info["sfreq"],
                                                chop_length=chop_length)

    # chop the data and apply filtering
    # avoid double counting of data point at chop: tmax = chop_times[i] - 1./raw.info["sfreq"]

    for i in range(0, len(chop_times) + 1):

        # get chop interval
        tmin, tmax = get_tmin_tmax(ct_idx=i,
                                   chop_times=chop_times,
                                   sfreq=raw_filt.info["sfreq"])

        #######################################################################
        # building the file names here
        #######################################################################

        info_filt = "fibp"

        if tmax is not None:
            tmaxi = int(tmax)
        else:
            tmaxi = tmax

        dirname = op.join(op.dirname(raw_filt_fname), 'chops')
        set_directory(dirname)
        prefix_filt = raw_filt_fname.rsplit('/')[-1].rsplit('-raw.fif')[0]
        ica_fname = op.join(
            dirname, prefix_filt + ',{}-{}-ica.fif'.format(int(tmin), tmaxi))

        # copy before cropping so the original raw stays intact
        raw_filt_chop = raw_filt.copy().crop(tmin=tmin, tmax=tmax)
        clean_filt_fname = op.join(
            dirname, prefix_filt +
            ',{},ar,{}-{}-raw.fif'.format(info_filt, int(tmin), tmaxi))
        raw_filt_chop_fname = op.join(
            dirname, prefix_filt +
            ',{},{}-{}-raw.fif'.format(info_filt, int(tmin), tmaxi))

        if unfiltered:
            prefix_unfilt = prefix_filt.replace(',fibp', '')
            raw_unfilt_chop = raw_unfilt.copy().crop(tmin=tmin, tmax=tmax)
            clean_unfilt_fname = op.join(
                dirname,
                prefix_unfilt + ',ar,{}-{}-raw.fif'.format(int(tmin), tmaxi))
            raw_unfilt_chop_fname = op.join(
                dirname,
                prefix_unfilt + ',{}-{}-raw.fif'.format(int(tmin), tmaxi))

        #######################################################################
        # run the ICA on the chops
        #######################################################################

        print('Starting ICA...')
        if op.isfile(ica_fname):

            ica = read_ica(ica_fname)

        else:

            ica = fit_ica(raw=raw_filt_chop,
                          picks=picks,
                          reject=reject,
                          ecg_ch=ecg_ch,
                          eog_hor=eog_hor,
                          eog_ver=eog_ver,
                          flow_ecg=flow_ecg,
                          fhigh_ecg=fhigh_ecg,
                          flow_eog=flow_eog,
                          fhigh_eog=fhigh_eog,
                          ecg_thresh=ecg_thresh,
                          eog_thresh=eog_thresh,
                          use_jumeg=use_jumeg,
                          random_state=random_state)

        # plot topo-plots first because sometimes components are hard to identify
        # ica.plot_components()
        # do the most important manual check
        ica.plot_sources(raw_filt_chop, block=True)

        # save ica object
        ica.save(ica_fname)

        print('ICA components excluded: ', ica.exclude)

        #######################################################################
        # apply the ICA to data and save the resulting files
        #######################################################################

        print('Running cleaning on filtered data...')
        clean_filt_chop = apply_ica_and_plot_performance(
            raw_filt_chop,
            ica,
            ecg_ch,
            eog_ver,
            raw_filt_chop_fname,
            clean_fname=clean_filt_fname,
            picks=picks,
            replace_pre_whitener=True,
            reject=reject,
            save=save)

        raw_chop_clean_filtered_list.append(clean_filt_chop)

        if unfiltered:

            print('Running cleaning on unfiltered data...')
            clean_unfilt_chop = apply_ica_and_plot_performance(
                raw_unfilt_chop,
                ica,
                ecg_ch,
                eog_ver,
                raw_unfilt_chop_fname,
                clean_fname=clean_unfilt_fname,
                picks=picks,
                replace_pre_whitener=True,
                reject=reject,
                save=save)

            raw_chop_clean_unfiltered_list.append(clean_unfilt_chop)

        # if tmax is None, last chop is reached
        if tmax is None:
            break

    clean_filt_concat = mne.concatenate_raws(raw_chop_clean_filtered_list)

    if unfiltered:

        clean_unfilt_concat = mne.concatenate_raws(
            raw_chop_clean_unfiltered_list)

    else:
        clean_unfilt_concat = None

    return clean_filt_concat, clean_unfilt_concat
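The two chop helpers used above are not shown in this example. Under the natural reading (fixed-length chops, the last one running to the end of the recording, tmax one sample before the next boundary as the comment in the loop suggests) they might look like this sketch:

import numpy as np

def determine_chop_times_every_x_s(total_time, chop_length=60.):
    # Interior chop boundaries every chop_length seconds; a recording
    # shorter than one chop yields an empty list (a single chop).
    return list(np.arange(chop_length, total_time, chop_length))

def get_tmin_tmax(ct_idx, chop_times, sfreq):
    # tmin starts at the previous boundary; tmax stops one sample before
    # the next boundary to avoid double counting the boundary sample.
    tmin = 0. if ct_idx == 0 else chop_times[ct_idx - 1]
    if ct_idx == len(chop_times):
        tmax = None  # last chop: crop to the end of the recording
    else:
        tmax = chop_times[ct_idx] - 1. / sfreq
    return tmin, tmax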