Example #1
def recon_all_func(fs_path, t1_subj_dir, raw_subj):
    # run the full FreeSurfer reconstruction on the participant's T1 image
    try:
        t1_file = files.get_files(t1_subj_dir, "co", "nii.gz")[2][0]

        sp.call([
            "recon-all", "-i", t1_file, "-subjid", raw_subj, "-sd", fs_path,
            "-all"
        ])
    except IndexError:
        print(raw_subj, "No T1 file for this participant")
Example #2
def dcm2nii_func(t1_subj_dir, raw_subj_dir):
    # convert the raw DICOM folder to NIfTI with dcm2nii if no T1 image exists yet
    dcm2nii = "/cubric/software/mricron/dcm2nii"
    try:
        t1_file = files.get_files(t1_subj_dir, "co", "nii.gz")[2][0]
        print(raw_subj_dir, op.exists(t1_file), "T1 image exists")
    except IndexError:
        sp.call([
            dcm2nii, "-4", "y", "-a", "n", "-c", "n", "-d", "n", "-e", "n",
            "-f", "n", "-g", "y", "-i", "n", "-o", t1_subj_dir, "-p", "n",
            "-r", "y", "-v", "y", "-x", "y", raw_subj_dir
        ])
Example #3
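# each condition code maps to a pair of (sign, sign) tuples, one per movement phase;
# only the second element of each tuple is read out below, as the dot-motion direction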
cond_dict = {
    0: ((1, 1), (1, 1)),
    1: ((1, 1), (1, -1)),
    2: ((1, 1), (-1, 1)),
    3: ((1, 1), (-1, -1)),
    4: ((1, -1), (1, 1)),
    5: ((1, -1), (1, -1)),
    6: ((1, -1), (-1, 1)),
    7: ((1, -1), (-1, -1))
}

beh_path = "/cubric/scratch/c1557187/stirring/BEH"

beh_files = files.get_files(
    beh_path,
    "",
    "matched.gz"
)[2]

beh_files.sort()


# derive per-phase movement direction (mean of the first/second 500 samples of degs, halved)
# and per-phase dot-motion direction from the condition code
for beh_file in beh_files:
    beh = pd.read_pickle(beh_file)
    beh = beh.sort_values(["run", "trial"])
    beh.reset_index(inplace=True, drop=True)
    beh["movement_direction_phase_1"] = beh.degs.apply(lambda x: np.mean(x[:500])/2)
    beh["movement_direction_phase_2"] = beh.degs.apply(lambda x: np.mean(x[500:])/2)
    beh["dot_motion_direction_phase_1"] = beh.conditions.apply(lambda x: cond_dict[x][0][1])
    beh["dot_motion_direction_phase_2"] = beh.conditions.apply(lambda x: cond_dict[x][1][1])
Example #4
try:
    id_index = int(sys.argv[1])
except (IndexError, ValueError):
    print("incorrect arguments")
    sys.exit()

try:
    file_index = int(sys.argv[2])
except (IndexError, ValueError):
    print("incorrect arguments")
    sys.exit()

output_path = "/cubric/scratch/c1557187/stirring/RESULTS/SVC/data"

beh_path = "/cubric/scratch/c1557187/stirring/BEH"
beh_files = files.get_files(beh_path, "", "matched.gz")[2]

beh_files.sort()

epo_path = "/cubric/scratch/c1557187/stirring/MEG"
subjects = files.get_folders_files(epo_path, wp=False)[0]
subjects.sort()

long_files = [
    files.get_files(op.join(epo_path, i), "long", "epo.fif")[2][0]
    for i in subjects
]
long_files.sort()

engage_files = [
    files.get_files(op.join(epo_path, i), "engage", "epo.fif")[2][0]
    for i in subjects
]
engage_files.sort()

subjs = files.get_folders_files(
    data_out,
    wp=False
)[0]

subj = subjs[subj_index]

subj_path = op.join(
    data_out,
    subj
)

exp_files = files.get_files(
    subj_path,
    "ica_cln",
    "-raw.fif",
    wp=True
)[2]

exp_files.sort()
exp_file = exp_files[file_index]

trans_file = op.join(
    pipeline_params["data_path"],
    "dig",
    subj,
    "{}-trans.fif".format(subj)
)

raw = mne.io.read_raw_fif(
    exp_file,
Example #6
which_freq = "low_gamma"
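# band to analyse; freq_dict gives (low, high) cutoffs in Hz, None = no lower bound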

freq_dict = {
    "alpha": (7, 14),
    "beta": (14, 30),
    "delta": (None, 4),
    "theta": (4, 7),
    "low_gamma": (30, 80)
}

with open(label_json) as json_file:
    labels = json.load(json_file)

label_data = pd.read_csv(labels_csv)

participant_beh_files = files.get_files(data_path, "beh", ".gz")[2]
participant_beh_files.sort()

participant_meg_files = files.get_files(data_path, which_freq, ".npy")[2]
participant_meg_files.sort()

pp_list = [
    i.split("/")[-1].split("-")[-1].split(".")[0] for i in participant_beh_files
]

zip_files = list(zip(pp_list, participant_beh_files, participant_meg_files))

odd_means = dict()
reg_means = dict()

for pp, beh_path, meg_path in tqdm(zip_files):
        ica.fit(raw)

        # save the files
        raw.save(raw_out)
        ica.save(ica_out)
        mne.write_events(eve_out, events)

    named_tuple = time.localtime()  # get struct_time
    time_string = time.strftime("%m/%d/%Y, %H:%M:%S", named_tuple)
    print("converting, filtering and downsampling done:", time_string)

# MANUAL ICA COMPONENT INSPECTION use ICA_inspection.py

if parameters["apply_ica_epoch"]:
    raws = files.get_files(subj_path, "raw", "-raw.fif")[0]
    raws.sort()

    icas = files.get_files(subj_path, "ica", "-ica.fif")[0]
    icas.sort()

    evts = files.get_files(subj_path, "events", "-eve.fif")[0]
    evts.sort()

    components_file_path = op.join(subj_path, "rejected-components.json")
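    # rejected-components.json maps each raw file to the list of ICA components
    # marked for rejection during the manual inspection step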

    with open(components_file_path) as data:
        components_rej = json.load(data)

    all_files = list(zip(raws, icas, evts))
try:
    file_index = int(sys.argv[2])
except (IndexError, ValueError):
    print("incorrect file index")
    sys.exit()

# open json file
with open(json_file) as pipeline_file:
    parameters = json.load(pipeline_file)

# prepare paths
data_path = parameters["path"]
subjects_dir = parameters["freesurfer"]

subjects = files.get_folders_files(subjects_dir, wp=False)[0]
subjects.sort()
subjects = [i for i in subjects if "fsaverage" not in i]
subject = subjects[subj_index]

subject_meg = op.join(data_path, "MEG", subject, parameters["folder"])

raw_paths = files.get_files(subject_meg, "80-", "-raw.fif", wp=True)[2]
raw_paths.sort()

raw_path = raw_paths[file_index]
print(raw_path)
print(subject)

mne.gui.coregistration(subjects_dir=subjects_dir,
                       subject=subject,
                       inst=raw_path)
        )

        raw.save(raw_80_out, overwrite=True)
        print(raw_80_out)

        mne.write_events(eve_out, events)

        # deleting  heavy objects
        del raw

        named_tuple = time.localtime()  # get struct_time
        time_string = time.strftime("%m/%d/%Y, %H:%M:%S", named_tuple)
        print("step 1 done:", time_string)

if parameters["step_2"]:
    raws_files = files.get_files(subject_meg, "80", "-raw.fif", wp=True)[2]
    raws_files.sort()

    events_files = files.get_files(subject_meg, "events", "-eve.fif",
                                   wp=True)[2]
    events_files.sort()

    for ix, (raw_path, event_path) in enumerate(zip(raws_files, events_files)):

        # output paths
        ica_out = op.join(subject_meg,
                          "80-{0}-ica.fif".format(str(ix).zfill(3)))

        events = mne.read_events(event_path)
        raw = mne.io.read_raw_fif(raw_path, preload=True)
                mask=ch_selection,
                mask_params=mask_params,
                axes=ax,
                show=False
            )

        row += 1
        ax = figure.add_subplot(
            gs[row, column],
            label="freq{}{}".format(str(row),str(column))
        )

        # read the data
        data_files = files.get_files(
            output_dir,
            "{}".format(freq_key), # respo specific
            ".npy"
        )[2]
        data_files.sort()

        odd, regular = [np.load(i).item() for i in data_files]
        # data processing
        reg_data = np.array(regular[group_key]) * 1e14
        reg_data = reg_data[:, x_range[0]:x_range[1]]
        reg_data = rescale(reg_data, times, baseline, mode="mean")
        reg_mean = np.average(reg_data, axis=0)
        reg_sem = sem(reg_data, axis=0)
        odd_data = np.array(odd[group_key]) * 1e14
        odd_data = odd_data[:, x_range[0]:x_range[1]]
        odd_data = rescale(odd_data, times, baseline, mode="mean")
        odd_mean = np.average(odd_data, axis=0)
    "MEG",
    subject
)

files.make_folder(meg_subject_dir)

beh_subject_path = op.join(
    out_path,
    "BEH",
    "beh_{}_matched.gz".format(subject)
)

if parameters["step_1"]:
    raw_files = files.get_files(
        raw_subject_dir,
        "",
        "-raw.fif"
    )[2]

    raw_files = [i for i in raw_files if "_rs" not in i]
    raw_files.sort()

    for ix, raw_path in enumerate(raw_files):
        file_ix = str(ix).zfill(3)
        print(raw_path)
        raw = mne.io.read_raw_fif(
            raw_path,
            preload=True
        )
        # re-label the two EOG electrodes and the trigger channel
        set_ch = {"EEG057-3305": "eog", "EEG058-3305": "eog", "UPPT001": "stim"}
        raw.set_channel_types(set_ch)
Example #12
# prepare paths
data_path = parameters["path"]
meg_path = op.join(data_path, "MEG")

subjects = files.get_folders_files(meg_path, wp=False)[0]
subjects.sort()
subject = subjects[subj_index]

subject_meg = op.join(
    meg_path,
    subject
)

raw_paths = files.get_files(
    subject_meg,
    "time-frequency-",
    "-raw.fif",
    wp=False
)[2]
raw_paths.sort()

ica_paths = files.get_files(
    subject_meg,
    "",
    "-ica.fif",
    wp=False
)[2]
ica_paths.sort()

components_file_path = op.join(
    subject_meg,
    "rejected-components.json"
Example #13
    "beta": (14, 30),
    "delta": (None, 4),
    "theta": (4, 7),
    "low_gamma": (30, 80)
}

with open(label_json) as json_file:
    labels = json.load(json_file)

# the label JSON appears to store a string representation of the labels, so
# eval turns it back into a Python object
labels = eval(labels)

label_data = pd.read_csv(labels_csv)

mean_files = files.get_files(
    data_path,
    "means-{}".format(which_freq),
    ".npy"
)[2]
mean_files.sort()

odd, reg = [np.load(i).item() for i in mean_files]

odd = np.array([odd[key] for key in odd.keys()])
reg = np.array([reg[key] for key in reg.keys()])


times = np.linspace(-0.5, 2.6, num=776)

# reg = rescale(reg, times, (-0.1, 0.0))
# odd = rescale(odd, times, (-0.1, 0.0))
subjects = files.get_folders_files(
    path,
    wp=False
)[0]
subjects.sort()

subject = subjects[index]

subject_meg = op.join(
    path,
    subject,
    "new_v1"
)

event_files = files.get_files(
    subject_meg,
    "events",
    "-eve.fif"
)[2]
event_files.sort()

raw_files = files.get_files(
    subject_meg,
    "80",
    "-raw.fif"
)[2]
raw_files.sort()

ica_files = files.get_files(
    subject_meg,
    "80-",
    "-ica.fif"
Example #15
        ica.fit(
            raw, 
            picks=picks_meg,
            reject=reject,
            verbose=verb
        )
        print(subj, ix, "ICA_fit")
        raw.save(raw_out_path, overwrite=True)
        mne.write_events(events_out_path, events)
        ica.save(ica_out_path)
        print(subj, ix, "saved")

if pipeline_params["apply_ICA"]:
    ica_json = files.get_files(
        meg_subj_path,
        "",
        "ica-rej.json"
    )[2][0]

    raw_files = files.get_files(
        meg_subj_path,
        "raw",
        "-raw.fif",
        wp=False
    )[2]

    comp_ICA_json_path = op.join(
        meg_subj_path,
        "{}-ica-rej.json".format(str(subj).zfill(3))
    )
subjects = files.get_folders_files(subjects_dir, wp=False)[0]
subjects.sort()
subjects = [i for i in subjects if "fsaverage" not in i]
subject = subjects[subj_index]

subject_meg = op.join(
    data_path,
    "MEG",
    subject,
    parameters["folder"]
)

raw_paths = files.get_files(
    subject_meg,
    "80-",
    "-raw.fif",
    wp=True
)[2]
raw_paths.sort()
raw_path = raw_paths[file_index]

file_no = raw_path.split("/")[-1].split("-")[1]
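# file_no is the NNN chunk of a name like "80-NNN-raw.fif", used to pick the
# matching -trans.fif below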

trans_path = files.get_files(
    subject_meg,
    file_no,
    "-trans.fif"
)[2][0]

info = mne.io.read_info(raw_path)
Example #17
# try:
#     index = int(sys.argv[1])
# except:
#     print("incorrect file index")
#     sys.exit()

path = "/cubric/scratch/c1557187/act_mis/MEG"
output_dir = "/cubric/scratch/c1557187/act_mis/RESULTS/THESIS_ANALYSIS"

subjects = files.get_folders_files(path, wp=False)[0]
subjects.sort()

epoch_list_path = []
for subject in subjects:
    filep = op.join(path, subject, "new_v1")
    epochs_list = files.get_files(filep, "epochs", "-epo.fif")[2]
    epoch_list_path.extend(epochs_list)

epoch_list_path = [i for i in epoch_list_path if "-TF-" not in i]
epoch_list_path = [i for i in epoch_list_path if "-TD-" not in i]

info = mne.io.read_info(epoch_list_path[0])
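# keep only the MEG data channels (ch_names[29:-3]); the leading and trailing
# channels are presumably reference/auxiliary channels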
info = mne.pick_info(
    info, mne.pick_channels(info["ch_names"], include=info["ch_names"][29:-3]))

sensor_groupings = {
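    # CTF sensor-name prefixes: MLO/MRO = left/right occipital, MLC/MRC = left/right central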
    "A": [i for i in info["ch_names"] if "MLO" in i],
    "B": [i for i in info["ch_names"] if "MRO" in i],
    "C": [i for i in info["ch_names"] if "MLC" in i],
    "D": [i for i in info["ch_names"] if "MRC" in i],
    "E": [i for i in info["ch_names"] if "O" in i],
Example #18
# read the pipeline params
with open(json_file) as pipeline_file:
    pipeline_params = json.load(pipeline_file)

# PATHS
data_out = op.join(pipeline_params["output_path"], "data")

subjs = files.get_folders_files(data_out, wp=False)[0]

subjs.sort()

subj = subjs[subj_index]

subj_path = op.join(data_out, subj)

ica_files = files.get_files(subj_path, "", "-ica.fif")[2]
ica_files.sort()
raw_files = files.get_files(subj_path, "", "-raw.fif", wp=True)[2]
raw_files.sort()

raw_file = raw_files[file_index]
ica_file = ica_files[file_index]

comp_ICA_json_path = op.join(subj_path,
                             "{}-ica-rej.json".format(str(subj).zfill(3)))

if not op.exists(comp_ICA_json_path):
    json_dict = {op.split(i)[1]: [] for i in raw_files}
    files.dump_the_dict(comp_ICA_json_path, json_dict)

raw = mne.io.read_raw_fif(raw_file, preload=True, verbose=False)
    sys.exit()

# get paths

path = "/cubric/scratch/c1557187/act_mis/MEG"
beh_path = "/cubric/scratch/c1557187/act_mis/RESULTS/BEH_PARTICIPANT"
output_dir = "/cubric/scratch/c1557187/act_mis/RESULTS/THESIS_ANALYSIS"

subjects = files.get_folders_files(path, wp=False)[0]
subjects.sort()

subject = subjects[range_index]

print(subject)

beh_file = files.get_files(beh_path, "beh-{}".format(subject), ".gz")[2][0]

meg_path = op.join(path, subject, "new_v1")
meg_files = files.get_files(meg_path, "epochs-TD", "-epo.fif")[2]
meg_files.sort()

# read data

beh = pd.read_pickle(beh_file)

data = [mne.read_epochs(i) for i in meg_files]
data = np.vstack([i.pick_types(ref_meg=False).get_data() for i in data])

times = np.linspace(-0.6, 2.6, num=801)
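# baseline-correct each epoch by subtracting the mean of the -0.6 to -0.5 s window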
data = rescale(data, times, (-0.6, -0.5), mode="mean")
Example #20
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from tools import files
import os.path as op
import numpy as np
from scipy.stats import trim_mean, sem
from mne.stats import permutation_cluster_1samp_test

output_dir = "/cubric/scratch/c1557187/act_mis/RESULTS/THESIS_ANALYSIS/SVM"

img_save = "/cubric/scratch/c1557187/act_mis/RESULTS/THESIS_ANALYSIS/VIZ_HELP"

dataset = "clk_vs_anti_onset_new_baseline_short"

all_files = files.get_files(output_dir, dataset, ".npy")[2]

all_files.sort()

data = []
for file in all_files:
    pp = np.load(file)
    pp = np.mean(pp, axis=0)
    data.append(pp)
data = np.array(data)

times = np.linspace(-1, 2.1, num=777)

minimax = (0.25, 0.5, 0.75)

tg_mean = np.mean(data, axis=0)
import sys
import os.path as op
import numpy as np
import pandas as pd
from tools import files

try:
    index = int(sys.argv[1])
except (IndexError, ValueError):
    print("incorrect arguments")
    sys.exit()

data_path = "/cubric/scratch/c1557187/act_mis/RESULTS/TF_SOURCE_SPACE"

beh_files = files.get_files(data_path, "beh", ".gz")[2]
beh_files.sort()
subjects = [i.split("-")[-1].split(".")[0] for i in beh_files]
subjects.sort()

subject = subjects[index]
subject_files = files.get_files(data_path, subject, ".npy")[2]
subject_files.sort()

cond = np.array(
    [i.split("/")[-1].split(".")[0].split("-") for i in subject_files])
beh = pd.read_pickle(beh_files[index])
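# the fourth "-"-separated chunk of each file name is printed next to
# beh.obs_dir_mod below, apparently as a sanity check that files and trials line up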

print(
    np.hstack([
        np.array(beh.obs_dir_mod.values)[:, np.newaxis],
        cond[:, 3][:, np.newaxis]
from mne.stats import permutation_cluster_test
from mne.baseline import rescale
import matplotlib.pyplot as plt
from matplotlib import gridspec
from tqdm import tqdm

try:
    key_index = int(sys.argv[1])
except (IndexError, ValueError):
    print("incorrect file index")
    sys.exit()

output_dir = "/cubric/scratch/c1557187/act_mis/RESULTS/THESIS_ANALYSIS"
epo_info_path = "/cubric/scratch/c1557187/act_mis/MEG/0001/new_v1/epochs-TD-001-epo.fif"

data_files = files.get_files(output_dir, "TD", ".npy")[2]
data_files.sort()

odd, regular = [np.load(i).item() for i in data_files]

keys = list(regular.keys())

key = keys[key_index]

info = mne.io.read_info(epo_info_path)
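# keep only MEG data channels (drop reference MEG, EOG and stimulus channels)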
ch_subset = mne.pick_types(info, ref_meg=False, eog=False, stim=False)
info = mne.pick_info(info, ch_subset)

sensor_groupings = {
    "A": [i for i in info["ch_names"] if "MLO" in i],
    "B": [i for i in info["ch_names"] if "MRO" in i],
subjects = files.get_folders_files(subjects_dir, wp=False)[0]
subjects.sort()

atlas_csv = "/cubric/data/c1557187/meg_pipeline_action_mismatch/tools/atlas_glasser_2016.csv"
atlas_labels = pd.read_csv(atlas_csv)
label_list = atlas_labels.LABEL_NAME.values.tolist()
left = ["L_" + i for i in label_list]
right = ["R_" + i for i in label_list]
label_list = left + right
label_list.sort()

################################################################################

# subject = subjects[index]
for subject in tqdm(subjects[:-4]):
    trials_all = files.get_files(path, subject, ".npy")[2]
    trials_all.sort()

    times = np.linspace(-0.5, 2.6, num=776)

    enumerator = list(zip(range(len(trials_all)), trials_all))

    data_dict = dict()
    def extract(input_):
        ix, path_to_a_file = input_
        data = np.load(path_to_a_file).item()
        data = np.array([data[key].reshape(-1) for key in label_list])
        data = filter_data(
            data,
            sfreq=250,
            l_freq=None,
Example #24
subjects_dir = "/cubric/scratch/c1557187/MRI_337/FS_OUTPUT"
main_path = "/cubric/scratch/c1557187/act_mis"

subjects = files.get_folders_files(subjects_dir, wp=False)[0]
subjects.sort()

ranges = [range(0, 12), range(12, 24), range(24, 36), range(36, 48)]
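# 48 jobs split into four batches of 12; each subject contributes two epoch
# files, hence subjects[int(index // 2)] below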

for index in tqdm(ranges[range_index]):
    subject = subjects[int(index // 2)]

    file_list = []

    for sub in subjects[:-4]:
        subject_meg = op.join(main_path, "MEG", sub, "new_v1")
        all_epochs = files.get_files(subject_meg, "epochs-TF", "-epo.fif")[2]
        all_epochs.sort()
        inv_sol = files.get_files(subject_meg, "epochs-TF", "-inv.fif")[2]
        inv_sol.sort()
        morph_path = op.join(subject_meg, "{}-morph.h5".format(subject))
        file_list.append((inv_sol[0], all_epochs[0], morph_path))
        file_list.append((inv_sol[1], all_epochs[1], morph_path))

    inv_path, epo_path, morph_path = file_list[index]

    print(inv_path)
    print(epo_path)
    print(morph_path)
Example #25
        n_components = 50
        method = "fastica"
        reject = dict(mag=4e-12)

        ica = ICA(n_components=n_components, method=method)

        ica.fit(raw, picks=picks_meg, reject=reject, verbose=verb)
        print(subj, ix, "ICA_fit")
        raw.save(raw_out_path, overwrite=True)
        mne.write_events(events_out_path, events)
        ica.save(ica_out_path)
        print(subj, ix, "saved")

if pipeline_params["apply_ICA"]:
    ica_json = files.get_files(meg_subj_path, "", "ica-rej.json")[2][0]

    raw_files = files.get_files(meg_subj_path, "raw", "-raw.fif", wp=False)[2]

    comp_ICA_json_path = op.join(meg_subj_path,
                                 "{}-ica-rej.json".format(str(subj).zfill(3)))

    ica_files = files.get_files(meg_subj_path, "", "-ica.fif", wp=False)[2]

    with open(ica_json) as data:
        components_rej = json.load(data)

    for k in components_rej.keys():
        raw_path = op.join(meg_subj_path,
                           files.items_cont_str(raw_files, k, sort=True)[0])
        ica_path = op.join(meg_subj_path,
# open json file
with open(json_file) as pipeline_file:
    parameters = json.load(pipeline_file)

# prepare paths
data_path = parameters["path"]
subjects_dir = parameters["freesurfer"]

subjects = files.get_folders_files(subjects_dir, wp=False)[0]
subjects.sort()
subjects = [i for i in subjects if "fsaverage" not in i]
subject = subjects[subj_index]

subject_meg = op.join(data_path, "MEG", subject, parameters["folder"])

raw_paths = files.get_files(subject_meg, "80-", "-raw.fif", wp=False)[2]
raw_paths.sort()

ica_paths = files.get_files(subject_meg, "80-", "-ica.fif", wp=False)[2]
ica_paths.sort()

components_file_path = op.join(subject_meg, "rejected-components.json")

if not op.exists(components_file_path):
    json_dict = {i: [] for i in raw_paths}
    files.dump_the_dict(components_file_path, json_dict)

raw = mne.io.read_raw_fif(op.join(subject_meg, raw_paths[file_index]),
                          preload=True,
                          verbose=False)