# Locate the raw CTF *.ds recordings for a single subject and print them.
json_file = "settings.json"
print("USING:", json_file)

# Read the pipeline configuration.
with open(json_file) as pipeline_file:
    parameters = json.load(pipeline_file)

path = parameters["dataset_path"]
sfreq = parameters["downsample_dataset"]

# Build the BIDS-style folder layout: data/ plus derivatives/processed/.
sub_path = op.join(path, "data")
der_path = op.join(path, "derivatives")
files.make_folder(der_path)
proc_path = op.join(der_path, "processed")
files.make_folder(proc_path)

subjects = files.get_folders_files(sub_path)[0]
subjects.sort()
# NOTE(review): `index` is expected to be defined earlier in the file
# (e.g. parsed from the command line) — confirm against the full script.
subject = subjects[index]
subject_id = subject.split("/")[-1]

meg_path = op.join(subject, "ses-01", "meg")
sub_path = op.join(proc_path, subject_id)
files.make_folder(sub_path)

# Keep only folders that look like CTF datasets ("ds" in the name).
dss = [folder for folder in files.get_folders_files(meg_path)[0] if "ds" in folder]
dss.sort()

for ds in dss:
    print("INPUT RAW FILE:", ds)
# Collect per-subject behavioural CSV files and load them into a list of
# DataFrames, each tagged with its subject id.
import pandas as pd

# Paths fall back to the lab defaults when not supplied on the command line.
# BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt etc.;
# a missing argv entry raises IndexError specifically.
try:
    subjects_path = str(sys.argv[1])
except IndexError:
    subjects_path = "/home/mszul/datasets/explicit_implicit_beta/derivatives/processed"
try:
    output_path = str(sys.argv[2])
except IndexError:
    output_path = "/home/mszul/git/DANC_learning_beh/data"

print(subjects_path, op.exists(subjects_path))
print(output_path, op.exists(output_path))

subs = files.get_folders_files(subjects_path)[0]
subs.sort()

csv_files = []
for sub in subs:
    print(sub)
    csvs = files.get_files(sub, "sub", "-beh.csv")[2]
    # BUG FIX: was `csvs.sort` (no parentheses), a no-op that never sorted.
    csvs.sort()
    csv_files.extend(csvs)

all_data = []
for i in csv_files:
    # The subject id is the parent folder name of the CSV file.
    sub_id = i.split(sep)[-2]
    file_data = pd.read_csv(i)
    # BUG FIX: attribute assignment (`file_data.subject_id = ...`) does NOT
    # create a DataFrame column — it only sets a Python attribute that is
    # lost on concat/serialisation. Item assignment creates the column.
    file_data["subject_id"] = sub_id
    all_data.append(file_data)
# Find subjects whose MEG session has not yet been extracted and locate
# their zipped recordings.
json_file = "settings.json"
print("USING:", json_file)

# Read the pipeline configuration.
with open(json_file) as pipeline_file:
    parameters = json.load(pipeline_file)

path = parameters["dataset_path"]
sfreq = parameters["downsample_dataset"]

sub_path = op.join(path, "data")
der_path = op.join(path, "derivatives")
files.make_folder(der_path)
proc_path = op.join(der_path, "processed")
files.make_folder(proc_path)

subjects = files.get_folders_files(sub_path)[0]
subjects.sort()

for subject in subjects:
    subject_id = subject.split("/")[-1]
    meg_path = op.join(subject, "ses-01", "meg")
    # Guard clause: a non-empty MEG folder means the archive was already
    # extracted for this subject, so there is nothing to do.
    if len(files.get_folders_files(meg_path)[0]) > 0:
        continue
    sub_path = op.join(proc_path, subject_id)
    files.make_folder(sub_path)
    zip_file = files.get_files(meg_path, "MEG", ".zip")[2][0]
    print(files.get_files(meg_path, "MEG", ".zip")[1])
# Gather the processed raw/event FIF paths for one subject and prepare
# its QC folder.
json_file = "settings.json"
print("USING:", json_file)

# Read the pipeline configuration.
with open(json_file) as pipeline_file:
    parameters = json.load(pipeline_file)

path = parameters["dataset_path"]
sfreq = parameters["downsample_dataset"]

sub_path = op.join(path, "data")
der_path = op.join(path, "derivatives")
files.make_folder(der_path)
proc_path = op.join(der_path, "processed")
files.make_folder(proc_path)

subjects = files.get_folders_files(proc_path)[0]
subjects.sort()
# NOTE(review): `index` is expected to be defined earlier in the file
# (e.g. parsed from the command line) — confirm against the full script.
subject = subjects[index]
subject_id = subject.split("/")[-1]

meg_path = op.join(subject, "ses-01", "meg")
sub_path = op.join(proc_path, subject_id)
files.make_folder(sub_path)

# Per-subject quality-control output folder.
qc_folder = op.join(sub_path, "QC")
files.make_folder(qc_folder)

raw_paths = files.get_files(sub_path, subject_id, "-raw.fif")[2]
raw_paths.sort()
event_paths = files.get_files(sub_path, subject_id, "-eve.fif")[2]
# Resolve the per-subject epoch folders/files and the behaviour-match map.
# NOTE(review): `json_file` must be defined before this chunk — it is not
# visible here; confirm against the full script.
with open(json_file) as pipeline_file:
    parameters = json.load(pipeline_file)

path = parameters["dataset_path"]
sfreq = parameters["downsample_dataset"]
hi_pass = parameters["hi_pass_filter"]

sub_path = op.join(path, "data")
der_path = op.join(path, "derivatives")
files.make_folder(der_path)
proc_path = op.join(der_path, "processed")
files.make_folder(proc_path)

subjects = files.get_folders_files(proc_path)[0]
subjects.sort()
# NOTE(review): `index` is expected to be defined earlier in the file —
# confirm against the full script.
subject = subjects[index]
subject_id = subject.split("/")[-1]

sub_path = op.join(proc_path, subject_id)
files.make_folder(sub_path)

# Setting the paths and extracting files: split the subject's folders into
# motor- and visual-locked sets.
slt_mot_paths = [i for i in files.get_folders_files(sub_path)[0] if "motor" in i]
slt_vis_paths = [i for i in files.get_folders_files(sub_path)[0] if "visual" in i]
epo_mot_paths = files.get_files(sub_path, "sub", "motor-epo.fif")[2]
epo_vis_paths = files.get_files(sub_path, "sub", "visual-epo.fif")[2]
beh_match_path = files.get_files(sub_path, "sub", "beh-match.json")[2][0]

with open(beh_match_path) as f:
    beh_match = json.load(f)
# Resolve raw CTF *.res4 headers and the FreeSurfer surface for one subject.
# NOTE(review): `pipeline_file` must be an open file handle defined before
# this chunk — confirm against the full script.
parameters = json.load(pipeline_file)


def split_and_eval(x):
    """Split a comma-separated string and evaluate each piece.

    NOTE(review): eval() executes arbitrary code; if these strings can come
    from untrusted input, consider ast.literal_eval instead — confirm.
    """
    return [eval(token) for token in x.split(",")]


path = parameters["dataset_path"]
sfreq = parameters["downsample_dataset"]

der_path = op.join(path, "derivatives")
files.make_folder(der_path)
proc_path = op.join(der_path, "processed")
files.make_folder(proc_path)

subjects = files.get_folders_files(proc_path)[0]
subjects.sort()
# NOTE(review): `index` is expected to be defined earlier in the file —
# confirm against the full script.
subject = subjects[index]
subject_id = subject.split("/")[-1]
print(subject)

raw_meg_dir = op.join(path, "data")
raw_meg_path = op.join(raw_meg_dir, subject_id, "ses-01", "meg")

# Keep only real dataset folders, dropping any "misc" entries.
ds_paths = [folder for folder in files.get_folders_files(raw_meg_path)[0] if "misc" not in folder]
ds_paths.sort()

# One *.res4 header per dataset folder.
res4_paths = [files.get_files(ds_dir, "", ".res4")[2][0] for ds_dir in ds_paths]
res4_paths.sort()

fs_folder = op.join(der_path, "freesurfer", subject_id)
surface_file = op.join(fs_folder, "pial.ds.gii")