VERBOSE = True

import os
import sys
# import errno  # should do some error checking...
# import subprocess

# ENH: install "official" version of stormdb on isis/hyades
path_to_stormdb = '/users/cjb/src/git/cfin-tools/stormdb/stormdb'
sys.path.append(path_to_stormdb)

# change to stormdb.access (mod. __init__.py)
from access import Query

proj_code = 'MINDLAB2015_MR-YoungAddiction'
db = Query(proj_code)

proj_folder = os.path.join('/projects', proj_code)
scratch_folder = os.path.join(proj_folder, 'scratch')
recon_all_bin = '/opt/local/freesurfer-releases/5.3.0/bin/recon-all'
subjects_dir = os.path.join(scratch_folder, 'fs_subjects_dir')
script_dir = os.path.join(proj_folder, 'scripts', 'MR_scripts')

included_subjects = db.get_subjects()
# included_subjects = included_subjects[3:]
# just test with first one!
# included_subjects = [included_subjects[1]]

for sub in included_subjects:
    # this is an example of getting the DICOM files as a list
    sequence_name = 't1_mp2rage_sag_p2_iso_UNI_Images'
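# --- Sketch (assumption, not part of the original script): one way the loop
# body could continue, feeding the first DICOM of `sequence_name` into
# FreeSurfer's recon-all. `dicom_files` is a hypothetical placeholder for
# whatever stormdb query returns the DICOM paths for this subject/sequence;
# treat the submission details below as an illustration only.
import subprocess


def submit_recon_all(sub, dicom_files):
    """Run recon-all for one subject; one DICOM from the series is enough."""
    cmd = [recon_all_bin,
           '-subjid', sub,
           '-sd', subjects_dir,
           '-i', dicom_files[0],
           '-all']
    if VERBOSE:
        print(' '.join(cmd))
    return subprocess.call(cmd)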
import subprocess


def run_shell_command(command):
    """Run a shell command and return its exit code."""
    proc = subprocess.Popen([command], shell=True)
    proc.communicate()
    return proc.returncode


proj_code = "MINDLAB2013_03-MEG-BlindPerception"
proj_path = "/projects/" + proj_code
analysis_name = "sss"

VERBOSE = True
FAKE = False  # NB

n_processes = 3  # Remember that each process is using n_threads cores by default!
n_threads = 2  # 2 threads per process

db = Query(proj_code=proj_code, verbose=True)

## Make copies of the binary, calibration and cross-talk correction files
## Place them e.g. in "proj_path"/misc/maxfilter
# cp /neuro/bin/util/x86_64-pc-linux-gnu/maxfilter-2.2.15 .
mx_cmd = proj_path + "/misc/maxfilter/maxfilter-2.2.15"
# cp /neuro/databases/sss/sss_cal.dat .
cal_db = proj_path + "/misc/maxfilter/sss_cal.dat"
# cp /neuro/databases/ctc/ct_sparse.fif .
ctc_db = proj_path + "/misc/maxfilter/ct_sparse.fif"

mf_params_defaults = {
    "input_file": None,
    "output_file": None,
    "autobad": "on",
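# --- Sketch (assumption, not the project's actual code): how the defaults
# above might be expanded into a maxfilter command line. The flag names
# follow the standard maxfilter-2.2 CLI (-f, -o, -cal, -ctc, -autobad); the
# exact parameter set used in the real script is not shown in this fragment.
def build_maxfilter_cmd(params):
    """Assemble a maxfilter call string from a parameter dict (sketch)."""
    cmd = [mx_cmd,
           "-f", params["input_file"],
           "-o", params["output_file"],
           "-cal", cal_db,
           "-ctc", ctc_db,
           "-autobad", str(params["autobad"])]
    return " ".join(cmd)

# Example usage (file names are placeholders):
# params = dict(mf_params_defaults, input_file="raw.fif", output_file="raw_sss.fif")
# run_shell_command(build_maxfilter_cmd(params))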
import os
import sys
import errno  # should do some error checking...
import subprocess

# ENH: install "official" version of stormdb on isis/hyades
path_to_stormdb = '/users/cjb/src/git/cfin-tools/stormdb'
sys.path.append(path_to_stormdb)

# change to stormdb.access (mod. __init__.py)
from access import Query

import numpy as np

proj_code = 'MEG_EEG-Training'
db = Query(proj_code)

proj_folder = os.path.join('/projects', proj_code)
scratch_folder = os.path.join(proj_folder, 'scratch')
recon_all_bin = '/opt/local/freesurfer-releases/5.3.0/bin/recon-all'
subjects_dir = os.path.join(scratch_folder, 'fs_subjects_dir')
script_dir = os.path.join(proj_folder, 'scripts', 'mads_test_scripts')

included_subjects = db.get_subjects()
# just test with first one!
included_subjects = [included_subjects[0]]

for sub in included_subjects:
    # this is an example of getting the DICOM files as a list
    sequence_name = 't1_mprage_3D_sag'
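# --- Sketch (assumption, not from the original script) of the error checking
# hinted at by the "import errno" comment above: create the FreeSurfer
# subjects_dir on the first run and ignore the error if it already exists.
def ensure_dir(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise  # re-raise anything other than "directory already exists"

ensure_dir(subjects_dir)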