def setup_logging(script_name):
    """Save mne-python log to a file in the logs folder."""
    log_basename = Path(script_name).stem
    log_fname = log_basename + ".log"
    log_savepath = paths.dirs.logs / log_fname

    logger = getLogger(log_basename)
    logger.setLevel(logging.INFO)
    if not logger.hasHandlers():
        stderr_handler = StreamHandler()
        file_handler = FileHandler(log_savepath)
        stderr_handler.setLevel(logging.WARNING)
        file_handler.setLevel(logging.INFO)
        fmt = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        stderr_handler.setFormatter(fmt)
        file_handler.setFormatter(fmt)
        logger.addHandler(stderr_handler)
        logger.addHandler(file_handler)

    write_log_header(log_savepath)
    set_log_file(log_savepath, overwrite=False)
    return logger
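# A hedged usage sketch for setup_logging() above; the call site and the
# message strings are assumptions, not part of the original snippet (which
# relies on the surrounding module providing paths.dirs.logs,
# write_log_header, and the logging/mne imports).
logger = setup_logging(__file__)
logger.info("preprocessing started")    # INFO goes to the .log file only
logger.warning("bad channel detected")  # WARNING also echoes to stderr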
def test_cuda():
    """Test CUDA-based filtering
    """
    Fs = 500
    sig_len_secs = 20
    a = np.random.randn(sig_len_secs * Fs)

    set_log_file(log_file, overwrite=True)
    for fl in [None, 2048]:
        bp = band_pass_filter(a, Fs, 4, 8, n_jobs=1, filter_length=fl)
        bs = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs=1,
                              filter_length=fl)
        lp = low_pass_filter(a, Fs, 8, n_jobs=1, filter_length=fl)
        hp = high_pass_filter(lp, Fs, 4, n_jobs=1, filter_length=fl)

        bp_c = band_pass_filter(a, Fs, 4, 8, n_jobs='cuda', filter_length=fl,
                                verbose='INFO')
        bs_c = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
                                filter_length=fl, verbose='INFO')
        lp_c = low_pass_filter(a, Fs, 8, n_jobs='cuda', filter_length=fl,
                               verbose='INFO')
        hp_c = high_pass_filter(lp, Fs, 4, n_jobs='cuda', filter_length=fl,
                                verbose='INFO')

        assert_array_almost_equal(bp, bp_c, 12)
        assert_array_almost_equal(bs, bs_c, 12)
        assert_array_almost_equal(lp, lp_c, 12)
        assert_array_almost_equal(hp, hp_c, 12)

    # check to make sure we actually used CUDA
    set_log_file()
    out = open(log_file).readlines()
    assert_true(sum(['Using CUDA for FFT FIR filtering' in o
                     for o in out]) == 8)
def test_cuda():
    """Test CUDA-based filtering
    """
    # NOTE: don't make test_cuda() the last test, or pycuda might spew
    # some warnings about clean-up failing
    # Also, using `n_jobs='cuda'` on a non-CUDA system should be fine,
    # as it should fall back to using n_jobs=1.
    tempdir = _TempDir()
    log_file = op.join(tempdir, 'temp_log.txt')
    sfreq = 500
    sig_len_secs = 20
    a = np.random.randn(sig_len_secs * sfreq)

    set_log_file(log_file, overwrite=True)
    for fl in ['10s', None, 2048]:
        bp = band_pass_filter(a, sfreq, 4, 8, n_jobs=1, filter_length=fl)
        bs = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs=1,
                              filter_length=fl)
        lp = low_pass_filter(a, sfreq, 8, n_jobs=1, filter_length=fl)
        hp = high_pass_filter(lp, sfreq, 4, n_jobs=1, filter_length=fl)

        bp_c = band_pass_filter(a, sfreq, 4, 8, n_jobs='cuda',
                                filter_length=fl, verbose='INFO')
        bs_c = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
                                filter_length=fl, verbose='INFO')
        lp_c = low_pass_filter(a, sfreq, 8, n_jobs='cuda', filter_length=fl,
                               verbose='INFO')
        hp_c = high_pass_filter(lp, sfreq, 4, n_jobs='cuda', filter_length=fl,
                                verbose='INFO')

        assert_array_almost_equal(bp, bp_c, 12)
        assert_array_almost_equal(bs, bs_c, 12)
        assert_array_almost_equal(lp, lp_c, 12)
        assert_array_almost_equal(hp, hp_c, 12)

    # check to make sure we actually used CUDA
    set_log_file()
    with open(log_file) as fid:
        out = fid.readlines()
    # triage based on whether or not we actually expected to use CUDA
    from mne.cuda import _cuda_capable  # allow above funs to set it
    tot = 12 if _cuda_capable else 0
    assert_true(sum(['Using CUDA for FFT FIR filtering' in o
                     for o in out]) == tot)

    # check resampling
    a = np.random.RandomState(0).randn(3, sig_len_secs * sfreq)
    a1 = resample(a, 1, 2, n_jobs=2, npad=0)
    a2 = resample(a, 1, 2, n_jobs='cuda', npad=0)
    a3 = resample(a, 2, 1, n_jobs=2, npad=0)
    a4 = resample(a, 2, 1, n_jobs='cuda', npad=0)
    assert_array_almost_equal(a3, a4, 14)
    assert_array_almost_equal(a1, a2, 14)
    assert_array_equal(resample([0, 0], 2, 1, n_jobs='cuda'),
                       [0., 0., 0., 0.])
    assert_array_equal(resample(np.zeros(2, np.float32), 2, 1, n_jobs='cuda'),
                       [0., 0., 0., 0.])
def test_cuda():
    """Test CUDA-based filtering
    """
    # NOTE: don't make test_cuda() the last test, or pycuda might spew
    # some warnings about clean-up failing
    # Also, using `n_jobs='cuda'` on a non-CUDA system should be fine,
    # as it should fall back to using n_jobs=1.
    tempdir = _TempDir()
    log_file = op.join(tempdir, 'temp_log.txt')
    sfreq = 500
    sig_len_secs = 20
    a = np.random.randn(sig_len_secs * sfreq)

    set_log_file(log_file, overwrite=True)
    for fl in ['10s', None, 2048]:
        bp = band_pass_filter(a, sfreq, 4, 8, n_jobs=1, filter_length=fl)
        bs = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs=1,
                              filter_length=fl)
        lp = low_pass_filter(a, sfreq, 8, n_jobs=1, filter_length=fl)
        hp = high_pass_filter(lp, sfreq, 4, n_jobs=1, filter_length=fl)

        bp_c = band_pass_filter(a, sfreq, 4, 8, n_jobs='cuda',
                                filter_length=fl, verbose='INFO')
        bs_c = band_stop_filter(a, sfreq, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
                                filter_length=fl, verbose='INFO')
        lp_c = low_pass_filter(a, sfreq, 8, n_jobs='cuda', filter_length=fl,
                               verbose='INFO')
        hp_c = high_pass_filter(lp, sfreq, 4, n_jobs='cuda', filter_length=fl,
                                verbose='INFO')

        assert_array_almost_equal(bp, bp_c, 12)
        assert_array_almost_equal(bs, bs_c, 12)
        assert_array_almost_equal(lp, lp_c, 12)
        assert_array_almost_equal(hp, hp_c, 12)

    # check to make sure we actually used CUDA
    set_log_file()
    with open(log_file) as fid:
        out = fid.readlines()
    # triage based on whether or not we actually expected to use CUDA
    tot = 12 if cuda_capable else 0
    assert_true(sum(['Using CUDA for FFT FIR filtering' in o
                     for o in out]) == tot)

    # check resampling
    a = np.random.RandomState(0).randn(3, sig_len_secs * sfreq)
    a1 = resample(a, 1, 2, n_jobs=2, npad=0)
    a2 = resample(a, 1, 2, n_jobs='cuda', npad=0)
    a3 = resample(a, 2, 1, n_jobs=2, npad=0)
    a4 = resample(a, 2, 1, n_jobs='cuda', npad=0)
    assert_array_almost_equal(a3, a4, 14)
    assert_array_almost_equal(a1, a2, 14)
def __init__(self, db: str, path: str, include_extensions: List[str] = ['edf'],
             exclude_events: List[str] = None) -> None:
    logging.debug('Create index at %s', db)
    logging.debug('Load index at %s', db)
    engine = create_engine(db)
    BaseTable.metadata.create_all(engine)
    self.db = sessionmaker(bind=engine)()
    self.path = path
    self.include_extensions = include_extensions
    self.exclude_events = exclude_events
    logging.debug('Redirect MNE logging interface to file')
    set_log_file(join(path, 'mne.log'), overwrite=False)
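# A minimal instantiation sketch for the indexer __init__ above; the class
# name Index, the SQLite URL, and the data path are assumptions for
# illustration only.
index = Index(db='sqlite:///dataset.db', path='/data/eeg',
              include_extensions=['edf'])
# From here on, MNE output is appended to /data/eeg/mne.log, since
# set_log_file() was called with overwrite=False.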
def test_notch_filters():
    """Test notch filters
    """
    tempdir = _TempDir()
    log_file = op.join(tempdir, 'temp_log.txt')
    # let's use an ugly, prime sfreq for fun
    sfreq = 487.0
    sig_len_secs = 20
    t = np.arange(0, int(sig_len_secs * sfreq)) / sfreq
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    rng = np.random.RandomState(0)
    a = rng.randn(int(sig_len_secs * sfreq))
    orig_power = np.sqrt(np.mean(a ** 2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'fft')
    assert_raises(ValueError, notch_filter, a, sfreq, None, 'iir')
    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
    filter_lengths = [None, None, None, 8192, None]
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        if lf is None:
            set_log_file(log_file, overwrite=True)

        b = notch_filter(a, sfreq, lf, filter_length=fl, method=meth,
                         verbose='INFO')

        if lf is None:
            set_log_file()
            with open(log_file) as fid:
                out = fid.readlines()
            if len(out) != 2:
                raise ValueError('Detected frequencies not logged properly')
            out = np.fromstring(out[1], sep=', ')
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(sum_squared(b) / b.size)
        assert_almost_equal(new_power, orig_power, tol)
def test_cuda():
    """Test CUDA-based filtering
    """
    # NOTE: don't make test_cuda() the last test, or pycuda might spew
    # some warnings about clean-up failing
    Fs = 500
    sig_len_secs = 20
    a = np.random.randn(sig_len_secs * Fs)

    set_log_file(log_file, overwrite=True)
    for fl in ['10s', None, 2048]:
        bp = band_pass_filter(a, Fs, 4, 8, n_jobs=1, filter_length=fl)
        bs = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs=1,
                              filter_length=fl)
        lp = low_pass_filter(a, Fs, 8, n_jobs=1, filter_length=fl)
        hp = high_pass_filter(lp, Fs, 4, n_jobs=1, filter_length=fl)

        bp_c = band_pass_filter(a, Fs, 4, 8, n_jobs='cuda', filter_length=fl,
                                verbose='INFO')
        bs_c = band_stop_filter(a, Fs, 4 - 0.5, 8 + 0.5, n_jobs='cuda',
                                filter_length=fl, verbose='INFO')
        lp_c = low_pass_filter(a, Fs, 8, n_jobs='cuda', filter_length=fl,
                               verbose='INFO')
        hp_c = high_pass_filter(lp, Fs, 4, n_jobs='cuda', filter_length=fl,
                                verbose='INFO')

        assert_array_almost_equal(bp, bp_c, 12)
        assert_array_almost_equal(bs, bs_c, 12)
        assert_array_almost_equal(lp, lp_c, 12)
        assert_array_almost_equal(hp, hp_c, 12)

    # check to make sure we actually used CUDA
    set_log_file()
    with open(log_file) as fid:
        out = fid.readlines()
    assert_true(sum(['Using CUDA for FFT FIR filtering' in o
                     for o in out]) == 12)

    # check resampling
    a = np.random.RandomState(0).randn(3, sig_len_secs * Fs)
    a1 = resample(a, 1, 2, n_jobs=2, npad=0)
    a2 = resample(a, 1, 2, n_jobs='cuda', npad=0)
    a3 = resample(a, 2, 1, n_jobs=2, npad=0)
    a4 = resample(a, 2, 1, n_jobs='cuda', npad=0)
    assert_array_almost_equal(a3, a4, 14)
    assert_array_almost_equal(a1, a2, 14)
def test_notch_filters():
    """Test notch filters
    """
    # let's use an ugly, prime Fs for fun
    Fs = 487.0
    sig_len_secs = 20
    t = np.arange(0, sig_len_secs * Fs) / Fs
    freqs = np.arange(60, 241, 60)

    # make a "signal"
    rng = np.random.RandomState(0)
    a = rng.randn(sig_len_secs * Fs)
    orig_power = np.sqrt(np.mean(a ** 2))
    # make line noise
    a += np.sum([np.sin(2 * np.pi * f * t) for f in freqs], axis=0)

    # only allow None line_freqs with 'spectrum_fit' mode
    assert_raises(ValueError, notch_filter, a, Fs, None, 'fft')
    assert_raises(ValueError, notch_filter, a, Fs, None, 'iir')
    methods = ['spectrum_fit', 'spectrum_fit', 'fft', 'fft', 'iir']
    filter_lengths = [None, None, None, 8192, None]
    line_freqs = [None, freqs, freqs, freqs, freqs]
    tols = [2, 1, 1, 1]
    for meth, lf, fl, tol in zip(methods, line_freqs, filter_lengths, tols):
        if lf is None:
            set_log_file(log_file)

        b = notch_filter(a, Fs, lf, filter_length=fl, method=meth,
                         verbose='INFO')

        if lf is None:
            set_log_file()
            out = open(log_file).readlines()
            if len(out) != 2:
                raise ValueError('Detected frequencies not logged properly')
            out = np.fromstring(out[1], sep=', ')
            assert_array_almost_equal(out, freqs)
        new_power = np.sqrt(np.mean(b ** 2))
        assert_almost_equal(new_power, orig_power, tol)
def __init__(self, path: str, name: str, version: str = None,
             extensions: List[str] = [".edf"],
             exclude_file: List[str] = None,
             exclude_channels_set: List[str] = None,
             exclude_channels_reference: List[str] = None,
             exclude_sampling_frequency: List[str] = None,
             minimum_annotation_duration: float = None) -> None:
    # Set basic attributes
    self.path = os.path.abspath(os.path.join(path, version))
    self.name = name
    self.version = version
    # Set data set filter attributes
    self.extensions = extensions if extensions else []
    self.exclude_file = exclude_file if exclude_file else []
    self.exclude_channels_set = exclude_channels_set if exclude_channels_set else []
    self.exclude_channels_reference = exclude_channels_reference if exclude_channels_reference else []
    self.exclude_sampling_frequency = exclude_sampling_frequency if exclude_sampling_frequency else []
    self.minimum_annotation_duration = minimum_annotation_duration if minimum_annotation_duration else 0
    logging.info("Init dataset '%s'@'%s' at '%s'", self.name, self.version, self.path)
    # Make workspace directory
    logging.debug("Make .pyeeglab directory")
    workspace = os.path.join(self.path, ".pyeeglab")
    os.makedirs(workspace, exist_ok=True)
    logging.debug("Make .pyeeglab/cache directory")
    os.makedirs(os.path.join(workspace, "cache"), exist_ok=True)
    logging.debug("Set MNE log .pyeeglab/mne.log")
    mne.set_log_file(os.path.join(workspace, "mne.log"), overwrite=False)
    # Index data set files
    self.index()
def mnepy_avg(subjID, sessID, eve, ssp_type):
    import mne
    from mne import fiff
    from mne import viz
    from mne.viz import evoked
    import argparse
    import condCodes as cc
    import copy
    import numpy

    #######Get Input ##
    print subjID
    print sessID
    print eve
    data_path = '/home/custine/MEG/data/krns_kr3/' + subjID + '/' + sessID
    if ssp_type == 'run':
        raw_data_path = '/home/custine/MEG/data/krns_kr3/' + subjID + '/' + sessID
        runSuffix = '_raw.fif'
    elif ssp_type == 'ecg' or ssp_type == 'eog':
        raw_data_path = '/home/custine/MEG/data/krns_kr3/' + subjID + '/' + sessID + '/ssp/mne'
        runSuffix = '_clean_' + ssp_type + '_raw.fif'

    ########Analysis Parameters##
    ###Event file suffix
    eveSuffix = '-Triggers.eve'
    eve_file = eve + eveSuffix
    print "You have chosen the event file " + eve_file

    ###Projection and Average Reference and Filtering
    projVal = True
    avgRefVal = False
    hp_cutoff = 0.7
    lp_cutoff = 50

    #######Experiment specific parameters
    ###EventLabels and Runs
    runs = cc.runDict[eve]  ##TESTING
    print runs
    labelList = cc.condLabels[eve]
    event_id = {}
    condName = {}
    for row in labelList:
        event_id[row[1]] = int(row[0])
        condName[row[1]] = row[1]
    print event_id

    ###TimeWindow
    tmin = -.5
    tmax = float(cc.epMax[eve])
    # tmax = 3.00  # 4Words Category

    ########Artifact rejection parameters
    ###General
    gradRej = 4000e-13
    magRej = 4000e-12
    magFlat = 1e-14
    gradFlat = 1000e-15

    ######Compute averages for each run
    evoked = []
    evokedRuns = []
    for runID in runs:
        print runID
        ##Setup Subject Specific Information
        event_file = data_path + '/eve/triggers/' + subjID + '_' + sessID + '_' + runID + '_' + eve_file
        print event_file
        raw_file = raw_data_path + '/' + subjID + '_' + sessID + '_' + runID + runSuffix  ##Change this suffix if you are using SSP ##_clean_ecg_
        avgLog_file = data_path + '/ave_projon/logs/' + subjID + '_' + sessID + '_' + runID + '_' + eve + '_' + ssp_type + '-ave.log'
        print raw_file, avgLog_file

        ##Setup Reading fiff data structure
        print 'Reading Raw data... '
        raw = fiff.Raw(raw_file, preload=True)
        events = mne.read_events(event_file)
        print events
        mne.set_log_file(fname=avgLog_file, overwrite=True)

        ##Filter raw data
        fiff.Raw.filter(raw, l_freq=hp_cutoff, h_freq=lp_cutoff)

        ##Pick all channels
        picks = []
        for i in range(raw.info['nchan']):
            picks.append(i)

        ##Read Epochs and compute Evoked :)
        print 'Reading Epochs from evoked raw file...'
        epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                            baseline=(-0.5, 0), picks=picks, proj=True,
                            name=condName, preload=True,
                            flat=dict(mag=magFlat, grad=gradFlat),
                            reject=dict(mag=magRej, grad=gradRej))
        print epochs
        evoked = [epochs[cond].average(picks=picks) for cond in event_id]
        # epochs.plot()

        ##Write Evoked
        print 'Writing Evoked data to -ave.fif file...'
        fiff.write_evoked(data_path + '/ave_projon/' + subjID + '_' + sessID + '_' + runID + '_' + eve + '_' + ssp_type + '-ave.fif', evoked)
        evokedRuns.append(evoked)
        print 'Completed! See ave.fif result in folder', data_path + '/ave_projon/'

    ###############################################################################
    ##Show the Result - Plotting the evoked data
    #mne.viz.evoked.plot_evoked(evoked, exclude = [])
    print len(evokedRuns)

    ##Make the Final Grand average of all the runs
    runData = []
    runNave = []
    newEvoked = copy.deepcopy(evoked)
    print newEvoked
    numCond = len(newEvoked)
    print 'Length', numCond
    for c in range(numCond):
        for evRun in evokedRuns:
            runData.append(evRun[c].data)
            runNave.append(evRun[c].nave)
        print 'Jane Here', c, runNave
        gaveData = numpy.mean(runData, 0)
        gaveNave = numpy.sum(runNave)
        print 'Sum', sum(runNave)
        newEvoked[c].data = gaveData
        newEvoked[c].nave = gaveNave
        # runData = []
        # runNave = []
    print newEvoked[0].nave

    ##Write Grand average Evoked
    fiff.write_evoked(data_path + '/ave_projon/' + subjID + '_' + sessID + '_' + eve + '_' + ssp_type + '_All-ave.fif', newEvoked)
logger.setLevel(LOG_LEVEL)
logger.handlers = []

# add file print_handler
msg_format = (
    "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s"
)
formatter = logging.Formatter(msg_format)
logname = os.path.join(LOGDIR, "eztrack.log")
file_handler = RotatingFileHandler(logname, maxBytes=200000, backupCount=10)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

# set logging level for submodules: mne, mne-bids
mne.set_log_level(LOG_LEVEL)
mne.set_log_file(logname, msg_format, overwrite=False)

# add stream print_handler to stdout
# print_handler = logging.StreamHandler(sys.stdout)
# print_handler.setLevel(logging.DEBUG)
# logger.addHandler(print_handler)
logger.propagate = False

# Help message settings
help_colors = {
    "cls": HelpColorsCommand,
    "help_headers_color": "yellow",
    "help_options_color": "green",
}
import pylab
import matplotlib.pyplot as plt
import mne
import numpy as np
import glob
import os.path
import pandas as pd
import warnings
import logging

mne.set_log_level('Warning')
mne.set_log_file('abraxas_erp.log', overwrite=True)

layout = mne.channels.read_layout("EEG1005.lay")
montage = mne.channels.read_montage(kind="standard_1020")

raw = {}
epochs = {}
evoked = {}

# event trigger and conditions
event_id = {
    'iien': 182, 'iiev': 181,
    'iaen': 172, 'iaev': 171,
    'aien': 162, 'aiev': 161,
    'aaen': 152, 'aaev': 151,
    'iipn': 142, 'iipv': 141,
    'iapn': 132,
import os
from multiprocessing import Pool

import mne
import sleep_study as ss

channels = ['EEG C3-M2',
            'EEG O1-M2',
            'EEG O2-M1',
            'EEG CZ-O1',
            'EEG C4-M1',
            'EEG F4-M1']

out_dir = '~/preprocessed'
out_dir = os.path.expanduser(out_dir)

mne.set_log_file('log.txt')

existing = [x[:-4] for x in os.listdir('preprocessed')]


def preprocess(i):
    # Potential problem: the order of channels is not considered
    print('Processing %d' % i)
    import sleep_study as ss
    ss.init()
    study = ss.data.study_list[i]

    if study in existing:
        return

    try:
        raw = ss.data.load_study(study, verbose=False)
        if not all([name in raw.ch_names for name in channels]):
            return
from mayavi import mlab  # noqa
from surfer import Brain  # noqa

import mne

## get analysis parameters from config file
if len(sys.argv) > 1:
    print sys.argv[1]
    module_name = sys.argv[1]
else:
    module_name = 'WH_config'

C = importlib.import_module(module_name)
reload(C)

mne.set_log_file(fname=C.bem_log_file, overwrite=False)

# plt.ion()  # interactive plotting

# subject numbers
subjs = C.subjs  ###
n_sbs = len(subjs)

# for qsub
if len(sys.argv) > 2:  # if in parallel mode
    print "Running subject(s) {0} now in PARALLEL mode".format(sys.argv)
    ss_idx = map(int, sys.argv[2:])
    subjs_new = []
def test_logging():
    """Test logging (to file)
    """
    old_log_file = open(fname_log, 'r')
    old_lines = clean_lines(old_log_file.readlines())
    old_log_file.close()
    old_log_file_2 = open(fname_log_2, 'r')
    old_lines_2 = clean_lines(old_log_file_2.readlines())
    old_log_file_2.close()

    if op.isfile(test_name):
        os.remove(test_name)
    # test it one way (printing default off)
    set_log_file(test_name)
    set_log_level('WARNING')
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1)
    assert_true(open(test_name).readlines() == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose=False)
    assert_true(open(test_name).readlines() == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
    assert_true(open(test_name).readlines() == [])
    # SHOULD print
    evoked = Evoked(fname_evoked, setno=1, verbose=True)
    new_log_file = open(test_name, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines)

    # now go the other way (printing default on)
    os.remove(test_name)
    set_log_file(test_name)
    set_log_level('INFO')
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
    assert_true(open(test_name).readlines() == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose=False)
    assert_true(open(test_name).readlines() == [])
    # SHOULD print
    evoked = Evoked(fname_evoked, setno=1)
    new_log_file = open(test_name, 'r')
    old_log_file = open(fname_log, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines)

    # check to make sure appending works (and as default, raises a warning)
    with warnings.catch_warnings(True) as w:
        set_log_file(test_name, overwrite=False)
        assert len(w) == 0
        set_log_file(test_name)
        assert len(w) == 1
    evoked = Evoked(fname_evoked, setno=1)
    new_log_file = open(test_name, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines_2)

    # make sure overwriting works
    set_log_file(test_name, overwrite=True)
    # this line needs to be called to actually do some logging
    evoked = Evoked(fname_evoked, setno=1)
    new_log_file = open(test_name, 'r')
    new_lines = clean_lines(new_log_file.readlines())
    assert_equal(new_lines, old_lines)
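# The test above pins down the append/overwrite semantics of set_log_file();
# a minimal sketch of the same behavior (the file name is hypothetical):
import mne

mne.set_log_file('example.log', overwrite=True)   # start a fresh log file
mne.set_log_file('example.log', overwrite=False)  # append, no warning
mne.set_log_file('example.log')                   # appends too, but warns by default
mne.set_log_file()                                # stop logging to a file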
###Get Subject and run-specific parameters
print evRun
data_path = '/cluster/kuperberg/SemPrMM/MEG/data/' + args.subj + '/'
event_fname = data_path + 'eve/' + args.subj + '_' + expName + evRun + evSuffix
#event_fname = data_path + args.subj + '_' + expName + evRun + evSuffix
print event_fname
raw_fname = data_path + args.subj + '_' + expName + evRun + '_ssp_raw.fif'
##Using ssp because ecg SSP has been performed only for ya12, ya27, ya31
##(AXCPT); the rest have been sym-linked to the raw file.
avg_log_fname = data_path + 'ave_projon/logs/' + args.subj + '_' + expName + evRun + '-equaliseTar-test-ave.log'  ##CHANGE THIS

###Setup for reading the original raw data and events
raw = fiff.Raw(raw_fname, preload=True)
events = mne.read_events(event_fname)
raw_skip = raw.first_samp
mne.set_log_file(fname=avg_log_fname, overwrite=True)

###Correct events for the fact that ssp data has the skip removed
if (args.subj == "ya12") or (args.subj == 'ya27') or (args.subj == 'ya31'):
    print "Adjusting events for SSP raw.fif files"
    for row in events:
        row[0] = row[0] - raw_skip

###Filter raw data
fiff.Raw.filter(raw, l_freq=hp_cutoff, h_freq=lp_cutoff)

###Pick all channels, including stimulus triggers
picks = []
for i in range(raw.info['nchan']):
    picks.append(i)
from PyQt5.QtCore import QTimer

from cognigraph.pipeline import Pipeline
from cognigraph.nodes import sources, processors, outputs
from cognigraph.gui.window import GUIWindow
from cognigraph.gui.async_pipeline_update import AsyncUpdater
from cognigraph.gui.forward_dialog import FwdSetupDialog

np.warnings.filterwarnings('ignore')  # noqa

# ----------------------------- setup logging ----------------------------- #
logfile = None
format = '%(asctime)s:%(name)-17s:%(levelname)s:%(message)s'
logging.basicConfig(level=logging.INFO, filename=logfile, format=format)
logger = logging.getLogger(__name__)
mne.set_log_level('ERROR')
mne.set_log_file(fname=logfile, output_format=format)
# -------------------------------------------------------------------------- #

# ----------------------------- setup argparse ----------------------------- #
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', type=argparse.FileType('r'),
                    help='data path')
parser.add_argument('-f', '--forward', type=argparse.FileType('r'),
                    help='forward model path')
args = parser.parse_args()
# -------------------------------------------------------------------------- #
import scipy.io as sio
import numpy as np
import pandas as pd
import mne
import random

mne.set_log_file('./mne_output', overwrite=True)

srate = 1024
factor = 1. / 1000000.
interval = 5
start_time = 0
n_pnts = 1024
n_channels = 28
data_standardization = True
raw_plot = False
l_freq, h_freq = 5., 30.
channels = [
    'C5', 'C3', 'C1', 'C2', 'C4', 'C6',
    'CP5', 'CP3', 'CP1', 'CP2', 'CP4', 'CP6',
    'P7', 'P5', 'P3', 'P1', 'P2', 'P4', 'P6', 'P8',
    'F7', 'F5', 'F3', 'F1', 'F2', 'F4', 'F6', 'F8'
]
lab_index = 3
subjects = [
    'ckm', 'clx', 'csb', 'fy', 'lw', 'ly', 'phl', 'szl', 'xwt', 'yfw', 'zjh'
]
subjects_num = 11
# subject = [1]
ch_names = [
    'Fp1', 'Fpz', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8',
    'FC5', 'FC1', 'FC2', 'FC6', 'M1', 'T7', 'C3', 'Cz', 'C4', 'T8',
    'M2', 'CP5', 'CP1', 'CP2',
print avg_log_fname

###Setup for reading the original raw data and events#######
raw = fiff.Raw(raw_fname, preload=True)
events = mne.read_events(event_fname)
raw_skip = raw.first_samp

###Correct events for the fact that ssp data has the skip removed
if (args.subj == "ya12") or (args.subj == 'ya27') or (args.subj == 'ya31'):
    ##Change back 0-- testing with ssp for all subjects CU 12/26/13
    print "Adjusting events for SSP raw.fif files"
    for row in events:
        row[0] = row[0] - raw_skip

mne.set_log_file(fname=avg_log_fname, overwrite=True)

###Filter SSP raw data#######
fiff.Raw.filter(raw, l_freq=hp_cutoff, h_freq=lp_cutoff)

###Pick all channels, including stimulus triggers#######
picks = []
for i in range(raw.info['nchan']):
    picks.append(i)

###Read Prime Epochs#######
###Include catch for subjects where ssp-ecg was performed###
epochsAPrime = mne.Epochs(raw, events, event_id['APrime'], tmin,
def mnepy_avg(subjID, run):
    import mne
    from mne import fiff
    from mne import viz
    #from mne.viz import evoked
    import argparse
    import copy
    import numpy

    #######Get Input ##
    print subjID

    ########Analysis Parameters##
    ###Projection and Average Reference and Filtering
    projVal = True
    avgRefVal = False
    hp_cutoff = 0.7
    lp_cutoff = 40
    event_id = 2

    #######Experiment specific parameters
    ###TimeWindow
    tmin = 0
    tmax = 2.0  ##float(cc.epMax[eve])
    if run == 'CRM':
        tmax = 1.0
    elif run == 'DFNAM':
        tmax = 2.0

    ########Artifact rejection parameters
    ###General
    gradRej = 2000e-13
    magRej = 3000e-15
    magFlat = 1e-14
    gradFlat = 1000e-15

    #######Compute averages for each run
    # evoked = []
    # if subjID == 'EP2':
    #     runs = ['run1', 'run2', 'run3']
    evokedRuns = []
    # for run in runs:

    ###Event file suffix
    eveSuffix = '.eve'
    eve_file = run + eveSuffix
    print "You have chosen the event file " + eve_file

    ##Setup Subject Specific Information
    data_path = '/home/custine/MEG/data/epi_conn/' + subjID
    event_file = data_path + '/eve/' + eve_file
    print event_file
    raw_file = data_path + '/' + run + '_raw.fif'  ##Change this suffix if you are using SSP
    avgLog_file = data_path + '/logs/' + run + '_py-ave.log'
    print raw_file, avgLog_file

    ##Setup Reading fiff data structure
    print 'Reading Raw data... '
    raw = fiff.Raw(raw_file, preload=True)
    events = mne.read_events(event_file)
    #print events
    mne.set_log_file(fname=avgLog_file, overwrite=True)

    ##Filter raw data
    fiff.Raw.filter(raw, l_freq=hp_cutoff, h_freq=lp_cutoff)

    #Pick all channels
    picks = []
    for i in range(raw.info['nchan']):
        picks.append(i)

    ##Read Epochs and compute Evoked :)
    print 'Reading Epochs from evoked file...'
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                        baseline=(None, 0), proj=True, picks=picks,
                        preload=True,
                        flat=dict(mag=magFlat, grad=gradFlat),
                        reject=dict(mag=magRej, grad=gradRej))
    print epochs
    evoked = [epochs.average(picks=None)]
    # epochs.plot()

    ##Write Evoked
    print 'Writing Evoked data to -ave.fif file...'
    fiff.write_evoked(data_path + '/ave_projon/' + run + '-noise-ave.fif', evoked)
    evokedRuns.append(evoked)
    print 'Completed! See ave.fif result in folder', data_path + '/ave_projon/'

    ###############################################################################
    ##Show the Result - Plotting the evoked data
    # mne.viz.evoked.plot_evoked(evoked, exclude = [])
    # print len(evokedRuns)
print event_id
tmin, tmax = -0.2, 0.5
#runs = cc.runDict['Word']
runs = ['emptyroom']  #, 'run1', 'run2'
###For other runs use the baseline and set tmin/tmax using epoched data.
#print runs
covRuns = []
for runID in runs:
    print runID
    data_path = '/home/custine/MEG/data/krns_kr3/' + subjID + '/' + sessID
    fname = data_path + '/' + subjID + '_' + sessID + '_' + runID + '_raw.fif'
    print fname
    cname = data_path + '/cov/' + subjID + '_' + sessID + '_' + runID + '-cov.fif'
    covLog_file = data_path + '/logs/' + subjID + '_' + sessID + '_' + runID + '_cov.log'
    event_file = data_path + '/eve/triggers/' + subjID + '_' + sessID + '_' + runID + '_' + eve_file
    mne.set_log_file(fname=covLog_file, overwrite=True)
    print covLog_file

    print 'Reading Raw data... '
    raw = io.Raw(fname)
    if runID == "emptyroom":
        tmin = 0
        tmax = 2
        cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax)  #, reject = None, picks = picks)
        print cov
    else:
        events = mne.read_events(event_file)
        include = []  # or stim channels ['STI 014']
        #raw.info['bads'] += ['EEG 053']  # bads + 1 more
        # pick EEG channels
        picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False,
                               eog=True, include=include, exclude='bads')
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())

import mne

## get analysis parameters from config file
if len(sys.argv) > 1:
    print sys.argv[1]
    module_name = sys.argv[1]
else:
    module_name = 'WH_config'

C = importlib.import_module(module_name)
reload(C)

mne.set_log_file(fname=C.bem_log_file, overwrite=True)

# plt.ion()  # interactive plotting

# subject numbers
subjs = C.subjs  ###
n_sbs = len(subjs)

# for qsub
if len(sys.argv) > 2:  # if in parallel mode
    print "Running subject(s) {0} now in PARALLEL mode".format(sys.argv)
    ss_idx = map(int, sys.argv[2:])
    subjs_new = []