Example #1
def start_kernel(self):
    self.DTYPE = self._dtype
    self._G = getattr(np, self.DTYPE)(self._G)
    ne.set_num_threads(ne.detect_number_of_cores())
    # Get const values
    self.MASS_LEN = len(self)
    self.SIM_DIM = len(self._mass_list[0]._r)
    # Allocate memory: Object parameters
    self.mass_r_array = np.zeros((self.MASS_LEN, self.SIM_DIM), dtype=self.DTYPE)
    self.mass_a_array = np.zeros((self.MASS_LEN, self.SIM_DIM), dtype=self.DTYPE)
    self.mass_m_array = np.zeros((self.MASS_LEN, ), dtype=self.DTYPE)
    # Copy const data into Numpy infrastructure
    for pm_index, pm in enumerate(self._mass_list):
        self.mass_m_array[pm_index] = pm._m
    # Allocate memory: Temporary variables
    self.relative_r = np.zeros((self.MASS_LEN - 1, self.SIM_DIM), dtype=self.DTYPE)
    self.distance_sq = np.zeros((self.MASS_LEN - 1, ), dtype=self.DTYPE)
    self.distance_sqv = np.zeros((self.MASS_LEN - 1, self.SIM_DIM), dtype=self.DTYPE)
    self.distance_inv = np.zeros((self.MASS_LEN - 1, ), dtype=self.DTYPE)
    self.a_factor = np.zeros((self.MASS_LEN - 1, ), dtype=self.DTYPE)
    self.a1 = np.zeros((self.MASS_LEN - 1, ), dtype=self.DTYPE)
    self.a1r = np.zeros((self.MASS_LEN - 1, self.SIM_DIM), dtype=self.DTYPE)
    self.a1v = np.zeros((self.SIM_DIM, ), dtype=self.DTYPE)
    self.a2 = np.zeros((self.MASS_LEN - 1, ), dtype=self.DTYPE)
    self.a2r = np.zeros((self.MASS_LEN - 1, self.SIM_DIM), dtype=self.DTYPE)
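
Like most snippets in this listing, the method above presupposes NumPy and NumExpr imported under their usual aliases (an assumption; the surrounding module is not shown):

import numpy as np
import numexpr as ne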
Example #2
def set_numexpr_threads(n=None):
    # if we are using numexpr, set the threads to n
    # otherwise reset
    if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
        if n is None:
            n = ne.detect_number_of_cores()
        ne.set_num_threads(n)
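
A minimal usage sketch, assuming the module-level guard flags are defined in the usual way (the flag setup below is an assumption, not part of the snippet):

try:
    import numexpr as ne
    _NUMEXPR_INSTALLED = True
except ImportError:
    _NUMEXPR_INSTALLED = False
_USE_NUMEXPR = _NUMEXPR_INSTALLED  # assumed: flipped off to bypass numexpr

set_numexpr_threads()   # n=None: reset to all detected cores
set_numexpr_threads(1)  # pin numexpr to a single thread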
Example #4
def calc_energy(data):
    """Calculate the energy of the entire system.

    :Parameters: **data** -- the standard python data dictionary
    """
    #name relevant variables
    x = data['x']
    y = data['y']
    nv = data['nv']
    rho = 1000 #density of water
    #initialize EK to zero
    l = nv.shape[0]
    area = np.zeros(l)
    for i in range(l):
        #first, calculate the area of the triangle
        xCoords = x[nv[i,:]]
        yCoords = y[nv[i,:]]
        #Compute two vectors for the area calculation.
        v1x = xCoords[1] - xCoords[0]
        v2x = xCoords[2] - xCoords[0]
        v1y = yCoords[1] - yCoords[0]
        v2y = yCoords[2] - yCoords[0]
        #calculate the area as the determinant
        area[i] = abs(v1x*v2y - v2x*v1y)
    #get a vector of speeds.
    sdata = calc_speed(data)
    speed = sdata['speed']

    #calculate EK, use numexpr for speed (saves ~15 seconds)
    ne.set_num_threads(ne.detect_number_of_cores())
    ek = ne.evaluate("sum(rho * area * speed * speed, 1)")
    ek = ek / 4
    data['ek'] = ek
    return data
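
The per-triangle area loop above can also be written as vectorized NumPy, which is typically much faster on large meshes (a sketch under the same assumptions about x, y, and nv):

# Gather the three vertices of every triangle at once (nv: element-to-node table)
x0, x1, x2 = x[nv[:, 0]], x[nv[:, 1]], x[nv[:, 2]]
y0, y1, y2 = y[nv[:, 0]], y[nv[:, 1]], y[nv[:, 2]]
# Same determinant as in the loop body
area = np.abs((x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0))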
Example #5
def __init__(self, complexity_threshold=2, multicore=True):
    if numexpr_ver is None or numexpr_ver < (2, 0, 0):
        raise ImportError("numexpr version 2.0.0 or better required.")
    self.complexity_threshold = complexity_threshold
    nc = numexpr.detect_number_of_cores()
    if multicore is True:
        multicore = nc
    elif multicore is False:
        multicore = 1
    elif multicore <= 0:
        multicore += nc
    numexpr.set_num_threads(multicore)
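
The multicore argument follows a small convention: True means all detected cores, False means one thread, and a non-positive integer counts down from the core count. For illustration (the class name Evaluator is hypothetical; the snippet omits it):

Evaluator()                  # multicore=True: all detected cores
Evaluator(multicore=4)       # exactly four threads
Evaluator(multicore=-1)      # all cores but one
Evaluator(multicore=False)   # single-threaded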
Example #6
def calc_speed(data):
    """
    Calculates the speed from ua and va

    :Parameters:
        **data** -- the standard python data dictionary

    .. note:: We use numexpr here because, with multiple threads, it is\
    about 30 times faster than direct calculation.
    """
    #name required variables
    ua = data['ua']
    va = data['va']

    #we can take advantage of multiple cores to do this calculation
    ne.set_num_threads(ne.detect_number_of_cores())
    #calculate the speed at each point.
    data['speed'] = ne.evaluate("sqrt(ua*ua + va*va)")
    return data
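
A rough way to check the speed-up the docstring note refers to (a sketch; the array size and repeat count are arbitrary, and the actual ratio is machine-dependent):

import timeit
import numpy as np
import numexpr as ne

ua = np.random.rand(10_000_000)
va = np.random.rand(10_000_000)

t_np = timeit.timeit(lambda: np.sqrt(ua * ua + va * va), number=10)
ne.set_num_threads(ne.detect_number_of_cores())
t_ne = timeit.timeit(lambda: ne.evaluate("sqrt(ua*ua + va*va)"), number=10)
print("numpy: %.2fs  numexpr: %.2fs" % (t_np, t_ne))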
Example #7

nopt = 100000
price, strike, t = gen_data(nopt)
call = np.zeros(nopt, dtype=np.float64)
put = -np.ones(nopt, dtype=np.float64)


def black_scholes(price, strike, t, rate, vol):
    mr = -rate
    sig_sig_two = vol * vol * 2

    P = price
    S = strike
    T = t

    call = ne.evaluate(
        "P * (0.5 + 0.5 * erf((log(P / S) - T * mr + 0.25 * T * sig_sig_two) * 1/sqrt(T * sig_sig_two))) - S * exp(T * mr) * (0.5 + 0.5 * erf((log(P / S) - T * mr - 0.25 * T * sig_sig_two) * 1/sqrt(T * sig_sig_two))) "
    )
    put = ne.evaluate("call - P + S * exp(T * mr) ")

    return call, put


#ne.set_vml_num_threads(ne.detect_number_of_cores())
ne.set_num_threads(ne.detect_number_of_cores())
ne.set_vml_accuracy_mode('high')

if __name__ == '__main__':
    black_scholes(price, strike, t, RISK_FREE, VOLATILITY)
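
As a cross-check on the long numexpr expression, the same formula in plain NumPy (a sketch; erf is taken from SciPy here):

import numpy as np
from scipy.special import erf

def black_scholes_np(P, S, T, rate, vol):
    mr = -rate
    sig_sig_two = vol * vol * 2
    d = np.sqrt(T * sig_sig_two)           # common denominator
    a = np.log(P / S) - T * mr             # shared log-moneyness term
    call = P * (0.5 + 0.5 * erf((a + 0.25 * T * sig_sig_two) / d)) \
        - S * np.exp(T * mr) * (0.5 + 0.5 * erf((a - 0.25 * T * sig_sig_two) / d))
    put = call - P + S * np.exp(T * mr)    # put-call parity, as in the snippet
    return call, put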
Example #8
import expert_finding.models.propagation_idne_model
import expert_finding.models.pre_ane_model
import expert_finding.models.post_ane_model
import expert_finding.models.tadw
import expert_finding.models.gvnrt
import expert_finding.models.graph2gauss
import expert_finding.models.idne
import expert_finding.models.gvnrt_expert_model
import numexpr

import os, sys, resource
import logging

logger = logging.getLogger()

numexpr.set_num_threads(numexpr.detect_number_of_cores())


def get_memory():
    with open('/proc/meminfo', 'r') as mem:
        free_memory = 0
        for i in mem:
            sline = i.split()
            if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
                free_memory += int(sline[1])
    return free_memory


def memory_limit():
    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS, (int(get_memory() * 1024 * 0.90), hard))  # rlimit values must be ints
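
A typical way to wire these helpers up (a sketch; run_experiments is a placeholder, not a function from the original module):

if __name__ == '__main__':
    memory_limit()  # cap the address space at ~90% of currently free memory
    try:
        run_experiments()
    except MemoryError:
        sys.stderr.write('Memory limit exceeded, aborting.\n')
        sys.exit(1)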
Example #9
def implement():
    # NUMEXPR_MAX_THREADS is read when numexpr is first imported,
    # so this needs to run before that import to take effect.
    x = ne.detect_number_of_cores()
    os.environ['NUMEXPR_MAX_THREADS'] = str(x)
    print(f"Number of cores used = {x}")

def main(N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime,saveCorrEvery,calculateCorrelations, showAnimation, resultsFolder):
	start = time.time()
	print(" #################################################################################################")
	print("                                    START                                                            ")
	print("N = ", N, " T = ", T,  " dt = ", dt, " numNayLow =", numNayLow," numNayHigh = ",  numNayHigh, " L = ", L, " eta = ", eta, " speed = ", speed, " numBins = ", numBins, "burnInTime = ", burnInTime," saveCorrEvery = ", saveCorrEvery,"calculateCorrelations" ,calculateCorrelations, " showAnimation = ",showAnimation)


	################################################################
	########### initialise
	#############################################################
	numexprNumCores = ne.detect_number_of_cores()
	ne.set_num_threads(numexprNumCores)

	resultsCompletePath = resultsFolder + '/N%s/n%s/alpha%s' % (str(N), str(numNayHigh-numNayLow), str(numNayLow))

	positionsQ = np.zeros((N,2))
	anglesQ = np.zeros(N)
	inRangeIndexQ = np.zeros((N, 1+numNayHigh - numNayLow), dtype=int)
	allNearAngles = np.zeros((N, 1+numNayHigh - numNayLow))
	meanSinAllNearAngles = np.zeros(N)
	meanCosAllNearAngles = np.zeros(N)
	meanDirectionsQ = np.zeros(N)
	velocitiesQ = np.zeros((N,2))
	velocities_uQ = np.zeros((N,2))
	mean_velocityQ = np.zeros((1,2))
	noisesQ = np.zeros(N)

	if calculateCorrelations == True:
		binMatrix = np.zeros((N,N),dtype=int)
		binMatrixVU = np.zeros((N,N),dtype=int)
		correlationList = np.zeros((0,2))
		maxDist = np.sqrt(pow(L/2.0,2)+pow(L/2.0,2))
		binWidth = maxDist/numBins
		distanceMatrix = np.zeros((N,N))
		velocityDotProductMatrixVU = np.zeros((N,N))
		correlationSumHistogramVU = np.zeros(numBins)
		correlationCountsHistogramVU = np.zeros(numBins)
		sqVelocityDotProductMatrixVU = np.zeros((N,N))
		sqCorrelationSumHistogramVU = np.zeros(numBins)
		onesMatrix = np.ones(np.shape(velocityDotProductMatrixVU[np.triu_indices(N,1)]))
		runCount = int(1)

	sumVelocities_U = 0.0
	dotProdVelocities_U = 0.0
	sumDotProdVelocities_U = 0.0
	orderParameter = 0.0
	sumOrderParameter = 0.0

	positionsQ = agents.initialiseRandomPositions(L,positionsQ)
	anglesQ = agents.initialiseRandomAngles(anglesQ)
	noisesQ = agents.updateRandomNoises(noisesQ,eta)
	velocitiesQ = agents.updateVelocities(velocitiesQ,anglesQ,speed)
	velocities_uQ = agents.updateVelocities_u(velocitiesQ,velocities_uQ)
	inRangeIndexQ = agents.initialise_inRangeIndex(inRangeIndexQ)

	if showAnimation == True:
		figVelocityAnimation = plt.figure(1,figsize=(6,6))
		axVel = figVelocityAnimation.add_subplot(111)
		plt.ion()
		wframe = None
		figVelocityAnimation.set_visible(False)
		plt.pause(0.00000000001)

	################################################################
	########### simulation loop
	#############################################################
	for i in range(T):
		if showAnimation == True:
			if i < burnInTime:
				pass
			elif i >= burnInTime:
				oldcol = wframe
				wframe = plottingFunctions.plot_grid(axVel,positionsQ, velocitiesQ)
				figVelocityAnimation.set_visible(True)

		########### timestep
		inRangeIndexQ[:,1:] = agents.calculateParticleInRange(positionsQ, L, numNayLow, numNayHigh)
		anglesQ = agents.calculateAngles(anglesQ, inRangeIndexQ, allNearAngles, meanSinAllNearAngles, meanCosAllNearAngles, meanDirectionsQ, noisesQ)
		velocitiesQ = agents.updateVelocities(velocitiesQ, anglesQ, speed)
		positionsQ = agents.updatePositions(positionsQ, velocitiesQ, L, dt)
		velocities_uQ = agents.updateVelocities_u(velocitiesQ, velocities_uQ)
		noisesQ = agents.updateRandomNoises(noisesQ,eta)

		if i < burnInTime:
			pass
		elif i >= burnInTime:
			if showAnimation == True:
				if oldcol:
					axVel.collections.remove(oldcol)
					figVelocityAnimation.canvas.draw()
					axVel.autoscale(enable=True, axis='both', tight=True)
					axVel.set_xticks([])
					axVel.set_yticks([])
					plt.pause(0.00000000000001)

		################################################################
		########### correlations
		#############################################################
		if i < burnInTime:
			pass
		elif i >= burnInTime:
			if calculateCorrelations == True:
				########### save correlations
				if i % saveCorrEvery==0:
					if np.sum(correlationSumHistogramVU) != 0.0:
						print("i ",i)
						correlations.saveArray(resultsCompletePath + '/Correlations/sums/Correlation Velocity_U Sum Histogram_run%s'% (str(runCount)) ,correlationSumHistogramVU,N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
						correlations.saveArray(resultsCompletePath +'/Correlations/counts/Correlation Velocity_U Counts Histogram_run%s'% ( str(runCount)) ,correlationCountsHistogramVU,N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
						correlations.saveArray(resultsCompletePath +'/Correlations/sqSums/Square Correlation Velocity_U Sum Histogram_run%s'% ( str(runCount)) ,sqCorrelationSumHistogramVU,N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
						correlations.saveArray(resultsCompletePath +'/Correlations/sumvUdotvU/sum dot prod velocities_U_run%s'% ( str(runCount)) ,np.array([sumDotProdVelocities_U]), N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
						correlations.saveArray(resultsCompletePath +'/OrderParameter/sum of OrderParameter_run%s'% ( str(runCount)) ,np.array([sumOrderParameter]), N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins,burnInTime, saveCorrEvery)
						print("saving correlationCountsHistogramVU", correlationCountsHistogramVU)
						print("total counts" , np.sum(correlationCountsHistogramVU))

						########### reset correlations arrays
						distanceMatrix = np.zeros((N,N))
						velocityDotProductMatrixVU = np.zeros((N,N))
						correlationSumHistogramVU = np.zeros(numBins)
						correlationCountsHistogramVU = np.zeros(numBins)
						sqVelocityDotProductMatrixVU = np.zeros((N,N))
						sqCorrelationSumHistogramVU = np.zeros(numBins)
						sumDotProdVelocities_U = 0.0
						binMatrixVU = np.zeros((N,N),dtype=int)
						upperTriangleIndicesMask = np.triu_indices(np.size(binMatrixVU, axis=0),1)
						runCount +=1

				########### calculate correlations
				distanceMatrix = correlations.calculateDistMatrixWithPeriodicBoundary(positionsQ, L)
				velocityDotProductMatrixVU = correlations.calculateMatrixDotProduct(velocities_uQ)
				sqVelocityDotProductMatrixVU = correlations.calcSquareMatrix(velocityDotProductMatrixVU)
				binMatrixVU = correlations.calculateBinMatrix(distanceMatrix, binWidth,binMatrixVU)
				upperTriangleIndicesMask = np.triu_indices(np.size(binMatrixVU, axis=0),1)
				correlationSumHistogramVU += correlations.calcCorrelationHistoSum(binMatrixVU, velocityDotProductMatrixVU,numBins,upperTriangleIndicesMask)
				correlationCountsHistogramVU += correlations.calcCorrelationHistoCounts(binMatrixVU, onesMatrix,numBins,upperTriangleIndicesMask)
				sqCorrelationSumHistogramVU += correlations.calcCorrelationHistoSum(binMatrixVU, sqVelocityDotProductMatrixVU,numBins,upperTriangleIndicesMask)

			################################################################
			########### order parameter
			#############################################################
			orderParameter = orderParameterStatistics.calculateOrderParameter(velocitiesQ,speed)
			sumOrderParameter += orderParameter
			sumVelocities_U = orderParameterStatistics.calculateSumVelocities_U(velocities_uQ)
			dotProdVelocities_U = orderParameterStatistics.calculateDotProductVelocities_U(velocities_uQ)
			sumDotProdVelocities_U += dotProdVelocities_U


	################################################################
	########### correlations
	#############################################################
	if calculateCorrelations==True:
		correlations.saveArray(resultsCompletePath +'/Correlations/sums/Correlation Velocity_U Sum Histogram_run%s'% ( str(runCount)) ,correlationSumHistogramVU,N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
		correlations.saveArray(resultsCompletePath +'/Correlations/counts/Correlation Velocity_U Counts Histogram_run%s'% ( str(runCount)) ,correlationCountsHistogramVU,N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
		correlations.saveArray(resultsCompletePath +'/Correlations/sqSums/Square Correlation Velocity_U Sum Histogram_run%s'% ( str(runCount)) ,sqCorrelationSumHistogramVU,N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
		correlations.saveArray(resultsCompletePath +'/Correlations/sumvUdotvU/sum dot prod velocities_U_run%s'% ( str(runCount)) ,np.array([sumDotProdVelocities_U]), N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins, burnInTime, saveCorrEvery)
		correlations.saveArray(resultsCompletePath +'/OrderParameter/sum of OrderParameter_run%s'% ( str(runCount)) ,np.array([sumOrderParameter]), N, T, dt, numNayLow, numNayHigh, L, eta, speed, numBins,burnInTime, saveCorrEvery)

	end = time.time()
	meanOrderParameter = sumOrderParameter/(T-burnInTime)
	print(" #################################################################################################")
	print("                                    END                                                            ")
	print("   Simulation statistics")
	print('eta = ', eta)
	print('mean order parameter = ', meanOrderParameter)
	print('mean DotProdVelocities_U = ', sumDotProdVelocities_U/(T-burnInTime))
	print("time taken = ", end - start)
	return meanOrderParameter
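
A hypothetical invocation of the two functions above (every value is illustrative; none come from the original script):

if __name__ == '__main__':
    implement()
    main(N=100, T=1000, dt=1.0, numNayLow=0, numNayHigh=6, L=10.0, eta=0.3,
         speed=0.5, numBins=50, burnInTime=200, saveCorrEvery=100,
         calculateCorrelations=False, showAnimation=False,
         resultsFolder='results')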
Example #11
# Squaring example using NumPy
# ===

# <codecell>

#%%timeit
sq = np.asarray(nums) ** 2

# <markdowncell>

# Squaring example using NumExpr
# ===

# <codecell>

import numexpr as ne
print(ne.detect_number_of_cores())
print(ne.evaluate("nums ** 2")[:5])

# <codecell>

#%%timeit
ne.evaluate("nums ** 2")

# <codecell>

ne.set_num_threads(1)

# <codecell>

#%%timeit
ne.evaluate("nums**2")
Example #12
"""The NIDDK SICR model for estimating the fraction infected with SARS-CoV-2"""
import os
import numexpr
# numexpr.set_num_threads(numexpr.detect_number_of_cores())
ncpus = numexpr.detect_number_of_cores()
if 'SLURM_CPUS_PER_TASK' in os.environ:
    try:
        ncpus = int(os.environ['SLURM_CPUS_PER_TASK'])
    except ValueError:
        ncpus = 2
elif 'SLURM_CPUS_ON_NODE' in os.environ:
    try:
        ncpus = int(os.environ['SLURM_CPUS_ON_NODE'])
    except ValueError:
        ncpus = 2

from .io import *
from .stats import *
from .analysis import *
from .data import *
from .prep import *
from .prepV import *
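
The same SLURM probing can be factored into a helper (a sketch, not part of the original module):

def detect_cpus(default=2):
    # Prefer SLURM's CPU allocation over the physical core count.
    for var in ('SLURM_CPUS_PER_TASK', 'SLURM_CPUS_ON_NODE'):
        if var in os.environ:
            try:
                return int(os.environ[var])
            except ValueError:
                return default
    return numexpr.detect_number_of_cores()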
Example #13
def preprocess_eeg(id_num, random_seed=None):

    # Set important variables
    bids_path = BIDSPath(id_num, task=task, datatype=datatype, root=bids_root)
    plot_path = os.path.join(plotdir, "sub_{0}".format(id_num))
    if os.path.exists(plot_path):
        shutil.rmtree(plot_path)
    os.mkdir(plot_path)
    if not random_seed:
        random_seed = int(binascii.b2a_hex(os.urandom(4)), 16)
    random.seed(random_seed)
    id_info = {"id": id_num, "random_seed": random_seed}

    ### Load and prepare EEG data #############################################

    header = "### Processing sub-{0} (seed: {1}) ###".format(
        id_num, random_seed)
    print("\n" + "#" * len(header))
    print(header)
    print("#" * len(header) + "\n")

    # Load EEG data
    raw = read_raw_bids(bids_path, verbose=True)

    # Check if recording is complete
    complete = len(raw.annotations) >= 600

    # Add a montage to the data
    montage_kind = "standard_1005"
    montage = mne.channels.make_standard_montage(montage_kind)
    mne.datasets.eegbci.standardize(raw)
    raw.set_montage(montage)

    # Extract some info
    eeg_index = mne.pick_types(raw.info, eeg=True, eog=False, meg=False)
    ch_names = raw.info["ch_names"]
    ch_names_eeg = list(np.asarray(ch_names)[eeg_index])
    sample_rate = raw.info["sfreq"]

    # Make a copy of the data
    raw_copy = raw.copy()
    raw_copy.load_data()

    # Trim duplicated data (only needed for sub-005)
    annot = raw_copy.annotations
    file_starts = [a for a in annot if a['description'] == "file start"]
    if len(file_starts):
        duplicate_start = file_starts[0]['onset']
        raw_copy.crop(tmax=duplicate_start)

    # Make backup of EOG and EMG channels to re-append after PREP
    raw_other = raw_copy.copy()
    raw_other.pick_types(eog=True, emg=True, stim=False)

    # Prepare copy of raw data for PREP, dropping all non-EEG channels
    raw_copy.pick_types(eeg=True)

    # Plot data prior to any processing
    if complete:
        save_psd_plot(id_num, "psd_0_raw", plot_path, raw_copy)
        save_channel_plot(id_num, "ch_0_raw", plot_path, raw_copy, raw_other)

    ### Clean up events #######################################################

    print("\n\n=== Processing Event Annotations... ===\n")

    event_names = [
        "stim_on", "red_on", "trace_start", "trace_end", "accuracy_submit",
        "vividness_submit"
    ]
    doubled = []
    wrong_label = []
    new_onsets = []
    new_durations = []
    new_descriptions = []

    # Find and flag any duplicate triggers
    annot = raw_copy.annotations
    trigger_count = len(annot)
    for i in range(1, trigger_count - 1):
        a = annot[i]
        on_last = i + 1 == trigger_count
        prev_trigger = annot[i - 1]['description']
        next_onset = annot[i + 1]['onset'] if not on_last else a['onset'] + 100
        # Determine whether duplicates are doubles or mislabeled
        if a['description'] == prev_trigger:
            if (next_onset - a['onset']) < 0.002:
                doubled.append(a)
            else:
                wrong_label.append(a)

    # Rename annotations to have meaningful names & fix duplicates
    for a in raw_copy.annotations:
        if a in doubled or a['description'] not in event_names:
            continue
        if a in wrong_label:
            index = event_names.index(a['description'])
            a['description'] = event_names[index + 1]
        new_onsets.append(a['onset'])
        new_durations.append(a['duration'])
        new_descriptions.append(a['description'])

    # Replace old annotations with new fixed ones
    if len(annot):
        new_annot = mne.Annotations(
            new_onsets,
            new_durations,
            new_descriptions,
            orig_time=raw_copy.annotations[0]['orig_time'])
        raw_copy.set_annotations(new_annot)

    # Check annotations to verify we have equal numbers of each
    orig_counts = Counter(annot.description)
    counts = Counter(raw_copy.annotations.description)
    print("Updated Annotation Counts:")
    for a in event_names:
        out = " - '{0}': {1} -> {2}"
        print(out.format(a, orig_counts[a], counts[a]))

    # Get info
    id_info['annot_doubled'] = len(doubled)
    id_info['annot_wrong'] = len(wrong_label)

    count_vals = [
        n for n in counts.values() if n != counts['vividness_submit']
    ]
    id_info['equal_triggers'] = all(x == count_vals[0] for x in count_vals)
    id_info['stim_on'] = counts['stim_on']
    id_info['red_on'] = counts['red_on']
    id_info['trace_start'] = counts['trace_start']
    id_info['trace_end'] = counts['trace_end']
    id_info['acc_submit'] = counts['accuracy_submit']
    id_info['vivid_submit'] = counts['vividness_submit']

    if not complete:
        remaining_info = {
            'initial_bad': "NA",
            'num_initial_bad': "NA",
            'interpolated': "NA",
            'num_interpolated': "NA",
            'remaining_bad': "NA",
            'num_remaining_bad': "NA"
        }
        id_info.update(remaining_info)
        e = "\n\n### Incomplete recording for sub-{0}, skipping... ###\n\n"
        print(e.format(id_num))
        return id_info

    ### Run components of PREP manually #######################################

    print("\n\n=== Performing CleanLine... ===")

    # Try to remove line noise using CleanLine approach
    n_threads = max(1, int(numexpr.detect_number_of_cores() / 2))
    linenoise = np.arange(60, sample_rate / 2, 60)
    EEG_raw = raw_copy.get_data() * 1e6
    EEG_new = removeTrend(EEG_raw, sample_rate=raw.info["sfreq"])
    EEG_clean = mne.filter.notch_filter(
        EEG_new,
        Fs=raw.info["sfreq"],
        freqs=linenoise,
        filter_length="10s",
        method="spectrum_fit",
        mt_bandwidth=2,
        p_value=0.01,
        n_jobs=n_threads,  # uses half of all available cores
    )
    EEG_final = EEG_raw - EEG_new + EEG_clean
    raw_copy._data = EEG_final * 1e-6
    del linenoise, EEG_raw, EEG_new, EEG_clean, EEG_final

    # Plot data following cleanline
    save_psd_plot(id_num, "psd_1_cleanline", plot_path, raw_copy)
    save_channel_plot(id_num, "ch_1_cleanline", plot_path, raw_copy, raw_other)

    # Perform robust re-referencing
    prep_params = {"ref_chs": ch_names_eeg, "reref_chs": ch_names_eeg}
    reference = Reference(raw_copy,
                          prep_params,
                          ransac=True,
                          random_state=random_seed)
    print("\n\n=== Performing Robust Re-referencing... ===\n")
    reference.perform_reference()

    # If not interpolating bad channels, use pre-interpolation channel data
    if not interpolate_bads:
        reference.raw._data = reference.EEG_before_interpolation * 1e-6
        reference.interpolated_channels = []
        reference.still_noisy_channels = reference.bad_before_interpolation
        reference.raw.info["bads"] = reference.bad_before_interpolation

    # Plot data following robust re-reference
    save_psd_plot(id_num, "psd_2_reref", plot_path, reference.raw)
    save_channel_plot(id_num, "ch_2_reref", plot_path, reference.raw,
                      raw_other)

    # Re-append removed EMG/EOG/trigger channels
    raw_prepped = reference.raw.add_channels([raw_other])

    # Get info
    initial_bad = reference.noisy_channels_original["bad_all"]
    id_info['initial_bad'] = " ".join(initial_bad)
    id_info['num_initial_bad'] = len(initial_bad)

    interpolated = reference.interpolated_channels
    id_info['interpolated'] = " ".join(interpolated)
    id_info['num_interpolated'] = len(interpolated)

    remaining_bad = reference.still_noisy_channels
    id_info['remaining_bad'] = " ".join(remaining_bad)
    id_info['num_remaining_bad'] = len(remaining_bad)

    # Print re-referencing info
    print("\nRe-Referencing Info:")
    print(" - Bad channels original: {0}".format(initial_bad))
    if interpolate_bads:
        print(" - Bad channels after re-referencing: {0}".format(interpolated))
        print(" - Bad channels after interpolation: {0}".format(remaining_bad))
    else:
        print(
            " - Bad channels after re-referencing: {0}".format(remaining_bad))

    # Check if too many channels were interpolated for the participant
    prop_interpolated = len(
        reference.interpolated_channels) / len(ch_names_eeg)
    e = "### NOTE: Too many interpolated channels for sub-{0} ({1}) ###"
    if max_interpolated < prop_interpolated:
        print("\n")
        print(e.format(id_num, len(reference.interpolated_channels)))
        print("\n")

    ### Filter data and apply ICA to remove blinks ############################

    # Apply highpass & lowpass filters
    print("\n\n=== Applying Highpass & Lowpass Filters... ===")
    raw_prepped.filter(1.0, 50.0, fir_design='firwin', picks=['eeg'])

    # Plot data following frequency filters
    save_psd_plot(id_num, "psd_3_filtered", plot_path, raw_prepped)
    save_channel_plot(id_num, "ch_3_filtered", plot_path, raw_prepped)

    # Perform ICA using EOG data on eye blinks
    print("\n\n=== Removing Blinks Using ICA... ===\n")
    ica = ICA(n_components=15, random_state=random_seed, method='picard')
    ica.fit(raw_prepped, decim=5)
    eog_indices, eog_scores = ica.find_bads_eog(raw_prepped)
    ica.exclude = eog_indices

    if not len(ica.exclude):
        err = " - Encountered an ICA error for sub-{0}, skipping for now..."
        print("\n")
        print(err.format(id_num))
        print("\n")
        save_bad_fif(raw_prepped, id_num, ica_err_dir)
        return id_info

    # Plot ICA info & diagnostics before removing from signal
    save_ica_plots(id_num, plot_path, raw_prepped, ica, eog_scores)

    # Remove eye blink independent components based on ICA
    ica.apply(raw_prepped)

    # Plot data following ICA
    save_psd_plot(id_num, "psd_4_ica", plot_path, raw_prepped)
    save_channel_plot(id_num, "ch_4_ica", plot_path, raw_prepped)

    ### Compute Current Source Density (CSD) estimates ########################

    if perform_csd:
        print("\n")
        print("=== Computing Current Source Density (CSD) Estimates... ===\n")
        raw_prepped = mne.preprocessing.compute_current_source_density(
            raw_prepped.drop_channels(remaining_bad))

        # Plot data following CSD
        save_psd_plot(id_num, "psd_5_csd", plot_path, raw_prepped)
        save_channel_plot(id_num, "ch_5_csd", plot_path, raw_prepped)

    ### Write preprocessed data to new EDF ####################################

    if max_interpolated < prop_interpolated:
        if not os.path.isdir(noisy_bad_dir):
            os.makedirs(noisy_bad_dir)
        outpath = os.path.join(noisy_bad_dir, outfile_fmt.format(id_num))
    else:
        outpath = os.path.join(outdir_edf, outfile_fmt.format(id_num))
    write_mne_edf(outpath, raw_prepped)

    print("\n\n### sub-{0} complete! ###\n\n".format(id_num))

    return id_info