Example #1
 def period_guess(self, y_s, t_0):
     # y_s is assumed to be a sequence of (t, y) samples; collect the times of
     # the local extrema, i.e. points where the slope of y changes sign
     extrema = [y_s[i][0] for i in range(1, len(y_s) - 1)
                if (y_s[i][1] - y_s[i - 1][1]) * (y_s[i + 1][1] - y_s[i][1]) < 0]
     # spacing between consecutive extrema in log-time, measured from t_0
     spacings = array([log(extrema[i + 1] - t_0) - log(extrema[i] - t_0)
                       for i in range(len(extrema) - 1)])
     # consecutive extrema sit half a log-period apart, so omega = pi / spacing
     # (uses numpy's array, log, pi, average and std)
     omegas = pi / spacings
     omega_guess = average(omegas)
     omega_std = std(omegas)
     return omega_guess, omega_std
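As a self-contained illustration of the same technique (the function name and the (t, y) sample format below are assumptions, not taken from the original project): for a log-periodic signal y(t) = cos(omega * log(t - t0)), consecutive extrema are separated by pi/omega in log-time, so omega can be recovered from the spacing of the extrema.

import numpy as np

def estimate_log_frequency(t, y, t0):
    # local extrema: points where the discrete slope changes sign
    dy = np.diff(y)
    extrema_idx = np.where(dy[:-1] * dy[1:] < 0)[0] + 1
    # spacing of consecutive extrema in log-time, measured from t0
    spacings = np.diff(np.log(t[extrema_idx] - t0))
    # half a log-period between consecutive extrema: omega * spacing = pi
    omegas = np.pi / spacings
    return omegas.mean(), omegas.std()

t = np.linspace(1.0, 20.0, 5000)
y = np.cos(4.0 * np.log(t - 0.5))            # true omega = 4.0
print(estimate_log_frequency(t, y, t0=0.5))  # roughly (4.0, small std)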
Example #2
def main():
    os.chdir(r'G:\Shared drives\Apex\Acoustic Data\IOA, conference proceedings\2021 papers\Max noise levels from hockey pitches')
    # adjust_value_by_LAeqT()
    # process recordings '1.wav' .. '104.wav' from the 'bat hitting' folder
    filenames = [str(1 + n) + '.wav' for n in range(104)]
    LAFmax = [calc_LAeq_dt(os.path.join('bat hitting', fn)) for fn in filenames]
    # histogram of the per-file levels, plus summary statistics
    hist_plot(LAFmax)
    print(mean(LAFmax))
    print(std(LAFmax))
    plt.xlabel('LAFmax at 11 m')
    plt.show()
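calc_LAeq_dt and hist_plot are project helpers that are not part of this snippet. Purely as a hypothetical stand-in, inferred from the plt.xlabel/plt.show calls above, a minimal histogram helper might look like this:

import matplotlib.pyplot as plt

def hist_plot(levels, bins=20):
    # hypothetical minimal version: histogram the per-file levels on the
    # current axes; the real project helper is not shown here
    plt.hist(levels, bins=bins)
    plt.ylabel('count')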
Example #3
def analyze_sites(some_list):
	'''
	requires the following input format:
	seq score peaklength cdist cons
	cdist = distance to center, cons = conservation value
	'''
	# column definitions for reference / calling
	x0 = 0 # x0 is sequence
	x1 = 1 # x1 is score (Ri)
	x2 = 2 # x2 is peaklength
	x3 = 3 # x3 is cdist
	x4 = 4 # x4 is conservation score

	# columns are seq, score, peaklength, cdist, cons
	# coerce each column to the correct type, in place
	for x in some_list:
		# forces sequences to uppercase
		x[x0] = x[x0].upper()
		# forces length to int
		x[x2] = int(x[x2])
		# forces cdist to int
		x[x3] = int(x[x3])
		# forces cons score to floating point
		# and correct nan
		if x[x4]=='nan': x[x4] = 0.0
		else: x[x4] = float(x[x4])
	# sort the list by sequence (arbitrarily)
	sorted_list = sorted(some_list, key=operator.itemgetter(x0))
	# Part 3: continue with analysis
	analyzedSeqs = []
	counter = len(sorted_list)
	while counter > 0:
		site = sorted_list.pop()
		counter -= 1
		seq = site[x0]
		score = site[x1]
		# start a list of dists, absdists, cscores, we'll sum later
		i = 1
		plengths = [ site[x2] ]
		dists = [ site[x3] ]
		absdists = [ abs(site[x3]) ]
		cscores = [ site[x4] ]
		while True: # grab sequences that match
			try: y = sorted_list.pop()
			except IndexError: break
			counter -= 1
			# look if we match the last entry
			if y[x0] == seq:
				i += 1
				plengths.append(y[x2])
				dists.append(y[x3])
				absdists.append( abs( y[x3] ) )
				cscores.append(y[x4])
			# if we don't, escape to previous loop
			else:
				# append back
				sorted_list.append(y)
				break
		# record seq, its score, # of instances, summed conservation, and the averaged lengths/distances
		avgPlength = float(sum(plengths))/i
		avgDist = float(sum(dists))/i
		avgAbsdist = float(sum(absdists))/i
		distErr = std(absdists)
		analyzedSeqs.append([seq, score, i, sum(cscores), avgPlength, avgDist,
							avgAbsdist,distErr])
	analyzedSeqs.reverse()
	# sort the results by the number of instances (index 2 of each output row)
	return sorted(analyzedSeqs, key=operator.itemgetter(x2))
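A hedged usage sketch for analyze_sites: the rows below are invented purely to show the expected five-column layout (the numeric fields other than the score arrive as strings, matching the int()/float() conversions above).

# illustrative input only; the values are made up to show the expected layout
rows = [
    ['acgtacgt', 12.3, '250', '-40', '0.87'],
    ['ACGTACGT', 11.1, '300', '15', 'nan'],
    ['ttgacaat', 9.8, '180', '5', '0.42'],
]
# each result row is [seq, score, n_instances, cons_sum, avg_peaklength,
#                     avg_dist, avg_absdist, dist_err]
results = analyze_sites(rows)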
Example #5
 def biaser(standings):
     # sample standard deviation (ddof=1) of the standings, scaled by the
     # module-level `scale` factor defined elsewhere in the project
     return scale * std(standings, ddof=1)
def svr(C, gamma, eps):

    # initialization of the data from wmproxy
    traininput, traintarget, testinput, testtarget = initialize_wmproxy()

    # log-scale the non-zero values in the training and test inputs/targets
    for i in range(len(traintarget)):
        if traintarget[i] != 0:
            traintarget[i] = log(traintarget[i])
        if traininput[i] != 0:
            traininput[i] = log(traininput[i])

    for i in range(len(testtarget)):
        if testtarget[i] != 0:
            testtarget[i] = log(testtarget[i])
        if testinput[i] != 0:
            testinput[i] = log(testinput[i])
    
    # choose C from the spread of the (log-scaled) training targets,
    # overriding the value passed in as an argument
    avg = mean(traintarget)
    sigma = std(traintarget)
    maxtrain = len(traintarget)
    C = max([abs(avg + sigma), abs(avg - sigma)])
    print "C is equal to %f" % C

    # training of the SVR on the last 1440 training samples
    svr = SVR(traininput[maxtrain - 1440:maxtrain], testinput,
              traintarget[maxtrain - 1440:maxtrain], gamma, C, eps, eps)
    
    
    # predict the first 30 test points
    out = svr.svr_req(testinput[0:30])

    # mean signed error of the predictions
    error = 0
    for i in range(len(out)):
        error += (out[i] - testtarget[i])

    mean_error = error / len(out)

    # average absolute deviation of the predictions from the mean error
    variance = 0
    for i in range(len(out)):
        variance += abs(out[i] - mean_error)

    variance /= len(out)

    print "Variance = %f" % variance

    epsilon = 3 * variance * sqrt(log(len(out)) / len(out))

    print "Epsilon = %f" % epsilon
    #calculation of the metrics
    sme = svr.calc_sme(testtarget[0:30], out)
    mape = svr.calc_mape(out, testtarget[0:30])
    predx = svr.calc_pred(out, testtarget[0:30], 25)
    rsq = svr.calc_rsqr(out, testtarget[0:30])
    print out
    print testtarget[0:30]
    # plot the model results
    x = array(testinput[0:30], dtype=int32)
    y = array(testtarget[0:30], dtype=int32)
    xp = array(testinput[0:30], dtype=int32)
    yp = array(out, dtype=int32)
    fig = figure()
    ax1 = fig.add_subplot(1, 1, 1)
    ax1.title.set_text("SVR model predictions with C = %f, Gamma = %f, Eps = %f" % (C, gamma, eps))
    realvalues, = ax1.plot(x, y)
    predictedvalues, = ax1.plot(xp, yp, "r")
    ax1.axis([8.9, max(xp) + 0.5, 0, max(y) + 10])
    ax1.set_xlabel('minutes of the week')
    ax1.set_ylabel('number of requests')
    legend([realvalues, predictedvalues], ["Real Values", "Predicted Values"])

    fig.savefig("svr_model_%f" % time(), format='png')
    
    print "SME = %f" % sme
    print "MAPE = %f" % mape
    print "R^2 = %f" % rsq
    print "PREDX = %f" % predx
Example #7
rewardedTrials = np.zeros((len(prob), len(vol), nEpisodes))
totalIter = len(prob) * len(vol) * nEpisodes

n = 1  #iterations counter
now = datetime.now()
print 'Simulation started. Date:', now.strftime("%Y-%m-%d %H:%M:%S")
for v in xrange(len(vol)):
    for p in xrange(len(prob)):
        environment = loadArrangeVar(prob[p], vol[v], path, 'environment')
        for e in xrange(nEpisodes):
            lrAgent = loadEpisodeVar(prob[p], vol[v], e, path, 'lrAgent')
            agent = loadEpisodeVar(prob[p], vol[v], e, path, 'agent')
            chosenLRs = [lrAgent.lbd[a] for a in lrAgent.actions]
            lbdMean[p][v][e] = mean(chosenLRs)
            # a low std indicates the algorithm converges on an optimum learning rate
            lbdStd[p][v][e] = std(chosenLRs)
            meanSquareError[p][v][e] = mean(lrAgent.r)
            # x has shape nTrials+1 while history has nTrials, because after the
            # last trial the agent still learns a value of x for the next one
            rightEstimate[p][v][e] = float(np.sum(
                around(agent.x[1:]) == around(environment.history)
            )) / environment.history.size * 100
            rightPrediction[p][v][e] = float(np.sum(
                around(agent.x[0:-1]) == around(environment.history)
            )) / environment.history.size * 100
            # how often the agent was rewarded within the episode
            rewardedTrials[p][v][e] = float(np.sum(agent.r)) / agent.x.size * 100

            showProgress(totalIter, n, time.time(), begin)
            n += 1
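loadArrangeVar, loadEpisodeVar, and showProgress are helpers from the surrounding project and are not shown in this snippet. Purely as a hypothetical sketch of the kind of progress reporting the loop expects (not the project's actual implementation), showProgress could be as simple as:

def showProgress(totalIter, n, nowSecs, beginSecs):
    # hypothetical stand-in: report elapsed time and a crude remaining-time
    # estimate for iteration n out of totalIter
    elapsed = nowSecs - beginSecs
    remaining = elapsed / n * (totalIter - n)
    print 'iteration %d/%d, elapsed %.0f s, remaining ~%.0f s' % (n, totalIter, elapsed, remaining)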