def get_squeeze_bar(opens, closes, highs, lows):
    """Return (bcolor, scolor) for the latest bar of a TTM-Squeeze-style indicator.

    bcolor encodes momentum (lime/green when the linear-regression value is
    above zero, red/maroon below); scolor encodes squeeze state (blue = no
    squeeze or squeeze on, orange = squeeze off).  Relies on module-level
    helpers and constants not visible here: sma, stddev, tr, nz, avg,
    linregress, LENGTH, LENGTH_KC, LENGTH_K, MULT_KC, USE_TRUE_RANGE.
    """
    # Calculate BB (Bollinger Bands).
    source = closes  # closing prices
    # standard deviation of x for y bars back.
    # NOTE(review): the KC multiplier MULT_KC is reused for the BB width;
    # Pine's BB normally has its own multiplier — confirm this is intentional.
    basis = sma(source, LENGTH)
    dev = MULT_KC * stddev(source[-LENGTH:])
    upperBB = basis + dev
    lowerBB = basis - dev
    last_close = closes[-1]
    current_high = highs[-1]  # NOTE(review): unused below
    current_low = lows[-1]    # NOTE(review): unused below
    # Calculate KC (Keltner Channels).
    ma = sma(source, LENGTH_KC)
    # Per-bar true range when enabled, plain high-low range otherwise.
    # Current high price, Current low price.
    _range = [tr(high, low, last_close) for high, low in zip(highs, lows)] if USE_TRUE_RANGE else ([high - low for high, low in zip(highs, lows)])
    rangema = sma(_range, LENGTH_KC)
    upperKC = ma + rangema * MULT_KC
    lowerKC = ma - rangema * MULT_KC
    # Squeeze is "on" when both Bollinger Bands sit inside the Keltner
    # Channels, "off" when both sit outside, otherwise no squeeze.
    sqzOn = (lowerBB > lowerKC) and (upperBB < upperKC)
    sqzOff = (lowerBB < lowerKC) and (upperBB > upperKC)
    noSqz = (sqzOn == False) and (sqzOff == False)
    # linreg: Linear regression curve. A line that best fits the prices specified
    # over a user-defined time period, calculated with the least squares method:
    # linreg = intercept + slope * (length - 1 - offset).
    # NOTE(review): linregress is called with a single series plus LENGTH_K
    # (possibly a typo for LENGTH_KC), and `source - avg(...)` only works if
    # source supports elementwise subtraction (numpy-style) — confirm both.
    slope, intercept, r_value, p_value, std_err = linregress(source - avg(avg(max(highs[-LENGTH_KC:]), min(lows[-LENGTH_KC:])), sma(closes, LENGTH_KC)), LENGTH_K)
    offset = 0
    linreg_val = intercept + slope * (LENGTH - 1 - offset)
    # lime is momentum up above x axis, green is momentum down above x axis
    # red is momentum down below x axis, maroon is momentum up below x axis
    # get the bar-colour
    bcolor = None
    if linreg_val > 0:
        # NOTE(review): `linreg_val[1]` is Pine-Script-style "previous bar"
        # series indexing; on a Python float this raises TypeError — the
        # previous value must be tracked separately. Verify nz()'s handling.
        if linreg_val > nz(linreg_val[1]):
            bcolor = 'lime'
        else:
            bcolor = 'green'
    else:
        if linreg_val < nz(linreg_val[1]):
            bcolor = 'red'
        else:
            bcolor = 'maroon'
    # blue is no squeeze or the squeeze is on
    # orange is the squeeze is off
    if noSqz or sqzOn:
        scolor = 'blue'
    else:
        scolor = 'orange'
    return bcolor, scolor
def perform_statistics_tests():
    """Performs unit tests on the statistics module and prints the results."""
    print("\nStatistics Tests:")
    print("-----------------")
    # Fixed sample so the printed values are reproducible.
    samples = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
    sample_mean = statistics.mean(samples)
    print(f"Average: {sample_mean}")
    sample_variance = statistics.variance(samples, sample_mean)
    print(f"Variance: {sample_variance}")
    print(f"Standard Deviation: {statistics.stddev(samples, sample_mean)}")
def __init__(self, values, jobids):
    """Store a sample of measured values with their job ids and precompute
    count, mean and standard deviation of the sample."""
    self.values = values
    self.jobids = jobids
    self.count = len(self.values)
    self.mean = statistics.mean(self.values)
    self.stddev = statistics.stddev(self.values)
    # We sometimes get a standard deviation of zero because
    # of the granularity of our measurements. If that happens
    # set the standard deviation to the error in the measurement.
    if self.stddev == 0:
        # BUGFIX: use a raw string for the regex — "\." in a plain string
        # relies on Python passing unknown escapes through and emits a
        # SyntaxWarning on modern interpreters. The pattern strips the
        # integer part and the dot, leaving only the fractional digits.
        self.stddev = 10**(-len(re.sub(r"[0-9]+\.", "", str(self.mean)))/2)
def __init__(self, values, jobids):
    """Store a sample of measured values with their job ids and precompute
    count, mean and standard deviation of the sample."""
    self.values = values
    self.jobids = jobids
    self.count = len(self.values)
    self.mean = statistics.mean(self.values)
    self.stddev = statistics.stddev(self.values)
    # We sometimes get a standard deviation of zero because
    # of the granularity of our measurements. If that happens
    # set the standard deviation to the error in the measurement.
    if self.stddev == 0:
        # BUGFIX: use a raw string for the regex — "\." in a plain string
        # relies on Python passing unknown escapes through and emits a
        # SyntaxWarning on modern interpreters. The pattern strips the
        # integer part and the dot, leaving only the fractional digits.
        self.stddev = 10**(-len(re.sub(r"[0-9]+\.", "", str(self.mean))) / 2)
def score_schedule(self, week): """Computes a score for the schedule, based on the daily stress scores.""" """A better schedule is one with a more even distribution of stress.""" """Lower is better.""" # Compute the average daily stress. daily_stress_scores = [0.0] * 7 index = 0 for day in week: if day is not None: if day.estimated_intensity_score is not None: daily_stress_scores[index] = day.estimated_intensity_score index = index + 1 smoothed_scores = signals.smooth(daily_stress_scores, 2) avg_smoothed_scores = sum(smoothed_scores) / len(smoothed_scores) stdev_smoothed_scores = statistics.stddev(smoothed_scores, avg_smoothed_scores) return stdev_smoothed_scores
# NOTE(review): orphaned tail of a function whose `def` line is outside this
# view; kept verbatim (it is not valid at top level on its own).
setB = setB - set(connections)
return setB


#Team04
def fib(n):
    # Iterative Fibonacci, 1-indexed: fib(1) == fib(2) == 1, fib(6) == 8.
    a,b = 1,1
    for i in range(n-1):
        a,b = b,a+b
    return a


if __name__ == "__main__":
    print (fib(6))


#Team06
import statistics
# NOTE(review): x, y and z are not defined anywhere in this view — as written
# this top-level call raises NameError; presumably placeholder example usage.
statistics.stddev([x,y,z])


#Team10
def kkk():
    # Print a products table for 0..9 x 0..9, one row per i, then return the
    # table's name (Chinese for "nine-nine multiplication table").
    for i in range(0,10):
        for j in range(0,10):
            if i*j < 10:
                # NOTE(review): both branches are identical after whitespace
                # was lost in transcription; the original likely padded
                # single-digit products with extra spaces to align columns —
                # confirm the intended padding widths.
                print(i*j,end='')
                print(" ",end='')
            else:
                print(i*j,end='')
                print(" ",end='')
        print('')  # newline after each row
    return "九九乘法表"
def implementationWrapper(implementationFunction, nQueens=8, times=30, numberOfCouples=1, populations=[]):
    """Run `implementationFunction` `times` times, print summary statistics
    (labels in Portuguese) and return plot data for the most representative run.

    Returns:
        x: iteration indices for the chosen execution.
        y: average fitness per iteration of that execution.
        e: fitness standard deviation per iteration (error-bar data).

    NOTE(review): `populations=[]` is a mutable default argument; it is only
    read here (populations[i]) so it is benign, but callers must supply one
    population per run or an IndexError is raised.
    """
    # Auxiliaries (aggregates across all executions).
    success = 0
    convergencesIteration = []
    convergencesPerExecution = []
    averageFitnessPerExecution = []
    averageFitnesPerIterationPerExecution = []
    fitnessDeviationPerIterationPerExecution = []
    # Execute the naive algorithm multiple times and calculate averages.
    for i in range(times):
        # Run algorithm implementation.
        metrics = implementationFunction(nQueens=nQueens, maximumFitnessEvaluations=10000, numberOfCouples=numberOfCouples, population=populations[i])
        foundSolution = metrics['foundSolution']
        firstSolutionFoundAtIteration = metrics['firstSolutionFoundAtIteration']
        numberOfConvergences = metrics['numberOfConvergences']
        averageFitness = metrics['averageFitness']
        averageFitnesPerIteration = metrics['averageFitnesPerIteration']
        fitnessDeviationPerIteration = metrics['fitnessDeviationPerIteration']
        # Update metrics if a solution was found.
        if foundSolution:
            # Count the number of executions that converged.
            success += 1
            # Store the iteration in which the algorithm converged, the number of
            # individuals that converged and the average fitness per execution. We
            # use this to calculate the mean and standard deviation.
            convergencesIteration.append(float(firstSolutionFoundAtIteration))
            convergencesPerExecution.append(float(numberOfConvergences))
            averageFitnessPerExecution.append(float(averageFitness))
            # Store data relative to each iteration in each execution, we'll use it later
            # to plot the average fitness per iteration and fitness deviation per iteration
            # of the execution that was closer to the all time averages.
            # NOTE(review): indentation was lost in transcription; these two
            # appends are placed inside `if foundSolution` so their indices
            # line up with convergencesIteration (indexed by
            # closestExecutionIndex below) — confirm against the original.
            averageFitnesPerIterationPerExecution.append(averageFitnesPerIteration)
            fitnessDeviationPerIterationPerExecution.append(fitnessDeviationPerIteration)
    # Find the execution whose average fitness was closer to the all time average.
    #averageFitness = statistics.mean(averageFitnessPerExecution)
    #averageFitness = 56
    #closestExecutionIndex = statistics.closestValueToAverage(averageFitnessPerExecution, averageFitness)
    # Find the execution whose average iteration of convergence was closer to the all time average.
    averageConvergenceIteration = statistics.mean(convergencesIteration)
    #averageConvergenceIteration = max(convergencesIteration)
    #averageConvergenceIteration = min(convergencesIteration)
    closestExecutionIndex = statistics.closestValueToAverage(convergencesIteration, averageConvergenceIteration)
    # Create a folder to place the graphs.
    #conveniences.createFolder('results')
    # Prepare data to generate the graph.
    #fileBaseName = "results/"+(implementationFunction.__name__).lower()
    e = fitnessDeviationPerIterationPerExecution[closestExecutionIndex]
    y = averageFitnesPerIterationPerExecution[closestExecutionIndex]
    x = [x for x in range(len(y))]
    #implementationFunction.__name__
    #plotAvr,fig = plotation.plotList(averageList,'fitness medio por iteracao')
    #plotation.saveImage(implementationFunction.__name__+'/average.png', plotAvr, fig)
    #plotDev, fig = plotation.plotList(deviationList,'desvio padrao do fitness por iteracao')
    #plotation.saveImage(implementationFunction.__name__+'/deviation.png', plotDev, fig)
    # 1. In how many executions did the algorithm converge?
    print('1. Em quantas execucoes o algoritmo convergiu?')
    print(' ' + str(success) + '/' + str(times))
    # 2. At which iteration did the algorithm converge (mean / stddev)?
    average = statistics.mean(convergencesIteration)
    deviation = statistics.stddev(convergencesIteration)
    print('2. Em que iteracao o algoritmo convergiu?')
    print(' Media:' + str(average))
    print(' Desvio Padrao:' + str(deviation))
    #conveniences.writeToFile('iterations.out', average)
    #conveniences.writeToFile('iterations.out', deviation)
    # 3. How many individuals converged per execution (mean / stddev)?
    average = statistics.mean(convergencesPerExecution)
    deviation = statistics.stddev(convergencesPerExecution)
    print('3. Quantos de individuos convergiram por execucao?')
    print(' Media:' + str(average))
    print(' Desvio Padrao:' + str(deviation))
    #conveniences.writeToFile('convergency.out', average)
    #conveniences.writeToFile('convergency.out', deviation)
    # 4. Average fitness reached (mean / stddev)?
    average = statistics.mean(averageFitnessPerExecution)
    deviation = statistics.stddev(averageFitnessPerExecution)
    print('4. Fitness medio alcancado?')
    print(' Media: ' + str(average))
    print(' Desvio Padrao:' + str(deviation))
    #conveniences.writeToFile('fitness.out', average)
    #conveniences.writeToFile('fitness.out', deviation)
    return x, y, e
def implementation(
        initialPopulation,
        maxFitness,
        fitnessFunction,
        parentsSelectionFunction,
        recombinationFunction,
        mutationFunction,
        nQueens=8,
        numberOfIndividuals=100,
        numberOfCouples=1,
        recombinationProbability=0.9,
        mutationProbability=0.4,
        maximumFitnessEvaluations=10000,
):
    """Generic generational GA loop for the N-queens problem.

    Repeatedly selects couples, recombines, mutates and truncates the
    population until `maximumFitnessEvaluations` fitness evaluations are
    spent or some individual reaches `maxFitness`.

    Returns a dict with keys: foundSolution, firstSolutionFoundAtIteration,
    numberOfConvergences, averageFitness, averageFitnesPerIteration,
    fitnessDeviationPerIteration.
    """
    # Metrics.
    iteration = 0
    foundSolution = False
    firstSolutionAtIteration = 0
    # The initial population already cost one evaluation per individual.
    fitnessEvaluationsCount = numberOfIndividuals
    averageFitnessPerIteration = []
    fitnessDeviationPerIteration = []
    # Generate the initial population.
    p = initialPopulation
    # Compute individuals fitnesses.
    f = [fitnessFunction(x) for x in p]
    # Run until the maximum number of fitness evaluations is reached.
    while fitnessEvaluationsCount < maximumFitnessEvaluations:
        iteration += 1
        # Algorithm evaluation.
        if not foundSolution:
            # Store the average fitness and standard deviation of the
            # current generation.
            aux = [float(x) for x in f]
            averageFitnessPerIteration.append(statistics.mean(aux))
            fitnessDeviationPerIteration.append(statistics.stddev(aux))
            # f is sorted best-first at the end of each generation, so f[0]
            # is the best fitness seen so far.
            if f[0] == maxFitness:
                foundSolution = True
                firstSolutionAtIteration = iteration
                break
        # Select couples to breed.
        c = parentsSelectionFunction(population=p, fitnesses=f, maxCouples=numberOfCouples)
        # Recombine couples (breed).
        n = recombinationFunction(nQueens=nQueens, couples=c, recombinationProbability=recombinationProbability)
        # Calculate childs fitnesses.
        k = [fitnessFunction(x) for x in n]
        fitnessEvaluationsCount += len(n)
        # Combine childs and their parents, also combine their fitnesses.
        p = p + n
        f = f + k
        # Mutate; re-evaluate fitness only for individuals that changed.
        for i in range(len(p)):
            p[i], performed = mutationFunction(individual=p[i], genesCount=nQueens, mutationProbability=mutationProbability)
            if performed:
                f[i] = fitnessFunction(p[i])
                fitnessEvaluationsCount += 1
        # Sort individuals by their fitnesses, best first.
        # BUGFIX: zip() returns an iterator in Python 3 and has no .sort()
        # method (the original `z = zip(p, f); z.sort(...)` raises
        # AttributeError); build a sorted list instead (also valid Python 2).
        ranked = sorted(zip(p, f), key=lambda pair: pair[1], reverse=True)
        p, f = zip(*ranked)
        # Remove worst individuals from population.
        p = list(p[:numberOfIndividuals])
        f = list(f[:numberOfIndividuals])
    # Calculate final metrics (the unused fitnessStardardDeviation local
    # was removed; it was computed but never returned).
    aux = [float(x) for x in f]
    averageFitness = statistics.mean(aux)
    convergencesCount = sum(x == maxFitness for x in f)
    # Return metrics packed in a dictionary.
    metrics = {
        'foundSolution': foundSolution,
        'firstSolutionFoundAtIteration': firstSolutionAtIteration,
        'numberOfConvergences': convergencesCount,
        'averageFitness': averageFitness,
        'averageFitnesPerIteration': averageFitnessPerIteration,
        'fitnessDeviationPerIteration': fitnessDeviationPerIteration,
    }
    return metrics
newdata[y][x] = addpixels(pixel, newdata[y][x]) #countpixels[y][x] += len(ray) #newdata = [[(r//(num_proj-1), g//(num_proj-1), b//(num_proj-1), 255) for (r,g,b,a) in [newdata[y][x]] for x in range(width)] for y in range(height)] #newdata_aslist = [(r//(num_proj-1), g//(num_proj-1), b//(num_proj-1), 255) for row in newdata for (r,g,b,a) in row] #newdata_aslist = [(r//num_proj, g//num_proj, b//num_proj, 255) for row,countingrow in zip(newdata,countpixels) for ((r,g,b,a),c) in zip(row,countingrow)] newdata_aslist = [(r // num_proj, g // num_proj, b // num_proj, 255) for row in newdata for (r, g, b, a) in row] #newimg.paste(newdata, box=(0,width,0,height)) print('reconstructed image; about to renormalize variance') import statistics newr, newg, newb = zip(newdata_aslist) newmeanr, newmeang, newmeanb = (statistics.mean(l) for l in (newr, newg, newb)) newstddevr, newstddevg, newstddevb = (statistics.stddev(l, mean) for l, mean in ((newr, newmeanr), (newg, newmeang), (newb, newmeanb))) oldlst = [img.getpixel((x, y)) for y in range(height) for x in range(width)] oldr, oldg, oldb = zip(oldlst) oldmeanr, newmeang, newmeanb = (statistics.mean(l) for l in (newr, newg, newb)) newstddevr, newstddevg, newstddevb = (statistics.stddev(l, mean) for l, mean in ((newr, newmeanr), (newg, newmeang), (newb, newmeanb))) newimg.putdata(newdata_aslist) print('reconstructed image') # for k,(ray0, proj) in enumerate(zip(lst_of_rays, lst_of_projections)):
cdP.append(float(tmp[13])) mdP.append(float(tmp[12])) x.append (calc_hum[-1]) y.append (supply_humid[-1]) diff.append (round(calc_hum[-1]-supply_humid[-1] ,3)) if diff[-1]< -35:pass # print tmp[0] except IndexError:inside_hum.append(0) except ValueError: pass #print tmp[0] except ZeroDivisionError:pass except: traceback.print_exc() except:traceback.print_exc() #print "\n",tm.ctime() print "\nStart: " + tm.ctime(float(-time[0]+tm.time())) print "max",max(diff),"% min",min(diff),"%" ave= stat.mean(diff) ave ,stddev = stat.stddev(diff) tmp = "Differential stddev: "+"+-"+ str(round(stddev,2))+ "% Differential mean:"+str(round(ave,2))+"%\n" #tmp = "stddev: "+u"\u00B1"+ str(round(stddev,2))+ "% ave:"+str(round(ave,2))+"%" tmp += "Last: "+ str(calc_hum[-1]-supply_humid[-1])+'%' print tmp print "\nRelative Humidity (%)" print "Measured:" mave, mstddev = stat.stddev(x) print "Mean:",mave,"Stddev:",mstddev ave, stddev = stat.stddev(y) rmsError = stat.rmsError(x,y) mae = stat.meanAbsError(x,y) print "Calculated:" print "Mean:",ave,"Stddev:",stddev print "\nCorrelation coeficient\t",round(stat.correlation(x,y),2) #print "MeanErrorSquared", stat.meanErrorSq(x,y)
def test_stddev_order(self):
    """stddev() should fail if arguments not ordered from min to max."""
    # Callable form of assertRaises — equivalent to the context-manager form.
    self.assertRaises(ValueError, st.stddev, 5, 1)
def test_stddev_values(self):
    """stddev() should fail if one of the arguments less or equal 0."""
    # BUGFIX: both calls previously shared a single assertRaises block, so
    # the second call was never executed (the first raise exits the block).
    # Each invalid input now gets its own assertion.
    with self.assertRaises(ValueError):
        st.stddev(0, 1)
    with self.assertRaises(ValueError):
        st.stddev(-1, 2)
def test_stddev(self):
    """Test stddev() for known input."""
    actual = st.stddev(2, 8)  # general case
    self.assertEqual(actual, 1)