def meanDrops(drop):
    """Collapse one droplet's measurement rows into a single summary row.

    Parameters
    ----------
    drop : sequence of rows; each row looks like [id, voltage, fast, slow]
        (assumed layout inferred from the indices used — TODO confirm
        against the caller).

    Returns
    -------
    np.ndarray of [id, v0, v_fast, v_slow] where v0 is the last non-zero
    voltage seen and the velocities are scaled by the module-level
    `distance`, carrying ufloat uncertainties when >= 2 samples exist.
    """
    fast_samples = [row[2] for row in drop]
    slow_samples = [row[3] for row in drop]

    # Keep the last non-zero voltage found in the rows.
    v0 = 0
    for row in drop:
        if row[1] != 0:
            v0 = row[1]

    if len(fast_samples) < 2:
        # A single sample has no spread, so no uncertainty is attached.
        v_fast = fast_samples[0] * distance
        v_slow = slow_samples[0] * distance
    else:
        v_fast = ufloat(f.mean(fast_samples), f.stdDevOfMean(fast_samples)) * distance
        v_slow = ufloat(f.mean(slow_samples), f.stdDevOfMean(slow_samples)) * distance

    # NOTE(review): the original cleared its temporary lists with
    # `del help1[:]` / `del help2[:]` right before returning — dead code
    # (locals are discarded on return anyway), removed.
    return np.array([drop[0][0], v0, v_fast, v_slow])
def __init__(self, output_size=5, hidden_size=64, layers=4):
    """Build the convolutional feature extractor and the linear classifier head."""
    super().__init__()
    self.hidden_size = hidden_size
    self.base = ConvBase(
        output_size=hidden_size,
        hidden=hidden_size,
        channels=1,
        max_pool=False,
        layers=layers,
    )
    # Reshape flat inputs into 1x28x28 images, run the conv base, then
    # average over the two spatial axes and flatten the result.
    to_image = kl.Lambda(lambda x: F.reshape(x, (-1, 1, 28, 28)))
    spatial_mean = kl.Lambda(lambda x: kf.mean(x, axis=[2, 3]))
    self.features = M.Sequential(to_image, self.base, spatial_mean, kl.Flatten())
    self.classifier = M.Linear(hidden_size, output_size, bias=True)
def test_strategy(strategy):
    """Play many games with `strategy` and print summary statistics."""
    num_games = 10000
    scores = [play(strategy=strategy, output=False)[0] for _ in range(num_games)]
    # Percentage of games that ended with a zero ("Farkel") score.
    farkel_pct = sum(1 for score in scores if score == 0) / num_games * 100
    print("Statistics over %d games" % num_games)
    print("------------------------")
    print("Mean score : %d" % mean(scores))
    print("Farkel %% : %d%%" % farkel_pct)
    print("Hi-score : %d" % max(scores))
def standardize(reader, class_count):
    """Z-score every numeric attribute column of the rows read from `reader`.

    The last `class_count` columns are treated as class labels and left
    untouched. Mutates the rows in place and returns them.
    """
    rows = list(reader)
    attribute_count = len(rows[0]) - class_count
    for col in range(attribute_count):
        # Only standardize columns whose first value looks numeric.
        if not check_digit(rows[0][col]):
            continue
        column_values = [row[col] for row in rows]
        col_mean = functions.mean(column_values)
        col_stddev = functions.standard_deviation(column_values, col_mean)
        for row in rows:
            row[col] = (float(row[col]) - col_mean) / col_stddev
    return rows
def random_point(dim):
    """Return a point with `dim` coordinates drawn uniformly from [0, 1)."""
    return [random.random() for _ in range(dim)]


def random_distances(dim, num_pairs):
    """Return the distances between `num_pairs` random point pairs in `dim`-D."""
    return [
        fnc.distance(random_point(dim), random_point(dim))
        for _ in range(num_pairs)
    ]


dimensions = range(1, 101)
avg_distances = []
min_distances = []
# NOTE(review): the original also initialized an unused `min_ave_ratio`
# list (a typo of `min_avg_ratio` below) — removed as dead code.
random.seed(0)
for dim in dimensions:
    distances = random_distances(dim, 100)  # 100 random pairs per dimension
    avg_distances.append(fnc.mean(distances))  # track the average
    min_distances.append(min(distances))  # track the minimum

min_avg_ratio = [
    min_dist / avg_dist
    for min_dist, avg_dist in zip(min_distances, avg_distances)
]

# plt.plot(dimensions, avg_distances, min_distances)
plt.plot(dimensions, min_avg_ratio)
plt.show()
def mMean(data, cls, cl):
    """Per-column mean vector (parameter calculated from the training data).

    Iterates the columns of `data` via its transpose and delegates the
    actual mean computation to f.mean, forwarding the class labels `cls`
    and the target class `cl`.
    """
    return [f.mean(column, cls, cl) for column in data.T]
# Mean of the distance samples (NaNs already filtered out upstream).
Mean = math.fsum(without_nans_distance) / len(without_nans_distance)
print('Mean : ', format(Mean))

# Squared deviations from the mean, used for the SD below.
new_list = [abs(Mean - d) ** 2 for d in without_nans_distance]
# NOTE(review): the original also accumulated new_list into a variable
# named `sum` (shadowing the builtin) and never used the result — dead
# code, removed; the SD print computes math.fsum(new_list) directly.
print('SD :', format(math.sqrt(math.fsum(new_list) / len(without_nans_distance))))
print(math.fsum(without_nans_distance))

Mean_Predator = fn.mean(without_nans_pdr)
Predator_SD = fn.standard_daviation(without_nans_pdr, Mean_Predator)
Mean_Prey = fn.mean(without_nans_pr)
Prey_SD = fn.standard_daviation(without_nans_pr, Mean_Prey)
Mean_Distance = fn.mean(without_nans_distance)
Distance_SD = fn.standard_daviation(without_nans_distance, Mean_Distance)

print('Predator Speed Mean : {predmean}\nPredator Speed SD : {predsd}'.format(
    predmean=Mean_Predator, predsd=Predator_SD))
print('Prey Speed Mean : {premean}\nPrey Speed SD : {presd}'.format(
    premean=Mean_Prey, presd=Prey_SD))
print('Distance Mean : {distmean}\nDistance SD : {distsd}'.format(
    distmean=Mean_Distance, distsd=Distance_SD))
print()
test_size = int(0.1 * data.shape[0])
X_train = data[:train_size, :2]
y_train = data[:train_size, -1]
X_test = data[train_size:train_size + test_size, :2]
y_test = data[train_size:train_size + test_size, -1]

dataset_sizes = [100, 500, 1000, 2000, 4000]
lossfunction = np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]])
accuracies = []

for ds in dataset_sizes:
    # 20 replications per dataset size.
    test_accuracy = []
    for i in range(20):
        # BUG FIX: the original shuffled X_train and y_train independently
        # (two separate np.random.shuffle calls), which destroys the
        # pairing between samples and their labels. Draw one permutation
        # and apply it to both arrays instead.
        perm = np.random.permutation(X_train.shape[0])
        X_train = X_train[perm]
        y_train = y_train[perm]
        X = X_train[:ds]
        y = y_train[:ds]
        classes, prior = f.getPrior(y)
        means = np.array(f.getMLE(X, y))
        cov_rand = f.getCovMatrix(np.transpose(X_train))
        train_pred, train_acc = f.getModel(X, y, means, cov_rand,
                                           lossfunction, prior, "bayes")
        test_pred, test_acc = f.getModel(X_test, y_test, means, cov_rand,
                                         lossfunction, prior, "bayes")
        test_accuracy.append(test_acc)
    accuracies.append(f.mean(test_accuracy))

print(accuracies)
h = 6.626070040e-34 m_0 = 9.10938356e-31 def richardson(T,I_S): arbeit = [] for i in range(len(T)): arbeit.append(k_B*T[i]* unp.log((4*np.pi*e_0*m_0*f_diode2*(k_B**2) * (T[i]**2))/((h**3)*I_S[i]))) return arbeit Austrittsarbeit = richardson(temperature_kathode, saturation_current) print("Austrittsarbeit", Austrittsarbeit) arbeitswert,arbeit_err = plot.extract_error(Austrittsarbeit) print("Mittelwert:",f.mean(arbeitswert), f.abweichung(ufloat(f.mean(arbeitswert),f.stdDevOfMean(arbeitswert)), 4.5e-19)) fehler_arbeit = f.stdDevOfMean(arbeitswert)*10**19 mittel = f.mean(arbeitswert)*10**19 arbeityeah = [ufloat(mittel,fehler_arbeit)] arbeitbla = copy.deepcopy(Austrittsarbeit) d.make_it_SI2(arbeitbla,19) print("ARBEIT:", arbeityeah) write('../tex-data/arbeit.tex', make_table([[1,2,3,4,5],arbeitbla], [0,2])) def appendstuff(q1,q2):
# Flatten the image into a list of per-pixel channel lists.
rows = [[channel for channel in pixel] for row in eyeImg for pixel in row]
# print(rows)
# cv2.imshow("img",temp)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

plt.imshow(eyeImg)
plt.title("original image")
plt.show()

# Mean-center the pixel data and compute its covariance eigendecomposition.
mn = f.mean(rows, len(rows), 3)
zm = f.zeroMean(rows, mn, len(rows), 3)
cov = f.cv(zm, len(rows), 3)
eigMat = np.linalg.eig(cov)
print(eigMat, end='\n\n\n')

# Pair each eigenvalue with its eigenvector column.
eigVal = [[eigMat[0][i], eigMat[1][i]] for i in range(3)]
# eigVal.sort(key=lambda x: x[0], reverse=True)
# print((eigVal))

eigSum = sum(eigMat[0][i] for i in range(3))
# print(eigSum)

k = 1
# sum = 0
# for i in eigMat[0]:
def Mean(self):
    """Arithmetic mean of the stored bin values."""
    bins = self.__bins
    return mean(bins)
def mean(self, *args, **kwargs):
    """Thin wrapper: delegate to F.mean with this object as the first argument,
    forwarding all positional and keyword arguments unchanged."""
    return F.mean(self, *args, **kwargs)
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 31 13:23:09 2021

@author: Ingo
"""
import functions as uf
import math

# Compare the class mean before and after adding five points to each score.
scores = [88, 92, 79, 93, 80]
original_mean = uf.mean(scores)
curved_scores = uf.add_five(scores)
curved_mean = uf.mean(curved_scores)

print("Scores:", scores)
print("Original Mean:", original_mean, " New Mean:", curved_mean)
print(__name__)
print(uf.__name__)
rangeList2 = []  # Going to be sorted from first to last
meanList1 = []   # Going to be sorted from first to last
meanList2 = []   # Going to be sorted from first to last

# print(frequency)
# print(csv_files)

# Take the directory and append the range/mean of each analog column.
for file in functions.sort(csv_files):
    # Read and parse each column once and reuse it for both statistics —
    # the original re-read the same file column twice (once for the range,
    # once for the mean).
    analog1 = functions.convertToFloat(functions.columnList(path + file, 0))
    analog2 = functions.convertToFloat(functions.columnList(path + file, 1))
    rangeList1.append(functions.getRange(analog1))
    rangeList2.append(functions.getRange(analog2))
    meanList1.append(functions.mean(analog1))
    meanList2.append(functions.mean(analog2))
    print("File: " + file)

# print(functions.sort(csv_files))
# print(rangeList1)
# print(rangeList2)

difference_mean = functions.subtract(meanList1, meanList2)
difference_range = functions.subtract(rangeList1, rangeList2)
divide_mean = functions.divide(meanList2, meanList1)
divide_range = functions.divide(rangeList1, rangeList2)
from functions import mean
import data

# Convert the stored moisture readings to floats and report their mean.
moisture_readings = [float(value) for value in data.get("Moisture")]
print(mean(moisture_readings))