def pearson(x, y):
    """Return the Pearson correlation coefficient of sequences *x* and *y*.

    Computed as (E[xy] - E[x]E[y]) / (sd(x) * sd(y)) using the module's
    `mean` and `sd` helpers.  Returns 0.0 when either sequence has zero
    standard deviation, since the correlation is undefined for a constant
    series (the original guarded only sd(x), so a constant *y* raised
    ZeroDivisionError).
    """
    # Pair elements positionally; x and y are expected to be equal length.
    products = [a * b for a, b in zip(x, y)]
    sx = sd(x)
    sy = sd(y)
    if sx == 0 or sy == 0:
        return 0.0
    return (mean(products) - mean(x) * mean(y)) / (sx * sy)
def wbb(self, i):
    """Return the width of the band around the moving average at index *i*.

    The moving-average series for *i* is served from the per-instance
    cache ``self.madic`` and computed via ``self.ma`` on a miss; the
    result is the distance between the upper and lower band around the
    series' last value.
    """
    try:
        ma = self.madic[i]
    except KeyError:
        ma = self.ma(self.ma_n, i)
        self.madic[i] = ma
    print("MOVING AVERAGES: ", ma)
    half_width = sd(ma)
    upper = ma[-1] + half_width
    lower = ma[-1] - half_width
    return upper - lower
def normalize_column(data: DataFrame, column: str):
    """Z-score the named column: (value - mean) / standard deviation.

    Returns a new Series; *data* itself is not modified.
    """
    values = data[column]
    center = mean(values)
    spread = sd(values)
    return values.map(lambda v: (v - center) / spread)
def getSummary(self, e=None):
    """Build a human-readable summary of the recorded FPS test runs.

    :param e: optional per-test result object with ``fps``, ``frames``
        and ``time`` attributes; when given, details of that run are
        prepended to the aggregate statistics.
    :return: CRLF-separated summary string.

    Note: ``statistics.stdev`` raises StatisticsError when ``self.fps``
    has fewer than two entries.
    """
    string = ""
    #string = "Demo: " +self.lastDemoName +" Map: " + Demo_GetWorld(self.lastDemoName) +"\r\n"; #TODO
    if e is not None:
        string += "Test number: " + str(len(self.fps)) + "\r\n"
        string += "FPS: " + str(e.fps) + "\r\n"
        string += "Frames Drew: " + str(e.frames) + "\r\n"
        string += "Run Time: " + str(e.time) + "\r\n"
    string += "Number of tests: " + str(len(self.fps)) + "\r\n"
    string += "Average FPS: " + str(statistics.mean(self.fps)) + "\r\n"
    # Bug fix: the statistics module has no `sd` attribute — the sample
    # standard deviation function is `stdev`.
    string += "Std Dev FPS: " + str(statistics.stdev(self.fps)) + "\r\n"
    return string
# Demonstrate pulling names out of the statistics module under aliases.
from statistics import stdev as sd, mean as m

example_list = [3, 5, 7, 8, 23, 2, 5, 3, 51, 52, 626, 62, 623]

# Sample standard deviation, then the mean.
x = sd(example_list)
print(x)
x = m(example_list)
print(x)

# Equivalent module-qualified usage, kept for reference:
# import statistics as s
# example_list = [3, 5, 7, 8, 23, 2, 5, 3, 51, 52, 626, 62, 623]
# print(s.mean(example_list))
# print(s.median(example_list))
# print(s.stdev(example_list))
# print(s.variance(example_list))
# Several equivalent ways of importing from the statistics module,
# all applied to the same sample list.
import statistics as s
from statistics import mean, stdev
from statistics import mean as m, stdev as sd
from statistics import *

exList = [5, 2, 3, 7, 8, 9, 4, 5, 6, 1, 0, 3, 5]

# One summary statistic per line, module-qualified.
for stat in (s.mean, s.median, s.mode, s.stdev, s.variance):
    print(stat(exList))

# Same values via directly-imported and aliased names.
print(mean(exList))
print(m(exList))
print(mean(exList), stdev(exList))
print(m(exList), sd(exList))
print(mean(exList), stdev(exList))
# NOTE(review): fragment of a benchmarking script — the first two appends
# presumably sat inside a timing loop over (v, e) in the full file; this
# flat reconstruction should be confirmed against the original.
list_performance.append((v, e, end_time - start_time))
gg.append(end_time - start_time)

end_time_big = time.time()

# Dump every recorded (v, e, elapsed) row to the spreadsheet and save
# the workbook to Google Drive.
for row in list_performance:
    sheet.append(row)
filepath = "/content/drive/My Drive/301_Project/" + str(v) + '_' + str(i)
wb.save(filepath)

# Critical t value for a 95% one-sided interval with `i` degrees of freedom.
# NOTE(review): `from scipy.stats import t` only appears at the bottom of
# this chunk — verify the import actually executes before this line in
# the full file.
t_val = t.ppf(0.95, i)

# Hand-rolled mean of the elapsed times (sum(gg) / len(gg) equivalent).
mean = 0
for i in range(len(gg)):  # NOTE(review): clobbers the `i` used above
    mean += gg[i]
mean = mean / len(gg)

st_dev = sd(gg)
# Standard error of the mean.
# NOTE(review): after the loop i == len(gg) - 1, so dividing by sqrt(i)
# is likely off by one versus st_dev / sqrt(len(gg)) — confirm intent.
std_mean_err = st_dev / i**(0.5)
low = mean - t_val * std_mean_err
high = mean + t_val * std_mean_err

print("#vertex: ", n)
print("#try:", i)
print('mean: ', mean)
print(low, high)
print('time:', end_time_big - start_time_big)

from scipy.stats import t
tt = 0
# NOTE(review): the first two statements below are the tail of a
# readFasta(...) definition whose header lies outside this chunk; the
# indentation here is a best-effort reconstruction.
        fasta[key] += line.strip()  # append a sequence line to the current record
    return fasta

# Iterate over the fasta files in a directory, collecting per-file
# sequence-length and gap-count statistics as newline-terminated strings.
d = 'LinkerFastasNew'
gl_means = []
gl_sds = []
gl_gap_means = []
gl_gap_sds = []
for fn in ls(d):  # ls() presumably lists the directory's file names — confirm helper
    # NOTE(review): the file handle passed to readFasta is never closed.
    fasta = readFasta(open(d + '/' + fn, 'r'))
    lengths = []
    gaps = []
    for entry in fasta:
        lengths.append(len(fasta[entry]))
        gaps.append(fasta[entry].count('-'))
    gl_means.append(str(sum(lengths) / len(lengths)) + '\n')
    gl_sds.append(str(sd(lengths)) + '\n')
    # NOTE(review): divides the gap total by len(lengths); same value as
    # len(gaps) since both lists grow in lockstep, but len(gaps) would
    # read more directly.
    gl_gap_means.append(str(sum(gaps) / len(lengths)) + '\n')
    gl_gap_sds.append(str(sd(gaps)) + '\n')

# NOTE(review): all four output files are bound to the same variable and
# none is explicitly closed — a `with open(...)` per file would be safer.
out_gl_means = open('Stats/' + d + '_means.txt', 'w')
out_gl_means.writelines(gl_means)
out_gl_means = open('Stats/' + d + '_sds.txt', 'w')
out_gl_means.writelines(gl_sds)
out_gl_means = open('Stats/' + d + '_gap_means.txt', 'w')
out_gl_means.writelines(gl_gap_means)
out_gl_means = open('Stats/' + d + '_gap_sds.txt', 'w')
out_gl_means.writelines(gl_gap_sds)
def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):
    """Build the list of features describing a single tap.

    :param user_id: identifier of the user who produced the tap
    :param session: session identifier (also used to name saved plots)
    :param tap_feature: column whose coded values mark the tap phases
    :param task_name: name of the task being performed
    :param window: a DataFrame with just the data needed for the computation
    :return: a flat feature list, or None when *window* is empty
    """
    if window.shape[0] == 0:
        return None

    # Identifying metadata first.
    features = [user_id, session, task_name]

    # Phone orientation: most common value over the window.
    orientation = mode(window['Phone_orientation_accel'])
    features.append(orientation)

    # Tap type.
    features.append(tap_feature)

    # Time column is taken from the accelerometer file's x-column mapping
    # (x_columns / y_columns / file_names are module globals defined
    # elsewhere in this file).
    lead_file = 'Accelerometer.csv'
    time_col = x_columns[lead_file]

    # Phase boundaries: the coded values 4/2/3/5 in the tap_feature column
    # appear to mark before/during/after/end positions — TODO confirm the
    # coding against the data-collection docs.
    before_start = window[window[tap_feature] == 4].index[0]
    during_start = window[window[tap_feature] == 2].index[0]
    after_start = window[window[tap_feature] == 3].index[0] + 1
    after_end = window[window[tap_feature] == 5].index[0]

    before = window.loc[before_start:during_start]
    during = window.loc[during_start:after_start]
    after = window.loc[after_start:after_end + 1]

    if during.shape[0] < 2:
        # If there were none or one measurements during the tap,
        # add the closest ones
        during = window[during_start - 1:after_start + 1]

    for file_name in file_names:
        for y in y_columns[file_name]:
            # Feature 1: Mean during
            mean_during = mean(during[y])

            # Feature 2: SD during
            sd_during = sd(during[y])

            # Feature 3: Difference before/after
            mean_before = mean(before[y])
            mean_after = mean(after[y])
            difference_before_after = mean_after - mean_before

            # Feature 4: Net change from tap
            net_change_due_to_tap = mean_during - mean_before

            # Feature 5: Maximal change from tap
            max_tap = max(during[y])
            max_change = max_tap - mean_before

            # Feature 6: Restoration time — for each suffix of the "after"
            # phase, average distance from the pre-tap mean; the minimum
            # is taken as the earliest restoration.
            avgDiffs = []
            for j in range(after[y].shape[0]):
                subsequentValues = after[y].iloc[j:]
                subsequentDistances = subsequentValues.map(
                    lambda x: abs(x - mean_before))
                averageDistance = mean(subsequentDistances)
                avgDiffs.append(averageDistance)
            time_of_earliest_restoration = min(avgDiffs)
            # NOTE(review): min(avgDiffs) is a distance in signal units,
            # yet it is subtracted from a timestamp here — verify this is
            # the intended definition of restoration time.
            restoration_time = time_of_earliest_restoration - during[
                time_col].iloc[-1]

            # Feature 7: Normalized duration — time between phase centers
            # over the before/after mean change.
            t_before_center = (before[time_col].iloc[0] +
                               before[time_col].iloc[-1]) / 2
            t_after_center = (after[time_col].iloc[0] +
                              after[time_col].iloc[-1]) / 2
            normalized_duration = (t_after_center - t_before_center) / (
                mean_after - mean_before)

            # Feature 8: Normalized duration max — time from the in-tap
            # maximum to the after-phase center over the value drop.
            t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]
            normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)

            features += [
                mean_during, sd_during, difference_before_after,
                net_change_due_to_tap, max_change, restoration_time,
                normalized_duration, normalized_duration_max
            ]

    # Randomly plot roughly 1% of taps for visual inspection.
    if random.choice(range(100)) == 0:
        plot_tap('Plots/Project/' + session, before, during, after, time_col)

    return features