def average_converted_analog_lists():
    global avg_CGR3_LW_WM2, avg_CGR3_TH_tempC, avg_Apg_SW_WM2, avg_Wind_dir_angle
    avg_CGR3_LW_WM2 = '%0.2f' % avg(CGR3_LW_WM2_list)
    avg_CGR3_TH_tempC = '%0.2f' % avg(CGR3_TH_tempC_list)
    avg_Apg_SW_WM2 = '%0.2f' % avg(Apg_SW_WM2_list)
    avg_Wind_dir_angle = '%0.2f' % avg(Wind_dir_angle_list)
    return avg_CGR3_LW_WM2, avg_CGR3_TH_tempC, avg_Apg_SW_WM2, avg_Wind_dir_angle
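# Many snippets in this collection call a bare avg() helper that is never
# defined alongside them. A minimal sketch of the assumed arithmetic-mean
# helper (an assumption -- the original projects may instead alias
# numpy.average or statistics.mean):
def avg(values):
    """Arithmetic mean of a non-empty sequence."""
    return sum(values) / len(values)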
def average_raw_analog_lists():
    global avg_CGR3_LW_V, avg_CGR3_TH_V, avg_Wind_dir_V, avg_Apg_SW_V
    avg_CGR3_LW_V = '%0.2f' % avg(CGR3_LW_V_list)
    avg_CGR3_TH_V = '%0.2f' % avg(CGR3_TH_V_list)
    avg_Wind_dir_V = '%0.2f' % avg(Wind_dir_V_list)
    avg_Apg_SW_V = '%0.2f' % avg(Apg_SW_V_list)
    return avg_CGR3_LW_V, avg_CGR3_TH_V, avg_Wind_dir_V, avg_Apg_SW_V
def GetSonde2():
    # peewee boolean filters must use `|`, not Python's `or`, to build SQL
    query = (db.Ballon1.select()
             .order_by(db.Ballon1.id.desc())
             .where((db.Ballon1.Sonde_haut != None) | (db.Ballon1.Sonde_bas != None))
             .limit(2))
    Sonde_haut = [row.Sonde_haut for row in query]
    Sonde_bas = [row.Sonde_bas for row in query]
    AvgTemp = (avg(Sonde_haut) + avg(Sonde_bas)) / 2
    # etalonne = 1.5
    # chan2 = AnalogIn(ads, ADS.P3)
    # R2 = calcResistance(chan2.voltage)
    # Temp2 = round(calcTemp(R2), 1) + etalonne
    #
    # chan3 = AnalogIn(ads, ADS.P2)
    # R3 = calcResistance(chan3.voltage)
    # Temp3 = round(calcTemp(R3), 1) + etalonne
    # AvgTemp = (Temp2 + Temp3) / 2
    # NB: "bas" is fed from Sonde_haut and vice versa, as in the original source
    return {"bas": {"temp": avg(Sonde_haut)}, "haut": {"temp": avg(Sonde_bas)}, "moyenne": AvgTemp}
def rendement(self, minute, save=True):
    date = datetime.now()
    date_range = date - timedelta(seconds=minute)
    query = db.Ballon1.select().order_by(db.Ballon1.id.desc()).where(
        db.Ballon1.date.between(date_range, date))
    Sonde = [row.moyenne_temperature for row in query
             if row.moyenne_temperature is not None and row.date is not None]
    if Sonde[-1] - avg(Sonde) <= 0:
        Sonde_rend1 = (max(Sonde) - min(Sonde)) / minute
    else:
        Sonde_rend1 = -(max(Sonde) - min(Sonde)) / minute
    # if round(Sonde_rend1 * 60, 2) < 7:
    #     print("Ballon 1 inlet estimate: -{}°C per hour".format(round(Sonde_rend1 * 60, 2)))
    # else:
    #     print("Ballon 1 inlet estimate: still computing...")
    query = db.Ballon2.select().order_by(db.Ballon2.id.desc()).where(
        db.Ballon2.date.between(date_range, date))
    Sonde = [row.moyenne_temperature for row in query
             if row.moyenne_temperature is not None and row.date is not None]
    if Sonde[-1] - avg(Sonde) <= 0:
        Sonde_rend2 = (max(Sonde) - min(Sonde)) / minute
    else:
        Sonde_rend2 = -(max(Sonde) - min(Sonde)) / minute
    # if round(Sonde_rend2 * 60, 2) < 7:
    #     print("Ballon 2 outlet estimate: -{}°C per hour\n".format(round(Sonde_rend2 * 60, 2)))
    # else:
    #     print("Ballon 2 outlet estimate: still computing...\n")
    if save:
        # Model.create() already persists the row; the extra save() is redundant but harmless
        db_save = db.Global_info.create(rend_ballon1=Sonde_rend1 * minute,
                                        rend_ballon2=Sonde_rend2 * minute,
                                        date=date)
        db_save.save()
    return Sonde_rend1, Sonde_rend2
def GetSonde1():
    # db.Ballon2()
    # db.Ballon1.select().order_by(db.Ballon1.id.desc()).get()
    # peewee boolean filters must use `|`, not Python's `or`, to build SQL
    query = (db.Ballon2.select()
             .order_by(db.Ballon2.id.desc())
             .where((db.Ballon2.Sonde_haut != None) | (db.Ballon2.Sonde_bas != None))
             .limit(2))
    Sonde_haut = [row.Sonde_haut for row in query]
    Sonde_bas = [row.Sonde_bas for row in query]
    AvgTemp = (avg(Sonde_haut) + avg(Sonde_bas)) / 2
    # etalonne = 1.5
    # chan0 = AnalogIn(ads, ADS.P0)
    # R0 = calcResistance(chan0.voltage)
    # Temp0 = round(calcTemp(R0), 1) + etalonne
    #
    # chan1 = AnalogIn(ads, ADS.P1)
    # R1 = calcResistance(chan1.voltage)
    # Temp1 = round(calcTemp(R1), 1) + etalonne
    # AvgTemp = (Temp0 + Temp1) / 2
    return {"haut": {"temp": avg(Sonde_haut)}, "bas": {"temp": avg(Sonde_bas)}, "moyenne": AvgTemp}
def avg_std(repeat_results):
    """
    Averages results from repeated runs (using repeat)
    :param List[Tuple[str, List[float], List[float], *]] repeat_results: The results from running repeat
    :return List[Tuple[str, Tuple[float, float], Tuple[float, float], *]]: A list of tuples containing
        each the diagram name, avg build time, avg execution time and operation result
    """
    from numpy import average as avg, std
    # tuple-unpacking lambdas are Python-2-only syntax; a comprehension is
    # equivalent and also fixes the std(b)/std(e) mix-up in the original
    return [(m, (avg(b), std(b)), (avg(e), std(e)), r)
            for m, b, e, r in repeat_results]
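# Hedged usage sketch for avg_std (names and timings are made up):
runs = [("diagram-a", [1.0, 1.2, 1.1], [3.0, 3.2, 3.1], "ok"),
        ("diagram-b", [0.5, 0.6, 0.4], [2.0, 2.1, 1.9], "ok")]
for name, build, execute, result in avg_std(runs):
    print(name, build, execute, result)  # name, (mean, std) build, (mean, std) exec, result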
def to_the_right_of(figure, ground=None):
    ret_val = 0
    if type(figure) == Entity and type(ground) == Entity:
        return to_the_right_of_deic(figure, ground)
    elif figure is None:
        # numpy has no np.avg; np.mean is the intended call
        ret_val = np.mean([to_the_right_of(entity, ground) for entity in world.active_context])
    elif ground is None:
        ret_val = np.mean([to_the_right_of(figure, entity) for entity in world.active_context])
    return ret_val
def average_digital_lists():
    global avg_outRH, avg_outTemp, avg_inRH, avg_inTemp, avg_pressure
    # HYT221
    avg_outRH = '%0.2f' % avg(outRH_list)
    avg_outTemp = '%0.2f' % avg(outTemp_list)
    # BME280
    avg_inRH = '%0.2f' % avg(inRH_list)
    avg_inTemp = '%0.2f' % avg(inTemp_list)
    avg_pressure = '%0.2f' % avg(pressure_list)
    str_mm_rain = str(mm_rain)
    return avg_outRH, avg_outTemp, avg_inRH, avg_inTemp, avg_pressure, str_mm_rain
def find_avg_lazy_load(filename):
    # filename is the name of an HDF5 file
    import h5py
    import numpy as np

    data = h5py.File(filename, 'r')
    predictor = data["predictor"]
    channel_num = predictor.shape[2]
    avg = np.zeros(channel_num)
    std = np.zeros_like(avg)
    for i in range(channel_num):
        # the original overwrote scalar avg/std each pass and used the
        # non-existent np.avg; fill one slot per channel instead
        avg[i] = np.mean(predictor[:, :, i])
        std[i] = np.std(predictor[:, :, i])
    data.close()
    return avg, std
def GetSonde2():
    # peewee boolean filters must use `|`, not Python's `or`, to build SQL
    query = (db.Ballon1.select()
             .order_by(db.Ballon1.id.desc())
             .where((db.Ballon1.Sonde_haut != None) | (db.Ballon1.Sonde_bas != None))
             .limit(1))
    Sonde_haut = [row.Sonde_haut for row in query]
    Sonde_bas = [row.Sonde_bas for row in query]
    AvgTemp = (avg(Sonde_haut) + avg(Sonde_bas)) / 2
    if avg(Sonde_haut) <= 0 or avg(Sonde_haut) >= 70:
        security(shutdown=True, msg="Sonde haut Ballon 2 value {} [{}] min 1, max 70".format(Sonde_haut, stylize("ERROR", colored.fg("red"))))
        exit(0)
    if avg(Sonde_bas) <= 0 or avg(Sonde_bas) >= 70:
        # the original reported Sonde_haut here; report the value actually checked
        security(shutdown=True, msg="Sonde bas Ballon 2 value {} [{}] min 1, max 70".format(Sonde_bas, stylize("ERROR", colored.fg("red"))))
        exit(0)
    # etalonne = 1.5
    # chan2 = AnalogIn(ads, ADS.P3)
    # R2 = calcResistance(chan2.voltage)
    # Temp2 = round(calcTemp(R2), 1) + etalonne
    #
    # chan3 = AnalogIn(ads, ADS.P2)
    # R3 = calcResistance(chan3.voltage)
    # Temp3 = round(calcTemp(R3), 1) + etalonne
    # AvgTemp = (Temp2 + Temp3) / 2
    return {"bas": {"temp": avg(Sonde_haut)}, "haut": {"temp": avg(Sonde_bas)}, "moyenne": AvgTemp}
def GetSonde1():
    # db.Ballon2()
    # db.Ballon1.select().order_by(db.Ballon1.id.desc()).get()
    # peewee boolean filters must use `|`, not Python's `or`, to build SQL
    query = (db.Ballon2.select()
             .order_by(db.Ballon2.id.desc())
             .where((db.Ballon2.Sonde_haut != None) | (db.Ballon2.Sonde_bas != None))
             .limit(1))
    # print(query[0].date)
    Sonde_haut = [row.Sonde_haut for row in query]
    Sonde_bas = [row.Sonde_bas for row in query]
    AvgTemp = (avg(Sonde_haut) + avg(Sonde_bas)) / 2
    if avg(Sonde_haut) <= 0 or avg(Sonde_haut) >= 70:
        security(shutdown=True, msg="Sonde haut Ballon 1 value {} [{}] min 1, max 70".format(Sonde_haut, stylize("ERROR", colored.fg("red"))))
        exit(0)
    if avg(Sonde_bas) <= 0 or avg(Sonde_bas) >= 70:
        # the original reported Sonde_haut here; report the value actually checked
        security(shutdown=True, msg="Sonde bas Ballon 1 value {} [{}] min 1, max 70".format(Sonde_bas, stylize("ERROR", colored.fg("red"))))
        exit(0)
    # etalonne = 1.5
    # chan0 = AnalogIn(ads, ADS.P0)
    # R0 = calcResistance(chan0.voltage)
    # Temp0 = round(calcTemp(R0), 1) + etalonne
    #
    # chan1 = AnalogIn(ads, ADS.P1)
    # R1 = calcResistance(chan1.voltage)
    # Temp1 = round(calcTemp(R1), 1) + etalonne
    # AvgTemp = (Temp0 + Temp1) / 2
    return {"haut": {"temp": avg(Sonde_haut)}, "bas": {"temp": avg(Sonde_bas)}, "moyenne": AvgTemp}
def check_y_scale(*args, **kwargs):
    need_scale = False
    make_bigger = False
    y_array = args[0]
    length_of_y = len(y_array)
    np_y_array = np.array(y_array)
    max_y = np_y_array.max()
    minimum_limit_of_y = 0.985 * max_y
    array_of_greater_y = np_y_array[np_y_array >= minimum_limit_of_y]
    average = avg(np_y_array)
    if len(array_of_greater_y) > 0.15 * length_of_y:
        # print("out of limits and saturated")
        # print(array_of_greater_y)
        # print(max_y)
        need_scale = True
        make_bigger = True
    elif max_y < 1.3 * average:
        need_scale = True
        make_bigger = False
    else:
        need_scale = False
        make_bigger = False
    print("DEBUG LIMITS CHECK", need_scale, max_y, make_bigger, average)
    return need_scale, max_y, make_bigger
def createDataset(path, char_to_int, seq_length, args):
    text = open(args.path, "r").read()
    if args.predict_type == "word":
        text = getVocab(args, False)
    x = []
    y = []
    for i in range(0, len(text) - seq_length, 1):
        x.append([char_to_int[c] for c in text[i:i + seq_length]])
        y.append(char_to_int[text[i + seq_length]])
    x = numpy.reshape(x, (len(x), seq_length, 1))
    if args.normalize_inputs:
        # numpy has no numpy.avg; standard z-score normalisation is
        # (x - mean) / std, which the original `x/mean - std` likely intended
        mean = numpy.mean(x)
        std = numpy.std(x)
        x = (x - mean) / std  # x / float(len(char_to_int))
    del text
    y = to_categorical(y)
    print(x.shape, y.shape)
    return x, y
def reconstruct_with_aggregation(run_file, output, name):
    uniques = defaultdict(dict)
    with open(run_file, 'r') as f:
        for line in f.readlines():
            if len(line.strip()) > 0:
                tokens = line.strip().split()
                doc_id, q_id = tokens[2].split('_')
                score = float(tokens[4])
                if doc_id not in uniques[q_id]:
                    uniques[q_id][doc_id] = [score]
                else:
                    uniques[q_id][doc_id].append(score)
    # average scores:
    scores = defaultdict(dict)
    for q in tqdm(uniques):
        for doc_id in uniques[q]:
            scores[q][doc_id] = round(avg(uniques[q][doc_id]), 6)
    # write results:
    with open(join(output, "trec_" + name + ".txt"), 'w') as out:
        sorted_scores = scores  # dict(sorted(scores.items(), key=lambda kv: kv[1]))
        for q in tqdm(sorted_scores):
            sorted_docs = sorted(sorted_scores[q].items(), key=operator.itemgetter(1), reverse=True)
            rank = range(1, len(sorted_docs) + 1)
            iter_rank = iter(rank)
            q_results = ["{q} Q0 {d} {i} {s} {m}".format(q=q, d=e[0], i=next(iter_rank), m=name, s=e[1])
                         for e in sorted_docs]
            out.write('\n'.join(q_results) + '\n')
def Average(dyNetwork):
    for node1, node2 in dyNetwork._network.edges(data=False):
        tot_queue1 = dyNetwork._network.nodes[node1]['sending_queue']
        tot_queue2 = dyNetwork._network.nodes[node2]['sending_queue']
        # numpy has no np.avg; np.mean is the intended call
        avg = np.mean([tot_queue1, tot_queue2])
        dyNetwork._network[node1][node2]['edge_delay'] = avg
        del tot_queue1, tot_queue2
def security(shutdown=False, msg=""):
    sonde1 = GetSonde1()
    sonde2 = GetSonde2()
    if sonde1['moyenne'] > 80 or sonde2['moyenne'] > 80:
        shutdown = True
    elif sonde1['moyenne'] < 0 or sonde2['moyenne'] < 0:
        shutdown = True
    query = (db.Global_info.select()
             .order_by(db.Global_info.id.desc())
             .where(db.Global_info.sonde_interne != None)
             .limit(1))
    Sonde_interne = [row.sonde_interne for row in query]
    Sonde_interne_date = [row.date for row in query]
    Avg_sonde_interne = avg(Sonde_interne)
    now = datetime.now()
    try:
        duration = now - Sonde_interne_date[-1]
        duration_in_s = duration.total_seconds()
    except IndexError:
        PWMControl.control(1, 0)
        PWMControl.control(2, 0)
        print("Security STOP ALL: internal temperature ERROR, value is {}°C".format(Avg_sonde_interne))
        exit(0)
    if Avg_sonde_interne >= 50:
        PWMControl.control(1, 0)
        PWMControl.control(2, 0)
        print("Security STOP ALL: internal temperature above 50°C, value is {}°C".format(Avg_sonde_interne))
        lcd_device = lcd()
        lcd_device.lcd_clear()
        date = datetime.now().strftime('%d, %b %Y %H:%M')
        lcd_device.lcd_display_string(" {}".format(date), 1)
        # the original passed a stray .format(self.pos) here; there is no self in this function
        lcd_device.lcd_display_string("Error System Temp. interne", 2)
        exit(0)
    # elif duration_in_s >= 120:
    #     channelVoltage(1, 0, disable=True)
    #     channelVoltage(2, 0, disable=True)
    #     print("Security: unable to check a recent internal temperature value... last: {}s [{}]".format(round(duration_in_s), stylize("ERROR", colored.fg("red"))))
    #     lcd_device = lcd()
    #     lcd_device.lcd_clear()
    #     date = datetime.now().strftime('%d, %b %Y %H:%M')
    #     lcd_device.lcd_display_string(" {}".format(date), 1)
    #     lcd_device.lcd_display_string("System Temp. interne", 2)
    #     lcd_device.lcd_display_string("last: {}s [{}]".format(round(duration_in_s), "ERROR"), 3)
    #     exit(0)
    # else:
    #     print("\nSystem temperature check ... {}°C last: {}s [{}] \n".format(Avg_sonde_interne, round(duration_in_s), stylize("OK", colored.fg("green"))))
    if shutdown:
        print(msg)
        PWMControl.control(1, 0)
        PWMControl.control(2, 0)
        exit(0)
def compute(self, tr, lm=None):
    ret_val = 0
    if type(tr) == Entity and type(lm) == Entity:
        connections = self.get_connections()
        return max(connections[0].compute(tr, lm),
                   connections[1].compute(tr, lm),
                   connections[2].compute(tr, lm))
    elif lm is None:
        # numpy has no np.avg; np.mean is the intended call
        ret_val = np.mean([self.compute(tr, entity) for entity in world.active_context])
    return ret_val
def compute(self, figure, ground=None):
    ret_val = 0
    if type(figure) == Entity and type(ground) == Entity:
        left_connection = self.get_connections()
        return max(left_connection[0].compute(figure, ground),
                   left_connection[1].compute(figure, ground),
                   left_connection[2].compute(figure, ground))
    elif ground is None:
        # numpy has no np.avg; np.mean is the intended call
        ret_val = np.mean([self.compute(figure, entity) for entity in world.active_context])
    return ret_val
def count_final_rating(self):
    """
    This method creates the output structure in JSON based on JsonStructure.
    """
    json_struct = []
    for author in self.authors.keys():
        avg_comm = avg(self.authors[author]["commit_ratings_one"])
        avg_comm_two = avg(self.authors[author]["commit_ratings_two"])
        metrics = avg(self.authors[author]["metric"])
        self.authors[author]["hyphotetical_rating_one"] = avg_comm
        self.authors[author]["hyphotetical_rating_two"] = avg_comm_two
        avg_list_pylint = avg(self.authors[author]["pylint"])
        self.authors[author]["pylint_rating"] = avg_list_pylint
        self.authors[author]["radon_rating"] = metrics
        json_dict = JsonStructure().return_structure()
        json_dict["Name"] = author
        # the original assigned Average_Pylint twice (the second time with the
        # metrics value), silently overwriting the pylint average
        json_dict["Average_Pylint"] = avg_list_pylint
        json_dict["Average_Software_Metrics"] = metrics
        json_dict["Average_Commits_Ratings_one"] = avg_comm
        json_dict["Average_Commits_Ratings_two"] = avg_comm_two
        json_dict["Added_lines"] = self.authors[author]["added"]
        json_dict["Removed_lines"] = self.authors[author]["removed"]
        json_dict["All_commits"] = self.authors[author]["Commit_count"]
        json_dict["Pylint_positive"] = self.authors[author]["pylint+"]
        json_dict["Pylint_negative"] = self.authors[author]["pylint-"]
        json_dict["Most_modified_file"] = self.authors[author]["MMFile"]
        json_dict["Commits_to_most_modified_file"] = self.authors[author]["CCMMFile"]
        json_dict["Data"] = []
        ratings = self.authors[author]["time"]
        for date in ratings:
            for idx in ratings[date]:
                if "pylint" not in idx:
                    continue
                json_dict["Data"].append(dict(
                    Pylint_Rating=idx["pylint"],
                    Date=str(date),
                    Commit_Rating_v_1=idx["rating_one"],
                    Commit_Rating_v_2=idx["rating_two"],
                    Software_Metrics_Rating=idx["metric"]
                ))
        json_struct.append(json_dict)
    return json_struct
def avg_pool(img, kernal, stride):
    stride = min(stride, img.shape[0])  # the original clamped against an undefined `j`
    nx = (img.shape[0] - kernal.shape[0]) // stride + 1
    ny = (img.shape[1] - kernal.shape[1]) // stride + 1
    z = np.zeros([nx, ny])  # float output; the original's dtype=int truncated the averages
    for i in range(nx):
        for k in range(ny):
            # step windows by `stride` (the original indexed by i/k directly,
            # ignoring it) and use np.mean -- numpy has no np.avg
            z[i][k] = np.mean(img[i * stride:i * stride + kernal.shape[0],
                                  k * stride:k * stride + kernal.shape[1]])
    return z
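# Hedged usage sketch for avg_pool (values are illustrative; assumes numpy is
# imported as np at module level, as avg_pool itself does): a 4x4 image pooled
# with a 2x2 window and stride 2 yields a 2x2 output.
import numpy as np

img = np.arange(16, dtype=float).reshape(4, 4)
kernal = np.ones((2, 2))  # only its shape is used by avg_pool
print(avg_pool(img, kernal, 2))  # [[ 2.5  4.5] [10.5 12.5]]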
def setValuesOfSlopeVectors(self):
    super(IsraeliChild, self).setValuesOfSlopeVectors()
    if len(self.HCToAgeLevel1) > 0:
        self.avg_HCToAgeLevel1 = avg(self.HCToAgeLevel1)
        self.max_HCToAgeLevel1 = max(self.HCToAgeLevel1)
        self.min_HCToAgeLevel1 = min(self.HCToAgeLevel1)
    if len(self.HCToAgeLevel2) > 0:
        self.avg_HCToAgeLevel2 = avg(self.HCToAgeLevel2)
        self.max_HCToAgeLevel2 = max(self.HCToAgeLevel2)
        self.min_HCToAgeLevel2 = min(self.HCToAgeLevel2)
    if len(self.HCdivHeightLevel1) > 0:
        self.avg_HCdivHeightLevel1 = avg(self.HCdivHeightLevel1)
        self.max_HCdivHeightLevel1 = max(self.HCdivHeightLevel1)
        self.min_HCdivHeightLevel1 = min(self.HCdivHeightLevel1)
    if len(self.HCdivHeightLevel2) > 0:
        self.avg_HCdivHeightLevel2 = avg(self.HCdivHeightLevel2)
        self.max_HCdivHeightLevel2 = max(self.HCdivHeightLevel2)
        self.min_HCdivHeightLevel2 = min(self.HCdivHeightLevel2)
    if len(self.HCdivHeightSqLevel1) > 0:
        self.avg_HCdivHeightSqLevel1 = avg(self.HCdivHeightSqLevel1)
        self.max_HCdivHeightSqLevel1 = max(self.HCdivHeightSqLevel1)
        self.min_HCdivHeightSqLevel1 = min(self.HCdivHeightSqLevel1)
    if len(self.HCdivHeightSqLevel2) > 0:
        self.avg_HCdivHeightSqLevel2 = avg(self.HCdivHeightSqLevel2)
        self.max_HCdivHeightSqLevel2 = max(self.HCdivHeightSqLevel2)
        self.min_HCdivHeightSqLevel2 = min(self.HCdivHeightSqLevel2)
    if len(self.HCdivWeightLevel1) > 0:
        self.avg_HCdivWeightLevel1 = avg(self.HCdivWeightLevel1)
        self.max_HCdivWeightLevel1 = max(self.HCdivWeightLevel1)
        self.min_HCdivWeightLevel1 = min(self.HCdivWeightLevel1)
    if len(self.HCdivWeightLevel2) > 0:
        self.avg_HCdivWeightLevel2 = avg(self.HCdivWeightLevel2)
        self.max_HCdivWeightLevel2 = max(self.HCdivWeightLevel2)
        self.min_HCdivWeightLevel2 = min(self.HCdivWeightLevel2)
    if len(self.HCdivWeightSqLevel1) > 0:
        self.avg_HCdivWeightSqLevel1 = avg(self.HCdivWeightSqLevel1)
        self.max_HCdivWeightSqLevel1 = max(self.HCdivWeightSqLevel1)
        self.min_HCdivWeightSqLevel1 = min(self.HCdivWeightSqLevel1)
    if len(self.HCdivWeightSqLevel2) > 0:
        self.avg_HCdivWeightSqLevel2 = avg(self.HCdivWeightSqLevel2)
        self.max_HCdivWeightSqLevel2 = max(self.HCdivWeightSqLevel2)
        self.min_HCdivWeightSqLevel2 = min(self.HCdivWeightSqLevel2)
def get_dspecs(self):
    '''
    Make power spectrums for all baseline pairs in bls_ for the original
    delay spectrums. Saves in self.dspecs
    '''
    for cnt, bli in enumerate(self.bls_):
        for blj in self.bls_[cnt:]:  # the original referenced a bare bls_ here
            xi = get_x(bli, mode=0)  # the original fetched blj twice
            xj = get_x(blj, mode=0)
            # numpy (imported as n) has no n.avg; n.mean is the intended call
            self.dspecs.append(n.mean(xi * xj.conj(), axis=0))
def Targets_Similarity(self, targets, sent, max_flag=True):
    similarity = list()
    if len(targets) == 0:
        return 0.0
    for i in range(len(targets)):
        similarity.append(self.Similarity(targets[i], sent))
    similarity = np.array(similarity)
    if max_flag:
        return np.max(similarity)
    else:
        # numpy has no np.avg; np.mean is the intended call
        return np.mean(similarity)
def rendement(minute):
    date = datetime.now()
    date_range = date - timedelta(seconds=minute)
    query = db.Ballon1.select().order_by(db.Ballon1.id.desc()).where(
        db.Ballon1.date.between(date_range, date))
    Sonde = [row.moyenne_temperature for row in query
             if row.moyenne_temperature is not None and row.date is not None]
    if Sonde[-1] - avg(Sonde) <= 0:
        Sonde_rend1 = (max(Sonde) - min(Sonde)) / minute
    else:
        Sonde_rend1 = -(max(Sonde) - min(Sonde)) / minute
    if round(Sonde_rend1 * 60, 2) < 7:
        print("Ballon 1 inlet estimate: -{}°C per hour".format(round(Sonde_rend1 * 60, 2)))
    else:
        print("Ballon 1 inlet estimate: still computing...")
    query = db.Ballon2.select().order_by(db.Ballon2.id.desc()).where(
        db.Ballon2.date.between(date_range, date))
    Sonde = [row.moyenne_temperature for row in query
             if row.moyenne_temperature is not None and row.date is not None]
    if Sonde[-1] - avg(Sonde) <= 0:
        Sonde_rend2 = (max(Sonde) - min(Sonde)) / minute
    else:
        Sonde_rend2 = -(max(Sonde) - min(Sonde)) / minute
    if round(Sonde_rend2 * 60, 2) < 7:
        print("Ballon 2 outlet estimate: {}°C per hour\n".format(round(Sonde_rend2 * 60, 2)))
    else:
        print("Ballon 2 outlet estimate: still computing...\n")
def main():
    info = getresults()
    hostinfo = {}
    for route in info:
        for rtt, host in info[route]:
            if host not in hostinfo:
                hostinfo[host] = []
            hostinfo[host].append(rtt)
    havg = {}
    for h in hostinfo:
        havg[h] = avg(hostinfo[h])
    return havg
def sent2vec(sent_tok, mode='sum'):
    # `model` is assumed to be a word-vector lookup (e.g. a gensim KeyedVectors)
    if len(sent_tok) == 0:
        sent_vec = np.random.randn(1, 300)
        return sent_vec
    vectors = [model[w] for w in sent_tok]
    vec = np.array(vectors)
    if mode == 'sum':
        sent_vec = np.sum(vec, axis=0, keepdims=True)
    elif mode == 'avg':
        # numpy has no np.avg; np.mean supports keepdims
        sent_vec = np.mean(vec, axis=0, keepdims=True)
    return sent_vec
def get_target_dbfs(dbfs_list, max_dbfs_list):
    """
    Calculates the target dBFS value which to normalize audio levels to.
    :param dbfs_list: A list of dBFS values as float.
    :param max_dbfs_list: A list of dBFS values as float - these are the peak volumes of the audio files.
    :return: Returns the target dBFS value as float.
    """
    # the element-wise arithmetic below needs arrays, not plain lists
    dbfs_list = np.asarray(dbfs_list)
    max_dbfs_list = np.asarray(max_dbfs_list)
    avg_dbfs = avg(dbfs_list)
    dbfs_changes = avg_dbfs - dbfs_list
    headrooms = -(max_dbfs_list + dbfs_changes)
    min_headroom = min(headrooms)
    target_dbfs = avg_dbfs + min_headroom
    return target_dbfs
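# Hedged usage sketch for get_target_dbfs with made-up dBFS readings: three
# clips averaging -20 dBFS whose peaks leave 3/2/1 dB of headroom after being
# pulled to the average, so the target is -20 + 1 = -19 dBFS.
print(get_target_dbfs([-18.0, -20.0, -22.0], [-1.0, -2.0, -3.0]))  # -19.0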
def process3(results):
    ttls = {}
    for r in results:
        for ttl, host, rtt in r:
            if ttl not in ttls:
                ttls[ttl] = []
            ttls[ttl].append((host, rtt))
    visited = set()
    prom = {}
    for ttl in sorted(ttls.keys()):
        hosts = Counter([host for host, rtt in ttls[ttl]]).most_common(5)
        mch = None
        times = 0
        for h, t in hosts:
            if h not in visited:
                visited.add(h)
                mch, times = h, t
                break
        print("[INFO] for ttl %d host %s appears %d times" % (ttl, mch, times), file=sys.stderr)
        # if it appears too few times (< 20) we discard it: the measurement is not significant
        if times < 20 or mch is None:
            continue
        # keep only the TTLs of the hosts that appear the most
        ttls[ttl] = [(host, rtt) for host, rtt in ttls[ttl] if host == mch]
        prom[ttl] = avg([rtt for host, rtt in ttls[ttl]]), mch
    ls = sorted(prom.keys())
    res = [prom[ls[0]]]
    for i in range(len(ls) - 1):
        res.append((max(prom[ls[i + 1]][0] - prom[ls[i]][0], 0.0), prom[ls[i + 1]][1]))
    # take the smallest delta that is not equal to 0
    m = 10
    for rtt, host in res:
        if rtt > 0.000001 and rtt < m:
            m = rtt
    for i in range(len(res)):
        rtt, host = res[i]
        if rtt < m:
            res[i] = m, host
    return res
def genH1ZScoreHelp(parms, snpVec, H0Y, numTraits):
    epsilon = parms['epsilon']
    mu = parms['mu']
    f = np.mean(snpVec) / 2  # allele frequency; numpy has no np.avg
    A = np.zeros([len(traitData), 1])
    # the original indexed A[:, choice], which overruns the single column;
    # indexing the rows matches A's declared shape (assumption about intent)
    A[np.random.choice(len(traitData), size=epsilon, replace=False), :] = 1
    Y = H0Y + (mu / np.sqrt(2 * epsilon * f * (1 - f))) * np.matmul(snpVec, A)
    # DataFrame.to_csv takes sep, not delimiter, and returns None
    pd.DataFrame([[range(len(snpVec)), 'G', 'T'] + snpVec.tolist()], index=[0]).to_csv(
        path + 'process/H1Snp.txt', sep='\t', index=False, header=False)
    np.savetxt(local + name + 'process/Y.txt', Y, delimiter='\t')
    ans = np.empty([1, numTraits])
    for trait in range(numTraits):
        cmd = ['./gemma',
               '-g', path + 'process/H1Snp.txt',
               '-p', path + 'process/Y.txt',
               '-lmm', pval,
               '-o', name[:-1] + '-' + str(snp) + '-' + str(trait),
               '-k', path + 'process/H1Snp.txt',
               '-n', str(trait),  # subprocess arguments must be strings
               '-c', path + 'process/dummy.txt']
        print(' '.join(cmd), flush=True)
        subprocess.run(cmd)
        df = pd.read_csv('output/' + name[:-1] + '-' + str(snp) + '-' + str(trait) + '.assoc.txt', sep='\t')
        os.remove('output/' + name[:-1] + '-' + str(snp) + '-' + str(trait) + '.assoc.txt')
        if wald:
            # ans has a single row; the original ans[trait] over-indexed it
            ans[0, trait] = (df['beta'] / df['se']).iloc[0]
        else:
            ans[0, trait] = df['p_lrt'].iloc[0]
    return ans
def process1(results):
    # ttls maps each distinct TTL to every RTT observed for it
    ttls = {}
    for r in results:
        for ttl, host, rtt in r:
            if ttl not in ttls:
                ttls[ttl] = []
            ttls[ttl].append(rtt)
    # prom maps each distinct TTL to the average of its RTTs
    prom = {}
    for ttl in ttls:
        prom[ttl] = avg(ttls[ttl])
    ls = sorted(prom.keys())
    # res holds the delta RTTs of prom; negative deltas are clamped to 0
    res = [prom[ls[0]]]
    for i in range(len(ls) - 1):
        res.append(max(prom[ls[i + 1]] - prom[ls[i]], 0.0))
    return res
def fourier_extractor(x):
    sampling_freq = 250
    N = len(x)
    f_values = np.linspace(0.0, sampling_freq / 2, N // 2)
    fft_values_ = fft(x)
    fft_values = 2.0 / N * np.abs(fft_values_[0:N // 2])
    # the following values were determined empirically
    coeff_0 = fft_values[0]            # coefficient at 0 Hz
    peak_70 = 0                        # coefficient around 70 Hz
    coeff = np.zeros(20)               # max coefficient from each 2 Hz interval (0-40)
    integral40 = 0                     # integral from 0 to 40 Hz
    integral125 = np.mean(fft_values)  # mean over the whole transform (numpy has no np.avg)
    for i in range(0, len(f_values)):
        if f_values[i] > 69 and f_values[i] < 72 and fft_values[i] > peak_70:
            peak_70 = fft_values[i]
        if f_values[i] < 40:
            integral40 += fft_values[i]
            # bin by frequency, not by sample index -- the original int(i / 2)
            # overruns the 20-slot coeff array for long signals
            bin_idx = int(f_values[i] / 2)
            if fft_values[i] > coeff[bin_idx]:
                coeff[bin_idx] = fft_values[i]
    # concatenate rather than add: `coeff + [...]` would broadcast-add the lists
    return np.concatenate([coeff, [coeff_0, peak_70, integral40, integral125]])
def aggregate_passages(run_file, output, name, id_seaparator='_'):
    results = defaultdict(list)
    with open(run_file, 'r') as run:
        for line in tqdm(run):
            tokens = line.strip().split()
            q_id = tokens[0]
            d_id = tokens[2].split(id_seaparator)[0]
            results[(q_id, d_id)].append((float(tokens[4]), int(tokens[-1])))  # (score, relevance)
    # print("Writing results ...")
    if not os.path.exists(output):
        os.mkdir(output)
    out_avg = open(join(output, "avg_" + name), 'w')
    with open(join(output, "max_" + name), 'w') as out_max:
        for result in tqdm(OrderedDict(sorted(results.items()))):
            out_max.write("{q}\tQ0\t{d}\t1\t{s}\t{m}\t{r}\n".format(
                q=result[0], d=result[1],
                s=max([e[0] for e in results[result]]),
                m=name, r=results[result][0][1]))
            out_avg.write("{q}\tQ0\t{d}\t1\t{s}\t{m}\t0\n".format(
                q=result[0], d=result[1],
                s=avg([e[0] for e in results[result]]),
                m=name, r=results[result][0][1]))
    out_avg.close()
def findCorrectLength(cyphertext, distances):
    # gathering all possible candidates
    all_factors = []
    for elem in distances:
        all_factors += factorsOfNumb(elem)
    lengthCandidates = Counter()
    for elem in all_factors:
        lengthCandidates[elem] += 1
    best_candidate = 0
    best_average_coincidence_idx = sys.float_info.min
    best_coincidence_idx = []
    best_col_matrix = []
    # looping on candidates: for each candidate we split the cyphertext by columns
    # and compute the coincidence indexes to find the best candidate
    for candidate in lengthCandidates:
        if candidate != 1 and candidate < 30:
            column_matrix = []
            for i in range(candidate):
                row = []
                column_matrix.append(row)
            i = 0
            while i < len(cyphertext):
                column_matrix[i % candidate].append(cyphertext[i])
                i += 1
            indexes = []
            for row in column_matrix:
                letter_count = Counter()
                for letter in row:
                    letter_count[letter] += 1
                row_coincidence_idx = 0
                for elem in letter_count:
                    row_coincidence_idx += (letter_count[elem] / len(row)) * ((letter_count[elem] - 1) / len(row))
                indexes.append(row_coincidence_idx)
            avg_idxs = avg(indexes)
            # updating best values
            if avg_idxs > best_average_coincidence_idx:
                best_average_coincidence_idx = avg_idxs
                best_candidate = candidate
                best_col_matrix = column_matrix
                best_coincidence_idx = indexes
    return best_candidate, best_col_matrix
def setValuesOfSlopeVectors(self):
    #### weight slopes ####
    if len(self.weightToAgeLevel1) > 0:
        self.max_weightToAgeLevel1 = max(self.weightToAgeLevel1)
        self.max_weightDivAgeLevel1 = max(self.weightDivAgeLevel1)
        self.min_weightToAgeLevel1 = min(self.weightToAgeLevel1)
        self.min_weightDivAgeLevel1 = min(self.weightDivAgeLevel1)
        self.avg_weightToAgeLevel1 = avg(self.weightToAgeLevel1)
        self.avg_weightDivAgeLevel1 = avg(self.weightDivAgeLevel1)
    if len(self.weightDivAgeLevel2) > 0:
        self.max_weightToAgeLevel2 = max(self.weightToAgeLevel2)
        self.max_weightDivAgeLevel2 = max(self.weightDivAgeLevel2)
        self.min_weightToAgeLevel2 = min(self.weightToAgeLevel2)
        self.min_weightDivAgeLevel2 = min(self.weightDivAgeLevel2)
        self.avg_weightToAgeLevel2 = avg(self.weightToAgeLevel2)
        self.avg_weightDivAgeLevel2 = avg(self.weightDivAgeLevel2)
    #### height slopes ####
    if len(self.heightToAgeLevel1) > 0:
        self.max_heightToAgeLevel1 = max(self.heightToAgeLevel1)
        self.max_heightDivAgeLevel1 = max(self.heightDivAgeLevel1)
        self.min_heightToAgeLevel1 = min(self.heightToAgeLevel1)
        self.min_heightDivAgeLevel1 = min(self.heightDivAgeLevel1)
        self.avg_heightToAgeLevel1 = avg(self.heightToAgeLevel1)
        self.avg_heightDivAgeLevel1 = avg(self.heightDivAgeLevel1)
    if len(self.heightToAgeLevel2) > 0:
        self.avg_heightDivAgeLevel2 = avg(self.heightDivAgeLevel2)
        self.min_heightDivAgeLevel2 = min(self.heightDivAgeLevel2)
        self.max_heightDivAgeLevel2 = max(self.heightDivAgeLevel2)
        self.avg_heightToAgeLevel2 = avg(self.heightToAgeLevel2)
        self.min_heightToAgeLevel2 = min(self.heightToAgeLevel2)
        self.max_heightToAgeLevel2 = max(self.heightToAgeLevel2)
    #### BMI slopes ####
    if len(self.bmiToAgeLevel1) > 0:
        self.max_bmiToAgeLevel1 = max(self.bmiToAgeLevel1)
        self.max_bmiDivAgeLevel1 = max(self.bmiDivAgeLevel1)
        self.min_bmiToAgeLevel1 = min(self.bmiToAgeLevel1)
        self.min_bmiDivAgeLevel1 = min(self.bmiDivAgeLevel1)
        self.avg_bmiToAgeLevel1 = avg(self.bmiToAgeLevel1)
        self.avg_bmiDivAgeLevel1 = avg(self.bmiDivAgeLevel1)
    if len(self.bmiToAgeLevel2) > 0:
        self.min_bmiToAgeLevel2 = min(self.bmiToAgeLevel2)
        self.min_bmiDivAgeLevel2 = min(self.bmiDivAgeLevel2)
        self.avg_bmiToAgeLevel2 = avg(self.bmiToAgeLevel2)
        self.max_bmiDivAgeLevel2 = max(self.bmiDivAgeLevel2)
        self.max_bmiToAgeLevel2 = max(self.bmiToAgeLevel2)
        self.avg_bmiDivAgeLevel2 = avg(self.bmiDivAgeLevel2)
def CS(reviews):
    texts = [review["reviewText"] for review in reviews]
    return avg([cosine_sim(review1, review2)
                for review1 in texts
                for review2 in texts])
def evaluate_accuracy(Y, Ypred):
    """Determines how accurately predicted yPred is"""
    # numpy has no np.avg; the mean of the boolean matches is the accuracy
    return np.mean(Y == Ypred)
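# Hedged usage sketch: accuracy over a toy label vector.
import numpy as np

Y = np.array([1, 0, 1, 1])
Ypred = np.array([1, 1, 1, 0])
print(evaluate_accuracy(Y, Ypred))  # 0.5 -- two of four predictions match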
    for filename in ls:
        f = open(folder_price + '/' + filename, 'r')
        rows = get_rows(f)  # Get file data
        day = get_day_index(rows[1][0])
        f.close()
        if day == "NO":
            continue  # only use august 9-12
        # Only take 2nd quarter of each hour
        if int(rows[1][2]) != 2:
            continue
        hour = int(rows[1][1]) - 1
        price = float(rows[1][3])
        prices[day * 24 + hour] = price
    return prices

if __name__ == "__main__":
    loads = get_load_data(sys.argv[1] + "/zipfiles_load_0812")
    capacities = get_capacity_data(sys.argv[1] + "/zipfiles_gencap_0812")
    prices = get_price_data(sys.argv[1] + "/zipfiles_rspinprice_0812")
    reserve = [capacities[i] - loads[i] for i in range(hours)]
    # Plot results (numpy has no np.avg; use np.mean)
    print(np.mean(prices))
    print(np.mean(reserve))
    plt.plot(reserve, prices, 'ko')
    plt.xlabel("MW of spinning reserve")
    plt.ylabel("Dollars per MW*hour")
    plt.show()
err = open('out/not_found.out', 'w')  # text mode: the original's 'wb' would reject str writes
found_loc = {}
if len(sys.argv) < 4:
    print("Filename required.")
else:
    count = 0
    for l in fileinput.input(sys.argv[1]):
        line = l.split("\t")
        userID = line[0].rstrip()
        latitude = float(line[2])
        longitude = float(line[3])
        locID = line[4].rstrip()
        if locID in found_loc:
            new_loc = found_loc[locID]
        else:
            new_loc = find_city(latitude, longitude)
        if new_loc is None:
            err.write(str(latitude) + '\t' + str(longitude) + '\n')
        else:
            new_line = l.rstrip() + '\t' + new_loc + '\n'
            out.write(new_line)  # `out` and `distances` come from earlier in the original script
    out.close()
err.close()
avg = np.mean(distances)  # numpy has no np.avg
print("Average distance = " + str(avg))
def processImages(conn, scriptParams):
    """
    Process the script params to make a list of channel_offsets, then iterate
    through the images creating a new image from each with the specified
    channel offsets
    """
    message = ""
    images, logMessage = script_utils.getObjects(conn, scriptParams)
    message += logMessage
    if not images:
        raise Exception("No images found")
    imageIds = sorted(set([i.getId() for i in images]))
    choice = scriptParams["Choice"]
    debug = bool(scriptParams.get("Debug", False))
    globalmin = defaultdict(list)
    globalmax = defaultdict(list)
    tb = TableBuilder("Context", "Channel", "Min", "Max")
    statsInfos = dict()
    for iId in imageIds:
        statsInfo = calcStatsInfo(conn, iId, choice, debug)
        statsInfos[iId] = statsInfo
        for c, si in sorted(statsInfo.items()):
            c_min, c_max = si
            globalmin[c].append(c_min)
            globalmax[c].append(c_max)
            tb.row("Image:%s" % iId, c, c_min, c_max)
    tb.row("", "", "", "")
    for c in globalmin:
        c_min = globalmin[c]
        c_max = globalmax[c]
        tb.row("Total: outer  ", c, min(c_min), max(c_max))
        tb.row("Total: inner  ", c, max(c_min), min(c_max))
        tb.row("Total: average", c, int(avg(c_min)), int(avg(c_max)))
    if scriptParams["DryRun"]:
        print(str(tb.build()))
    else:
        combine = scriptParams["Combine"]
        for iId in imageIds:
            img = conn.getObject("Image", iId)
            for c, ch in enumerate(img.getChannels(noRE=True)):
                si = ch.getStatsInfo()
                if si is None:
                    si = StatsInfoI()
                    action = "creating"
                else:
                    si = si._obj
                    action = "updating"
                if combine == "no":
                    si.globalMin = rdouble(statsInfos[iId][c][0])
                    si.globalMax = rdouble(statsInfos[iId][c][1])
                elif combine == "outer":
                    si.globalMin = rdouble(min(globalmin[c]))
                    si.globalMax = rdouble(max(globalmax[c]))
                elif combine == "inner":
                    si.globalMin = rdouble(max(globalmin[c]))
                    si.globalMax = rdouble(min(globalmax[c]))
                elif combine == "average":
                    si.globalMin = rdouble(avg(globalmin[c]))
                    si.globalMax = rdouble(avg(globalmax[c]))
                else:
                    raise Exception("unknown combine: %s" % combine)
                if debug:
                    print("Image:%s(c=%s) - %s StatsInfo(%s, %s)" % (
                        iId, c, action, si.globalMin.val, si.globalMax.val))
                ch._obj.statsInfo = si
                ch.save()
    count = sum(map(len, statsInfos.values()))
    message += "%s stats info object(s) processed" % count
    return message
#-->Write the headers to the new files
InputFile.write('%s\n%s\n%s\n' % (sensorHeader, measureHeader, unitHeader))
AvgFile.write('%s\n%s\n%s\n' % (sensorHeader, measureHeader, unitHeader))
RawFile.write('%s\n%s\n%s\n' % (sensorHeader, measureHeader, unitHeader))

#Change in the Minute:
#...Since currentMin is never higher than 59, the prevMin + the minutes between can't be more than 59.
#-If prevMin + userMin is greater than 59, reset prevMin so the comparison below wraps around
if prevMin + userMin > 59:
    prevMin = 0 + (59 - (prevMin + userMin))
#-Check to see if currentMin is equal to prevMin plus however many minutes the user wants between logs
if currentMin == prevMin + userMin:
    #-->Set prevMin equal to currentMin
    prevMin = currentMin
    #-->Write the average (and total amount of rain) of each list to the Avg File
    AvgFile.write(columnSpacing % (datetime, avg(lightList), avg(tempList),
                                   avg(mbarList), avg(humidList), mm_rain,
                                   avg(mV_angle(mV_windDir))))
    #-->Then clear the lists
    lightList, tempList, mbarList, humidList, windDirList = [], [], [], [], []
    mV_lightList, mV_windDirList = [], []
    #-->Set the variable which holds the total amount of rain back to zero
    mm_rain = 0
    avgTotal += 1

#-Write the current raw sensor values to the Raw File
RawFile.write('%s, %15.2f, %10.4f\n' % (datetime, avg(mV_light), avg(mV_windDir)))
#-Write the current sensor values to the Input File
InputFile.write(columnSpacing % (datetime, mV_WM2(mV_light), degrees_C, mbar,
                                 humidity, mm_rain, mV_angle(mV_windDir)))

#Add one to the total number of Loops that the program has run
loopsTotal += 1
lightData.append(WM2)
tempData.append(degrees_C)
mbarData.append(mbar)
humidData.append(humidity)

columnSpacing = '%s %14.2f %16.2f %9.2f %10.2f'  # datetime, WM2, degrees_C, mbar, humidity

#When the hour changes, it's time to close the current file and start a new one
if hour != prevHour:
    prevHour = hour
    print("\n\n Upload \n\n")

#When the minute changes, it logs the average and the real time data
if min != prevMin:
    prevMin = min
    # parenthesise before applying % -- the operator binds tighter than +
    print(('\n\n' + columnSpacing + '\n\n') % (datetime, avg(lightData), avg(tempData),
                                               avg(mbarData), avg(humidData)))
#Otherwise, it continues logging the real time data
print(columnSpacing % (datetime, WM2, degrees_C, mbar, humidity))

#Wait (AKA Sleep) 1 second minus whatever time the process takes
# if the process takes more than 1 sec, sleep defaults to 1 sec
elapsedTime = time.time() - startTime
if elapsedTime < 1 and elapsedTime > 0:
    time.sleep(1 - elapsedTime)
else:
    time.sleep(1)
def process_sequence2(id, event_sequence, session_close_event_num):
    # numpy has no numpy.avg; numpy.mean is the intended call
    f1 = numpy.mean(event_sequence[0:session_close_event_num + 1])
    f2 = numpy.min(event_sequence[0:session_close_event_num + 1])
    f3 = f4 = f5 = 0
    return (id, f1, f2, f3, f4, f5)
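# Hedged usage sketch (the event sequence is made up): features are computed
# over events up to and including the session-close index.
import numpy
print(process_sequence2(7, numpy.array([3, 1, 4, 1, 5]), 2))
# -> (7, 2.666..., 1, 0, 0, 0): mean and min of the first three events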
def averageValue(img):
    # mean intensity: sum over both spatial axes, average across channels,
    # then divide by the pixel count
    height, width = img.shape[:2]
    val = avg(img.sum(axis=0).sum(axis=0))
    return val / (height * width)
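# Hedged usage sketch: for a uniform image the mean pixel value is the pixel
# value itself (a 3-channel image is used so avg() receives a sequence).
import numpy as np

img = np.full((4, 4, 3), 10.0)  # uniform 4x4 RGB image
print(averageValue(img))  # 10.0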