def statistic(self):
    """Run statistics over every collected sample list, derive projected X/Y
    statistics and the CEP radii, print a summary and flush it to the
    output file (closing it)."""
    self.PDOTStatistic.statistic("PDOP", self.PDOPList)
    self.HDOTStatistic.statistic("HDOP", self.HDOPList)
    self.VDOTStatistic.statistic("VDOP", self.VDOPList)
    self.LatitudeStatistic.statistic("Latitude", self.LatitudeList)
    self.LongitudeStatistic.statistic("Longitude", self.LongitudeList)
    self.AltitudeStatistic.statistic("Altitude", self.AltitudeList)
    # convert() must run after the lat/lon statistics: it uses their
    # Mean/Min/Max to build the projection that fills XList/YList.
    self.convert()
    self.XStatistic.statistic("X", self.XList)
    self.YStatistic.statistic("Y", self.YList)
    # Circular Error Probable radii from the X/Y standard deviations.
    self.CEP = Statistic.cep(self.XStatistic.Std, self.YStatistic.Std)
    self.CEP95 = Statistic.cep95(self.XStatistic.Std, self.YStatistic.Std)
    self.CEP99 = Statistic.cep99(self.XStatistic.Std, self.YStatistic.Std)
    print(self.cep_to_string())
    print(self.rms_to_string())
    print(self.test_point_count_to_string())
    print(self.fix_quality_count_to_string())
    if self.OutputFile is not None:
        self.OutputFile.write(self.to_string())
        self.OutputFile.close()
def __init__(self):
    """Create one Statistic accumulator per log-record field plus a counter
    of processed records, then run the base-class initialiser."""
    self.levelStatistic = Statistic()   # log-level values
    self.threadStatistic = Statistic()  # thread identifiers
    self.loggerStatistic = Statistic()  # logger names
    self.timeStatistic = Statistic()    # truncated timestamps
    self.entries = 0  # number of records processed so far
    super().__init__()
def build(kernel, metric, keys_limit, svm_C, logs):
    """Select features on the arcene training split, fit an SVM on the
    reduced data and score it on the validation split.

    Returns a tuple of (selected feature indices, confusion matrix,
    F-measure on the validation set).
    """
    train_x = genfromtxt('input/arcene_train.data', delimiter=' ')
    train_y = genfromtxt('input/arcene_train.labels', delimiter=' ')
    valid_x = genfromtxt('input/arcene_valid.data', delimiter=' ')
    valid_y = genfromtxt('input/arcene_valid.labels', delimiter=' ')

    # Feature selection: the metric ranks columns, capped at keys_limit.
    keys = metric.build(train_x.transpose(), train_y, logs=logs, limit=keys_limit)

    # Keep only the selected columns of every training sample.
    reduced_train = np.array([np.take(sample, keys) for sample in train_x])
    clf = SVM(kernel=kernel.kernel, C=svm_C)
    clf.fit(reduced_train, train_y)

    # Same column selection for the validation samples, then predict.
    reduced_valid = np.array([np.take(sample, keys) for sample in valid_x])
    predictions = [clf.predict(sample) for sample in reduced_valid]

    confusion_matrix = Statistic.get_metrics(predictions, valid_y)
    f_measure = Statistic.get_f_measure(confusion_matrix)
    return keys, confusion_matrix, f_measure
def haughTransform(img):
    """Estimate the dominant line angle in *img* (radians) via the Hough
    transform, filtering out lines more than 5 degrees from the mean.

    Returns None when no Hough lines are detected.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    # rho resolution 1px, theta resolution 1 degree, accumulator threshold 200.
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
    if lines is None:
        return None
    statAngle = Statistic()
    angleTable = []
    for i in range(lines.shape[0]):
        rho, theta = lines[i][0]
        # Convert (rho, theta) into two far-apart points on the line.
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        dy = y2 - y1
        dx = x2 - x1
        # A tags which quadrant/orientation case produced the angle; the
        # atan2 argument order is flipped per case to normalise direction.
        A = 0
        if abs(dx) > abs(dy):
            # Mostly-horizontal line.
            if x1 < x2:
                A = 1
                angle = math.atan2(-dy, dx)
            else:
                A = 2
                angle = math.atan2(dy, dx)
        else:
            # Mostly-vertical line: measure against the y axis instead.
            if y1 < y2:
                A = 3
                angle = math.atan2(dx, dy)
            else:
                A = 4
                angle = math.atan2(-dx, -dy)
        angleTable.append(angle)
        statAngle.add(angle)
        # print( dx, dy , angle * 180.0 / math.pi , A)
    # reject any angle higher than 5 degree from target
    targetAngle = statAngle.mean()
    maxDegree = 5 * math.pi / 180
    statAngle.clear()
    for angle in angleTable:
        if (abs(angle - targetAngle)) < maxDegree:
            statAngle.add(angle)
    # Only refine the estimate when at least one inlier survived.
    if statAngle.count > 0:
        targetAngle = statAngle.mean()
    print("Best angle :", 180.0 * targetAngle / math.pi)
    return targetAngle
def __init__(self,feature_size):
    '''
    self.total_runs: the total times of making prediction
    self.total_reward: the total reward of prediction
    B, miu, f are median parameters
    '''
    # NOTE(review): the docstring above describes attributes that are not
    # created here — it looks copied from another class; confirm.
    # One agent of each kind, all sharing the same feature dimensionality.
    self.linucb = LinUCB(feature_size)
    self.lts = LTS(feature_size)
    self.stat = Statistic(feature_size)
    # Per-agent reward histories (appended only on matching predictions).
    self.his_linucb = []
    self.his_lts = []
    self.his_stat = []
    # Per-agent counts of predictions that matched the displayed article.
    self.valid_linucb = 0
    self.valid_lts = 0
    self.valid_stat = 0
class Sep_test(object):
    """Run LinUCB, LTS and Statistic agents side by side on the same event
    stream, tracking each agent's reward history and hit count separately."""

    def __init__(self,feature_size):
        '''
        self.total_runs: the total times of making prediction
        self.total_reward: the total reward of prediction
        B, miu, f are median parameters
        '''
        # NOTE(review): the docstring above describes attributes that are
        # not created here — it looks copied from another class; confirm.
        self.linucb = LinUCB(feature_size)
        self.lts = LTS(feature_size)
        self.stat = Statistic(feature_size)
        # Per-agent reward histories (appended on matching predictions only).
        self.his_linucb = []
        self.his_lts = []
        self.his_stat = []
        # Per-agent counts of predictions that matched the shown article.
        self.valid_linucb = 0
        self.valid_lts = 0
        self.valid_stat = 0

    def LinUCB_predict_and_learn(self,context,articleID,reward,pool):
        """Predict with LinUCB; record the reward only when the prediction
        matches the displayed article, then always train on the example."""
        prediction = self.linucb.predict(context,pool)
        #update records
        if prediction==articleID:
            self.his_linucb.append(reward)
            self.valid_linucb += 1
        #train one of the agents
        self.linucb.learn(context,articleID,reward)

    def lts_predict_and_learn(self,context,articleID,reward,pool):
        """Same evaluate-then-learn step for the LTS agent."""
        prediction = self.lts.predict(context,pool)
        #update records
        if prediction==articleID:
            self.his_lts.append(reward)
            self.valid_lts += 1
        #train one of the agents
        self.lts.learn(context,articleID,reward)

    def stat_predict_and_learn(self,context,articleID,reward,pool):
        """Same evaluate-then-learn step for the Statistic agent."""
        prediction = self.stat.predict(context,pool)
        #update records
        if prediction==articleID:
            self.his_stat.append(reward)
            self.valid_stat += 1
        #train one of the agents
        self.stat.learn(context,articleID,reward)

    def predict_and_learn(self,context,articleID,reward,pool):
        """Run all three agents on the same example."""
        self.LinUCB_predict_and_learn(context,articleID,reward,pool)
        self.lts_predict_and_learn(context,articleID,reward,pool)
        self.stat_predict_and_learn(context,articleID,reward,pool)
def __init__(self, queries, options, scenario_plan, samples=3):
    """Build one Scenario per comma-separated thread count in
    *scenario_plan*, plus one Statistic per entry in stats_cfg."""
    self._queries = queries
    self._samples = samples
    self._options = options
    self._scenario_plan = scenario_plan
    self._results = []
    # TODO (djrut): Implement more sophisticated scenario plans, with
    # idioms such as: 'range(x..y) step z'
    self._scenarios = [
        Scenario(threads=int(token),
                 samples=self._samples,
                 queries=self._queries,
                 options=self._options)
        for token in scenario_plan.split(',')
    ]
    self._stats = [
        Statistic(name=stat_name, function=stat_function)
        for stat_name, stat_function in stats_cfg.items()
    ]
def create_stats(self):
    """Register one Statistic per two-letter category code in self.stats."""
    for code in ('po', 'pr', 'ze', 'sp', 'ku', 'kr', 'pc', 'ji'):
        self.stats[code] = Statistic(code)
def __init__(self, db, chats):
    """Wire up the Telegram bot: register the command/message handlers,
    start polling and build the Statistic helper used by /stat."""
    self.db = db
    self.chats = chats
    self.updater = Updater(token=GET_TOKEN())
    dispatcher = self.updater.dispatcher
    dispatcher.addTelegramCommandHandler('start', self.start)
    dispatcher.addTelegramCommandHandler('restrict', self.add_restrict)
    dispatcher.addTelegramCommandHandler('stat', self.stats)
    dispatcher.addTelegramCommandHandler('info', self.info)
    dispatcher.addTelegramCommandHandler('help', self.help)
    dispatcher.addTelegramCommandHandler('sites', self.sites)
    # Any non-command message goes to answer().
    dispatcher.addTelegramMessageHandler(self.answer)
    self.updater.start_polling()
    self.bot = telegram.Bot(token=GET_TOKEN())
    self.statistician = Statistic(GET_FIRST_TIME(), GET_LAST_TIME(), db)
def euler(pearson_keys, spearman_keys, ig_keys):
    """Draw a three-set Venn diagram of the feature keys selected by the
    Pearson, Spearman and IG metrics, labelling each region with its count."""
    All, pearson_spearman, pearson_IG, spearman_IG, pearson, spearman, IG = \
        Statistic.get_statistics(pearson_keys, spearman_keys, ig_keys)
    plt.figure(figsize=(30, 30))
    v = venn3(subsets=(1, 1, 1, 1, 1, 1, 1), set_labels=('Pearson', 'Spearman', 'IG'))
    c = venn3_circles(subsets=(1, 1, 1, 1, 1, 1, 1), linestyle='dashed')
    # Region id -> label text (newlines nudge labels clear of the circles).
    region_labels = {
        '100': str(pearson),
        '010': '\n\n' + str(spearman),
        '001': str(IG),
        '110': str(pearson_spearman),
        '011': '' + str(spearman_IG),
        '101': '\n\n' + str(pearson_IG),
        '111': str(All),
    }
    for region, label in region_labels.items():
        v.get_label_by_id(region).set_text(label)
    plt.title("Feature Selection")
    plt.show()
def main():
    """Deserialize an LBProjects scenario, run the simulation through the
    OpenCL ComputationalContext and plot the resulting field."""
    os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'  # surface OpenCL build logs
    onlyPlot = False
    deserializer = Deserializer('LBProjects/test_1024_64_bi')
    deserializer.deserialize(Settings)
    Settings.toFemtoseconds()
    grid = Grid.Instance()
    field = numpy.zeros((grid.space_size, grid.time_size), dtype=numpy.complex128)
    gauss = Gauss()
    gauss.fillField(field)  # seed the field with the initial Gaussian pulse
    context = ComputationalContext(field)
    context.fill_data()
    statistic = Statistic()
    if (onlyPlot):
        # NOTE(review): fieldxtxt / big_intenstxt are not defined in this
        # view — this branch looks stale; confirm before enabling onlyPlot.
        lBulletGraph = Graph(fieldxtxt)
        #lBulletGraph.plot2D(big_intenstxt)
        #lBulletGraph.plot3D()
    else:
        # Time the stepping loop and report profiling info afterwards.
        statistic.set_start_time()
        do_steps(context)
        context.copy_from_buffer(field)
        statistic.set_end_time()
        statistic.print_profile_info(context)
        statistic.print_total_time()
        #statistic.print_error(fieldxtxt, field)
        graph = Graph(field)
        graph.plot3D()
# NOTE(review): chunk begins mid-definition — these three calls are the tail
# of a Sep_test.predict_and_learn(...) method whose header is outside this view.
self.LinUCB_predict_and_learn(context,articleID,reward,pool)
self.lts_predict_and_learn(context,articleID,reward,pool)
self.stat_predict_and_learn(context,articleID,reward,pool)

# --- driver script: stream Yahoo front-page click data through the agents ---
print("==START==")
start_time = time.time()
data_dir = 'ydata-fp-td-clicks-v2_0.20111003'#'rewrite.txt'
batch_num = Data.process_large_data(data_dir)
# Cap the number of batches consumed at 3.
data_gen=Data.get_batched_data(min(batch_num,3))
print("done processing data file")
agents = []
linucb=LinUCB(Data.USER_VEC_SIZE)
lts=LTS(Data.USER_VEC_SIZE)
stat = Statistic(Data.USER_VEC_SIZE)
agents.append(linucb)
agents.append(lts)
agents.append(stat)
# NOTE(review): the list built above is immediately discarded by this
# rebinding — the three appends are dead code; confirm intent.
agents=[LinUCB(Data.USER_VEC_SIZE) for i in range(3)]+[LTS(Data.USER_VEC_SIZE) for i in range(3)]+[Statistic(Data.USER_VEC_SIZE) for i in range(3)]
seprate_test = Sep_test(Data.USER_VEC_SIZE)
print("Computation starts")
total_click=0
total_data=0#count data entries
for (display,click,user_vec,pool) in data_gen:
    #do something with current data
    total_data+=1
    total_click+=click
    seprate_test.LinUCB_predict_and_learn(user_vec,display,click,pool)
# NOTE(review): chunk starts mid-class — `Bagging.__init__(self, agents)` is
# the tail of a subclass __init__, and `train` is a method of that class.
Bagging.__init__(self, agents)

def train(self, context, articleID, reward):
    """Forward one labelled example to every bagged agent."""
    for agent in self.agents:
        agent.learn(context, articleID, reward)

if __name__ == "__main__":
    from LinUCB import LinUCB
    from Statistic import Statistic
    import Data
    display, click, user_vec, pool = Data.load_from_dump()
    pool_size = len(pool)
    data_size = len(display)
    print("pool_size= {}, data_size={}".format(pool_size, data_size))
    agents = [Statistic(pool_size, Data.USER_VEC_SIZE)]  # for i in range(3)]
    bag = Bagging(agents)
    # Warm-up on the first 0.1% of the data before evaluating.
    tuning_factor = 0.001
    tuning_size = int(data_size * tuning_factor)
    print("tuning")
    #tuning phase
    for i in range(tuning_size):
        bag.train(user_vec[i], display[i], click[i])
    print("Predicting")
    for i in range(tuning_size, data_size):
        bag.predict_and_learn(user_vec[i], display[i], click[i])
    #report
    avg_reward = sum(bag.reward_history) * 1.0 / len(bag.reward_history)
class Sep_test(object):
    """Run LinUCB, LTS and Statistic agents side by side, tracking per-agent
    reward histories plus an optional majority-vote 'hybrid' evaluation."""

    def __init__(self, feature_size):
        '''
        self.total_runs: the total times of making prediction
        self.total_reward: the total reward of prediction
        B, miu, f are median parameters
        '''
        # NOTE(review): the docstring above describes attributes that are
        # not created here — it looks copied from another class; confirm.
        self.linucb = LinUCB(feature_size)
        self.lts = LTS(feature_size)
        self.stat = Statistic(feature_size)
        # Per-agent reward histories (appended on matching predictions only).
        self.his_linucb = []
        self.his_lts = []
        self.his_stat = []
        self.his_hybrid = []
        # Per-agent counts of predictions that matched the shown article.
        self.valid_linucb = 0
        self.valid_lts = 0
        self.valid_stat = 0
        self.valid_hybrid = 0
        # Ballot box for Hybrid_predict_and_learn; one vote per agent call.
        self.vote = []

    def LinUCB_predict_and_learn(self, context, articleID, reward, pool):
        """Predict with LinUCB, cast its vote, score the match, then learn."""
        prediction = self.linucb.predict(context, pool)
        self.vote.append(prediction)
        #update records
        if prediction == articleID:
            self.his_linucb.append(reward)
            self.valid_linucb += 1
        #train one of the agents
        self.linucb.learn(context, articleID, reward)

    def lts_predict_and_learn(self, context, articleID, reward, pool):
        """Same vote/score/learn step for the LTS agent."""
        prediction = self.lts.predict(context, pool)
        self.vote.append(prediction)
        #update records
        if prediction == articleID:
            self.his_lts.append(reward)
            self.valid_lts += 1
        #train one of the agents
        self.lts.learn(context, articleID, reward)

    def stat_predict_and_learn(self, context, articleID, reward, pool):
        """Same vote/score/learn step for the Statistic agent."""
        prediction = self.stat.predict(context, pool)
        self.vote.append(prediction)
        #update records
        if prediction == articleID:
            self.his_stat.append(reward)
            self.valid_stat += 1
        #train one of the agents
        self.stat.learn(context, articleID, reward)

    def predict_and_learn(self, context, articleID, reward, pool):
        """Run all three agents on one example.

        NOTE(review): this path appends three votes without clearing
        self.vote afterwards; only Hybrid_predict_and_learn resets it —
        calling this directly leaks stale votes into the next hybrid round.
        """
        self.LinUCB_predict_and_learn(context, articleID, reward, pool)
        self.lts_predict_and_learn(context, articleID, reward, pool)
        self.stat_predict_and_learn(context, articleID, reward, pool)

    def Hybrid_predict_and_learn(self, context, articleID, reward, pool):
        """Majority vote over the three agents' predictions; ties are broken
        uniformly at random over the most-voted articles."""
        self.predict_and_learn(context, articleID, reward, pool)
        # NOTE(review): np.bincount requires non-negative integer votes —
        # assumes article IDs are small non-negative ints; confirm.
        counts = np.bincount(self.vote)
        prediction = choice(np.flatnonzero(counts == counts.max()))
        #update records
        if prediction == articleID:
            self.his_hybrid.append(reward)
            self.valid_hybrid += 1
        self.vote = []
def __init__(self):
    """Initialise all accumulators, counters and plotting state for one
    NMEA statistics run."""
    self.FileName = FILE_NAME_EXT_DATA
    self.OutputFile = None        # opened lazily on first write
    self.LastNavigateData = None  # previously accepted fix
    self.DistanceFromLastPoint = 0
    self.TotalDistance = 0
    # Counters per GGA fix-quality value 0..6.
    self.FixQuality_0 = 0;
    self.FixQuality_1 = 0;
    self.FixQuality_2 = 0;
    self.FixQuality_3 = 0;
    self.FixQuality_4 = 0;
    self.FixQuality_5 = 0;
    self.FixQuality_6 = 0;
    self.MarkColor = None  # RGB tuple for the current fix quality
    # Raw sample lists fed into the Statistic accumulators.
    self.PDOPList = []
    self.HDOPList = []
    self.VDOPList = []
    self.LatitudeList = []
    self.LongitudeList = []
    self.AltitudeList = []
    self.XList = []  # projected coordinates, filled by convert()
    self.YList = []
    self.ColorList = []
    self.PDOTStatistic = Statistic()
    self.HDOTStatistic = Statistic()
    self.VDOTStatistic = Statistic()
    self.LatitudeStatistic = Statistic()
    self.LongitudeStatistic = Statistic()
    self.AltitudeStatistic = Statistic()
    self.XStatistic = Statistic()
    self.YStatistic = Statistic()
    self.Basemap = None
    # Circular Error Probable radii, computed later from X/Y statistics.
    self.CEP = 0
    self.CEP95 = 0
    self.CEP99 = 0
class StatisticCommand(BaseCommand):
    """Aggregate per-field statistics (level, thread, logger, time) over
    parsed log records and print them on demand."""

    def __init__(self):
        # One accumulator per record field plus a processed-record counter.
        self.levelStatistic = Statistic()
        self.threadStatistic = Statistic()
        self.loggerStatistic = Statistic()
        self.timeStatistic = Statistic()
        self.entries = 0
        super().__init__()

    def execute(self, record):
        """Feed one parsed record into the accumulators.

        Record indices used here: 0 = timestamp, 1 = thread, 2 = level,
        3 = logger.
        """
        self.levelStatistic.addValue(record[2])
        self.threadStatistic.addValue(record[1])
        self.loggerStatistic.addValue(record[3])
        self.processTime(record)
        self.entries += 1

    def printResult(self):
        """Print the entry count and every per-field statistic to stdout."""
        print("logfile has", self.entries, "entries\n")
        print("\nLog level\n---------")
        self.levelStatistic.printStatistic()
        print("\nThreads\n-------")
        self.threadStatistic.printStatistic()
        print("\nLogger\n------")
        self.loggerStatistic.printStatistic()
        print("\nTime\n----")
        self.timeStatistic.printStatistic()

    def processTime(self, record):
        # Truncate the timestamp to its first 16 characters so times bucket
        # coarsely (presumably to minute granularity — depends on format).
        time = record[0][0:16]
        self.timeStatistic.addValue(time)
from Statistic import Statistic  # import the Statistic class

# Compute and report the sum and the average of the values in students.txt.
stats = Statistic('students.txt')
# Renamed from `sum`: the original name shadowed the builtin sum().
total = stats.statistic_sum()
print('the sum is ', total)
average = stats.statistic_average()
print('the average is ', average)
class UserManager(QObject):
    """Pull WeChat public-account user data over the cgi-bin HTTP API and
    report results/progress through Qt signals.

    NOTE(review): `dict.has_key` and the bare `except:` clauses are
    Python-2-only idioms; the module appears to target Python 2.
    """
    connected = pyqtSignal(str)      # emitted after a successful token fetch
    openidGot = pyqtSignal(str)      # emitted after openids are pulled
    error = pyqtSignal(str)
    info = pyqtSignal(str)
    onUser = pyqtSignal(User)        # emitted with a loaded User object
    progress = pyqtSignal(int, int)  # (users loaded, total openids)

    def __init__(self):
        QObject.__init__(self)
        self.reset()  # creates self.users before Statistic captures it
        self.statistic = Statistic(self.users)

    def reset(self):
        # Cache of openid -> User; the same dict is shared with self.statistic.
        self.users = {}

    def connect(self, appid, secret):
        """Fetch an access token for the given app credentials."""
        url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s' %(appid, secret)
        try:
            r = requests.get(url)
            if r.status_code == 200:
                content = json.loads(r.content)
                if content.has_key('access_token'):
                    self.access_token = content['access_token']
                    self.connected.emit('Connected to server successfully')
                elif content.has_key('errmsg'):
                    # NOTE(review): errmsg is assigned here but never emitted
                    # or used — looks like a missing self.error.emit(errmsg).
                    errmsg = content['errmsg']
                else:
                    self.error.emit('Unknown error in connection')
            else:
                errmsg = 'Connection error with code %i' %(r.status_code)
                self.error.emit(errmsg)
        except:
            self.error.emit('Unhandling error in connection')

    def get_openids(self):
        """Pull the follower openid list into self.openids."""
        url = 'https://api.weixin.qq.com/cgi-bin/user/get?access_token=%s&next_openid=' %(self.access_token)
        try:
            r = requests.get(url)
            if r.status_code == 200:
                content = json.loads(r.content)
                if content.has_key('data'):
                    self.openids = content['data']['openid']
                    self.openidGot.emit('Pull openids successfully')
            else:
                errmsg = 'Connection error with code %i' %(r.status_code)
                self.error.emit(errmsg)
        except:
            self.error.emit('Unhandling error in show users')

    def get_user(self, openid):
        """Emit the cached User for openid, loading it first if necessary."""
        if openid not in self.users:
            self.load_user(openid)
        self.onUser.emit(self.users[openid])

    def dump_all_users(self):
        """Load every follower's profile from a background thread."""
        self.info.emit('Dumping all users information')
        t = threading.Thread(target = self.dump_execute)
        t.start()

    def dump_execute(self):
        # One thread per missing user; no throttling or join here.
        for openid in self.openids:
            if openid not in self.users:
                t = threading.Thread(target = self.load_user, args = (openid,))
                t.start()

    def load_user(self, openid):
        """Fetch one user's profile, cache it and emit progress."""
        url = 'https://api.weixin.qq.com/cgi-bin/user/info?access_token=%s&openid=%s' %(self.access_token, openid)
        msg = 'loading user %s' %(openid)
        self.info.emit(msg)
        try:
            r = requests.get(url)
            if r.status_code == 200:
                jobj = json.loads(r.content)
                if jobj.has_key('openid'):
                    self.users[openid] = User(jobj)
                    self.progress.emit(len(self.users), len(self.openids))
                else:
                    errmsg = 'User information error'
                    self.error.emit(errmsg)
            else:
                errmsg = 'Connection error with code %i' %(r.status_code)
                self.error.emit(errmsg)
        except:
            self.error.emit('Unhandling error in load user info')

    def do_statistic(self):
        self.statistic.doStatistic()

    # Pie-chart views over the collected user profiles, one per field.
    def show_stat_sex(self):
        self.statistic.showPieChart('sex')

    def show_stat_language(self):
        self.statistic.showPieChart('language')

    def show_stat_city(self):
        self.statistic.showPieChart('city')

    def show_stat_province(self):
        self.statistic.showPieChart('province')

    def show_stat_country(self):
        self.statistic.showPieChart('country')
def __init__(self):
    """Initialise the Qt base, create the empty user cache and attach a
    Statistic helper that shares the same dict."""
    QObject.__init__(self)
    self.reset()  # must run first: it creates self.users
    self.statistic = Statistic(self.users)
class NmeaStatistic:
    """Accumulate NMEA navigation fixes, compute per-field statistics and
    CEP/RMS accuracy figures, and plot the fixes on a Basemap scatter."""

    def __init__(self):
        self.FileName = FILE_NAME_EXT_DATA
        self.OutputFile = None        # opened lazily by write_to_file()
        self.LastNavigateData = None  # previously accepted fix
        self.DistanceFromLastPoint = 0
        self.TotalDistance = 0
        # Counters per GGA fix-quality value 0..6.
        self.FixQuality_0 = 0;
        self.FixQuality_1 = 0;
        self.FixQuality_2 = 0;
        self.FixQuality_3 = 0;
        self.FixQuality_4 = 0;
        self.FixQuality_5 = 0;
        self.FixQuality_6 = 0;
        self.MarkColor = None  # RGB tuple for the current fix quality
        # Raw sample lists fed into the Statistic accumulators.
        self.PDOPList = []
        self.HDOPList = []
        self.VDOPList = []
        self.LatitudeList = []
        self.LongitudeList = []
        self.AltitudeList = []
        self.XList = []  # projected coordinates, filled by convert()
        self.YList = []
        self.ColorList = []
        self.PDOTStatistic = Statistic()
        self.HDOTStatistic = Statistic()
        self.VDOTStatistic = Statistic()
        self.LatitudeStatistic = Statistic()
        self.LongitudeStatistic = Statistic()
        self.AltitudeStatistic = Statistic()
        self.XStatistic = Statistic()
        self.YStatistic = Statistic()
        self.Basemap = None
        # Circular Error Probable radii, set by statistic().
        self.CEP = 0
        self.CEP95 = 0
        self.CEP99 = 0

    def set_file_name(self, file_name):
        # Output path = caller-supplied base name + data-file extension.
        self.FileName = file_name + FILE_NAME_EXT_DATA

    def check_date_time(self, navigate_data):
        """Return True when the fix carries a timestamp strictly after the
        last accepted fix (or when it is the first fix)."""
        result = False
        if navigate_data is None:
            return result
        if navigate_data.LocalDateTime is None:
            return result
        if self.LastNavigateData is not None:
            last_local_date_time = self.LastNavigateData.LocalDateTime
            if last_local_date_time is not None:
                duration = navigate_data.LocalDateTime - last_local_date_time
                # NOTE(review): timedelta.seconds is only the seconds
                # component, not total_seconds(); fixes spaced by whole days
                # or within the same second interact oddly here — confirm.
                if duration.seconds <= 0:
                    return result
        result = True
        return result

    def add_to_data_list(self, navigate_data):
        """Validate one fix, accumulate its DOP/position samples, update the
        fix-quality counters/colours and append a row to the output file."""
        self.DistanceFromLastPoint = 0
        if navigate_data is None:
            return
        if not self.check_date_time(navigate_data):
            return
        # Reject fixes missing any DOP field or with zeroed coordinates.
        if len(navigate_data.PDOP) == 0 or len(navigate_data.HDOP) == 0 or len(navigate_data.VDOP) == 0:
            return
        if navigate_data.LatitudeValue == 0 or navigate_data.LongitudeValue == 0:
            return
        if self.LastNavigateData is not None:
            last_point = (self.LastNavigateData.LatitudeValue, self.LastNavigateData.LongitudeValue)
            current_point = (navigate_data.LatitudeValue, navigate_data.LongitudeValue)
            # Geodesic distance (metres) from the previous accepted fix.
            self.DistanceFromLastPoint = distance.distance(last_point, current_point).m
        self.TotalDistance += self.DistanceFromLastPoint
        try:
            self.PDOPList.append(navigate_data.PDOP)
            self.HDOPList.append(navigate_data.HDOP)
            self.VDOPList.append(navigate_data.VDOP)
            self.LatitudeList.append(navigate_data.LatitudeValue)
            self.LongitudeList.append(navigate_data.LongitudeValue)
            self.AltitudeList.append(float(navigate_data.Altitude))
        except ValueError as e:
            print("ValueError:", e)
            return
        # Marker colours per fix quality (0-255 originals listed below):
        # (55, 168, 218), (187, 249, 112), (255, 255, 0), (113, 130, 36), (113, 174, 38), (255, 255, 255)
        value = int(navigate_data.FixQuality)
        if value == 0:
            self.FixQuality_0 = self.FixQuality_0 + 1;
            self.MarkColor = (0.5, 0.5, 0.5)
            # remark = "Invalid"
        elif value == 1:
            self.FixQuality_1 = self.FixQuality_1 + 1;
            self.MarkColor = (0.22, 0.67, 0.872)
            # remark = "SPS"
        elif value == 2:
            self.FixQuality_2 = self.FixQuality_2 + 1;
            self.MarkColor = (0.733, 0.976, 0.439)
            # remark = "Differential"
        elif value == 3:
            self.FixQuality_3 = self.FixQuality_3 + 1;
            self.MarkColor = (1.0, 1.0, 0)
            # remark = "PPS"
        elif value == 4:
            self.FixQuality_4 = self.FixQuality_4 + 1;
            self.MarkColor = (0.443, 0.509, 0.141)
            # remark = "RTK Fixed"
        elif value == 5:
            self.FixQuality_5 = self.FixQuality_5 + 1;
            self.MarkColor = (0.443, 0.682, 0.149)
            # remark = "RTK Float"
        elif value == 6:
            self.FixQuality_6 = self.FixQuality_6 + 1;
            self.MarkColor = (1.0, 1.0, 1.0)
            # remark = "Estimated"
        self.ColorList.append(self.MarkColor)
        self.LastNavigateData = navigate_data
        self.write_to_file(navigate_data)

    def convert(self):
        """Build a Basemap centred on the mean position and project every
        lat/lon sample into planar XList/YList coordinates."""
        if not self.LongitudeStatistic.valid():
            return
        if not self.LatitudeStatistic.valid():
            return
        # Window half-sizes: scaled max deviation from the mean plus margin b.
        a = 1
        b = 0.001
        longitude_offset = a * max(abs(self.LongitudeStatistic.Mean - self.LongitudeStatistic.Min),
                                   abs(self.LongitudeStatistic.Mean - self.LongitudeStatistic.Max)) + b
        latitude_offset = a * max(abs(self.LatitudeStatistic.Mean - self.LatitudeStatistic.Min),
                                  abs(self.LatitudeStatistic.Mean - self.LatitudeStatistic.Max)) + b
        self.Basemap = Basemap(llcrnrlon=self.LongitudeStatistic.Mean - longitude_offset,
                               llcrnrlat=self.LatitudeStatistic.Mean - latitude_offset,
                               urcrnrlon=self.LongitudeStatistic.Mean + longitude_offset,
                               urcrnrlat=self.LatitudeStatistic.Mean + latitude_offset,
                               projection='lcc',
                               suppress_ticks=False,
                               resolution='i',
                               lat_0=self.LatitudeStatistic.Mean,
                               lon_0=self.LongitudeStatistic.Mean)
        self.XList, self.YList = self.Basemap(self.LongitudeStatistic.DataList, self.LatitudeStatistic.DataList)

    def write_to_file(self, navigate_data):
        """Append one fix as a separated-value row, writing the header row
        when the file is first opened."""
        if self.OutputFile is None:
            self.OutputFile = open(self.FileName, "w")
            text = "date_time" + SEPARATOR_T \
                + "FixQuality" + SEPARATOR_T \
                + "PDOP" + SEPARATOR_T \
                + "HDOP" + SEPARATOR_T \
                + "VDOP" + SEPARATOR_T \
                + "Latitude" + SEPARATOR_T \
                + "Longitude" + SEPARATOR_T \
                + "Altitude" + SEPARATOR_T \
                + "delt" + SEPARATOR_T \
                + "total" + SEPARATOR_N
            self.OutputFile.write(text)
        if self.OutputFile is not None:
            text = navigate_data.local_date_time_to_string() + SEPARATOR_T \
                + navigate_data.FixQuality + SEPARATOR_T \
                + navigate_data.PDOP + SEPARATOR_T \
                + navigate_data.HDOP + SEPARATOR_T \
                + navigate_data.VDOP + SEPARATOR_T \
                + str(navigate_data.LatitudeValue) + SEPARATOR_T \
                + str(navigate_data.LongitudeValue) + SEPARATOR_T \
                + str(navigate_data.Altitude) + SEPARATOR_T \
                + str(self.DistanceFromLastPoint) + SEPARATOR_T \
                + str(self.TotalDistance) + SEPARATOR_N
            self.OutputFile.write(text)

    def statistic(self):
        """Run statistics over every sample list, derive projected X/Y
        statistics and the CEP radii, print a summary and flush it to the
        output file (closing it)."""
        self.PDOTStatistic.statistic("PDOP", self.PDOPList)
        self.HDOTStatistic.statistic("HDOP", self.HDOPList)
        self.VDOTStatistic.statistic("VDOP", self.VDOPList)
        self.LatitudeStatistic.statistic("Latitude", self.LatitudeList)
        self.LongitudeStatistic.statistic("Longitude", self.LongitudeList)
        self.AltitudeStatistic.statistic("Altitude", self.AltitudeList)
        # convert() must run after the lat/lon statistics: it uses their
        # Mean/Min/Max to build the projection that fills XList/YList.
        self.convert()
        self.XStatistic.statistic("X", self.XList)
        self.YStatistic.statistic("Y", self.YList)
        # Circular Error Probable radii from the X/Y standard deviations.
        self.CEP = Statistic.cep(self.XStatistic.Std, self.YStatistic.Std)
        self.CEP95 = Statistic.cep95(self.XStatistic.Std, self.YStatistic.Std)
        self.CEP99 = Statistic.cep99(self.XStatistic.Std, self.YStatistic.Std)
        print(self.cep_to_string())
        print(self.rms_to_string())
        print(self.test_point_count_to_string())
        print(self.fix_quality_count_to_string())
        if self.OutputFile is not None:
            self.OutputFile.write(self.to_string())
            self.OutputFile.close()

    def draw(self):
        """Scatter the projected fixes, draw crosshairs, concentric reference
        rings and the CEP/CEP95/CEP99 circles, then show the plot."""
        if self.Basemap is None:
            return
        self.Basemap.scatter(self.XList, self.YList, c=self.ColorList)
        x0 = self.XStatistic.Mean
        y0 = self.YStatistic.Mean
        # MAX_RADIUS = 10
        x_axis = (x0 - MAX_RADIUS, x0 + MAX_RADIUS)
        y_axis = (y0 - MAX_RADIUS, y0 + MAX_RADIUS)
        plt.plot(x_axis, (y0, y0), color='grey')
        plt.plot((x0, x0), y_axis, color='grey')
        # Reference rings at radii 1..MAX_RADIUS, labelled on the x axis.
        for i in range(0, MAX_RADIUS):
            circle = Circle((x0, y0), radius=i + 1, fill=False, color='#00ffff', alpha=0.5)
            plt.gca().add_patch(circle)
            plt.text(x0 + i + 1, y0, i + 1)
        # CEP circles in red, labelled with their radius.
        circle = Circle((x0, y0), radius=self.CEP, fill=False, color='red')
        plt.gca().add_patch(circle)
        plt.text(x0, y0 + self.CEP, ("%.4f" % self.CEP))
        circle = Circle((x0, y0), radius=self.CEP95, fill=False, color='red')
        plt.gca().add_patch(circle)
        plt.text(x0, y0 + self.CEP95, ("%.4f" % self.CEP95))
        circle = Circle((x0, y0), radius=self.CEP99, fill=False, color='red')
        plt.gca().add_patch(circle)
        plt.text(x0, y0 + self.CEP99, ("%.4f" % self.CEP99))
        plt.show()

    def test_point_count_to_string(self):
        result = ""
        result += "Test point count = " + str(len(self.XList)) + SEPARATOR_N
        return result

    def fix_quality_count_to_string(self):
        """Report the non-zero fix-quality counters, one line each."""
        result = ""
        if self.FixQuality_0 > 0:
            result += "Invalid count = " + str(self.FixQuality_0) + SEPARATOR_N
        if self.FixQuality_1 > 0:
            result += "SPS count = " + str(self.FixQuality_1) + SEPARATOR_N
        if self.FixQuality_2 > 0:
            result += "Differential count = " + str(self.FixQuality_2) + SEPARATOR_N
        if self.FixQuality_3 > 0:
            result += "PPS count = " + str(self.FixQuality_3) + SEPARATOR_N
        if self.FixQuality_4 > 0:
            result += "RTK Fixed count = " + str(self.FixQuality_4) + SEPARATOR_N
        if self.FixQuality_5 > 0:
            result += "RTK Float count = " + str(self.FixQuality_5) + SEPARATOR_N
        if self.FixQuality_6 > 0:
            result += "Estimated count = " + str(self.FixQuality_6) + SEPARATOR_N
        return result

    def rms_to_string(self):
        # Horizontal RMS approximated as 1.2 * CEP; vertical RMS from the
        # altitude standard deviation.
        result = ""
        result += "RMS_H = " + str(1.2 * self.CEP) + SEPARATOR_N \
            + "RMS_V = " + str(self.AltitudeStatistic.Std) + SEPARATOR_N
        return result

    def cep_to_string(self):
        result = ""
        result += "CEP = " + str(self.CEP) + SEPARATOR_N \
            + "CEP95 = " + str(self.CEP95) + SEPARATOR_N \
            + "CEP99 = " + str(self.CEP99) + SEPARATOR_N
        return result

    def to_string(self):
        """Concatenate every sub-report into the full summary text."""
        result = ""
        result += self.PDOTStatistic.to_string() + SEPARATOR_N \
            + self.HDOTStatistic.to_string() + SEPARATOR_N \
            + self.VDOTStatistic.to_string() + SEPARATOR_N \
            + self.LatitudeStatistic.to_string() + SEPARATOR_N \
            + self.LongitudeStatistic.to_string() + SEPARATOR_N \
            + self.AltitudeStatistic.to_string() + SEPARATOR_N \
            + self.XStatistic.to_string() + SEPARATOR_N \
            + self.YStatistic.to_string() + SEPARATOR_N \
            + self.cep_to_string() + SEPARATOR_N \
            + self.rms_to_string() + SEPARATOR_N \
            + self.test_point_count_to_string() + SEPARATOR_N\
            + self.fix_quality_count_to_string()
        return result
class Bot():
    """Telegram bot that announces news-popularity predictions and serves
    the /start, /restrict, /stat, /info, /help and /sites commands.

    NOTE(review): stats() and info() call dict.iteritems(), which exists
    only on Python 2 — confirm the target interpreter.
    """

    def __init__(self, db, chats):
        # Register handlers, start polling and build the Statistic helper.
        self.db = db
        self.chats = chats
        self.updater = Updater(token=GET_TOKEN())
        dispatcher = self.updater.dispatcher
        dispatcher.addTelegramCommandHandler('start', self.start)
        dispatcher.addTelegramCommandHandler('restrict', self.add_restrict)
        dispatcher.addTelegramCommandHandler('stat', self.stats)
        dispatcher.addTelegramCommandHandler('info', self.info)
        dispatcher.addTelegramCommandHandler('help', self.help)
        dispatcher.addTelegramCommandHandler('sites', self.sites)
        dispatcher.addTelegramMessageHandler(self.answer)
        self.updater.start_polling()
        self.bot = telegram.Bot(token=GET_TOKEN())
        self.statistician = Statistic(GET_FIRST_TIME(), GET_LAST_TIME(), db)

    def start(self, bot, update):
        """Register the chat and send a greeting."""
        message = update['message']
        user_id = message['from_user']['id']
        user_name = message['from_user']['first_name']
        chat_id = message['chat']['id']
        self.chats.add_chat(chat_id, user_id, user_name)
        bot.sendMessage(
            chat_id=update.message.chat_id,
            text=
            "Привет, друг! Я буду рассказывать обо всем самом интересном. \nЧтобы мы смогли лучше узнать друг друга, используй команду /help"
        )

    def stats(self, bot, update):
        """Reply with prediction statistics for the last N hours (N <= 48)."""
        try:
            message = update['message']
            chat_id = update.message.chat_id
            text = message['text']
            text = text.split(' ')
            hours = int(text[1])
            if hours > 48:
                text = "Не больше 48 часов"
                bot.sendMessage(chat_id=chat_id, text=text)
                return
            hours_text = get_text_after_number(hours, ["час", "часа", "часов"])
            stat = self.statistician.get_statistic(hours, self.chats.chats[chat_id])
            if stat is None:
                text = "У меня нет информации за последние {} {}.".format(
                    hours, hours_text)
                bot.sendMessage(chat_id=chat_id, text=text)
                return
            news_text = get_text_after_number(
                stat["all"], ["новость", "новости", "новостей"])
            text = "За последние {} {} было предсказано {} {}. " \
                   "\nПравильно предсказано топовых {} из {}. " \
                   "\nОшибочно предсказанно топовых {}. " \
                   "\nОшибочно пропущено топовых {}." \
                   "\nПравильно отсеяно непопулярных {}."
            text = text.format(hours, hours_text, stat["all"], news_text,\
                stat["correct"], stat["correct"]+stat["missed"], \
                stat["error"], \
                stat["missed"],\
                stat["filtered"] )
            # Append the thresholds the statistic was computed against.
            text += "\n\n"
            text += "Статистика была расчитана по порогам:"
            threshold = merge_two_dicts(Statistic.default_dict,
                                        self.chats.chats[chat_id])
            for key, value in threshold.iteritems():
                text += "\n"
                text += str(int(value)) + " : " + str(key)
            bot.sendMessage(chat_id=chat_id, text=text)
        except Exception as e:
            logging.exception("stat_exception")
            bot.sendMessage(
                chat_id=update.message.chat_id,
                text=
                "Не понял. Введи, например\n /stat 15\n - статистика за 15 часов\nВозможно, промежуток времени недостаточный"
            )

    def add_restrict(self, bot, update):
        """Set a per-site tweet-count threshold for this chat."""
        try:
            message = update['message']
            chat_id = update.message.chat_id
            text = message['text']
            text = text.split(' ')
            type = text[1]
            value = int(text[2])
            # Unknown site: list the supported ones and bail out.
            if type not in Statistic.default_dict.keys():
                text = "Такого сайта нет. Выбирай любой из:\n"
                for key in Statistic.default_dict.keys():
                    text += key
                    text += "\n"
                bot.sendMessage(chat_id=update.message.chat_id, text=text)
                return
            self.chats.add_restrict(chat_id, type, value)
            tweets_text = get_text_after_number(value, ["твит", "твита", "твитов"])
            bot.sendMessage(chat_id=chat_id,
                            text="Установил для {} ограничение в {} {}".format(
                                type, value, tweets_text))
        except Exception as e:
            logging.exception("restriction_exception")
            bot.sendMessage(
                chat_id=update.message.chat_id,
                text="Не понял. Введи, например\n /restrict lenta.ru 5")

    def answer(self, bot, update):
        """
        Reply to any plain (non-command) user message.
        :param bot:
        :param update:
        """
        bot.sendMessage(
            chat_id=update.message.chat_id,
            text=
            "Прости, я тут вообще-то новости анализирую, мне некогда болтать")

    def send_message(self, chats, url, predicted, first_time_tweets):
        """Push a prediction announcement to every chat id in *chats* via
        the raw sendmessage HTTP endpoint (bypasses the bot object)."""
        #text = "Ухты! Ты только глянь, какая новость! Наберет {} твиттов! {}".format(int(predicted), url)
        first_time_text = get_text_after_number(GET_FIRST_TIME(),
                                                ["минута", "минуты", "минут"])
        last_time_text = get_text_after_number(GET_LAST_TIME(),
                                               ["минута", "минуты", "минут"])
        first_time_tweets_text = get_text_after_number(
            first_time_tweets, ["твит", "твита", "твитов"])
        predicted_time_tweets_text = get_text_after_number(
            int(predicted), ["твит", "твита", "твитов"])
        text = "{}\n" \
               "За первые {} {} новость уже набрала {} {}. \n" \
               "Через {} {} она наберет примерно {} {}."\
            .format(url, \
                GET_FIRST_TIME(), first_time_text,first_time_tweets, first_time_tweets_text, \
                GET_LAST_TIME(), last_time_text, int(predicted), predicted_time_tweets_text)
        for chat_id in chats:
            # NOTE(review): text is placed in the query string without URL
            # encoding — confirm special characters survive.
            requests.get(
                "https://api.telegram.org/bot{}/sendmessage?chat_id={}&text={}"
                .format(GET_TOKEN(), chat_id, text))

    def info(self, bot, update):
        """Show the chat's current per-site thresholds."""
        try:
            message = update['message']
            chat_id = update.message.chat_id
            chats = self.chats.chats[chat_id]
            chats_text = ""
            for key, value in chats.iteritems():
                chats_text += "\n"
                chats_text += str(int(value)) + " : " + str(key)
            if len(chats_text) == 0:
                chats_text = "\nВы получаете все сообщения. Ограничений на количество новостей нет.\n\nЧтобы поставить, воспользуйтейсь командой /restrict lenta.ru 10"
                bot.sendMessage(chat_id=chat_id, text=chats_text)
                return
            bot.sendMessage(
                chat_id=chat_id,
                text=
                "Текущие пороги, в соотвествии с которыми Вы получаете сообщения:{}"
                .format(chats_text))
        except Exception as e:
            logging.exception("info exception")
            bot.sendMessage(
                chat_id=update.message.chat_id,
                text=
                "Что-то пошло не так. Мне кажется я заболел. Попробуй позже.")

    def help(self, bot, update):
        """List the supported commands."""
        try:
            message = update['message']
            chat_id = update.message.chat_id
            text = "Что же я умею:\n\n" \
                   "Команда /restrict lenta.ru 10\n- высылать с сайта lenta.ru только те новости, которые наберут больше 10 твитов\n\n" \
                   "Команда /stat 10\n- выдает статистику моей работы за последние 10 часов\n\n" \
                   "Команда /info\n- покажет текущие пороги, по которым я высылаю новости \n\n" \
                   "Команда /sites\n- покажет список сайтов, для которых я умею предсказывать популярность новостей \n\n" \
                   "А теперь прости, мне нужно работать"
            bot.sendMessage(chat_id=chat_id, text=text)
        except Exception as e:
            logging.exception("help exception")
            bot.sendMessage(
                chat_id=update.message.chat_id,
                text=
                "Что-то пошло не так. Мне кажется я заболел. Попробуй позже.")

    def sites(self, bot, update):
        """List the sites for which predictions are available."""
        try:
            message = update['message']
            chat_id = update.message.chat_id
            text = "Я неплохо предсказываю:\n"
            for site in Statistic.default_dict.keys():
                text += "\n"
                text += site
            bot.sendMessage(chat_id=chat_id, text=text)
        except Exception as e:
            logging.exception("sites exception")
            bot.sendMessage(
                chat_id=update.message.chat_id,
                text=
                "Что-то пошло не так. Мне кажется я заболел. Попробуй позже.")
def getdigitsSpacing(self, cBoxes, minSpacing):
    """Estimate the grid spacing between detected digit boxes and refine
    the sudoku box centre.

    Fixes over the previous version:
      * the Y-centre statistic now accumulates the y coordinate (was x),
      * the refined Y centre is stored in cy (was overwriting cx),
      * the Y-only fallback now assigns spacingY (was spacingX),
      * the pair loop is bounded by len(cBoxes), which it indexes.
    """
    self.digitsSpacing = 0
    deltaX = []
    deltaY = []
    centerX = Statistic()
    centerY = Statistic()
    cx, cy = self.sudokuBoxCenter
    # Refine the box centre from digits close to the current centre.
    for digit in cBoxes:
        if abs(digit["center"][0] - cx) < minSpacing/2:
            centerX.add(digit["center"][0])
        if abs(digit["center"][1] - cy) < minSpacing/2:
            centerY.add(digit["center"][1])  # was [0]: wrong axis
    if centerX.count > 1:
        cx = centerX.mean()
    if centerY.count > 1:
        cy = centerY.mean()  # was assigned to cx, clobbering the X centre
    self.sudokuBoxCenter = (cx, cy)
    # Collect pairwise centre distances between all valid objects.
    for idx1 in range(len(cBoxes)-1):
        obj1 = cBoxes[idx1]
        if obj1["valid"] is False:
            continue
        # NOTE(review): the original iterated to len(self.allDigitsCenter)
        # while indexing cBoxes; bounded by len(cBoxes) to avoid an
        # IndexError when the two containers differ in length.
        for idx2 in range(idx1+1, len(cBoxes)):
            obj2 = cBoxes[idx2]
            if obj2["valid"] is False:
                continue
            dx = abs(obj1["center"][0] - obj2["center"][0])
            if dx >= minSpacing/2:
                deltaX.append(dx)
            dy = abs(obj1["center"][1] - obj2["center"][1])
            if dy >= minSpacing/2:
                deltaY.append(dy)
    deltaX.sort()
    deltaY.sort()
    # Smallest-cluster mean per axis (shared helper; was duplicated inline).
    spacingX = self._minClusterMean(deltaX, minSpacing)
    spacingY = self._minClusterMean(deltaY, minSpacing)
    # The smallest pair distance may span several grid cells (x, 2x, 3x...);
    # divide down until eight cells fit inside the sudoku box.
    for i in range(1, 9):
        spacing = spacingX / i
        if (8 * spacing) < self.sudokuBoxSize:
            spacingX = spacing
            break
    for i in range(1, 9):
        spacing = spacingY / i
        if (8 * spacing) < self.sudokuBoxSize:
            spacingY = spacing
            break
    print("spacing X", spacingX, "spacingY", spacingY)
    if (spacingX > 1) and (spacingY > 1):
        self.digitsSpacing = (spacingX + spacingY)/2
    elif spacingX > 1:
        self.digitsSpacing = spacingX
    elif spacingY > 1:
        self.digitsSpacing = spacingY  # was spacingX: copy-paste bug
    else:
        # No usable spacing on either axis: fall back to a uniform 9x9 grid.
        self.digitsSpacing = self.sudokuBoxSize/9

def _minClusterMean(self, sortedDeltas, minSpacing):
    """Return the mean of the first (smallest) cluster of sorted distances.

    Logic preserved exactly from the original inline loops.
    NOTE(review): values within minSpacing/2 of the running mean are never
    added to the cluster — possibly a missing else-branch; kept as-is to
    avoid changing results.
    """
    stat = Statistic()
    for d in sortedDeltas:
        if stat.count == 0:
            stat.add(d)
        else:
            if (d - stat.mean()) > minSpacing / 2:
                if stat.count == 1:
                    # Single outlier so far: restart the cluster here.
                    stat.clear()
                    stat.add(d)
                else:
                    break
    return stat.mean()