def find_closest_ts(timestamp_list, seek_timestamp, max_dist=float('inf')):
    """Find the timestamp closest to ``seek_timestamp`` in a sorted list.

    Uses ``bisect.bisect_left`` for the binary search.  ``bisect_left``
    returns an insertion point, not a nearest match, so the candidates one
    index below and above are also compared; both sides are checked because
    the list may be sorted ASC or DESC.

    Args:
        timestamp_list: sorted list of timestamps.
        seek_timestamp: timestamp to search near.
        max_dist: maximum allowed distance in seconds (default: unbounded).

    Returns:
        ``(closest_timestamp, distance)`` when a timestamp lies within
        ``max_dist`` of ``seek_timestamp``; ``(None, None)`` otherwise.
    """
    # Empty input: nothing can match.
    if not timestamp_list:
        return None, None
    idx = bisect_left(timestamp_list, seek_timestamp)
    # bisect_left returns len(timestamp_list) when seek_timestamp sorts
    # after every element; clamp so the final element is still considered.
    # (The original returned (None, None) here even when the last timestamp
    # was within max_dist, contradicting the documented contract.)
    idx = min(idx, len(timestamp_list) - 1)
    lower = max(0, idx - 1)
    upper = min(idx + 1, len(timestamp_list) - 1)
    closest_timestamp = min(timestamp_list[lower:upper + 1],
                            key=lambda x: time_diff(x, seek_timestamp))
    distance = time_diff(closest_timestamp, seek_timestamp)
    if distance <= max_dist:
        return closest_timestamp, distance
    return None, None
def get(): """ 所需参数: user_id: 登录用户id """ parser = reqparse.RequestParser() parser.add_argument('user_id', type=str, required=True) args = parser.parse_args() user_id = args['user_id'] resp_suc = {} resp_suc['list'] = [] if user_id: message_count = Message.query.filter(Message.receiver_id == user_id).count() if message_count > 1: messages = Message.query.filter(Message.receiver_id == user_id) for message in messages: times = time_diff(message.time) content = message.content user_pic = to_messages(times, content, message.sender_id) resp_suc['list'].append(user_pic) else: message = Message.query.filter(Message.receiver_id == user_id).first() if message: times = time_diff(message.time) content = message.content user_pic = to_messages(times, content, message.sender_id) resp_suc['list'].append(user_pic) resp_suc['status'] = 0 resp_suc['message'] = 'success' return resp_suc else: resp_suc['status'] = 1 resp_suc['message'] = 'error' return resp_suc
def parse(args):
    """Stream a log file row by row and write completed sessions out.

    Sessions are tracked per-IP in an OrderedDict.  A session is flushed
    when the same IP reappears after more than ``timeout`` of inactivity,
    when enough distinct timestamps have accumulated to expire the oldest
    one, or at end of file.

    :param args: parsed CLI namespace providing inactivityFilePath,
        logFilePath, outPath, time_format and delimiter.
    :return: None; session lines are written to ``args.outPath``.
    """
    timeout = get_inactivity_period(args.inactivityFilePath)
    rows = iter(get_log_data(args.logFilePath))
    next(rows, None)  # skip the header
    ip_map = OrderedDict()
    unique_timestamp = set()
    # Start from a clean output file; ignore a missing one.
    with contextlib.suppress(FileNotFoundError):
        os.remove(args.outPath)
    for (i, row) in enumerate(rows):
        try:
            if valiad_row(row):
                fields = row
                init_time = convert_datetime(fields[1], fields[2],
                                             args.time_format)
                unique_timestamp.add(init_time)
                ip = fields[0]
                if ip in ip_map:
                    if time_diff(init_time, ip_map[ip].get_last_session_time()) > timeout:
                        # Inactivity window exceeded: flush the old session
                        # and start a fresh one for this IP.
                        dump_list_to_file_as_line(ip_map[ip].output_session(),
                                                  args.outPath, args.delimiter)
                        ip_map[ip] = Session(ip, init_time)
                    else:
                        ip_map[ip].set_last_session_time(init_time)
                        ip_map[ip].increment_doc()
                else:
                    ip_map[ip] = Session(ip, init_time)
                if (len(unique_timestamp) - 1 > timeout):
                    # Expire every session whose last activity matches the
                    # oldest timestamp seen.  min() hoisted: the original
                    # recomputed it per session and once more for remove().
                    oldest = min(unique_timestamp)
                    exp_ips = [v.get_ip() for v in ip_map.values()
                               if v.get_last_session_time() == oldest]
                    for exp in exp_ips:
                        dump_list_to_file_as_line(
                            ip_map.pop(exp).output_session(), args.outPath,
                            args.delimiter)
                    unique_timestamp.remove(oldest)
            else:
                logging.warning('row {} has invalid data'.format(i))
        except Exception as e:
            logging.warning('row {} could not be processed because {}'.format(
                i, str(e)))
    # End of file: flush every session that is still open.
    for ip in ip_map.keys():
        dump_list_to_file_as_line(ip_map[ip].output_session(), args.outPath,
                                  args.delimiter)
    # Typo fixed: the original logged 'Excution complete'.
    logging.info('Execution complete')
def get(): """ 所需参数: user_id:必传,用户登录的id pub_id: 必传,用户收藏的酒吧id """ parser = reqparse.RequestParser() parser.add_argument('user_id', type=str, required=True, help=u'必须要传入user_id。') args = parser.parse_args() user_id = int(args['user_id']) resp_suc = {} resp_suc['list'] = [] if user_id: result_count = session.query(Pub).\ join(Collect).\ filter(Collect.user_id == user_id).count() if result_count > 1: results = session.query(Pub).\ join(Collect).\ filter(Collect.user_id == user_id) for result in results: collect = Collect.query.filter(Collect.user_id == user_id, Collect.pub_id == result.id).first() difference = time_diff(collect.time) result_pic = differences(result, difference) resp_suc['list'].append(result_pic) else: result = session.query(Pub).\ join(Collect).\ filter(Collect.user_id == user_id).first() if result: collect = Collect.query.filter(Collect.user_id == user_id, Collect.pub_id == result.id) difference = time_diff(collect.time) result_pic = differences(result, difference) resp_suc['list'].append(result_pic) resp_suc['status'] = 0 else: resp_suc['message'] = 'error' resp_suc['status'] = 1 return resp_suc
def fit(self, sess, x_train, y_train, x_dev, y_dev, save_dir=None,
        print_per_batch=100):
    """Train the model, reporting/evaluating periodically with early stop.

    Args:
        sess: TensorFlow session to run in.
        x_train, y_train: training data and labels.
        x_dev, y_dev: dev-set data and labels used for model selection.
        save_dir: directory for the best checkpoint (created if missing);
            no checkpointing when None.
        print_per_batch: report (and evaluate on the dev set) every this
            many batches.
    """
    saver = tf.train.Saver()
    if save_dir:
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
    sess.run(tf.global_variables_initializer())
    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0            # total batches processed
    best_acc_dev = 0.0         # best dev-set accuracy so far
    last_improved = 0          # batch index of the last improvement
    require_improvement = 500  # stop early after this many batches without improvement
    flags = False
    for epoch in range(self.n_epoch):
        print('Epoch:', epoch + 1)
        for train_feed, train_n in self.get_batches(
                x_train, y_train, batch_size=self.batch_size):
            loss_train, acc_train = self.train_on_batch(sess, train_feed)
            if total_batch % print_per_batch == 0:
                # Perf fix: the original ran a full dev-set evaluation on
                # EVERY batch although the result was only used inside
                # this reporting branch; evaluate only when reporting.
                loss_dev, acc_dev = self.evaluate(sess, x_dev, y_dev)
                if acc_dev > best_acc_dev:
                    # Checkpoint the best model seen on the dev set.
                    best_acc_dev = acc_dev
                    last_improved = total_batch
                    if save_dir:
                        saver.save(sess=sess, save_path=os.path.join(
                            save_dir, 'sa-model'))
                    improved_str = '*'
                else:
                    improved_str = ''
                time_dif = time_diff(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' + \
                      ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
                print(
                    msg.format(total_batch, loss_train, acc_train, loss_dev,
                               acc_dev, time_dif, improved_str))
            total_batch += 1
            if total_batch - last_improved > require_improvement:
                print('No optimization for a long time, auto-stopping...')
                flags = True
                break
        if flags:
            break
def traverse_message(message, resp_suc):
    """Append a single message, if present, to ``resp_suc['list']``.

    The entry is built by ``to_messages`` and then tagged with the sender
    id, receiver id and a human-readable timestamp string.
    """
    if not message:
        return
    entry = to_messages(time_diff(message.time), message.content,
                        message.sender_id)
    entry['sender_id'] = message.sender_id
    entry['receiver_id'] = message.receiver_id
    entry['time'] = time_to_str(message.time)
    resp_suc['list'].append(entry)
def extract_stays(locations, t_dur=5, l_roam=5):
    """Extract stay points from a location trace (algorithm per LNCS 3234).

    Scans for windows where the user lingers at least ``t_dur`` minutes
    within a roaming diameter of ``l_roam``, then records the medoid of
    each such window together with its start/end timestamps and hands the
    result to ``get_places``.

    Args:
        locations: sequence of location records with a 'timestampMs' key.
        t_dur: minimum stay duration (same units as utils.time_diff).
        l_roam: maximum roaming diameter for a stay (units of utils.diameter).
    """
    i = 0
    s = []
    while i < len(locations) - 1:
        j = i + 1
        if j < len(locations):
            time_i = int(locations[i]['timestampMs'])
            # Bug fix: the original hard-coded `< 5` here, silently
            # ignoring the t_dur parameter it was meant to honor.
            while j < len(locations) and utils.time_diff(locations, i, j) < t_dur:
                # Find the next record at least t_dur away in time.
                j += 1
            if j >= len(locations):
                j -= 1
            if i == j:
                i = i + 1
                continue
            if utils.time_diff(locations, i, j) > 60:
                # Gap too large (> 60): treat what follows as a new stay.
                i = j
                continue
            if utils.diameter(locations, i, j) > l_roam:
                i = i + 1
            else:
                # Grow the window while it stays within l_roam and under
                # 24h (24 * 60 minutes).
                while j < len(locations) and utils.diameter(
                        locations, i, j) <= l_roam and utils.time_diff(
                        locations, i, j) <= 24 * 60:
                    j += 1
                j -= 1
                time_j = int(locations[j]['timestampMs'])
                s.append((utils.medoid(locations, i, j), time_i, time_j))
                i = j + 1
        else:
            break
    get_places(s)
def traverse_user_sender_one(message, resp_suc, receiver_id):
    """Append one message sent to a friend to ``resp_suc['list']``.

    When the reader (``receiver_id``) is not the sender, the message is
    marked as viewed (view = 1) and the change is committed.
    """
    if message.sender_id != receiver_id:
        # Receiver is reading someone else's message: mark it as viewed.
        message.view = 1
        db.commit()
    # The original computed time_diff(message.time) into an unused local
    # (to_messages_sender takes no elapsed-time argument); removed.
    user_pic = to_messages_sender(message.content, message.sender_id,
                                  receiver_id)
    user_pic['sender_id'] = message.sender_id
    user_pic['receiver_id'] = message.receiver_id
    user_pic['time'] = time_to_str(message.time)
    resp_suc['list'].append(user_pic)
async def boss_reminder(self):
    """Background task: ping the Boss Hunter role before the next boss.

    Polls once per second; when the time until the next boss equals the
    configured notice window, a reminder mentioning the role is sent to
    the bot channel.
    """
    await self.bot.wait_until_ready()
    chn = self.bot.get_channel(Constants.ID_CHN_BOT_CHN)
    # Bug fix: the original left `bh` unbound when no role matched,
    # which raised NameError at bh.mention later; exit gracefully instead.
    bh = None
    for role in self.bot.get_guild(self.bot.get_server_id()).roles:
        if role.name == Constants.BOSS_HUNTER:
            bh = role
    if bh is None:
        logger.warning('Boss Hunter role not found; reminder task exiting')
        return
    while not self.bot.is_closed():
        # NOTE(review): equality (==) on whole seconds assumes time_diff
        # returns an int and this loop ticks exactly once per second; a
        # missed tick skips the notification — confirm time_diff's contract.
        if time_diff(next_boss().get_time()
                     ) == Constants.BOSS_NOTIFICATION_NOTICE_SECONDS:
            logger.info(Constants.BOSS_NOTIFICATION_SENT,
                        next_boss().get_name())
            await chn.send(
                Constants.MSG_NEXT_BOSS_REMINDER.format(
                    bh.mention, next_boss().get_name()))
        await asyncio.sleep(1)
def traverse_collect(result, user_id, resp_suc):
    """Serialize one collected pub into ``resp_suc['list']``.

    Marks the pub as collected, enriches the payload with the collection
    age, city information and an optional picture path, then rewrites the
    coordinates before appending.
    """
    if not result:
        return
    result.is_collect = True
    pub_type(result)
    collect = Collect.query.filter(Collect.user_id == user_id,
                                   Collect.pub_id == result.id).first()
    pic = differences(result, time_diff(collect.time))
    pic.pop('longitude')
    pic.pop('latitude')
    county = County.query.filter(County.id == result.county_id).first()
    to_city(pic, county)
    picture = PubPicture.query.filter(PubPicture.pub_id == result.id).first()
    if picture and picture.rel_path and picture.pic_name:
        pic['pic_path'] = picture.rel_path + '/' + picture.pic_name
    change_latitude_longitude(pic, result)
    resp_suc['list'].append(pic)
def traverse_messages_receiver(messages, resp_suc):
    """Append every received message to ``resp_suc['list']``.

    Each message is marked as viewed (view = 1) and serialized via
    ``to_messages`` with sender, receiver and a readable timestamp.
    (A stale commented-out variant of this loop was removed.)
    """
    if not messages:
        return
    for message in messages:
        message.view = 1
        # NOTE(review): this commits once per message, as the original
        # did; a single commit after the loop would be cheaper if the
        # transactional behavior allows it.
        db.commit()
        times = time_diff(message.time)
        user_pic = to_messages(times, message.content, message.sender_id)
        user_pic['sender_id'] = message.sender_id
        user_pic['receiver_id'] = message.receiver_id
        user_pic['time'] = time_to_str(message.time)
        resp_suc['list'].append(user_pic)
def traverse_message_receiver(sender_message, resp_suc):
    """Append one received message to ``resp_suc['list']`` and mark it viewed.

    (A stale commented-out variant of this body was removed.)
    """
    if not sender_message:
        return
    sender_message.view = 1
    db.commit()
    times = time_diff(sender_message.time)
    content = sender_message.content
    # NOTE(review): passes receiver_id where the sibling helpers pass
    # sender_id to to_messages — looks intentional for the receiver's
    # point of view, but confirm against to_messages' signature.
    user_pic = to_messages(times, content, sender_message.receiver_id)
    user_pic['sender_id'] = sender_message.sender_id
    user_pic['receiver_id'] = sender_message.receiver_id
    user_pic['time'] = time_to_str(sender_message.time)
    resp_suc['list'].append(user_pic)
# Ultrasonic level-sensor loop (HC-SR04 style): trigger a pulse, time the
# echo, and convert the round-trip time into a remaining-volume reading.
GPIO.output(PIN_TRIGGER, GPIO.LOW)
time.sleep(2)  # let the sensor settle before the first trigger
try:
    measurements_avg = []
    while True:
        measurements = []
        # Collect a batch of 300 raw readings before sorting/averaging.
        while len(measurements) < 300:
            # A ~1 µs HIGH pulse on the trigger pin starts one measurement.
            GPIO.output(PIN_TRIGGER, GPIO.HIGH)
            time.sleep(0.000001)
            GPIO.output(PIN_TRIGGER, GPIO.LOW)
            ref = time.time()
            pulse_start = time.time()
            # Busy-wait for the echo line to go HIGH; the 10 ms bound on
            # utils.time_diff(ref, ...) guards against a missing echo.
            while GPIO.input(PIN_ECHO)==0 and utils.time_diff(ref, pulse_start) < 0.01:
                pulse_start = time.time()
            pulse_end = time.time()
            # Busy-wait for the echo line to drop, same 10 ms bound.
            while GPIO.input(PIN_ECHO)==1 and utils.time_diff(ref, pulse_end) < 0.01:
                pulse_end = time.time()
            pulse_time = utils.time_diff(pulse_start, pulse_end)
            if pulse_time < 0.005:
                # distance = pulse_time * SPEED_OF_SOUND / 2; scaled by
                # SURFACE_AREA into a volume and subtracted from MAX_VOLUME.
                measurements.append(round(MAX_VOLUME - pulse_time * (SPEED_OF_SOUND / 2) * SURFACE_AREA, 2))
            else:
                # Pulse longer than 5 ms: out of range / timed out.
                logging.warning(f"[{CONTEXT}] invalid measurement")
            time.sleep(0.01)
        measurements.sort()
        # NOTE(review): chunk ends here — the remainder of this try block
        # (and its except/finally) continues beyond the visible source.
# Main supervisor loop: poll for a remote "stream" command, start/stop the
# streaming threads, and reboot the device on any unhandled error.
# NOTE(review): this chunk is the tail of a try-statement whose `try:` opens
# before the visible source; the except/finally clauses below belong to it.
is_streaming = False
time.sleep(STARTUP_TIME)
utils.setup_logging("main")
time.sleep(MAX_STREAMING_TIME)
while True:
    utils.connect()
    # Start streaming when a sufficiently recent "stream" command exists.
    if not is_streaming and utils.command_sent(CONTEXT, "stream", COMMAND_OUTDATED_AFTER):
        start_threads()
    # Stop once the maximum streaming window has elapsed.
    # (presumably start_streaming/is_streaming are set by start_threads —
    # TODO confirm, neither is assigned in this chunk's loop)
    if is_streaming and utils.time_diff(start_streaming, time.time()) > MAX_STREAMING_TIME:
        stop_threads()
    time.sleep(CONNECT_INTERVAL)
except KeyboardInterrupt:
    pass
except:
    # Last-resort handler: log, pause, then reboot the device.
    logging.exception(f"[{CONTEXT}] general error")
    time.sleep(PAUZE_BEFORE_REBOOT)
    logging.info(f"[{CONTEXT}] rebooting")
    os.system("sudo reboot")
finally:
    stop_threads()