def get_queue_key_of_alarm(self, alarm_item):
    """Map one alarm dict to the redis queue key it should be pushed onto.

    The hash source depends on alarm_type so related alarms land in the
    same queue (attention: using the right field as hash value will reduce
    progress communication):
      - packet:       uniq_chain_hash of the packet chain
      - networksize:  sha256 of the (possibly truncated) network id
      - system/other: random spread across the type's queues

    Newly seen queue keys are registered both in redis (set
    `self.all_queue_keys`) and in the local cache `self.all_queue_keys_set`.
    Returns the queue key string, eg 'topargus_alarm_list:packet:2'.
    """
    alarm_type = alarm_item.get('alarm_type')
    msg_hash = None
    if alarm_type == 'packet':
        msg_hash = int(alarm_item.get('alarm_content').get('uniq_chain_hash'))
    elif alarm_type == 'networksize':
        node_id = alarm_item.get('alarm_content').get('node_id')
        network_id = node_id[:12]  # head 8 * 2 bytes
        if network_id.startswith('ffffff'):
            network_id = 'ffffff'
        msg_hash = int(int(hashlib.sha256(network_id.encode('utf-8')).hexdigest(), 16) % 10**8)
    elif alarm_type == 'system':
        # Fix: the original computed a sha256 of the hard-coded
        # '127.0.0.1:9000' here and then immediately overwrote the result
        # with random.randint — the hash was dead code.  The effective
        # behavior (random spread) is kept; the dead computation is removed.
        msg_hash = random.randint(0, 10000)
    else:
        msg_hash = random.randint(0, 10000)
    # eg: topargus_alarm_list:type:0 ; topargus_alarm_list:type:1
    index = msg_hash % self.alarm_type_queue_num  # 0,1,2,3
    qkey = '{0}:{1}:{2}'.format(self.queue_key_base, alarm_type, index)
    if qkey not in self.all_queue_keys_set:
        self.myredis.sadd(self.all_queue_keys, qkey)
        self.all_queue_keys_set.add(qkey)
    slog.debug('get qkey:{0}'.format(qkey))
    return qkey
def query_from_db(cls, data, cols=None, page=1, limit=200):
    """Select network rows matching the optional equality filters in `data`.

    Supported filters: network_id, network_type, network_num.
    Returns (rows, total) and logs the query plus elapsed time.

    NOTE(review): the WHERE clause is built by string formatting; if the
    filter values can come from untrusted input this is SQL-injectable --
    confirm callers sanitize, or move to parameterized queries.
    """
    sbegin = int(time.time() * 1000)
    where, vs, total = [], [], 0
    if data.get('network_id'):
        where.append(' `network_id` = "{0}" '.format(
            data.get('network_id')))
    # network_type choices_list is [rec,zec,edg,arc,adv,val]
    if data.get('network_type'):
        where.append(' `network_type` = "{0}" '.format(
            data.get('network_type')))
    if data.get('network_num'):
        where.append(' `network_num` = {0} '.format(
            data.get('network_num')))
    where = ' and '.join(where)
    vs, total = [], 0  # re-initialized; redundant but harmless
    vs = cls.select_vs(cols=cols, where=where, page=page,
                       limit=limit, order='')
    total = cls.total(where=where)
    send = int(time.time() * 1000)
    slog.debug('select * from %s where %s,total: %s taking:%d ms' %
               (cls._tbl, where, total, (send - sbegin)))
    return vs, total
def query_from_db(cls, data, cols=None, page=1, limit=50):
    """Select rows by network_id (prefix or exact) within a timestamp window.

    A network_id of <= 20 chars is treated as a prefix (regexp "^id");
    longer values must match exactly.  begin/end bound the `timestamp`
    column.  Returns (rows, total), newest first.

    NOTE(review): WHERE clause is string-formatted -- SQL-injectable if
    values come from untrusted input.
    """
    sbegin = int(time.time() * 1000)
    where, vs, total = [], [], 0
    if data.get('network_id'):
        # short ids are treated as a prefix match
        if len(data.get('network_id')) <= 20:
            where.append(' network_id regexp "^{0}" '.format(
                data.get('network_id')))
        else:
            where.append(' network_id = "{0}" '.format(
                data.get('network_id')))
    if data.get('begin'):
        where.append(' `timestamp` >= {0} '.format(data.get('begin')))
    if data.get('end'):
        where.append(' `timestamp` <= {0} '.format(data.get('end')))
    where = ' and '.join(where)
    vs, total = [], 0  # re-initialized; redundant but harmless
    vs = cls.select_vs(cols=cols, where=where, page=page,
                       limit=limit, order=' timestamp desc')
    total = cls.total(where=where)
    send = int(time.time() * 1000)
    slog.debug('select * from %s where %s,total: %s taking:%d ms' %
               (cls._tbl, where, total, (send - sbegin)))
    return vs, total
def system_cron_alarm(self, content):
    """Validate and persist one system cron report.

    Drops reports older than 10 minutes, or whose send_timestamp is not
    second-aligned; otherwise sets the netN membership flag for every
    network this ip participates in and inserts the row.

    Fix: `'...'.foramt(num)` raised AttributeError whenever an invalid
    network_num was encountered -- corrected to `.format`.
    """
    now = (time.time() * 1000)
    send_timestamp = content.get('send_timestamp')
    if abs(now - send_timestamp) > 10 * 60 * 1000:
        slog.warn('system_cron_alarm expired, diff:{0} ms'.format(
            abs(now - send_timestamp)))
        return
    if content.get('send_timestamp') % (60 * 1000) % 1000 != 0:
        slog.warn('system_cron_alarm send_timestamp:{0} invalid'.format(
            content.get('send_timestamp')))
        return
    network_num_result = self.get_network_num_of_ip(
        content.get('public_ip_port'))
    for num in network_num_result:
        # valid network numbers are 1..10 (matches the net1..net10 columns)
        if num < 1 or num > 10:
            slog.warn('network_num:{0} invalid'.format(num))  # fixed: was .foramt
            continue
        db_net_field = 'net{0}'.format(num)
        content[db_net_field] = 1
    self.system_cron_info_sql_.insert_to_db(content)
    slog.debug('insert system_cron_info to db:{0}'.format(
        json.dumps(content)))
    return
def query_from_db(cls, data, cols=None, page=1, limit=200):
    """Select system-cron rows by ip, net1..net10 membership flags and a
    send_timestamp window.  Returns (rows, total), newest first.

    NOTE(review): string-formatted WHERE -- SQL-injectable with untrusted
    input.
    """
    sbegin = int(time.time() * 1000)
    where, vs, total = [], [], 0
    if data.get('public_ip_port'):
        where.append(' `public_ip_port` = "{0}" '.format(
            data.get('public_ip_port')))
    # attention: max net size is 10
    for i in range(1, 11):
        net_sql_field = 'net{0}'.format(i)  # net1, net2...net10
        if data.get(net_sql_field):
            where.append(' `{0}` = {1} '.format(net_sql_field,
                                                data.get(net_sql_field)))
    if data.get('begin'):
        where.append(' `send_timestamp` >= {0} '.format(data.get('begin')))
    if data.get('end'):
        where.append(' `send_timestamp` <= {0} '.format(data.get('end')))
    where = ' and '.join(where)
    vs, total = [], 0  # re-initialized; redundant but harmless
    vs = cls.select_vs(cols=cols, where=where, page=page,
                       limit=limit, order=' send_timestamp desc ')
    total = cls.total(where=where)
    send = int(time.time() * 1000)
    slog.debug('select * from %s where %s,total: %s taking:%d ms' %
               (cls._tbl, where, total, (send - sbegin)))
    return vs, total
def get_queue_exp(self, queue_key_list, step=50):
    """Pop up to `step` alarm items from the given queues in one batch.

    Fix: replaced `item != None` with the identity test `is not None`.
    NOTE(review): get_queue uses a blocking pop (brpop timeout=0), so this
    loop only returns after `step` items have arrived -- confirm that is
    the intended batching behavior.
    """
    item_list = []
    for i in range(0, step):
        item = self.get_queue(queue_key_list)
        if item is not None:
            item_list.append(item)
    slog.debug('get_queue multi-item size:{0}'.format(len(item_list)))
    return item_list
def execute_command():
    """Daemon loop: drain the global `mycommand` list and run each entry
    through the shell.  Never returns."""
    global mycommand
    while True:
        time.sleep(1)  # poll the command list once per second
        while mycommand:
            cmd = mycommand.pop()  # pops from the tail -> LIFO execution order
            slog.debug("execute_command: {0}".format(cmd))
            # NOTE(review): os.system runs the string through a shell; the
            # producers of `mycommand` must be trusted -- confirm nothing
            # user-controlled can reach this list.
            os.system(cmd)
def get_queue(self, queue_key_list):
    """Blocking-pop one alarm item from the given redis queues.

    brpop with timeout=0 blocks until data arrives and returns a
    (queue_key, payload) tuple; the payload is JSON-decoded before return.
    Returns None only if redis hands back an empty result.
    """
    popped = self.myredis.brpop(
        queue_key_list,
        timeout=0)  # will block here if no data get, return item is tuple
    if not popped:
        return None
    slog.debug('get_queue {0}'.format(popped))
    return json.loads(popped[1])
def get_user_info(self, data=None):
    """Fetch user rows matching the optional filters in `data`.

    Fix: the original declared a mutable default argument (`data={}`),
    which is shared across calls; a None sentinel keeps the interface
    backward-compatible (the original already normalized None to {}).
    Also replaced `== None` with `is None`.
    """
    if data is None:
        data = {}
    vs, total = self.user_info_sql.query_from_db(data, page=None, limit=None)
    if not vs:
        slog.debug('user_info_sql query_from_db failed, data:{0}'.format(json.dumps(data)))
    return vs
def query_from_db(cls, data, cols=None, page=1, limit=50):
    """Select packet rows with optional filters.

    Filters: uniq_chain_hash / chain_hash / chain_msgid / is_root /
    broadcast / send_node_id (interpolated unquoted), src_node_id and
    dest_node_id (prefix regexp when short, exact match otherwise), and a
    begin/end window on send_timestamp.  Returns (rows, total), newest
    first.

    NOTE(review): string-formatted WHERE -- SQL-injectable with untrusted
    input.
    """
    sbegin = int(time.time() * 1000)
    where, vs, total = [], [], 0
    if data.get('uniq_chain_hash'):
        where.append(' uniq_chain_hash = {0} '.format(
            data.get('uniq_chain_hash')))
    if data.get('chain_hash'):
        where.append(' chain_hash = {0} '.format(data.get('chain_hash')))
    if data.get('chain_msgid'):
        where.append(' chain_msgid = {0} '.format(data.get('chain_msgid')))
    if data.get('is_root'):
        where.append(' is_root = {0} '.format(data.get('is_root')))
    if data.get('broadcast'):
        where.append(' broadcast = {0} '.format(data.get('broadcast')))
    if data.get('send_node_id'):
        where.append(' send_node_id = {0} '.format(
            data.get('send_node_id')))
    if data.get('src_node_id'):
        # short ids are treated as a prefix match
        if len(data.get('src_node_id')) <= 12:
            where.append(' src_node_id regexp "^{0}" '.format(
                data.get('src_node_id')))
        else:
            where.append(' src_node_id = "{0}" '.format(
                data.get('src_node_id')))
    if data.get('dest_node_id'):
        # short ids are treated as a prefix match
        if len(data.get('dest_node_id')) <= 20:
            where.append(' dest_node_id regexp "^{0}" '.format(
                data.get('dest_node_id')))
        else:
            where.append(' dest_node_id = "{0}" '.format(
                data.get('dest_node_id')))
    if data.get('begin'):
        where.append(' `send_timestamp` >= {0} '.format(data.get('begin')))
    if data.get('end'):
        where.append(' `send_timestamp` <= {0} '.format(data.get('end')))
    where = ' and '.join(where)
    vs, total = [], 0
    #vs = cls.select_vs(cols = cols, where=where, page=page, limit=limit, order=' timestamp desc ')
    vs = cls.select_vs(cols=cols, where=where, page=page,
                       limit=limit, order=' send_timestamp desc ')
    total = cls.total(where=where)
    send = int(time.time() * 1000)
    slog.debug('select * from %s where %s,total: %s taking:%d ms' %
               (cls._tbl, where, total, (send - sbegin)))
    return vs, total
def handle_alarm(self, data):
    """Push every alarm in `data` onto its queue; no-op for empty input."""
    if not data:
        return
    for alarm in data:  # item is string, not dict
        self.put_queue(alarm)
    slog.debug("put {0} alarm in queue, now size is {1}".format(len(data), self.qsize()))
def update_config():
    """Daemon loop: periodically refresh config from the remote source.

    The interval is re-read from gconfig each cycle (key
    'config_update_time', default 5 minutes), so a remote change takes
    effect on the next iteration.

    Fixes: removed the unreachable `return` after the infinite loop and
    collapsed the default-interval fallback into a single expression
    (`or` treats a missing/zero value the same way the original
    `if not time_step` check did).
    """
    global gconfig
    while True:
        time_step = gconfig.get('config_update_time') or 5 * 60
        time.sleep(time_step)
        slog.debug('update remote config alive, update_step:{0} s'.format(time_step))
        update_config_from_remote()
def get_network_ids_exp(self, data):
    """Return network/node info, optionally enriched with a per-node country.

    When data['withip'] is False the raw get_network_ids result is
    returned untouched.  Otherwise each node's ip is resolved to a
    country name: first from the in-memory cache `self.iplocation_`,
    then (once per call) from the on-disk cache file, and finally from
    the sipinfo remote service; new lookups are written back to disk.

    NOTE(review): `iplocation_load_again` is set but never read --
    presumably a re-check of the freshly loaded cache was intended;
    confirm against the original intent.
    NOTE(review): the explicit fin.close()/fout.close() calls are
    redundant -- the `with` blocks already close the handles.
    """
    result = self.get_network_ids(data)
    if data.get('withip') == False:
        return result
    result_exp = {
        'node_info': [],
        'node_size': 0
    }
    slog.debug('get_network_ids_exp')
    iplocation_update_flag = False  # set when any remote lookup succeeded
    iplocation_load_again = False
    #try:
    for item in result.get('node_info'):
        ip = item.get('node_ip').split(':')[0]
        #'''
        if ip in self.iplocation_:
            item['node_country'] = self.iplocation_[ip]['country_name']
        else:
            # reload the on-disk cache at most once per call
            if not iplocation_update_flag and os.path.exists(self.iplocation_file_):
                with open(self.iplocation_file_, 'r') as fin:
                    self.iplocation_ = json.loads(fin.read())
                    iplocation_load_again = True
                fin.close()
                slog.info('load iplocation from {0}, size:{1}'.format(self.iplocation_file_, len(self.iplocation_.keys())))
            ipinfo = sipinfo.GetIPLocation([ip])
            if ipinfo.get(ip):
                self.iplocation_[ip] = ipinfo.get(ip)
                item['node_country'] = ipinfo.get(ip).get('country_name')
                slog.debug('get iplocation of {0} from server'.format(ip))
                iplocation_update_flag = True
            else:
                item['node_country'] = 'unknow'
        #'''
        '''
        country_name_list = ['United States', 'China', 'England', 'Afric','France']
        tmp_country_name = random.choice(country_name_list)
        item['node_country'] = tmp_country_name
        slog.debug('add country {0}'.format(tmp_country_name))
        '''
        result_exp['node_info'].append(item)
    # persist any newly resolved ips back to the cache file
    if iplocation_update_flag:
        with open(self.iplocation_file_, 'w') as fout:
            fout.write(json.dumps(self.iplocation_))
        fout.close()
    result_exp['node_size'] = len(result_exp['node_info'])
    #except Exception as e:
    #    slog.warn('parse ip goes wrong: {0}'.format(e))
    return result_exp
def watchlog(filename, offset = 0):
    """Tail `filename` from byte `offset`, feeding each line to grep_log.

    Returns the byte position to resume from on the next call, or 0 when
    a freshly rotated file should be read from its beginning.  Gives up
    after ~5 idle seconds with no new data.

    NOTE(review): the rotation check below reopens the file on every loop
    iteration, which returns as soon as the file is larger than the
    current read position -- this looks like it was meant to run only on
    the idle (no new line) path; confirm against the original indentation.
    NOTE(review): log_handle / new_log_handle are never closed -- handle
    leak if this is called repeatedly.
    """
    try:
        #log_handle = open(filename, 'r',encoding="utf-8", errors='replace')
        log_handle = open(filename, 'r',encoding="utf-8")
        #log_handle = open(filename, 'r',encoding="latin-1")
    except Exception as e:
        slog.warn("open file exception: {0}".format(e))
        return offset
    wait_num = 0  # consecutive idle (no new line) iterations
    #log_handle.seek(0, 2) # go to end
    log_handle.seek(offset, 0) # go to offset from head
    cur_pos = log_handle.tell()
    while True:
        cur_pos = log_handle.tell()
        try:
            line = log_handle.readline()
        except Exception as e:
            slog.warn("readline exception:{0}, cur_pos:{1}".format(e, cur_pos))
            continue
        if not line:
            wait_num += 1
            log_handle.seek(cur_pos) # go to cur_pos from head
            time.sleep(1)
            slog.info("sleep 1 s, cur_pos: {0}".format(cur_pos))
            print_queue()
            if wait_num > 4:
                slog.debug("file: {0} done watch, size: {1}".format(filename, cur_pos))
                break
        else:
            send_size, recv_size = grep_log(line)
            wait_num = 0
        # judge new file "$filename" created
        if not os.path.exists(filename):
            return cur_pos
        try:
            new_log_handle = open(filename, 'r',encoding="latin-1")
        except Exception as e:
            return cur_pos
        new_log_handle.seek(0, 2) # go to end
        new_size = new_log_handle.tell()
        if new_size > cur_pos:
            return cur_pos
        if new_size == cur_pos:
            slog.info('logfile:{0} maybe stopped'.format(filename))
            check_progress(filename)
            return cur_pos
    # new file "$filename" created (current one shrank / was rotated)
    slog.info("new file: {0} created".format(filename))
    return 0
def network_query():
    """HTTP handler: return node/topology info for a network_id.

    Query args: network_id (default 'ffffff'), node_id, node_ip,
    onlysize ('true'/anything else), withip ('true'/anything else).
    For the broadcast prefix 'ffffff' the historical max node size is
    looked up first and attached to the results.

    Fix: removed a leftover debug `print(onlysize)`.
    """
    network_id = request.args.get('network_id') or 'ffffff'
    node_id = request.args.get('node_id') or None
    node_ip = request.args.get('node_ip') or None
    # only the literal string 'true' enables these flags
    onlysize = request.args.get('onlysize') == 'true'
    withip = request.args.get('withip') == 'true'
    status_ret = {
        0: 'OK',
        -1: '没有数据',
        -2: '参数不合法',
    }
    data = {
        'network_id': network_id,
        'node_id': node_id,
        'node_ip': node_ip,
        'onlysize': onlysize,
        'withip': withip
    }
    history_max_node_size = 0
    if network_id.startswith('ffffff'):
        # get real node
        ndata = {
            'simple': 'true',
        }
        node_info_results = mydash.get_node_info(ndata)
        if node_info_results.get('node_size'):
            history_max_node_size = node_info_results.get('node_size')
        slog.debug(
            'get history_max_node_size:{0} for network_id:{1}'.format(
                history_max_node_size, network_id))
    results = mydash.get_network_ids_exp(data)
    if results:
        results['max_node_size'] = history_max_node_size
        ret = {'status': 0, 'error': status_ret.get(0), 'results': results}
        return jsonify(ret)
    else:
        ret = {'status': -1, 'error': status_ret.get(-1), 'results': results}
        return jsonify(ret)
def put_queue(self, item):
    """Serialize one alarm dict and push it onto its per-type redis queue.

    Non-dict items are ignored; a queue deeper than 500000 entries drops
    the alarm with a warning instead of pushing.
    """
    if not isinstance(item, dict):
        return
    # TODO(smaug) for packet using uniq_chain_hash; other type using other hash
    qkey = self.get_queue_key_of_alarm(item)
    # item is dict, serialize to str
    # TODO(smaug)
    depth = self.qsize([qkey])
    if depth >= 500000:
        slog.warn("queue_key:{0} size {1} beyond 500000".format(qkey, depth))
        return
    self.myredis.lpush(qkey, json.dumps(item))
    slog.debug("put_queue alarm:{0} in queue {1}, now size is {2}".format(json.dumps(item), qkey, self.qsize([qkey])))
def dump_db_system_alarm_info(self, public_ip_port, root='', priority=0, alarm_info='', send_timestamp=0):
    """Persist one system alarm record to the DB and log the inserted row."""
    row = dict(
        priority=priority,
        public_ip_port=public_ip_port,
        root=root,
        alarm_info=alarm_info,
        send_timestamp=send_timestamp,
    )
    self.system_alarm_info_sql_.insert_to_db(row)
    slog.debug('insert system_alarm_info to db:{0}'.format(
        json.dumps(row)))
def delete_db(cls, data):
    """Delete rows matching public_ip_port and/or root (regexp match when
    root is <= 20 chars, exact otherwise).

    NOTE(review): when `data` carries neither filter, `where` ends up an
    empty string -- depending on cls.delete this may delete every row;
    confirm callers always pass at least one filter.
    NOTE(review): string-formatted WHERE -- SQL-injectable with untrusted
    input.
    """
    sbegin = int(time.time() * 1000)
    where = []
    if data.get('public_ip_port'):
        where.append(' `public_ip_port` = "{0}" '.format(
            data.get('public_ip_port')))
    if data.get('root'):
        if len(data.get('root')) <= 20:
            where.append(' `root` regexp "{0}" '.format(data.get('root')))
        else:
            where.append(' `root` = "{0}" '.format(data.get('root')))
    where = ' and '.join(where)
    cls.delete(where=where)
    send = int(time.time() * 1000)
    slog.debug('delete from %s where %s taking:%d ms' %
               (cls._tbl, where, (send - sbegin)))
    return
def dict_cmp(a, b):
    """Deep-compare two values, treating dicts structurally.

    Two dicts are equal when they share exactly the same key set and every
    value compares equal under dict_cmp (recursively).  A dict never
    equals a non-dict.  Non-dict values fall back to ordinary equality.
    """
    a_is_dict = isinstance(a, dict)
    b_is_dict = isinstance(b, dict)
    # a dict can only equal another dict
    if a_is_dict != b_is_dict:
        return False
    if not a_is_dict:
        return operator.eq(a, b)
    for key in set(a) | set(b):
        if key not in a or key not in b:
            slog.debug('dict_cmp diff k:{0}'.format(key))
            return False
        if not dict_cmp(a[key], b[key]):
            return False
    return True
def query_from_db(cls, data, cols=None, page=None, limit=None):
    """Select user rows filtered by optional username/email equality.

    Returns (rows, total) and logs the executed filter.
    """
    clauses = []
    if data.get('username'):
        clauses.append(' `username` = "{0}" '.format(data.get('username')))
    if data.get('email'):
        clauses.append(' `email` = "{0}" '.format(data.get('email')))
    where = ' and '.join(clauses)
    vs = cls.select_vs(cols=cols, where=where, page=None, limit=None,
                       order=None)
    total = cls.total(where=where)
    slog.debug('select * from %s where %s,total: %s' %
               (cls._tbl, where, total))
    return vs, total
def query_from_db(cls, data, cols=None, page=1, limit=200):
    """Select system-alarm rows by ip, root (regexp/exact) and priority list.

    `priority` is a list drawn from [0, 1, 2] (0 low, 1 middle, 2 high),
    OR-ed into one parenthesized clause; begin/end bound send_timestamp.
    Returns (rows, total), newest first.

    NOTE(review): string-formatted WHERE -- SQL-injectable with untrusted
    input.
    """
    sbegin = int(time.time() * 1000)
    where, vs, total = [], [], 0
    if data.get('public_ip_port'):
        where.append(' `public_ip_port` = "{0}" '.format(
            data.get('public_ip_port')))
    if data.get('root'):
        if len(data.get('root')) <= 20:
            where.append(' `root` regexp "{0}" '.format(data.get('root')))
        else:
            where.append(' `root` = "{0}" '.format(data.get('root')))
    if data.get(
            'priority'
    ):  # list of priority, eg: [0,1,2] ,0 is low, 1 is middle, 2 is high
        wp = []
        for p in data.get('priority'):
            if p not in [0, 1, 2]:  # silently skip invalid priorities
                continue
            wp.append(' `priority` = "{0}" '.format(p))
        wp = ' or '.join(wp)
        if wp:
            wp = ' ( {0} ) '.format(wp)
            where.append(wp)
    if data.get('begin'):
        where.append(' `send_timestamp` >= {0} '.format(data.get('begin')))
    if data.get('end'):
        where.append(' `send_timestamp` <= {0} '.format(data.get('end')))
    where = ' and '.join(where)
    vs, total = [], 0  # re-initialized; redundant but harmless
    vs = cls.select_vs(cols=cols, where=where, page=page,
                       limit=limit, order=' send_timestamp desc ')
    total = cls.total(where=where)
    send = int(time.time() * 1000)
    slog.debug('select * from %s where %s,total: %s taking:%d ms' %
               (cls._tbl, where, total, (send - sbegin)))
    return vs, total
def get_network_num_of_ip(self, public_ip_port):
    """Collect the network numbers this node ip participates in.

    Walks the node's id list for every role (rec/zec/edg/arc/adv/val),
    truncates each node id to its 12-char network id and looks up the
    registered network_num.  Unknown networks are skipped silently.
    """
    result = []
    node_entry = self.node_info_.get(public_ip_port)
    if not node_entry:
        slog.warn('get node_info of ip:{0} failed'.format(public_ip_port))
        return result
    for net_choice in ['rec', 'zec', 'edg', 'arc', 'adv', 'val']:
        id_list = node_entry.get(net_choice)
        if not id_list:
            continue
        for node_id in id_list:
            net_meta = self.network_id_num_.get(node_id[:12])
            if not net_meta:
                continue
            result.append(net_meta.get('network_num'))
    slog.debug('get_network_num:{0} of ip:{1}'.format(
        json.dumps(result), public_ip_port))
    return result
def get_system_alarm_info(self, data, page=1, limit=200):
    """Query system alarm rows and wrap them for the API layer.

    `data` filters: public_ip_port, root, priority (list of 0/1/2),
    begin, end.  DB fields: id | priority | public_ip_port | root |
    alarm_info | send_timestamp.

    Returns ({'system_alarm_info': rows, 'size': n}, total).
    """
    tbegin = int(time.time() * 1000)
    vs, total = self.system_alarm_info_sql_.query_from_db(
        data, page=page, limit=limit)
    if not vs:
        slog.debug('system_alarm_info_sql query_from_db failed, data:{0}'.format(json.dumps(data)))
    tend = int(time.time() * 1000)
    slog.debug('get_system_alarm_info taking:{0} ms'.format(tend - tbegin))
    results = {
        'system_alarm_info': vs,
        'size': len(vs),
    }
    slog.debug('get system_alarm_info ok, size:{0}'.format(len(vs)))
    return results, total
def query_from_db(cls, data, cols=None, page=1, limit=2000):
    """Select node rows by ip, root, status and per-role substring match.

    The rec/zec/edg/arc/adv/val filters use LIKE "%value%" against the
    JSON-encoded role columns.  Returns (rows, total).

    NOTE(review): string-formatted WHERE -- SQL-injectable with untrusted
    input.
    """
    sbegin = int(time.time() * 1000)
    where, vs, total = [], [], 0
    if data.get('public_ip_port'):
        where.append(' `public_ip_port` = "{0}" '.format(
            data.get('public_ip_port')))
    if data.get('root'):
        if len(data.get('root')) <= 20:
            where.append(' `root` regexp "{0}" '.format(data.get('root')))
        else:
            where.append(' `root` = "{0}" '.format(data.get('root')))
    if data.get('status'):  # online or offline
        where.append(' `status` = "{0}" '.format(data.get('status')))
    if data.get('rec'):
        where.append(' `rec` like "%%{0}%%" '.format(data.get('rec')))
    if data.get('zec'):
        where.append(' `zec` like "%%{0}%%" '.format(data.get('zec')))
    if data.get('edg'):
        where.append(' `edg` like "%%{0}%%" '.format(data.get('edg')))
    if data.get('arc'):
        where.append(' `arc` like "%%{0}%%" '.format(data.get('arc')))
    if data.get('adv'):
        where.append(' `adv` like "%%{0}%%" '.format(data.get('adv')))
    if data.get('val'):
        where.append(' `val` like "%%{0}%%" '.format(data.get('val')))
    where = ' and '.join(where)
    vs, total = [], 0  # re-initialized; redundant but harmless
    vs = cls.select_vs(cols=cols, where=where, page=page,
                       limit=limit, order='')
    total = cls.total(where=where)
    send = int(time.time() * 1000)
    slog.debug('select * from %s where %s,total: %s taking:%d ms' %
               (cls._tbl, where, total, (send - sbegin)))
    return vs, total
def get_packet_info(self, data, limit=50, page=1):
    """Fetch packet rows and annotate each with a drop_rate percentage.

    uniq_chain_hash is stringified for JSON safety; drop_rate is a
    one-decimal percentage string, forced to '0.0' when dest_networksize
    is non-positive or recv_nodes_num exceeds it.
    Returns (rows, total).
    """
    tbegin = int(time.time() * 1000)
    vs, total = self.packet_info_sql.query_from_db(
        data, page=page, limit=limit)
    if not vs:
        slog.debug('packet_info_sql query_from_db failed, data:{0}'.format(json.dumps(data)))
    for row in vs:
        row['uniq_chain_hash'] = '{0}'.format(row.get('uniq_chain_hash'))
        dest_networksize = int(row.get('dest_networksize'))
        recv_nodes_num = int(row.get('recv_nodes_num'))
        if dest_networksize <= 0 or recv_nodes_num > dest_networksize:
            row['drop_rate'] = '0.0'
        else:
            pct = 100 - float(recv_nodes_num) / dest_networksize * 100
            row['drop_rate'] = '%.1f' % pct
    tend = int(time.time() * 1000)
    slog.debug('get_packet_info taking:{0} ms'.format(tend - tbegin))
    return vs, total
def query_from_db(cls, data, cols=None, page=1, limit=50):
    """Select packet-recv rows by uniq_chain_hash, recv_node_id and
    recv_node_ip.

    Short node ids (<= 12 chars) match as a prefix regexp; short ips
    match as a substring regexp; longer values must match exactly.
    Returns (rows, total).

    NOTE(review): string-formatted WHERE -- SQL-injectable with untrusted
    input.
    """
    sbegin = int(time.time() * 1000)
    where, vs, total = [], [], 0
    if data.get('uniq_chain_hash'):
        where.append(' uniq_chain_hash = {0} '.format(
            data.get('uniq_chain_hash')))
    if data.get('recv_node_id'):
        if len(data.get('recv_node_id')) <= 12:
            where.append(' recv_node_id regexp "^{0}" '.format(
                data.get('recv_node_id')))
        else:
            where.append(' recv_node_id = "{0}" '.format(
                data.get('recv_node_id')))
    if data.get('recv_node_ip'):
        if len(data.get('recv_node_ip')) <= 12:
            where.append(' recv_node_ip regexp "{0}" '.format(
                data.get('recv_node_ip')))
        else:
            where.append(' recv_node_ip = "{0}" '.format(
                data.get('recv_node_ip')))
    where = ' and '.join(where)
    vs, total = [], 0  # re-initialized; redundant but harmless
    vs = cls.select_vs(cols=cols, where=where, page=page,
                       limit=limit, order='')
    total = cls.total(where=where)
    send = int(time.time() * 1000)
    slog.debug('select * from %s where %s,total: %s taking:%d ms' %
               (cls._tbl, where, total, (send - sbegin)))
    return vs, total
def get_node_info(self, data):
    """Query node rows and decode the per-role network-id list columns.

    data example:
    {
        'simple': 'true',  # set true: only public_ip_port/status columns
        'public_ip_port': '127.0.0.1:9000',
        'root': '010000',
        'status': 'online',
        'rec': '640000xxx', 'zec': '6500', 'edg': '6600000',
        'arc': '67000', 'adv': '6800000', 'val': '69000xxx',
    }
    DB fields: public_ip_port VARCHAR(25), root VARCHAR(73),
    status VARCHAR(10) ('online'/'offline'), and one VARCHAR(1000)
    JSON-encoded list per role (rec/zec/edg/arc/adv/val).

    Returns {'node_info': rows, 'node_size': n}.
    Fix: removed a leftover debug `print(cols)`.
    """
    tbegin = int(time.time() * 1000)
    results = {
        'node_info': [],
        'node_size': 0,
    }
    cols = None
    if data.get('simple') == 'true':
        cols = 'public_ip_port,status'
    vs, total = self.node_info_sql_.query_from_db(data, cols=cols)
    if not vs:
        slog.debug('node_info_sql query_from_db failed, data:{0}'.format(json.dumps(data)))
    for i in range(0, len(vs)):
        for k, v in vs[i].items():
            # role columns are stored as JSON-encoded lists
            if k in ['rec', 'zec', 'edg', 'arc', 'adv', 'val'] and v:
                vs[i][k] = json.loads(v)
    tend = int(time.time() * 1000)
    slog.debug('get_node_info taking:{0} ms'.format(tend - tbegin))
    results['node_info'] = vs
    results['node_size'] = len(vs)
    slog.debug('get node_info ok:{0}'.format(json.dumps(results)))
    return results
def query_network_ids(self, data):
    """Thin wrapper over network_info_sql.query_from_db with failure logging.

    Returns (rows, total).
    """
    rows, total = self.network_info_sql.query_from_db(data)
    if not rows:
        slog.debug('network_info_sql query_from_db failed, data:{0}'.format(json.dumps(data)))
    return rows, total
def get_packet_recv_info(self, data, limit=50, page=1):
    """Thin wrapper over packet_recv_info_sql.query_from_db with logging.

    Returns (rows, total).
    """
    rows, total = self.packet_recv_info_sql.query_from_db(
        data, limit=limit, page=page)
    if not rows:
        slog.debug('packet_recv_info_sql query_from_db failed, data:{0}'.format(json.dumps(data)))
    return rows, total
def get_system_cron_info(self, data, page=1, limit=200000):
    """Aggregate per-timestamp system metrics (cpu/mem/bandwidth/packets).

    data filters: public_ip_port, network_id, begin, end, plus 'true'
    string flags (cpu, mem, send_bandwidth, recv_bandwidth, send_packet,
    recv_packet) selecting which metric columns to fetch; cpu is also
    included when no other metric was requested.  When network_id is
    given it is resolved to a netN membership filter via the
    network_id_num_ cache (reloaded from the DB once on a miss).

    Returns {metric_name: [[timestamp, avg_value], ...]} where values are
    averaged over all rows sharing the same send_timestamp.

    Fixes: replaced a stray debug `print` (with typos: 'query fom db
    size;') with a slog.debug call; `!= None` -> `is not None`.
    """
    tbegin = int(time.time() * 1000)
    # key is db_field: cpu/mem/band...; value is list of [time, value] points
    results = {}
    tmp_result = {}  # key is timestamp
    cols = 'public_ip_port,send_timestamp'
    cols_list = []
    if data.get('mem') == 'true':
        cols += ',mem'
        cols_list.append('mem')
    if data.get('send_bandwidth') == 'true':
        cols += ',send_bandwidth'
        cols_list.append('send_bandwidth')
    if data.get('recv_bandwidth') == 'true':
        cols += ',recv_bandwidth'
        cols_list.append('recv_bandwidth')
    if data.get('send_packet') == 'true':
        cols += ',send_packet'
        cols_list.append('send_packet')
    if data.get('recv_packet') == 'true':
        cols += ',recv_packet'
        cols_list.append('recv_packet')
    # cpu is the default metric when nothing else was selected
    if cols.endswith('send_timestamp') or data.get('cpu') == 'true':
        cols += ',cpu'
        cols_list.append('cpu')
    tmp_value = {}  # per-timestamp accumulator template {mem:0, cpu:0, ...}
    for k in cols_list:
        tmp_value[k] = 0
        results[k] = []
    tmp_value['count'] = 0
    network_num = None
    if data.get('network_id'):
        network_id = data.get('network_id')[:12]
        if network_id not in self.network_id_num_:
            # cache miss: reload the id -> num mapping from the DB once
            vs = self.load_db_network_id_num(data={})
            for item in vs:
                self.network_id_num_[item.get('network_id')] = item
            slog.info('load network_id_num from db size:{0}'.format(len(vs)))
        if network_id not in self.network_id_num_:
            slog.warn('can not find network_num of network_id:{0}'.format(network_id))
            return results
        network_num = self.network_id_num_.get(network_id).get('network_num')
        slog.debug('get network_num:{0} of network_id:{1}'.format(network_num, network_id))
    if network_num is not None:
        net_field = 'net{0}'.format(network_num)
        data[net_field] = 1
    vs, total = self.system_cron_info_sql_.query_from_db(
        data, cols=cols, page=page, limit=limit)
    if not vs:
        slog.debug('system_cron_info_sql query_from_db failed, data:{0}'.format(json.dumps(data)))
        return results
    slog.debug('query from db size:{0}'.format(len(vs)))
    for item in vs:
        send_timestamp = item.get('send_timestamp')
        if send_timestamp not in tmp_result:
            tmp_result[send_timestamp] = copy.deepcopy(tmp_value)
        for k in cols_list:
            # NOTE(review): assumes every selected column is non-NULL in
            # the DB row -- a NULL here would raise on +=; confirm schema.
            tmp_result[send_timestamp][k] += item.get(k)
        tmp_result[send_timestamp]['count'] += 1
    for timest, tvalue in tmp_result.items():
        for name, sumv in tvalue.items():
            if name == 'count':
                continue
            point = [timest, sumv / tvalue['count']]
            results[name].append(point)
    slog.debug('system_cron result:{0}'.format(json.dumps(results)))
    tend = int(time.time() * 1000)
    slog.debug('get_system_cron_info taking:{0} ms'.format(tend - tbegin))
    return results