def get_all_num(act_time, end_time):
    all_num = 0
    all_err_num = 0
    db = database.s1_db_session()
    for i in range(10):
        try:
            print('-------------{0}--------'.format(i))
            sql_q = "refresh_result{}".format(i)
            # get_all_err_num counts documents in the time window with
            # result == "200" and result != "200" respectively.
            db_num = 0
            err_num = 0
            for attempt in range(3):  # bounded retry instead of `while True`
                try:
                    db_num, err_num = get_all_err_num(db, sql_q, act_time, end_time)
                    break
                except Exception:
                    print("timeout: {}".format(i))
            all_num += db_num
            all_err_num += err_num
        except Exception:
            pass  # skip a shard that fails entirely
    return all_num, all_err_num
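# A reusable sketch of the bounded-retry pattern above (hypothetical helper,
# not part of the original module): retry a callable a fixed number of times
# and re-raise the last error instead of spinning in `while True`.
def call_with_retries(fn, args=(), attempts=3):
    """Call fn(*args), retrying up to `attempts` times before re-raising."""
    for i in range(attempts):
        try:
            return fn(*args)
        except Exception:
            if i == attempts - 1:
                raise

# e.g. db_num, err_num = call_with_retries(get_all_err_num,
#                                          (db, sql_q, act_time, end_time))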
def preload_router(self):
    """Process preload_task messages from RabbitMQ."""
    try:
        messages = self.get_preload_messages()
        if not messages:
            messages = queue.get('preload_task', self.batch_size)
        s1_db = database.s1_db_session()
        logger.debug("preload_router.work process messages begin, count: %d" %
                     len(messages))
        url_dict = {}
        url_other = []
        for message in messages:
            self.merge_preload_task(message, url_dict, url_other)
        for urls in list(url_dict.values()):
            preload_worker_new.dispatch.delay(urls)
        if url_other:
            # Scheduled tasks are only persisted for now, not dispatched.
            for url_t in url_other:
                url_t['_id'] = ObjectId()
            s1_db.preload_url.insert(url_other)
        logger.info("preload_router.work process messages end, count: %d" %
                    len(messages))
    except Exception:
        logger.warning('preload_router work error:%s' % traceback.format_exc())
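# A hypothetical sketch of the split merge_preload_task performs (the real
# grouping key is defined elsewhere in this repo; this only illustrates the
# immediate-vs-scheduled routing seen above and in scheduleTask below):
def merge_preload_task_sketch(message, url_dict, url_other):
    if message.get('task_type') in ('SCHEDULE', 'INTERVAL', 'TIMER'):
        url_other.append(message)  # persisted, dispatched by the scheduler
    else:
        # hypothetical grouping key, for illustration only
        url_dict.setdefault(message.get('channel_id', ''), []).append(message)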
def get_fail_device_num(act_time, end_time):
    devices_list_all = []
    db = database.s1_db_session()
    for i in range(10):
        try:
            print('-------------{0}--------'.format(i))
            sql_q = "refresh_result{}".format(i)
            devices_list = []
            for attempt in range(3):  # bounded retry instead of `while True`
                try:
                    devices_list = get_all_err_device(db, sql_q, act_time, end_time)
                    print(devices_list)
                    break
                except Exception:
                    print("timeout: {}".format(i))
            devices_list_all = devices_list_all + devices_list
        except Exception:
            pass  # skip a shard that fails entirely
    # Top 10 devices by failure count.
    pai_dev = Counter(devices_list_all).most_common(10)
    my_dev_table = write_statistical_dev(pai_dev)
    return my_dev_table
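# For reference, Counter.most_common returns (item, count) pairs in
# descending order of count, so write_statistical_dev receives data shaped
# like [('1.2.3.4', 12), ('5.6.7.8', 9), ...] (IPs are hypothetical).
from collections import Counter
assert Counter(['a', 'b', 'a']).most_common(1) == [('a', 2)]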
def scheduleTask(self, url_list):
    host_aps = choice(eval(config.get('apscheduler_server', 'host_cluster')))
    logger.debug("scheduleTask host_aps: %s || url_list: %s" % (host_aps, url_list))
    conn_aps = rpyc.connect(host_aps,
                            int(config.get('apscheduler_server', 'port')),
                            config={
                                'allow_public_attrs': True,
                                'allow_all_attrs': True,
                                'allow_pickle': True
                            })
    for url in url_list:
        if url.get('task_type') == 'SCHEDULE':
            conn_aps.root.add_job(
                'util.aps_server:preload_worker_new.dispatch.delay',
                trigger='interval',
                args=([url], ),
                seconds=int(url.get('interval')),
                start_date=url.get('start_time'),
                end_date=url.get('end_time'))
        elif url.get('task_type') == 'INTERVAL':
            conn_aps.root.add_job(
                'util.aps_server:preload_worker_new.dispatch.delay',
                trigger='interval',
                args=([url], ),
                seconds=int(url.get('interval')),
                end_date=url.get('end_time'))
        elif url.get('task_type') == 'TIMER':
            url['_id'] = ObjectId()
            self.s1_db.preload_url.insert(url)
            rdate = url.get('start_time')
            rdate = rdate if isinstance(rdate, datetime) else datetime.strptime(
                rdate, '%Y-%m-%d %H:%M:%S')
            # A TIMER task runs once, at the exact start_time, via a cron trigger.
            run_date_dict = {
                'year': rdate.year,
                'month': rdate.month,
                'day': rdate.day,
                'hour': rdate.hour,
                'minute': rdate.minute,
                'second': rdate.second,
            }
            conn_aps.root.add_job(
                'util.aps_server:preload_worker_new.dispatch.delay',
                'cron',
                args=([url], ),
                **run_date_dict)
        logger.info("scheduleTask add_job [%s done!] url: %s." %
                    (url.get('task_type'), url.get('url')))
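# Illustrative shape of one url task as consumed above; field names come from
# the .get() calls, values are hypothetical.
sample_url = {
    'url': 'http://example.com/a.jpg',
    'task_type': 'SCHEDULE',               # SCHEDULE / INTERVAL / TIMER
    'interval': '600',                     # seconds (SCHEDULE and INTERVAL)
    'start_time': '2018-01-01 00:00:00',   # SCHEDULE and TIMER
    'end_time': '2018-01-02 00:00:00',     # SCHEDULE and INTERVAL
}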
def __init__(self, batch_size=3000, package_size=30):
    self.preload_api_size = 100
    self.batch_size = batch_size
    self.package_size = package_size
    logger.debug('router start. batch_size:%s package_size:%s' %
                 (self.batch_size, self.package_size))
    self.dt = datetime.now()
    self.merged_urls = {}
    self.high_merged_urls = {}
    self.merged_cert = {}
    self.merged_cert_query = {}
    self.merged_transfer_cert = {}
    self.physical_urls = {}
    self.db = database.db_session()
    self.s1_db = database.s1_db_session()
def get_preload_messages(self):
    try:
        queue_list = [{i['queue_name']: int(i['queue_ratio'])}
                      for i in self.s1_db.preload_queue_ratio.find({'status': 'ready'})]
        for q in queue_list:
            PRELOAD_RATIO.update(q)
        logger.info('get_preload_messages PRELOAD_RATIO: %s' % (PRELOAD_RATIO, ))
        all_p = sum(PRELOAD_RATIO.values())
        all_m_dict = {}
        for pi, pv in PRELOAD_RATIO.items():
            # Each queue gets a share of the batch proportional to its ratio.
            g_num = int(ceil((pv / float(all_p)) * self.batch_size))
            all_m_dict[pi] = queue.get(pi, g_num)
        sorted_s = sorted(PRELOAD_RATIO.items(), key=lambda x: x[1], reverse=True)
        messages = []
        for k in sorted_s:
            messages.extend(all_m_dict[k[0]])
        # If the batch came up short, top it up from the highest-ratio queues.
        for x in range(len(PRELOAD_RATIO)):
            if len(messages) < self.batch_size:
                left_n = self.batch_size - len(messages)
                messages.extend(queue.get(sorted_s[x][0], left_n))
        logger.info('get_preload_messages messages count %s' % len(messages))
        return messages
    except Exception:
        logger.info('get_preload_messages error %s' % traceback.format_exc())
        return []
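# A worked example of the proportional split above, with hypothetical ratios:
from math import ceil

ratios = {'high': 3, 'normal': 1}   # stand-in for PRELOAD_RATIO
batch_size = 3000
total = sum(ratios.values())
shares = {name: int(ceil((val / float(total)) * batch_size))
          for name, val in ratios.items()}
# shares == {'high': 2250, 'normal': 750}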
import time
import datetime
import json

from core.database import s1_db_session
from core import redisfactory

CERT_PULL_CACHE = redisfactory.getDB(1)
db = s1_db_session()


def run():
    db.cert_update_pull.ensure_index('update_time', unique=True)
    transfer_cert = []
    add_certs = []
    # Select certs created between yesterday and today. Use timedelta rather
    # than subtracting 1 from the YYYYMMDD integer, which breaks across month
    # boundaries (e.g. 20180301 - 1 == 20180300).
    today = datetime.date.today()
    yesterday = today - datetime.timedelta(days=1)
    data = db.cert_detail.find({
        'created_time': {
            "$gte": datetime.datetime(yesterday.year, yesterday.month, yesterday.day),
            "$lte": datetime.datetime(today.year, today.month, today.day)
        }
    })
    for d in data:
        co = {
            'cert': d.get('cert', ''),
            'p_key': d.get('p_key', ''),
            's_name': d.get('save_name'),
            'task_id': d.get('save_name', ''),
            'seed': d.get('seed', ''),
            'op_type': d.get('op_type', ''),
    '''
    uid = str(url.pop("_id"))
    url['firstLayer'] = url.pop('is_multilayer')
    url['layer_type'] = "two" if url.get('firstLayer') else "one"
    url['r_id'] = str(url.get("r_id"))
    url['recev_host'] = RECEIVER_HOST
    url['id'] = uid
    del url['created_time']
    if username in ('sina_t', 'sina_weibo', 'autohome', 'meipai'):
        url['layer_type'] = "three"
    return url


db = database.db_session()
db_s1 = database.s1_db_session()


@task(ignore_result=True, default_retry_delay=10, max_retries=3)
def submit(refresh_task, urls):
    '''
    Submit the task to the message queue.

    Parameters
    ----------
    refresh_task : the task to submit
    ignore_result : when True, task state is not stored and the return value
        cannot be queried
    default_retry_delay : delay between retries when re-queueing, in seconds
        (set to 10 here)
    max_retries : maximum number of retries, default 3
import struct

import core.redisfactory as redisfactory
from util.tools import JSONEncoder, load_task, delete_urlid_host, get_mongo_str
from core.config import config
from celery.task import task
from core.models import STATUS_RETRY_SUCCESS, STATUS_RESOLVE_FAILED
import datetime
import traceback

db = db_session()
q_db = query_db_session()
db_s1 = s1_db_session()
# link detection in redis
dev_detect = redisfactory.getDB(7)
logger = log_utils.get_receiver_Logger()
expire_time = 3600


def assemble_command_info(rid, host):
    """
    Assemble command information from a url _id and host info.

    :param rid: url collection _id
    :param host: device id
    :return:
    """
from core import redisfactory
import traceback
from util import log_utils
from core.config import config
from util.tools import get_mongo_str
import os
import gzip
from core import queue, models
import json
import pymongo

logger = log_utils.get_receiver_Logger()
db = db_session()
q_db = query_db_session()
monitor_db = s1_db_session()
multi_db = multi_session()
REWRITE_CACHE = redisfactory.getDB(15)


def get_url_by_id(id, url=False):
    """
    :param id: the _id of the url
    :return:
    """
    if not url:
        result = {}
        try:
            result = q_db.url.find_one({'_id': ObjectId(id)})
def cert_query_trans():
    '''
    Dispatch a certificate query task.
    '''
    try:
        s1_db = database.s1_db_session()
        data = json.loads(request.data)
        logger.debug('cert_query_trans post data %s' % (data, ))
        data_username = data.get('username', 'chinacache')
        data_info = data['info']
        query_ip = data_info.get('ip', '')
        query_path = data_info.get('path', '')
        query_config_path = data_info.get('config_path', '')
        query_cert_type = data_info.get('cert_type', '')
        query_type = data_info.get('query_type', '')
        query_cert_name = data_info.get('cert_name', '')
        query_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        if not query_ip:
            raise QueryipError('not input ip')
        if not query_path:
            raise QuerypathError('not input path')
        if not query_config_path:
            raise QueryconpathError('not input config path')
        if not query_cert_name:
            raise QuerycertnameError('not input cert name')
        if not query_cert_type:
            raise QuerycerttypeError('not input cert type')
        # Only HPCC devices may be queried.
        devices_list = [q_ip for q_ip in query_ip
                        if tools.judge_dev_ForH_byip(q_ip) != 'HPCC']
        if devices_list:
            raise QuerydevicesError('%s are not HPCC devices' % ' '.join(devices_list))
        q_id = s1_db.cert_query_info.insert({
            'cert_type': query_cert_type,
            'cert_name': query_cert_name,
            'path': query_path,
            'config_path': query_config_path,
            'created_time': datetime.datetime.now(),
            'username': data_username
        })
        task = {}
        task['_id'] = str(ObjectId())
        task['query_dev_ip'] = tools.sortip(query_ip)
        logger.debug('cert_query_trans query_dev_ip %s' % (task['query_dev_ip'], ))
        task['dev_ip_md5'] = tools.md5(json.dumps(task['query_dev_ip']))
        logger.debug('cert_query_trans dev_ip_md5 %s' % (task['dev_ip_md5'], ))
        task['q_id'] = str(q_id)
        task['username'] = data_username
        task['query_path'] = query_path
        task['query_cert_name'] = query_cert_name
        task['query_cert_type'] = query_cert_type
        task['query_config_path'] = query_config_path
        task['created_time'] = query_time
        queue.put_json2('cert_query_task', [task])
        return jsonify({
            'code': 200,
            'task_id': task['_id'],
            'cert_query_id': task['q_id']
        })
    except QueryipError as ex:
        return jsonify({"code": 520, "msg": ex.__str__()})
    except QuerypathError as ex:
        return jsonify({"code": 521, "msg": ex.__str__()})
    except QueryconpathError as ex:
        return jsonify({"code": 522, "msg": ex.__str__()})
    except QuerycertnameError as ex:
        return jsonify({"code": 523, "msg": ex.__str__()})
    except QuerycerttypeError as ex:
        return jsonify({"code": 524, "msg": ex.__str__()})
    except QuerydevicesError as ex:
        return jsonify({"code": 525, "msg": ex.__str__()})
    except Exception:
        logger.debug('/internal/cert/query error')
        logger.debug(traceback.format_exc())
        return jsonify({"code": 500, "msg": "The schema of request is error."})
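# Illustrative request body for this endpoint (field names taken from the
# .get() calls above; values are hypothetical):
# {
#     "username": "chinacache",
#     "info": {
#         "ip": ["1.2.3.4", "5.6.7.8"],
#         "path": "/path/on/device",
#         "config_path": "/path/to/config",
#         "cert_type": "...",
#         "query_type": "...",
#         "cert_name": "example-cert"
#     }
# }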
def make_task(data, _type='portal'):
    '''
    Build a certificate task.
    '''
    if _type == 'portal':
        s1_db = database.s1_db_session()
        username = data.get('username', 'chinacache')
        user_id = data.get('user_id', 2275)
        p_key = data.get('p_key', '')
        cert = data.get('cert', '')
        r_cert = data.get('r_cert', '')
        cert_alias = data.get('cert_name', 'chinacache-cert')
        seed = cert_trans_worker.get_custom_seed(username)
        is_rigid = data.get('rigid', False)
        try:
            cert_sum = s1_db.cert_detail.find({
                "username": username,
                'cert_alias': cert_alias
            }).count()
            if cert_sum >= 1:
                raise CertAliasError('cert alias already exists')
        except Exception:
            logger.debug(traceback.format_exc())
            raise
        # Private key: decrypt, then verify the signature.
        try:
            p_key_cip, p_key_sign = rsa_tools.split_ciphertext_and_sign(p_key)
            p_key = rsa_tools.decrypt_trunk(p_key_cip, rsa_tools.bermuda_pri_key)
            try:
                rsa_tools.verify_sign(p_key_sign, rsa_tools.portal_pub_key, p_key)
            except Exception:
                logger.debug('---p_key--- verify_sign error---')
                logger.debug(traceback.format_exc())
                raise CertDecryptError('Private key signature verification error')
        except Exception:
            logger.debug('---p_key--- decrypt error---')
            logger.debug(traceback.format_exc())
            raise CertDecryptError('Private key decryption error')
        # Certificate: decrypt, verify the signature, strip trailing newlines.
        try:
            cert_cip, cert_sign = rsa_tools.split_ciphertext_and_sign(cert)
            cert = rsa_tools.decrypt_trunk(cert_cip, rsa_tools.bermuda_pri_key)
            try:
                rsa_tools.verify_sign(cert_sign, rsa_tools.portal_pub_key, cert)
            except Exception:
                logger.debug('---cert--- verify_sign error---')
                logger.debug(traceback.format_exc())
                raise CertDecryptError('Cert signature verification error')
            cert = cert.strip('\n')
        except Exception:
            logger.debug('---cert--- decrypt error---')
            logger.debug(traceback.format_exc())
            raise CertDecryptError('Cert decryption error')
        if r_cert:
            # Root certificate: same decrypt-and-verify handling.
            try:
                r_cert_cip, r_cert_sign = rsa_tools.split_ciphertext_and_sign(r_cert)
                r_cert = rsa_tools.decrypt_trunk(r_cert_cip, rsa_tools.bermuda_pri_key)
                try:
                    rsa_tools.verify_sign(r_cert_sign, rsa_tools.portal_pub_key, r_cert)
                except Exception:
                    logger.debug('---r_cert--- verify_sign error---')
                    logger.debug(traceback.format_exc())
                    raise CertDecryptError('R_cert signature verification error')
                r_cert = r_cert.strip('\n')
            except Exception:
                logger.debug('---r_cert--- decrypt error---')
                logger.debug(traceback.format_exc())
                raise CertDecryptError('R_cert decryption error')
        # The private key must be PKCS#1 ("BEGIN RSA PRIVATE KEY"),
        # not PKCS#8 ("BEGIN PRIVATE KEY").
        if 'BEGIN PRIVATE KEY' in p_key:
            raise CertPrikeyTypeError('RSA Private Key must be (PKCS#1)')
        # Merge the submitted contents into a single chain.
        all_cert = cert
        if r_cert:
            all_cert = cert + '\n' + r_cert
        if cert_tools.crt_number(all_cert) < 1:
            raise CertInputError('Certificate must have more content')
        # Fewer than two certs means the intermediate certificate is missing.
        middle_cert_lack = cert_tools.crt_number(all_cert) < 2
        if middle_cert_lack and is_rigid:
            raise CertNoMiddle('Please upload intermediate certificate')
        all_cert_checked = cert_tools.get_all_chain(all_cert)
        if not all_cert_checked:
            raise CertPathError('The certificate path does not match')
        if all_cert_checked == 1:
            raise CertNoRoot('Matching the root certificate failed')
        if all_cert_checked in [2, 3]:
            raise CertNoMiddle('Matching the intermediate certificate failed')
        cert_last = cert_tools.get_cert(all_cert_checked)[0]
        # The leaf certificate and the private key must match.
        if not cert_tools.check_consistency(cert_last, p_key):
            raise CertPrikeyError('Certificate and private key do not match')
        # Check whether the leaf certificate has been revoked.
        crl_info = cert_tools.get_crl(cert_last)
        if crl_info:
            crl_object = cert_tools.get_crl_object(crl_info)
            if crl_object:
                if cert_tools.get_revoke(cert_last, crl_object):
                    raise CertRevokeError('Certificate has been revoked')
        # Check whether the leaf certificate has expired.
        if cert_tools.is_expire(cert_last):
            raise CertExpireError('Certificate is expired')
        # Parse the main fields of the leaf certificate.
        cert_last_subject = cert_tools.get_subject(cert_last)
        cert_last_issuer = cert_tools.get_issuer(cert_last)
        cert_last_validity = cert_tools.get_Validity(cert_last)
        cert_last_pubkey = cert_tools.get_public_key(cert_last)
        cert_last_DNS = cert_tools.get_DNS(cert_last)
        end_time = cert_tools.make_validity_to_China(cert_last_validity)['end_time']
        end_time_obj = datetime.datetime.strptime(end_time, '%Y%m%d%H%M%S')
        end_time_name = end_time_obj.strftime('%Y-%m-%d-%H')
        now_time_name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        if cert_last_DNS:
            save_dns_name = cert_last_DNS[0]
        else:
            save_dns_name = cert_last_subject['CN']
        # Replace the wildcard '*' so the name is safe to use as a file name.
        save_dns_name = save_dns_name.replace('*', '_')
        save_name = '%s-%s-%s' % (end_time_name, save_dns_name, now_time_name)
        # The storage name must be unique.
        if s1_db.cert_detail.find_one({'save_name': save_name}):
            raise CertSaveNameError('A certificate with the same save name exists')
        # Encrypt the certificate and private key.
        try:
            p_key_e = rsa_tools.fun(p_key, rsa_tools.cache_pub_key,
                                    rsa_tools.bermuda_pri_key, seed)
            all_cert_e = rsa_tools.fun(all_cert_checked, rsa_tools.cache_pub_key,
                                       rsa_tools.bermuda_pri_key, seed)
        except Exception:
            logger.debug('---make_task--- encrypt error---')
            logger.debug(traceback.format_exc())
            raise
        # Store the original certificate material.
        o_c_id = s1_db.cert_origin.insert({
            'cert_origin': cert,
            'cert_origin_r': r_cert,
            'cert_all': all_cert_checked,
            'created_time': datetime.datetime.now()
        })
        # Store the encrypted certificate details.
        c_id = s1_db.cert_detail.insert({
            'cert': all_cert_e,
            'p_key': p_key_e,
            'username': username,
            'seed': seed,
            'o_c_id': o_c_id,
            'cert_alias': cert_alias,
            'save_name': save_name,
            'subject': cert_last_subject,
            'issuer': cert_last_issuer,
            'validity': cert_last_validity,
            'pubkey': cert_last_pubkey,
            'DNS': cert_last_DNS,
            'user_id': user_id,
            'created_time': datetime.datetime.now(),
            'middle_cert_lack': middle_cert_lack
        })
        task = {
            '_id': str(ObjectId()),
            'middle_cert_lack': middle_cert_lack,
            'username': username,
            'p_key': p_key_e,
            'cert': all_cert_e,
            'created_time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'c_id': str(c_id),
            'send_devs': 'all_hpcc',
            'cert_alias': cert_alias,
            'o_c_id': str(o_c_id),
            'op_type': "add",
            'seed': seed,
            's_name': save_name,
            's_dir': HPCC_SAVE_DIR,
            'user_id': user_id,
            'recev_host': RECEV_HOST
        }
        return task
    else:
        # A bare `raise` here would fail with "No active exception to re-raise".
        raise ValueError('unsupported task type: %s' % _type)
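# A minimal sketch of a cert/private-key consistency check like the
# cert_tools.check_consistency call above (not the actual implementation;
# assumes PEM-encoded bytes and a recent `cryptography` package):
from cryptography import x509
from cryptography.hazmat.primitives import serialization


def keys_match(cert_pem, key_pem):
    """Return True when the certificate embeds the key's public half."""
    cert = x509.load_pem_x509_certificate(cert_pem)
    key = serialization.load_pem_private_key(key_pem, password=None)
    return cert.public_key().public_numbers() == key.public_key().public_numbers()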
def transfer_portal_expired_cert():
    '''
    Transfer expired certificates from the portal.
    '''
    try:
        s1_db = database.s1_db_session()
        data = json.loads(request.data)
        cer_id_str = data.get('cert_ids', '')
        username = data.get('username', 'portal')
        transfer_dev = data.get('transfer_dev', [''])  # cache devices to transfer to
        dev_type = data.get('dev_type', 'all_dev')  # transfer type
        c_o_path = config.get('app', 'o_path')
        c_d_path = config.get('app', 'd_path')
        o_path = data.get('o_path', c_o_path)
        d_path = data.get('d_path', c_d_path)
        transfer_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        logger.debug("username is %s" % username)
        logger.debug("o_path is %s" % o_path)
        logger.debug("d_path is %s" % d_path)
        logger.debug("dev_type is %s" % dev_type)
        if len(transfer_dev) <= 1 and transfer_dev[0] == '' and dev_type != 'all_dev':
            raise TransferdevError('not input ip')
        array_s_name = []
        cer_id_list = cer_id_str.split(',')
        if not cer_id_list:
            return jsonify({"code": 504, "msg": 'please push cert id'})
        for cert_id in cer_id_list:
            cert_detail_one = s1_db.cert_detail.find_one({'_id': ObjectId(cert_id)})
            if not cert_detail_one:
                return jsonify({"code": 504, "msg": 'this id does not exist'})
            array_s_name.append(cert_detail_one['save_name'])
        if not array_s_name:
            return jsonify({"code": 504, "msg": 'please push cert id'})
        db.transfer_certs_detail.ensure_index('save_name', unique=True)
        status, message = check_cert_cms(cer_id_list)
        if not status:
            return jsonify({"code": 504, "msg": message})
        task = {}
        task['_id'] = str(ObjectId())
        if dev_type == 'all_dev' or transfer_dev == 'all_hpcc':
            task['send_dev'] = 'all_dev'
            task['send_dev_md5'] = tools.md5(task['send_dev'])
        else:
            task['send_dev'] = tools.sortip(transfer_dev)
            logger.debug(task['send_dev'])
            # Only HPCC devices may receive transferred certificates.
            devices_list = [q_ip for q_ip in task['send_dev']
                            if tools.judge_dev_ForH_byip(q_ip) != 'HPCC']
            if devices_list:
                raise QuerydevicesError('%s are not HPCC devices' % ' '.join(devices_list))
            task['send_dev_md5'] = tools.md5(json.dumps(task['send_dev']))
            logger.debug(task['send_dev_md5'])
        try:
            status = cert_cms_delete(cer_id_list)
            if status:
                portal_status = cert_portal_delete(cer_id_list)
                if not portal_status:
                    return jsonify({'code': 504, 'msg': 'portal delete error'})
            else:
                return jsonify({'code': 504, 'msg': 'cms delete error'})
        except Exception:
            logger.error('callback error %s' % (traceback.format_exc()))
            return jsonify({
                'code': 504,
                'msg': 'delete error %s' % (traceback.format_exc())
            })
        for save_name in array_s_name:
            info = s1_db.cert_detail.find_one({'save_name': save_name})
            datestr = int(time.mktime(datetime.datetime.now().timetuple()))
            change_name = "{}{}{}{}".format("trans_", username,
                                            info.get('cert_alias'), datestr)
            t_id = s1_db.transfer_certs_detail.insert({
                'save_name': save_name,
                'o_path': o_path,
                'd_path': d_path,
                'created_time': datetime.datetime.now(),
                'username': username
            })
            s1_db.cert_detail.update(
                {'save_name': save_name},
                {"$set": {"t_id": t_id, "cert_alias": change_name}})
        task['t_id'] = str(t_id)
        task['username'] = username
        task['o_path'] = o_path
        task['d_path'] = d_path
        task['save_name'] = ','.join(array_s_name)
        task['created_time'] = transfer_time
        logger.debug('transfer cert task {}'.format([task]))
        queue.put_json2('transfer_cert_task', [task])
        res = {'code': 200, 'msg': 'ok'}
        return jsonify(res)
    except CertNotFoundError as ex:
        return jsonify({"code": 504, "msg": "The certificate does not exist"})
    except TransferdevError as ex:
        return jsonify({"code": 524, "msg": ex.__str__()})
    except QuerydevicesError as ex:
        return jsonify({"code": 525, "msg": ex.__str__()})
    except Exception:
        logger.debug('/transfer_expired_cert error')
        logger.debug(traceback.format_exc())
        return jsonify({"code": 500, "msg": "The schema of request is error."})
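# Illustrative request body for this endpoint (field names taken from the
# .get() calls above; values are hypothetical):
# {
#     "cert_ids": "5a0b...,5a0c...",
#     "username": "portal",
#     "transfer_dev": ["1.2.3.4"],
#     "dev_type": "part_dev",
#     "o_path": "/origin/path",
#     "d_path": "/dest/path"
# }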
import uuid
from xml.dom.minidom import parseString

from config import config
from util.tools import JSONEncoder
import traceback
from core.subcenter_base import (Subcenter_cert, Subcenter_Refresh_Dir,
                                 Subcenter_Refresh_Url, Subcenter_preload)
from redis.exceptions import WatchError
import core.redisfactory as redisfactory

logger = log_utils.get_cert_query_postal_Logger()

db = database.db_session()
db_preload = database.s1_db_session()
db_cert = database.s1_db_session()
db_s1 = database.s1_db_session()

# store failed FC device information
FAILED_DEV_FC = redisfactory.getDB(5)
expire_time = 60 * 60 * 24 * 5
SUBCENTER_REFRSH_UID = redisfactory.getDB(3)
subcenter_expire_time = 60 * 15
from email.mime.multipart import MIMEMultipart
from datetime import timedelta

imp.reload(sys)
sys.setdefaultencoding("utf8")

from core import database
import redis
from util.emailSender import EmailSender

conn_db = database.query_db_session()
conn = database.s1_db_session()

# define null
global null
null = ''

source_device_dict = {}
LOG_DIR = '/Application/bermuda3/logs/outputxls/'
LOG_FILENAME = '/Application/bermuda3/logs/count_device.log'
logging.basicConfig(
    filename=LOG_FILENAME,
    format='%(asctime)s - %(name)s - %(levelname)s - %(process)d - Line:%(lineno)d - %(message)s',
    level=logging.INFO)
logger = logging.getLogger('count_device')
logger.setLevel(logging.DEBUG)

RCMS_API = 'http://j.hope.chinacache.com:8082/api/device/name/{0}/apps'
RCMS_DEVICES_LAYER = 'https://cms3-apir.chinacache.com/upperlayer/devices'