def update_db_dev(dev_id, results):
    '''
    Update task status: cert_query_dev & cert_query_tasks.
    '''
    try:
        now = datetime.datetime.now()
        db_dev = s1_db.cert_query_dev.find_one({"_id": ObjectId(dev_id)})
        db_dev["finish_time"] = now
        devices = db_dev.get("query_dev_ip")
        # Merge the per-device result codes into the stored device map.
        for ret in results:
            devices.get(ret.get("name"))["code"] = ret.get("code", 0)
            devices.get(ret.get("name"))["a_code"] = ret.get("a_code", 0)
            devices.get(ret.get("name"))["r_code"] = ret.get("r_code", 0)
        db_dev["unprocess"] = int(db_dev.get("unprocess")) - 1
        update_dev = copy.deepcopy(db_dev)
        # _id is immutable in MongoDB, so drop it before the $set update.
        if '_id' in update_dev:
            update_dev.pop('_id')
        s1_db.cert_query_dev.update_one({"_id": ObjectId(dev_id)},
                                        {'$set': update_dev})
        s1_db.cert_query_tasks.update_many({'dev_id': ObjectId(dev_id)},
                                           {'$set': {'finish_time': now}})
    except Exception:
        logger.debug('cert_query update_db_dev error is %s' %
                     traceback.format_exc())
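# Illustrative sketch (not in the original module): the shape of the `results`
# list that update_db_dev() consumes. Host names and codes here are
# assumptions; missing codes default to 0, matching ret.get(..., 0) above.
EXAMPLE_QUERY_RESULTS = [
    {'name': 'host1', 'code': 200, 'a_code': 200, 'r_code': 200},
    {'name': 'host2', 'code': 503},  # a_code / r_code fall back to 0
]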
def check_cert_task(task_ids):
    '''
    Check task status after n seconds.
    '''
    logger.debug('---check_cert_task start task_ids: %s---' % (task_ids))
    callback_list = s1_db.cert_trans_tasks.find({
        '_id': {'$in': [ObjectId(task_id) for task_id in task_ids]},
        'status': 'FINISHED'
    })
    # Retry the callback (up to a second attempt).
    for c in callback_list:
        if not c['rcms_callback_time'] or not c['portal_callback_time']:
            cert_info = s1_db.cert_detail.find_one(
                {'_id': ObjectId(c['c_id'])})
            logger.debug(
                '---check_cert_task retry callback task_id %s cert_id %s---'
                % (c['_id'], c['c_id']))
            make_all_callback(c, cert_info)
    task_info_list = s1_db.cert_trans_tasks.find({
        '_id': {'$in': [ObjectId(task_id) for task_id in task_ids]},
        'status': 'PROGRESS'
    })
    if task_info_list.count() == 0:
        logger.debug('---check_cert_task no task_info ids: %s---' % (task_ids))
        return
    for task_info in task_info_list:
        error_devs = get_error_dev_result(task_info)
        if not error_devs:
            continue
        send_error_email(task_info, error_devs)
        logger.debug('---check_cert_task id %s set Failed begin---' %
                     (task_info['_id']))
        set_finished(task_info['_id'], 'FAILED')
        logger.debug('---check_cert_task id %s set Failed end---' %
                     (task_info['_id']))
    # Retry the callback once more, regardless of task status.
    make_all_callback_force(task_ids)
    logger.debug('---check_cert_task end task_ids: %s---' % (task_ids))
    return
def set_finished(task_id, status):
    '''
    Mark a transfer task as finished and persist its cached results.
    '''
    now = datetime.datetime.now()
    task_info = s1_db.cert_trans_tasks.find_one_and_update(
        {'_id': ObjectId(task_id), 'status': "PROGRESS"},
        {"$set": {'status': status, 'hpc_finish_time': now}},
        return_document=ReturnDocument.AFTER)
    # cache -> mongo
    res_cache_key = '%s_res' % (task_id)
    dev_num_cache_key = '%s_res_dev_num' % (task_id)
    failed_cache_key = '%s_res_failed' % (task_id)
    success_cache_key = '%s_res_success' % (task_id)
    all_cache = CERT_TRANS_CACHE.hgetall(res_cache_key)
    all_dev_num = CERT_TRANS_CACHE.get(dev_num_cache_key)
    success_num = CERT_TRANS_CACHE.scard(success_cache_key)
    unprocess = int(all_dev_num) - int(success_num)
    if unprocess <= 0:
        unprocess = 0
    save_cache = {
        '_id': ObjectId(task_id),
        'devices': {},
        'created_time': now,
        'unprocess': int(unprocess)
    }
    for k, v in list(all_cache.items()):
        v_obj = json.loads(v)
        save_cache['devices'][v_obj['name']] = v_obj
    try:
        s1_db.cert_trans_result.insert_one(save_cache)
    except Exception:
        logger.debug('set_finished error is %s' % (traceback.format_exc()))
        return
    CERT_TRANS_CACHE.delete(res_cache_key)
    CERT_TRANS_CACHE.delete(dev_num_cache_key)
    CERT_TRANS_CACHE.delete(failed_cache_key)
    CERT_TRANS_CACHE.delete(success_cache_key)
    if status == 'FINISHED':
        cert_info = s1_db.cert_detail.find_one(
            {'_id': ObjectId(task_info['c_id'])})
        make_all_callback(task_info, cert_info)
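# Illustrative, self-contained sketch (not in the original module) of the
# per-task Redis key convention that set_finished() drains. Key names come
# from the code above; the redis.Redis() connection and the host values
# are assumptions.
import json
import redis

def demo_result_cache(task_id='57ff206d2b8a6831b5c2f9b8'):
    cache = redis.Redis()                                   # assumed local instance
    cache.set('%s_res_dev_num' % task_id, 2)                # total device count
    cache.sadd('%s_res_success' % task_id, 'host1')         # succeeded devices
    cache.hset('%s_res' % task_id, 'host1',
               json.dumps({'name': 'host1', 'code': 200}))  # per-device result
    # unprocess = total - succeeded, floored at 0, as in set_finished()
    total = int(cache.get('%s_res_dev_num' % task_id))
    done = cache.scard('%s_res_success' % task_id)
    return max(total - done, 0)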
def init_db_device(self, urls):
    """
    Fetch the target devices from RCMS, update the url collection,
    and attach dev_id to each url document.
    :param urls:
    :return:
    """
    worker_hostname = REFRESH_WORKER_HOST
    devs = rcmsapi.getDevices(urls[0].get("channel_code"))
    logger.debug('init_db_device:%s' % urls[0].get("layer_type"))
    if urls[0].get("layer_type") != "one":
        devs += rcmsapi.getFirstLayerDevices(urls[0].get("channel_code"))
    db_device = {
        "devices": verify.create_dev_dict(devs),
        "unprocess": len(devs),
        "created_time": datetime.now(),
        "_id": self.dev_id
    }
    for url in urls:
        url["dev_id"] = self.dev_id
        db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
            "$set": {
                "dev_id": self.dev_id,
                "worker_host": worker_hostname,
                "recev_host": url.get("recev_host", "")
            }
        })
    logger.debug(
        "url_init_db_device succeeded, worker_id: %s, dev_id: %s" %
        (self.get_id_in_requestofwork(), self.dev_id))
    return db_device
def cert_trans_search():
    '''
    Portal search for a certificate.
    '''
    try:
        data = json.loads(request.data)
        cert_id = data.get('cert_id', '')
        if not cert_id:
            # Missing cert_id is a malformed request; handled by the
            # generic except below. (The original used a bare `raise`.)
            raise ValueError('cert_id is required')
        info = s1_db.cert_detail.find_one({'_id': ObjectId(cert_id)})
        if not info:
            raise CertNotFoundError()
        res = {
            'code': 200,
            'save_name': info.get('save_name', ''),
            'DNS_name': info.get('DNS'),
            'validity': cert_tools.make_validity_to_China(info.get('validity')),
            'subject': info.get('subject'),
            'issuer': info.get('issuer'),
            'pubkey': info.get('pubkey')
        }
        return jsonify(res)
    except CertNotFoundError:
        return jsonify({"code": 504, "msg": "The certificate does not exist"})
    except Exception as e:
        logger.debug('/internal/cert/trans error')
        logger.debug(traceback.format_exc())
        logger.debug(e.__str__())
        return jsonify({"code": 500, "msg": "The schema of the request is invalid."})
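# Hypothetical client call (illustrative only; not part of the original
# module). The endpoint path comes from the log message in cert_trans_search();
# the host, port, and cert_id value are assumptions. Expected response codes:
# 200 (found), 504 (not found), 500 (malformed request).
import json
import requests

def cert_trans_search_demo():
    resp = requests.post('http://127.0.0.1:8080/internal/cert/trans',
                         data=json.dumps({'cert_id': '57ff206d2b8a6831b5c2f9b8'}))
    return resp.json().get('code')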
def get_url(url, username, parent, request_id, action, isSub, type, isdir):
    """
    Fetch the user's channel info from RCMS and resolve the channel_code.
    :param url:
    :param username:
    :param request_id:
    :param action:
    :param isdir:
    :return:
    """
    url_id = ObjectId()
    # When isdir is set, verify the url really is a directory;
    # if not, fall back to treating it as a plain url.
    if isdir:
        if not url.endswith('/'):
            logger.info('get url url is not dir: url %s isdir %s' % (url, isdir))
            isdir = False
    if isSub:
        isValid, is_multilayer, channel_code, ignore_case = \
            rcmsapi.isValidUrlByPortal(username, parent, url)
    else:
        isValid, is_multilayer, channel_code, ignore_case = \
            rcmsapi.isValidUrl(parent, url)
    # Determine task priority.
    high_priority = False
    if isValid:
        high_priority = is_refresh_high_priority(channel_code)
    return {
        "_id": url_id,
        "r_id": request_id,
        "url": url,
        "ignore_case": ignore_case,
        "status": 'PROGRESS' if isValid else 'INVALID',
        "isdir": isdir,
        "username": username,
        "parent": parent,
        "created_time": datetime.now(),
        "action": action,
        "is_multilayer": is_multilayer,
        "channel_code": channel_code,
        'type': type,
        'high_priority': high_priority,
        'channel_name': get_channelname(url)
    }
def init_db_device_refresh_devices(self, url):
    """
    Initialize the device record for a url that already carries its
    own device list.
    Args:
        url:
    Returns:
    """
    worker_hostname = REFRESH_WORKER_HOST
    devs = url.get('devices')
    db_device = {
        "devices": verify.create_dev_dict(devs),
        "unprocess": len(devs),
        "created_time": datetime.now(),
        "_id": self.dev_id
    }
    # dev_id = db.device.insert(db_device)
    url["dev_id"] = self.dev_id
    # db_update(db.url, {"_id": ObjectId(url.get("id"))}, {"$set": {"dev_id": self.dev_id}})
    db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
        "$set": {
            "dev_id": self.dev_id,
            "worker_host": worker_hostname,
            "recev_host": url.get("recev_host", "")
        }
    })
    logger.debug(
        "dir_init_db_device succeeded, url_id: %s, dev_id: %s" %
        (url.get("id"), self.dev_id))
    return db_device
def preload_router(self):
    """
    Process preload_task messages from RabbitMQ.
    """
    try:
        # messages = queue.get('preload_task', self.batch_size)
        messages = self.get_preload_messages()
        if not messages:
            messages = queue.get('preload_task', self.batch_size)
        s1_db = database.s1_db_session()
        logger.debug(
            "preload_router.work process messages begin, count: %d " %
            len(messages))
        url_dict = {}
        url_other = []
        for message in messages:
            self.merge_preload_task(message, url_dict, url_other)
        for urls in list(url_dict.values()):
            preload_worker_new.dispatch.delay(urls)
        if url_other:
            # Scheduled tasks are only inserted into the DB for now.
            # (Insert once after assigning ids; the original inserted the
            # whole list on every loop iteration.)
            for url_t in url_other:
                url_t['_id'] = ObjectId()
            s1_db.preload_url.insert(url_other)
        logger.info(
            "preload_router.work process messages end, count: %d " %
            len(messages))
    except Exception:
        logger.warning('preload_router work error:%s' % traceback.format_exc())
def get_data_from_retry_device_branch(self, retry_branch_id):
    """
    Get device info from retry_branch_id.
    :param retry_branch_id: the _id of retry_device_branch
    :return: dict such as {'host1': 200, 'host2': 503}
    """
    result_return = {}
    if not retry_branch_id:
        return result_return
    try:
        result = db.retry_device_branch.find_one(
            {"_id": ObjectId(retry_branch_id)})
        logger.debug(
            'success get data from retry_device_branch result:%s, retry_branch_id:%s'
            % (result, retry_branch_id))
    except Exception as e:
        logger.debug(
            'get data error from retry_device_branch retry_branch_id:%s, error content:%s'
            % (retry_branch_id, e))
        return result_return
    try:
        if result:
            devices = result.get('devices')
            if devices:
                for dev in devices:
                    name = dev.get('name')
                    branch_code = dev.get('branch_code')
                    if name and branch_code:
                        result_return[name] = branch_code
    except Exception:
        logger.debug(
            'get_data_from_retry_device_branch parse data error:%s' %
            traceback.format_exc())
    return result_return
def check_cert_query_task(task_ids):
    '''
    Check task status after n seconds.
    '''
    logger.debug('---check_cert_query_task start task_ids: %s---' % (task_ids))
    task_info_list = s1_db.cert_query_tasks.find({
        '_id': {'$in': [ObjectId(task_id) for task_id in task_ids]},
        'status': 'PROGRESS'
    })
    if task_info_list.count() == 0:
        logger.debug('---check_cert_query_task no task_info ids: %s---' %
                     (task_ids))
        return
    for task_info in task_info_list:
        error_devs = get_error_dev_result(task_info)
        if not error_devs:
            continue
        set_finished(task_info['_id'], 'FAILED')
        logger.debug('---check_cert_query_task id %s set Failed end---' %
                     (task_info['_id']))
    return
def get_cert_by_task_id(task_id):
    '''
    Fetch the certificate / private key / storage name by task ID.
    '''
    cert_res = {}
    task_info = s1_db.cert_trans_tasks.find_one({'_id': ObjectId(task_id)})
    if not task_info:
        return cert_res
    cert_id = task_info.get('c_id', '')
    if not cert_id:
        return cert_res
    cert_info = s1_db.cert_detail.find_one({'_id': ObjectId(cert_id)})
    if not cert_info:
        return cert_res
    return get_cert_detail(cert_info)
def dispatch(tasks):
    try:
        logger.debug("dispatch cert_query trans begin %s" % len(tasks))
        logger.debug("dispatch cert_query trans begin task_ids %s" %
                     [i['_id'] for i in tasks])
        dev_id, devs, devs_dict = init_cert_dev(tasks[0])
        logger.debug("devs is %s" % devs)
        logger.debug("cert_devs dev_id %s, devs len %s" % (dev_id, len(devs)))
        save_tasks = []
        for task in tasks:
            task['dev_id'] = dev_id
            save_task = {}
            save_task['_id'] = ObjectId(task['_id'])
            save_task['status'] = "PROGRESS"
            save_task['dev_id'] = dev_id
            save_task['query_path'] = task.get('query_path')
            save_task['username'] = task.get('username')
            save_task['q_id'] = task.get('q_id')
            save_task['query_type'] = task.get('query_type')
            save_task['send_devs'] = task.get('query_dev_ip')
            save_task['created_time'] = task.get('created_time')
            save_task['query_config_path'] = task.get('query_config_path')
            save_task['query_cert_name'] = task.get('query_cert_name')
            save_task['worker_host'] = WORKER_HOST
            save_tasks.append(save_task)
            make_result_cache(task['_id'], devs_dict)
        s1_db.cert_query_tasks.insert(save_tasks)
        worker(tasks, devs)
        logger.debug("query cert trans end")
    except Exception:
        logger.debug("query cert trans error:%s " % traceback.format_exc())
def init_db_device(self, url):
    worker_hostname = REFRESH_WORKER_HOST
    devs = rcmsapi.getDevices(url.get("channel_code"))
    if url.get("layer_type") != "one":
        devs += rcmsapi.getFirstLayerDevices(url.get("channel_code"))
    db_device = {
        "devices": verify.create_dev_dict(devs),
        "unprocess": len(devs),
        "created_time": datetime.now(),
        "_id": self.dev_id
    }
    # dev_id = db.device.insert(db_device)
    url["dev_id"] = self.dev_id
    # db_update(db.url, {"_id": ObjectId(url.get("id"))}, {"$set": {"dev_id": self.dev_id}})
    db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
        "$set": {
            "dev_id": self.dev_id,
            "worker_host": worker_hostname,
            "recev_host": url.get("recev_host", "")
        }
    })
    logger.debug(
        "dir_init_db_device succeeded, url_id: %s, dev_id: %s" %
        (url.get("id"), self.dev_id))
    return db_device
def process_redis_1_minute():
    """
    Runs every minute: collect timer_1_* keys whose start time is within
    60 seconds and dispatch them.
    :return:
    """
    try:
        minute_1_list = redis_preload_timer.keys('timer_1_*')
        if minute_1_list:
            id_list = []
            for key in minute_1_list:
                timer_seconds = float(redis_preload_timer.get(key))
                logger.debug(
                    'process_redis_1_minute timer_seconds:%s, type:%s' %
                    (timer_seconds, type(timer_seconds)))
                timestamp_now = time.mktime(datetime.now().timetuple())
                logger.debug('process_redis_1_minute timestamp_now:%s' %
                             timestamp_now)
                time_diff = timer_seconds - timestamp_now
                logger.debug('process_redis_1_minute time_diff:%s' % time_diff)
                if time_diff < 60:
                    id_list.append(ObjectId(get_id(key)))
                    redis_preload_timer.delete(key)
                    logger.debug('process_redis_1_minute delete key:%s' % key)
            if id_list:
                commit_preload_timer_task.delay(id_list)
                logger.debug('process_redis_1_minute id_list:%s' % id_list)
    except Exception:
        logger.debug('operator error 1 minute:%s' % traceback.format_exc())
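# Plausible shape of the get_id helper used above (an assumption: the real
# helper lives elsewhere in the codebase). Keys are built as
# 'timer_1_' + str(<ObjectId>) in put_data_to_redis(), so stripping the
# prefix recovers the id.
def get_id_sketch(key):
    return key.rsplit('_', 1)[-1]

assert get_id_sketch('timer_1_57ff206d2b8a6831b5c2f9b8') == '57ff206d2b8a6831b5c2f9b8'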
def init_db_device_refresh_device(self, urls):
    """
    The full device list is already present on the urls.
    Args:
        urls:
    Returns:
    """
    worker_hostname = REFRESH_WORKER_HOST
    devs = urls[0].get('devices')
    db_device = {
        "devices": verify.create_dev_dict(devs),
        "unprocess": len(devs),
        "created_time": datetime.now(),
        "_id": self.dev_id
    }
    for url in urls:
        url["dev_id"] = self.dev_id
        db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
            "$set": {
                "dev_id": self.dev_id,
                "worker_host": worker_hostname,
                "recev_host": url.get("recev_host", "")
            }
        })
    logger.debug(
        "url_init_db_device succeeded, worker_id: %s, dev_id: %s" %
        (self.get_id_in_requestofwork(), self.dev_id))
    return db_device
def set_finished(task_id, status):
    '''
    Mark a query task as finished and persist its cached results.
    '''
    now = datetime.datetime.now()
    logger.debug('now is %s' % now)
    task_info = s1_db.cert_query_tasks.find_one_and_update(
        {'_id': ObjectId(task_id), 'status': "PROGRESS"},
        {"$set": {'status': status, 'hpc_finish_time': now}},
        return_document=ReturnDocument.AFTER)
    # cache -> mongo
    res_cache_key = '%s_res' % (task_id)
    dev_num_cache_key = '%s_res_dev_num' % (task_id)
    failed_cache_key = '%s_res_failed' % (task_id)
    success_cache_key = '%s_res_success' % (task_id)
    all_cache = CERT_QUERY_CACHE.hgetall(res_cache_key)
    all_dev_num = CERT_QUERY_CACHE.get(dev_num_cache_key)
    success_num = CERT_QUERY_CACHE.scard(success_cache_key)
    unprocess = int(all_dev_num) - int(success_num)
    logger.debug('unprocess is %s' % unprocess)
    if unprocess <= 0:
        unprocess = 0
    # save_cache = {'_id': ObjectId(task_id), 'devices': {}, 'created_time': now, 'unprocess': int(unprocess), 'query_result': query_result}
    save_cache = {
        '_id': ObjectId(task_id),
        'devices': {},
        'created_time': now,
        'unprocess': int(unprocess)
    }
    logger.debug('save_cache is %s' % save_cache)
    for k, v in all_cache.items():
        v_obj = json.loads(v)
        save_cache['devices'][v_obj['name']] = v_obj
    try:
        s1_db.cert_query_result.insert_one(save_cache)
    except Exception:
        logger.debug('set_finished error is %s' % (traceback.format_exc()))
        return
def make_all_callback_force(task_ids):
    '''
    Force the callback.
    '''
    try:
        for t in task_ids:
            t_id = t
            t_obj = s1_db.cert_trans_tasks.find_one({'_id': ObjectId(t_id)})
            if t_obj:
                c_obj = s1_db.cert_detail.find_one(
                    {'_id': ObjectId(t_obj['c_id'])})
                logger.debug('make_all_callback_force begin task_id %s' %
                             (t_id))
                make_all_callback(t_obj, c_obj)
            else:
                logger.debug('make_all_callback_force not begin task_id %s' %
                             (t_id))
    except Exception as e:
        logger.debug('make_all_callback_force error e %s' % (e))
def scheduleTask(self, url_list):
    s1_db = database.s1_db_session()
    host_aps = choice(
        eval(config.get('apscheduler_server', 'host_cluster')))
    logger.debug("scheduleTask host_aps: %s|| url_list: %s" %
                 (host_aps, url_list))
    conn_aps = rpyc.connect(host_aps,
                            int(config.get('apscheduler_server', 'port')),
                            config={
                                'allow_public_attrs': True,
                                'allow_all_attrs': True,
                                'allow_pickle': True
                            })
    for url in url_list:
        if url.get('task_type') == 'SCHEDULE':
            # Recurring job bounded by both start_date and end_date.
            conn_aps.root.add_job(
                'util.aps_server:preload_worker_new.dispatch.delay',
                trigger='interval',
                args=([url], ),
                seconds=int(url.get('interval')),
                start_date=url.get('start_time'),
                end_date=url.get('end_time'))
        elif url.get('task_type') == 'INTERVAL':
            # Recurring job that starts immediately and stops at end_date.
            conn_aps.root.add_job(
                'util.aps_server:preload_worker_new.dispatch.delay',
                trigger='interval',
                args=([url], ),
                seconds=int(url.get('interval')),
                end_date=url.get('end_time'))
        elif url.get('task_type') == 'TIMER':
            # One-shot job: persist the url, then fire once at start_time.
            url['_id'] = ObjectId()
            s1_db.preload_url.insert(url)
            rdate = url.get('start_time')
            rdate = rdate if isinstance(rdate, datetime) else \
                datetime.strptime(rdate, '%Y-%m-%d %H:%M:%S')
            run_date_dict = {
                'year': rdate.year,
                'month': rdate.month,
                'day': rdate.day,
                'hour': rdate.hour,
                'minute': rdate.minute,
                'second': rdate.second,
            }
            conn_aps.root.add_job(
                'util.aps_server:preload_worker_new.dispatch.delay',
                'cron',
                args=([url], ),
                **run_date_dict)
        logger.info("scheduleTask add_job [%s done!] url: %s." %
                    (url.get('task_type'), url.get('url')))
def test_update_mongo():
    """
    :return:
    """
    data = [{
        "_id": ObjectId("57ff206d2b8a6831b5c2f9b8"),
        'a': 3,
        'b': 4
    }, {
        "_id": ObjectId("57ff206d2b8a6831b5c2f9b9"),
        'a': 5,
        'b': 6
    }]
    # insert_many takes only the list of documents; the original call
    # passed a spurious filter dict as the first argument.
    db_s1.test_update.insert_many(data)
    print('insert success!')
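# Hedged sketch: if the intent here was a bulk *update* rather than an insert,
# the idiomatic PyMongo call is update_many with an $in filter. The $set
# payload below is illustrative only.
def test_bulk_update_sketch():
    db_s1.test_update.update_many(
        {'_id': {'$in': [ObjectId('57ff206d2b8a6831b5c2f9b8'),
                         ObjectId('57ff206d2b8a6831b5c2f9b9')]}},
        {'$set': {'a': 0}})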
def get_cert_by_ids(cert_ids):
    '''
    Fetch certificates / private keys / storage names by task IDs.
    '''
    cert_info_list = s1_db.cert_detail.find(
        {'_id': {'$in': [ObjectId(i) for i in cert_ids]}})
    all_res = []
    for cert_info in cert_info_list:
        res = get_cert_detail(cert_info)
        if res:
            all_res.append(res)
    return all_res
def judge_task_success_failed(self, dev_id, retry_branch_id):
    """
    :param dev_id: the _id of preload_dev
    :param retry_branch_id: the _id of retry_device_branch
    :return: True if every device succeeded (or is whitelisted), else False
    """
    retry_branch_dict = self.get_data_from_retry_device_branch(
        retry_branch_id)
    if dev_id:
        try:
            preload_dev = s1_db.preload_dev.find_one(
                {'_id': ObjectId(dev_id)})
        except Exception:
            logger.debug("get data error:%s" % traceback.format_exc())
            return False
        if not preload_dev:
            return False
        try:
            devices = preload_dev.get('devices')
            if devices:
                # Walk every device's result record.
                devices_values = list(devices.values())
                if devices_values:
                    for dev in devices_values:
                        code = dev.get('code')
                        a_code = dev.get('a_code')
                        r_code = dev.get('r_code')
                        name = dev.get('name')
                        if code == 200 or a_code == 200 or r_code == 200:
                            continue
                        elif name in retry_branch_dict:
                            if retry_branch_dict[name] == 200:
                                continue
                        else:
                            if dev.get('name') not in [
                                    'CHN-JQ-2-3S3', 'CHN-JA-g-3WD'
                            ]:
                                d_id = preload_dev.get('_id')
                                logger.debug(
                                    "judge_task_success_failed d_id: %s|| devices_values: %s"
                                    % (d_id, devices_values))
                                return False
            return True
        except Exception:
            logger.debug('judge_task_success_failed parse data error:%s' %
                         traceback.format_exc())
            return False
    return False
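# Condensed, illustrative restatement (not part of the original module) of the
# per-device success rule applied above; the dev / retry_codes shapes mirror
# the records read from preload_dev and retry_device_branch.
def _device_ok_sketch(dev, retry_codes,
                      whitelist=('CHN-JQ-2-3S3', 'CHN-JA-g-3WD')):
    if 200 in (dev.get('code'), dev.get('a_code'), dev.get('r_code')):
        return True                      # any of the three codes succeeded
    if dev.get('name') in retry_codes:
        # mirrors the code above: a device with any retry branch record,
        # even a non-200 one, never fails the task
        return True
    return dev.get('name') in whitelist  # whitelisted devices never fail a task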
def udpate_url_dev_autodesk(id, different_time):
    """
    :param id:
    :param different_time:
    :return:
    """
    logger.debug('udpate_url_dev_autodesk id:%s, different_time:%s' %
                 (id, different_time))
    try:
        url_result = db.url.find_one({'_id': ObjectId(id)})
        logger.debug(
            'check_url_autodesk udpate_url_dev_autodesk get data success, id:%s'
            % id)
    except Exception as e:
        logger.error(
            'check_url_autodesk udpate_url_dev_autodesk error, id:%s, %s' %
            (id, e))
        return
def update_url_autodesk(id):
    """
    Consume the autodesk_id RabbitMQ message queue and update the
    url_autodesk status and finish_time.
    :param id: the _id of url_autodesk, also the _id of url
    :return:
    """
    logger.debug('update_url_autodesk id:%s' % id)
    try:
        url_result = db.url.find_one({'_id': ObjectId(id)})
        logger.debug(
            'check_url_autodesk update_url_autodesk get data success, id:%s' %
            id)
    except Exception as e:
        logger.error(
            'check_url_autodesk update_url_autodesk error, id:%s, %s' %
            (id, e))
        return
    if not url_result:
        return
    # The url has finished; record the final status on url_autodesk.
    finish_time = datetime.now()
    finish_time_timestamp = time.mktime(finish_time.timetuple())
    try:
        db.url_autodesk.update_one(
            {'_id': ObjectId(id)},
            {'$set': {
                'status': url_result.get('status'),
                'finish_time': finish_time,
                'finish_time_timestamp': finish_time_timestamp,
                'dev_id': url_result.get('dev_id', None),
                'retry_branch_id': url_result.get('retry_branch_id', None)
            }})
        db_update(db.request, {'_id': ObjectId(url_result.get('r_id'))},
                  {'$inc': {'check_unprocess': -1}})
        logger.debug('update_url_autodesk request r_id:%s' %
                     url_result.get('r_id'))
        request_result = db.request.find_one(
            {'_id': ObjectId(url_result.get('r_id'))})
        logger.debug('update_url_autodesk request_result:%s' % request_result)
        if not request_result.get('check_unprocess'):
            # All urls of the request are checked; stamp the request itself.
            finish_time_request = datetime.now()
            finish_time_request_timestamp = time.mktime(
                finish_time_request.timetuple())
            db_update(db.request, {'_id': ObjectId(url_result.get('r_id'))},
                      {'$set': {
                          'finish_time_autodesk': finish_time_request,
                          'finish_time_autodesk_timestamp':
                          finish_time_request_timestamp
                      }})
        logger.debug('check_url_autodesk update_url_autodesk success id:%s' %
                     id)
    except Exception:
        logger.debug(
            "check_url_autodesk update_url_autodesk error, id:%s, %s" %
            (id, traceback.format_exc()))
        return
def process(db, refresh_task, check_overload=False):
    '''
    Process a refresh task.

    Parameters
    ----------
    db : database handle
    refresh_task : the refresh task to process
    check_overload : whether to check the quota

    Returns
    -------
    message : dict with r_id and, when the quota is exceeded,
        urlExceed / dirExceed counts
    '''
    try:
        request_id = ObjectId()
        message = {"r_id": str(request_id)}
        refresh_task['r_id'] = request_id
        logger.debug("process refresh_task:%s %s %s " %
                     (refresh_task['r_id'], refresh_task['username'],
                      refresh_task['urls'] if 'urls' in refresh_task
                      else refresh_task['dirs']))
        if check_overload:
            # getOverload returns the remaining quota directly.
            url_overload = getOverload(refresh_task.get("username"), 'URL')
            dir_overload = getOverload(refresh_task.get("username"), 'DIR')
            url_length = len(refresh_task.get("urls") or []) + \
                len(refresh_task.get("update_urls") or [])
            dir_length = len(refresh_task.get("dirs") or [])
            if url_length > 0 and url_overload > 0:
                message['urlExceed'] = url_length
                refresh_task['urls'] = []
                refresh_task['update_urls'] = []
                setCounterCache(refresh_task, url_length, 'URL')
                logger.error('process error ! refresh_task :%s,url:%s' %
                             (refresh_task['r_id'], url_overload))
            if dir_length > 0 and dir_overload > 0:
                message['dirExceed'] = dir_length
                refresh_task['dirs'] = []
                setCounterCache(refresh_task, dir_length, 'DIR')
                logger.error('process error ! refresh_task :%s,dir:%s ' %
                             (refresh_task['r_id'], dir_overload))
            # Only submit if anything survived the quota check.
            if len(refresh_task.get("urls") or []) > 0 or \
                    len(refresh_task.get("dirs") or []) > 0 or \
                    len(refresh_task.get("update_urls") or []) > 0:
                submit.delay(refresh_task)
                # submit(refresh_task)
        else:
            submit.delay(refresh_task)
    except Exception:
        logger.error(traceback.format_exc())
        logger.error('process error ! refresh_task :%s ' % refresh_task)
    return message
def dispatch(tasks):
    try:
        logger.debug("transfer expired_cert begin %s" % len(tasks))
        logger.debug("transfer expired_cert begin task_ids %s" %
                     [i['_id'] for i in tasks])
        dev_id, devs, devs_dict = init_cert_dev(tasks[0])
        logger.debug("devs is %s" % devs)
        logger.debug("cert_devs dev_id %s, devs len %s" % (dev_id, len(devs)))
        save_tasks = []
        for task in tasks:
            task['dev_id'] = dev_id
            save_task = {}
            save_task['_id'] = ObjectId(task['_id'])
            save_task['status'] = "PROGRESS"
            save_task['dev_id'] = dev_id
            save_task['path'] = task.get('path')
            save_task['username'] = task.get('username')
            save_task['t_id'] = task.get('t_id')
            save_task['send_devs'] = task.get('send_dev')
            save_task['created_time'] = task.get('created_time')
            save_task['save_name'] = task.get('save_name')
            save_task['worker_host'] = WORKER_HOST
            save_tasks.append(save_task)
            make_result_cache(task['_id'], devs_dict)
        s1_db.transfer_cert_tasks.insert(save_tasks)
        worker(tasks, devs)
        # try:
        #     task_id_list = []
        #     for task in tasks:
        #         for sav_name in task['save_name'].split(','):
        #             info_cert = s1_db.cert_detail.find_one({'save_name': sav_name})
        #             task_id_list.append(str(info_cert['_id']))
        #     status = cert_cms_delete(task_id_list)
        #     if status:
        #         cert_portal_delete(task_id_list)
        # except Exception as e:
        #     logger.error('callback error %s' % (e.message))
        logger.debug("transfer cert trans end")
    except Exception:
        logger.debug("transfer cert trans error:%s " % traceback.format_exc())
def commit_preload_timer_task(id_list):
    """
    Fetch the preload_url documents for id_list, batch them, and hand them
    to the preload dispatcher.
    :param id_list: list of preload_url _ids
    :return:
    """
    try:
        results = db_s1.preload_url.find(
            {"_id": {'$in': [ObjectId(i) for i in id_list]}})
        results = [asssemble_data(i) for i in results]
        if results:
            logger.debug('commit_preload_timer_task results:%s' % results)
            url_dict = {}
            for r in results:
                logger.debug('commit_preload_timer_task r:%s' % r)
                logger.debug('commit_preload_timer_task type(r):%s' % type(r))
                if r.get('devices'):
                    # Group urls that share the same device list.
                    d_md5 = md5(json.dumps(r['devices'])).hexdigest()
                    url_dict.setdefault(d_md5, []).append(r)
                    logger.debug('commit_preload_timer_task d_md5: %s' % d_md5)
                    if len(url_dict[d_md5]) > PACKAGE_SIZE:
                        preload_worker_new.dispatch.delay(url_dict.pop(d_md5))
                else:
                    # Otherwise group by channel_code.
                    url_dict.setdefault(r.get('channel_code'), []).append(r)
                    if len(url_dict.get(r.get('channel_code'), {})) > PACKAGE_SIZE:
                        preload_worker_new.dispatch.delay(
                            url_dict.pop(r.get('channel_code')))
            logger.debug('commit_preload_timer_task url_dict: %s' % url_dict)
            # Flush the remaining partial batches.
            for urls in list(url_dict.values()):
                preload_worker_new.dispatch.delay(urls)
                logger.debug(
                    'commit_preload_timer_task delay(urls): %s, type(urls): %s'
                    % (urls, type(urls)))
            logger.debug('commit_preload_timer_task delay(urls) finished!')
    except Exception:
        logger.error('commit_preload_timer_task error: %s' %
                     traceback.format_exc())
    return
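# Self-contained, illustrative restatement (not in the original module) of the
# batching rule above: group by a hash of the device list when present,
# otherwise by channel_code, and flush a group once it exceeds package_size.
import hashlib
import json as _json

def batch_preload_urls_sketch(urls, package_size, flush):
    groups = {}
    for u in urls:
        key = (hashlib.md5(_json.dumps(u['devices']).encode()).hexdigest()
               if u.get('devices') else u.get('channel_code'))
        groups.setdefault(key, []).append(u)
        if len(groups[key]) > package_size:
            flush(groups.pop(key))
    for batch in groups.values():  # flush the remaining partial batches
        flush(batch)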
def put_data_to_redis(result):
    """
    Walk the cursor returned by the database query and route each task into
    one of two Redis timer queues based on how soon it starts.
    :param result: cursor returned by the database query
    :return:
    """
    if result:
        try:
            pip = redis_preload_timer.pipeline()
            batch_size = 1000
            count = 0
            id_list = []
            for res in result:
                id = res.get('_id', None)
                start_time = res.get('start_time', None)
                if id and start_time:
                    timestamp_start_time = datetime_convert_timestamp(start_time)
                    timestamp_now = datetime_convert_timestamp(datetime.now())
                    timestamp_diff = timestamp_start_time - timestamp_now
                    if timestamp_diff < 60:
                        # Starts within a minute: dispatch immediately.
                        id_list.append(ObjectId(id))
                    elif timestamp_diff < 420:
                        logger.debug(
                            'preload_url_timer_pull put_data_to_redis, put to redis 1 key:%s'
                            % id)
                        pip.set("timer_1_" + str(id), timestamp_start_time)
                    else:
                        logger.debug(
                            'preload_url_timer_pull put_data_to_redis, put to redis 7 key:%s'
                            % id)
                        pip.set("timer_7_" + str(id), timestamp_start_time)
                count += 1
                if not count % batch_size:
                    pip.execute()
                    count = 0
            # Send the last (partial) batch.
            pip.execute()
            if id_list:
                logger.debug(
                    'preload_url_timer_pull put_data_to_redis id_list: %s' %
                    id_list)
                commit_preload_timer_task.delay(id_list)
        except Exception:
            logger.debug("operator error trace:%s" % traceback.format_exc())
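# Illustrative helper (an assumption, not in the original module) making the
# bucketing thresholds used by put_data_to_redis() explicit. The 60- and
# 420-second cutoffs and the key prefixes come from the code above; the
# polling cadence of the timer_7_ queue is inferred from the key name.
def timer_bucket_sketch(seconds_until_start):
    if seconds_until_start < 60:
        return 'dispatch-now'  # sent straight to commit_preload_timer_task
    if seconds_until_start < 420:
        return 'timer_1_'      # polled each minute by process_redis_1_minute
    return 'timer_7_'          # polled on the slower (presumably 7-minute) cycle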
def dispatch(tasks):
    '''
    Distribute certificates.
    tasks: [{'key': xx, 'cert': xx, 'seed': xxx, 'dir': xxx,
             'send_devs': 'all_hpcc', 'task_id': xxx}, ...]
    '''
    try:
        logger.debug("dispatch cert trans begin %s" % len(tasks))
        logger.debug("dispatch cert trans begin task_ids %s" %
                     [i['_id'] for i in tasks])
        dev_id, devs, devs_dict = init_cert_devs(tasks[0]['send_devs'])
        logger.debug("cert_devs dev_id %s, devs len %s" % (dev_id, len(devs)))
        save_tasks = []
        for task in tasks:
            task['dev_id'] = dev_id
            save_task = {}
            save_task['_id'] = ObjectId(task['_id'])
            save_task['status'] = "PROGRESS"
            save_task['dev_id'] = dev_id
            save_task['username'] = task.get('username')
            save_task['user_id'] = task.get('user_id')
            save_task['c_id'] = task.get('c_id', '')
            save_task['o_c_id'] = task.get('o_c_id', '')
            save_task['cert_alias'] = task.get('cert_alias', '')
            save_task['send_devs'] = task.get('send_devs', '')
            save_task['created_time'] = task.get('created_time')
            save_task['recev_host'] = task.get('recev_host')
            save_task['worker_host'] = WORKER_HOST
            save_task['rcms_callback_time'] = ''
            save_task['portal_callback_time'] = ''
            save_tasks.append(save_task)
            make_result_cache(task['_id'], devs_dict)
        s1_db.cert_trans_tasks.insert(save_tasks)
        worker(tasks, devs)
        logger.debug("dispatch cert trans end")
    except Exception:
        logger.debug("dispatch cert trans error:%s " % traceback.format_exc())
def __init__(self):
    self.dev_id = ObjectId()
    self.db_dev = {}
    self.basic_info = {}
    self.last_basic_info_reload_time = 0
    self.use_old = config.getboolean("success_definition_strategy", "use_old")
    self.basic_info_file_path = config.get('success_definition_strategy',
                                           'basic_info_file')
    self.basic_info_reload_interval = config.get(
        'success_definition_strategy', 'basic_info_reload_interval')
    self.isp_priority_list = config.get('success_definition_strategy',
                                        'isp_priority').split(',')
    self.isp_priority_list = [
        item.strip().upper() for item in self.isp_priority_list
    ]
    self.region_priority_list = config.get('success_definition_strategy',
                                           'region_priority').split(',')
    self.region_priority_list = [
        item.strip().upper() for item in self.region_priority_list
    ]
    self.fail_device_list = []
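# Minimal sketch (assumption) of the [success_definition_strategy] config
# section this constructor reads. Key names come from the config.get calls
# above; the values are illustrative only.
from configparser import ConfigParser

_sample_cfg = ConfigParser()
_sample_cfg.read_string("""
[success_definition_strategy]
use_old = false
basic_info_file = /tmp/basic_info.json
basic_info_reload_interval = 300
isp_priority = ct, cu, cm
region_priority = east, north, south
""")
assert _sample_cfg.getboolean('success_definition_strategy', 'use_old') is False
assert [s.strip().upper() for s in
        _sample_cfg.get('success_definition_strategy',
                        'isp_priority').split(',')] == ['CT', 'CU', 'CM']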