def update_error_task(task, results, db, query_db):
    # Record the outcome of one retried error task and drive follow-up state.
    #
    # task     : error-task document (mutated in place)
    # results  : truthy when the retry succeeded
    # db       : primary store (writes)
    # query_db : secondary store used for the remaining-open-tasks count
    task['retryCount'] = task.get('retryCount', 0) + 1
    if results:
        # Success path: -2 marks this error task as resolved.
        task['retryCount'] = -2
        db.error_task.save(task)
        db_update(db.device, {"_id": task.get("dev_id")}, {
            "$set": {
                "devices.%s.code" % task.get('name'): STATUS_RETRY_SUCCESS
            }
        })
        logger.debug('successed, task_id = %s dev_id = %s host=%s...' %
                     (task.get("_id"), task.get("dev_id"), task.get("host")))
        # If no other OPEN, unresolved (retryCount != -2) error tasks remain
        # for this device, the related URLs can be verified as FINISHED.
        if not query_db.error_tasks.find({
                "dev_id": task.get("dev_id"),
                "retryCount": {'$ne': -2},
                "status": 'OPEN'
        }).count():
            verify.verify(task.get("urls"), db, 'FINISHED')
    else:
        # Failure path: persist the incremented retry counter.
        db.error_task.save(task)
        # NOTE(review): the log text says "retryCount:3" but the code compares
        # against 1 — confirm which retry limit is actually intended.
        if task.get('retryCount') == 1:
            logger.debug(
                "retryCount:3, status:FAILED,task_id : %s dev_id : %s..." %
                (task.get("_id"), task.get("dev_id")))
            verify.verify(task.get("urls"), db, 'FAILED')
            # NOTE(review): re-enqueueing right after marking FAILED —
            # presumably feeds a downstream consumer; confirm this statement's
            # intended nesting (collapsed source is ambiguous here).
            queue.put_json2("error_task", [get_queue(task)])
def merge_preload_task(self, message, url_dict, url_other, package_size=50):
    """Merge one preload task message into the pending batches.

    :param message: JSON-encoded task document
    :param url_dict: realtime (PROGRESS) tasks, grouped by channel_code
    :param url_other: timer / interval / schedule tasks (flat list)
    :param package_size: dispatch a channel's batch once it reaches this size
    """
    try:
        task = json.loads(message)
        compressed = task.get('compressed')
        # created_time arrives as a string; parsed here, re-serialized below
        # for any duplicate task pushed back onto the queue.
        task['created_time'] = datetime.strptime(task.get("created_time"),
                                                 "%Y-%m-%d %H:%M:%S")
        # productType is cached per channel_code for one hour.
        p_key = 'channel_code_%s' % (task.get('channel_code'), )
        productType = CHECK_CACHE.get(p_key)
        if not productType:
            productType = self.db.preload_channel.find_one({
                'channel_code': task.get('channel_code')
            }).get('productType')
            CHECK_CACHE.set(p_key, productType)
            CHECK_CACHE.expire(p_key, 60 * 60)
            logger.debug(
                "merge_preload_task [inIF:productTypeNotExist] productType: %s"
                % (productType, ))
        logger.debug("merge_preload_task productType: %s" % (productType, ))
        if task.get('status') == 'PROGRESS':
            # Realtime task: group by channel for batched dispatch.
            url_dict.setdefault(task.get("channel_code"), []).append(task)
            # For productType '1' compressed tasks, also requeue an
            # uncompressed copy of the task.
            if productType == '1' and compressed:
                task2 = copy.deepcopy(task)
                task2['compressed'] = False
                task2['created_time'] = datetime.strftime(
                    task2['created_time'], "%Y-%m-%d %H:%M:%S")
                queue.put_json2('preload_task', [task2])
            logger.debug("merge_preload_task url_dict: %s" % (url_dict, ))
        else:
            # timer, interval, schedule tasks
            url_other.append(task)
            if productType == '1' and compressed:
                task2 = copy.deepcopy(task)
                task2['compressed'] = False
                task2['created_time'] = datetime.strftime(
                    task2['created_time'], "%Y-%m-%d %H:%M:%S")
                queue.put_json2('preload_task', [task2])
        # logger.debug("merge_preload_task url_dict: %s|| url_other: %s" % (url_dict, url_other))
        # Dispatch the channel's batch once it is large enough.
        if len(url_dict.get(task.get("channel_code"), {})) >= package_size:
            preload_worker_new.dispatch.delay(
                url_dict.pop(task.get("channel_code")))
    except Exception:
        logger.error("merge_preload_task error: %s" %
                     (traceback.format_exc()))
def cert_portal_delete(certid_list, method="DELETE"):
    """Ask the portal service to delete the given certificate ids.

    Retries up to RETRY_NUM times; on total failure, queues an alert email
    to email_group.  Returns True on success, False otherwise.

    :param certid_list: list of certificate ids to delete
    :param method: HTTP method for the request (default "DELETE")
    """
    # NOTE(review): credentials are hard-coded; consider moving to config.
    access_key = '51e7713a8e9e7d31'
    access_secret = '85053e054c735430e12f12850397545f'
    status = False
    message = ''
    for i in range(RETRY_NUM):
        try:
            timestamp = str(int(time.time()))
            nonce = "".join(
                random.sample(string.ascii_letters + string.digits, 10))
            url = url_portal_delete
            # Signature: sha1 over the sorted concatenation of key, secret,
            # timestamp and nonce.
            str_list = [access_key, access_secret, timestamp, nonce]
            str_sorted = "".join(sorted(str_list))
            print(str_sorted)
            # BUG FIX: hashlib requires bytes under Python 3.
            signature = hashlib.sha1(str_sorted.encode('utf-8')).hexdigest()
            send_headers = {
                'X-CC-Auth-Key': access_key,
                'X-CC-Auth-Timestamp': timestamp,
                'X-CC-Auth-Nonce': nonce,
                'X-CC-Auth-Signature': signature,
                'Content-Type': 'application/json',
            }
            values = {"cert_ids": certid_list}
            # BUG FIX: urlopen request data must be bytes under Python 3.
            jdata = json.dumps(values).encode('utf-8')
            req = urllib.request.Request(url, jdata, headers=send_headers)
            # Honor the `method` parameter (was hard-coded to 'DELETE',
            # leaving the parameter unused; default is unchanged).
            req.get_method = lambda: method
            response = urllib.request.urlopen(req)
            result = json.loads(response.read())
            print(result)
            logger.debug("portal delete result : %s,%s" %
                         (result, certid_list))
            if result['status'] == 0:
                status = True
                message = result['msg']
                break
            else:
                message = result
        except Exception as e:  # BUG FIX: `e` was unbound (NameError);
            # `e.message` does not exist on Python 3 exceptions.
            logger.debug("cert portal delete erros %s" % (e))
            print(e)
    if not status:
        email = [{
            "to_addrs": email_group,
            "title": 'PORTAL证书转移失败',
            "body": "失败的证书id列表为%s;具体信息:%s" % (certid_list, message)
        }]
        queue.put_json2('email', email)
        print('FAILED')
    return status
def noticeEmail(refresh_task):
    """Queue a 'refresh callback' notification email for a refresh task.

    Sends only when the task's callback carries both an email address and
    the acptNotice flag.  Failures are logged, never raised (best-effort).
    """
    try:
        callback = refresh_task.get('callback')
        if callback.get('email'):
            if callback.get('acptNotice'):
                email = [{"username": refresh_task.get('username'),
                          "to_addrs": callback.get('email'),
                          "title": 'refresh callback',
                          "body": get_email(refresh_task)}]
                queue.put_json2('email', email)
                logger.debug('email :%s put email_queue!' %
                             callback.get('email'))
    except Exception as e:
        # BUG FIX: was `except Exception:` but then logged `e`, raising a
        # NameError inside the handler instead of swallowing the error.
        logger.error('sendEmail error!')
        logger.error(e)
def noticeEmail(refresh_task):
    """Queue a 'refresh callback' notification email for a refresh task.

    Skips users present in the Redis blacklist (CALLBACK_CACHE).  Sends only
    when the callback has an email address and the acptNotice flag.  Failures
    are logged, never raised (best-effort).
    """
    try:
        callback = refresh_task.get('callback')
        if callback.get('email'):
            # Skip users on the Redis blacklist.
            if not CALLBACK_CACHE.exists(prefix_callback_email_username +
                                         refresh_task.get('username')):
                if callback.get('acptNotice'):
                    email = [{"username": refresh_task.get('username'),
                              "to_addrs": callback.get('email'),
                              "title": 'refresh callback',
                              "body": get_email(refresh_task)}]
                    queue.put_json2('email', email)
                    logger.debug('email :%s put email_queue!' %
                                 callback.get('email'))
    except Exception as e:
        # BUG FIX: `except Exception, e:` is Python-2-only syntax and is a
        # SyntaxError under Python 3 (the rest of the file uses py3 idioms).
        logger.error('sendEmail error!')
        logger.error(e)
def send_result_error(err_message):
    """Email the channel's responsible users about a failed refresh callback.

    err_message is a dict such as:
    {'request_id': ..., 'url': ..., 'success': ..., 'all': ..., 'channelName': ...}
    """
    # Look up who owns this channel; fall back to the default address.
    channel = q_db.rep_channel.find_one(
        {'channelName': err_message.get('channelName')})
    owner = channel.get('userName', None)
    recipients = owner.split(',') if owner else ["*****@*****.**"]
    payload = [{
        "username": recipients,
        "to_addrs": recipients,
        "title": u'刷新回调失败任务',
        "body": json.dumps(err_message),
    }]
    queue.put_json2('email', payload)
def cert_cms_delete(extnIds):
    """Ask the CMS service to delete the given certificate extnIds.

    Retries up to RETRY_NUM times; on total failure, queues an alert email
    to email_group.  Returns True on success, False otherwise.
    """
    url = url_cms_delete
    # NOTE(review): credentials are hard-coded in the request body; consider
    # moving to config.
    values = {
        "ROOT": {
            "HEADER": {
                "AUTH_INFO": {
                    "LOGIN_NO": "*****@*****.**",
                    "LOGIN_PWD": "cert_2018_Q1",
                    "FUNC_CODE": "9072"
                }
            },
            "BODY": {
                "BUSI_INFO": {
                    "extnIds": extnIds
                }
            }
        }
    }
    send_headers = {'Content-Type': 'application/json'}
    # BUG FIX: urlopen request data must be bytes under Python 3.
    jdata = json.dumps(values).encode('utf-8')
    status = False
    for i in range(RETRY_NUM):
        try:
            req = urllib.request.Request(url, jdata, headers=send_headers)
            response = urllib.request.urlopen(req)
            result = json.loads(response.read())
            logger.debug("cms delete result : %s,%s" % (result, extnIds))
            if result['ROOT']['BODY']['RETURN_MSG'] == 'OK':
                status = True
                print('OK')
                break
        except Exception as e:  # BUG FIX: `e` was unbound (NameError);
            # `e.message` does not exist on Python 3 exceptions.
            logger.debug("cert cms delete erros %s" % (e))
            print(e)
    if not status:
        email = [{
            "to_addrs": email_group,
            "title": 'CMS证书转移失败',
            "body": "失败的证书id列表为%s" % (extnIds)
        }]
        queue.put_json2('email', email)
        print('FAILED')
    return status
def cert_trans():
    '''
    Receive a certificate-dispatch request from the portal.

    Validates and packages the posted certificate into a task, queues it on
    'cert_task', and maps each domain validation error to its own HTTP-ish
    status code in the JSON response.
    '''
    try:
        cert_data = request.data
        logger.debug('receiver cert data:{}'.format(cert_data))
        data = json.loads(cert_data)
        # make_task performs validation and raises the Cert*Error types below.
        task = make_task(data)
        queue.put_json2('cert_task', [task])
        return jsonify({
            'code': 200,
            'task_id': task['_id'],
            'cert_id': task['c_id'],
            'cert_cache_name': task['s_name']
        })
    except CertInputError as ex:
        return jsonify({"code": 504, "msg": ex.__str__()})
    except CertExpireError as ex:
        return jsonify({"code": 505, "msg": ex.__str__()})
    except CertRevokeError as ex:
        return jsonify({"code": 506, "msg": ex.__str__()})
    except CertPrikeyError as ex:
        return jsonify({"code": 507, "msg": ex.__str__()})
    except CertPathError as ex:
        return jsonify({"code": 508, "msg": ex.__str__()})
    except CertDecryptError as ex:
        return jsonify({"code": 509, "msg": ex.__str__()})
    except CertPrikeyTypeError as ex:
        return jsonify({"code": 510, "msg": ex.__str__()})
    except CertSaveNameError as ex:
        return jsonify({"code": 511, "msg": ex.__str__()})
    except CertNoRoot as ex:
        return jsonify({"code": 512, "msg": ex.__str__()})
    except CertNoMiddle as ex:
        return jsonify({"code": 513, "msg": ex.__str__()})
    except CertAliasError as ex:
        return jsonify({"code": 514, "msg": ex.__str__()})
    except Exception as e:
        # BUG FIX: was `except Exception:` but the handler used `e`, so any
        # unexpected error raised NameError instead of returning the 500 JSON.
        logger.debug('/internal/cert/trans error')
        logger.debug(traceback.format_exc())
        logger.debug(e.__str__())
        return jsonify({"code": 500, "msg": "The schema of request is error."})
def timer_run(self):
    """Periodic refresh sweep.

    Re-enqueues url tasks still in PROGRESS that were created between 1800s
    and 300s before self.dt (i.e. 30 to 5 minutes ago).
    """
    try:
        logger.debug("refresh timer work begining...")
        task_list = [
            self.process_url(task['username'],
                             dict((key, task[key]) for key in task))
            for task in self.query_db.url.find(
                {
                    "status": "PROGRESS",
                    "created_time": {
                        "$lte": self.dt - timedelta(seconds=300),
                        "$gte": self.dt - timedelta(seconds=1800)
                    }
                },
                {
                    'status': 1,
                    'isdir': 1,
                    'ignore_case': 1,
                    'layer_type': 1,
                    '_id': 1,
                    'username': 1,
                    'url': 1,
                    'r_id': 1,
                    'action': 1,
                    'firstLayer': 1,
                    'channel_code': 1,
                    'is_multilayer': 1,
                    'created_time': 1
                })
        ]
        # Round-trip through JSON — presumably normalizes the documents to
        # plain JSON types before queueing; TODO confirm necessity.
        queue.put_json2('url_queue', json.loads(json.dumps(task_list)))
        # BUG FIX: was `logging.debug` (root logger) while the rest of this
        # code uses the module-level `logger`.
        logger.debug(
            "refresh timer.work process messages end, count: %d " %
            len(task_list))
    except Exception as e:
        # BUG FIX: `e` was unbound (`except Exception:` followed by print(e)),
        # which raised NameError inside the handler.
        print(e)
        logger.warning('timer work error:%s' % traceback.format_exc())
if result['status'] == 0: status = True message = result['msg'] break else: message = result except Exception, e: logger.debug("cert portal delete erros %s" % (e.message)) print e if status != True: email = [{ "to_addrs": email_group, "title": 'PORTAL证书转移失败', "body": "失败的证书id列表为%s;具体信息:%s" % (certid_list, message) }] queue.put_json2('email', email) print 'FAILED' return status def do_callback_portal(url, _type, command): is_success = False start_time = time.time() json_command = json.dumps(command) for x in xrange(2): try: access_key = '51e7713a8e9e7d31' access_secret = '85053e054c735430e12f12850397545f'
def submit(refresh_task, urls):
    '''
    Submit refresh url tasks to the message queue.

    Parameters
    ----------
    refresh_task : the refresh task document
    urls : candidate url documents to process and enqueue

    Notes
    -----
    Decorated elsewhere as a @task: calling submit only enqueues the call;
    a worker executes the body.  ignore_result / default_retry_delay /
    max_retries control the task's retry behavior.
    '''
    try:
        logger.debug('getUrls:%s' % urls)
        urls = getUrlsInLimit(urls)
        # Per-user prefix rewriting, if configured in REWRITE_CACHE.
        username_t = 'prefix_username_' + refresh_task.get("username")
        if REWRITE_CACHE.exists(username_t):
            urls = prefixReplace(urls, username_t)
        urls = processChinese(urls)
        urls = get_physical_del_channels(urls)
        # Best-effort filters: a failure here must not kill the submit.
        try:
            urls = domain_ignore(urls)
        except Exception:
            logger.debug('domain ignore error {}'.format(
                traceback.format_exc()))
        try:
            urls = dir_and_url(urls)
        except Exception:
            # BUG FIX: log message was copy-pasted from the domain_ignore
            # handler, making the two failures indistinguishable.
            logger.debug('dir_and_url error {}'.format(
                traceback.format_exc()))
        if not urls:
            return
        logger.debug('submit: %s' % urls)
        db.url.insert(urls)
        username = refresh_task.get('username')
        try:
            # NOTE(review): eval() on config text — trusted config only.
            user_list = eval(
                config.get('refresh_redis_store_usernames', 'usernames'))
        except Exception:
            logger.debug('splitter_new submit error:%s' %
                         traceback.format_exc())
            user_list = []
        try:
            if username in user_list:
                add_rid_url_info_into_redis(refresh_task.get('r_id'), urls)
        except Exception:
            # BUG FIX: was traceback.formate_exc(e) — AttributeError (typo)
            # and `e` was unbound.
            logger.debug('insert result into redis error:%s' %
                         traceback.format_exc())
        # Split urls into normal and high-priority queues.
        messages = []
        messages_high = []
        for url in urls:
            if url.get("status") == 'PROGRESS':
                url_info = get_refreshurl(refresh_task.get('username'), url)
                if url.get('high_priority', False):
                    messages_high.append(url_info)
                else:
                    messages.append(url_info)
        logger.debug("需要加入到url_queue中的messages: %s" % messages)
        logger.debug("需要加入到url_high_priority_queue中的messages: %s" %
                     messages_high)
        db.request.insert({
            "_id": refresh_task.get('r_id'),
            "username": refresh_task.get("username"),
            "parent": refresh_task.get("parent"),
            "callback": refresh_task.get("callback"),
            "status": "PROGRESS",
            "unprocess": len(messages),
            "created_time": datetime.strptime(
                refresh_task.get('request_time'), '%Y-%m-%d %X')
            if refresh_task.get('request_time') else datetime.now(),
            "remote_addr": refresh_task.get('remote_addr', ''),
            "serial_num": refresh_task.get('serial_num', '')
        })
        # Optional forwarding to webluker; best-effort.
        web_task = refresh_task.get('web_task')
        if web_task:
            try:
                webluker_tools.post_data_to_webluker(web_task.get('task_new'),
                                                     web_task.get('task_all'),
                                                     str(web_task.get('r_id')))
            except Exception:
                logger.debug(
                    'splitter_new webluker task have r_id:%s, error:%s' %
                    (web_task.get('r_id'), traceback.format_exc()))
        queue.put_json2('url_queue', messages)
        if messages_high:
            queue.put_json2('url_high_priority_queue', messages_high)
        if refresh_task.get('callback'):
            noticeEmail(refresh_task)
    except Exception as e:
        # BUG FIX: `e` was unbound here, so the retry raised NameError
        # instead of re-scheduling the task.
        logger.warning('submit error! do retry. error:%s' %
                       traceback.format_exc())
        raise submit.retry(exc=e)
"created_time": created_time, "remote_addr": refresh_task.get('remote_addr', ''), "serial_num": refresh_task.get('serial_num', ''), 'executed_end_time_timestamp': executed_end_time_timestamp, 'executed_end_time': executed_end_time, 'remain_time_return_timestamp': remain_time_return_timestamp, 'remain_time_failed_timestamp': remain_time_failed_timestamp }) queue.put_json2('url_queue', messages) if messages_high: queue.put_json2('url_high_priority_queue', messages_high) if refresh_task.get('callback'): noticeEmail(refresh_task) except Exception, e: logger.warning('submit error! do retry. error:%s' % traceback.format_exc(e)) raise submit.retry(exc=e) def process(db, refresh_task, check_overload=False): ''' 处理任务
def run():
    """Scan the last 30 minutes of refresh failures for key customers and
    queue one digest email per customer/channel, skipping uids that a retry
    pass already recovered.  Exits the process when done."""
    now = datetime.now()
    start_str = 'start script on {datetime}'.format(datetime=now)
    logger.info(start_str)
    emails = {}
    cur_time = datetime.combine(now.date(),
                                now.time().replace(second=0, microsecond=0))
    future_time = cur_time + timedelta(minutes=10)
    pre_time = cur_time - timedelta(minutes=30)
    logger.info('begin_date:%s end_date:%s' % (pre_time, cur_time))
    config_dic = get_key_customers_monitor()
    uid_succss_list = []
    try:
        print(start_str)
        # Retry failed sends first; successful uids are excluded below.
        uid_succss_list = retry_send(pre_time, future_time)
        end_time = datetime.now()
        end_str = 'finish script on {datetime}, use {time}'.format(
            datetime=end_time, time=end_time - now)
        print(end_str)
        logger.debug("get retry success list {0}".format(uid_succss_list))
    except Exception:
        logger.debug('retry_send error:%s' % traceback.format_exc())
    print("---------------uid------------------")
    print(uid_succss_list)
    logger.debug(uid_succss_list)
    for ref_err in get_ref_errs(pre_time, cur_time):
        if ref_err['uid'] in uid_succss_list:
            continue
        try:
            # Config lookup: exact username_channel key first, then the
            # username-only fallback key.
            key = '%s_%s' % (str(ref_err.get('username', '')),
                             str(ref_err.get('channel_code', '')))
            key1 = '%s_' % str(ref_err.get('username', ''))
            config = config_dic.get(key, config_dic.get(key1, ''))
            logger.debug('key:%s, key1:%s, config:%s' % (key, key1, config))
            print("---------------")
            print(config)
            if config:
                logger.debug(' config:%s' % config)
                str_flag = str(config.get('USERNAME', '')) + '_' + str(
                    config.get('Channel_Code', ''))
                # First failure for this customer/channel: start the digest
                # with a header line.
                if str_flag not in emails:
                    emailResult = '\r\n%s 客户下存在刷新失败\n\n' % ref_err.get(
                        'username', '')
                emailResult += '\n%s\n' % ref_err.get('url', '')
                emailResult += get_failed_dev(ref_err.get('devices', ''))
                emailResult += '\r\n----------------------------------------------------------------------------' \
                               '----------\r\n'
                email = [{
                    "username": ref_err.get('username', ''),
                    "to_addrs": config.get('Monitor_Email', []),
                    "title": '刷新失败任务',
                    "body": emailResult
                }]
                if str_flag in emails:
                    # Append this failure to the existing digest body.
                    email_temp = emails[str_flag]
                    email_temp[0]['body'] += emailResult
                    emails[str_flag] = email_temp
                else:
                    emails[str_flag] = email
                # NOTE(review): reset placement reconstructed from collapsed
                # source — it must clear the accumulator between customers.
                emailResult = ''
        except Exception as e:
            # BUG FIX: `e` was unbound (`except Exception:` then use of e),
            # so one bad record raised NameError out of the handler.
            logger.debug('error:%s' % e)
    logger.info(emails)
    for email in list(emails.values()):
        queue.put_json2('email', email)
    os._exit(0)
def submit(refresh_task):
    '''
    Submit refresh tasks to the message queue (device-refresh variant).

    Parameters
    ----------
    refresh_task : the refresh task document; its urls come from
        getUrls(refresh_task)

    Notes
    -----
    Decorated elsewhere as a @task: calling submit only enqueues the call;
    a worker executes the body.  ignore_result / default_retry_delay /
    max_retries control the task's retry behavior.
    '''
    try:
        urls = getUrlsInLimit(getUrls(refresh_task))
        logger.debug('submit: %s' % urls)
        if not urls:
            return
        setOveload(refresh_task, urls)
        db.url.insert(urls)
        username = refresh_task.get('username')
        try:
            # NOTE(review): eval() on config text — trusted config only.
            user_list = eval(config.get('refresh_redis_store_usernames',
                                        'usernames'))
        except Exception:
            logger.debug('splitter_new submit error:%s' %
                         traceback.format_exc())
            user_list = []
        try:
            if username in user_list:
                add_rid_url_info_into_redis(refresh_task.get('r_id'), urls)
        except Exception:
            # BUG FIX: was traceback.formate_exc(e) — AttributeError (typo)
            # and `e` was unbound.
            logger.debug('insert result into redis error:%s' %
                         traceback.format_exc())
        # Re-attach the target devices to every url; this interface has no
        # channel_code, so derive it from the channel name.
        for url_t in urls:
            url_t['devices'] = refresh_task.get('devices')
            url_t['channel_code'] = get_channelname(url_t.get('url'))
        # Split urls into normal and high-priority queues.
        messages = []
        messages_high = []
        for url in urls:
            if url.get("status") == 'PROGRESS':
                url_info = get_refreshurl(refresh_task.get('username'), url)
                if url.get('high_priority', False):
                    messages_high.append(url_info)
                else:
                    messages.append(url_info)
        db.request.insert({
            "_id": refresh_task.get('r_id'),
            "username": refresh_task.get("username"),
            # NOTE(review): other submit variants store refresh_task['parent']
            # here; this one stores the username — confirm intent.
            "parent": refresh_task.get("username"),
            "callback": refresh_task.get("callback"),
            "status": "PROGRESS",
            "unprocess": len(messages),
            "created_time": datetime.strptime(
                refresh_task.get('request_time'), '%Y-%m-%d %X')
            if refresh_task.get('request_time') else datetime.now(),
            "remote_addr": refresh_task.get('remote_addr', ''),
            "serial_num": refresh_task.get('serial_num', '')
        })
        queue.put_json2('url_queue', messages)
        if messages_high:
            queue.put_json2('url_high_priority_queue', messages_high)
        if refresh_task.get('callback'):
            noticeEmail(refresh_task)
    except Exception as e:
        # BUG FIX: `e` was unbound here, so the retry raised NameError
        # instead of re-scheduling the task.
        logger.warning('submit error! do retry. error:%s' %
                       traceback.format_exc())
        raise submit.retry(exc=e)
def submit(refresh_task, urls, executed_end_time_timestamp=420,
           remain_time_return_timestamp=1800,
           remain_time_failed_timestamp=1200):
    '''
    Submit refresh url tasks to the message queue (deadline-tracking variant).

    Parameters
    ----------
    refresh_task : the refresh task document
    urls : candidate url documents to process and enqueue
    executed_end_time_timestamp : deadline timestamp for execution
    remain_time_return_timestamp : remaining time reported to the customer
        (default 1800s)
    remain_time_failed_timestamp : re-estimated customer time when dispatch
        fails (default 1200s)

    Notes
    -----
    Decorated elsewhere as a @task: calling submit only enqueues the call;
    a worker executes the body.
    '''
    try:
        logger.debug('getUrls:%s' % urls)
        urls = getUrlsInLimit(urls)
        # Encode blanks/spaces in urls.
        urls = encode_balank(urls)
        if not urls:
            return
        logger.debug('submit: %s' % urls)
        db.url.insert(urls)
        username = refresh_task.get('username')
        try:
            # NOTE(review): eval() on config text — trusted config only.
            user_list = eval(
                config.get('refresh_redis_store_usernames', 'usernames'))
        except Exception:
            logger.debug('splitter_new submit error:%s' %
                         traceback.format_exc())
            user_list = []
        try:
            if username in user_list:
                add_rid_url_info_into_redis(refresh_task.get('r_id'), urls)
        except Exception:
            # BUG FIX: was traceback.formate_exc(e) — AttributeError (typo)
            # and `e` was unbound.
            logger.debug('insert result into redis error:%s' %
                         traceback.format_exc())
        # Mirror the urls into the autodesk tracking collection.
        for url_temp in urls:
            url_temp['autodesk_flag'] = 0
        db.url_autodesk.insert(urls)
        # Split urls into normal and high-priority queues.
        messages = []
        messages_high = []
        for url in urls:
            if url.get("status") == 'PROGRESS':
                url_info = get_refreshurl(refresh_task.get('username'), url)
                if url.get('high_priority', False):
                    messages_high.append(url_info)
                else:
                    messages.append(url_info)
        logger.debug("需要加入到url_queue中的messages: %s" % messages)
        logger.debug("需要加入到url_high_priority_queue中的messages: %s" %
                     messages_high)
        created_time = datetime.strptime(
            refresh_task.get('request_time'),
            '%Y-%m-%d %X') if refresh_task.get('request_time') else \
            datetime.now()
        # NOTE(review): the default 420 looks like a duration but is treated
        # as an absolute epoch timestamp here (yielding a 1970 date) —
        # confirm callers always pass a real deadline timestamp.
        executed_end_time = datetime.fromtimestamp(executed_end_time_timestamp)
        db.request.insert({
            "_id": refresh_task.get('r_id'),
            "username": refresh_task.get("username"),
            "parent": refresh_task.get("parent"),
            "callback": refresh_task.get("callback"),
            "status": "PROGRESS",
            "unprocess": len(messages),
            'check_unprocess': len(messages),
            "created_time": created_time,
            "remote_addr": refresh_task.get('remote_addr', ''),
            "serial_num": refresh_task.get('serial_num', ''),
            'executed_end_time_timestamp': executed_end_time_timestamp,
            'executed_end_time': executed_end_time,
            'remain_time_return_timestamp': remain_time_return_timestamp,
            'remain_time_failed_timestamp': remain_time_failed_timestamp
        })
        queue.put_json2('url_queue', messages)
        if messages_high:
            queue.put_json2('url_high_priority_queue', messages_high)
        if refresh_task.get('callback'):
            noticeEmail(refresh_task)
    except Exception as e:
        # BUG FIX: `e` was unbound here, so the retry raised NameError
        # instead of re-scheduling the task.
        logger.warning('submit error! do retry. error:%s' %
                       traceback.format_exc())
        raise submit.retry(exc=e)
def cert_query_trans():
    '''
    Dispatch a certificate-query task.

    Validates the posted query parameters, persists a cert_query_info record,
    builds a task and queues it on 'cert_query_task'.  Each validation error
    maps to its own status code in the JSON response.
    '''
    try:
        s1_db = database.s1_db_session()
        data = json.loads(request.data)
        logger.debug('cert_query_trans post data %s' % (data))
        data_username = data.get('username', 'chinacache')
        data_info = data['info']
        query_ip = data_info.get('ip', '')
        query_path = data_info.get('path', '')
        query_config_path = data_info.get('config_path', '')
        query_cert_type = data_info.get('cert_type', '')
        query_type = data_info.get('query_type', '')  # currently unused
        query_cert_name = data_info.get('cert_name', '')
        query_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Required-field validation; each raises a dedicated error type.
        if not query_ip:
            raise QueryipError('not input ip')
        if not query_path:
            raise QuerypathError('not input path')
        if not query_config_path:
            raise QueryconpathError('not input config path')
        if not query_cert_name:
            raise QuerycertnameError('not input cert name')
        if not query_cert_type:
            raise QuerycerttypeError('not input cert type')
        # Only HPCC devices may be queried.
        devices_list = []
        for q_ip in query_ip:
            q_ip_type = tools.judge_dev_ForH_byip(q_ip)
            if q_ip_type != 'HPCC':
                devices_list.append(q_ip)
        if devices_list:
            raise QuerydevicesError('%s isn`t HPCC devices ' %
                                    ' '.join(devices_list))
        q_id = s1_db.cert_query_info.insert({
            'cert_type': query_cert_type,
            'cert_name': query_cert_name,
            'path': query_path,
            'config_path': query_config_path,
            'created_time': datetime.datetime.now(),
            'username': data_username
        })
        task = {}
        task['_id'] = str(ObjectId())
        task['query_dev_ip'] = tools.sortip(query_ip)
        logger.debug('cert_query_trans query_dev_ip %s' %
                     (task['query_dev_ip']))
        task['dev_ip_md5'] = tools.md5(json.dumps(task['query_dev_ip']))
        logger.debug('cert_query_trans dev_ip_md5 %s' % (task['dev_ip_md5']))
        task['q_id'] = str(q_id)
        task['username'] = data_username
        task['query_path'] = query_path
        task['query_cert_name'] = query_cert_name
        task['query_cert_type'] = query_cert_type
        task['query_config_path'] = query_config_path
        task['created_time'] = query_time
        queue.put_json2('cert_query_task', [task])
        return jsonify({
            'code': 200,
            'task_id': task['_id'],
            'cert_query_id': task['q_id']
        })
    except QueryipError as ex:
        return jsonify({"code": 520, "msg": ex.__str__()})
    except QuerypathError as ex:
        return jsonify({"code": 521, "msg": ex.__str__()})
    except QueryconpathError as ex:
        return jsonify({"code": 522, "msg": ex.__str__()})
    except QuerycertnameError as ex:
        return jsonify({"code": 523, "msg": ex.__str__()})
    except QuerycerttypeError as ex:
        return jsonify({"code": 524, "msg": ex.__str__()})
    except QuerydevicesError as ex:
        return jsonify({"code": 525, "msg": ex.__str__()})
    except Exception as e:
        # BUG FIX: was `except Exception:` but the handler used `e`, so any
        # unexpected error raised NameError instead of returning the 500 JSON.
        logger.debug('/internal/cert/query error')
        logger.debug(traceback.format_exc())
        logger.debug(e.__str__())
        return jsonify({"code": 500, "msg": "The schema of request is error."})
def transfer_portal_expired_cert():
    '''
    Transfer expired certificates away from the portal.

    Validates the posted cert ids, checks them against CMS, deletes them from
    CMS and the portal, records the transfer, renames the cert alias, and
    queues a 'transfer_cert_task' for the worker.
    '''
    try:
        s1_db = database.s1_db_session()
        data = json.loads(request.data)
        cer_id_str = data.get('cert_ids', '')
        username = data.get('username', 'portal')
        transfer_dev = data.get('transfer_dev', [''])  # target cache devices
        dev_type = data.get('dev_type', 'all_dev')     # device scope
        c_o_path = config.get('app', 'o_path')
        c_d_path = config.get('app', 'd_path')
        o_path = data.get('o_path', c_o_path)
        d_path = data.get('d_path', c_d_path)
        transfer_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        logger.debug("username is %s" % username)
        logger.debug("o_path is %s" % o_path)
        logger.debug("d_path is %s" % d_path)
        logger.debug("dev_type is %s" % dev_type)
        # Explicit device list is required unless transferring on all devices.
        if len(transfer_dev
               ) <= 1 and transfer_dev[0] == '' and dev_type != 'all_dev':
            raise TransferdevError('not input ip')
        array_s_name = []
        cer_id_list = cer_id_str.split(',')
        # NOTE(review): ''.split(',') == [''] so this guard can never fire;
        # an empty cert_ids string falls through to ObjectId('') below and
        # surfaces as a 500 — confirm intended handling.
        if not cer_id_list:
            return jsonify({"code": 504, "msg": 'please push cert id'})
        for cert_id in cer_id_list:
            cert_id_objectid = ObjectId(cert_id)
            cert_detail_one = s1_db.cert_detail.find_one(
                {'_id': cert_id_objectid})
            if not cert_detail_one:
                return jsonify({"code": 504, "msg": 'this id not exist'})
            array_s_name.append(cert_detail_one['save_name'])
        if not array_s_name:
            return jsonify({"code": 504, "msg": 'please push cert id'})
        # NOTE(review): uses module-level `db` while everything else here
        # uses s1_db — confirm which connection should own this index.
        db.transfer_certs_detail.ensure_index('save_name', unique=True)
        status, message = check_cert_cms(cer_id_list)
        if status == False:
            return jsonify({"code": 504, "msg": message})
        task = {}
        task['_id'] = str(ObjectId())
        if dev_type == 'all_dev' or transfer_dev == 'all_hpcc':
            task['send_dev'] = 'all_dev'
            task['send_dev_md5'] = tools.md5(task['send_dev'])
        else:
            task['send_dev'] = tools.sortip(transfer_dev)
            logger.debug(task['send_dev'])
            # Only HPCC devices are valid transfer targets.
            devices_list = []
            for q_ip in task['send_dev']:
                q_ip_type = tools.judge_dev_ForH_byip(q_ip)
                if q_ip_type != 'HPCC':
                    devices_list.append(q_ip)
            if devices_list:
                raise QuerydevicesError('%s isn`t HPCC devices ' %
                                        ' '.join(devices_list))
            task['send_dev_md5'] = tools.md5(json.dumps(task['send_dev']))
            logger.debug(task['send_dev_md5'])
        # Delete from CMS first; only then from the portal.
        try:
            status = cert_cms_delete(cer_id_list)
            if status:
                portal_status = cert_portal_delete(cer_id_list)
                if portal_status != True:
                    return jsonify({'code': 504, 'msg': 'portal delete error'})
            else:
                return jsonify({'code': 504, 'msg': 'cms delete error'})
        except Exception:
            logger.error('callback error %s' % (traceback.format_exc()))
            return jsonify({
                'code': 504,
                'msg': 'delete error%s' % (traceback.format_exc())
            })
        # Record each transfer and rename the cert alias.
        for save_name in array_s_name:
            info = s1_db.cert_detail.find_one({'save_name': save_name})
            datestr = int(time.mktime(datetime.datetime.now().timetuple()))
            change_name = "{}{}{}{}".format("trans_", username,
                                            info.get('cert_alias'), datestr)
            t_id = s1_db.transfer_certs_detail.insert({
                'save_name': save_name,
                'o_path': o_path,
                'd_path': d_path,
                'created_time': datetime.datetime.now(),
                'username': username
            })
            s1_db.cert_detail.update(
                {'save_name': save_name},
                {"$set": {
                    "t_id": t_id,
                    "cert_alias": change_name
                }})
        # NOTE(review): task carries only the LAST loop iteration's t_id
        # even though save_name lists all certs — confirm this is intended.
        task['t_id'] = str(t_id)
        task['username'] = username
        task['o_path'] = o_path
        task['d_path'] = d_path
        task['save_name'] = ','.join(array_s_name)
        task['created_time'] = transfer_time
        logger.debug('transfer cert task {}'.format([task]))
        queue.put_json2('transfer_cert_task', [task])
        res = {'code': 200, 'msg': 'ok'}
        return jsonify(res)
    except CertNotFoundError as ex:
        return jsonify({"code": 504, "msg": "The certificate does not exist"})
    except TransferdevError as ex:
        return jsonify({"code": 524, "msg": ex.__str__()})
    except QuerydevicesError as ex:
        return jsonify({"code": 525, "msg": ex.__str__()})
    except Exception as e:
        # BUG FIX: was `except Exception:` but the handler used `e`, so any
        # unexpected error raised NameError instead of returning the 500 JSON.
        logger.debug('/transfer_expired_cert error')
        logger.debug(traceback.format_exc())
        logger.debug(e.__str__())
        return jsonify({"code": 500, "msg": "The schema of request is error."})