Example #1
    def init_db_device_refresh_device(self, urls):
        """
        The device list for all entries is carried in urls (taken from urls[0]).
        Args:
            urls:

        Returns:

        """
        worker_hostname = REFRESH_WORKER_HOST
        devs = urls[0].get('devices')
        db_device = {
            "devices": verify.create_dev_dict(devs),
            "unprocess": len(devs),
            "created_time": datetime.now(),
            "_id": self.dev_id
        }
        for url in urls:
            url["dev_id"] = self.dev_id
            db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
                "$set": {
                    "dev_id": self.dev_id,
                    "worker_host": worker_hostname,
                    "recev_host": url.get("recev_host", "")
                }
            })
        logger.debug(
            "url_init_db_device succeeded, worker_id: %s, dev_id: %s" %
            (self.get_id_in_requestofwork(), self.dev_id))
        return db_device
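For context, a minimal sketch of the db_update helper these examples call, assuming it is a thin wrapper over pymongo's update_one; the real helper is not shown in this listing and may add retries or logging:

def db_update(collection, query, update):
    # Hypothetical wrapper: apply a partial update to the first matching
    # document and return pymongo's UpdateResult.
    return collection.update_one(query, update)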
Example #2
def get_cert_by_task_id(task_id):
    '''
    Fetch the certificate / private key / storage name by task ID
    '''
    cert_res = {}
    task_info = s1_db.cert_trans_tasks.find_one({'_id': ObjectId(task_id)})
    if not task_info:
        return cert_res
    cert_id = task_info.get('c_id', '')
    if not cert_id:
        return cert_res
    cert_info = s1_db.cert_detail.find_one({'_id': ObjectId(cert_id)})
    if not cert_info:
        return cert_res

    return get_cert_detail(cert_info)
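Usage sketch for the lookup chain above; the id here is a hypothetical 24-hex string, and an empty dict means some link in the task -> c_id -> cert_detail chain was missing:

cert = get_cert_by_task_id('5a0b1c2d3e4f5a6b7c8d9e0f')
if not cert:
    print('task, cert id, or cert detail not found')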
def check_transfer_cert_task(task_ids):
    '''
    Check task status after n seconds
    '''
    logger.debug('---check_transfer_cert_task start task_ids: %s---' %
                 (task_ids))

    task_info_list = s1_db.transfer_cert_tasks.find({
        '_id': {
            '$in': [ObjectId(task_id) for task_id in task_ids]
        },
        'status': 'PROGRESS'
    })
    if task_info_list.count() == 0:
        logger.debug('---check_transfer_cert_task no task_info  ids: %s---' %
                     (task_ids))
        return

    for task_info in task_info_list:

        error_devs = get_error_dev_result(task_info)
        if not error_devs:
            continue
        set_finished(task_info['_id'], 'FAILED')
        logger.debug('---check_transfer_cert_task id %s set Failed end---' %
                     (task_info['_id']))

    return
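The "after n seconds" in the docstring suggests delayed invocation. A sketch of how such a check is commonly scheduled, assuming check_transfer_cert_task is registered as a Celery task (the registration is not shown in this listing):

# hypothetical: run the status check 300 seconds after the tasks were dispatched
check_transfer_cert_task.apply_async(args=(task_ids,), countdown=300)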
Example #4
    def init_db_device_refresh_devices(self, url):
        """
        Initialize the db_device record for a refresh task and bind dev_id to the url.
        Args:
            url:

        Returns:

        """
        worker_hostname = REFRESH_WORKER_HOST
        devs = url.get('devices')

        db_device = {
            "devices": verify.create_dev_dict(devs),
            "unprocess": len(devs),
            "created_time": datetime.now(),
            "_id": self.dev_id
        }
        # dev_id = db.device.insert(db_device)
        url["dev_id"] = self.dev_id
        #db_update(db.url, {"_id": ObjectId(url.get("id"))}, {"$set": {"dev_id": self.dev_id}})
        db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
            "$set": {
                "dev_id": self.dev_id,
                "worker_host": worker_hostname,
                "recev_host": url.get("recev_host", "")
            }
        })
        logger.debug(
            "dir_init_db_device succeeded, url_id: %s, dev_id: %s" %
            (url.get("id"), self.dev_id))
        return db_device
Example #5
 def init_db_device(self, url):
     worker_hostname = REFRESH_WORKER_HOST
     devs = rcmsapi.getDevices(url.get("channel_code"))
     if url.get("layer_type") != "one":
         devs += rcmsapi.getFirstLayerDevices(url.get("channel_code"))
     db_device = {
         "devices": verify.create_dev_dict(devs),
         "unprocess": len(devs),
         "created_time": datetime.now(),
         "_id": self.dev_id
     }
     # dev_id = db.device.insert(db_device)
     url["dev_id"] = self.dev_id
     #db_update(db.url, {"_id": ObjectId(url.get("id"))}, {"$set": {"dev_id": self.dev_id}})
     db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
         "$set": {
             "dev_id": self.dev_id,
             "worker_host": worker_hostname,
             "recev_host": url.get("recev_host", "")
         }
     })
     logger.debug(
         "dir_init_db_device succeeded, url_id: %s, dev_id: %s" %
         (url.get("id"), self.dev_id))
     return db_device
 def get_data_from_retry_device_branch(self, retry_branch_id):
     """
     get device info from retry_branch_id
     :param retry_branch_id: the _id of retry_device_branch
     :return: {'host1': 200, 'host2': 503}  dict
     """
     result_return = {}
     if not retry_branch_id:
         return result_return
     else:
         try:
             result = db.retry_device_branch.find_one(
                 {"_id": ObjectId(retry_branch_id)})
             logger.debug(
                 'success get data from retry_device_branch result:%s, retry_branch_id:%s'
                 % (result, retry_branch_id))
         except Exception as e:
             logger.debug(
                 'get data error from retry_device_branch retry_branch_id:%s, error content:%s'
                 % (retry_branch_id, e))
             return result_return
         try:
             if result:
                 devices = result.get('devices')
                 if devices:
                     for dev in devices:
                         name = dev.get('name')
                         branch_code = dev.get('branch_code')
                         if name and branch_code:
                             result_return[name] = branch_code
         except Exception:
             logger.debug(
                 'get_data_from_retry_device_branch parse data error:%s' %
                 traceback.format_exc())
     return result_return
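Usage sketch: the mapping returned above feeds judge_task_success_failed (Example #14), where a branch_code of 200 marks a device as successfully retried; worker stands in for a hypothetical instance of this class:

branch_map = worker.get_data_from_retry_device_branch('5a0b1c2d3e4f5a6b7c8d9e0f')
for name, code in branch_map.items():
    print(name, 'retry ok' if code == 200 else 'retry code: %s' % code)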
Example #7
def set_finished(task_id, status):
    '''
    to finish
    '''
    now = datetime.datetime.now()
    logger.debug('now is %s' % now)
    task_info = s1_db.cert_query_tasks.find_one_and_update(
        {
            '_id': ObjectId(task_id),
            'status': "PROGRESS"
        }, {"$set": {
            'status': status,
            'hpc_finish_time': now
        }},
        return_document=ReturnDocument.AFTER)
    # cache -> mongo
    res_cache_key = '%s_res' % (task_id)
    dev_num_cache_key = '%s_res_dev_num' % (task_id)
    failed_cache_key = '%s_res_failed' % (task_id)
    success_cache_key = '%s_res_success' % (task_id)
    all_cache = CERT_QUERY_CACHE.hgetall(res_cache_key)
    all_dev_num = CERT_QUERY_CACHE.get(dev_num_cache_key)
    success_num = CERT_QUERY_CACHE.scard(success_cache_key)
    unprocess = int(all_dev_num) - int(success_num)
    logger.debug('unprocess is %s' % unprocess)
    if unprocess <= 0:
        unprocess = 0
    #save_cache = {'_id': ObjectId(task_id), 'devices': {}, 'created_time': now, 'unprocess': int(unprocess),'query_result':query_result}
    save_cache = {
        '_id': ObjectId(task_id),
        'devices': {},
        'created_time': now,
        'unprocess': int(unprocess)
    }
    logger.debug('save_cache is %s' % save_cache)
    for k, v in list(all_cache.items()):
        v_obj = json.loads(v)
        save_cache['devices'][v_obj['name']] = v_obj
    try:
        s1_db.cert_query_result.insert_one(save_cache)
    except Exception:
        logger.debug('set_finished error is %s' % (traceback.format_exc()))
        return
    CERT_QUERY_CACHE.delete(res_cache_key)
    CERT_QUERY_CACHE.delete(dev_num_cache_key)
    CERT_QUERY_CACHE.delete(failed_cache_key)
    CERT_QUERY_CACHE.delete(success_cache_key)
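For reference, a sketch of the Redis keys set_finished drains, mirroring how save_result (Example #27) fills them; CERT_QUERY_CACHE is assumed to be a redis.StrictRedis client and the task id is illustrative:

import json

task_id = '5a0b1c2d3e4f5a6b7c8d9e0f'
CERT_QUERY_CACHE.set('%s_res_dev_num' % task_id, 2)  # total device count
CERT_QUERY_CACHE.hset('%s_res' % task_id, '10.0.0.1',
                      json.dumps({'name': '10.0.0.1', 'result_status': 200}))
CERT_QUERY_CACHE.sadd('%s_res_success' % task_id, '10.0.0.1')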
Example #8
def make_all_callback_force(task_ids):
    '''
    Force the callbacks for the given tasks
    '''
    try:
        for t in task_ids:
            t_id = t
            t_obj = s1_db.cert_trans_tasks.find_one({'_id': ObjectId(t_id)})
            if t_obj:
                c_obj = s1_db.cert_detail.find_one(
                    {'_id': ObjectId(t_obj['c_id'])})
                logger.debug('make_all_callback_force begin task_id %s' %
                             (t_id))
                make_all_callback(t_obj, c_obj)
            else:
                logger.debug('make_all_callback_force not begin task_id %s' %
                             (t_id))

    except Exception as e:
        logger.debug('make_all_callback_force error e %s' % (e))
Example #9
    def scheduleTask(self, url_list):
        s1_db = database.s1_db_session()

        host_aps = choice(
            eval(config.get('apscheduler_server', 'host_cluster')))
        logger.debug("scheduleTask host_aps: %s|| url_list: %s" %
                     (host_aps, url_list))
        conn_aps = rpyc.connect(host_aps,
                                int(config.get('apscheduler_server', 'port')),
                                config={
                                    'allow_public_attrs': True,
                                    'allow_all_attrs': True,
                                    'allow_pickle': True
                                })

        for url in url_list:
            if url.get('task_type') == 'SCHEDULE':
                conn_aps.root.add_job(
                    'util.aps_server:preload_worker_new.dispatch.delay',
                    trigger='interval',
                    args=([url], ),
                    seconds=int(url.get('interval')),
                    start_date=url.get('start_time'),
                    end_date=url.get('end_time'))
            elif url.get('task_type') == 'INTERVAL':
                conn_aps.root.add_job(
                    'util.aps_server:preload_worker_new.dispatch.delay',
                    trigger='interval',
                    args=([url], ),
                    seconds=int(url.get('interval')),
                    end_date=url.get('end_time'))
            elif url.get('task_type') == 'TIMER':
                url['_id'] = ObjectId()
                self.s1_db.preload_url.insert(url)
                rdate = url.get('start_time')
                rdate = rdate if isinstance(rdate,
                                            datetime) else datetime.strptime(
                                                rdate, '%Y-%m-%d %H:%M:%S')
                run_date_dict = {
                    'year': rdate.year,
                    'month': rdate.month,
                    'day': rdate.day,
                    'hour': rdate.hour,
                    'minute': rdate.minute,
                    'second': rdate.second,
                }
                conn_aps.root.add_job(
                    'util.aps_server:preload_worker_new.dispatch.delay',
                    'cron',
                    args=([url], ),
                    **run_date_dict)
            logger.info("scheduleTask add_job [%s done!] url: %s." %
                        (url.get('task_type'), url.get('url')))
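A standalone sketch of the TIMER branch's datetime-to-cron conversion above: pinning every field of the start time (year through second) turns apscheduler's cron trigger into a one-shot job that fires at exactly that moment:

from datetime import datetime

rdate = datetime.strptime('2024-01-02 03:04:05', '%Y-%m-%d %H:%M:%S')
run_date_dict = {
    'year': rdate.year, 'month': rdate.month, 'day': rdate.day,
    'hour': rdate.hour, 'minute': rdate.minute, 'second': rdate.second,
}
print(run_date_dict)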
Example #10
def test_update_mongo():
    """
    Insert two fixed test documents into the test_update collection.
    :return:
    """
    data = [{
        "_id": ObjectId("57ff206d2b8a6831b5c2f9b8"),
        'a': 3,
        'b': 4
    }, {
        "_id": ObjectId("57ff206d2b8a6831b5c2f9b9"),
        'a': 5,
        'b': 6
    }]
    db_s1.test_update.insert_many(data)
    print('insert success!')
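A quick verification sketch, assuming db_s1 is an open pymongo database handle; the ids are the ones inserted above:

from bson import ObjectId

ids = [ObjectId("57ff206d2b8a6831b5c2f9b8"), ObjectId("57ff206d2b8a6831b5c2f9b9")]
for doc in db_s1.test_update.find({'_id': {'$in': ids}}):
    print(doc)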
Example #11
def get_cert_by_ids(cert_ids):
    '''
    Fetch certificates / private keys / storage names by task IDs
    '''
    cert_info_list = s1_db.cert_detail.find(
        {'_id': {
            '$in': [ObjectId(i) for i in cert_ids]
        }})
    all_res = []
    for cert_info in cert_info_list:
        res = get_cert_detail(cert_info)
        if res:
            all_res.append(res)
    return all_res
Example #12
def update_url_autodesk(id):
    """
    Consume the autodesk id from the RabbitMQ message queue, then update the
    url_autodesk status and finish_time.
    :param id: the _id of url_autodesk, which is also the _id of url
    :return:
    """
    logger.debug('update_url_autodesk id:%s' % id)
    try:
        url_result = db.url.find_one({'_id': ObjectId(id)})
        logger.debug('check_url_autodesk update_url_autodesk get data success, id:%s' % id)
    except Exception as e:
        logger._log('check_url_autodesk update_url_autodesk error, id:%s, %s' % (id, e))
        return
Example #13
def udpate_url_dev_autodesk(id, different_time):
    """
    Update the url/device autodesk record for the given id.
    :param id: the _id of url
    :param different_time:
    :return:
    """
    logger.debug('update_url_autodesk id:%s, different_time:%s' % (id, different_time))
    try:
        url_result = db.url.find_one({'_id': ObjectId(id)})
        logger.debug('check_url_autodesk update_url_autodesk get data success, id:%s' % id)
    except Exception as e:
        logger._log('check_url_autodesk update_url_autodesk error, id:%s, %s' % (id, e))
        return
Example #14
    def judge_task_success_failed(self, dev_id, retry_branch_id):
        """
        Judge whether every device of the preload task succeeded.
        :param dev_id: the _id of preload_dev
        :param retry_branch_id: the _id of retry_device_branch
        :return:
        """
        retry_branch_dict = self.get_data_from_retry_device_branch(
            retry_branch_id)
        if dev_id:
            try:
                preload_dev = s1_db.preload_dev.find_one(
                    {'_id': ObjectId(dev_id)})
            except Exception:
                logger.debug("get data error:%s" % traceback.format_exc())
                return False
            if not preload_dev:
                return False
            try:
                devices = preload_dev.get('devices')
                if devices:
                    # get content of all device info
                    devices_values = list(devices.values())
                    if devices_values:
                        for dev in devices_values:
                            code = dev.get('code')
                            a_code = dev.get('a_code')
                            r_code = dev.get('r_code')
                            name = dev.get('name')
                            if code == 200 or a_code == 200 or r_code == 200:
                                continue
                            elif name in retry_branch_dict:
                                if retry_branch_dict[name] == 200:
                                    continue
                            else:
                                if dev.get('name') not in [
                                        'CHN-JQ-2-3S3', 'CHN-JA-g-3WD'
                                ]:
                                    d_id = preload_dev.get('_id')
                                    logger.debug(
                                        "judge_task_success_failed d_id: %s|| devices_values: %s"
                                        % (d_id, devices_values))
                                return False
                    return True
            except Exception:
                logger.debug('judge_task_success_failed parse data error:%s' %
                             traceback.format_exc())
                return False

        return False
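For context, a sketch of the preload_dev document shape this method walks, inferred from the field accesses above (values illustrative only):

preload_dev = {
    '_id': '5a0b1c2d3e4f5a6b7c8d9e0f',
    'devices': {
        'CHN-XX-1-ABC': {'name': 'CHN-XX-1-ABC', 'code': 200,
                         'a_code': 0, 'r_code': 0},
    },
}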
Example #15
def update_url_autodesk(id):
    """
    Consume the autodesk id from the RabbitMQ message queue, then update the
    url_autodesk status and finish_time.
    :param id: the _id of url_autodesk, which is also the _id of url
    :return:
    """
    logger.debug('update_url_autodesk id:%s' % id)
    try:
        url_result = db.url.find_one({'_id': ObjectId(id)})
        logger.debug('check_url_autodesk update_url_autodesk get data success, id:%s' % id)
    except Exception as e:
        logger._log('check_url_autodesk update_url_autodesk error, id:%s, %s' % (id, e))
        return
    if not url_result:
        return
    # finish url_autodesk state alter
    finish_time = datetime.now()
    finish_time_timestamp = time.mktime(finish_time.timetuple())
    try:
        db.url_autodesk.update_one({'_id': ObjectId(id)}, {'$set': {'status': url_result.get('status'), 'finish_time': finish_time,
                                'finish_time_timestamp': finish_time_timestamp, 'dev_id': url_result.get('dev_id', None),
                            'retry_branch_id': url_result.get('retry_branch_id', None)}})
        db_update(db.request, {'_id': ObjectId(url_result.get('r_id'))}, {'$inc': {'check_unprocess': -1}})
        logger.debug('update_url_autodesk request r_id:%s' % url_result.get('r_id'))
        request_result = db.request.find_one({'_id': ObjectId(url_result.get('r_id'))})
        logger.debug('update_url_autodesk request_result:%s' % request_result)
        if not request_result.get('check_unprocess'):
            finish_time_request = datetime.now()
            finish_time_request_timestamp = time.mktime(finish_time_request.timetuple())

            db_update(db.request, {'_id': ObjectId(url_result.get('r_id'))}, {'$set': {'finish_time_autodesk': finish_time_request,
                                'finish_time_autodesk_timestamp': finish_time_request_timestamp}})
        logger.debug('check_url_autodesk update_url_autodesk success id:%s' % id)
    except Exception:
        logger.debug("check_url_autodesk update_url_autodesk error, id:%s, %s" % (id, traceback.format_exc()))
        return
def update_db_dev(dev_id, results):
    '''
    Update task status in transfer_cert_dev & transfer_cert_tasks
    '''
    try:

        now = datetime.datetime.now()
        db_dev = s1_db.transfer_cert_dev.find_one({"_id": ObjectId(dev_id)})
        db_dev["finish_time"] = now
        devices = db_dev.get("devices")
        for ret in results:
            devices.get(ret.get("name"))["code"] = ret.get("code", 0)
            devices.get(ret.get("name"))["a_code"] = ret.get("a_code", 0)
            devices.get(ret.get("name"))["r_code"] = ret.get("r_code", 0)
            db_dev["unprocess"] = int(db_dev.get("unprocess")) - 1
        update_dev = copy.deepcopy(db_dev)
        if '_id' in update_dev:
            update_dev.pop('_id')
        s1_db.transfer_cert_dev.update_one({"_id": ObjectId(dev_id)}, {'$set': update_dev})
        s1_db.transfer_cert_tasks.update_many({'dev_id': ObjectId(dev_id)}, {
                                              '$set': {'finish_time': now}})

    except Exception:
        logger.debug('transfer_cert update_db_dev error is %s' % (traceback.format_exc()))
def process(db, refresh_task, check_overload=False):
    '''
    Process a refresh task

    Parameters
    ----------
    db : database handle
    refresh_task : the refresh task to process
    check_overload : whether to check for quota overage

    Returns
    -------
    '''
    try:
        request_id = ObjectId()
        message = {"r_id": str(request_id)}
        refresh_task['r_id'] = request_id
        logger.debug("process refresh_task:%s %s %s " % (refresh_task['r_id'], refresh_task['username'], refresh_task['urls'] if 'urls' in list(refresh_task.keys()) else refresh_task['dirs']))
        if check_overload:
            url_overload = getOverload(refresh_task.get("username"), 'URL')  # returns the remaining quota directly
            dir_overload = getOverload(refresh_task.get("username"), 'DIR')
            url_length = len(refresh_task.get("urls") if refresh_task.get("urls") else []) + len(refresh_task.get("update_urls") if refresh_task.get("update_urls") else [])
            dir_length = len(refresh_task.get("dirs") if refresh_task.get("dirs") else [])

            if (url_length > 0 and url_overload > 0):
                message['urlExceed'] = url_length
                refresh_task['urls'] = []
                refresh_task['update_urls'] = []
                setCounterCache(refresh_task, url_length, 'URL')
                logger.error('process error ! refresh_task :%s,url:%s' % (refresh_task['r_id'], url_overload))

            if (dir_length > 0 and dir_overload > 0):
                message['dirExceed'] = dir_length
                refresh_task['dirs'] = []
                setCounterCache(refresh_task, dir_length, 'DIR')
                logger.error('process error ! refresh_task :%s,dir:%s ' % (refresh_task['r_id'], dir_overload))

            if len(refresh_task.get("urls") if refresh_task.get("urls") else []) > 0 or len(refresh_task.get("dirs") if refresh_task.get("dirs") else []) > 0 or len(refresh_task.get("update_urls") if refresh_task.get("update_urls") else []) > 0 :

                submit.delay(refresh_task)
                # submit(refresh_task)

        else:
            submit.delay(refresh_task)
    except Exception:
        logger.error(traceback.format_exc())
        logger.error('process error ! refresh_task :%s ' % refresh_task)
    return message
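Usage sketch, assuming db is a pymongo database handle and submit is the Celery task referenced above (both are provided elsewhere in this codebase):

task = {'username': 'demo', 'urls': [{'url': 'http://example.com/a.js'}]}
message = process(db, task, check_overload=False)
print(message['r_id'])  # stringified ObjectId of the new request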
Example #18
def get_url(url, username, parent, request_id, action, isSub, type, isdir):
    """
    Fetch the user's channel info from RCMS and match out the channel_code.

    :param url:
    :param username:
    :param request_id:
    :param action:
    :param isdir:
    :return:
    """
    url_id = ObjectId()
    # For dir requests, check whether the url is a valid directory; if not, treat it as a plain url
    if isdir:
        if not url.endswith('/'):
            logger.info('get url url is not dir: url %s isdir %s' %
                        (url, isdir))
            isdir = False

    if isSub:
        isValid, is_multilayer, channel_code, ignore_case = rcmsapi.isValidUrlByPortal(
            username, parent, url)
    else:
        isValid, is_multilayer, channel_code, ignore_case = rcmsapi.isValidUrl(
            parent, url)
    # check task priority
    high_priority = False
    if isValid:
        high_priority = is_refresh_high_priority(channel_code)
    return {
        "_id": url_id,
        "r_id": request_id,
        "url": url,
        "ignore_case": ignore_case,
        "status": 'PROGRESS' if isValid else 'INVALID',
        "isdir": isdir,
        "username": username,
        "parent": parent,
        "created_time": datetime.now(),
        "action": action,
        "is_multilayer": is_multilayer,
        "channel_code": channel_code,
        'type': type,
        'high_priority': high_priority,
        'channel_name': get_channelname(url)
    }
def dispatch(tasks):
    try:
        logger.debug("transfer  expired_cert begin %s" % len(tasks))
        logger.debug("transfer  expired_cert begin task_ids %s" %
                     [i['_id'] for i in tasks])
        dev_id, devs, devs_dict = init_cert_dev(tasks[0])
        logger.debug("devs is %s" % devs)
        logger.debug("cert_devs dev_id %s, devs len %s" % (dev_id, len(devs)))
        save_tasks = []
        for task in tasks:

            task['dev_id'] = dev_id
            save_task = {}
            save_task['_id'] = ObjectId(task['_id'])
            save_task['status'] = "PROGRESS"
            save_task['dev_id'] = dev_id
            save_task['path'] = task.get('path')
            save_task['username'] = task.get('username')
            save_task['t_id'] = task.get('t_id')
            save_task['send_devs'] = task.get('send_dev')
            save_task['created_time'] = task.get('created_time')
            save_task['save_name'] = task.get('save_name')
            save_task['worker_host'] = WORKER_HOST
            save_tasks.append(save_task)
            make_result_cache(task['_id'], devs_dict)

        s1_db.transfer_cert_tasks.insert(save_tasks)
        worker(tasks, devs)

        # try:
        #     task_id_list = []
        #     for task in tasks:
        #         for sav_name in task['save_name'].split(','):
        #             info_cert = s1_db.cert_detail.find_one({'save_name': sav_name})
        #             task_id_list.append(str(info_cert['_id']))
        #     status = cert_cms_delete(task_id_list)
        #     if status:
        #             cert_portal_delete(task_id_list)
        # except Exception,e:
        #     logger.error('callback error %s'%(e.message))

        logger.debug("transfer cert trans end")
    except Exception:
        logger.debug("transfer cert trans error:%s " % traceback.format_exc())
Example #20
def commit_preload_timer_task(id_list):
    """
    Fetch the preload_url documents for the given ids, push them to RabbitMQ,
    and kick off the preloading process.
    :param id_list: list of preload_url _id values
    :return:
    """
    try:
        results = db_s1.preload_url.find(
            {"_id": {
                '$in': [ObjectId(i) for i in id_list]
            }})
        results = [asssemble_data(i) for i in results]
        if results:
            logger.debug('commit_preload_timer_task results:%s' % results)
            url_dict = {}
            for r in results:
                logger.debug('commit_preload_timer_task r:%s' % r)
                logger.debug('commit_preload_timer_task type(r):%s' % type(r))
                if r.get('devices'):
                    d_md5 = md5(json.dumps(r['devices'])).hexdigest()
                    url_dict.setdefault(d_md5, []).append(r)
                    logger.debug('commit_preload_timer_task d_md5: %s' % d_md5)
                    if len(url_dict[d_md5]) > PACKAGE_SIZE:
                        preload_worker_new.dispatch.delay(url_dict.pop(d_md5))

                else:
                    url_dict.setdefault(r.get('channel_code'), []).append(r)
                    if len(url_dict.get(r.get('channel_code'),
                                        {})) > PACKAGE_SIZE:
                        preload_worker_new.dispatch.delay(
                            url_dict.pop(r.get('channel_code')))
            logger.debug('commit_preload_timer_task url_dict: %s' % url_dict)
            for urls in list(url_dict.values()):
                preload_worker_new.dispatch.delay(urls)
                logger.debug(
                    'commit_preload_timer_task delay(urls): %s, type(urls): %s'
                    % (urls, type(urls)))
            logger.debug('commit_preload_timer_task delay(urls) finished!')
    except Exception:
        logger.error('commit_preload_timer_task error: %s' %
                     traceback.format_exc())
        return
def put_data_to_redis(result):
    """
    Assemble the documents from the query cursor and put them into Redis,
    which holds two timer queues.
    :param result: cursor returned by the database query
    :return:
    """
    if result:
        try:
            pip = redis_preload_timer.pipeline()
            batch_size = 1000
            count = 0
            id_list = []
            for res in result:
                id = res.get('_id', None)
                start_time = res.get('start_time', None)
                if id and start_time:
                    timestamp_start_time = datetime_convert_timestamp(start_time)
                    timestamp_now = datetime_convert_timestamp(datetime.now())
                    timestamp_diff = timestamp_start_time - timestamp_now
                    if timestamp_diff < 60:
                        id_list.append(ObjectId(id))
                    elif timestamp_diff >= 60 and timestamp_diff < 420:
                        logger.debug('preload_url_timer_pull put_data_to_redis, put to redis 1 key:%s' % id)
                        # set in redis
                        pip.set("timer_1_" + str(id), timestamp_start_time)
                    else:
                        logger.debug('preload_url_timer_pull put_data_to_redis, put to redis 7 key:%s' % id)
                        # set in redis
                        pip.set("timer_7_" + str(id), timestamp_start_time)
                    count += 1
                    if not count % batch_size:
                        pip.execute()
                        count = 0
            # send the last batch
            pip.execute()
            if id_list:
                logger.debug('preload_url_timer_pull put_data_to_redis id_list: %s' % id_list)
                commit_preload_timer_task.delay(id_list)
        except Exception:
            logger.debug("operator error trace:%s" % traceback.format_exc())
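The bucketing above reduces to a small pure function; a sketch for clarity, with the thresholds copied from the code (under 60s is committed immediately, 60-420s goes to the "timer_1_" keys, anything later to "timer_7_"):

def timer_bucket(seconds_until_start):
    # classify a preload task by how far away its start time is
    if seconds_until_start < 60:
        return 'run_now'
    elif seconds_until_start < 420:
        return 'timer_1_'
    return 'timer_7_'

assert timer_bucket(30) == 'run_now'
assert timer_bucket(300) == 'timer_1_'
assert timer_bucket(3600) == 'timer_7_'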
Example #22
def dispatch(tasks):
    '''
    Distribute certificates
    [{'key': xx, 'cert': xx, 'seed': xxx, 'dir': xxx, 'send_devs': 'all_hpcc', 'task_id': xxx}]
    '''
    try:
        logger.debug("dispatch cert trans  begin %s" % len(tasks))
        logger.debug("dispatch cert trans  begin task_ids %s" %
                     [i['_id'] for i in tasks])
        dev_id, devs, devs_dict = init_cert_devs(tasks[0]['send_devs'])
        logger.debug("cert_devs dev_id %s, devs len %s" % (dev_id, len(devs)))
        save_tasks = []
        for task in tasks:

            task['dev_id'] = dev_id

            save_task = {}
            save_task['_id'] = ObjectId(task['_id'])
            save_task['status'] = "PROGRESS"
            save_task['dev_id'] = dev_id
            save_task['username'] = task.get('username')
            save_task['user_id'] = task.get('user_id')
            save_task['c_id'] = task.get('c_id', '')
            save_task['o_c_id'] = task.get('o_c_id', '')
            save_task['cert_alias'] = task.get('cert_alias', '')
            save_task['send_devs'] = task.get('send_devs', '')
            save_task['created_time'] = task.get('created_time')
            save_task['recev_host'] = task.get('recev_host')
            save_task['worker_host'] = WORKER_HOST
            save_task['rcms_callback_time'] = ''
            save_task['portal_callback_time'] = ''
            save_tasks.append(save_task)
            make_result_cache(task['_id'], devs_dict)

        s1_db.cert_trans_tasks.insert(save_tasks)
        worker(tasks, devs)
        logger.debug("dispatch cert trans end")
    except Exception:
        logger.debug("dispatch cert trans error:%s " % traceback.format_exc())
Example #23
 def __init__(self):
     self.dev_id = ObjectId()
     self.db_dev = {}
     self.basic_info = {}
     self.last_basic_info_reload_time = 0
     self.use_old = config.getboolean("success_definition_strategy",
                                      "use_old")
     self.basic_info_file_path = config.get('success_definition_strategy',
                                            'basic_info_file')
     self.basic_info_reload_interval = config.get(
         'success_definition_strategy', 'basic_info_reload_interval')
     self.isp_priority_list = config.get('success_definition_strategy',
                                         'isp_priority').split(',')
     self.isp_priority_list = [
         item.strip().upper() for item in self.isp_priority_list
     ]
     self.region_priority_list = config.get('success_definition_strategy',
                                            'region_priority').split(',')
     self.region_priority_list = [
         item.strip().upper() for item in self.region_priority_list
     ]
     self.fail_device_list = []
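A sketch of the [success_definition_strategy] section this constructor reads, with illustrative values only (the real config file, and the config object it is loaded into, are not part of this listing):

import configparser
import io

sample = io.StringIO(u"""
[success_definition_strategy]
use_old = false
basic_info_file = /path/to/basic_info.json
basic_info_reload_interval = 300
isp_priority = ct, cnc, cmcc
region_priority = east, north, south
""")
config = configparser.ConfigParser()
config.read_file(sample)
print(config.get('success_definition_strategy', 'isp_priority').split(','))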
Example #24
def cert_query_trans():
    '''
    Dispatch a certificate query task
    '''
    try:
        s1_db = database.s1_db_session()
        data = json.loads(request.data)
        logger.debug('cert_query_trans post data %s' % (data))
        data_username = data.get('username', 'chinacache')
        data_info = data['info']
        query_ip = data_info.get('ip', '')
        query_path = data_info.get('path', '')
        query_config_path = data_info.get('config_path', '')
        query_cert_type = data_info.get('cert_type', '')
        query_type = data_info.get('query_type', '')
        query_cert_name = data_info.get('cert_name', '')
        query_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        if not query_ip:
            raise QueryipError('not input ip')
        if not query_path:
            raise QuerypathError('not input path')
        if not query_config_path:
            raise QueryconpathError('not input config path')
        if not query_cert_name:
            raise QuerycertnameError('not input cert name')
        if not query_cert_type:
            raise QuerycerttypeError('not input cert type')
        devices_list = []
        for q_ip in query_ip:
            q_ip_type = tools.judge_dev_ForH_byip(q_ip)
            if q_ip_type != 'HPCC':
                devices_list.append(q_ip)
        if devices_list:
            raise QuerydevicesError("%s isn't an HPCC device" %
                                    '  '.join(devices_list))
        q_id = s1_db.cert_query_info.insert({
            'cert_type': query_cert_type,
            'cert_name': query_cert_name,
            'path': query_path,
            'config_path': query_config_path,
            'created_time': datetime.datetime.now(),
            'username': data_username
        })
        task = {}
        task['_id'] = str(ObjectId())
        task['query_dev_ip'] = tools.sortip(query_ip)
        logger.debug('cert_query_trans query_dev_ip %s' %
                     (task['query_dev_ip']))
        task['dev_ip_md5'] = tools.md5(json.dumps(task['query_dev_ip']))
        logger.debug('cert_query_trans dev_ip_md5 %s' % (task['dev_ip_md5']))
        task['q_id'] = str(q_id)
        task['username'] = data_username
        task['query_path'] = query_path
        task['query_cert_name'] = query_cert_name
        task['query_cert_type'] = query_cert_type
        task['query_config_path'] = query_config_path
        task['created_time'] = query_time
        queue.put_json2('cert_query_task', [task])
        return jsonify({
            'code': 200,
            'task_id': task['_id'],
            'cert_query_id': task['q_id']
        })

    except QueryipError as ex:
        return jsonify({"code": 520, "msg": ex.__str__()})
    except QuerypathError as ex:
        return jsonify({"code": 521, "msg": ex.__str__()})
    except QueryconpathError as ex:
        return jsonify({"code": 522, "msg": ex.__str__()})
    except QuerycertnameError as ex:
        return jsonify({"code": 523, "msg": ex.__str__()})
    except QuerycerttypeError as ex:
        return jsonify({"code": 524, "msg": ex.__str__()})
    except QuerydevicesError as ex:
        return jsonify({"code": 525, "msg": ex.__str__()})
    except Exception as e:
        logger.debug('/internal/cert/query error')
        logger.debug(traceback.format_exc())
        logger.debug(e.__str__())
        return jsonify({"code": 500, "msg": "The schema of request is error."})
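A sketch of the POST body this endpoint expects, assembled from the fields it reads above (all values illustrative):

payload = {
    'username': 'chinacache',
    'info': {
        'ip': ['10.0.0.1', '10.0.0.2'],
        'path': '/usr/local/ssl/certs',
        'config_path': '/usr/local/ssl/conf',
        'cert_type': 'nginx',
        'query_type': 'cert',
        'cert_name': 'example.com',
    },
}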
Example #25
def process(db, refresh_task, check_overload=False):
    '''
    Process a refresh task

    Parameters
    ----------
    db : database handle
    refresh_task : the refresh task to process
    check_overload : whether to check for quota overage

    Returns
    -------
    '''
    request_id = ObjectId()
    message = {}
    refresh_task['r_id'] = request_id

    Exceed = False
    try:
        logger.debug("process refresh_task:%s %s %s " %
                     (refresh_task['r_id'], refresh_task['username'],
                      refresh_task['urls'] if 'urls' in list(
                          refresh_task.keys()) else refresh_task['dirs']))
        if check_overload:
            url_overload = getOverload(refresh_task.get("username"),
                                       'URL')  # returns the remaining quota directly
            dir_overload = getOverload(refresh_task.get("username"), 'DIR')
            url_length = len(
                refresh_task.get("urls") if refresh_task.get("urls") else []
            ) + len(
                refresh_task.get("update_urls") if refresh_task.
                get("update_urls") else [])
            dir_length = len(
                refresh_task.get("dirs") if refresh_task.get("dirs") else [])
            logger.debug('url_length:%s dir_length:%s' %
                         (str(url_length), str(dir_length)))
            if (url_length > 0 and url_overload > 0):
                message['urlExceed'] = url_length
                refresh_task['urls'] = []
                refresh_task['update_urls'] = []
                setCounterCache(refresh_task, url_length, 'URL')
                logger.debug('process error ! refresh_task :%s,url:%s' %
                             (refresh_task['r_id'], url_overload))

            if (dir_length > 0 and dir_overload > 0):
                message['dirExceed'] = dir_length
                refresh_task['dirs'] = []
                setCounterCache(refresh_task, dir_length, 'DIR')
                logger.debug('process error ! refresh_task :%s,dir:%s ' %
                             (refresh_task['r_id'], dir_overload))

            if message.get("urlExceed") or message.get("dirExceed"):
                return message

            message["r_id"] = str(request_id)

            urls, invalid_urls = channel_verify(refresh_task)
            if invalid_urls:
                message['invalids'] = invalid_urls
            setOveload(refresh_task, urls)
            if len(refresh_task.get("urls") or []) > 0 \
                    or len(refresh_task.get("dirs") or []) > 0 \
                    or len(refresh_task.get("update_urls") or []) > 0 \
                    or len(refresh_task.get("purge_dirs") or []) > 0:
                # add by rubin 2017-9-12  sync
                web_task = refresh_task.get('web_task')
                if web_task:
                    web_task['r_id'] = message.get('r_id')
                # add end
                submit.delay(refresh_task, urls)
        else:
            message["r_id"] = str(request_id)
            # add by rubin 2017-9-12
            web_task = refresh_task.get('web_task')
            if web_task:
                web_task['r_id'] = message.get('r_id')
            # add end
            urls, invalid_urls = channel_verify(refresh_task)
            if invalid_urls:
                message['invalids'] = invalid_urls
            setOveload(refresh_task, urls)
            submit.delay(refresh_task, urls)
    except Exception as e:
        logger.error(traceback.format_exc())
        logger.error('process error! refresh_task :%s ' % refresh_task)
        raise InternalServerError(str(e))

    if message.get('invalids'):
        logger.debug('process error ! refresh_task :%s,invalid_urls:%s ' %
                     (refresh_task['r_id'], len(invalid_urls)))
    return message
Example #26
def process(db, refresh_task, check_overload=False):
    '''
    Process a refresh task

    Parameters
    ----------
    db : database handle
    refresh_task : the refresh task to process
    check_overload : whether to check for quota overage

    Returns
    -------
    '''
    request_id = ObjectId()
    message = {}
    refresh_task['r_id'] = request_id

    Exceed = False
    try:
        logger.debug("process refresh_task:%s %s %s " %
                     (refresh_task['r_id'], refresh_task['username'],
                      refresh_task['urls'] if 'urls' in refresh_task.keys()
                      else refresh_task['dirs']))
        if check_overload:
            url_overload = getOverload(refresh_task.get("username"),
                                       'URL')  # returns the remaining quota directly
            dir_overload = getOverload(refresh_task.get("username"), 'DIR')
            url_length = len(
                refresh_task.get("urls") if refresh_task.get("urls") else []
            ) + len(
                refresh_task.get("update_urls") if refresh_task.
                get("update_urls") else [])
            dir_length = len(
                refresh_task.get("dirs") if refresh_task.get("dirs") else [])
            logger.error('url_length:%s dir_length:%s' %
                         (str(url_length), str(dir_length)))
            if (url_length > 0 and url_overload > 0):
                message['urlExceed'] = url_length
                refresh_task['urls'] = []
                refresh_task['update_urls'] = []
                setCounterCache(refresh_task, url_length, 'URL')
                logger.error('process error ! refresh_task :%s,url:%s' %
                             (refresh_task['r_id'], url_overload))

            if (dir_length > 0 and dir_overload > 0):
                message['dirExceed'] = dir_length
                refresh_task['dirs'] = []
                setCounterCache(refresh_task, dir_length, 'DIR')
                logger.error('process error ! refresh_task :%s,dir:%s ' %
                             (refresh_task['r_id'], dir_overload))

            if message.get("urlExceed") or message.get("dirExceed"):
                return message

            message["r_id"] = str(request_id)

            urls, invalid_urls = channel_verify(refresh_task)
            # add autodesk
            list_time = []
            timestamp_now = time.time()
            for url_t in urls:
                time_t = get_remaind_time(url_t.get('url'))
                list_time.append(time_t)
                url_t['executed_end_time'] = datetime.fromtimestamp(
                    timestamp_now + time_t)
                url_t['executed_end_time_timestamp'] = timestamp_now + time_t
            logger.debug(
                'splitter_autodesk process type list_time:%s, list_time:%s' %
                (type(list_time), list_time))
            # origin remain_time
            message['remain_time'] = get_max_time(list_time)
            # add autodesk end
            # temporary autodesk workaround: add a remain_time field
            # message['remain_time'] = 1800

            if invalid_urls:
                message['invalids'] = invalid_urls
            setOveload(refresh_task, urls)
            # The plan originally targeted 600-1400; for safety it was changed to 1340-1400 (420 + 1400 > 1800)
            # remain_time_failed_timestamp = timestamp_now + message['remain_time'] + random.randint(1340, 1400) - 20
            # http://ccwf.chinacache.com/browse/DEMAND-2386   remain time 1440   start count_down
            remain_time_failed_timestamp = timestamp_now + 1440
            logger.debug(
                "process 1 remain_time_failed_timestamp:%s, timestamp_now:%s, request_id:%s"
                % (remain_time_failed_timestamp, timestamp_now, request_id))
            if len(
                    refresh_task.get("urls") if refresh_task.
                    get("urls") else []) > 0 or len(
                        refresh_task.get("dirs") if refresh_task.
                        get("dirs") else []) > 0 or len(
                            refresh_task.get("update_urls") if refresh_task.
                            get("update_urls") else []) > 0:
                submit.delay(
                    refresh_task,
                    urls,
                    executed_end_time_timestamp=timestamp_now +
                    message['remain_time'],
                    remain_time_return_timestamp=timestamp_now + 1800,
                    remain_time_failed_timestamp=remain_time_failed_timestamp)
        else:
            message["r_id"] = str(request_id)
            urls, invalid_urls = channel_verify(refresh_task)
            # add new start
            list_time = []
            timestamp_now = time.time()
            for url_t in urls:
                time_t = get_remaind_time(url_t.get('url'))
                logger.debug(
                    'splitter_autodesk process type:%s, content time_t:%s' %
                    (type(time_t), time_t))
                list_time.append(time_t)
                url_t['executed_end_time'] = datetime.fromtimestamp(
                    timestamp_now + time_t)
                url_t['executed_end_time_timestamp'] = timestamp_now + time_t
            logger.debug(
                'splitter_autodesk process type list_time:%s, list_time:%s' %
                (type(list_time), list_time))

            message['remain_time'] = get_max_time(list_time)
            # add new end

            if invalid_urls:
                message['invalids'] = invalid_urls
            setOveload(refresh_task, urls)
            # Interim plan: if the first dispatch fails, re-evaluate the remaining time
            # The plan originally targeted 600-1400; for safety it was changed to 1340-1400
            # remain_time_failed_timestamp = timestamp_now + message['remain_time'] + random.randint(1340, 1400) - 20
            # http://ccwf.chinacache.com/browse/DEMAND-2386   remain time 1440   start count_down
            remain_time_failed_timestamp = timestamp_now + 1440
            logger.debug(
                "process 2 remain_time_failed_timestamp:%s, timestamp_now:%s, request_id:%s"
                % (remain_time_failed_timestamp, timestamp_now, request_id))
            submit.delay(
                refresh_task,
                urls,
                executed_end_time_timestamp=timestamp_now +
                message['remain_time'],
                remain_time_return_timestamp=timestamp_now + 1800,
                remain_time_failed_timestamp=remain_time_failed_timestamp)
    except Exception as e:
        logger.error(traceback.format_exc())
        logger.error('process error! refresh_task :%s ' % refresh_task)
        raise InternalServerError(str(e))
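The per-url deadline bookkeeping above reduces to: stamp each url with now plus its own remaining time, then give the whole request a fixed 1440s failure deadline (per DEMAND-2386). A condensed sketch, with a hypothetical get_remaind_time() result:

import time
from datetime import datetime

timestamp_now = time.time()
time_t = 600  # hypothetical per-url remaining time, in seconds
executed_end_time = datetime.fromtimestamp(timestamp_now + time_t)
remain_time_failed_timestamp = timestamp_now + 1440
print(executed_end_time, remain_time_failed_timestamp)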
Example #27
def save_result(data_list, remote_ip):
    '''
    Store the reported status
    '''
    logger.debug('save_result begin data_list %s remote_ip %s' %
                 (data_list, remote_ip))
    now = datetime.datetime.now()
    for data in data_list:

        logger.debug('data is %s' % data)

        task_id = data.get('task_id', '')
        task_id = task_id.replace('\n', '')
        task_id = task_id.replace('"', '')
        status = int(data.get('status', 0))
        query_result = data.get('info', '')
        res_cache_key = '%s_res' % (task_id)
        logger.debug('save_result task_id %s  status:%s remote_ip %s' %
                     (task_id, status, remote_ip))
        dev_num_cache_key = '%s_res_dev_num' % (task_id)
        failed_cache_key = '%s_res_failed' % (task_id)
        success_cache_key = '%s_res_success' % (task_id)
        if not task_id:
            logger.debug('save_result not task_id return %s' % (remote_ip))
            continue
        cache_key = '%s_res' % (task_id)
        info_str = CERT_QUERY_CACHE.hget(res_cache_key, remote_ip)
        logger.debug('save_result hget cache_key %s  ,remote_ip %s' %
                     (res_cache_key, remote_ip))

        if not info_str:
            logger.debug('save_result no cache id %s remote_ip %s' %
                         (task_id, remote_ip))
            continue
        info = json.loads(info_str)
        logger.debug('info is %s' % info)
        if info['result_status'] == 200:
            logger.debug('save_result had 200 id %s remote_ip %s' %
                         (task_id, remote_ip))
            continue
        info['update_time'] = time.time()
        info['result_status'] = status
        info['result'] = query_result
        logger.debug('save_result to save redis  info is %s remote_ip is %s' %
                     (info, remote_ip))
        CERT_QUERY_CACHE.hset(res_cache_key, remote_ip, json.dumps(info))
        logger.debug('save_result to save redis  remote_ip is %s over' %
                     (remote_ip))
        if status == 200:
            before_success_num = CERT_QUERY_CACHE.scard(success_cache_key)
            logger.debug(
                'save_result add count success remote_ip %s before_success_num %s'
                % (remote_ip, before_success_num))
            CERT_QUERY_CACHE.sadd(success_cache_key, remote_ip)
        else:
            CERT_QUERY_CACHE.sadd(failed_cache_key, remote_ip)
            logger.debug('save_result add count failed remote_ip %s' %
                         (remote_ip))

        success_num = CERT_QUERY_CACHE.scard(success_cache_key)
        failed_num = CERT_QUERY_CACHE.scard(failed_cache_key)
        all_dev_num = CERT_QUERY_CACHE.get(dev_num_cache_key)

        logger.debug(
            'save_result remote_ip %s success_num %s, failed_num %s, all_count %s'
            % (remote_ip, success_num, failed_num, all_dev_num))
        if int(success_num) == int(all_dev_num):
            logger.debug('success_num is %s' % success_num)
            # all devices succeeded
            logger.debug('task_id is %s' % task_id)
            #set_finished(task_id, 'FINISHED',query_result)
            set_finished(task_id, 'FINISHED')
        elif int(failed_num) == int(all_dev_num):
            #TODO
            task_info = s1_db.cert_query_tasks.find_one(
                {'_id': ObjectId(task_id)})
            error_list = get_error_dev_result(task_info)
            #send_error_email(task_info, error_list)
            #set_finished(task_id, 'FAILED',query_result)
            set_finished(task_id, 'FAILED')
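A sketch of the data_list payload this handler consumes, assembled from the fields it reads above (values illustrative):

data_list = [{
    'task_id': '5a0b1c2d3e4f5a6b7c8d9e0f',
    'status': 200,
    'info': 'queried certificate details',
}]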
Example #28
        try:
            devs += rcmsapi.getGrayDevices(url.get("channel_code"))
        except Exception:
            logger.debug(traceback.format_exc())

        db_device = {
            "devices": verify.create_dev_dict(devs),
            "unprocess": len(devs),
            "created_time": datetime.now(),
            "_id": self.dev_id
        }
        # dev_id = db.device.insert(db_device)
        url["dev_id"] = self.dev_id
        # db_update(db.url, {"_id": ObjectId(url.get("id"))}, {"$set": {"dev_id": self.dev_id}})
        db_update(db.url, {"_id": ObjectId(url.get("id"))}, {
            "$set": {
                "dev_id": self.dev_id,
                "worker_host": worker_hostname,
                "recev_host": url.get("recev_host", "")
            }
        })
        logger.debug(
            "dir_init_db_device succeeded, url_id: %s, dev_id: %s" %
            (url.get("id"), self.dev_id))
        return db_device

Example #29
def transfer_portal_expired_cert():
    '''
    Transfer expired certificates (portal)
    '''
    try:
        s1_db = database.s1_db_session()
        data = json.loads(request.data)
        cer_id_str = data.get('cert_ids', '')
        # s_name = data.get('save_name', '')  # certificate name
        username = data.get('username', 'portal')
        transfer_dev = data.get('transfer_dev', [''])  # cache devices to transfer the certificate to
        dev_type = data.get('dev_type', 'all_dev')  # device type for the transfer
        c_o_path = config.get('app', 'o_path')
        c_d_path = config.get('app', 'd_path')

        o_path = data.get('o_path', c_o_path)
        d_path = data.get('d_path', c_d_path)
        # o_path = data.get('o_path', '')
        # d_path = data.get('d_path', '')
        transfer_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        #logger.debug("s_name is %s"%s_name)
        logger.debug("username is %s" % username)
        logger.debug("o_path is %s" % o_path)
        logger.debug("d_path is %s" % d_path)
        logger.debug("dev_type is %s" % dev_type)
        if len(transfer_dev
               ) <= 1 and transfer_dev[0] == '' and dev_type != 'all_dev':
            raise TransferdevError('not input ip')

        #array_s_name = s_name.split(',')
        array_s_name = []
        cer_id_list = cer_id_str.split(',')
        if not cer_id_list:
            return jsonify({"code": 504, "msg": 'please push cert id'})
        for cert_id in cer_id_list:
            cert_id_objectid = ObjectId(cert_id)
            cert_detail_one = s1_db.cert_detail.find_one(
                {'_id': cert_id_objectid})
            if not cert_detail_one:
                return jsonify({"code": 504, "msg": 'this id not exist'})
            array_s_name.append(cert_detail_one['save_name'])
        if not array_s_name:
            return jsonify({"code": 504, "msg": 'please push cert id'})

        db.transfer_certs_detail.ensure_index('save_name', unique=True)

        #cret_id_list = []
        # cert_not_find = []
        # for save_name in array_s_name:
        #     info_cert = s1_db.cert_detail.find_one({'save_name': save_name})
        #     if not info_cert:
        #         cert_not_find.append(save_name)
        #     else:
        #         cret_id_list.append(str(info_cert['_id']))
        # if cert_not_find:
        #     return jsonify({"code": 504, "msg": 'Certificate does not exist, please check the name of the certificate %s'%(cert_not_find)})
        #
        #status,message = check_cert_cms(cret_id_list)

        status, message = check_cert_cms(cer_id_list)
        if status == False:
            return jsonify({"code": 504, "msg": message})

        task = {}
        task['_id'] = str(ObjectId())
        if dev_type == 'all_dev' or transfer_dev == 'all_hpcc':
            task['send_dev'] = 'all_dev'
            task['send_dev_md5'] = tools.md5(task['send_dev'])
        else:
            task['send_dev'] = tools.sortip(transfer_dev)
            logger.debug(task['send_dev'])
            devices_list = []
            for q_ip in task['send_dev']:
                q_ip_type = tools.judge_dev_ForH_byip(q_ip)
                if q_ip_type != 'HPCC':
                    devices_list.append(q_ip)
            if devices_list:
                raise QuerydevicesError("%s isn't an HPCC device" %
                                        '  '.join(devices_list))
            task['send_dev_md5'] = tools.md5(json.dumps(task['send_dev']))
            logger.debug(task['send_dev_md5'])

        try:
            status = cert_cms_delete(cer_id_list)
            if status:
                portal_status = cert_portal_delete(cer_id_list)
                if portal_status != True:
                    return jsonify({'code': 504, 'msg': 'portal delete error'})
            else:
                return jsonify({'code': 504, 'msg': 'cms delete error'})
        except Exception:
            logger.error('callback error %s' % (traceback.format_exc()))
            return jsonify({
                'code': 504,
                'msg': 'delete error%s' % (traceback.format_exc())
            })

        for save_name in array_s_name:
            info = s1_db.cert_detail.find_one({'save_name': save_name})

            datestr = int(time.mktime(datetime.datetime.now().timetuple()))
            change_name = "{}{}{}{}".format("trans_", username,
                                            info.get('cert_alias'), datestr)
            #db.transfer_certs_detail.ensure_index('save_name', unique=True)
            t_id = s1_db.transfer_certs_detail.insert({
                'save_name': save_name,
                'o_path': o_path,
                'd_path': d_path,
                'created_time': datetime.datetime.now(),
                'username': username
            })
            #db.transfer_certs_detail.ensure_index('save_name', unique=True)
            s1_db.cert_detail.update(
                {'save_name': save_name},
                {"$set": {
                    "t_id": t_id,
                    "cert_alias": change_name
                }})
            #if not info:
            #    raise CertNotFoundError()

        task['t_id'] = str(t_id)
        task['username'] = username
        task['o_path'] = o_path
        task['d_path'] = d_path
        task['save_name'] = ','.join(array_s_name)  #s_name
        task['created_time'] = transfer_time
        logger.debug('transfer cert task {}'.format([task]))
        queue.put_json2('transfer_cert_task', [task])
        #res ={'code': 200, 'cert_id': str(info.get('_id'))}
        res = {'code': 200, 'msg': 'ok'}
        return jsonify(res)
    except CertNotFoundError as ex:
        return jsonify({"code": 504, "msg": "The certificate does not exist"})
    except TransferdevError as ex:
        return jsonify({"code": 524, "msg": ex.__str__()})
    except QuerydevicesError as ex:
        return jsonify({"code": 525, "msg": ex.__str__()})
    except Exception as e:
        logger.debug('/transfer_expired_cert error')
        logger.debug(traceback.format_exc())
        logger.debug(e.__str__())
        return jsonify({"code": 500, "msg": "The schema of request is error."})
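A sketch of the POST body this endpoint expects, from the fields it reads above (all values illustrative; cert_ids is a comma-separated string of cert_detail ids):

payload = {
    'cert_ids': '5a0b1c2d3e4f5a6b7c8d9e0f,5a0b1c2d3e4f5a6b7c8d9e10',
    'username': 'portal',
    'transfer_dev': ['10.0.0.1'],
    'dev_type': 'hpcc',
    'o_path': '/origin/path',
    'd_path': '/dest/path',
}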
Example #30
def make_task(data, _type='portal'):
    '''
    Build a certificate task
    '''
    if _type == 'portal':
        s1_db = database.s1_db_session()
        username = data.get('username', 'chinacache')
        user_id = data.get('user_id', 2275)
        p_key = data.get('p_key', '')
        cert = data.get('cert', '')
        r_cert = data.get('r_cert', '')
        cert_alias = data.get('cert_name', 'chinacache-cert')
        seed = cert_trans_worker.get_custom_seed(username)

        is_rigid = data.get('rigid', False)

        try:
            cert_sum = s1_db.cert_detail.find({
                "username": username,
                'cert_alias': cert_alias
            }).count()
            if cert_sum >= 1:
                raise CertAliasError('cert alias has already existed')
        except Exception:
            logger.debug(traceback.format_exc())
            raise
        # private key
        try:
            p_key_cip, p_key_sign = rsa_tools.split_ciphertext_and_sign(p_key)
            p_key = rsa_tools.decrypt_trunk(p_key_cip,
                                            rsa_tools.bermuda_pri_key)
            try:
                rsa_tools.verify_sign(p_key_sign, rsa_tools.portal_pub_key,
                                      p_key)
            except Exception:
                logger.debug('---p_key--- verify_sign error---')
                logger.debug(traceback.format_exc())
                raise CertDecryptError('Private key of verify_sign error')

        except Exception:
            logger.debug('---p_key--- decrypt error---')
            logger.debug(traceback.format_exc())
            raise CertDecryptError('Private key of decryption error')

        # certificate
        try:
            cert_cip, cert_sign = rsa_tools.split_ciphertext_and_sign(cert)
            cert = rsa_tools.decrypt_trunk(cert_cip, rsa_tools.bermuda_pri_key)
            try:
                rsa_tools.verify_sign(cert_sign, rsa_tools.portal_pub_key,
                                      cert)
            except Exception:
                logger.debug('---cert--- verify_sign error---')
                logger.debug(traceback.format_exc())
                raise CertDecryptError('Cert of verify_sign error')
            # strip the extra trailing newline
            cert = cert.strip('\n')

        except Exception:
            logger.debug('---cert--- decrypt error---')
            logger.debug(traceback.format_exc())
            raise CertDecryptError('Cert key of decryption error')

        if r_cert:
            # root certificate
            try:
                r_cert_cip, r_cert_sign = rsa_tools.split_ciphertext_and_sign(
                    r_cert)
                r_cert = rsa_tools.decrypt_trunk(r_cert_cip,
                                                 rsa_tools.bermuda_pri_key)
                try:
                    rsa_tools.verify_sign(r_cert_sign,
                                          rsa_tools.portal_pub_key, r_cert)
                except Exception:
                    logger.debug('---r_cert--- verify_sign error---')
                    logger.debug(traceback.format_exc())
                    raise CertDecryptError('R_cert of verify_sign error')
                # strip the extra trailing newline
                r_cert = r_cert.strip('\n')

            except Exception:
                logger.debug('---r_cert--- decrypt error---')
                logger.debug(traceback.format_exc())
                raise CertDecryptError('R_cert key of decryption error')

        # is the private key PKCS#1?
        if 'BEGIN PRIVATE KEY' in p_key:
            raise CertPrikeyTypeError('RSA Private Key must be (PKCS#1)')

        # merge the submitted content
        all_cert = cert
        if r_cert:
            all_cert = cert + '\n' + r_cert

        if cert_tools.crt_number(all_cert) < 1:
            raise CertInputError('Certificate must be more content')

        middle_cert_lack = False
        if cert_tools.crt_number(all_cert) < 2:
            '''
            Check whether an intermediate certificate is present
            '''
            middle_cert_lack = True
        if middle_cert_lack and is_rigid:
            raise CertNoMiddle('Please upload intermediate certificate ')
        all_cert_checked = cert_tools.get_all_chain(all_cert)
        if not all_cert_checked:
            raise CertPathError('The certificate path does not match')
        if all_cert_checked == 1:
            raise CertNoRoot('The match root certificate fails')
        if all_cert_checked in [2, 3]:
            raise CertNoMiddle('The matching intermediate certificate fails')

        cert_last = cert_tools.get_cert(all_cert_checked)[0]

        # check that the leaf certificate and private key match
        if not cert_tools.check_consistency(cert_last, p_key):
            raise CertPrikeyError(
                'Certificates and private keys does not match')

        # check whether the leaf certificate has been revoked
        crl_info = cert_tools.get_crl(cert_last)
        if crl_info:
            crl_object = cert_tools.get_crl_object(crl_info)
            if crl_object:
                if cert_tools.get_revoke(cert_last, crl_object):
                    raise CertRevokeError('Certificate has been revoked')

        # check whether the leaf certificate has expired
        if cert_tools.is_expire(cert_last):
            raise CertExpireError('Certificate Is Expired')

        # parse the leaf certificate's key fields
        cert_last_subject = cert_tools.get_subject(cert_last)
        cert_last_issuer = cert_tools.get_issuer(cert_last)
        cert_last_validity = cert_tools.get_Validity(cert_last)
        cert_last_pubkey = cert_tools.get_public_key(cert_last)
        cert_last_DNS = cert_tools.get_DNS(cert_last)

        end_time = cert_tools.make_validity_to_China(
            cert_last_validity)['end_time']
        end_time_obj = datetime.datetime.strptime(end_time, '%Y%m%d%H%M%S')
        end_time_name = end_time_obj.strftime('%Y-%m-%d-%H')
        now_time_name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

        if cert_last_DNS:
            save_dns_name = cert_last_DNS[0]
        else:
            save_dns_name = cert_last_subject['CN']

        # replace the wildcard '*' in the domain name
        save_dns_name = save_dns_name.replace('*', '_')
        save_name = '%s-%s-%s' % (end_time_name, save_dns_name, now_time_name)

        # check whether the save name conflicts
        if s1_db.cert_detail.find_one({'save_name': save_name}):
            raise CertSaveNameError('The certificate had same save name')

        # encrypt the certificate & private key

        try:
            p_key_e = rsa_tools.fun(p_key, rsa_tools.cache_pub_key,
                                    rsa_tools.bermuda_pri_key, seed)
            all_cert_e = rsa_tools.fun(all_cert_checked,
                                       rsa_tools.cache_pub_key,
                                       rsa_tools.bermuda_pri_key, seed)
            #r_cert_e = rsa_tools.fun(r_cert,rsa_tools.cache_pub_key, rsa_tools.bermuda_pri_key, seed)
        except Exception:
            logger.debug('---make_task--- encrypt error---')
            logger.debug(traceback.format_exc())
            raise

        #origin cert
        o_c_id = s1_db.cert_origin.insert({
            'cert_origin': cert,
            'cert_origin_r': r_cert,
            'cert_all': all_cert_checked,
            'created_time': datetime.datetime.now()
        })

        #cert
        c_id = s1_db.cert_detail.insert({
            'cert': all_cert_e,
            'p_key': p_key_e,
            'username': username,
            'seed': seed,
            'o_c_id': o_c_id,
            'cert_alias': cert_alias,
            'save_name': save_name,
            'subject': cert_last_subject,
            'issuer': cert_last_issuer,
            'validity': cert_last_validity,
            'pubkey': cert_last_pubkey,
            'DNS': cert_last_DNS,
            'user_id': user_id,
            'created_time': datetime.datetime.now(),
            'middle_cert_lack': middle_cert_lack
        })

        task = {
            '_id': str(ObjectId()),
            'middle_cert_lack': middle_cert_lack,
            'username': username,
            'p_key': p_key_e,
            'cert': all_cert_e,
            'created_time':
            datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'c_id': str(c_id),
            'send_devs': 'all_hpcc',
            'cert_alias': cert_alias,
            'o_c_id': str(o_c_id),
            'op_type': "add",
            'seed': seed,
            's_name': save_name,
            's_dir': HPCC_SAVE_DIR,
            'user_id': user_id,
            'recev_host': RECEV_HOST
        }

        return task
    else:
        raise ValueError('unsupported task type: %s' % _type)
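Usage sketch with hypothetical placeholder inputs; real p_key/cert values must be the ciphertext-plus-signature bundles produced by the portal's rsa_tools counterpart, so these placeholders would fail decryption:

data = {
    'username': 'demo', 'user_id': 1234, 'cert_name': 'demo-cert',
    'p_key': '<ciphertext||sign>', 'cert': '<ciphertext||sign>', 'r_cert': '',
}
task = make_task(data, _type='portal')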