Esempio n. 1
0
        def bluk():
            """Mark every still-unresponsive agent as overtime and finalize the request.

            Inserts one overtime respone record per agent left in ``agents``
            (into cache when the async request expires, otherwise into the DB
            session), then writes the final status/result onto the request row
            and flushes/closes the session.
            """
            bulk_data = []
            for agent_id in agents:
                data = dict(request_id=request_id,
                            agent_id=agent_id,
                            agent_time=agent_time,
                            server_time=int(time.time()),
                            resultcode=manager_common.RESULT_OVER_FINISHTIME,
                            result='Agent respone overtime')
                bulk_data.append(data)
            responeutils.bluk_insert(
                storage=get_cache() if asynecrequest.expire else session,
                agents=agents,
                bulk_data=bulk_data,
                expire=asynecrequest.expire)

            if agents:
                query.update({
                    'status': manager_common.FINISH,
                    'resultcode': manager_common.RESULT_NOT_ALL_SUCCESS,
                    'result': '%d agent not respone' % len(agents)
                })
            else:
                # BUG FIX: the original did
                #     'all agent respone result' % len(agents)
                # i.e. %-formatting a string with no placeholder, which raises
                # TypeError exactly on the all-agents-responded success path.
                query.update({
                    'status': manager_common.FINISH,
                    'resultcode': manager_common.RESULT_SUCCESS,
                    'result': 'all agent respone result'
                })
            session.flush()
            session.close()
Esempio n. 2
0
 def delete(self, req, key, body=None):
     """Remove one cache entry previously created by ``create``.

     Only keys carrying the '<PREFIX>-caches' prefix are accepted, and
     wildcard keys are rejected so a caller cannot mass-delete entries.
     Returns a result payload echoing the deleted key.
     """
     required_prefix = '-'.join([self.PREFIX, 'caches'])
     if not key.startswith(required_prefix):
         raise InvalidArgument('Key prefix not match')
     if '*' in key:
         raise InvalidArgument('* in key!')
     get_cache().delete(key)
     return resultutils.results(result='Delete cache success', data=[key])
Esempio n. 3
0
 def response(self, req, request_id, body):
     """agent report respone api"""
     session = get_session()
     query = model_query(session,
                         AsyncRequest,
                         filter=AsyncRequest.request_id == request_id)
     asyncrequest = query.one()
     # Expiring requests keep their respones in cache; persistent ones in DB.
     if asyncrequest.expire:
         return responeutils.agentrespone(get_cache(), request_id, body)
     return responeutils.agentrespone(session, request_id, body)
Esempio n. 4
0
 def create(self, req, body=None):
     """Store *body* in cache under a generated, time-stamped key.

     The key is '<PREFIX>-caches-<epoch>-<6 random letters>' and expires
     after ``body['expire']`` seconds (default 30).  Returns a result
     payload containing the generated key.
     """
     body = body or {}  # BUG FIX: body defaults to None, which crashed on .get
     expire = int(body.get('expire') or 30)
     cache = get_cache()
     # string.ascii_lowercase instead of string.lowercase: the latter is
     # Python-2-only and locale-dependent; ascii_lowercase exists on both.
     salt = ''.join(random.sample(string.ascii_lowercase, 6))
     key = '-'.join([self.PREFIX, 'caches', str(int(time.time())), salt])
     if not cache.set(key,
                      jsonutils.dumps_as_bytes(body) if body else '',
                      ex=expire or manager_common.ONLINE_EXIST_TIME,
                      nx=True):
         # nx=True means the set fails if the key already exists
         raise CacheStoneError('Cache key value error')
     return resultutils.results(result='Make cache success', data=[key])
Esempio n. 5
0
 def _fetch_token_from_cache(self, token_id):
     """Load the token stored under *token_id*, raising if (nearly) expired."""
     store = api.get_cache()
     # Read the value and its remaining TTL atomically in one round trip.
     pipe = store.pipeline()
     pipe.multi()
     pipe.get(token_id)
     pipe.ttl(token_id)
     raw, ttl = pipe.execute()
     # Anything with less than 15 seconds left is treated as already expired.
     if not raw or ttl < 15:
         raise exceptions.TokenExpiredError(
             'Token has been expired drop from cache')
     return jsonutils.loads_as_bytes(raw)
Esempio n. 6
0
 def delete(self, req, token_id, checker=None):
     """Invalidate *token_id* and return the token payload it carried.

     For fernet tokens there is nothing to remove from storage: the token
     is self-contained, so it is only unpacked (and optionally checked).
     """
     if self._is_fernet(req):
         token = self.fernet_formatter.unpack(token_id)
         if checker:
             checker(token)
         return token
     if not token_id.startswith(self.AUTH_PREFIX):
         raise InvalidArgument('Token id prefix error')
     cache_store = api.get_cache()
     token = cache_store.get(token_id)
     if token:
         token = jsonutils.loads_as_bytes(token)
         if checker:
             checker(token)
         cache_store.delete(token_id)
     # NOTE: when the key is missing, the raw cache result (falsy) is returned.
     return token
Esempio n. 7
0
 def create(self, req, token, expire):
     """Issue a token id for *token*, valid for *expire* seconds."""
     if self._is_fernet(req):
         # Fernet: the expiry rides inside the token itself, no storage.
         token.update({'expire': expire + int(time.time())})
         token_id = self.fernet_formatter.pack(token)
     else:
         cache_store = api.get_cache()
         uid = str(uuidutils.generate_uuid()).replace('-', '')
         token_id = '-'.join([self.AUTH_PREFIX, uid])
         stored = cache_store.set(token_id,
                                  jsonutils.dumps_as_bytes(token),
                                  ex=expire,
                                  nx=True)
         if not stored:
             LOG.error('Cache token fail')
             raise exceptions.CacheStoneError('Set to cache store fail')
     req.environ[manager_common.TOKENNAME] = token
     return token_id
Esempio n. 8
0
 def show(self, req, key, body=None):
     """Return the cached value stored under *key*.

     Validates the key prefix and rejects wildcards, then fetches the
     entry; a missing/expired key yields an error result rather than an
     exception.
     """
     if not key.startswith('-'.join([self.PREFIX, 'caches'])):
         raise InvalidArgument('Key prefix not match')
     if '*' in key:
         raise InvalidArgument('* in key!')
     cache = get_cache()
     data = cache.get(key)
     if data is None:
         return resultutils.results(
             result='Get cache fail, key not exist or expired',
             resultcode=manager_common.RESULT_ERROR)
     if data:
         data = jsonutils.loads_as_bytes(data)
     # BUG FIX: the success message said 'Delete cache success' — copied
     # from the delete handler; this is the show/get handler.
     return resultutils.results(result='Get cache success',
                                data=[
                                    data,
                                ])
Esempio n. 9
0
 def expire(self, req, token_id, expire, checker=None):
     """Extend the lifetime of *token_id* by *expire* seconds.

     Returns ``(token_id, token)``; for fernet tokens the id changes,
     because the new expiry is repacked into the token itself.
     """
     if self._is_fernet(req):
         token = self.fernet_formatter.unpack(token_id)
         if checker:
             checker(token)
         expire = token.get('expire') + expire
         token.update({'expire': expire})
         token_id = self.fernet_formatter.pack(token)
         return token_id, token
     if not token_id.startswith(self.AUTH_PREFIX):
         raise InvalidArgument('Token id prefix error')
     cache_store = api.get_cache()
     raw = cache_store.get(token_id)
     if not raw:
         raise exceptions.TokenExpiredError('Token not exist now')
     token = jsonutils.loads_as_bytes(raw)
     if checker:
         checker(token)
     cache_store.expire(token_id, expire)
     return token_id, token
Esempio n. 10
0
def async_request(_request, agents=False, details=False):
    """this function just for route asynrequest show"""
    req_dict = {
        'request_id': _request.request_id,
        'request_time': _request.request_time,
        'finishtime': _request.finishtime,
        'deadline': _request.deadline,
        'status': _request.status,
        'expire': _request.expire,
        'resultcode': _request.resultcode,
        'result': _request.result,
        'respones': [],
    }
    if _request.expire:
        req_dict['result'] += ',Data in cache,May miss some respone'
    if not agents:
        return req_dict
    if _request.expire:
        # Expiring requests keep their agent respones in cache, under keys
        # matching a per-request pattern.
        _cache = get_cache()
        pattern = targetutils.async_request_pattern(_request.request_id)
        keys = _cache.keys(pattern)
        if keys:
            for raw in (_cache.mget(*keys) or []):
                if not raw:
                    continue
                try:
                    respone = jsonutils.loads_as_bytes(raw)
                except (TypeError, ValueError):
                    # skip undecodable cache entries
                    continue
                if not details:
                    respone.pop('details', None)
                req_dict['respones'].append(respone)
    else:
        for agent_data in _request.respones:
            req_dict['respones'].append(agent(agent_data, details=details))
    return req_dict
Esempio n. 11
0
def map_resources(resource_ids):
    """Ensure every id in *resource_ids* has an entry in the process-local
    CDNRESOURCE map, invalidating stale local entries against the shared
    cache and reloading missing ones from the database.

    NOTE(review): CDNRESOURCE appears to be a TTL-aware dict shared across
    the process (it has expire()/expiretime()) — confirm its exact semantics
    against its definition.
    """
    # Drop already-expired entries from the local cache dict
    CDNRESOURCE.expire()

    need = set(resource_ids)
    provides = set(CDNRESOURCE.keys())
    notmiss = need & provides

    # Some requested resources are already in the process-local cache
    if notmiss:
        caches_time_dict = {}
        # Oldest cache timestamp among the locally-held resources
        time_point = int(time.time())
        for resource_id in notmiss:
            # When this resource was cached locally (expiry minus the TTL)
            cache_on = int(CDNRESOURCE.expiretime(resource_id)) - cdncommon.CACHETIME
            if cache_on < time_point:
                time_point = cache_on
            caches_time_dict[resource_id] = cache_on
        cache = get_cache()
        # Fetch (resource_id, cache-time) pairs from the shared redis sorted
        # set that were refreshed at/after our oldest local cache time
        # (minus a 3s safety margin).
        scores = cache.zrangebyscore(name=cdncommon.CACHESETNAME,
                                     min=str(time_point - 3), max='+inf',
                                     withscores=True, score_cast_func=int)
        if scores:
            for data in scores:
                resource_id = int(data[0])
                cache_on = int(data[1])
                # If the shared cache was refreshed after our local copy,
                # evict the local copy so it gets reloaded below.
                try:
                    # Safety margin: treat the local copy as 3s older
                    if cache_on > caches_time_dict[resource_id] - 3:
                        CDNRESOURCE.pop(resource_id, None)
                except KeyError:
                    # resource_id not locally cached — nothing to evict
                    continue
        caches_time_dict.clear()
    # Resources with no (surviving) local cache entry
    missed = need - set(CDNRESOURCE.keys())
    if missed:
        # Reload the missing resources from the database under a global lock
        with goperation.tlock('gogamechen1-cdnresource'):
            resources = cdnresource_controller.list(resource_ids=missed,
                                                    versions=True, domains=True, metadatas=True)
            for resource in resources:
                resource_id = resource.get('resource_id')
                agent_id = resource.get('agent_id')
                port = resource.get('port')
                internal = resource.get('internal')
                name = resource.get('name')
                etype = resource.get('etype')
                domains = resource.get('domains')
                versions = resource.get('versions')
                metadata = resource.get('metadata')
                if internal:
                    # Internal resources are addressed by the agent's local ip;
                    # metadata comes from the online agent, so it must exist.
                    if not metadata:
                        raise ValueError('Agent %d not online, get domain entity fail' % agent_id)
                    hostnames = [metadata.get('local_ip')]
                else:
                    if not domains:
                        if not metadata:
                            raise ValueError('Agent %d not online get domain entity fail' % agent_id)
                        # Prefer external ips; fall back to the local ip
                        if metadata.get('external_ips'):
                            hostnames = metadata.get('external_ips')
                        else:
                            hostnames = [metadata.get('local_ip')]
                    else:
                        hostnames = domains
                schema = 'http'
                if port == 443:
                    schema = 'https'
                # Build base URLs; omit the port for the scheme defaults 80/443
                netlocs = []
                for host in hostnames:
                    if port in (80, 443):
                        netloc = '%s://%s' % (schema, host)
                    else:
                        netloc = '%s://%s:%d' % (schema, host, port)
                    netlocs.append(netloc)
                # setdefault: keep any entry another thread cached meanwhile
                CDNRESOURCE.setdefault(resource_id, dict(name=name, etype=etype, agent_id=agent_id,
                                                         internal=internal, versions=versions,
                                                         netlocs=netlocs, port=port,
                                                         domains=domains))
Esempio n. 12
0
    def rpc_asyncrequest(self, ctxt,
                         asyncrequest, rpc_target, rpc_method,
                         rpc_ctxt, rpc_args):
        """Record an async request, cast it over RPC and watch for respones.

        Persists the AsyncRequest row, runs the optional pre/after/post
        hook code compiled from ``ctxt``, casts ``rpc_method`` to the
        target agents, then spawns a green thread that polls for agent
        respones until the finish time / deadline and finalizes the
        request row.  Any early failure (overtime, inactive server, bad
        hooks, rpc prepare/cast errors) is written to the row and the
        method returns.
        """
        session = get_session()
        finishtime = ctxt.get('finishtime', None)
        asyncrequest = AsyncRequest(**asyncrequest)

        # Optional hook sources carried in ctxt; compiled below.
        pre_run = ctxt.pop('pre_run', None)
        after_run = ctxt.pop('after_run', None)
        post_run = ctxt.pop('post_run', None)

        # Request already past its finish time before we even start:
        # record it as overtime and stop.
        if finishtime and int(realnow()) >= finishtime:
            asyncrequest.resultcode = manager_common.RESULT_OVER_FINISHTIME
            asyncrequest.result = 'Async request over finish time'
            asyncrequest.status = manager_common.FINISH
            try:
                session.add(asyncrequest)
                session.flush()
            except DBDuplicateEntry:
                LOG.warning('Async request record DBDuplicateEntry')
            except DBError as e:
                LOG.error('Async request record DBError %s: %s' % (e.__class__.__name__, e.message))
            return

        # Refuse new work while the rpc server is shutting down/inactive.
        if not self.is_active:
            asyncrequest.resultcode = manager_common.SCHEDULER_STATUS_ERROR
            asyncrequest.result = 'Rpc server not active now'
            asyncrequest.status = manager_common.FINISH
            session.add(asyncrequest)
            session.flush()
            return

        # Compile the hook sources into runnable objects; a bad hook
        # definition finishes the request with an executer error.
        try:
            if pre_run:
                pre_run = self._compile('pre', pre_run)
            if after_run:
                after_run = self._compile('after', after_run)
            if post_run:
                post_run = self._compile('post', post_run)
        except (KeyError, jsonutils.ValidationError):
            asyncrequest.resultcode = manager_common.SCHEDULER_EXECUTER_ERROR
            asyncrequest.result = 'Rpc server can not find executer or ctxt error'
            asyncrequest.status = manager_common.FINISH
            session.add(asyncrequest)
            session.flush()
            return
        # except Exception:
        #     LOG.exception('wtf')
        #     raise

        # No explicit agent list means "all non-deleted agents".
        if rpc_ctxt.get('agents') is None:
            wait_agents = [x[0] for x in model_query(session, Agent.agent_id,
                                                     filter=Agent.status > manager_common.DELETED).all()]
        else:
            wait_agents = rpc_ctxt.get('agents')
        rpc_ctxt.update({'request_id': asyncrequest.request_id,
                         'expire': asyncrequest.expire,
                         'finishtime': asyncrequest.finishtime})

        try:
            target = Target(**rpc_target)
            rpc = get_client()
        except Exception:
            LOG.error('Prepare rpc clinet error')
            asyncrequest.resultcode = manager_common.SCHEDULER_PREPARE_ERROR
            asyncrequest.result = 'Rpc server prepare rpc clinet error'
            asyncrequest.status = manager_common.FINISH
            session.add(asyncrequest)
            session.flush()
            return

        # pre hook runs before the request row is committed and the cast.
        if pre_run:
            try:
                pre_run.run(asyncrequest, wait_agents)
            except RpcServerCtxtException as e:
                asyncrequest.resultcode = manager_common.SCHEDULER_EXECUTER_ERROR
                asyncrequest.result = 'Rpc server ctxt pre function fail: %s' % e.message
                asyncrequest.status = manager_common.FINISH
                session.add(asyncrequest)
                session.flush()
                return

        session.add(asyncrequest)
        session.flush()

        LOG.debug('Try cast rpc method %s' % rpc_method)

        try:
            rpc.cast(target, ctxt=rpc_ctxt, msg={'method': rpc_method, 'args': rpc_args})
        except AMQPDestinationNotFound:
            asyncrequest.resultcode = manager_common.SEND_FAIL
            asyncrequest.result = 'Async %s request send fail, AMQPDestinationNotFound' % rpc_method
            asyncrequest.status = manager_common.FINISH
            session.flush()
            return

        LOG.debug('Cast %s to %s success' % (asyncrequest.request_id, target.to_dict()))

        # after hook runs once the cast succeeded; its failure only
        # annotates the result, it does not abort the request.
        if after_run:
            try:
                after_run.run(asyncrequest, wait_agents)
            except RpcServerCtxtException as e:
                asyncrequest.result = 'Async request %s cast success, ' \
                                      'ctxt after function error~%s' % (rpc_method, e.message)
            else:
                asyncrequest.result = 'Async request %s cast success' % rpc_method
            finally:
                session.flush()

        # Snapshot values used by the polling closure below.
        request_id = asyncrequest.request_id
        finishtime = asyncrequest.finishtime
        deadline = asyncrequest.deadline + 1
        expire = asyncrequest.expire
        # Expiring requests store respones in cache, others in the DB session.
        if expire:
            storage = get_cache()
        else:
            storage = session

        def check_respone():
            # Poll for agent respones until all answered or the deadline
            # passes, then mark the stragglers as overtime and finalize.
            wait = finishtime - int(time.time())
            # Wait 3 seconds first so we can do one early check
            if wait > 3:
                eventlet.sleep(3)
            no_response_agents = set(wait_agents)
            # Poll interval scales with the wait, clamped to [3, 10] seconds.
            interval = int(wait / 10)
            if interval < 3:
                interval = 3
            elif interval > 10:
                interval = 10
            # Allow two extra passes after the deadline before giving up.
            not_overtime = 2
            while True:
                no_response_agents = responeutils.norespones(storage=storage,
                                                             request_id=request_id,
                                                             agents=no_response_agents)
                if not no_response_agents:
                    break
                if int(time.time()) < finishtime:
                    eventlet.sleep(interval)
                if int(time.time()) > deadline:
                    not_overtime -= 1
                    if not not_overtime:
                        break
                eventlet.sleep(1)
            LOG.debug('Not response agents count %d' % len(no_response_agents))
            # Record an overtime respone for every agent that never answered.
            bulk_data = []
            agent_time = int(time.time())
            for agent_id in no_response_agents:
                data = dict(request_id=request_id,
                            agent_id=agent_id,
                            agent_time=agent_time,
                            server_time=agent_time,
                            resultcode=manager_common.RESULT_OVER_FINISHTIME,
                            result='Agent respone overtime')
                bulk_data.append(data)
            responeutils.bluk_insert(storage, no_response_agents, bulk_data, expire)
            asyncrequest.status = manager_common.FINISH
            if no_response_agents:
                asyncrequest.resultcode = manager_common.RESULT_NOT_ALL_SUCCESS
                asyncrequest.result = 'agents not respone, count:%d' % len(no_response_agents)
            else:
                asyncrequest.resultcode = manager_common.RESULT_SUCCESS
                asyncrequest.result = 'all agent respone result'
            session.flush()
            # post hook runs after finalization; failure only annotates.
            if post_run:
                try:
                    post_run.run(asyncrequest, no_response_agents)
                except RpcServerCtxtException as e:
                    asyncrequest.result += (' ctxt post function error~%s' % e.message)
                    session.flush()
            session.close()

        threadpool.add_thread(safe_func_wrapper, check_respone, LOG)