Example #1
 def delete(self, req, agent_id, endpoint):
     endpoints = validateutils.validate_endpoints(endpoint)
     if not endpoints:
         raise InvalidArgument('Endpoints is None for delete endpoints')
     session = get_session()
     glock = get_global().lock('agents')
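     # serialize endpoint changes for this agent under the global 'agents' lock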
     with glock([
             agent_id,
     ]):
         with session.begin():
             if model_count_with_key(
                     session,
                     AgentEntity.entity,
                     filter=and_(AgentEntity.agent_id == agent_id,
                                 AgentEntity.endpoint.in_(endpoints))):
                 return resultutils.results(
                     resultcode=manager_common.RESULT_ERROR,
                     result=
                     'delete endpoints fail, entitys still in endpoint')
             query = model_query(session,
                                 AgentEndpoint,
                                 filter=and_(
                                     AgentEndpoint.agent_id == agent_id,
                                     AgentEndpoint.endpoint.in_(endpoints)))
             delete_count = query.delete(synchronize_session=False)
             need_to_delete = len(endpoints)
             if delete_count != need_to_delete:
                 LOG.warning('Delete %d endpoints, but expect count is %d' %
                             (delete_count, need_to_delete))
     session.close()
     return resultutils.results(result='delete endpoints success')
Example #2
 def rpc_changesource(self, ctxt, agent_id, fds, conns, free, process, cputime, iowait, left, metadata):
     """agent status of performance change"""
     if agent_id not in self.agents_loads:
         session = get_session(readonly=True)
         query = model_query(session, Agent, filter=Agent.agent_id == agent_id)
         agent = query.one_or_none()
         if not agent:
             return
         if agent_id not in self.agents_loads:
             self.agents_loads[agent_id] = dict(cpu=agent.cpu,
                                                memory=agent.memory,
                                                disk=agent.disk)
     new_status = {'free': free, 'process': process,
                   'cputime': cputime, 'iowait': iowait,
                   'left': left, 'fds': fds, 'conns': conns,
                   'time': int(time.time())}
     # metadata is not updated when it is None
     if metadata is not None:
         new_status['metadata'] = metadata
     else:
         # the current agent has no cached metadata; try to fetch it
         if not self.agents_loads[agent_id].get('metadata'):
             # cache_store = get_redis()
             # metadata = cache_store.get(targetutils.host_online_key(agent_id))
             # new_status['metadata'] = metadata if not metadata else jsonutils.loads_as_bytes(metadata)
             global_data = get_global()
             metadatas = global_data.agents_metadata([agent_id, ])
             new_status['metadata'] = metadatas.get(agent_id)
     self.agents_loads[agent_id].update(new_status)
Example #3
 def show(self, req, agent_id, body=None):
     body = body or {}
     show_ports = body.get('ports', False)
     show_entitys = body.get('entitys', False)
     session = get_session(readonly=True)
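     # build the eager-load chain: endpoints, then optionally their entitys and ports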
     joins = joinedload(Agent.endpoints, innerjoin=False)
     if show_entitys:
         joins = joins.joinedload(AgentEndpoint.entitys, innerjoin=False)
         if show_ports:
             joins = joins.joinedload(AgentEntity.ports, innerjoin=False)
     query = model_query(session, Agent).options(joins)
     agent = query.filter_by(agent_id=agent_id).one()
     result = resultutils.results(total=1, pagenum=0, result='Show agent success')
     endpoints = {}
     for endpoint in agent.endpoints:
         endpoints[endpoint.endpoint] = []
         if show_entitys:
             for entity in endpoint.entitys:
                 _entity = {'entity': entity.entity, 'ports': []}
                 endpoints[endpoint.endpoint].append(_entity)
                 if show_ports:
                     for port in entity.ports:
                         _entity['ports'].append(port.port)
     result_data = dict(agent_id=agent.agent_id, host=agent.host,
                        agent_type=agent.agent_type,
                        status=agent.status,
                        cpu=agent.cpu,
                        memory=agent.memory,
                        disk=agent.disk,
                        ports_range=jsonutils.safe_loads_as_bytes(agent.ports_range) or [],
                        endpoints=endpoints,
                        metadata=BaseContorller.agent_metadata(agent_id),
                        )
     result['data'].append(result_data)
     return result
Example #4
 def show(self, req, endpoint, entity, body=None):
     body = body or {}
     show_ports = body.get('ports', False)
     endpoint = validateutils.validate_endpoint(endpoint)
     entity = int(entity)
     session = get_session(readonly=True)
     query = model_query(session,
                         AgentEntity,
                         filter=and_(AgentEntity.endpoint == endpoint,
                                     AgentEntity.entity == entity))
     if show_ports:
         query = query.options(
             joinedload(AgentEntity.ports, innerjoin=False))
     _entity = query.one_or_none()
     if not _entity:
         raise InvalidArgument('no entity found for %s' % endpoint)
         # return resultutils.results(result='no entity found', resultcode=manager_common.RESULT_ERROR)
     return resultutils.results(
         result='show entity success',
         data=[
             dict(endpoint=_entity.endpoint,
                  agent_id=_entity.agent_id,
                  metadata=BaseContorller.agent_metadata(_entity.agent_id),
                  entity=_entity.entity,
                  ports=sorted([x.port for x in _entity.ports])
                  if show_ports else [])
         ])
Example #5
 def create(self, req, body):
     jsonutils.schema_validate(body, FileReuest.SCHEMA)
     address = body.pop('address')
     size = body.pop('size')
     md5 = body.pop('md5')
     ext = body.get('ext') or address.split('.')[-1]
     status = body.get('status', manager_common.DOWNFILE_FILEOK)
     if ext.startswith('.'):
         ext = ext[1:]
     session = get_session()
     downfile = DownFile(md5=md5,
                         downloader=body.get('downloader', 'http'),
                         adapter_args=body.get('adapter_args'),
                         address=address,
                         ext=ext,
                         size=size,
                         status=status,
                         desc=body.get('desc'),
                         uploadtime=body.get('uploadtime',
                                             timeutils.utcnow()))
     session.add(downfile)
     session.flush()
     return resultutils.results(result='Add file success',
                                data=[
                                    dict(md5=downfile.md5,
                                         size=downfile.size,
                                         uploadtime=downfile.uploadtime,
                                         downloader=downfile.downloader)
                                ])
Example #6
 def wapper():
     eventlet.sleep(random.randint(0, 5))
     # save report log
     session = get_session()
     report = AgentReportLog(**snapshot)
     session.add(report)
     session.flush()
     session.close()
     process = snapshot.get('running') + snapshot.get('sleeping')
     free = snapshot.get('free') + snapshot.get('cached')
     conns = snapshot.get('syn') + snapshot.get('enable')
     cputime = snapshot.get('iowait') + snapshot.get('user') \
               + snapshot.get('system') + snapshot.get('nice')\
               + snapshot.get('irq') + snapshot.get('sirq')
     rpc = get_client()
     # send to rpc server
     rpc.cast(targetutils.target_rpcserver(fanout=True),
              ctxt={},
              msg={'method': 'changesource',
                   'args': {'agent_id': agent_id,
                            'free':  free,
                            'process': process,
                            'cputime': cputime,
                            'iowait': snapshot.get('iowait'),
                            'left': snapshot.get('left'),
                            'fds': snapshot.get('num_fds'),
                            'conns': conns,
                            'metadata': metadata,
                            }})
Example #7
 def show(self, req, md5, body=None):
     session = get_session(readonly=True)
     query = model_query(session, DownFile, filter=DownFile.md5 == md5)
     downfile = query.one_or_none()
     if not downfile:
         return resultutils.results(resultcode=manager_common.RESULT_ERROR,
                                    result='Get file fail, not found')
     file_info = {
         'downloader': downfile.downloader,
         'address': downfile.address,
         'ext': downfile.ext,
         'size': downfile.size,
         'uploadtime': str(downfile.uploadtime),
         'md5': downfile.md5,
         'status': downfile.status,
     }
     if downfile.adapter_args:
         file_info.setdefault(
             'adapter_args',
             jsonutils.dumps_as_bytes(downfile.adapter_args))
     if downfile.desc:
         file_info.setdefault('desc', downfile.desc)
     resultcode = manager_common.RESULT_SUCCESS
     if downfile.status != manager_common.DOWNFILE_FILEOK:
         resultcode = manager_common.RESULT_ERROR
     return resultutils.results(result='Get file success',
                                resultcode=resultcode,
                                data=[
                                    file_info,
                                ])
Example #8
 def task():
     session = get_session()
     query = model_query(session,
                         ScheduleJob,
                         filter=ScheduleJob.job_id == job_id)
     job = query.one_or_none()
     if job is None:
         LOG.warning(
             'Scheduler job %d has been deleted or run by this scheduler'
             % job_id)
         raise loopingcall.LoopingCallDone
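     # 'times' is the number of remaining runs; None means the job repeats forever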
     if job.times is not None:
         if job.times == 0:
             query.delete()
             raise loopingcall.LoopingCallDone
     # call taskflow run job
     factory.start_taskflow(job)
     if job.times is not None:
         job.times -= 1
     session.flush()
     session.close()
     if job.times is not None:
         if job.times == 0:
             query.delete()
             raise loopingcall.LoopingCallDone
Example #9
 def checker(_token):
     if _token.get('user') != username:
         raise InvalidArgument('username not match')
     session = get_session(readonly=True)
     query = model_query(session,
                         User,
                         filter=User.username == _token.get('user'))
     query.one()
Example #10
 def index(self, req, agent_id):
     session = get_session(readonly=True)
     query = model_query(session,
                         AgentEndpoint,
                         filter=AgentEndpoint.agent_id == agent_id)
     data = [endpoint for endpoint in query.options(entityjoin)]
     return resultutils.results(result='list endpoint on success',
                                data=data)
Example #11
 def post_start(self):
     super(SchedulerManager, self).post_start()
     with self.job_lock:
         session = get_session(readonly=True)
         query = model_query(session,
                             ScheduleJob,
                             filter=ScheduleJob.schedule == self.agent_id)
         for job in query.all():
             if job.times is not None and job.times:
                 self.start_task(job.job_id, job.start, job.interval)
Example #12
 def show(self, req, agent_id, endpoint):
     session = get_session(readonly=True)
     endpoints_filter = and_(AgentEndpoint.agent_id == agent_id,
                             AgentEndpoint.endpoint == endpoint)
     query = model_query(session, AgentEndpoint, filter=endpoints_filter)
     endpoint = query.options(entityjoin).one()
     return resultutils.results(result='show endpoint success',
                                data=[
                                    endpoint,
                                ])
Example #13
 def response(self, req, request_id, body):
     """agent report respone api"""
     session = get_session()
     asyncrequest = model_query(
         session,
         AsyncRequest,
         filter=AsyncRequest.request_id == request_id).one()
     if not asyncrequest.expire:
         return responeutils.agentrespone(session, request_id, body)
     else:
         return responeutils.agentrespone(get_cache(), request_id, body)
Example #14
 def allocated(self, req, agent_id):
     session = get_session(readonly=True)
     query = model_query(session,
                         AllocatedPort,
                         filter=AllocatedPort.agent_id == agent_id)
     return resultutils.results(
         result='list ports success',
         data=[dict(
             port=p.port,
             desc=p.desc,
         ) for p in query.all()])
Example #15
 def count(self, req, endpoint):
     session = get_session(readonly=True)
     data = []
     for endpoint in argutils.map_with(endpoint,
                                       validateutils.validate_endpoint):
         count = model_count_with_key(
             session,
             AgentEndpoint.endpoint,
             filter=AgentEndpoint.endpoint == endpoint)
         data.append(dict(endpoint=endpoint, count=count))
     return resultutils.results(result='count endpoint for success',
                                data=data)
Example #16
    def post_run(self, asyncrequest, no_response_agents):
        kwargs = self.kwargs
        if not kwargs:
            return

        all = kwargs.get('all', True)
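        # when 'all' is set, every targeted agent must respond and every check must pass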
        if all and no_response_agents:
            raise exceptions.RpcServerCtxtException(
                'Entitys check fail, some agents did not respond')

        operator = kwargs.get('operator')
        operator = OPERATIORS[operator]
        value = kwargs.get('value')

        counter = kwargs.get('counter')
        if counter:
            counter = OPERATIORS[counter]
            count = kwargs.get('count')
        elif not all:
            raise exceptions.RpcServerCtxtException(
                'No counter found when all is False')

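        # reload the async request with its respones and details eagerly joined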
        query = model_query(
            get_session(readonly=True),
            AsyncRequest,
            filter=AsyncRequest.request_id == asyncrequest.request_id)
        joins = joinedload(AsyncRequest.respones, innerjoin=False)
        joins = joins.joinedload(AgentRespone.details, innerjoin=False)
        query = query.options(joins)
        asyncrequest = query.one()
        results = resultutils.async_request(asyncrequest,
                                            agents=True,
                                            details=True)
        respones = results.get('respones')

        _count = 0
        for respone in respones:
            if all and respone.get(
                    'resultcode') != manager_common.RESULT_SUCCESS:
                raise exceptions.RpcServerCtxtException(
                    'Entitys check fail, one agent resultcode not success')
            details = respone.get('details')
            for detail in details:
                if operator(detail.get('resultcode'), value):
                    _count += 1
                elif all:
                    raise exceptions.RpcServerCtxtException(
                        'Check fail, entity %d resultcode not match' %
                        detail.get('detail_id'))
        if counter and not counter(_count, count):
            raise exceptions.RpcServerCtxtException(
                'Check fail, entitys count not match')
Example #17
 def index(self, req, agent_id, endpoint, entity):
     session = get_session(readonly=True)
     query = model_query(session,
                         AllocatedPort,
                         filter=and_(AllocatedPort.agent_id == agent_id,
                                     AllocatedPort.endpoint == endpoint,
                                     AllocatedPort.entity == entity))
     return resultutils.results(
         result='list ports success',
         data=[dict(
             port=p.port,
             desc=p.desc,
         ) for p in query.all()])
Example #18
 def send_asyncrequest(asyncrequest,
                       rpc_target,
                       rpc_ctxt,
                       rpc_method,
                       rpc_args=None,
                       async_ctxt=None):
     rpc = get_client()
     session = get_session()
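     # cast the async request to the rpc scheduler; on error record the failure on the request row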
     try:
         rpc.cast(
             targetutils.target_rpcserver(),
             # ctxt={'finishtime': asyncrequest.finishtime-2},
             ctxt=async_ctxt or {},
             msg={
                 'method': 'asyncrequest',
                 'args': {
                     'asyncrequest': asyncrequest.to_dict(),
                     'rpc_target': rpc_target.to_dict(),
                     'rpc_method': rpc_method,
                     'rpc_ctxt': rpc_ctxt,
                     'rpc_args': rpc_args or dict()
                 }
             })
     except AMQPDestinationNotFound as e:
         LOG.error('Send async request to scheduler fail %s' %
                   e.__class__.__name__)
         asyncrequest.status = manager_common.FINISH
         asyncrequest.result = e.message
         asyncrequest.resultcode = manager_common.SCHEDULER_NOTIFY_ERROR
         try:
             session.add(asyncrequest)
             session.flush()
         except DBDuplicateEntry:
             LOG.warning(
                 'Async request rpc call result is None, but record found')
     except Exception as e:
         if LOG.isEnabledFor(logging.DEBUG):
             LOG.exception('Async request rpc cast fail')
         else:
             LOG.error('Async request rpc cast unknown error')
         asyncrequest.status = manager_common.FINISH
         asyncrequest.result = 'Async request rpc cast error: %s' % e.__class__.__name__
         asyncrequest.resultcode = manager_common.RESULT_ERROR
         try:
             session.add(asyncrequest)
             session.flush()
             raise
         except DBDuplicateEntry:
             LOG.warning(
                 'Async request rpc call result is None, but record found')
Example #19
    def delete(self, req, md5, body=None):
        session = get_session()
        query = model_query(session, DownFile, filter=DownFile.md5 == md5)
        with session.begin():
            downfile = query.one_or_none()
            if not downfile:
                return resultutils.results(result='Delete file do nothing, not found')
            query.delete()

        return resultutils.results(result='Delete file success', data=[dict(md5=downfile.md5,
                                                                            size=downfile.size,
                                                                            address=downfile.address,
                                                                            uploadtime=downfile.uploadtime,
                                                                            downloader=downfile.downloader)])
Example #20
 def update(self, req, md5, body=None):
     body = body or {}
     status = body.pop('status', None)
     if status not in manager_common.DOWNFILESTATUS:
         raise InvalidArgument('status value error')
     session = get_session()
     query = model_query(session, DownFile, filter=DownFile.md5 == md5)
     with session.begin():
         downfile = query.one()
         query.update({'status': status})
     return resultutils.results(result='Update file success', data=[dict(md5=downfile.md5,
                                                                         size=downfile.size,
                                                                         status=downfile.status,
                                                                         uploadtime=downfile.uploadtime,
                                                                         downloader=downfile.downloader)])
Example #21
 def create(self, req, agent_id, body=None):
     body = body or {}
     endpoints = validateutils.validate_endpoints(body.get('endpoints'))
     session = get_session()
     glock = get_global().lock('agents')
     with glock([
             agent_id,
     ]):
         with session.begin():
             for endpoint in endpoints:
                 session.add(
                     AgentEndpoint(agent_id=agent_id, endpoint=endpoint))
                 session.flush()
     return resultutils.results(result='add endpoints success',
                                data=endpoints)
Example #22
 def entitys(self, req, endpoint):
     session = get_session(readonly=True)
     endpoint = validateutils.validate_endpoint(endpoint)
     query = model_query(session,
                         AgentEntity,
                         filter=AgentEntity.endpoint == endpoint)
     query = query.options(joinedload(AgentEntity.ports, innerjoin=False))
     return resultutils.results(
         result='get endpoint %s entitys success' % endpoint,
         data=[
             dict(agent_id=entity.agent_id,
                  entity=entity.entity,
                  ports=[port.port for port in entity.ports])
             for entity in query
         ])
Example #23
    def overtime(self, req, request_id, body):
        """
        agent did not respond; the async checker sends an overtime response
        This interface is reserved; its functionality is already implemented in the rpc server
        """
        jsonutils.schema_validate(body, OVERTIMESCHEMA)
        agent_time = body.get('agent_time')
        agents = set(body.get('agents'))
        session = get_session()
        query = model_query(session,
                            AsyncRequest).filter_by(request_id=request_id)
        asynecrequest = query.one()
        if asynecrequest.status == manager_common.FINISH:
            raise InvalidArgument('Async request has been finished')

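        # insert the overtime responses in bulk on a worker thread so the api returns immediately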
        def bluk():
            bulk_data = []
            for agent_id in agents:
                data = dict(request_id=request_id,
                            agent_id=agent_id,
                            agent_time=agent_time,
                            server_time=int(time.time()),
                            resultcode=manager_common.RESULT_OVER_FINISHTIME,
                            result='Agent respone overtime')
                bulk_data.append(data)
            responeutils.bluk_insert(
                storage=get_cache() if asynecrequest.expire else session,
                agents=agents,
                bulk_data=bulk_data,
                expire=asynecrequest.expire)

            if agents:
                query.update({
                    'status': manager_common.FINISH,
                    'resultcode': manager_common.RESULT_NOT_ALL_SUCCESS,
                    'result': '%d agent not respone' % len(agents)
                })
            else:
                query.update({
                    'status': manager_common.FINISH,
                    'resultcode': manager_common.RESULT_SUCCESS,
                    'result': 'all agents responded'
                })
            session.flush()
            session.close()

        threadpool.add_thread(bluk)
        return resultutils.results(result='Post agent overtime success')
Example #24
 def clean(self, req, agent_id):
     session = get_session()
     query = model_query(session, Agent,
                         filter=and_(Agent.agent_id == agent_id,
                                     Agent.status <= manager_common.DELETED))
     entity_query = model_query(session, AgentEntity,
                                filter=AgentEntity.agent_id == agent_id)
     with session.begin():
         entitys = entity_query.all()
         if entitys:
             for entity in entitys:
                 LOG.error('Clean agent fail, entity %s:%d still on %s' %
                           (entity.endpoint, entity.entity, agent_id))
             raise EndpointNotEmpty('Clean agent %s fail, still has %d entitys' % (agent_id, len(entitys)))
         count = query.delete()
         LOG.info('Clean deleted agent %d, agent_id %s' % (count, agent_id))
         return resultutils.results(result='Clean deleted agent success')
Example #25
 def online(self, req, host, body):
     """call buy agent
     when a agent start, it will cache agent ipaddr
     """
     try:
         host = validators['type:hostname'](host)
         agent_type = body.pop('agent_type')
         metadata = body.pop('metadata')
         expire = body.pop('expire')
     except KeyError as e:
         raise InvalidArgument('Can not find argument: %s' % e.message)
     except ValueError as e:
         raise InvalidArgument('Argument value type error: %s' % e.message)
     except InvalidInput as e:
         raise InvalidArgument(e.message)
     session = get_session(readonly=True)
     query = model_query(session,
                         Agent,
                         filter=(and_(Agent.status > manager_common.DELETED,
                                      Agent.agent_type == agent_type,
                                      Agent.host == host)))
     agent = query.one_or_none()
     if not agent:
         LOG.info('Cache online called but no Agent found')
         ret = {'agent_id': None}
     else:
         self.agent_id_check(agent.agent_id)
         local_ip = metadata.get('local_ip')
         external_ips = str(metadata.get('external_ips'))
         LOG.debug(
             'Cache online called. agent_id:%(agent_id)s, type:%(agent_type)s, '
             'host:%(host)s, local_ip:%(local_ip)s, external_ips:%(external_ips)s'
             % {
                 'agent_id': agent.agent_id,
                 'agent_type': agent_type,
                 'host': host,
                 'local_ip': local_ip,
                 'external_ips': external_ips
             })
         ret = {'agent_id': agent.agent_id}
         BaseContorller._agent_metadata_flush(agent.agent_id,
                                              metadata,
                                              expire=expire)
     result = resultutils.results(
         result='Cache online function run success')
     result['data'].append(ret)
     return result
Example #26
 def unsafe_create(agent_id, endpoint, entity, ports):
     session = get_session()
     with session.begin():
         query = model_query(session,
                             AgentEntity,
                             filter=and_(AgentEntity.endpoint == endpoint,
                                         AgentEntity.entity == entity))
         _entity = query.one()
         for port in ports:
             session.add(
                 AllocatedPort(agent_id=agent_id,
                               port=port,
                               endpoint_id=_entity.endpoint_id,
                               entity_id=_entity.id,
                               endpoint=endpoint,
                               entity=entity))
             session.flush()
Example #27
 def show(self, req, request_id, body=None):
     body = body or {}
     agents = body.get('agents', True)
     details = body.get('details', False)
     session = get_session(readonly=True)
     query = model_query(session,
                         AsyncRequest,
                         filter=AsyncRequest.request_id == request_id)
     if agents:
         joins = joinedload(AsyncRequest.respones)
         if details:
             joins = joins.joinedload(AgentRespone.details)
         query = query.options(joins)
     request = query.one()
     async_result = resultutils.async_request(request, agents, details)
     return resultutils.results(result='show async request success',
                                data=[async_result])
Example #28
 def delete(self, req, endpoint, entity, body=None, action='delete'):
     body = body or {}
     notify = body.pop('notify', True)
     endpoint = validateutils.validate_endpoint(endpoint)
     entity = int(entity)
     session = get_session()
     glock = get_global().lock('entitys')
     result = 'delete entity success.'
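     # the global 'entitys' lock yields the agents that host this entity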
     with glock(endpoint, [
             entity,
     ]) as agents:
         with session.begin():
             query = model_query(session,
                                 AgentEntity,
                                 filter=and_(
                                     AgentEntity.endpoint == endpoint,
                                     AgentEntity.entity == entity))
             if notify:
                 agent_id = agents.pop()
                 metadata = BaseContorller.agent_metadata(agent_id)
                 if not metadata:
                     raise InvalidArgument('Agent not online or not exist')
             _entity = query.one_or_none()
             if not _entity:
                 LOG.warning('Delete no entitys, but expect count 1')
             else:
                 query.delete()
             if notify:
                 target = targetutils.target_agent_by_string(
                     metadata.get('agent_type'), metadata.get('host'))
                 target.namespace = endpoint
                 delete_result = self.notify_delete(target, agent_id,
                                                    entity, body, action)
                 if not delete_result:
                     raise RpcResultError('delete entitys result is None')
                 if delete_result.get(
                         'resultcode') != manager_common.RESULT_SUCCESS:
                     raise RpcResultError('delete entity fail %s' %
                                          delete_result.get('result'))
                 result += delete_result.get('result')
                 notify = delete_result
     return resultutils.results(
         result=result,
         data=[dict(entity=entity, endpoint=endpoint, notify=notify)])
Example #29
 def rpc_chioces(self, ctxt, target, includes=None, weighters=None):
     """chioce best best performance agent for endpoint"""
     session = get_session(readonly=True)
     query = session.query(Agent.agent_id).join(AgentEndpoint,
                                                and_(Agent.agent_id == AgentEndpoint.agent_id,
                                                     AgentEndpoint.endpoint == target))
     # list of agents that can be selected
     chioces = []
     # only agents that reported load data within the last 30 minutes can be selected
     timeline = int(time.time()) - (30*60 + 30)
     for agent in query:
         if agent.agent_id in self.agents_loads:
             loads = self.agents_loads[agent.agent_id]
             if loads.get('time') and loads.get('time') > timeline:
                 chioces.append(agent.agent_id)
     if not chioces:
         LOG.info('No agent found for endpoint %s, maybe report overtime?' % target)
         return ChiocesResult(chioces)
     # include rules were given
     if includes:
         self._exclud_filter(includes, chioces)
     if not chioces:
         LOG.info('No agent found for endpoint %s with includes' % target)
         if LOG.isEnabledFor(logging.DEBUG):
             LOG.debug('No agent found includes %s', includes)
             LOG.debug('No agent found weighters %s', weighters)
         return ChiocesResult(chioces)
     # no weighting rules given
     if not weighters:
         # count the number of entitys on each agent
         query = model_query(session, (AgentEntity.agent_id, func.count(AgentEntity.id)),
                             filter=AgentEntity.agent_id.in_(chioces))
         query = query.group_by(AgentEntity.agent_id)
         count = {}
         for r in query:
             count[r[0]] = r[1]
         # sort by entity count, fewest first
         chioces.sort(key=lambda agent_id: count.get(agent_id, 0))
     else:
         # sort according to the given weighters
         self._sort_by_weigher(weighters, chioces)
     return ChiocesResult(chioces)
Example #30
 def rpc_scheduler(self, ctxt, job_id, jobdata):
     if not self.is_active:
         return AgentRpcResult(self.agent_id,
                               resultcode=manager_common.RESULT_ERROR,
                               result='Scheduler not active now')
     session = get_session()
     # write job to database
     interval = jobdata['interval']
     start = datetime.datetime.fromtimestamp(jobdata['start'])
     with session.begin():
         session.add(
             ScheduleJob(job_id=job_id,
                         times=jobdata['times'],
                         interval=jobdata['interval'],
                         schedule=self.agent_id,
                         start=start,
                         retry=jobdata['retry'],
                         revertall=jobdata['revertall'],
                         desc=jobdata['desc'],
                         kwargs=safe_dumps(jobdata.get('kwargs'))))
         session.flush()
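         # persist every job step with its executor and arguments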
         for index, step in enumerate(jobdata['jobs']):
             executor_cls = getattr(executor, step['executor'])
             analyzer_cls = getattr(analyzer, step['executor'])
             if not executor_cls and not analyzer_cls:
                 raise NotImplementedError('executor not exist')
             # check execute and revert
             executor_cls.esure_subclass(step)
             session.add(
                 JobStep(job_id=job_id,
                         step=index,
                         executor=step['executor'],
                         kwargs=safe_dumps(step.get('kwargs', None)),
                         execute=step.get('execute', None),
                         revert=step.get('revert', None),
                         method=step.get('method', None),
                         rebind=safe_dumps(step.get('rebind', None)),
                         provides=safe_dumps(step.get('provides', None))))
             session.flush()
     self.start_task(job_id, start, interval)
     return AgentRpcResult(result='Scheduler Job accepted',
                           agent_id=self.agent_id)