Example #1
class ControllerWorkload(WorkloadStorable):
    '''
        Manage workloads through the Controller
    '''
    def __init__(self, host, sources = None):
        self.__client = HttpClientPool(host, timeout = 1000, maxsize = 10)
        self.__sources = sources
        self.__sem = threading.Semaphore()
        self.__tasks = []
        self.__tasks_status = []
        #self.__timer = timer.Timer(TASK_TIME_SPAN, self.get_workloads)
        #self.__timer.start()
        self.__timer2 = timer.Timer(COMPLETE_TIME_SPAN, self.complete_workloads)
        self.__timer2.start()


    def add_workload(self, task):
        pass
    
    def get_workloads(self):
        '''
            Fetch a batch of workloads from the master,
            polled every TASK_TIME_SPAN seconds, up to TASK_COUNT at a time
        '''
        task_length = TASK_COUNT - len(self.__tasks)

        if task_length <= 0:
            return None
        logger.info('Need %d New Tasks'%task_length)
        url = "/workload?count=" + str(task_length)
        result = self.__client.get(url)

        if result is None or result == []:
            return False

        try:
            # the master returns a Python-literal list of task dicts
            result = result.strip('\0').strip()
            tasks = eval(result)
        except Exception, e:
            logger.info('GET TASKS ERROR: ' + str(e))
            return False

        logger.info('Get %d New Tasks From Master'%len(tasks))

        for task in tasks:
            #logger.info("parse string is : %s" % str(task))
            self.__tasks.append(Task.parse(json.dumps(task)))

        return True
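
The get_workloads method above hands each dict returned by the master to Task.parse as a JSON string. A minimal sketch of that round trip, using a hypothetical stand-in for the project's Task class (the real implementation lives elsewhere):

import json

class Task(object):
    # Hypothetical stand-in for the project's Task class; it only
    # illustrates the json.dumps -> Task.parse round trip used above.
    @staticmethod
    def parse(task_str):
        task = Task()
        task.__dict__.update(json.loads(task_str))
        return task

raw_tasks = [{'qid': '1504250930338', 'source': 'travelocityMultiFlight'}]
tasks = [Task.parse(json.dumps(t)) for t in raw_tasks]
print(tasks[0].qid)  # -> 1504250930338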
Example #2
def restart_process(params):  # receive the restart command

    logger.info(
        '------------------------> received restart command, server restart is starting'
    )
    workload.workload_restart_flag = False  # stop get task thread
    workers.stop()  # stop working thread
    logger.info('------------------------> update workload.tasks.qsize = ' +
                str(workload.tasks.qsize()))

    len_keys = workload.TaskingDict.keys()

    # report every in-flight task back to the master with status '53'
    for key in len_keys:
        len_item = workload.TaskingDict.pop(key)
        while len_item > 0:
            workload.complete_workload(key, '53', 'NULL')
            len_item -= 1

    # drain tasks that were fetched but never started
    while len(workload.newtasks) > 0:
        task = workload.newtasks.pop()
        workload.complete_workload(Task.parse(json.dumps(task)), '53', 'NULL')

    return str(True)
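
restart_process drains both the in-flight counter dict (TaskingDict) and the pending list (newtasks), reporting every entry back with status '53'. A self-contained sketch of the same drain logic on plain data structures; complete_workload here is a hypothetical stand-in that only records calls:

completed = []

def complete_workload(task, status, extra):
    # Hypothetical stand-in: the real method reports the result to the master.
    completed.append((task, status, extra))

TaskingDict = {'taskA': 2, 'taskB': 1}   # task -> number of in-flight copies
newtasks = [{'qid': '1'}, {'qid': '2'}]  # tasks fetched but not yet started

for key in list(TaskingDict.keys()):
    count = TaskingDict.pop(key)
    while count > 0:
        complete_workload(key, '53', 'NULL')
        count -= 1

while len(newtasks) > 0:
    complete_workload(newtasks.pop(), '53', 'NULL')

print(len(completed))  # -> 5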
Example #3
class ControllerWorkload(WorkloadStorable):
    """
        通过Controller进行workload管理
    """

    def __init__(self, host, sources, data_type_str, recv_real_time_request=True):

        self.__client = HttpClientPool(
            host, timeout=1000, maxsize=500, block=True)
        self.timeout = 2395
        self.__sources = sources
        self.__sem = threading.Semaphore()
        self.__complete_task_sem = threading.Semaphore()
        self.tasks = Queue(maxsize=MaxQsize)
        self.__tasks_status = []
        self.TaskingDict = {}
        self.new_tasks = []
        self.__flag = recv_real_time_request
        self.data_type_str = data_type_str
        self.workload_restart_flag = True
        self.__timer2 = timer.Timer(
            COMPLETE_TIME_SPAN, self.complete_workloads)
        self.__timer2.start()

    def add_workload(self, task):
        while self.tasks.qsize() > (MaxQsize - 100):
            logger.info('request is full, please wait !')
            time.sleep(10)
        self.tasks.put(task)
        logger.info('workload task queue size: {0}'.format(self.tasks.qsize()))

    def get_workloads(self):
        """
            从master取一批workloads
            get every TASK_TIME_SPAN (s), up to TASK_COUNT
        """
        task_length = TASK_COUNT - self.tasks.qsize()
        need_task = task_length
        if need_task <= 0:
            return True

        logger.info('Need %d New Tasks' % task_length)
        url = "/workload?count={0}&qid={1}&type=routine001&data_type={2}".format(need_task, int(1000 * time.time()),
                                                                                 self.data_type_str)
        result = self.__client.get(url)
        if result is None or result == []:
            return False

        try:
            result = result.strip('\0').strip()
            self.new_tasks = eval(result)
            logger.info(
                'from master get task count is : {0} / {1}'.format(len(self.new_tasks), need_task))

        except Exception, e:
            logger.info('GET TASKS ERROR: ' + str(e))
            return False

        get_task_count = 0
        for task in self.new_tasks:
            try:
                if not isinstance(task, dict):
                    logger.error('task is not a dict. task=' + str(task))
                    continue

                task_str = json.dumps(task)
                task_strs = Task.parse(task_str)
                self.tasks.put(task_strs)
                # count how many copies of each task are currently in flight
                if task_strs not in self.TaskingDict:
                    self.TaskingDict[task_strs] = 0
                self.TaskingDict[task_strs] += 1

                get_task_count += 1

            except Exception, e:
                logger.info(
                    'add task from master to tasks fail. error = ' + str(e))
                break
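
Example #3 only fills the bounded queue; separate worker threads are expected to drain it. A minimal, hypothetical consumer loop for that queue (the real worker code is not part of this example):

import threading

try:
    from Queue import Queue, Empty   # Python 2, as in the examples above
except ImportError:
    from queue import Queue, Empty   # Python 3

def worker_loop(tasks, stop_event):
    # Drain the queue filled by ControllerWorkload.get_workloads/add_workload;
    # the real worker threads live elsewhere in the project.
    while not stop_event.is_set():
        try:
            task = tasks.get(timeout=1)
        except Empty:
            continue
        # ... process `task`, then report it via complete_workload ...
        tasks.task_done()

tasks = Queue(maxsize=500)
stop_event = threading.Event()
worker = threading.Thread(target=worker_loop, args=(tasks, stop_event))
worker.daemon = True
worker.start()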
Example #4
        msg = json.dumps({
            'qid': task.req_qid, 'type': task.callback_type,
            'uid': task.req_uid, 'query': json.dumps(query)
        })

        res = channel.basic_publish(
            exchange=task.master_info['spider_mq_exchange'],
            routing_key=task.master_info['spider_mq_routerKey'],
            properties=pika.BasicProperties(delivery_mode=2),
            body=msg,
        )
        connection.close()
        if not res:
            raise Exception('RabbitMQ Result False')
        logger.debug('[callback a verifytask done]')
    except Exception as exc:
        logger.exception("callback of a task failed. error = {0}".format(traceback.format_exc()))


if __name__ == '__main__':
    task = Task()
    a= {"content":"MXP&FCO&20170827|FCO&VCE&20170830","csuid":"","master_info":{"master_addr":"10.10.155.146:48067","redis_addr":"10.10.173.116:6379","redis_db":0,"redis_passwd":"MiojiRedisOrzSpiderVerify","spider_mq_exchange":"spiderToVerify","spider_mq_exchangeType":"direct","spider_mq_host":"10.19.131.242","spider_mq_passwd":"miaoji1109","spider_mq_port":"5672","spider_mq_queue":"spider_callback_data","spider_mq_routerKey":"scv101","spider_mq_user":"******","spider_mq_vhost":"test"},"other_info":{"cache_key":"preflightmulti|10005|10002|20170827|10002|10003|20170830|travelocity|E","callback_type":"scv101","csuid":"","data_type":"flightmulti_new_pre_verify","machine_ip":"10.10.231.156","machine_port":8089,"ptid":"ptid","qid":"1504250930338","redis_key":["flightmulti_new_pre_verify|MXP&FCO&20170827|FCO&VCE&20170830|travelocity|E|10.10.231.156:8089|1504250930338|388325ab112e52427ce4819764c54294|0"],"req_type":"v107","request_begin_time":"1504250930374","result_source_redis_key":"flightmulti_new_pre_verify_1504250930338_ptid_388325ab112e52427ce4819764c54294|travelocity|E_10.10.231.156:8089","source":"travelocityMultiFlight","src":"travelocity","ticket_info":{"csuid":"","env_name":"offline","md5":"388325ab112e52427ce4819764c54294","ptid":"ptid","qid":"1504250930338","tid":"","uid":"test","v_seat_type":"E","verify_type":"pre_verify"},"ticket_md5":"388325ab112e52427ce4819764c54294","tid":"","uid":"test"},"ptid":"ptid","qid":"1504250930338","source":"travelocityMultiFlight","ticket_info":{"csuid":"","env_name":"offline","md5":"388325ab112e52427ce4819764c54294","ptid":"ptid","qid":"1504250930338","tid":"","uid":"test","v_seat_type":"E","verify_type":"pre_verify"},"tid":"","uid":"test"}
    task.parse('''''')
    task.req_qid = '1'
    task.req_uid = '22'
    task.callback_type = 'scv101'
    task.master_info = a['master_info']
    print task.master_info
    call_back_toservice(task, {'code': 1000})
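
The __main__ block publishes a persistent callback message to the exchange and queue named in master_info (spiderToVerify / spider_callback_data, routing key scv101). Below is a hypothetical consumer for that queue, assuming the same pre-1.0 pika API the publisher uses; host, port, vhost and password are taken from the master_info dict, and the user name is masked in the source, so a placeholder is used:

import json
import pika

def on_message(channel, method, properties, body):
    # Print the callback payload published by call_back_toservice, then ack it.
    payload = json.loads(body)
    print('qid={0} type={1}'.format(payload['qid'], payload['type']))
    channel.basic_ack(delivery_tag=method.delivery_tag)

credentials = pika.PlainCredentials('mq_user', 'miaoji1109')  # user name is masked in the source
params = pika.ConnectionParameters(host='10.19.131.242', port=5672,
                                   virtual_host='test', credentials=credentials)
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.basic_consume(on_message, queue='spider_callback_data')
channel.start_consuming()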