Example 1
import os
import multiprocessing
from multiprocessing.pool import ThreadPool

# Singleton, RedisOperation, PullWorker, PushMetric and LOG are
# project-local helpers assumed to be importable from this package.
class Collector(Singleton):

    def __init__(self):
        self.redis = RedisOperation()
        self.redis.connect()
        self.pool = ThreadPool(10)
        self.pid = os.getpid()
        self.pname = multiprocessing.current_process().name

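    # fan a group's urls out to the thread pool, block until every
    # PullWorker has returned, then hand the results to finish_task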
    def dispatch_work(self, task_data):
        group = task_data.get("group")
        urls = task_data.get("urls")
        if not urls:
            LOG.warn("group(%s)'s task list is empty." % group)
            return
        LOG.info("pname(%s) pid(%s) receive and do task: %r."
                 % (self.pname, self.pid, task_data))

        map_obj = self.pool.map_async(self.pull_work, urls)
        ret_list = map_obj.get()
        LOG.info("pname(%s) pid(%s) finished group(%s) pull task."
                 % (self.pname, self.pid, group))
        self.finish_task(group, ret_list)

    def pull_work(self, task_url):
        pw = PullWorker(task_url)
        data = pw.run()
        return data

    def finish_task(self, group, ret_list):
        """
        1. compute the metrics
        2. push the metrics
        3. delete the group's task from redis for task state management
        """
        task_data = self.redis.get_redis_cache(group)
        LOG.info("pname(%s) pid(%s) start pushing metrics."
                 % (self.pname, self.pid))
        pm = PushMetric(ret_list, task_data)
        pm.run()
        LOG.info("pname(%s) pid(%s) finished pushing metrics."
                 % (self.pname, self.pid))
        self.redis.delete(group)
        LOG.info("pname(%s) pid(%s) task state management: deleted "
                 "task(%s)." % (self.pname, self.pid, group))
Example 2
import json
import struct
import time
import multiprocessing

# QApplication, QSocketOperation, RedisOperation, logging, LOG, CONF,
# init_signal_handler, collector and the command-id constants are
# project-local and assumed to be importable.
class LawrenceApplication(QApplication):
    name = "lawrence"
    version = "v1.0"

    _last_update = 0

    def init_app(self):
        # init the parent class so the oslo configuration takes effect
        super(LawrenceApplication, self).init_app()

        # init log
        logging.setup("lawrence instance.")
        LOG.info("server instance is running.")

        # init redis cluster
        self.redis = RedisOperation()
        self.redis.connect()

        # init the process pool (4 workers; the allowed maximum is 50)
        self.pool = multiprocessing.Pool(4, init_signal_handler)

        # init a long-lived socket connection
        self.sock_obj = QSocketOperation()
        host_list = CONF.NODES.nodes
        proxy_port = CONF.PROXY.proxy_port
        timeout = 4
        self.sock_obj.connect(host_list, proxy_port, timeout)

    def run(self):
        while True:
            # heartbeat_check
            heartbeat = struct.pack("!II", HEARTBEAT, 0)
            send_status = self.sock_obj.sendmsg(heartbeat)
            if not send_status:
                LOG.warn("proxy master is unhealthy, need to raise an alarm.")

            # request_task
            self._get_taskinfo()

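            # every reply begins with an 8-byte "!II" header:
            # (command_id, total package length)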
            head_data = self.sock_obj.recvmsg(HEADER_BYTES)
            if head_data:
                head_msg = struct.unpack('!II', head_data)
                command_id = head_msg[0]
                if command_id == ACK_HEART:
                    pass
                elif command_id == ACK_NORMAL:
                    package_len = head_msg[1]
                    bodylen = package_len - HEADER_BYTES
                    body = self.sock_obj.recvmsg(bodylen)
                    LOG.info("received task data body: %s" % body)
                    if not body:
                        time.sleep(2)
                        continue

                    # do task
                    task_list = json.loads(body)
                    self._dispatch_task(task_list)

            time.sleep(2)

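    # ask the proxy for new tasks at most about once a minute: skip
    # unless 50s have passed since the last request and the wall clock
    # is in the second half of the current minute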
    def _get_taskinfo(self):
        cur_time = time.time()
        timestamp = int(cur_time)
        timedelta = timestamp - self._last_update
        if timedelta < 50:
            return
        cur_second = int(time.strftime("%S", time.localtime(cur_time)))
        if cur_second < 30:
            return
        self._last_update = timestamp

        request_data = struct.pack("!II", NORMALMSG, 0)
        send_status = self.sock_obj.sendmsg(request_data)
        if not send_status:
            LOG.warn("failed to request task data.")

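    # for each task group: drop groups that still have a stale
    # unfinished record in redis, cache the new task, then hand it to
    # a worker process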
    def _dispatch_task(self, task_list):
        try:
            for item in task_list:
                group = item.get("group")
                is_exist = self.redis.exists(group)
                if is_exist:
                    self.redis.delete(group)
                    LOG.warn("found an unfinished task for group(%s), "
                             "dropped it." % group)
                    continue
                self.redis.set_redis_cache(group, item)
                # Note: apply_async's target must be a picklable,
                # top-level function
                self.pool.apply_async(collector, (item,))
            LOG.info("finished dispatching all assigned tasks.")
        except KeyboardInterrupt:
            self.pool.terminate()
            self.pool.join()
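Both examples speak a framing protocol with a fixed 8-byte big-endian header. A minimal sketch of packing and parsing such frames, consistent with the struct calls above (the value of ACK_NORMAL here is a hypothetical stand-in for the project's real constant):

import json
import struct

HEADER_BYTES = 8      # struct.calcsize("!II")
ACK_NORMAL = 2        # hypothetical value; the real one is project-defined

def pack_frame(command_id, payload=None):
    # header-only messages (heartbeat, task request) carry 0 in the
    # length field, as in struct.pack("!II", HEARTBEAT, 0) above
    if payload is None:
        return struct.pack("!II", command_id, 0)
    # messages with a body carry the total package length, so the
    # reader computes bodylen = package_len - HEADER_BYTES
    body = json.dumps(payload).encode()
    return struct.pack("!II", command_id, HEADER_BYTES + len(body)) + body

def unpack_header(head_data):
    # max() guards against header-only frames whose length field is 0
    command_id, package_len = struct.unpack("!II", head_data)
    return command_id, max(0, package_len - HEADER_BYTES)

# round trip: a task frame as run() would consume it
frame = pack_frame(ACK_NORMAL, [{"group": "g1", "urls": ["http://a"]}])
cmd, bodylen = unpack_header(frame[:HEADER_BYTES])
task_list = json.loads(frame[HEADER_BYTES:HEADER_BYTES + bodylen])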