Example #1
class QueueMailBox(MailBox):
    def __init__(self, name, *args, **kwargs):
        super(QueueMailBox, self).__init__(*args, **kwargs)
        self._name = name
        self._queue = Queue()
        self._ready = True

    async def prepare(self):
        self._ready = True

    async def put(self, msg=None):
        if await self.policy():
            self._queue.put_nowait(msg)

    async def size(self):
        return self._queue.qsize()

    async def empty(self):
        return self._queue.empty()

    async def get(self):
        return await self._queue.get()

    async def policy(self):
        mem_percent = psutil.virtual_memory().percent
        if mem_percent > 80:
            logger.warning("memory usage is greater than 80%")
        return True  # always allow the put; high memory usage only logs a warning
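
The mailbox above declares size() and empty() as coroutines, but the underlying asyncio.Queue.qsize() and empty() calls are plain synchronous methods. A minimal self-contained sketch of that behavior (no MailBox base class or psutil required):

import asyncio
from asyncio import Queue

async def main():
    q = Queue()
    for i in range(3):
        q.put_nowait(i)
    # qsize() returns the current item count synchronously; asyncio queues
    # need no locking because everything runs on one event-loop thread
    print(q.qsize())             # -> 3
    await q.get()
    print(q.qsize(), q.empty())  # -> 2 False

asyncio.run(main())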
Example #2
async def test_commit_concurrency(aconn):
    # Check the condition reported in psycopg2#103
    # Because of bad status check, we commit even when a commit is already on
    # its way. We can detect this condition by the warnings.
    notices = Queue()
    aconn.add_notice_handler(
        lambda diag: notices.put_nowait(diag.message_primary))
    stop = False

    async def committer():
        nonlocal stop
        while not stop:
            await aconn.commit()
            await asyncio.sleep(0)  # Allow the other worker to work

    async def runner():
        nonlocal stop
        cur = aconn.cursor()
        for i in range(1000):
            await cur.execute("select %s;", (i, ))
            await aconn.commit()

        # Stop the committer task
        stop = True

    await asyncio.gather(committer(), runner())
    assert notices.empty(), "%d notices raised" % notices.qsize()
Example #3
async def consumer_log_response(self, res_queue: queues.Queue):
    while True:
        print(res_queue.qsize())
        item: requests.Response = await res_queue.get()
        if item is None:
            break
        print(item.url)
        res_queue.task_done()
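
The consumer above stops on a None sentinel and acknowledges each item with task_done(). A minimal matching producer, sketched with plain strings standing in for the requests.Response items:

import asyncio
from asyncio import Queue

async def producer(res_queue: Queue):
    for url in ("https://a.example", "https://b.example"):
        await res_queue.put(url)  # stand-ins for response objects
    await res_queue.join()        # wait until every item is task_done()
    await res_queue.put(None)     # sentinel: tell the consumer to stop

async def consumer(res_queue: Queue):
    while True:
        item = await res_queue.get()
        if item is None:
            break
        print(item)
        res_queue.task_done()

async def main():
    q = Queue()
    await asyncio.gather(producer(q), consumer(q))

asyncio.run(main())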
Example #4
async def astart(self, input_queue: Queue):
    # keep consuming; after stop_flag is set, finish draining the queue
    while not self.stop_flag or input_queue.qsize() > 0:
        kwargs = await input_queue.get()
        resp = await requests_future("post",
                                     "https://api.github.com/graphql",
                                     **kwargs)
        if resp.status_code == 200:
            await self.output_queue.put(resp.content)
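
The loop condition above keeps the worker alive until the stop flag is set and the queue has drained. A self-contained sketch of the same shutdown pattern, using an asyncio.Event and a timed get() so the flag is re-checked periodically (names are illustrative):

import asyncio
from asyncio import Queue

async def worker(q: Queue, stop: asyncio.Event):
    # run until asked to stop, then finish whatever is still queued
    while not stop.is_set() or q.qsize() > 0:
        try:
            item = await asyncio.wait_for(q.get(), timeout=0.1)
        except asyncio.TimeoutError:
            continue  # nothing queued; re-check the stop flag
        print("processed", item)

async def main():
    q, stop = Queue(), asyncio.Event()
    for i in range(3):
        q.put_nowait(i)
    task = asyncio.create_task(worker(q, stop))
    await asyncio.sleep(0.05)
    stop.set()
    await task

asyncio.run(main())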
Example #5
File: core.py Project: hydrapheetz/irc3
class Core(object):

    def __init__(self, bot):
        self.bot = bot
        self.timeout = int(self.bot.config.get('timeout'))
        self.ping_queue = Queue(loop=bot.loop)

    def connection_made(self):
        self.bot.loop.call_later(self.timeout, self.check_ping)
        self.ping_queue.put_nowait(self.bot.loop.time())

    def check_ping(self):  # pragma: no cover
        # check if we received a ping
        # reconnect if queue is empty
        self.bot.log.debug(
            'Ping queue size: {}'.format(self.ping_queue.qsize()))
        if self.ping_queue.empty():
            self.bot.loop.call_soon(self.bot.protocol.transport.close)
        else:
            self.bot.loop.call_later(self.timeout, self.check_ping)
        while not self.ping_queue.empty():
            self.ping_queue.get_nowait()

    @event(rfc.PING)
    def pong(self, data):
        """PING reply"""
        self.ping_queue.put_nowait(self.bot.loop.time())
        self.bot.send('PONG ' + data)

    @event(rfc.NEW_NICK)
    def recompile(self, nick=None, new_nick=None, **kw):
        """recompile regexp on new nick"""
        if self.bot.nick == nick.nick:
            self.bot.config['nick'] = new_nick
            self.bot.recompile()

    @event(rfc.ERR_NICK)
    def badnick(self, me=None, nick=None, **kw):
        """Use alt nick on nick error"""
        if me == '*':
            self.bot.set_nick(self.bot.nick + '_')
        self.bot.log.debug('Trying to regain nickname in 30s...')
        self.bot.loop.call_later(30, self.bot.set_nick, self.bot.original_nick)

    @event(rfc.RPL_ENDOFMOTD)
    def autojoin(self, **kw):
        """autojoin at the end of MOTD"""
        self.bot.config['nick'] = kw['me']
        self.bot.recompile()
        channels = utils.as_list(self.bot.config.get('autojoins', []))
        for channel in channels:
            channel = utils.as_channel(channel)
            self.bot.log.info('Trying to join %s', channel)
            self.bot.join(channel)
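
The ping watchdog above re-arms itself with loop.call_later only while pings keep arriving, and drains the queue each round so the next window starts empty. A reduced sketch of that pattern outside irc3, with a plain callback and hypothetical timings:

import asyncio
from asyncio import Queue

def check_ping(loop, q: Queue, timeout: float):
    print("ping queue size:", q.qsize())
    if q.empty():
        print("no ping received, reconnecting...")
    else:
        loop.call_later(timeout, check_ping, loop, q, timeout)
    while not q.empty():
        q.get_nowait()  # drain so the next window starts empty

async def main():
    loop = asyncio.get_running_loop()
    q = Queue()
    q.put_nowait(loop.time())  # pretend a PING just arrived
    loop.call_later(0.1, check_ping, loop, q, 0.1)
    await asyncio.sleep(0.3)   # one healthy round, then "reconnect"

asyncio.run(main())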
Example #6
    def as_completed(
        self,
        coroutines: typing.Iterable[typing.Coroutine],
        limit: typing.Optional[int] = None
    ) -> typing.Generator[typing.Coroutine, None, None]:
        """Like "asyncio.as_completed",
        run and iter coroutines out of the pool.

        :param limit: set to "self.concurrent_limit" by default,
        this "limit" is not shared with pool`s limit
        """
        limit = self.concurrent_limit if limit is None else limit

        coroutines = iter(coroutines)
        queue = Queue(loop=self.loop)
        todo = []

        def _done_callback(f):
            queue.put_nowait(f)
            todo.remove(f)
            try:
                nf = asyncio.ensure_future(next(coroutines))
                nf.add_done_callback(_done_callback)
                todo.append(nf)
            except StopIteration:
                pass

        async def _wait_for_one():
            f = await queue.get()
            return f.result()

        if limit <= 0:
            fs = {
                asyncio.ensure_future(cor, loop=self.loop)
                for cor in coroutines
            }
        else:
            fs = {
                asyncio.ensure_future(cor, loop=self.loop)
                for cor in islice(coroutines, 0, limit)
            }
        for f in fs:
            f.add_done_callback(_done_callback)
            todo.append(f)

        while len(todo) > 0 or queue.qsize() > 0:
            yield _wait_for_one()
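
For reference, the generator above can be exercised outside the pool class; this is a sketch of the same windowed pattern as a free function with a small driver, assuming limit > 0 (the method additionally treats limit <= 0 as "schedule everything at once"):

import asyncio
from itertools import islice
from asyncio import Queue

def as_completed_window(coroutines, limit):
    # same shape as the method above, minus the pool plumbing
    coroutines = iter(coroutines)
    queue = Queue()
    todo = []

    def _done_callback(f):
        queue.put_nowait(f)
        todo.remove(f)
        try:
            nf = asyncio.ensure_future(next(coroutines))
            nf.add_done_callback(_done_callback)
            todo.append(nf)
        except StopIteration:
            pass

    async def _wait_for_one():
        return (await queue.get()).result()

    for cor in islice(coroutines, limit):
        f = asyncio.ensure_future(cor)
        f.add_done_callback(_done_callback)
        todo.append(f)

    while len(todo) > 0 or queue.qsize() > 0:
        yield _wait_for_one()

async def main():
    async def job(i):
        await asyncio.sleep(0.01 * (5 - i))
        return i

    # at most 3 jobs in flight; results arrive in completion order
    for fut in as_completed_window((job(i) for i in range(5)), 3):
        print(await fut)

asyncio.run(main())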
Example #7
def _inspect_queue(queue: Queue):
    """
    Log queue size.
    """
    queue_size = queue.qsize()
    if queue_size < 2:
        return

    if queue_size > 15:
        logging_level = logging.ERROR
    elif queue_size > 10:
        logging_level = logging.WARNING
    elif queue_size > 5:
        logging_level = logging.INFO
    else:
        logging_level = logging.DEBUG

    logger.log(logging_level, 'External rates queue size: %s', queue_size)
Example #8
class OthelloPlayer:
    def __init__(self,
                 config: Config,
                 client,
                 mode="gui",
                 weight_table=0,
                 c=10,
                 mc=False):
        """
        :param config:
        :param agent.model.OthelloModel|None model:
        :param TreeNode mtcs_info:
        :parameter OthelloModelAPI api:
        """
        self.config = config
        self.client = client
        self.mode = mode
        self.play_config = self.config.play
        self.weight_table = weight_table
        self.c = c
        self.mc = mc

        # mc_tree
        self.num_tree, self.win_tree, self.policy_tree = createTrees()

        # expanded
        self.expanded = set()  # expanded nodes, stored as a set (keys of the p dict)
        self.now_expanding = set()

        # threads
        self.prediction_queue = Queue(
            self.play_config.prediction_queue_size)  # queue size for parallel prediction
        self.sem = asyncio.Semaphore(
            self.play_config.parallel_search_num)  # caps the number of parallel search coroutines
        self.loop = asyncio.get_event_loop()

        # for gui
        if self.mode == 'gui':
            self.thinking_history = None  # for fun
            self.avalable = None
            self.allow_resign = False
        elif self.mode == 'self_play':
            self.moves = []
            self.allow_resign = True
        self.test_mode = False
        # params
        self.running_simulation_num = 0

        # solver
        self.solver = OthelloSolver()  # minimax tree solver

    def win_rate(self, node):
        return self.win_tree[node] / (self.num_tree[node] + 1e-5)

# think_and_play

    def think_and_play(self, own, enemy):
        """play tmd:方案:50步以前使用深度學習mctree,若tree到達50步深度后再用minmaxtree; 50步以後直接用minmaxtree
        若搜不到/超時再用之前構建的樹"""
        # renew env
        self.start_time = time.time()
        env = OthelloEnv().update(own, enemy, next_to_play=Stone.black)
        node = create_node(env)

        # from move 50 on, run minimax search directly; if it finds nothing, fall back to deep learning
        if env.epoch >= self.play_config.use_solver_turn:
            logger.warning(f"Entering minmax_tree process")
            ret = self._solver(node)
            if ret:  # not save move as play data
                return ret
        else:  # before move 50, use deep learning directly
            for t1 in range(self.play_config.thinking_loop):  # search moves up to 3 times
                logger.warning(f"Entering {t1} thinking_loop")
                self._expand_tree(env, node)
                policy, action, value_diff = self._calc_policy_and_action(node)
                # stop early when the turn is still early, or when the value gap is small and the action has enough visits
                if env.epoch <= self.play_config.start_rethinking_turn or \
                        (value_diff > -0.01 and self.num_tree[node][action] >= self.play_config.required_visit_to_decide_action):
                    break

            # record or return
            if self.mode == 'gui':
                self._update_thinking_history(own, enemy, action, policy)
                self._update_avalable(own, enemy, action, policy)
            elif self.mode == 'self_play':
                if self.allow_resign:  # resign when the win rate is too low
                    if self.play_config.resign_threshold is not None and\
                        np.max(self.win_rate(node)-(self.num_tree[node]==0)*10) <= self.play_config.resign_threshold:
                        if env.epoch >= self.config.play.allowed_resign_turn:
                            return AcNQ(None, 0, 0)  # means resign
                        else:
                            logger.debug(
                                f"Want to resign but disallowed turn {env.epoch} < {self.config.play.allowed_resign_turn}"
                            )
                # save the play data
                saved_policy = self.__calc_policy_by_prob(
                    node
                ) if self.config.play_data.save_policy_of_tau_1 else policy
                self.__save_data_to_moves(own, enemy, saved_policy)
            return AcNQ(action=action,
                        n=self.num_tree[node][action],
                        q=self.win_rate(node)[action])

    def _solver(self, node):
        # use the solver to run a minimax search
        action, point = self.solver.solve(node.black,
                                          node.white,
                                          Stone(node.next_to_play),
                                          exactly=True)
        if action is None:  # the solver found nothing
            return None
        else:
            policy = np.zeros(64)
            policy[action] = 1
            update_num_tree_with_one_or_moresides(self.num_tree, node, action,
                                                  ["set"], [999])
            update_win_tree_with_one_or_moresides(self.win_tree, node, action,
                                                  ["set"],
                                                  [np.sign(point) * 999])
            update_policy_tree_with_one_or_moresides(self.policy_tree, node,
                                                     ["set"], [policy])
            self._update_thinking_history(node.black, node.white, action,
                                          policy)
        return AcNQ(action=action, n=999, q=np.sign(point))

    def _expand_tree(self, env, node):
        if env.epoch > 0:  # expand the tree
            self._expand_tree_2(env.chessboard.black, env.chessboard.white)
        else:
            self._set_first_move(node)

    def _expand_tree_2(self, own, enemy):
        # params
        loop = self.loop
        self.running_simulation_num = 0
        # n simulation/move
        coroutine_list = []
        # 200 simulations
        for it in range(self.play_config.simulation_num_per_move):
            coroutine_list.append(self.__start_search_my_move(own, enemy))
        coroutine_list.append(self.__prediction_worker())
        loop.run_until_complete(asyncio.gather(*coroutine_list))

    async def __start_search_my_move(self, own, enemy):
        # set params
        self.running_simulation_num += 1
        # wait on the semaphore
        async with self.sem:  # at most parallel_search_num (8) concurrent searches
            env = OthelloEnv().update(own, enemy, Stone.black)
            leaf_v = await self.___recursive_simulation(env, is_root_node=True)
            self.running_simulation_num -= 1
            return leaf_v

    async def ___recursive_simulation(self,
                                      env: OthelloEnv,
                                      is_root_node=False):
        "fertilize tree process"
        # get both keys
        node, another_side_node = create_both_nodes(env)
        if self.test_mode:
            if node not in map.keys():  # NB: `map` shadows the builtin; assumed to be a module-level dict
                map[node] = env.epoch

        # return condition 1
        if env.done:
            if env.result == Result.black:
                return 1
            elif env.result == Result.white:
                return -1
            else:
                return 0

        # return condition 2: use the solver (after move 50, minimax)
        if env.epoch >= self.config.play.use_solver_turn_in_simulation:
            action, point = self.solver.solve(node.black,
                                              node.white,
                                              Stone(node.next_to_play),
                                              exactly=False)
            if action:
                point = point if env.next_to_play == Stone.black else -point
                leaf_v = np.sign(point)
                leaf_p = np.zeros(64)
                leaf_p[action] = 1
                # update tree
                update_num_tree_with_one_or_moresides(self.num_tree, node,
                                                      action, ["plus", "plus"],
                                                      [1, 1])  # +1 visit for the played position
                update_win_tree_with_one_or_moresides(
                    self.win_tree, node, action, ["plus", "minus"],
                    [leaf_v, leaf_v])  # +/-1 win count for this move (win)
                update_policy_tree_with_one_or_moresides(
                    self.policy_tree, node, ["set", "set"],
                    [leaf_p, leaf_p])  # the position this node should play
                return np.sign(point)
            if time.time() - self.start_time >= 55:
                return 0
        # return condition 3: expand the tree (up to move 50, deep learning)
        while node in self.now_expanding:  # two search coroutines hitting the same node would conflict
            await asyncio.sleep(self.config.play.wait_for_expanding_sleep_sec)
        # is leaf
        if node not in self.expanded:  # reach leaf node
            leaf_v = await self.____expand_leaf_node(env)
            if env.next_to_play == Stone.black:
                return leaf_v  # Value for black
            else:
                return -leaf_v  # Value for white == -Value for black
        else:  # not leaf do
            virtual_loss_for_w = self.config.play.virtual_loss if env.next_to_play == Stone.black else -self.config.play.virtual_loss
            action_t = self.____decide_action(env, is_root_node)  # UCB formula
            update_num_tree_with_one_or_moresides(
                self.num_tree, node, action_t, ["plus"],
                [self.config.play.virtual_loss])
            update_win_tree_with_one_or_moresides(self.win_tree, node,
                                                  action_t, ["minus"],
                                                  [virtual_loss_for_w])
            env.do(action_t)
            leaf_v = await self.___recursive_simulation(env)  # next move
            # on returning search path
            update_num_tree_with_one_or_moresides(
                self.num_tree, node, action_t, ["plus", "plus"],
                [-self.config.play.virtual_loss + 1, 1])
            update_win_tree_with_one_or_moresides(
                self.win_tree, node, action_t, ["plus", "minus"],
                [virtual_loss_for_w + leaf_v, leaf_v])
            if self.test_mode:
                logger.warning("%s %s", map[node], leaf_v)
        return leaf_v

    async def ____expand_leaf_node(self, env):
        "use to expand new leaf"
        node, another_side_node = create_both_nodes(env)
        self.now_expanding.add(node)

        # flip + rotate
        rotate_right_num, is_flip_vertical, black_ary, white_ary = flip_and_rotate_board_to_array(
            env.chessboard.black, env.chessboard.white)

        # predict
        state = [
            white_ary, black_ary
        ] if env.next_to_play == Stone.white else [black_ary, white_ary]
        future = await self.predict(np.array(state))  # type: Future
        await future
        leaf_p, leaf_v = future.result()

        # reverse rotate and flip about leaf_p
        leaf_p = flip_and_rotate_result(leaf_p, rotate_right_num,
                                        is_flip_vertical)
        if self.mc:
            black = env.chessboard.black
            leaf_v += np.sum(bit_to_array(black, 64) * self.weight_table)

        # update
        update_policy_tree_with_one_or_moresides(self.policy_tree, node,
                                                 ["set", "set"],
                                                 [leaf_p, leaf_p])
        self.expanded.add(node)
        self.now_expanding.remove(node)
        return leaf_v

    def ____decide_action(self, env, is_root_node):
        # find correct moves
        node = create_node(env)
        legal_moves = find_correct_moves(
            node.black, node.white
        ) if env.next_to_play == Stone.black else find_correct_moves(
            node.white, node.black)

        # vn = formula here
        vn = max(np.sqrt(np.sum(self.num_tree[node])),
                 1)  # SQRT of sum(N(s, b); for all b)

        # p = formula here  re-normalize in legal moves
        vp = self.policy_tree[node]
        vp = vp * bit_to_array(legal_moves, 64)
        temperature = 1
        if np.sum(vp) > 0:
            temperature = min(
                np.exp(1 -
                       np.power(env.epoch / self.config.play.policy_decay_turn,
                                self.config.play.policy_decay_power)), 1)
            vp = normalize(vp, temperature)
        # add noise 0.75*p + 0.25*noise
        if is_root_node and self.play_config.noise_eps > 0:  # Is it correct?? -> (1-e)p + e*Dir(alpha)
            noise = dirichlet_noise_of_mask(legal_moves,
                                            self.play_config.dirichlet_alpha)
            vp = (1 - self.play_config.noise_eps
                  ) * vp + self.play_config.noise_eps * noise

        # u_ = formula here
        vpn = vp * vn / (1 + self.num_tree[node])
        if env.next_to_play == Stone.black:
            vpn_with_weight = (self.win_rate(node) * self.c + vpn + 1000 +
                               self.weight_table) * bit_to_array(
                                   legal_moves, 64)
        else:
            vpn_with_weight = (-self.win_rate(node) * self.c + vpn + 1000 +
                               self.weight_table) * bit_to_array(
                                   legal_moves, 64)
        action_t = int(np.argmax(vpn_with_weight))
        return action_t

    async def __prediction_worker(self):
        " do prediction in this worker"
        margin = 10  # wait for at most 10 epochs x 0.0001
        while self.running_simulation_num > 0 or margin > 0:
            if self.prediction_queue.empty():
                if margin > 0:
                    margin -= 1
                await asyncio.sleep(
                    self.config.play.prediction_worker_sleep_sec)
                continue
            item_list = [
                self.prediction_queue.get_nowait()
                for _ in range(self.prediction_queue.qsize())
            ]  # type: list[QItem]
            data = np.array([x.state for x in item_list])
            policy_ary, value_ary = self.client.forward(
                data)  # shape=(N, 2, 8, 8)
            for p, v, item in zip(policy_ary, value_ary, item_list):
                item.future.set_result((p, v))

    def _set_first_move(self, node):
        # choose the first move: num_tree = [1]; policy_tree = 1/n over every legal square
        legal_array = bit_to_array(find_correct_moves(node.black, node.white),
                                   64)
        action = np.argmax(legal_array)
        update_num_tree_with_one_or_moresides(self.num_tree, node, action,
                                              ["set"], [1])
        update_win_tree_with_one_or_moresides(self.win_tree, node, action,
                                              ["set"], [0])
        update_policy_tree_with_one_or_moresides(
            self.policy_tree, node, ["set"],
            [legal_array / np.sum(legal_array)])

    def _calc_policy_and_action(self, node):
        policy = self._calc_policy(
            node.black, node.white
        )  # prior: before change_tau_turn, p[n] is proportional to num_tree[key][n]; afterwards p is one-hot at the most-visited position
        action = int(np.random.choice(range(64),
                                      p=policy))  # sample a move with the prior probabilities p
        action_by_value = int(
            np.argmax(self.win_rate(node) +
                      (self.num_tree[node] > 0) * 100))  # the visited position with the highest q (win rate)
        value_diff = self.win_rate(node)[action] - self.win_rate(node)[
            action_by_value]
        return policy, action, value_diff

    def _calc_policy(self, own, enemy):
        env = OthelloEnv().update(own, enemy, Stone.black)
        node = create_node(env)
        # if turn < 4
        if env.epoch < self.play_config.change_tau_turn:
            return self.__calc_policy_by_prob(node)  # p value
        else:
            return self.__calc_policy_by_max(node)

    def __calc_policy_by_prob(self, node):
        return self.num_tree[node] / np.sum(self.num_tree[node])  # tau = 1

    def __calc_policy_by_max(self, node):
        action = np.argmax(self.num_tree[node])  # tau = 0
        ret = np.zeros(64)  # one hot
        ret[action] = 1
        return ret

    def _update_thinking_history(self, black, white, action, policy):
        node = TreeNode(black, white, Stone.black.value)
        next_key = self.__get_next_key(black, white, action)
        self.thinking_history = \
            LastAcNQ(action, policy, list(self.win_rate(node)), list(self.num_tree[node]),
                        list(self.win_rate(next_key)), list(self.num_tree[next_key]))

    def _update_avalable(self, black, white, action, policy):
        node = TreeNode(black, white, Stone.black.value)
        next_key = self.__get_next_key(black, white, action)
        self.avalable = LastAva(
            find_correct_moves(node.black, node.white),
            find_correct_moves(next_key.white, next_key.black))

    def __get_next_key(self, own, enemy, action):
        env = OthelloEnv().update(own, enemy, Stone.black)
        env.do(action)
        return create_node(env)

    def __save_data_to_moves(self, own, enemy, policy):
        for flip in [False, True]:
            for rot_right in range(4):
                self.moves.append(
                    flip_and_rotate_right(flip, rot_right, own, enemy, policy))

    async def predict(self, x):
        future = self.loop.create_future()
        await self.prediction_queue.put(QItem(x, future))
        return future
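
predict() and __prediction_worker() above form a batching bridge: each search coroutine enqueues its state together with a Future and awaits it, while the worker drains the queue with qsize()/get_nowait() and answers a whole batch per model call. A stripped-down sketch of the same pattern with a fake model:

import asyncio
from asyncio import Queue

async def predict(queue: Queue, x):
    # enqueue the state together with a Future that will carry the answer
    future = asyncio.get_running_loop().create_future()
    await queue.put((x, future))
    return await future

async def prediction_worker(queue: Queue, n_requests: int):
    answered = 0
    while answered < n_requests:
        if queue.empty():
            await asyncio.sleep(0.001)
            continue
        # drain everything queued right now into one batch
        batch = [queue.get_nowait() for _ in range(queue.qsize())]
        for x, future in batch:
            future.set_result(x * 2)  # fake model output
        answered += len(batch)

async def main():
    q = Queue()
    results, _ = await asyncio.gather(
        asyncio.gather(*(predict(q, i) for i in range(4))),
        prediction_worker(q, 4))
    print(results)  # -> [0, 2, 4, 6]

asyncio.run(main())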
Example #9
class DownloadingService(metaclass=Metaclass):
    def __init__(self, crawler_manager: CrawlerManager,
                 downloader_manager: DownloaderManager):
        self.crawler_manager = crawler_manager
        self.downloader_manager = downloader_manager

        self.running_crawler = {}

        self.start_time = int(time.time())

        self.schedule_queue = Queue()

        self.QUIT = False

    def get_running_messages(self):
        msgs = []
        for crawler_name, downloader in self.downloader_manager.downloaders.items():
            status = "Running" if downloader.is_running else "Not Running"
            msg = "Status: {}" \
                  " | Crawler Name: {}, " \
                  " | Concurrent Number: {}," \
                  " | Request Number: {}," \
                  " | Finished Number: {}," \
                  " | Failed Number: {}".\
                format(status, crawler_name, downloader.concurrent_number, downloader.pushed_request_count,
                       downloader.finished_request_count, downloader.failed_request_count)
            msgs.append(msg)

        msgs.append("运行时长:{}s".format(time.time() - self.start_time))
        return "\n".join(msgs)

    async def status_print_loop(self):
        while True:
            running_time = int(time.time()) - self.start_time
            if running_time > 0 and running_time % 60 == 0:
                log.info(self.get_running_messages())
            await asyncio.sleep(1)
            if self.QUIT:
                log.info(self.get_running_messages())
                break
        log.debug("Quit status print loop")

    async def append_new_crawling_mission(self, crawler_name, crawler_params):
        await self.schedule_queue.put((crawler_name, crawler_params))

    async def task_scheduling_loop(self):
        while True:
            try:
                crawler_name, crawler_params = self.schedule_queue.get_nowait()
            except QueueEmpty:
                await asyncio.sleep(1)
            else:
                crawler = self.crawler_manager.create_crawler(
                    crawler_name, crawler_params)
                downloader = self.downloader_manager.get_or_create_downloader(
                    crawler)
                await self.downloader_manager.schedule(crawler, downloader)
            finally:
                if self.QUIT and self.schedule_queue.qsize() == 0:
                    break
        log.debug("Quit task scheduling loop")

    async def start(self, forever=True):

        self.start_time = int(time.time())

        f1 = asyncio.ensure_future(self.task_scheduling_loop())
        f2 = asyncio.ensure_future(
            self.downloader_manager.reschedule_downloaders())
        f3 = asyncio.ensure_future(self.status_print_loop())

        if not forever:

            def callback(_):
                log.debug("Downloading Mission Done!")
                self.QUIT = True

            f2.add_done_callback(callback)

        await asyncio.gather(f1, f2, f3)
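
task_scheduling_loop polls with get_nowait(), backs off with a sleep on QueueEmpty, and exits only once QUIT is set and qsize() reports an empty queue. The same consume-until-quit shape in a runnable sketch (illustrative names):

import asyncio
from asyncio import Queue, QueueEmpty

async def scheduling_loop(q: Queue, quit_flag: asyncio.Event):
    while True:
        try:
            job = q.get_nowait()
        except QueueEmpty:
            await asyncio.sleep(0.1)  # back off instead of busy-waiting
        else:
            print("scheduling", job)
        if quit_flag.is_set() and q.qsize() == 0:
            break

async def main():
    q, quit_flag = Queue(), asyncio.Event()
    for name in ("crawler_a", "crawler_b"):
        q.put_nowait(name)
    quit_flag.set()  # stop once the queue has drained
    await scheduling_loop(q, quit_flag)

asyncio.run(main())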
Example #10
class Ant(abc.ABC):
    response_pipelines: typing.List[Pipeline] = []
    request_pipelines: typing.List[Pipeline] = []
    item_pipelines: typing.List[Pipeline] = []
    request_cls = Request
    response_cls = Response
    request_timeout = DEFAULT_TIMEOUT.total
    request_retries = 3
    request_retry_delay = 5
    request_proxies: typing.List[typing.Union[str, URL]] = []
    request_max_redirects = 10
    request_allow_redirects = True
    response_in_stream = False
    connection_limit = 100  # see "TCPConnector" in "aiohttp"
    connection_limit_per_host = 0
    concurrent_limit = 100

    def __init__(self,
                 loop: typing.Optional[asyncio.AbstractEventLoop] = None):
        self.loop = loop if loop is not None else asyncio.get_event_loop()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.session: aiohttp.ClientSession = ClientSession(
            response_class=self.response_cls,
            connector=aiohttp.TCPConnector(
                limit=self.connection_limit,
                enable_cleanup_closed=True,
                limit_per_host=self.connection_limit_per_host))
        # support for coroutine concurrency control
        self._queue = Queue(loop=self.loop)
        self._done_queue = Queue(loop=self.loop)
        self._running_count = 0
        self._is_closed = False
        # report var
        self._reports: typing.DefaultDict[
            str, typing.List[int]] = defaultdict(lambda: [0, 0])  # [last reported, total]
        self._drop_reports: typing.DefaultDict[
            str, typing.List[int]] = defaultdict(lambda: [0, 0])
        self._start_time = time.time()
        self._last_time = self._start_time
        self._report_slot = 60  # report once after one minute by default

    @property
    def name(self):
        return self.__class__.__name__

    @property
    def is_running(self) -> bool:
        return self._running_count > 0

    async def request(
            self,
            url: typing.Union[str, URL],
            method: str = aiohttp.hdrs.METH_GET,
            params: typing.Optional[dict] = None,
            headers: typing.Optional[dict] = None,
            cookies: typing.Optional[dict] = None,
            data: typing.Optional[typing.Union[typing.AnyStr, typing.Dict,
                                               typing.IO]] = None,
            proxy: typing.Optional[typing.Union[str, URL]] = None,
            timeout: typing.Optional[typing.Union[int, float]] = None,
            retries: typing.Optional[int] = None,
            response_in_stream: typing.Optional[bool] = None) -> Response:
        if not isinstance(url, URL):
            url = URL(url)
        if proxy and not isinstance(proxy, URL):
            proxy = URL(proxy)
        elif proxy is None:
            proxy = self.get_proxy()
        if timeout is None:
            timeout = self.request_timeout
        if retries is None:
            retries = self.request_retries
        if response_in_stream is None:
            response_in_stream = self.response_in_stream

        req = self.request_cls(method,
                               url,
                               timeout=timeout,
                               params=params,
                               headers=headers,
                               cookies=cookies,
                               data=data,
                               proxy=proxy,
                               response_in_stream=response_in_stream)
        req = await self._handle_thing_with_pipelines(req,
                                                      self.request_pipelines)
        self.report(req)

        if retries > 0:
            res = await self.make_retry_decorator(
                retries, self.request_retry_delay)(self._request)(req)
        else:
            res = await self._request(req)

        res = await self._handle_thing_with_pipelines(res,
                                                      self.response_pipelines)
        self.report(res)
        return res

    async def collect(self, item: Item) -> None:
        self.logger.debug('Collect item: ' + str(item))
        await self._handle_thing_with_pipelines(item, self.item_pipelines)
        self.report(item)

    async def open(self) -> None:
        self.logger.info('Opening')
        for pipeline in itertools.chain(self.item_pipelines,
                                        self.response_pipelines,
                                        self.request_pipelines):
            obj = pipeline.on_spider_open()
            if asyncio.iscoroutine(obj):
                await obj

    async def close(self) -> None:
        await self.wait_scheduled_coroutines()

        for pipeline in itertools.chain(self.item_pipelines,
                                        self.response_pipelines,
                                        self.request_pipelines):
            obj = pipeline.on_spider_close()
            if asyncio.iscoroutine(obj):
                await obj

        await self.session.close()

        self._is_closed = True
        self.logger.info('Closed')

    @abc.abstractmethod
    async def run(self) -> None:
        """App custom entrance"""

    async def main(self) -> None:
        try:
            await self.open()
            await self.run()
        except Exception as e:
            self.logger.exception('Run ant with ' + e.__class__.__name__)
        try:
            await self.close()
        except Exception as e:
            self.logger.exception('Close ant with ' + e.__class__.__name__)
        # total report
        for name, counts in self._reports.items():
            self.logger.info('Get {:d} {:s} in total'.format(counts[1], name))
        for name, counts in self._drop_reports.items():
            self.logger.info('Drop {:d} {:s} in total'.format(counts[1], name))
        self.logger.info('Run {:s} in {:f} seconds'.format(
            self.__class__.__name__,
            time.time() - self._start_time))

    @staticmethod
    def make_retry_decorator(
            retries: int, delay: float
    ) -> typing.Callable[[typing.Callable], typing.Callable]:
        return retry(wait=wait_fixed(delay),
                     retry=(retry_if_result(lambda res: res.status >= 500)
                            | retry_if_exception_type(
                                exception_types=aiohttp.ClientError)),
                     stop=stop_after_attempt(retries + 1))

    def get_proxy(self) -> typing.Optional[URL]:
        """Chose a proxy, default by random"""
        try:
            return URL(random.choice(self.request_proxies))
        except IndexError:
            return None

    def schedule_coroutine(self, coroutine: typing.Coroutine) -> None:
        """Like "asyncio.ensure_future", it schedule coroutine in event loop
        and return immediately.

        Call "self.wait_scheduled_coroutines" make sure all coroutine has been
        done.
        """
        def _done_callback(f):
            self._running_count -= 1
            self._done_queue.put_nowait(f)
            try:
                if (self.concurrent_limit == -1
                        or self._running_count < self.concurrent_limit):
                    next_coroutine = self._queue.get_nowait()
                    self._running_count += 1
                    asyncio.ensure_future(
                        next_coroutine,
                        loop=self.loop).add_done_callback(_done_callback)
            except QueueEmpty:
                pass

        if self._is_closed:
            self.logger.warning('This pool has been closed!')
            return

        if (self.concurrent_limit == -1
                or self._running_count < self.concurrent_limit):
            self._running_count += 1
            asyncio.ensure_future(
                coroutine, loop=self.loop).add_done_callback(_done_callback)
        else:
            self._queue.put_nowait(coroutine)

    def schedule_coroutines(
            self, coroutines: typing.Iterable[typing.Coroutine]) -> None:
        """A short way to schedule many coroutines.
        """
        for coroutine in coroutines:
            self.schedule_coroutine(coroutine)

    async def wait_scheduled_coroutines(self):
        """Wait scheduled coroutines to be done, can be called many times.
        """
        while self._running_count > 0 or self._done_queue.qsize() > 0:
            await self._done_queue.get()

    def as_completed(
        self,
        coroutines: typing.Iterable[typing.Coroutine],
        limit: typing.Optional[int] = None
    ) -> typing.Generator[typing.Coroutine, None, None]:
        """Like "asyncio.as_completed",
        run and iter coroutines out of the pool.

        :param limit: set to "self.concurrent_limit" by default,
        this "limit" is not shared with pool`s limit
        """
        limit = self.concurrent_limit if limit is None else limit

        coroutines = iter(coroutines)
        queue = Queue(loop=self.loop)
        todo = []

        def _done_callback(f):
            queue.put_nowait(f)
            todo.remove(f)
            try:
                nf = asyncio.ensure_future(next(coroutines))
                nf.add_done_callback(_done_callback)
                todo.append(nf)
            except StopIteration:
                pass

        async def _wait_for_one():
            f = await queue.get()
            return f.result()

        if limit <= 0:
            fs = {
                asyncio.ensure_future(cor, loop=self.loop)
                for cor in coroutines
            }
        else:
            fs = {
                asyncio.ensure_future(cor, loop=self.loop)
                for cor in islice(coroutines, 0, limit)
            }
        for f in fs:
            f.add_done_callback(_done_callback)
            todo.append(f)

        while len(todo) > 0 or queue.qsize() > 0:
            yield _wait_for_one()

    async def as_completed_with_async(
        self,
        coroutines: typing.Iterable[typing.Coroutine],
        limit: typing.Optional[int] = None,
        raise_exception: bool = True,
    ) -> typing.AsyncGenerator[typing.Any, None]:
        """as_completed`s async version, can catch and log exception inside.
        """
        for coro in self.as_completed(coroutines, limit=limit):
            try:
                yield await coro
            except Exception as e:
                if raise_exception:
                    raise e
                else:
                    self.logger.exception('Get exception {:s} in '
                                          '"as_completed_with_async"'.format(
                                              str(e)))

    def report(self, thing: Things, dropped: bool = False) -> None:
        now_time = time.time()
        if now_time - self._last_time > self._report_slot:
            self._last_time = now_time
            for name, counts in self._reports.items():
                count = counts[1] - counts[0]
                counts[0] = counts[1]
                self.logger.info(
                    'Get {:d} {:s} in total with {:d}/{:d}s rate'.format(
                        counts[1], name, count, self._report_slot))
            for name, counts in self._drop_reports.items():
                count = counts[1] - counts[0]
                counts[0] = counts[1]
                self.logger.info(
                    'Drop {:d} {:s} in total with {:d}/{:d}s rate'.format(
                        counts[1], name, count, self._report_slot))
        report_type = thing.__class__.__name__
        if dropped:
            reports = self._drop_reports
        else:
            reports = self._reports
        counts = reports[report_type]
        counts[1] += 1

    async def _handle_thing_with_pipelines(
            self, thing: Things, pipelines: typing.List[Pipeline]) -> Things:
        """Process thing one by one, break the process chain when get
        exception.
        """
        self.logger.debug('Process thing: ' + str(thing))
        raw_thing = thing
        for pipeline in pipelines:
            try:
                thing = pipeline.process(thing)
                if asyncio.iscoroutine(thing):
                    thing = await thing
            except Exception as e:
                if isinstance(e, ThingDropped):
                    self.report(raw_thing, dropped=True)
                raise e
        return thing

    async def _request(self, req: Request) -> Response:
        if req.proxy is not None:
            # proxy auth does not work across many requests in one session;
            # add the auth header to fix it
            if req.proxy.scheme == 'http' and req.proxy.user is not None:
                req.headers[aiohttp.hdrs.PROXY_AUTHORIZATION] = \
                    aiohttp.BasicAuth.from_url(req.proxy).encode()

        # cookies in headers, params in url
        req_kwargs = dict(headers=req.headers,
                          data=req.data,
                          timeout=req.timeout,
                          proxy=req.proxy,
                          max_redirects=self.request_max_redirects,
                          allow_redirects=self.request_allow_redirects)
        response = await self.session._request(req.method, req.url,
                                               **req_kwargs)

        if not req.response_in_stream:
            await response.read()
            response.close()
            await response.wait_for_close()
        return response
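
schedule_coroutine and wait_scheduled_coroutines together implement a fixed-size coroutine pool: the done callback either pulls the next queued coroutine into the freed slot or leaves the slot idle, and the waiter drains the done queue while anything is still running. A stripped-down runnable sketch of that mechanism (illustrative names, no close handling):

import asyncio
from asyncio import Queue, QueueEmpty

class MiniPool:
    def __init__(self, limit: int):
        self.limit = limit
        self.running = 0
        self._pending = Queue()  # coroutines waiting for a slot
        self._done = Queue()     # finished futures

    def schedule(self, coro):
        def _done_callback(f):
            self.running -= 1
            self._done.put_nowait(f)
            try:
                nxt = self._pending.get_nowait()
            except QueueEmpty:
                return
            self.running += 1
            asyncio.ensure_future(nxt).add_done_callback(_done_callback)

        if self.running < self.limit:
            self.running += 1
            asyncio.ensure_future(coro).add_done_callback(_done_callback)
        else:
            self._pending.put_nowait(coro)

    async def wait(self):
        while self.running > 0 or self._done.qsize() > 0:
            await self._done.get()

async def job(i):
    await asyncio.sleep(0.01)
    print("done", i)

async def main():
    pool = MiniPool(limit=2)  # at most two jobs in flight
    for i in range(5):
        pool.schedule(job(i))
    await pool.wait()

asyncio.run(main())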
Example #11
async def radar(radar_Q: Queue = None):

    if radar_Q is None:
        print("Radar task is terminating. The radar_Q is None. Nothing to do")
        return

    try:
        q = Queue()

        while True:

            async with async_timeout.timeout(5) as tm:
                while True:
                    try:
                        incoming_action: INCOMING_ACTION_TYPE = await radar_Q.get()
                        q.put_nowait(incoming_action)
                        STATS['number_rx'] += 1
                        STATS['max_incoming_q_size'] = max(radar_Q.qsize(), STATS['max_incoming_q_size'])
                    except Exception:
                        break  # the timeout cancels the pending get(), ending this batch

            if not tm.expired:
                continue

            time_now = datetime.utcnow()

            while not q.empty():

                item = q.get_nowait()

                icao_rec = item.actionMsg

                # print(F"DB_WORKER_RXED: {action_types} {icao_rec}")

                # await db_process_sbs1_msg2(database, sbs1_msg)
                id = item.aircraftId
                last_seen = icao_rec['last_seen']
                data = icao_rec['current']

                delta_sec = (time_now - last_seen).total_seconds()
                if delta_sec > 300:
                    print(F"@Radar: Got an old one: {id} {last_seen} {delta_sec}")
                    STATS['rx_expired'] += 1
                    remove_track(id)
                    del history[id]
                    continue

                history[id] = last_seen

                STATS['max_history_size'] = max(STATS['max_history_size'],len(history))

                if data['lon'] is not None and data['lat'] is not None:
                    update_track(id,data['lat'], data['lon'])
                    STATS['updates'] += 1

                update_table_store(id, last_seen, data)
                render_table_store()
            # endwhile

            # cleanup any lingering tracks:
            flush()

            refresh()


    except CancelledError:
        print("Cancelling radar task")

    except Exception as x:
        print(F"Exception {x}")
Example #12
async def dump1090TCPListener(incomingQ: Queue, host: str, port: int):

    LOG.info(F"Starting: Opening TCP Stream to Dump Server at: {host}:{port}")

    while True:

        try:
            stream_reader, _ = await open_connection(host, port)
            STATS['connection_closed'] = False
            STATS['connections'] += 1
        except Exception as x:
            print(F"@dump1090TCPListener: Got exception {x}")
            LOG.exception(x)
            STATS['connections_exception'] += 1
            continue

        LOG.info("Got stream_reader")
        try:
            BUFSIZE = 1024
            data = ""
            while True:

                new_data = await stream_reader.read(BUFSIZE)

                if not new_data:
                    LOG.info("receiver: connection closed")
                    STATS['connection_closed'] = True
                    return

                # append to the buffer, then break it into lines
                data += new_data.decode('utf-8')

                #print(F"#######\n{data}######")

                cur_pos = 0
                while True:
                    cr_index = data.find('\n')

                    if cr_index >= 0:  # ">= 0" so a newline at index 0 is still consumed
                        l = data[cur_pos:cr_index].strip()
                        if l:  # skip blank lines
                            incomingQ.put_nowait(l)
                            STATS["msg_rx"] += 1
                            STATS["max_incomingQ_size"] = max(
                                incomingQ.qsize(), STATS["max_incomingQ_size"])

                            if STATS["msg_rx"] % 100 == 0:
                                LOG.info(
                                    F"Rxed {STATS['msg_rx']} max_q size {STATS['max_incomingQ_size']}"
                                )

                        cur_pos = cr_index + 1
                        data = data[cur_pos:]
                        cur_pos = 0
                    else:
                        break

        except CancelledError:
            LOG.info("Cancelling")
            break
        except Exception:
            LOG.exception("General exception")

        finally:
            pass

    LOG.info(F"Exiting")
Example #13
class BackendQueue:
    def start(self, loop: asyncio.AbstractEventLoop, multiprocess=False):
        if hasattr(self, 'started') and self.started:
            # prevent a backend callback from starting more than 1 writer and creating more than 1 queue
            return
        self.multiprocess = multiprocess
        if self.multiprocess:
            self.queue = Pipe(duplex=False)
            self.worker = Process(target=BackendQueue.worker, args=(self.writer,), daemon=True)
            self.worker.start()
        else:
            self.queue = Queue()
            self.worker = loop.create_task(self.writer())
        self.started = True

    async def stop(self):
        if self.multiprocess:
            self.queue[1].send(SHUTDOWN_SENTINEL)
            self.worker.join()
        else:
            await self.queue.put(SHUTDOWN_SENTINEL)
        self.running = False

    @staticmethod
    def worker(writer):
        try:
            loop = asyncio.new_event_loop()
            loop.run_until_complete(writer())
        except KeyboardInterrupt:
            pass

    async def writer(self):
        raise NotImplementedError

    async def write(self, data):
        if self.multiprocess:
            self.queue[1].send(data)
        else:
            await self.queue.put(data)

    @asynccontextmanager
    async def read_queue(self) -> list:
        if self.multiprocess:
            msg = self.queue[0].recv()
            if msg == SHUTDOWN_SENTINEL:
                self.running = False
                yield []
            else:
                yield [msg]
        else:
            current_depth = self.queue.qsize()
            if current_depth == 0:
                update = await self.queue.get()
                if update == SHUTDOWN_SENTINEL:
                    yield []
                else:
                    yield [update]
                self.queue.task_done()
            else:
                ret = []
                count = 0
                while current_depth > count:
                    update = await self.queue.get()
                    count += 1
                    if update == SHUTDOWN_SENTINEL:
                        self.running = False
                        break
                    ret.append(update)

                yield ret

                for _ in range(count):
                    self.queue.task_done()
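
read_queue snapshots qsize() on entry so a single context manager yields everything that was already queued, then acknowledges each consumed item with task_done(). A usage sketch of the same batch-drain idea for the non-multiprocess path, with an illustrative writer and sentinel:

import asyncio
from asyncio import Queue

SHUTDOWN_SENTINEL = object()

async def writer(queue: Queue):
    # drain in batches: snapshot qsize(), then read exactly that many items
    while True:
        batch = []
        count = max(queue.qsize(), 1)  # block for at least one item
        for _ in range(count):
            item = await queue.get()
            queue.task_done()
            if item is SHUTDOWN_SENTINEL:
                print("flushed", batch)
                return
            batch.append(item)
        print("flushed", batch)

async def main():
    q = Queue()
    task = asyncio.create_task(writer(q))
    for i in range(5):
        await q.put(i)
    await q.put(SHUTDOWN_SENTINEL)
    await task

asyncio.run(main())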