Example #1
    def process_messages(self, messages):
        greenlet_to_message = {}
        processed = []

        self.logger.debug('processing %d messages', len(messages))

        for message in messages:
            try:
                g = self.pool.spawn(self.func, message)
            except:
                self.logger.exception('cannot submit jobs to pool')
                raise
            greenlet_to_message[g] = message

        for g in gevent.iwait(greenlet_to_message):
            message = greenlet_to_message.pop(g)
            try:
                if g.exception:
                    raise g.exception
            except:
                self.logger.exception('exception processing message %s',
                                      message.message_id)
            else:
                processed.append(message)

        return processed
Example #2
def chunked_requests(urls, chunk_size=100):
    # limit the number of concurrent downloads to avoid too-frequent switching between greenlets
    semaphore = Semaphore(chunk_size)
    # queue up as many greenlets as we need
    requests = [gevent.spawn(download, u, semaphore) for u in urls]
    for response in gevent.iwait(requests):
        yield response
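These snippets assume a download(url, semaphore) helper and a Semaphore import that the listing does not show. A minimal sketch under those assumptions (the use of urllib here is illustrative, not part of the original code; with gevent, monkey.patch_all() is needed for the request to be cooperative):

from gevent import monkey; monkey.patch_all()  # make the blocking I/O below cooperative
from urllib.request import urlopen

from gevent.lock import Semaphore


def download(url, semaphore):
    # the semaphore caps how many greenlets perform a request at once;
    # the rest block here until a slot frees up
    with semaphore:
        with urlopen(url) as response:
            return url, response.read()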
Example #3
    def call_kraken(self,
                    request_type,
                    request,
                    instance,
                    krakens_call,
                    context=None):
        """
        For all krakens_call, call the kraken and aggregate the responses

        return the list of all responses
        """
        # TODO: handle min_alternative_journeys
        # TODO: call first bss|bss and do not call walking|walking if no bss in first results
        record_custom_parameter('scenario', 'new_default')
        resp = []
        logger = logging.getLogger(__name__)
        futures = []
        reqctx = copy_flask_request_context()

        def worker(dep_mode, arr_mode, instance, request, flask_request_id):
            with copy_context_in_greenlet_stack(reqctx):
                return (
                    dep_mode,
                    arr_mode,
                    instance.send_and_receive(
                        request, flask_request_id=flask_request_id),
                )

        pool = gevent.pool.Pool(app.config.get('GREENLET_POOL_SIZE', 3))
        for dep_mode, arr_mode, direct_path_type in krakens_call:
            pb_request = create_pb_request(request_type, request, dep_mode,
                                           arr_mode, direct_path_type)
            # we spawn a new greenlet, it won't have access to our thread local request object so we pass the request_id
            futures.append(
                pool.spawn(worker,
                           dep_mode,
                           arr_mode,
                           instance,
                           pb_request,
                           flask_request_id=flask.request.id))

        for future in gevent.iwait(futures):
            dep_mode, arr_mode, local_resp = future.get()
            # for logging purposes we put an id in each journey
            self.nb_kraken_calls += 1
            for idx, j in enumerate(local_resp.journeys):
                j.internal_id = "{resp}-{j}".format(resp=self.nb_kraken_calls,
                                                    j=idx)

            if dep_mode == 'ridesharing':
                switch_back_to_ridesharing(local_resp, True)
            if arr_mode == 'ridesharing':
                switch_back_to_ridesharing(local_resp, False)

            fill_uris(local_resp)
            resp.append(local_resp)
            logger.debug("for mode %s|%s we have found %s journeys", dep_mode,
                         arr_mode, len(local_resp.journeys))

        return resp
Example #4
def run(opts):
    node = pyre.Pyre(Node_name)
    node.set_header(Node_name, Node_value)
    node.start()
    while not node.peers():
        print('No peers.  Waiting.')
        time.sleep(2)
    peers = node.peers()
    dbg_print(opts, 'peers: {}'.format(peers))
    dbg_print(opts, 'sending')
    peer_cycler = itertools.cycle(peers)
    send_tasks = []
    for idx in range(opts.count):
        peer = next(peer_cycler)
        msg = '{}. {}'.format(idx, opts.message)
        task = gevent.spawn(send_request, node, peer, msg, opts)
        send_tasks.append(task)
    receive_tasks = []
    for idx in range(opts.count):
        task = gevent.spawn(receive_response, node, opts)
        receive_tasks.append(task)
    # gevent.joinall(list(itertools.chain(send_tasks, receive_tasks)))
    dbg_print(opts, 'before join send_tasks')
    gevent.joinall(send_tasks)
    dbg_print(opts, 'after join send_tasks')
    print('-' * 50)
    for task in gevent.iwait(receive_tasks):
        data1 = task.value
        data2 = data1[3]
        data2 = json.loads(data2)
        print('sent: "{}"  received from {}: "{}"'.format(
            data2['request'], data2['sender'], data2['response']))
    print('-' * 50)
    node.stop()
Example #5
def chunked_requests(urls, chunk_size=100):
    semaphore = Semaphore(chunk_size)
    # here we create a semaphore that allows chunk_size downloads at a time
    requests = [gevent.spawn(download, u, semaphore) for u in urls]
    # by using the semaphore as a context manager, we ensure that only chunk_size greenlets can run the body of the context at any one time
    for response in gevent.iwait(requests):
        yield response
Example #6
def connect_all(state):
    '''
    Connect to all the configured servers in parallel. Reads/writes state.inventory.

    Args:
        state (``pyinfra.api.State`` obj): the state containing an inventory to connect to
    '''

    hosts = [host for host in state.inventory if state.is_host_in_limit(host)]

    greenlet_to_host = {
        state.pool.spawn(host.connect, state): host
        for host in hosts
    }

    with progress_spinner(greenlet_to_host.values()) as progress:
        for greenlet in gevent.iwait(greenlet_to_host.keys()):
            host = greenlet_to_host[greenlet]
            progress(host)

    # Get/set the results
    failed_hosts = set()

    for greenlet, host in six.iteritems(greenlet_to_host):
        # Raise any unexpected exception
        greenlet.get()

        if host.connection:
            state.activate_host(host)
        else:
            failed_hosts.add(host)

    # Remove those that failed, triggering FAIL_PERCENT check
    state.fail_hosts(failed_hosts, activated_count=len(hosts))
Example #7
    def call_kraken(self, request_type, request, instance, krakens_call):
        """
        For all krakens_call, call the kraken and aggregate the responses

        return the list of all responses
        """
        # TODO: handle min_alternative_journeys
        # TODO: call first bss|bss and do not call walking|walking if no bss in first results
        resp = []
        logger = logging.getLogger(__name__)
        futures = []

        def worker(dep_mode, arr_mode, instance, request, request_id):
            return (dep_mode, arr_mode, instance.send_and_receive(request, request_id=request_id))

        pool = gevent.pool.Pool(app.config.get('GREENLET_POOL_SIZE', 3))
        for dep_mode, arr_mode in krakens_call:
            pb_request = create_pb_request(request_type, request, dep_mode, arr_mode)
            # we spawn a new green thread; it won't have access to our thread-local request object, so we set request_id
            futures.append(pool.spawn(worker, dep_mode, arr_mode, instance, pb_request, request_id=flask.request.id))

        for future in gevent.iwait(futures):
            dep_mode, arr_mode, local_resp = future.get()
            # for logging purposes we put an id in each journey
            self.nb_kraken_calls += 1
            for idx, j in enumerate(local_resp.journeys):
                j.internal_id = "{resp}-{j}".format(resp=self.nb_kraken_calls, j=idx)
            if 'ridesharing' in dep_mode or 'ridesharing' in arr_mode:
                _switch_back_to_ridesharing(local_resp)
            fill_uris(local_resp)
            resp.append(local_resp)
            logger.debug("for mode %s|%s we have found %s journeys", dep_mode, arr_mode, len(local_resp.journeys))

        return resp
Example #8
    def _manage_realtime(self, request, schedules, group_by_dest=False):
        futures = []
        pool = gevent.pool.Pool(self.instance.realtime_pool_size)

        # Copy the current request context to be used in greenlet
        reqctx = utils.copy_flask_request_context()

        def worker(rt_proxy, route_point, request, stop_schedule):
            # Use the copied request context in greenlet
            with utils.copy_context_in_greenlet_stack(reqctx):
                return (
                    rt_proxy,
                    stop_schedule,
                    self._get_next_realtime_passages(rt_proxy, route_point, request),
                )

        for schedule in schedules:
            route_point = _get_route_point_from_stop_schedule(schedule)
            rt_proxy = self._get_realtime_proxy(route_point)
            if rt_proxy:
                futures.append(pool.spawn(worker, rt_proxy, route_point, request, schedule))

        for future in gevent.iwait(futures):
            rt_proxy, schedule, next_rt_passages = future.get()
            rt_proxy._update_stop_schedule(schedule, next_rt_passages, group_by_dest)
Example #9
def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None:
    """ Iteratively wait and get on passed greenlets.

    This ensures exceptions in the greenlets are re-raised as soon as possible.
    """
    for item in gevent.iwait(items):
        item.get()
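A possible call site for this helper (the fetch function here is illustrative): because iwait yields greenlets in completion order and get() re-raises, the first failure propagates without waiting for the slower greenlets.

import gevent

def fetch(n):
    gevent.sleep(0.01 * n)  # stand-in for real I/O of varying duration
    return n

greenlets = [gevent.spawn(fetch, n) for n in range(3)]
iwait_and_get(greenlets)  # re-raises as soon as any greenlet has failed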
Example #10
    def reset_update_server(self, server_name):
        from gevent.pool import Group

        group = Group()

        def local():
            import dns.resolver
            return dns.resolver.query(server_name, 'TXT').response

        def recursive():
            import dns.resolver
            from settings import NAME_SERVER
            ns = dns.resolver.query(NAME_SERVER, 'NS').response.answer[0]
            ns = ns.items[0].target.to_text()

            import socket
            ns = socket.gethostbyname(ns)

            import dns.message
            import dns.query
            q = dns.message.make_query(server_name, 'TXT')

            return dns.query.udp(q, ns)

        def public(ns):
            def _public():
                import dns.message
                import dns.query
                q = dns.message.make_query(server_name, 'TXT')
                return dns.query.udp(q, ns)

            return _public

        workers = [
            group.apply_async(i) for i in [
                local,
                recursive,
                public('119.29.29.29'),
                public('114.114.114.114'),
                public('8.8.8.8'),
            ]
        ]

        for result in gevent.iwait(workers, 10):
            if result.successful():
                result = result.value
                break

            else:
                log.exception(result.exception)

        else:
            group.kill()
            return False

        group.kill()
        result = result.answer[0]
        url = result.items[0].strings[0]
        self.set_update_url(url)
        return True
Example #11
def run_clients(number_of_clients, server_port, stop_event):
    """
    This function runs on "server" side and starts the needed
    number of test runner processes. It then waits for all
    of these processes to finish and returns.

    :param number_of_clients: Number of subprocesses to run
    :type number_of_clients: int
    :param server_port: Port on which the task server listens for client
                        connections.
    :type server_port: int
    :param stop_event: run_clients will terminate clients and return if set
    :type stop_event: Implements gevent's wait protocol (rawlink, unlink)
    """
    from distutils.spawn import find_executable

    log.info("Starting %s clients\n" % number_of_clients)

    t1 = time.time()

    cwd = os.getcwd()

    # we prefer to run ourselves as client runner.
    runner_script = os.path.abspath(inspect.getfile(inspect.currentframe()))
    if not os.path.exists(runner_script):
        # however when we are installed over setuptools, we
        # end up in an egg and there is no real path to us.
        # for those cases we register us as an executable
        runner_script = find_executable(CLIENT_RUNNER_FILE_NAME)

    assert runner_script

    command_line = '%s "%s" %%s %s' % (sys.executable, runner_script,
                                       server_port)
    # On Unix, attach session id to spawned shell, so we can kill it and its
    # child client process as a process group.
    popen_kwargs = ({} if gevent.subprocess.mswindows else dict(
        preexec_fn=os.setsid))

    clients = [
        gevent.subprocess.Popen(command_line % (worker_id + 1),
                                shell=True,
                                cwd=cwd,
                                **popen_kwargs)
        for worker_id in xrange(number_of_clients)
    ]
    try:
        for waited in gevent.iwait(clients + [stop_event]):
            if waited is stop_event:
                # Just clean up
                break
            else:
                log.info("Client %s exiting" % waited)
    finally:
        for client in clients:
            ensure_client_terminated(client)

    duration = time.time() - t1
    log.info("%s clients served within %.2f s." %
             (number_of_clients, duration))
Example #12
    def next_departures(self, request):
        resp = self.__stop_times(request, api=type_pb2.NEXT_DEPARTURES, departure_filter=request["filter"])
        if request['data_freshness'] != RT_PROXY_DATA_FRESHNESS:
            return resp

        route_points = {
            RoutePoint(stop_point=passage.stop_point, route=passage.route): _create_template_from_passage(
                passage
            )
            for passage in resp.next_departures
        }
        route_points.update(
            (RoutePoint(rp.route, rp.stop_point), _create_template_from_pb_route_point(rp))
            for rp in resp.route_points
        )

        rt_proxy = None
        futures = []
        pool = gevent.pool.Pool(self.instance.realtime_pool_size)

        # Copy the current request context to be used in greenlet
        reqctx = utils.copy_flask_request_context()

        def worker(rt_proxy, route_point, template, request, resp):
            # Use the copied request context in greenlet
            with utils.copy_context_in_greenlet_stack(reqctx):
                return (
                    resp,
                    rt_proxy,
                    route_point,
                    template,
                    self._get_next_realtime_passages(rt_proxy, route_point, request),
                )

        for route_point, template in route_points.items():
            rt_proxy = self._get_realtime_proxy(route_point)
            if rt_proxy:
                futures.append(pool.spawn(worker, rt_proxy, route_point, template, request, resp))

        for future in gevent.iwait(futures):
            resp, rt_proxy, route_point, template, next_rt_passages = future.get()
            rt_proxy._update_passages(resp.next_departures, route_point, template, next_rt_passages)

        # sort
        def sorter(p):
            return p.stop_date_time.departure_date_time

        resp.next_departures.sort(key=sorter)
        count = request['count']
        if len(resp.next_departures) > count:
            del resp.next_departures[count:]

        # handle pagination:
        # If real-time information exists, we have to update the pagination counts.
        if rt_proxy:
            resp.pagination.totalResult = len(resp.next_departures)
            resp.pagination.itemsOnPage = len(resp.next_departures)

        return resp
Example #15
    def test_iwait_partial(self):
        # Test that the iwait context manager allows the iterator to be
        # consumed partially without a memory leak.

        sem = Semaphore()
        let = gevent.spawn(sem.release)
        with gevent.iwait((sem,), timeout=0.01) as iterator:
            self.assertEqual(sem, next(iterator))
        let.get()
Example #17
    def log_share(self, client, diff, typ, params, job=None, header_hash=None,
                  header=None, start=None, **kwargs):
        """ Logs a share to external sources for payout calculation and
        statistics """
        #if __debug__:
        #    self.logger.debug(
        #        "Running log share with args {} kwargs {}"
        #        .format((client._id, diff, typ, params), dict(
        #            job=job, header_hash=header_hash, header=hexlify(header))))

        if typ == StratumClient.VALID_SHARE:
            self.logger.debug("Valid share accepted from worker {}.{}!"
                              .format(client.address, client.worker))
            # Grab the raw coinbase out of the job object before gevent can
            # preempt to another thread and change the value. Very important!
            coinbase_raw = job.coinbase.raw

            # Some coins use POW function to do blockhash, while others use
            # SHA256. Allow toggling which is used
            if job.pow_block_hash:
                header_hash_raw = client.algo['module'](header)[::-1]
            else:
                header_hash_raw = sha256(sha256(header).digest()).digest()[::-1]
            hash_hex = hexlify(header_hash_raw)

            submission_threads = []
            # valid network hash?
            if header_hash <= job.bits_target:
                submission_threads.append(spawn(
                    job.found_block,
                    coinbase_raw,
                    client.address,
                    client.worker,
                    hash_hex,
                    header,
                    job,
                    start))

            # check each aux chain for validity
            for chain_id, data in job.merged_data.iteritems():
                if header_hash <= data['target']:
                    submission_threads.append(spawn(
                        data['found_block'],
                        client.address,
                        client.worker,
                        header,
                        coinbase_raw,
                        job,
                        start))

            for gl in gevent.iwait(submission_threads):
                ret = gl.value
                if ret:
                    spawn(self.add_block, **gl.value)
                else:
                    self.logger.error("Submission gl {} returned nothing!"
                                      .format(gl))
Example #18
    def test_iwait_nogarbage(self):
        sem1 = Semaphore()
        sem2 = Semaphore()
        let = gevent.spawn(sem1.release)
        with gevent.iwait((sem1, sem2)) as iterator:
            self.assertEqual(sem1, next(iterator))
            assert len(sem2._links) == 1
        assert sem2._links is None or len(sem2._links) == 0
        let.get()
Example #19
def chunked_requests(urls, chunk_size=100):
    """
    Given an iterable of urls, this function will yield back the contents of the
    URLs. The requests will be batched up in "chunk_size" batches using a
    semaphore
    """
    semaphore = Semaphore(chunk_size)
    requests = [gevent.spawn(download, u, semaphore) for u in urls]
    for response in gevent.iwait(requests):
        yield response
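Note that gevent.iwait yields the greenlet objects themselves, in completion order, so callers read the payload via .get() (or .value). A hypothetical call site, assuming the download sketch shown after Example #2:

urls = ["http://example.com/%d" % i for i in range(500)]
for finished in chunked_requests(urls):
    url, body = finished.get()  # re-raises if that download failed
    print(url, len(body))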
Example #20
    def test_noiter(self):
        # Test that gevent.iwait returns objects which can be iterated upon
        # without additional calls to iter()

        sem1 = Semaphore()
        sem2 = Semaphore()

        gevent.spawn(sem1.release)
        ready = next(gevent.iwait((sem1, sem2)))
        self.assertEqual(sem1, ready)
Example #21
    def test_iwait_nogarbage(self):
        sem1 = Semaphore()
        sem2 = Semaphore()
        let = gevent.spawn(sem1.release)
        with gevent.iwait((sem1, sem2)) as iterator:
            self.assertEqual(sem1, next(iterator))
            self.assertEqual(sem2.linkcount(), 1)

        self.assertEqual(sem2.linkcount(), 0)
        let.get()
Example #25
def get_text_async(url_dict, chunk_size, concurrent_requests, headers):
    """Asynchronous GET requests for text files"""
    monkeypatch_runner()
    semaphore = Semaphore(concurrent_requests)
    the_request_threads = []
    for filepath, url in get_filepaths_and_urls(url_dict):
        request_thread = gevent.spawn(_get_text_async_thread_builder, url, filepath, chunk_size, headers, semaphore)
        the_request_threads.append(request_thread)

    for the_response in gevent.iwait(the_request_threads):
        yield the_response
Example #26
def sort_servers_closest(
    servers: Sequence[str],
    max_timeout: float = 3.0,
    samples_per_server: int = 3,
    sample_delay: float = 0.125,
) -> Dict[str, float]:
    """Sorts a list of servers by http round-trip time

    Params:
        servers: sequence of http server urls
    Returns:
        sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed and excessively
        slow servers (possibly empty)

    The default timeout was chosen after measuring the long tail of the development matrix servers.
    Under no stress, servers will have a very long tail of up to 2.5 seconds (measured 15/01/2020),
    which can lead to failure during startup if the timeout is too low.
    This increases the timeout so that the network hiccups won't cause Raiden startup failures.
    """
    if not {urlparse(url).scheme for url in servers}.issubset({"http", "https"}):
        raise TransportError("Invalid server urls")

    rtt_greenlets = set(
        spawn_named(
            "get_average_http_response_time",
            get_average_http_response_time,
            url=server_url,
            samples=samples_per_server,
            sample_delay=sample_delay,
        )
        for server_url in servers
    )

    total_timeout = samples_per_server * (max_timeout + sample_delay)

    results = []
    for greenlet in gevent.iwait(rtt_greenlets, timeout=total_timeout):
        result = greenlet.get()
        if result is not None:
            results.append(result)

    gevent.killall(rtt_greenlets)

    if not results:
        raise TransportError(
            f"No Matrix server available with good latency, requests takes more "
            f"than {max_timeout} seconds."
        )

    server_url_to_rtt = dict(sorted(results, key=itemgetter(1)))
    log.debug("Available Matrix homeservers", servers=server_url_to_rtt)
    return server_url_to_rtt
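The pattern above — iterate iwait with an overall timeout, then killall the stragglers — is reusable on its own. A minimal sketch under that reading (the race name is ours, not Raiden's):

import gevent

def race(greenlets, timeout):
    # iwait simply stops yielding when the timeout expires; it does not raise,
    # so we collect whatever finished in time and kill the stragglers
    results = [g.get() for g in gevent.iwait(greenlets, timeout=timeout)]
    gevent.killall(greenlets)
    return results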
Example #27
    def call_kraken(self, req, instance, tag=None):
        resp = None

        """
            for all combinations of departure and arrival modes we call kraken
        """
        logger = logging.getLogger(__name__)
        futures = []

        def worker(o_mode, d_mode, instance, request, request_id):
            return (o_mode, d_mode, instance.send_and_receive(request, request_id=request_id))

        pool = gevent.pool.Pool(current_app.config.get('GREENLET_POOL_SIZE', 3))
        for o_mode, d_mode in itertools.product(self.origin_modes, self.destination_modes):
            # since we use multiple green threads we have to copy the request
            local_req = copy.deepcopy(req)
            local_req.journeys.streetnetwork_params.origin_mode = o_mode
            local_req.journeys.streetnetwork_params.destination_mode = d_mode
            if o_mode == 'car' or (is_admin(req.journeys.origin[0].place) and is_admin(req.journeys.destination[0].place)):
                # we don't want a direct path for car or for admin-to-admin journeys
                local_req.journeys.streetnetwork_params.enable_direct_path = False
            else:
                local_req.journeys.streetnetwork_params.enable_direct_path = True
            futures.append(pool.spawn(worker, o_mode, d_mode, instance, local_req, request_id=flask.request.id))

        for future in gevent.iwait(futures):
            o_mode, d_mode, local_resp = future.get()
            if local_resp.response_type == response_pb2.ITINERARY_FOUND:

                # if a specific tag was provided, we tag the journeys
                # and we don't call the qualifier, it will be done after
                # with the journeys from the previous query
                if tag:
                    for j in local_resp.journeys:
                        j.type = tag
                else:
                    # we qualify the journeys
                    request_type = "arrival" if req.journeys.clockwise else "departure"
                    qualifier_one(local_resp.journeys, request_type)

                fill_uris(local_resp)
                if not resp:
                    resp = local_resp
                else:
                    self.merge_response(resp, local_resp)
            if not resp:
                resp = local_resp
            logger.debug("for mode %s|%s we have found %s journeys: %s", o_mode, d_mode, len(local_resp.journeys), [j.type for j in local_resp.journeys])

        return resp
Example #28
    def get_albums(self):
        # get the number of album pages
        pages = self.get_page_nums()

        # fetch the album list
        tasks = [
            gevent.spawn(self.get_album_by_page, page + 1)
            for page in range(min(args.album, pages))
        ]
        for task in gevent.iwait(tasks):
            album_objs = task.get()
            for album in album_objs:
                album.start()
                self._albums.append(album)
Example #29
    def get_photos(self):
        # get the number of photo pages
        pages = self.get_page_nums()

        # fetch the photo list
        tasks = [
            gevent.spawn(self.get_photo_by_page, page + 1)
            for page in range(min(args.photo, pages))
        ]
        for task in gevent.iwait(tasks):
            photo_objs = task.get()
            for photo in photo_objs:
                photo.start()
                self._photos.append(photo)
Example #30
def main():
    def check_interval(value):
        ivalue = int(value)
        if ivalue < 1:
            # argparse type= callables should raise ArgumentTypeError
            raise ArgumentTypeError(
                'Interval value should be an int bigger than 0')
        return ivalue

    parser = ArgumentParser(description='Tool monitoring sites status')
    parser.add_argument(
        '--interval',
        type=check_interval,
        default=INTERVAL,
        help=('Time interval in seconds (1 or bigger) ' +
              'between status checks (default: {})'.format(INTERVAL)))
    parser.add_argument('--port',
                        type=int,
                        default=PORT,
                        help='Server port (default: {})'.format(PORT))

    args = parser.parse_args()
    interval = args.interval
    port = args.port

    logger.info('=' * 79)
    logger.info('Sites Monitor Running with time interval={}, port={}'.format(
        interval, port))

    gevent.iwait([
        gevent.spawn(status_worker, site, text, interval)
        for site, text in config.sites
    ])

    logger.info('Running server at localhost:{}'.format(port))
    logger.info('=' * 79)

    WSGIServer(('', port), application).serve_forever()
Example #31
    def _deploy_namespaces(self, nr_namespaces, name, size, storage_type, password, nodes):
        """
        generic function to deploy a group of namespaces

        This function will yield namespaces as they are created.
        It can return once nr_namespaces have been created or if we cannot create namespaces on any node.
        It is up to the caller to count the namespaces received from this function to know whether enough were deployed.
        """

        if storage_type not in ['ssd', 'hdd']:
            raise ValueError("storage_type must be 'ssd' or 'hdd', not %s" % storage_type)

        storage_key = 'sru' if storage_type == 'ssd' else 'hru'

        required_nr_namespaces = nr_namespaces
        deployed_nr_namespaces = 0
        while deployed_nr_namespaces < required_nr_namespaces:
            # sort nodes by the amount of storage available
            nodes = sort_by_less_used(nodes, storage_key)
            self.logger.info('number of possible nodes to use for namespace deployments %s', len(nodes))
            if len(nodes) <= 0:
                return

            gls = set()
            for i in range(required_nr_namespaces - deployed_nr_namespaces):
                node = nodes[i % len(nodes)]
                self.logger.info("try to install namespace %s on node %s", name, node['node_id'])

                gl = self._pool.spawn(self._install_namespace,
                                      node=node,
                                      name=name,
                                      disk_type=storage_type,
                                      size=size,
                                      password=password)
                gls.add(gl)

            for g in gevent.iwait(gls):
                if g.exception and g.exception.node in nodes:
                    self.logger.error(
                        "we could not deploy on node %s, remove it from the possible nodes to use", g.exception.node['node_id'])
                    nodes.remove(g.exception.node)
                else:
                    namespace, node = g.value
                    deployed_nr_namespaces += 1

                    # update the amount of resources so the next iteration of the loop will sort the list of nodes properly
                    nodes[nodes.index(node)]['used_resources'][storage_key] += size

                    yield (namespace, node)
Example #32
    def get_users(self):
        # get the number of user pages
        pages = self.get_user_pages()

        # fetch the user list
        tasks = [
            gevent.spawn(self.get_user_by_page, page + 1)
            for page in range(min(args.user, pages))
        ]
        for task in gevent.iwait(tasks):
            user_ids = task.get()
            for user_id in user_ids:
                user = User(user_id, session=self._session)
                user.start()
                self._users.append(user)
Example #33
    def wait_unordered(self, count=-1, timeout=None):
        """
        Yields the response of the requests in the order they ended.

        If *count* is a positive integer, yields at most *count* responses.
        If *timeout* is specified, this method will block at most *timeout* seconds.
        """
        count = int(count)
        self._pool, pool = [], self._pool
        with gevent.Timeout(timeout):
            for greenlet in gevent.iwait(pool):
                yield self._finished(greenlet)
                count -= 1
                if count == 0:
                    break
Example #34
    def update(self, server_name):
        if not self.reset_update_server(server_name):
            raise Exception

        hub = get_hub()
        noti = hub.loop.async()
        lock = RLock()
        stats = []

        def progress(s):
            with lock:
                stats.append(s)
                noti.send()

        remote = self.remote
        remote.transfer_progress = progress

        def do_fetch():
            try:
                return remote.fetch()
            except Exception as e:
                return e

        fetch = hub.threadpool.spawn(do_fetch)

        while True:
            noti_w = gevent.spawn(lambda: hub.wait(noti))
            for r in gevent.iwait([noti_w, fetch]):
                break

            noti_w.kill()

            if r is fetch:
                rst = r.get()
                if isinstance(rst, Exception):
                    raise rst
                else:
                    return

            v = None
            with lock:
                if stats:
                    v = stats[-1]

                stats[:] = []

            if v:
                yield v
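Example #34 leans on the fact that iwait accepts a heterogeneous list of waitables and yields whichever becomes ready first; breaking out after the first item gives a "wait for the first of several" idiom. A self-contained sketch of just that idiom:

import gevent

fast = gevent.spawn(gevent.sleep, 0.1)
slow = gevent.spawn(gevent.sleep, 0.5)
first = next(gevent.iwait([fast, slow]))  # yields whichever finishes first
assert first is fast
slow.kill()  # don't leave the other greenlet running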
Example #36
def main():
    finished = 0
    # Wait on a group that includes one that will already be
    # done, plus some that will finish as we watch
    done_worker = gevent.spawn(worker, "done")
    gevent.joinall((done_worker, ))

    workers = [gevent.spawn(worker, i) for i in range(3)]
    workers.append(done_worker)
    for g in gevent.iwait(workers):
        finished += 1
        # Simulate doing something that causes greenlets to switch;
        # a non-zero timeout is crucial
        gevent.sleep(0.01)

    assert finished == 4
Example #37
    def __oci_add_nodes(self, count=1, node_spec=None):
        """
        Wrapper around __oci_add_node() method. Launches Greenlets to
        perform add nodes operation in parallel using gevent.

        :param count: number of nodes to add
        :param node_spec: dict containing instance launch specification
        :return: list of Nodes
        """
        greenlets = []
        for _ in range(count):
            greenlets.append(gevent.spawn(self.__oci_add_node, node_spec))

        for result in gevent.iwait(greenlets):
            if result.value:
                yield result.value
Example #39
    def build_journeys(self, map_response, crowfly_stop_points, odt_stop_points):
        futures = []
        for dep_mode, arr_mode, journey in map_response:
            # from
            futures.append(self.pool.spawn(self._build_from, journey, g.requested_origin,
                                           crowfly_stop_points, odt_stop_points,
                                           dep_mode, g.fallback_direct_path_pool, g.origins_fallback))
            # to
            futures.append(self.pool.spawn(self._build_to, journey, g.requested_destination,
                                           crowfly_stop_points, odt_stop_points,
                                           arr_mode, g.fallback_direct_path_pool, g.destinations_fallback))

        for future in gevent.iwait(futures):
            journey = future.get()
            journey.durations.total = journey.duration
            journey.sections.sort(SectionSorter())
Example #40
    def get(self, k, provider=None):
        if not provider:
            providers = self.storages
        else:
            providers = [s for s in self.storages if s.__class__.__name__.lower() == provider.lower()]

        jobs = [gevent.spawn(with_timeout, self.hard_timeout, _get, s, k) for s in providers]

        ret = (None, None)
        for done in gevent.iwait(jobs, timeout=self.timeout):
            if done.successful():
                value = done.value
                if value and value[0]:
                    ret = done.value

                gevent.killall(jobs, block=False)
                break
        return ret
Example #41
    def test(self):
        finished = 0
        # Wait on a group that includes one that will already be
        # done, plus some that will finish as we watch
        done_worker = gevent.spawn(worker, "done")
        gevent.joinall((done_worker, ))

        workers = [gevent.spawn(worker, i) for i in range(3)]
        workers.append(done_worker)
        for _ in gevent.iwait(workers):
            finished += 1
            # Simulate doing something that causes greenlets to switch;
            # a non-zero timeout is crucial
            try:
                gevent.sleep(0.01)
            except ValueError as ex:
                self.assertEqual(ex.args[0], 2)

        self.assertEqual(finished, 4)
Example #42
    def process_messages(self, messages):
        greenlet_to_message = {}
        to_delete = []

        self.logger.debug('processing %d messages', len(messages))

        for message in messages:
            try:
                g = self.pool.spawn(self.func, message)
            except:
                self.logger.exception('cannot submit jobs to pool')
                raise
            greenlet_to_message[g] = message

        for g in gevent.iwait(greenlet_to_message):
            message = greenlet_to_message[g]
            if g.successful():
                to_delete.append(message)
            else:
                # logger.exception only attaches a traceback inside an except
                # block; log the greenlet's stored exception explicitly instead
                self.logger.error('exception processing message %s: %s',
                                  message['MessageId'], g.exception)

        return to_delete
Example #43
    def forward(self, upstream_sock, timeout=7, after_started_timeout=360, encrypt=None, decrypt=None,
                delayed_penalty=None, on_forward_started=None):

        if self.forward_started:
            if 5228 == self.dst_port: # Google Service
                upstream_sock.settimeout(None)
            else: # More than 5 minutes
                upstream_sock.settimeout(after_started_timeout)
        else:
            upstream_sock.settimeout(timeout)
        self.downstream_sock.settimeout(None)

        def from_upstream_to_downstream():
            try:
                while True:
                    data = upstream_sock.recv(262144)
                    upstream_sock.counter.received(len(data))
                    if data:
                        if not self.forward_started:
                            self.forward_started = True
                            if 5228 == self.dst_port: # Google Service
                                upstream_sock.settimeout(None)
                            else: # More than 5 minutes
                                upstream_sock.settimeout(after_started_timeout)
                            self.apply_delayed_penalties()
                            if on_forward_started:
                                on_forward_started()
                        if decrypt:
                            data = decrypt(data)
                        self.downstream_sock.sendall(data)
                    else:
                        return
            except socket.error:
                return
            except gevent.GreenletExit:
                return
            except:
                LOGGER.exception('forward u2d failed')
                return sys.exc_info()[1]

        def from_downstream_to_upstream():
            try:
                while True:
                    data = self.downstream_sock.recv(262144)
                    if data:
                        if encrypt:
                            data = encrypt(data)
                        upstream_sock.counter.sending(len(data))
                        upstream_sock.sendall(data)
                    else:
                        return
            except socket.error:
                return
            except gevent.GreenletExit:
                return
            except:
                LOGGER.exception('forward d2u failed')
                return sys.exc_info()[1]
            finally:
                upstream_sock.close()

        u2d = gevent.spawn(from_upstream_to_downstream)
        d2u = gevent.spawn(from_downstream_to_upstream)
        try:
            for greenlet in gevent.iwait([u2d, d2u]):
                e = greenlet.get()
                if e:
                    raise e
                break
            try:
                upstream_sock.close()
            except:
                pass
            if not self.forward_started:
                self.fall_back(reason='forward does not receive any response', delayed_penalty=delayed_penalty)
        finally:
            try:
                u2d.kill()
            except:
                pass
            try:
                d2u.kill()
            except:
                pass
Example #44
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)

        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        v4_filter_updater = IptablesUpdater("filter", ip_version=4)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4)
        v4_ipset_mgr = IpsetManager(IPV4)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_ep_manager = EndpointManager(config,
                                        IPV4,
                                        v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager)

        v6_filter_updater = IptablesUpdater("filter", ip_version=6)
        v6_ipset_mgr = IpsetManager(IPV6)
        v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr)
        v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
        v6_ep_manager = EndpointManager(config,
                                        IPV6,
                                        v6_filter_updater,
                                        v6_dispatch_chains,
                                        v6_rules_manager)

        update_splitter = UpdateSplitter(config,
                                         [v4_ipset_mgr, v6_ipset_mgr],
                                         [v4_rules_manager, v6_rules_manager],
                                         [v4_ep_manager, v6_ep_manager],
                                         [v4_filter_updater,
                                          v6_filter_updater],
                                         v4_masq_manager)
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        update_splitter.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()

        v6_filter_updater.start()
        v6_ipset_mgr.start()
        v6_rules_manager.start()
        v6_dispatch_chains.start()
        v6_ep_manager.start()

        iface_watcher.start()

        top_level_actors = [
            hosts_ipset_v4,
            update_splitter,

            v4_nat_updater,
            v4_filter_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,

            v6_filter_updater,
            v6_ipset_mgr,
            v6_rules_manager,
            v6_dispatch_chains,
            v6_ep_manager,

            iface_watcher,
            etcd_api,
        ]

        monitored_items = [actor.greenlet for actor in top_level_actors]

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v6_filter_updater,
                             v4_nat_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)
        futils.register_diags("Top-level actors", dump_top_level_actors)
        try:
            gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")
            pass

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
Example #45
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        hosts_ipset_v4 = IpsetActor(HOSTS_IPSET_V4)

        etcd_api = EtcdAPI(config, hosts_ipset_v4)
        etcd_api.start()
        # Ask the EtcdAPI to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        config_loaded = etcd_api.load_config(async=False)
        config_loaded.wait()

        # Ensure the Kernel's global options are correctly configured for
        # Calico.
        devices.configure_global_kernel_config()

        _log.info("Main greenlet: Configuration loaded, starting remaining " "actors...")

        monitored_items = []
        if config.PROM_METRICS_ENABLED:
            httpd = HTTPServer(("0.0.0.0", config.PROM_METRICS_PORT), MetricsHandler)
            stats_server = gevent.Greenlet(httpd.serve_forever)
            stats_server.start()
            monitored_items.append(stats_server)

        v4_filter_updater = IptablesUpdater("filter", ip_version=4, config=config)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4, config=config)
        v4_ipset_mgr = IpsetManager(IPV4, config)
        v4_masq_manager = MasqueradeManager(IPV4, v4_nat_updater)
        v4_rules_manager = RulesManager(config, 4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_fip_manager = FloatingIPManager(config, 4, v4_nat_updater)
        v4_ep_manager = EndpointManager(
            config,
            IPV4,
            v4_filter_updater,
            v4_dispatch_chains,
            v4_rules_manager,
            v4_fip_manager,
            etcd_api.status_reporter,
        )

        cleanup_updaters = [v4_filter_updater, v4_nat_updater]
        cleanup_ip_mgrs = [v4_ipset_mgr]
        update_splitter_args = [v4_ipset_mgr, v4_rules_manager, v4_ep_manager, v4_masq_manager, v4_nat_updater]

        v6_enabled = os.path.exists("/proc/sys/net/ipv6")
        if v6_enabled:
            v6_raw_updater = IptablesUpdater("raw", ip_version=6, config=config)
            v6_filter_updater = IptablesUpdater("filter", ip_version=6, config=config)
            v6_nat_updater = IptablesUpdater("nat", ip_version=6, config=config)
            v6_ipset_mgr = IpsetManager(IPV6, config)
            v6_rules_manager = RulesManager(config, 6, v6_filter_updater, v6_ipset_mgr)
            v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
            v6_fip_manager = FloatingIPManager(config, 6, v6_nat_updater)
            v6_ep_manager = EndpointManager(
                config,
                IPV6,
                v6_filter_updater,
                v6_dispatch_chains,
                v6_rules_manager,
                v6_fip_manager,
                etcd_api.status_reporter,
            )
            cleanup_updaters.append(v6_filter_updater)
            cleanup_ip_mgrs.append(v6_ipset_mgr)
            update_splitter_args += [v6_ipset_mgr, v6_rules_manager, v6_ep_manager, v6_raw_updater, v6_nat_updater]

        cleanup_mgr = CleanupManager(config, cleanup_updaters, cleanup_ip_mgrs)
        update_splitter_args.append(cleanup_mgr)
        update_splitter = UpdateSplitter(update_splitter_args)
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        hosts_ipset_v4.start()
        cleanup_mgr.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_masq_manager.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()
        v4_fip_manager.start()

        if v6_enabled:
            v6_raw_updater.start()
            v6_filter_updater.start()
            v6_ipset_mgr.start()
            v6_nat_updater.start()
            v6_rules_manager.start()
            v6_dispatch_chains.start()
            v6_ep_manager.start()
            v6_fip_manager.start()

        iface_watcher.start()

        top_level_actors = [
            hosts_ipset_v4,
            cleanup_mgr,
            v4_filter_updater,
            v4_nat_updater,
            v4_ipset_mgr,
            v4_masq_manager,
            v4_rules_manager,
            v4_dispatch_chains,
            v4_ep_manager,
            v4_fip_manager,
            iface_watcher,
            etcd_api,
        ]

        if v6_enabled:
            top_level_actors += [
                v6_raw_updater,
                v6_filter_updater,
                v6_nat_updater,
                v6_ipset_mgr,
                v6_rules_manager,
                v6_dispatch_chains,
                v6_ep_manager,
                v6_fip_manager,
            ]

        monitored_items += [actor.greenlet for actor in top_level_actors]

        # Try to ensure that the nf_conntrack_netlink kernel module is present.
        # This works around an issue[1] where the first call to the "conntrack"
        # command fails while waiting for the module to load.
        # [1] https://github.com/projectcalico/calico/issues/986
        load_nf_conntrack()

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v4_nat_updater, ip_version=4)
        if v6_enabled:
            install_global_rules(config, v6_filter_updater, v6_nat_updater, ip_version=6, raw_updater=v6_raw_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        etcd_api.start_watch(update_splitter, async=True)

        # Register a SIG_USR handler to trigger a diags dump.
        def dump_top_level_actors(log):
            for a in top_level_actors:
                # The output will include queue length and the like.
                log.info("%s", a)

        futils.register_diags("Top-level actors", dump_top_level_actors)
        futils.register_process_statistics()
        try:
            gevent.signal(signal.SIGUSR1, functools.partial(futils.dump_diags))
        except AttributeError:
            # It doesn't matter too much if we fail to do this.
            _log.warning("Unable to install diag dump handler")

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
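The idiom that closes this example (gevent.iwait over monitored_items followed by a single next()) blocks until the first greenlet stops for any reason. A self-contained sketch of just that idiom; the worker names and the forced failure are illustrative, not from the Calico source:

import gevent


def worker(name, delay, fail=False):
    gevent.sleep(delay)
    if fail:
        raise RuntimeError("%s died" % name)
    return name

monitored_items = [
    gevent.spawn(worker, "iface-watcher", 10),
    gevent.spawn(worker, "etcd-api", 0.1, fail=True),
    gevent.spawn(worker, "stats-server", 10),
]

# iwait yields greenlets in completion order; next() blocks until the
# first one stops, whether it raised or returned normally.
stopped_greenlet = next(gevent.iwait(monitored_items))
try:
    stopped_greenlet.get()  # re-raises the greenlet's exception, if any
except Exception as e:
    print("greenlet failed: %r" % e)
else:
    print("greenlet %r unexpectedly returned" % stopped_greenlet)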
Example #46
    def log_share(self, client, diff, typ, params, job=None, header_hash=None,
                  header=None):
        if typ == StratumClient.VALID_SHARE:
            start = time.time()
            self.logger.debug("Valid share accepted from worker {}.{}!"
                              .format(client.address, client.worker))
            # Grab the raw coinbase out of the job object before gevent can switch
            # to another greenlet and change the value. Very important!
            coinbase_raw = job.coinbase.raw

            # Some coins use the POW function for the block hash, while others
            # use double SHA256. Allow toggling between the two.
            if job.pow_block_hash:
                header_hash_raw = client.algo['module'](header)[::-1]
            else:
                header_hash_raw = sha256(sha256(header).digest()).digest()[::-1]
            hash_hex = hexlify(header_hash_raw)

            submission_threads = []
            # valid network hash?
            if header_hash <= job.bits_target:
                submission_threads.append(spawn(
                    job.found_block,
                    coinbase_raw,
                    client.address,
                    client.worker,
                    hash_hex,
                    header,
                    job,
                    start))

            # check each aux chain for validity
            for chain_id, data in job.merged_data.iteritems():
                if header_hash <= data['target']:
                    submission_threads.append(spawn(
                        data['found_block'],
                        client.address,
                        client.worker,
                        header,
                        coinbase_raw,
                        job,
                        start))

            for gl in gevent.iwait(submission_threads):
                ret = gl.value
                if ret:
                    spawn(self.add_block, **ret)
                else:
                    self.logger.error("Submission gl {} returned nothing!"
                                      .format(gl))

        for reporter in self.child_reporters:
            reporter.log_share(client, diff, typ, params, job=job,
                               header_hash=header_hash, header=header)

        # reporting for vardiff rates
        slc_time = (int(time.time()) // 60) * 60
        address = client.address
        if typ == StratumClient.VALID_SHARE:
            slc = self._per_address_slices.setdefault(slc_time, {})
            if address not in slc:
                slc[address] = diff
            else:
                slc[address] += diff
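The submission fan-out above reduces to a simple shape: spawn one greenlet per target chain, then handle each result as gevent.iwait surfaces it. A minimal sketch, where submit() is a hypothetical stand-in for job.found_block and the per-chain found_block callbacks:

import gevent
from gevent import spawn


def submit(chain, header):
    # Hypothetical per-chain block submission; returns a result dict on
    # success or None on failure.
    gevent.sleep(0.01)
    return {"chain": chain, "header": header}

submission_threads = [spawn(submit, chain, "rawheader")
                      for chain in ("main", "aux-1", "aux-2")]

# Handle each submission as soon as it completes, not in spawn order.
for gl in gevent.iwait(submission_threads):
    ret = gl.value  # None if the greenlet raised or returned None
    if ret:
        print("submitted block on %s" % ret["chain"])
    else:
        print("submission %r returned nothing" % gl)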
Example #47
def _main_greenlet(config):
    """
    The root of our tree of greenlets.  Responsible for restarting
    its children if desired.
    """
    try:
        _log.info("Connecting to etcd to get our configuration.")
        etcd_watcher = EtcdWatcher(config)
        etcd_watcher.start()
        # Ask the EtcdWatcher to fill in the global config object before we
        # proceed.  We don't yet support config updates.
        etcd_watcher.load_config(async=False)

        _log.info("Main greenlet: Configuration loaded, starting remaining "
                  "actors...")
        v4_filter_updater = IptablesUpdater("filter", ip_version=4)
        v4_nat_updater = IptablesUpdater("nat", ip_version=4)
        v4_ipset_mgr = IpsetManager(IPV4)
        v4_rules_manager = RulesManager(4, v4_filter_updater, v4_ipset_mgr)
        v4_dispatch_chains = DispatchChains(config, 4, v4_filter_updater)
        v4_ep_manager = EndpointManager(config,
                                        IPV4,
                                        v4_filter_updater,
                                        v4_dispatch_chains,
                                        v4_rules_manager)

        v6_filter_updater = IptablesUpdater("filter", ip_version=6)
        v6_ipset_mgr = IpsetManager(IPV6)
        v6_rules_manager = RulesManager(6, v6_filter_updater, v6_ipset_mgr)
        v6_dispatch_chains = DispatchChains(config, 6, v6_filter_updater)
        v6_ep_manager = EndpointManager(config,
                                        IPV6,
                                        v6_filter_updater,
                                        v6_dispatch_chains,
                                        v6_rules_manager)

        update_splitter = UpdateSplitter(config,
                                         [v4_ipset_mgr, v6_ipset_mgr],
                                         [v4_rules_manager, v6_rules_manager],
                                         [v4_ep_manager, v6_ep_manager],
                                         [v4_filter_updater, v6_filter_updater])
        iface_watcher = InterfaceWatcher(update_splitter)

        _log.info("Starting actors.")
        update_splitter.start()

        v4_filter_updater.start()
        v4_nat_updater.start()
        v4_ipset_mgr.start()
        v4_rules_manager.start()
        v4_dispatch_chains.start()
        v4_ep_manager.start()

        v6_filter_updater.start()
        v6_ipset_mgr.start()
        v6_rules_manager.start()
        v6_dispatch_chains.start()
        v6_ep_manager.start()

        iface_watcher.start()

        monitored_items = [
            update_splitter.greenlet,

            v4_filter_updater.greenlet,
            v4_nat_updater.greenlet,
            v4_ipset_mgr.greenlet,
            v4_rules_manager.greenlet,
            v4_dispatch_chains.greenlet,
            v4_ep_manager.greenlet,

            v6_filter_updater.greenlet,
            v6_ipset_mgr.greenlet,
            v6_rules_manager.greenlet,
            v6_dispatch_chains.greenlet,
            v6_ep_manager.greenlet,

            iface_watcher.greenlet,
            etcd_watcher.greenlet
        ]

        # Install the global rules before we start polling for updates.
        _log.info("Installing global rules.")
        install_global_rules(config, v4_filter_updater, v6_filter_updater,
                             v4_nat_updater)

        # Start polling for updates. These kicks make the actors poll
        # indefinitely.
        _log.info("Starting polling for interface and etcd updates.")
        f = iface_watcher.watch_interfaces(async=True)
        monitored_items.append(f)
        f = etcd_watcher.watch_etcd(update_splitter, async=True)
        monitored_items.append(f)

        # Wait for something to fail.
        _log.info("All top-level actors started, waiting on failures...")
        stopped_greenlets_iter = gevent.iwait(monitored_items)

        stopped_greenlet = next(stopped_greenlets_iter)
        try:
            stopped_greenlet.get()
        except Exception:
            _log.exception("Greenlet failed: %s", stopped_greenlet)
            raise
        else:
            _log.error("Greenlet %s unexpectedly returned.", stopped_greenlet)
            raise AssertionError("Greenlet unexpectedly returned")
    except:
        _log.exception("Exception killing main greenlet")
        raise
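Both versions of _main_greenlet promise restart-on-failure in their docstrings but actually raise on the first stopped greenlet, leaving any restart to an outer loop. A minimal sketch of such a supervisor, assuming a hypothetical run_tree() that builds the greenlet tree and raises when a child stops:

import gevent


def run_tree():
    # Hypothetical stand-in for _main_greenlet's body: build the greenlet
    # tree, block on gevent.iwait, and raise when the first child stops.
    gevent.sleep(0.1)
    raise RuntimeError("child greenlet stopped")

# Rebuild the whole tree a bounded number of times, backing off between
# attempts.
for attempt in range(3):
    try:
        run_tree()
        break  # run_tree returned cleanly (normally it never does)
    except RuntimeError as exc:
        print("attempt %d failed: %r; restarting" % (attempt, exc))
        gevent.sleep(1)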
Example #48
import gevent
from gevent.lock import Semaphore


def chunked_requests(urls, chunk_size=100):
    # The shared semaphore caps in-flight downloads at chunk_size;
    # download(url, semaphore) is assumed to be defined elsewhere.
    semaphore = Semaphore(chunk_size)
    requests = [gevent.spawn(download, u, semaphore) for u in urls]
    # iwait yields each greenlet as it finishes (its result is on .value).
    for response in gevent.iwait(requests):
        yield response
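A usage sketch, assuming the chunked_requests defined above is in scope; the download() helper is an assumption (the original never shows it), and the caller reads each result off the finished greenlet's .value:

import gevent
from gevent.lock import Semaphore


def download(url, semaphore):
    # Hypothetical download helper: hold the shared semaphore while
    # "fetching" so at most chunk_size requests run at once.
    with semaphore:
        gevent.sleep(0.01)  # stand-in for the real network fetch
        return url, 200

urls = ["http://example.com/%d" % i for i in range(10)]
for finished in chunked_requests(urls, chunk_size=3):
    url, status = finished.value
    print(url, status)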