Example #1
def start(num_of_clients):
    task_pool = []
    for i in range(num_of_clients):
        cookie_str = datetime.datetime.now().strftime("%H%M%S%f")
        task = gevent.spawn(send_heartbeat, i, SHOW_ID, cookie_str, CLIENT_SLEEP_TIME)
        task_pool.append(task)
    gevent.wait(task_pool)
Example #2
    def exec_in_coroutine(self, stream, ident, parent):
        import gevent
        task = gevent.spawn(_execute_request, self, stream, ident, parent)

        while not task.value:
            gevent.wait(timeout=0.01)
            self.frontends[0].iopub_channel.process_events()
Example #3
    def stop_and_wait(self):
        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.server.stop_accepting()

        # Stop processing the outgoing queues
        self.event_stop.set()
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.server.stop()

        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()  # pylint: disable=protected-access
        except socket.error:
            pass

        # Set all the pending results to False
        for async_result in self.messageids_to_asyncresults.values():
            async_result.set(False)
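
The ordering in Example #3 (stop accepting, set the stop event, gevent.wait on the worker greenlets, then close the socket) can be reduced to a short self-contained sketch. This is only an illustration of the pattern; the names stop, worker, and workers are made up here and do not come from the project above.

import gevent
from gevent.event import Event

stop = Event()

def worker(n):
    # Keep draining work until shutdown is requested.
    while not stop.is_set():
        gevent.sleep(0.01)
    return n

workers = [gevent.spawn(worker, i) for i in range(3)]

# Request shutdown, then block until every worker greenlet has finished.
stop.set()
done = gevent.wait(workers)
assert all(g.ready() for g in done)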
Example #4
File: runner.py Project: hurrycane/keep
def execute_from_cli(hostname, cluster, with_ui, peers):

  polar_client = PolarClient(hosts=peers, handler=GeventHandler())
  polar_client.start()

  crow = CrowContext(polar_client, hostname, cluster)

  actor_name = "%s-%s" % (hostname, cluster)
  actor_system = ActorSystem(
    polar_client=polar_client,
    name=actor_name
  )

  actor_system.actor_of(KeepRpcActor.props(
    crow=crow,
    polar_client=polar_client
  ))

  crow.announce()

  if with_ui == True:
    from keep.ui import app

    app.config["actor_flask"] = actor_system
    app.config["polar_client"] = polar_client

    # adapt the polar client to work with the WSGI adapter

    http_server = WSGIServer(('', 5000), app)

    print "Stared UI on port 5000"
    http_server.serve_forever()

  gevent.wait()
Example #5
    def start_listeners(self):
        """
        Starts all listeners in managed greenlets.

        Usually called by the ProcManager, unless using IonProcess manually.
        """
        try:
            # disable normal error reporting, this method should only be called from startup
            self.thread_manager._failure_notify_callback = None

            # spawn all listeners in startup listeners (from initializer, or added later)
            for listener in self._startup_listeners:
                self.add_endpoint(listener)

            with Timeout(seconds=CFG.get_safe('container.messaging.timeout.start_listener', 30)):
                gevent.wait([x.get_ready_event() for x in self.listeners])

        except Timeout:

            # remove failed endpoints before reporting failure above
            for listener, proc in self._listener_map.iteritems():
                if proc.proc.dead:
                    log.info("removed dead listener: %s", listener)
                    self.listeners.remove(listener)
                    self.thread_manager.children.remove(proc)

            raise IonProcessError("start_listeners did not complete in expected time")

        finally:
            self.thread_manager._failure_notify_callback = self._child_failed
Example #6
    def test_rawlink_on_unacquired_runs_notifiers(self):
        # https://github.com/gevent/gevent/issues/1287

        # Rawlinking a ready semaphore should fire immediately,
        # not raise LoopExit
        s = Semaphore()
        gevent.wait([s])
Example #7
    def run(self, actorlist):
        for actor in actorlist:
            assert command.has_cmd(actor["cmd"]),actor["cmd"]
            for _ in xrange(actor.get("count", 1)):
                self.workers.append(gevent.spawn(command.do_cmd, Game(self.srv_addr), actor["cmd"], actor["args"]))

        gevent.wait(self.workers)
Example #8
def get_biobrick_data():
    def get_data(obj):
        name = obj.to_dict()['name']
        print 'Trying to get information of %s ...' % name
        fp = urlopen('http://parts.igem.org/cgi/xml/part.cgi?part=' + name)
        doc = ET.parse(fp)
        part = doc.find('.//part')
        if part is None:
            print 'Not found: %s.' % name
            return
        print 'Success: %s.' % name
        part_id = part.find('./part_id').text
        short_name = part.find('./part_short_name').text
        nickname = part.find('./part_nickname').text or ''
        description = part.find('./part_short_desc').text
        sequence = part.find('./sequences/seq_data').text.replace('\n', '')
        obj.part_id = int(part_id)
        obj.short_name = short_name
        obj.nickname = nickname
        obj.description = description
        obj.sequence = sequence

    jobs = []
    jobs.extend(gevent.spawn(get_data, obj) for obj in Receptor.query)
    jobs.extend(gevent.spawn(get_data, obj) for obj in Promoter.query)
    jobs.extend(gevent.spawn(get_data, obj) for obj in RBS.query)
    jobs.extend(gevent.spawn(get_data, obj) for obj in Terminator.query)
    jobs.extend(gevent.spawn(get_data, obj) for obj in Output.query)
    gevent.wait(jobs)

    db.session.commit()
Example #9
def test_direct_transfer_to_offline_node(raiden_network, token_addresses):
    token_address = token_addresses[0]
    app0, app1 = raiden_network

    # Wait until the initialization of the node is complete and then stop it
    gevent.wait([app1.raiden.start_event])
    app1.raiden.stop()

    amount = 10
    target = app1.raiden.address
    async_result = app0.raiden.direct_transfer_async(
        token_address,
        amount,
        target,
        identifier=1,
    )

    assert async_result.wait(5) is None

    app1.raiden.start()

    assert async_result.wait(5) is True

    assert_mirror(
        channel(app0, app1, token_address),
        channel(app1, app0, token_address),
    )
Example #10
    def query(self):
        jobs = [gevent.spawn(self.do_query, i) for i in xrange(self.num_)]
        gevent.wait(jobs)

        self.out_put_.extend([self.process_index_, reduce(
            lambda x, y: x + y, self.before_merge_res_)])
        pass
Example #11
def mainloop(sup):
    try:
        gevent.wait()
    except KeyboardInterrupt:
        pass
    finally:
        sup.stop()
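
Called with no arguments, as in mainloop above, gevent.wait() blocks until the event loop has nothing left to run (or until interrupted). A minimal sketch of that behaviour, with made-up names:

import gevent

results = []

def background(n):
    gevent.sleep(0.05)
    results.append(n)

for i in range(3):
    gevent.spawn(background, i)

# Returns only once every spawned greenlet has finished and the loop is idle.
gevent.wait()
assert sorted(results) == [0, 1, 2]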
Example #12
def resources():
    group = gevent.pool.Group()
    source_view = gevent.spawn(
        flask.copy_current_request_context(cloud_view),
        hooks.source,
    )
    if "destination" not in flask.current_app.config["CLOUDS"]:
        gevent.wait([source_view])
        return flask.jsonify(
            reset=flask.current_app.config["CLOUDS_RESET"],
            source=source_view.value,
            # TODO(akscram): A set of hosts that don't belong to any cloud.
            hosts=[],
            # TODO(akscram): A set of current events.
            events=[],
        )
    destination_view = gevent.spawn(
        flask.copy_current_request_context(cloud_view),
        hooks.destination,
    )
    gevent.wait([source_view, destination_view])
    return flask.jsonify(
        reset=flask.current_app.config["CLOUDS_RESET"],
        source=source_view.value,
        destination=destination_view.value,
        # TODO(akscram): A set of hosts that don't belong to any cloud.
        hosts=[],
        # TODO(akscram): A set of current events.
        events=[],
    )
Example #13
File: __main__.py Project: Dav1dde/emfas
def main():
    parser = argparse.ArgumentParser('emfas')
    parser.add_argument('--api-key', required=True, help='moomash api key')
    parser.add_argument('-v', '--verbose', action='count')
    parser.add_argument('url', help='twitch url')
    ns = parser.parse_args()

    emfas = Emfas(ns.api_key)
    sp = TwitchSegmentProvider2(ns.url)
    emfas.start(sp)

    if ns.verbose > 0:
        setup_logging()
        if ns.verbose < 1:
            logging.getLogger('requests').setLevel(logging.WARNING)
        if ns.verbose < 2:
            sp.ls.set_loglevel(logging.WARNING)
        if ns.verbose < 3:
            logging.getLogger('requests.packages.urllib3.connectionpool')\
                .setLevel(logging.WARNING)

    def every_minute():
        songs = emfas.identify()
        print 'Songs:', ', '.join(map(unicode, songs))

        gevent.spawn_later(60, every_minute)
    gevent.spawn_later(60, every_minute)

    gevent.wait([emfas._worker])
Example #14
    def stop(self):
        """ Stop the node. """
        # Needs to come before any greenlets joining
        self.stop_event.set()
        self.transport.stop_and_wait()
        self.alarm.stop_async()

        wait_for = [self.alarm]
        wait_for.extend(getattr(self.transport, 'greenlets', []))
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        gevent.wait(wait_for, timeout=self.shutdown_timeout)

        # Filters must be uninstalled after the alarm task has stopped. Since
        # the events are polled by an alarm task callback, if the filters are
        # uninstalled before the alarm task is fully stopped the callback
        # `poll_blockchain_events` will fail.
        #
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        try:
            with gevent.Timeout(self.shutdown_timeout):
                self.blockchain_events.uninstall_all_event_listeners()
        except (gevent.timeout.Timeout, RaidenShuttingDown):
            pass

        self.blockchain_events.reset()

        if self.db_lock is not None:
            self.db_lock.release()
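
The timeout passed to gevent.wait in stop() above bounds the shutdown: if some greenlet never finishes, the call returns once the timeout expires with only the objects that did become ready. A rough sketch of that partial-result behaviour (the 60-second sleeper stands in for a greenlet stuck retrying a dead connection):

import gevent

fast = gevent.spawn(gevent.sleep, 0.01)
stuck = gevent.spawn(gevent.sleep, 60)

# Returns after roughly 0.1s; only the finished greenlet is in the result.
ready = gevent.wait([fast, stuck], timeout=0.1)
assert ready == [fast]

stuck.kill()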
Example #15
def stress_send_and_receive_parallel_transfers(
        rest_apis,
        token_address,
        identifier_generator,
        deposit,
):
    """Send transfers of value one in parallel"""
    pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))

    foward_transfers = [
        gevent.spawn(
            sequential_transfers,
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )
        for server_from, server_to in pairs
    ]

    backwards_transfers = [
        gevent.spawn(
            sequential_transfers,
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )
        for server_to, server_from in pairs
    ]

    gevent.wait(foward_transfers + backwards_transfers)
Example #16
    def stop(self):
        """ Stop the node. """
        # Needs to come before any greenlets joining
        self.stop_event.set()
        self.protocol.stop_and_wait()
        self.alarm.stop_async()

        wait_for = [self.alarm]
        wait_for.extend(self.protocol.greenlets)
        wait_for.extend(self.greenlet_task_dispatcher.stop())
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        gevent.wait(wait_for, timeout=self.shutdown_timeout)

        # Filters must be uninstalled after the alarm task has stopped. Since
        # the events are polled by an alarm task callback, if the filters are
        # uninstalled before the alarm task is fully stopped the callback
        # `poll_blockchain_events` will fail.
        #
        # We need a timeout to prevent an endless loop from trying to
        # contact the disconnected client
        try:
            with gevent.Timeout(self.shutdown_timeout):
                self.blockchain_events.uninstall_all_event_listeners()
        except (gevent.timeout.Timeout, RaidenShuttingDown):
            pass

        # save the state after all tasks are done
        if self.serialization_file:
            save_snapshot(self.serialization_file, self)

        if self.db_lock is not None:
            self.db_lock.release()
Example #17
def restart_network(raiden_network, retry_timeout):
    for app in raiden_network:
        app.stop()

    wait_network = [
        gevent.spawn(restart_app, app)
        for app in raiden_network
    ]

    gevent.wait(wait_network)

    new_network = [
        greenlet.get()
        for greenlet in wait_network
    ]

    # The tests assume the nodes are available to transfer
    for app0, app1 in combinations(new_network, 2):
        waiting.wait_for_healthy(
            app0.raiden,
            app1.raiden.address,
            retry_timeout,
        )

    return new_network
Example #18
    def run(self):
        ob = ObserverManager(11011)
        ob.start()

        p = PlayerManager(11012)
        p.start()
        gevent.wait()
Example #19
def test_regression_unfiltered_routes(
        raiden_network,
        token_addresses,
        settle_timeout,
        deposit,
):
    """ The transfer should proceed without triggering an assert.

    Transfers failed in networks where two or more paths to the destination are
    possible but they share the same node as a first hop.
    """
    app0, app1, app2, app3, app4 = raiden_network
    token = token_addresses[0]
    registry_address = app0.raiden.default_registry.address

    # Topology:
    #
    #  0 -> 1 -> 2 -> 4
    #       |         ^
    #       +--> 3 ---+
    app_channels = [
        (app0, app1),
        (app1, app2),
        (app1, app3),
        (app3, app4),
        (app2, app4),
    ]

    greenlets = []
    for first_app, second_app in app_channels:
        greenlets.append(gevent.spawn(
            payment_channel_open_and_deposit,
            first_app,
            second_app,
            token,
            deposit,
            settle_timeout,
        ))
    gevent.wait(greenlets)

    wait_for_channels(
        app_channels,
        registry_address,
        [token],
        deposit,
    )

    payment_network_identifier = app0.raiden.default_registry.address
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        views.state_from_app(app0),
        payment_network_identifier,
        token,
    )
    transfer = app0.raiden.mediated_transfer_async(
        token_network_identifier=token_network_identifier,
        amount=1,
        target=app4.raiden.address,
        identifier=1,
    )
    assert transfer.wait()
Example #20
File: g.py Project: akissa/spamc
 def run(self):
     self.server = SpamdTCP(self.address)
     # self.port = server.server_port
     gevent.signal(signal.SIGTERM, self.server.close)
     gevent.signal(signal.SIGINT, self.server.close)
     self.server.start()
     gevent.wait()
Example #21
 def run(self, stdin, log):
     jobs = [gevent.spawn(service.load()) for service
             in pkg.get_entry_points("canopy.kaleidoscope")]
     try:
         gevent.wait(jobs)
     except KeyboardInterrupt:
         print("killed abruptly")
Example #22
File: core.py Project: kanzure/pyconfd
def main():
    """
    Import plugin modules. Find subclasses of Plugin. Use those subclasses.
    """
    modules = import_plugins()
    plugins = find_plugins()
    jobs = launch_plugins(plugins)
    gevent.wait(jobs)
Example #23
 def server_forever(listener):
     global service
     log.info('worker [%d] start',os.getpid())
     service = GStreamServer( listener, processor = processor, inputTransportFactory = tfactory, inputProtocolFactory = pfactory, spawn=max_conn)
     signal.signal(signal.SIGTERM, signal_worker_handler)
     service.start()
     gevent.wait()
     log.info('worker [%d] exit', os.getpid())
Example #24
 def targets(self, activity):
     activities = self.get_contacts_by_activity[activity['id']]
     contacts = [int(c) for c in activities[TARGETS]]
     pool = ThreadPool(THREADS)
     contacts = [pool.spawn(self.get_contact, c) for c in contacts]
     gevent.wait()
     contacts = [c.get()['sort_name'] for c in contacts]
     return ', '.join(contacts)
Example #25
    def __call__(self):
        self.change_permissions()

        periodical_tasks = []

        if self.args['--recalculate']:

            def after_task(periodical_task):
                periodical_task.task.fill_farm_usage_d(force=True)
                periodical_task.stop()

            if 'poller' in self.config['billing']:
                poller_task = billing.RecalculatePollerBilling(self.analytics, self.config)
                periodical_tasks.append(helper.PeriodicalTask(poller_task,
                                                              timeout=60 * 60 * 6,
                                                              after=after_task))
            if 'aws-detailed-billing' in self.config['billing']:
                aws_billing_task = billing.RecalculateAWSBilling(self.analytics, self.config)
                periodical_tasks.append(helper.PeriodicalTask(aws_billing_task,
                                                              timeout=60 * 60 * 12,
                                                              after=after_task))
        else:

            def after_task(periodical_task):
                periodical_task.task.fill_farm_usage_d()
                if self.config['dtime_to']:
                    periodical_task.stop()
                else:
                    periodical_task.task.config['dtime_from'] = False

            if 'poller' in self.config['billing']:
                poller_task = billing.PollerBilling(self.analytics, self.config)
                periodical_tasks.append(helper.PeriodicalTask(poller_task,
                                                              period=poller_period,
                                                              timeout=poller_timeout,
                                                              after=after_task))

            if 'aws-detailed-billing' in self.config['billing']:
                aws_task = billing.AWSBilling(self.analytics, self.config)
                periodical_tasks.append(helper.PeriodicalTask(aws_task,
                                                              period=aws_period,
                                                              timeout=aws_timeout,
                                                              after=after_task))

            if 'azure' in self.config['billing']:
                azure_task = billing.AzureBilling(self.analytics, self.config)
                periodical_tasks.append(helper.PeriodicalTask(azure_task,
                                                              period=azure_period,
                                                              timeout=azure_timeout,
                                                              after=after_task))

        results = []
        for periodical_task in periodical_tasks:
            results.append(periodical_task())
            if len(periodical_tasks) > 1:
                time.sleep(launch_delay)

        gevent.wait(results)
Example #26
    def _work(self, burst=False):
        """Starts the work loop.

        Pops and performs all jobs on the current list of queues.  When all
        queues are empty, block and wait for new jobs to arrive on any of the
        queues, unless `burst` mode is enabled.

        The return value indicates whether any jobs were processed.
        """
        setup_loghandlers()
        self._install_signal_handlers()

        self.did_perform_work = False
        self.register_birth()
        self.log.info(
            "RQ gevent worker (greenlet pool size={0}) {1!r} started, version {2}".format(
                self.gevent_pool.size, self.key, VERSION
            )
        )
        self.set_state(WorkerStatus.STARTED)

        try:
            while True:
                try:
                    self.check_for_suspension(burst)

                    if self.should_run_maintenance_tasks:
                        self.clean_registries()

                    if self._stop_requested:
                        self.log.info("Stopping on request.")
                        break

                    timeout = None if burst else max(1, self.default_worker_ttl - 60)

                    result = self.dequeue_job_and_maintain_ttl(timeout)
                    if result is None and burst:
                        self.log.info("RQ worker {0!r} done, quitting".format(self.key))

                        try:
                            # Make sure dependent jobs are enqueued.
                            gevent.wait(self.gevent_greenlets)
                        except LoopExit:
                            pass
                        result = self.dequeue_job_and_maintain_ttl(timeout)

                    if result is None:
                        break
                except StopRequested:
                    break

                job, queue = result
                self.execute_job(job, queue)

        finally:
            if not self.is_horse:
                self.register_death()
        return self.did_perform_work
Example #27
    def wait(self, tasks, timeout=None, count=None):
        """
        Wait for tasks to finish with optional timeout.
        """

        assert isinstance(tasks, list)
        assert len(tasks) > 0

        gevent.wait(tasks, timeout, count)
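
The count argument accepted by this wrapper makes gevent.wait return as soon as that many of the given objects are ready instead of waiting for all of them. A small illustrative sketch (the task names are hypothetical):

import gevent

slow = gevent.spawn(gevent.sleep, 1.0)
quick = gevent.spawn(gevent.sleep, 0.01)

# With count=1 the call returns as soon as the first task finishes.
first_done = gevent.wait([slow, quick], count=1)
assert first_done == [quick]

slow.kill()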
Example #28
def run_many(tests, expected=None, failfast=False):
    global NWORKERS, pool
    start = time()
    total = 0
    failed = {}

    NWORKERS = min(len(tests), NWORKERS)
    pool = Pool(NWORKERS)
    util.BUFFER_OUTPUT = NWORKERS > 1

    def run_one(cmd, **kwargs):
        result = util.run(cmd, **kwargs)
        if result:
            if failfast:
                sys.exit(1)
            # the tests containing AssertionError might have failed because
            # we spawned more workers than CPUs
            # we therefore will retry them sequentially
            failed[result.name] = [cmd, kwargs, 'AssertionError' in (result.output or '')]

    try:
        try:
            for cmd, options in tests:
                total += 1
                spawn(run_one, cmd, **(options or {}))
            gevent.wait()
        except KeyboardInterrupt:
            try:
                if pool:
                    util.log('Waiting for currently running to finish...')
                    pool.join()
            except KeyboardInterrupt:
                util.report(total, failed, exit=False, took=time() - start, expected=expected)
                util.log('(partial results)\n')
                raise
    except:
        traceback.print_exc()
        pool.kill()  # this is needed to kill the processes
        raise

    toretry = [key for (key, (cmd, kwargs, can_retry)) in failed.items() if can_retry]
    failed_then_succeeded = []

    if NWORKERS > 1 and toretry:
        util.log('\nWill retry %s failed tests without concurrency:\n- %s\n', len(toretry), '\n- '.join(toretry))
        for name, (cmd, kwargs, _ignore) in failed.items():
            if not util.run(cmd, buffer_output=False, **kwargs):
                failed.pop(name)
                failed_then_succeeded.append(name)

    if failed_then_succeeded:
        util.log('\n%s tests failed during concurrent run but succeeded when ran sequentially:', len(failed_then_succeeded))
        util.log('- ' + '\n- '.join(failed_then_succeeded))

    util.log('gevent version %s from %s', gevent.__version__, gevent.__file__)
    util.report(total, failed, took=time() - start, expected=expected)
    assert not pool, pool
Example #29
 def handle(self, **options):
     p = pool.Pool(20)
     jobs = [
         p.spawn(self.crawlblog, blog)
         for blog in Blog.objects.filter(skip_crawl=False)
     ]
     wait(jobs)
     for post in self.zulip_queue:
         announce_new_post(post, debug=settings.DEBUG)
Example #30
def test_main_no_args():
    with capture() as f:
        greenlet = gevent.spawn(main, [])
        gevent.sleep(0.1)

    assert greenlet.started, f
    greenlet.kill()
    gevent.wait([greenlet])
    assert not greenlet.started
Example #31
 def start(self):
     i_count = 1
     while self.have_content:
         self.pool.map(self.get_one_page,
                       [x for x in range(i_count, i_count + 50)])
         gevent.wait()
         i_count += 50
     print(self.bugs)
     file_handle = open('wy_no_1.csv', 'w')
     file_handle.write('bug_id, rank, name, corp, date\n')
     for one_bug in self.bugs:
         file_handle.write('{0}, {1}, {2}, {3}, {4}\n'.format(
             one_bug['id'], one_bug['rank'], one_bug['name'],
             one_bug['corp'], one_bug['date']))
         print(one_bug)
     file_handle.close()
Example #32
    def run(self):
        self.logger.info("Firing up workers")
        for i in xrange(self.config['workers']):
            worker = Worker(self, i)
            self.workers.append(worker)
            worker.start()

        self.logger.info("Starting zmq puller")
        self.puller = Puller(self)
        self.puller.start()

        gevent.signal(signal.SIGINT, self.exit, "SIGINT")
        gevent.signal(signal.SIGHUP, self.exit, "SIGHUP")

        self._exit_signal = Event()
        self._exit_signal.wait()

        # stop all greenlets
        for gl in self.workers:
            self.logger.info("Requesting stop for {} greenlet".format(gl))
            gl.kill(timeout=self.config['term_timeout'], block=False)

        self.logger.info("Requesting stop for puller")
        self.puller.kill(timeout=self.config['term_timeout'], block=False)

        try:
            if gevent.wait(timeout=self.config['term_timeout']):
                self.logger.info("All threads exited normally")
            else:
                self.logger.info("Timeout reached, shutting down forcefully")
        except KeyboardInterrupt:
            self.logger.info("Shutdown requested again by system, "
                             "exiting without cleanup")

        self.logger.info("=" * 80)
Example #33
 def main(self):
     self._logger.info("PyFuzz2 Server started...")
     self._beacon_server.start_server()
     self._beacon_worker.start_worker()
     self._report_server.start_server()
     self._report_worker.start_worker()
     self._web_server.start_server()
     self._node_client_worker.start_worker()
     self._db_worker.start_worker()
     self._web_worker.start_worker()
     while True:
         try:
             gevent.wait()
         except KeyboardInterrupt:
             self.__shut_down()
             exit(0)
Example #34
 def test_later(self):
     # exiting because the spawned greenlet finished execution (spawn_later (=timer) variant)
     x = gevent.spawn_later(SMALL_TICK, lambda: 5)
     with self.runs_in_given_time(SMALL_TICK):
         result = gevent.wait(timeout=10)
     self.assertTrue(result)
     self.assertTrue(x.dead, x)
Example #35
    def test_concurrent_error_recovery(self):
        """when recovering from an error in a green thread environment one thread
        could have added the table while the other thread was asleep, this will
        test to make sure two threads failing at the same time will both recover
        correctly"""
        i = self.get_interface()
        s = self.get_schema()
        #i.set_table(s)
        for x in range(1, 3):
            gevent.spawn(i.insert, s, {'foo': x, 'bar': str(x)})

        gevent.wait()

        q = Query()
        r = list(i.get(s, q))
        self.assertEqual(2, len(r))
Example #36
def main():
    branch = 'rawhide'

    arguments = docopt(__doc__)
    package_set = _shell_utils.sanitize_pkg_names(arguments['<pkg>'])
    if not package_set:
        package_set = parse_package_list(
            'CERTBOT-ALL-PACKAGES-AND-PLUGINS.txt')

    print_in_progress('pulling new changes')
    pkgs_in_process = {}
    for pkg_name in package_set:
        pkg_path = (THIS_DIR / '..' / pkg_name).resolve()

        if _git.has_uncommitted_changes(pkg_path):
            error_msg = 'uncommitted changes, skipping package'
            print_status_output(pkg_name, is_error=True, msg=error_msg)
            continue
        _git.switch_to_branch(branch, pkg_path)
        result = _git.pull('origin', pkg_path, ff_only=True, gevent=True)
        pkgs_in_process[result] = pkg_name

    while pkgs_in_process:
        done = gevent.wait(pkgs_in_process.keys(), count=1)
        for finished_proc in done:
            pkg_name = pkgs_in_process.pop(finished_proc)
            print_status_output(f'{pkg_name} ({branch})', is_error=False)
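
The while pkgs_in_process loop above is an as-completed pattern: each gevent.wait(..., count=1) call hands back whichever task finished first, which is then removed from the pending set. A stripped-down sketch of the same idea with made-up jobs:

import gevent

def job(name, delay):
    gevent.sleep(delay)
    return name

pending = {gevent.spawn(job, name, delay): name
           for name, delay in [('a', 0.03), ('b', 0.01), ('c', 0.02)]}

finished_order = []
while pending:
    # count=1 returns a list holding (at least) the first greenlet to finish.
    for greenlet in gevent.wait(list(pending), count=1):
        finished_order.append(pending.pop(greenlet))

assert finished_order == ['b', 'c', 'a']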
Example #37
    def main(self):
        """
        Main Kobbra Server method
        """
        ConsoleLogger.banner()

        #mus = MusServer(self.managers)
        info = InfoServer(self.managers)

        #mus.start()
        info.start()

        gevent.wait()

        self.managers.Database().Close()
        ConsoleLogger.log("DBG", "Exiting!")
Example #38
File: matrix.py Project: tokenbase/raiden
    def stop(self):
        """ Try to gracefully stop the greenlet synchronously

        Stop isn't expected to re-raise greenlet _run exception
        (use self.greenlet.get() for that),
        but it should raise any stop-time exception """
        if self._stop_event.ready():
            return
        self._stop_event.set()
        self._client.set_presence_state(UserPresence.OFFLINE.value)

        self._client.stop_listener_thread(
        )  # stop sync_thread, wait client's greenlets
        # wait own greenlets, no need to get on them, exceptions should be raised in _run()
        gevent.wait(self.greenlets)
        self._client.logout()
Example #39
 def _translate():
     while True:
         r = gevent.wait(objects=[pp2_], count=1)
         m = r[0].read()
         print('translate')
         pp2_.reset('pp2_ translate reset')
         p2_.write(((sid, pid), m.msg), m.imp)
Example #40
 def foo(ch1, ch2):
     print('Foo function. Ch1:', ch1, 'Ch2:', ch2)
     while True:
         r = gevent.wait([ch1])
         m = r[0].read()
         print('Read message on ch1:', m)
         ch2.write(m.msg, m.imp)
Example #41
 def run(self):
     while True:
         ready = gevent.wait(
             objects=[self.z2a, self.f2a, self.p2a, self._a2z, self._p2z],
             count=1)
         r = ready[0]
         if r == self.z2a:
             m = self.z2a.read()
             self.z2a.reset()
             self.env_msg(m)
             #self._z2a.write( m )
         elif r == self.f2a:
             m = self.f2a.read()
             self.f2a.reset()
             self.func_msg(m)
         elif r == self.p2a:
             self.p2a.reset()
             dump.dump()
         elif r == self._a2z:
             m = self._a2z.read()
             self._a2z.reset()
             self.a2z.write(m)
         elif r == self._p2z:
             m = self._p2z.read()
             self._p2z.reset()
             dump.dump()
         else:
             dump.dump()
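
Examples #39 through #41 use gevent.wait(objects=..., count=1) as a select-style multiplexer: block until any one of several channel-like objects is readable, then inspect which one it was. The same idea with plain gevent Events (the channel classes above belong to the surrounding project and are not reproduced here):

import gevent
from gevent.event import Event

a, b = Event(), Event()
gevent.spawn_later(0.01, b.set)

# Blocks until at least one watched object becomes ready and reports which one.
ready = gevent.wait([a, b], count=1)
assert ready == [b]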
Example #42
    def test_main(self):
        def main_task():
            with self.tracer.start_active_span('parent', True):
                tasks = self.submit_callbacks()
                gevent.joinall(tasks)

        gevent.spawn(main_task)
        gevent.wait(timeout=5.0)

        spans = self.tracer.finished_spans()
        self.assertEquals(len(spans), 4)
        self.assertNamesEqual(spans, ['task', 'task', 'task', 'parent'])

        for i in range(3):
            self.assertSameTrace(spans[i], spans[-1])
            self.assertIsChildOf(spans[i], spans[-1])
Example #43
def restart_network(raiden_network, retry_timeout):
    for app in raiden_network:
        app.stop()

    wait_network = [gevent.spawn(restart_app, app) for app in raiden_network]

    gevent.wait(wait_network)

    new_network = [greenlet.get() for greenlet in wait_network]

    # The tests assume the nodes are available to transfer
    for app0, app1 in combinations(new_network, 2):
        waiting.wait_for_healthy(app0.raiden, app1.raiden.address,
                                 retry_timeout)

    return new_network
Example #44
    def test_event(self):
        # exiting because of event (the spawned greenlet still runs)
        x = gevent.spawn_later(10, lambda: 5)
        event = Event()
        event_set = gevent.spawn_later(SMALL_TICK, event.set)
        with self.runs_in_given_time(SMALL_TICK):
            result = gevent.wait([event])
        self.assertEqual(result, [event])
        self.assertFalse(x.dead, x)
        self.assertTrue(event_set.dead)
        self.assertTrue(event.is_set())
        x.kill()
        with self.runs_in_no_time():
            result = gevent.wait()

        self.assertTrue(result)
Example #45
def open_and_wait_for_channels(app_channels, registry_address, token, deposit,
                               settle_timeout):
    greenlets = []
    for first_app, second_app in app_channels:
        greenlets.append(
            gevent.spawn(
                payment_channel_open_and_deposit,
                first_app,
                second_app,
                token,
                deposit,
                settle_timeout,
            ))
    gevent.wait(greenlets)

    wait_for_channels(app_channels, registry_address, [token], deposit)
Example #46
        def test_future_wait_gevent_function(self):
            # The future object can be waited on with gevent functions.
            self.assertEqual(monkey.is_module_patched('threading'),
                             self.MONKEY_PATCHED)
            pool = self.pool

            def fn():
                gevent.sleep(0.5)
                return 42

            future = pool.submit(fn)

            def spawned():
                return 2016

            spawned_greenlet = gevent.spawn(spawned)

            done = gevent.wait((future, ))
            self.assertEqual(list(done), [future])
            self.assertTrue(spawned_greenlet.ready())
            self.assertEqual(spawned_greenlet.value, 2016)

            pool.kill()
            del future
            del pool
            del self.pool
Example #47
    def init_kraken_instances(self):
        """
        Call all kraken instances (as found in the instances dir) and store its metadata
        """
        futures = []
        purge_cache_needed = False
        for instance in self.instances.values():
            if not instance.is_initialized:
                futures.append(gevent.spawn(instance.init))

        gevent.wait(futures)
        for future in futures:
            # we check if an instance needs the cache to be purged
            if future.get():
                self._clear_cache()
                break
Example #48
    def handle(self, *args, **options):
        def _handle_base(base):
            base.stop()
            base.destroy()
            base.start()

        greenlets = []

        transaction.set_autocommit(False)
        for base in Base.objects.filter(
                executor__pid__isnull=False).select_for_update(nowait=False):
            g = gevent.spawn(_handle_base, base)
            greenlets.append(g)
        gevent.wait(greenlets)
        transaction.commit()
        transaction.set_autocommit(True)
Example #49
    def start(self):
        self.register_logger("gevent_helpers")
        for comp in self.components.itervalues():
            comp.manager = self
            comp.counters = self.register_stat_counters(
                comp, comp.one_min_stats, comp.one_sec_stats)
            if comp is not self:
                comp.logger = self.register_logger(comp.name)
                comp.start()

        # Starts the greenlet
        Component.start(self)
        # Start the datagram control server if it's been inited
        if self.config['datagram']['enabled']:
            DatagramServer.start(self, )

        # This is the main thread of execution, so just continue here waiting
        # for exit signals
        ######
        # Register shutdown signals
        gevent.signal(signal.SIGUSR1, self.dump_objgraph)
        gevent.signal(signal.SIGHUP, exit, "SIGHUP")
        gevent.signal(signal.SIGINT, exit, "SIGINT")
        gevent.signal(signal.SIGTERM, exit, "SIGTERM")

        try:
            gevent.wait()
        # Allow a force exit from multiple exit signals
        finally:
            self.logger.info(
                "Exiting requested, allowing {} seconds for cleanup.".format(
                    self.config['term_timeout']))
            try:
                for comp in self.components.itervalues():
                    self.logger.debug(
                        "Calling stop on component {}".format(comp))
                    comp.stop()
                if gevent.wait(timeout=self.config['term_timeout']):
                    self.logger.info("All threads exited normally")
                else:
                    self.logger.info(
                        "Timeout reached, shutting down forcefully")
            except gevent.GreenletExit:
                self.logger.info("Shutdown requested again by system, "
                                 "exiting without cleanup")
            self.logger.info("Exit")
            self.logger.info("=" * 80)
Example #50
def main(filename):
    print('Importing {}'.format(filename))
    group_singular = {
        'conifers': [
            'conifer', 'plant', 'land plant', 'botany'],
        'reptiles': [
            'reptile', 'animal', 'cold blood', 'cold bloded', 'vertebrate',
            'fauna'],
        'turtles (non-marine)': [
            'turtle', 'animal', 'non-marine', 'cold blood', 'cold bloded',
            'vertebrate', 'fauna'],
        'butterflies': [
            'butterfly', 'animal', 'insect', 'moths and butterflies', 'fauna',
            'invertebrate'],
        'dragonflies': [
            'dragonfly', 'animal', 'insect', 'dragonflies and damseflies',
            'invertebrate', 'fauna'],
        'mammals': [
            'mammal', 'animal', 'warm blood', 'warm blooded', 'vertebrate',
            'fauna'],
        'birds': [
            'bird', 'animal', 'warm blood', 'warm blooded', 'vertebrate',
            'fauna'],
        'amphibians': [
            'amfibian', 'animal', 'vertebrate', 'fauna'],
        'sphingid moths': [
            'sphingid moth', 'moth', 'animal', 'insect', 'invertebrate',
            'fauna', 'moths and butterflies'],
        'bumblebees': [
            'bumblebee', 'bee', 'bees', 'animal', 'insect', 'invertebrate'],
    }
    with open(filename, newline='') as f:
        count = 0
        # "Scientific Name","Common Name","Family","Taxonomic Group"
        for row in csv.reader(f, delimiter=',', quotechar='"'):
            count += 1
            common = row[1]
            if common == 'null':
                common = row[0]
            gevent.spawn(
                post_species, row[0], common,
                [row[2], row[3]] + group_singular[row[3].lower()])
            if count >= 100:
                gevent.wait()
                count = 0
    gevent.wait()
    return 0
Example #51
    def start_blacklist(self, project):
        """

        :param project:
        :return:
        """
        def __scan(file_path):
            for rule in self.blacklist_rule:
                flag = rule.verify(file_path)
                if flag:
                    rule.get_info(file_path)
                    if project.web_url:
                        report = '{0}/blob/{1}/{2}#L{3}'.format(
                            project.web_url, project.branch, file_path,
                            rule.start_line)
                    else:
                        report = ''
                    author, author_email = get_git_author(
                        project.get_last_author(file_path))
                    vuln = Vulnerability(
                        task_id=project.task_id,
                        rule_key=rule.key,
                        risk_id=rule.risk_id,
                        title=rule.name,
                        file=file_path,
                        author=author,
                        author_email=author_email,
                        hash=project.get_last_commit(),
                        start_line=rule.start_line,
                        end_line=rule.end_line,
                        report=report,
                        code_example=rule.code_example,
                        engine=self.key,
                    )
                    vuln_key = hash_md5('{0}_{1}_{2}'.format(
                        file_path, rule.id, rule.start_line))
                    if vuln_key not in kb.result[project.key]:
                        kb.result[project.key][vuln_key] = vuln.info
                        project.logger.debug(
                            '[PluginScanner] {0}'.format(vuln))

        pool = ThreadPool(project.threads)
        for fpath, dirs, fs in os.walk(project.scan_path):
            for f in fs:
                pool.spawn(__scan, os.path.join(fpath, f))
            gevent.wait()
Example #52
File: protocol.py Project: ycaihua/raiden
    def stop_and_wait(self):
        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.transport.stop_accepting()

        # Stop processing the outgoing queues
        self.event_stop.set()
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.transport.stop()

        # Set all the pending results to False
        for waitack in self.senthashes_to_states.values():
            waitack.async_result.set(False)
Example #53
File: speed.py Project: VidRoll/raiden
def test_latency(apps, assets, num_transfers, amount):
    def start_transfers(idx, curr_asset, num_transfers):
        curr_app = apps[idx]
        asset_manager = curr_app.raiden.get_manager_by_asset_address(
            curr_asset)

        all_paths = asset_manager.channelgraph.get_paths_of_length(
            source=curr_app.raiden.address,
            num_hops=2,
        )
        path = all_paths[0]
        target = path[-1]

        finished = gevent.event.Event()

        def _transfer():
            api = curr_app.raiden.api
            for i in range(num_transfers):
                async_result = api.transfer_async(curr_asset, amount, target)
                async_result.wait()

            finished.set()

        gevent.spawn(_transfer)
        return finished

    finished_events = []

    # Start all transfers
    start_time = time.time()
    for idx, curr_asset in enumerate(assets):
        finished = start_transfers(idx, curr_asset, num_transfers)
        finished_events.append(finished)

    # Wait until the transfers for all assets are done
    gevent.wait(finished_events)
    elapsed = time.time() - start_time
    completed_transfers = num_transfers * len(assets)

    tps = completed_transfers / elapsed
    print('Completed {} transfers. tps:{:.5} latency:{:.5} time:{:.5}s'.format(
        completed_transfers,
        tps,
        elapsed / completed_transfers,
        elapsed,
    ))
Example #54
    def run(self):
        """Starts all the registered services"""

        for service in self.services:
            if service.enabled:
                self.servers.append(spawn(service.run))
        self.logger.info("%d services launched", len(self.servers))

        try:
            if len(self.servers) > 0 and not self.stopped.is_set():
                wait()
        except KeyboardInterrupt:
            self.logger.info('Stopping services')
            self.stop()
            raise KeyboardInterrupt
        finally:
            joinall(self.servers, 2)
Example #55
def test_main_sys_args():
    old_argv = list(sys.argv)
    config = os.path.join(os.path.dirname(__file__), '..', '..', 'config',
                          'absearch.ini')
    sys.argv = ['', config]

    try:
        with capture() as f:
            greenlet = gevent.spawn(main)
            gevent.sleep(0.1)

        assert greenlet.started, f
        greenlet.kill()
        gevent.wait([greenlet])
        assert not greenlet.started
    finally:
        sys.argv[:] = old_argv
Example #56
 def test_ref_attribute(self):
     # checking "ref=False" attribute
     w = gevent.get_hub().loop.timer(10)
     w.start(lambda: None)
     w.ref = False
     with self.runs_in_no_time():
         result = gevent.wait()
     self.assertTrue(result)
Example #57
 def test_callback(self):
     # exiting because the spawned greenlet finished execution (spawn (=callback) variant)
     x = gevent.spawn(lambda: 5)
     with self.runs_in_no_time():
         result = gevent.wait(timeout=10)
     self.assertTrue(result)
     self.assertTrue(x.dead, x)
     self.assertEqual(x.value, 5)
Example #58
    def test_run(self, chat):
        client = Mock()

        chat.pubsub = Mock()
        chat.pubsub.channels = ['quorum']
        chat.pubsub.listen.return_value = [{
            'type': 'message',
            'channel': 'quorum',
            'data': 'Calloo! Callay!',
        }]
        chat.greenlet = Mock()

        chat.subscribe(client, 'quorum')
        chat.run()

        gevent.wait()  # wait for event loop
        client.send.assert_called_once_with('quorum:Calloo! Callay!')
Example #59
    def work(self, burst=False):
        """Starts the work loop.

        Pops and performs all jobs on the current list of queues.  When all
        queues are empty, block and wait for new jobs to arrive on any of the
        queues, unless `burst` mode is enabled.

        The return value indicates whether any jobs were processed.
        """
        setup_loghandlers()
        self._install_signal_handlers()

        self.did_perform_work = False
        self.register_birth()
        self.log.info('RQ worker started, version %s' % VERSION)
        self.set_state('starting')
        try:
            while True:
                if self.stopped:
                    self.log.info('Stopping on request.')
                    break

                timeout = None if burst else max(1, self.default_worker_ttl - 60)
                try:
                    result = self.dequeue_job_and_maintain_ttl(timeout)

                    if result is None and burst:
                        try:
                            # Make sure dependent jobs are enqueued.
                            gevent.wait(self.children)
                        except LoopExit:
                            pass
                        result = self.dequeue_job_and_maintain_ttl(timeout)

                    if result is None:
                        break
                except StopRequested:
                    break

                job, queue = result
                self.execute_job(job, queue)

        finally:
            if not self.is_horse:
                self.register_death()
        return self.did_perform_work
Example #60
def test_direct_transfer_to_offline_node(raiden_network, token_addresses,
                                         deposit):
    app0, app1 = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_app(app0)
    payment_network_id = app0.raiden.default_registry.address
    token_network_identifier = views.get_token_network_identifier_by_token_address(
        chain_state,
        payment_network_id,
        token_address,
    )
    # Wait until the initialization of the node is complete and then stop it
    gevent.wait([app1.raiden.start_event])
    app1.raiden.stop()

    amount = 10
    target = app1.raiden.address
    payment_identifier = 13
    app0.raiden.direct_transfer_async(
        token_network_identifier,
        amount,
        target,
        identifier=payment_identifier,
    )

    app1.raiden.start()
    exception = ValueError(
        'Waiting for transfer received success in the WAL timed out')
    with gevent.Timeout(seconds=5, exception=exception):
        wait_for_transfer_success(
            app1.raiden,
            payment_identifier,
            amount,
            app1.raiden.alarm.sleep_time,
        )

    no_outstanding_locks = []
    assert_synced_channel_state(
        token_network_identifier,
        app0,
        deposit - amount,
        no_outstanding_locks,
        app1,
        deposit + amount,
        no_outstanding_locks,
    )