Exemplo n.º 1
0
    def test_empty_iterator(self):
        """A WaitIterator built over no futures is done immediately, mixing
        positional and keyword futures raises ValueError, and an exhausted
        iterator reports None for current_index/current_future."""
        iterator = gen.WaitIterator()
        self.assertTrue(iterator.done(), 'empty generator iterated')

        with self.assertRaises(ValueError):
            iterator = gen.WaitIterator(False, bar=False)

        self.assertIsNone(iterator.current_index, "bad nil current index")
        self.assertIsNone(iterator.current_future, "bad nil current future")
Exemplo n.º 2
0
    def test_already_done(self):
        """WaitIterator over futures that are already resolved.

        Covers the positional form (current_index is the integer position)
        and the keyword form (current_index is the argument name), and
        verifies both report None for current_index/current_future once
        exhausted.
        """
        f1 = Future()  # type: Future[int]
        f2 = Future()  # type: Future[int]
        f3 = Future()  # type: Future[int]
        f1.set_result(24)
        f2.set_result(42)
        f3.set_result(84)

        g = gen.WaitIterator(f1, f2, f3)
        i = 0
        while not g.done():
            r = yield g.next()
            # Order is not guaranteed, but the current implementation
            # preserves ordering of already-done Futures.
            if i == 0:
                self.assertEqual(g.current_index, 0)
                self.assertIs(g.current_future, f1)
                self.assertEqual(r, 24)
            elif i == 1:
                self.assertEqual(g.current_index, 1)
                self.assertIs(g.current_future, f2)
                self.assertEqual(r, 42)
            elif i == 2:
                self.assertEqual(g.current_index, 2)
                self.assertIs(g.current_future, f3)
                self.assertEqual(r, 84)
            i += 1

        self.assertEqual(g.current_index, None, "bad nil current index")
        self.assertEqual(g.current_future, None, "bad nil current future")

        # Keyword form: current_index is the argument name, not a position.
        dg = gen.WaitIterator(f1=f1, f2=f2)

        while not dg.done():
            dr = yield dg.next()
            if dg.current_index == "f1":
                self.assertTrue(
                    dg.current_future == f1 and dr == 24,
                    "WaitIterator dict status incorrect",
                )
            elif dg.current_index == "f2":
                self.assertTrue(
                    dg.current_future == f2 and dr == 42,
                    "WaitIterator dict status incorrect",
                )
            else:
                self.fail("got bad WaitIterator index {}".format(
                    dg.current_index))

            i += 1

        self.assertEqual(dg.current_index, None, "bad nil current index")
        self.assertEqual(dg.current_future, None, "bad nil current future")
Exemplo n.º 3
0
    def compute_many(self, bstream, msgs, report=False):
        """Gather remote dependencies for *msgs*, run each task, and stream
        every result over *bstream* as it completes.

        Parameters
        ----------
        bstream: stream-like object with a ``send`` method used to report
            results and missing-data notices
        msgs: list of task messages; each may carry a 'who_has' entry
        report: forwarded to ``compute_one``
        """
        with log_errors():
            transfer_start = time()
            good, bad, data, num_transferred = yield self.gather_many(msgs)
            transfer_end = time()

            # 'who_has' was only needed for gathering; drop it before compute.
            for msg in msgs:
                msg.pop('who_has', None)

            if bad:
                # logger.warn is a deprecated alias; use warning().
                logger.warning("Could not find data for %s", sorted(bad))
            for k, v in bad.items():
                bstream.send({'status': 'missing-data',
                              'key': k,
                              'keys': list(v)})

            if good:
                futures = [self.compute_one(data, report=report, **msg)
                           for msg in good]
                wait_iterator = gen.WaitIterator(*futures)
                # Transfer timing is attached only to the first completed
                # result.
                result = yield wait_iterator.next()
                if num_transferred:
                    result['transfer_start'] = transfer_start
                    result['transfer_stop'] = transfer_end
                bstream.send(result)
                while not wait_iterator.done():
                    msg = yield wait_iterator.next()
                    bstream.send(msg)
Exemplo n.º 4
0
 def join(self, timeout=None):
     """Block until the pipeline reaches a terminal condition.

     Waits concurrently on end-of-input and on source/drain error events;
     closes the pipeline when an error event fires outside of a normal
     shutdown.

     :param timeout: forwarded to each event's ``wait``; a
         ``gen.TimeoutError`` aborts the join with a warning.
     """
     futures = dict(
         eof=self.source.end_of_input.wait(timeout),
         input_error=self.source.input_error.wait(timeout),
         output_error=self.drain.output_error.wait(timeout),
     )
     # Keyword form: current_index below is the dict key of the future
     # that fired.
     wait_iterator = gen.WaitIterator(**futures)
     while not wait_iterator.done():
         try:
             yield wait_iterator.next()
         except gen.TimeoutError:
             self.logger.warning('Wait timeout occurred; aborting')
             break
         except Exception as err:
             self.logger.error("Error %s from %s",
                               err, wait_iterator.current_future)
         else:
             # NOTE(review): `futures` has no 'queue' key, so this branch
             # looks unreachable here — confirm whether a queue future was
             # meant to be included above.
             if wait_iterator.current_index == 'queue' and \
               (self.source.state == CLOSING or
                self.drain.state == CLOSING):
                 self.logger.info('Queue drained')
                 break
             if wait_iterator.current_index == 'eof':
                 self.logger.info('End of input reached')
                 break
             elif wait_iterator.current_index == 'input_error' and \
                self.source.state != CLOSING:
                 yield self.close('Error occurred in source')
                 break
             elif wait_iterator.current_index == 'output_error' and \
                self.drain.state != CLOSING:
                 yield self.close('Error occurred in drain')
                 break
Exemplo n.º 5
0
    def post(self):
        """Fan the incoming request out to every backend server and reply to
        the client with the first successful response received."""
        response_sent = False
        r = self.request
        request_futures = [
            self.http_request_until_success(
                gen_request_with_new_host(r, servers[i]))
            for i in range(len(servers))
        ]
        wait_iterator = gen.WaitIterator(*request_futures)

        # Act on the first successful response
        while not wait_iterator.done():
            try:
                result = yield wait_iterator.next()
            except Exception as e:
                logger.error("Error {} from {}".format(
                    e, wait_iterator.current_future))
                # BUG FIX: without this `continue`, the code below ran with
                # `result` unbound (NameError) or stale from a prior loop.
                continue

            # Return the first response
            if not response_sent:
                self.clear()
                for name, value in result.headers.get_all():
                    self.set_header(name, value)
                self.set_status(result.code)
                self.write(result.body)
                self.finish()
                http_responses_counter.labels(code=result.code,
                                              source=SRC_PROXY).inc()

                response_sent = True
                logger.debug(
                    "First successful response received from '{}', and was returned to client."
                    .format(servers[wait_iterator.current_index]))
Exemplo n.º 6
0
    async def _send_request_gather_response(self, results_array,
                                            proxy_request_payloads):
        """Fan out one proxied HTTP request per payload, decode each JSON
        response into *results_array* in place (at the payload's index),
        and return the merged response.

        Assumes ``len(results_array) == len(proxy_request_payloads)`` and
        that each decoded response carries a 'results' list — TODO confirm
        against callers.
        """
        client = AsyncHTTPClient()
        logging.info('start _send_request_gather_response request %s',
                     self.request)
        waiter = gen.WaitIterator(*[
            client.fetch(self.create_proxy_request(proxy_request_payload))
            for proxy_request_payload in proxy_request_payloads
        ])

        while not waiter.done():
            response = await waiter.next()
            # current_index is the position of the payload whose fetch just
            # completed, so responses land in their original slots.
            results_array[waiter.current_index] = tornado.escape.json_decode(
                response.body)

        logging.info('gathering requests %s', self.request)
        final_result = []
        for r in results_array:
            final_result += r['results']

        # Reuse the first response as the envelope for the merged results.
        response_data = results_array[0]
        response_data['results'] = final_result
        logging.info('stop _send_request_gather_response request %s',
                     self.request)
        return response_data
Exemplo n.º 7
0
    def test_iterator(self):
        """Iterate futures that resolve out of order.

        Expected completion order (indices into `futures`): 2 (value 24),
        0 (raises ZeroDivisionError), 1 (value 42), 3 (value 84) —
        presumably arranged by finish_coroutines; confirm against its
        implementation.
        """
        futures = [Future(), Future(), Future(), Future()]

        self.finish_coroutines(0, futures)

        g = gen.WaitIterator(*futures)

        i = 0
        while not g.done():
            try:
                r = yield g.next()
            except ZeroDivisionError:
                # Iteration 1: futures[0] resolves with an exception.
                self.assertIs(g.current_future, futures[0],
                              'exception future invalid')
            else:
                if i == 0:
                    self.assertEqual(r, 24, 'iterator value incorrect')
                    self.assertEqual(g.current_index, 2, 'wrong index')
                elif i == 2:
                    self.assertEqual(r, 42, 'iterator value incorrect')
                    self.assertEqual(g.current_index, 1, 'wrong index')
                elif i == 3:
                    self.assertEqual(r, 84, 'iterator value incorrect')
                    self.assertEqual(g.current_index, 3, 'wrong index')
            i += 1
Exemplo n.º 8
0
    def compute_many(self, bstream, msgs, report=False):
        """Gather dependencies for *msgs*, run each task, and stream every
        result over *bstream* as it completes.

        Parameters
        ----------
        bstream: stream-like object with a ``send`` method used to report
            results and missing-data notices
        msgs: list of task messages; each may carry a 'who_has' entry
        report: forwarded to ``compute_one``
        """
        good, bad, data, num_transferred, diagnostics = yield self.gather_many(
            msgs)

        if bad:
            # logger.warn is a deprecated alias; use warning().
            logger.warning("Could not find data for %s", sorted(bad))

        # 'who_has' was only needed for gathering; drop it before compute.
        for msg in msgs:
            msg.pop('who_has', None)

        for k, v in bad.items():
            bstream.send({'status': 'missing-data', 'key': k, 'keys': list(v)})

        if good:
            futures = [
                self.compute_one(data, report=report, **msg) for msg in good
            ]
            wait_iterator = gen.WaitIterator(*futures)
            # Transfer diagnostics are attached only to the first completed
            # result.
            result = yield wait_iterator.next()
            if diagnostics:
                result.update(diagnostics)
            bstream.send(result)
            while not wait_iterator.done():
                msg = yield wait_iterator.next()
                bstream.send(msg)
Exemplo n.º 9
0
    def with_interrupt(self, yieldable):
        """
        Yield a yieldable which will be interrupted if this future is interrupted ::

            from tornado import ioloop, gen
            loop = ioloop.IOLoop.current()

            interruptable = InterruptableFuture()
            loop.add_callback(interruptable.interrupt, RuntimeError("STOP"))
            loop.run_sync(lambda: interruptable.with_interrupt(gen.sleep(2)))
            >>> RuntimeError: STOP


        :param yieldable: The yieldable
        :return: The result of the yieldable
        """
        # Wait for whichever of (yieldable, self) finishes first.  If `self`
        # finished (index 1) we expect the interrupt to have raised already,
        # so reaching this point with any index other than 0 is an error.
        wait_iterator = gen.WaitIterator(yieldable, self)
        result = yield wait_iterator.next()  # pylint: disable=stop-iteration-return
        if wait_iterator.current_index != 0:
            raise RuntimeError("This interruptible future had it's result set unexpectedly to {}".format(result))

        # Re-yield the (already resolved) yieldable to obtain its value;
        # the original `[yieldable, self][0]` was an obfuscated `yieldable`.
        result = yield yieldable
        raise gen.Return(result)
Exemplo n.º 10
0
def unicorn_tank(request, response):
    """Stress endpoint: concurrently create/delete `count` unicorn nodes of
    `size` KiB of random data each, replying 200 if all operations succeed
    and 500 otherwise.

    The second unpacked request field is expected to look like
    "/<size>_<count>" — TODO confirm against the caller's framing.
    """
    req = yield request.read()
    r = msgpack.unpackb(req)
    get = r[1].split("/")[1].split("_")
    size = int(get[0])
    count = int(get[1])
    # Context manager fixes the original's leaked file handle.
    with open('/dev/urandom', 'rb') as f:
        data = f.read(size * 1024)

    response.write(msgpack.packb((200, DEFAULT_HEADERS)))

    error = 0
    futures = {}
    # Launch all create/delete operations concurrently, keyed "1".."count".
    for i in range(1, count + 1):
        futures[str(i)] = create_delete_node(unicorn, data, response)

    wait_iterator = gen.WaitIterator(**futures)
    while not wait_iterator.done():
        try:
            yield wait_iterator.next()
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            error = error + 1

    if error == 0:
        response.write(msgpack.packb((200, DEFAULT_HEADERS)))
    else:
        response.write(msgpack.packb((500, DEFAULT_HEADERS)))

    response.close()
Exemplo n.º 11
0
def judge_and_alert(instance):
    """Run every alert item's ES aggregation query for *instance*
    concurrently and send an alert for each item that produced content."""

    def _get_query_datas():
        # Build the ES request parameters for every alert item.
        alert_items = get_alert_items()
        _query_constructor = partial(get_alert_type_strategy_and_query_body,
                                     hostname=instance)
        # BUG FIX: materialize the map.  On Python 3 `map` returns a
        # one-shot iterator; `querys` is iterated twice below, so the
        # second comprehension (`workdic`) used to come up empty.
        return list(map(_query_constructor, alert_items))

    querys = _get_query_datas()
    strategys = {q['alertname']: q['strategy'] for q in querys}

    # alertname -> coroutine that sends the ES query.
    workdic = {q['alertname']: send_es_query(q['query_body']) for q in querys}
    # Query the ES aggregations for all alert items concurrently.
    waiter = gen.WaitIterator(**workdic)
    while not waiter.done():
        try:
            es_ret = yield waiter.next()
        except Exception as e:
            logging.error(e, exc_info=True)
        else:
            # Keyword form: current_index is the alertname key.
            alertname = waiter.current_index
            # Interpret the ES response here.
            alert_content = yield judge_result_and_make_alert_msg(
                alertname, strategys[alertname], json.loads(es_ret))
            # Only alert when there is actual alert content.
            if alert_content.get('body'):
                receive_group = strategys[alertname]['contact_group']
                alert_level = strategys[alertname]['level']
                alerting = Alert(receive_group, alert_level, alert_content)
                # Coroutine that delivers the alert.
                yield alerting.send()
Exemplo n.º 12
0
 def test_no_ref(self):
     """Regression test: a WaitIterator referenced only through the Future
     returned by next() must still deliver that future's result (the
     iterator keeps weak references internally, which used to let it be
     collected too early)."""
     pending = gen.WaitIterator(gen.sleep(0)).next()
     yield gen.with_timeout(datetime.timedelta(seconds=0.1), pending)
def fetch_and_handle():
    """Fetch five generated URLs concurrently and handle each response as
    it completes, simulating one second of work per response and printing
    the total elapsed time."""
    http_client = AsyncHTTPClient()
    dic_device = dict()
    arr_req = []
    URLS = generate_url(5)
    for index, url in enumerate(URLS):
        # Map request position -> human-readable request label.
        dic_device[index] = "custom-request-" + str(index) + ": "
        arr_req.append(http_client.fetch(url))

    # start of waiter
    start = timer()
    req_waiter = gen.WaitIterator(*arr_req)
    while not req_waiter.done():
        try:
            result = yield req_waiter.next()
        except Exception as e:
            index = req_waiter.current_index
            # print('index exception', index)
            print("Error {} for request {}".format(e,
                                                   req_waiter.current_future))
        else:
            index = req_waiter.current_index
            # print('index', index)
            result = result.body
            # NOTE(review): request_id is only consumed by the commented-out
            # print below.
            request_id = dic_device[index]
            print("{} back and start to work for 1 second".format(index))
            yield gen.sleep(1)
            print("{} finish sleeping".format(index))
            # print("Result {} received from future {} for {}".format(
            #     result.decode('utf8'), index, request_id))
    end = timer()
    print(end - start)
Exemplo n.º 14
0
File: card.py — Project: hphu/Dominet
def parallel_selects(futures, players, callback):
    """Wait on all selection futures and invoke ``callback(selection,
    player)`` for each as it resolves, in completion order rather than
    input order."""
    waiter = gen.WaitIterator(*futures)
    while not waiter.done():
        choice = yield waiter.next()
        chooser = players[waiter.current_index]
        yield gen.maybe_future(callback(choice, chooser))
Exemplo n.º 15
0
    def send(self, request_by_broker):
        """
        Sends a dict of requests keyed on broker ID and handles responses.

        Returns a dictionary of the results of
        ``handle_<response.api>_response`` method calls, keyed to the
        corresponding broker ID.

        Raises ``UnhandledResponseError`` if the client subclass does not have
        a ``handle_<response.api>_response`` method available to handle an
        incoming response object.

        If an error occurs in a response, the ``heal_cluster`` flag is set
        and the ``heal()`` method on the cluster is called after processing
        each response.

        Responses are handled in the order they come in, but this method does
        not yield a value until all responses are handled.
        """
        # Broker IDs are stringified because WaitIterator's keyword form
        # needs string keys; they are converted back with int() below.
        iterator = gen.WaitIterator(
            **{
                str(broker_id): self.cluster[broker_id].send(request)
                for broker_id, request in six.iteritems(request_by_broker)
            })

        results = {}

        while not iterator.done():
            # Presumably a liveness timestamp — confirm how heal_check is
            # consumed elsewhere.
            self.heal_check = time.time()
            try:
                response = yield iterator.next()
            except BrokerConnectionError as e:
                log.info("Connection to %s:%s lost", e.host, e.port)
                self.heal_cluster = True
                break
            except iostream.StreamClosedError:
                log.info("Connection to broker lost.")
                self.heal_cluster = True
                break
            except Exception:
                # Unknown failure: flag for healing but keep draining the
                # remaining responses (continue, not break).
                log.exception("Error sending request.")
                self.heal_cluster = True
                continue

            self.heal_check = time.time()
            # Dispatch to the subclass's handle_<api>_response method.
            handler = getattr(self, "handle_%s_response" % response.api, None)
            if handler is None:
                raise UnhandledResponseError(response.api)

            result = yield gen.maybe_future(handler(response))
            results[int(iterator.current_index)] = result

        if self.heal_cluster:
            self.heal_cluster = False
            yield self.cluster.heal()

        raise gen.Return(results)
Exemplo n.º 16
0
def _as_completed(fs, queue):
    """Push futures onto *queue* as their underlying events fire, grouping
    futures that share a key so each whole group is enqueued together when
    its representative's event completes."""
    grouped = groupby(lambda f: f.key, fs)
    representatives = [members[0] for members in grouped.values()]
    waiter = gen.WaitIterator(*[rep.event.wait() for rep in representatives])

    while not waiter.done():
        yield waiter.next()
        # TODO: handle case of restarted futures
        completed = representatives[waiter.current_index]
        for member in grouped[completed.key]:
            queue.put_nowait(member)
    def poll(self):
        """Wait on every hello future, printing each reply as it arrives
        and printing the exception for any future that fails."""
        waiter = gen.WaitIterator(*self.hello_futures())

        while not waiter.done():
            try:
                answer = yield waiter.next()
            except Exception as exc:
                print(exc)
            else:
                print(answer)
Exemplo n.º 18
0
def ignore_exceptions(coroutines, *exceptions):
    """ Process list of coroutines, ignoring certain exceptions

    >>> coroutines = [cor(...) for ...]  # doctest: +SKIP
    >>> x = yield ignore_exceptions(coroutines, TypeError)  # doctest: +SKIP
    """
    collected = []
    waiter = gen.WaitIterator(*coroutines)
    while not waiter.done():
        # Results whose coroutine raised one of *exceptions* are dropped.
        with ignoring(*exceptions):
            value = yield waiter.next()
            collected.append(value)
    raise gen.Return(collected)
Exemplo n.º 19
0
def All(*args):
    """ Wait on many tasks at the same time

    Err once any of the tasks err.

    See https://github.com/tornadoweb/tornado/issues/1546
    """
    if len(args) == 1 and isinstance(args[0], Iterable):
        args = args[0]
    waiter = gen.WaitIterator(*args)
    # Pre-size the result list; slots fill in completion order by index.
    gathered = [None for _ in args]
    while not waiter.done():
        gathered[waiter.current_index] = yield waiter.next()
    raise gen.Return(gathered)
Exemplo n.º 20
0
def fetch_and_handle(urls):
    """Fetches the urls and handles/processes the response.

    Parameters
    ----------
    urls: dict mapping keys to URLs, or list of URLs.  Results are keyed
        by the dict key or by list index respectively.

    Returns
    -------
    dict of parsed JSON response bodies; failed fetches are logged and
    omitted.
    """
    tables = {}
    nurls = len(urls)
    done = 0
    AsyncHTTPClient.configure(NoQueueTimeoutHTTPClient)
    http_client = AsyncHTTPClient(defaults=dict(request_timeout=180))

    start = time.time()

    if isinstance(urls, dict):
        waiter = gen.WaitIterator(
            **{k: http_client.fetch(v)
               for k, v in urls.items()})
    elif isinstance(urls, list):
        waiter = gen.WaitIterator(*[http_client.fetch(url) for url in urls])
    else:
        # BUG FIX: previously fell through and raised a confusing
        # NameError on `waiter` below.
        raise TypeError("urls must be a dict or a list, got %r" % type(urls))

    while not waiter.done():
        try:
            result = yield waiter.next()
            tables[waiter.current_index] = json.loads(result.body)
        except Exception as e:
            print("Error {} from {}".format(e, waiter.current_future))
            print(waiter.current_index)
            continue

        else:
            done += 1
            # Naive ETA: elapsed time scaled by remaining/completed ratio.
            estimated_time = (time.time() - start) * (nurls - done) / done
            print(
                f"{round((done/nurls)*100, 2)}% ({convert_time(estimated_time)})",
                end="\r",
            )

    return tables
Exemplo n.º 21
0
def skynet(level=0, index=0):
    """Recursively fan out SONS children per level until LEVELS is reached,
    then sum the leaf indices back up the tree and return the total."""
    if level >= LEVELS:
        raise gen.Return(index)

    children = [
        skynet(level=level + 1, index=index * SONS + offset)
        for offset in range(0, SONS)
    ]

    total = 0
    waiter = gen.WaitIterator(*children)
    while not waiter.done():
        subtotal = yield waiter.next()
        total += subtotal

    raise gen.Return(total)
Exemplo n.º 22
0
    def test_write_while_connecting(self: typing.Any):
        """A write issued before the connection completes must be queued,
        and the connect future must resolve strictly before the write
        future."""
        stream = self._make_client_iostream()
        connect_fut = stream.connect(("127.0.0.1", self.get_http_port()))
        # unlike the previous tests, try to write before the connection
        # is complete.
        write_fut = stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
        self.assertFalse(connect_fut.done())

        # connect will always complete before write.
        it = gen.WaitIterator(connect_fut, write_fut)
        resolved_order = []
        while not it.done():
            yield it.next()
            resolved_order.append(it.current_future)
        self.assertEqual(resolved_order, [connect_fut, write_fut])

        # The server is expected to answer the queued request.
        data = yield stream.read_until_close()
        self.assertTrue(data.endswith(b"Hello"))

        stream.close()
Exemplo n.º 23
0
 def afk_cb():
     """AFK-timeout callback (closure over the enclosing waiter): flag this
     waiter as AFK, ask each non-AFK opponent whether to force-forfeit the
     AFK players, and end the game for them if anyone answers "Yes"."""
     self.is_afk = True
     afk_players = [
         x for x in self.player.game.players if x.waiter.is_afk
     ]
     send_afk_msg_to = [
         x for x in self.player.get_opponents() if x not in afk_players
     ]
     futures = []
     for i in send_afk_msg_to:
         futures.append(
             i.select(
                 1, 1, ["Yes"],
                 "{} {} not responded for awhile, force forefeit?".
                 format(", ".join([i.name for i in afk_players]),
                        "have" if len(afk_players) > 1 else "has")))
     wait_iterator = gen.WaitIterator(*futures)
     while not wait_iterator.done():
         selected = yield wait_iterator.next()
         # First "Yes" answer wins: end the game for the AFK players.
         if selected == ["Yes"]:
             self.player.game.end_game(afk_players)
Exemplo n.º 24
0
def All(args, quiet_exceptions=()):
    """ Wait on many tasks at the same time

    Err once any of the tasks err.

    See https://github.com/tornadoweb/tornado/issues/1546

    Parameters
    ----------
    args: futures to wait for
    quiet_exceptions: tuple, Exception
        Exception types to avoid logging if they fail
    """
    tasks = gen.WaitIterator(*args)
    results = [None for _ in args]
    while not tasks.done():
        try:
            result = yield tasks.next()
        except Exception:

            @gen.coroutine
            def quiet():
                """ Watch unfinished tasks

                Otherwise if they err they get logged in a way that is hard to
                control.  They need some other task to watch them so that they
                are not orphaned
                """
                # NOTE: reaches into WaitIterator internals (_unfinished).
                for task in list(tasks._unfinished):
                    try:
                        yield task
                    except quiet_exceptions:
                        pass

            # Fire-and-forget watcher for the leftover tasks, then re-raise
            # the exception that broke the loop.
            quiet()
            raise

        # Slots fill in completion order by original index.
        results[tasks.current_index] = result
    raise gen.Return(results)
Exemplo n.º 25
0
async def Any(args, quiet_exceptions=()):
    """Wait on many tasks at the same time and return when any is finished

    Err once any of the tasks err.

    Returns a list sized like *args* where only the slot of the first
    completed task is filled; all other entries remain None.

    Parameters
    ----------
    args: futures to wait for
    quiet_exceptions: tuple, Exception
        Exception types to avoid logging if they fail
    """
    tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
    results = [None for _ in args]
    while not tasks.done():
        try:
            result = await tasks.next()
        except Exception:

            @gen.coroutine
            def quiet():
                """Watch unfinished tasks

                Otherwise if they err they get logged in a way that is hard to
                control.  They need some other task to watch them so that they
                are not orphaned
                """
                # NOTE: reaches into WaitIterator internals (_unfinished).
                for task in list(tasks._unfinished):
                    try:
                        yield task
                    except quiet_exceptions:
                        pass

            # Fire-and-forget watcher for the leftover tasks, then re-raise
            # the exception that broke the loop.
            quiet()
            raise

        results[tasks.current_index] = result
        # "Any" semantics: stop after the first task completes.
        break
    return results
Exemplo n.º 26
0
    def info(self, apps):
        """Collect per-app info, querying at most 25 apps concurrently.

        Each app's info call is bounded by ``self.timeout``; a timeout
        stores ``self.on_timeout_reply`` for that app and any other error
        stores its string form.

        :param apps: iterable of application names
        :return: dict with 'apps' (name -> info/error) and 'count'
        """
        infos = {}
        for names in split_by_groups(apps, 25):
            futures = {}
            for app in names:
                res_future = (yield self.node.info(app, self._flags)).rx.get()
                futures[app] = gen.with_timeout(self.timeout, res_future)

            # Keyword form: current_index below is the app name.
            wait_iterator = gen.WaitIterator(**futures)
            while not wait_iterator.done():
                try:
                    info = yield wait_iterator.next()
                    infos[wait_iterator.current_index] = info
                except gen.TimeoutError:
                    infos[wait_iterator.current_index] = self.on_timeout_reply
                except Exception as err:
                    infos[wait_iterator.current_index] = str(err)

        result = {
            'apps': infos,
            'count': len(infos),
        }
        raise gen.Return(result)
Exemplo n.º 27
0
 async def f():
     """async-for over a WaitIterator, closing over the enclosing test's
     `futures`.  Expected completion order: index 2 (24), index 0
     (ZeroDivisionError), index 1 (42), index 3 (84) — presumably
     arranged by the enclosing test; iteration resumes on the same
     iterator after the exception escapes the first loop."""
     i = 0
     g = gen.WaitIterator(*futures)
     try:
         async for r in g:
             if i == 0:
                 self.assertEqual(r, 24, 'iterator value incorrect')
                 self.assertEqual(g.current_index, 2, 'wrong index')
             else:
                 raise Exception("expected exception on iteration 1")
             i += 1
     except ZeroDivisionError:
         i += 1
     # The iterator is resumable after the exception broke the first loop.
     async for r in g:
         if i == 2:
             self.assertEqual(r, 42, 'iterator value incorrect')
             self.assertEqual(g.current_index, 1, 'wrong index')
         elif i == 3:
             self.assertEqual(r, 84, 'iterator value incorrect')
             self.assertEqual(g.current_index, 3, 'wrong index')
         else:
             raise Exception("didn't expect iteration %d" % i)
         i += 1
     self.finished = True
Exemplo n.º 28
0
    def printresults():
        logger.log(
            STATS, '{} {} {} {} {}'.format(start.strftime("%Y-%m-%d %H:%M:%S"),
                                           end.strftime("%Y-%m-%d %H:%M:%S"),
                                           runtime, queryPerSec, queryratio))

        querypermin = queryPerSec * 60
        endtime = datetime.now(timezone('UTC')) + timedelta(minutes=runtime)
        line = list()
        popularitylist = list()
        newquerylist = list()

        if filename != "":
            newquerylist = QueryGenerator.generateQueriesFromFile(
                start, end, querypermin * runtime, timeAccessGenerator,
                periodAccessGenerator, querytype, queryratio, filename)
        elif isbatch == True:
            newquerylist = QueryGenerator.generateQueries(
                start, end, querypermin * runtime, timeAccessGenerator,
                periodAccessGenerator, popularitylist, querytype, queryratio,
                logger)
        else:
            #logger.info("Run.py start queryendtime "+str(start)+", "+str(endtime))
            queryStartInterval = start
            queryEndInterval = start + timedelta(minutes=1)
            for i in range(0, runtime):
                logger.info("Start generating queries for interval " +
                            str(queryStartInterval) + " - " +
                            str(queryEndInterval))
                newquerylist.extend(
                    QueryGenerator.generateQueries(
                        queryStartInterval, queryEndInterval, querypermin,
                        timeAccessGenerator, periodAccessGenerator,
                        popularitylist, querytype, queryratio, logger))
                queryEndInterval = queryEndInterval + timedelta(minutes=1)

            logger.info("Finished generating queries. num queries generated " +
                        str(len(newquerylist)))

        if filename != "" or isbatch == True:
            count = 0
            time = datetime.now(timezone('UTC'))
            logger.info("Time: {}".format(time.strftime("%Y-%m-%d %H:%M:%S")))
            nextminute = time + timedelta(minutes=1)
            for query in newquerylist:
                try:
                    line.append(
                        applyOperation(query, config, brokernameurl, logger))
                except Exception as inst:
                    logger.error(type(inst))  # the exception instance
                    logger.error(inst.args)  # arguments stored in .args
                    logger.error(
                        inst)  # __str__ allows args to be printed directly
                    x, y = inst.args
                    logger.error('x =', x)
                    logger.error('y =', y)

                count = count + 1
                if count >= querypermin:
                    timediff = (nextminute -
                                datetime.now(timezone('UTC'))).total_seconds()
                    if timediff > 0:
                        yield gen.sleep(timediff)
                    count = 0
                    time = datetime.now(timezone('UTC'))
                    logger.info("Time: {}".format(
                        time.strftime("%Y-%m-%d %H:%M:%S")))
                    nextminute = time + timedelta(minutes=1)
        else:
            # frequency of queries per millisecond
            queryPerMilliSecond = float(queryPerSec) / 1000
            # number of samples spaced by 1 millisecond
            numSamples = runtime * 60 * 1000
            numQueries, querySchedule = genPoissonQuerySchedule(
                queryPerMilliSecond, numSamples)
            logger.info("Poisson numQueries = " + str(numQueries))

            queryScheduleIdx = 0
            count = 0
            while count < len(newquerylist):
                sample = querySchedule[queryScheduleIdx]
                #logger.info("Poisson sample is "+str(sample[0])+", "+str(sample[1]))
                if (sample[0] == 0):
                    #logger.info("Sleeping for "+str(sample[1]))
                    yield gen.sleep(
                        float(sample[1]) /
                        1000)  # divide by 1000 to convert it into seconds
                else:
                    for i in range(0, sample[0]):
                        try:
                            line.append(
                                applyOperation(newquerylist[count], config,
                                               brokernameurl, logger))
                            #applyOperation(newquerylist[count], config, brokernameurl, logger)
                            newquerylist[count].setTxTime(datetime.now())
                            #logger.info("Running query "+str(sample[0]))
                        except Exception as inst:
                            logger.error(type(inst))  # the exception instance
                            logger.error(
                                inst.args)  # arguments stored in .args
                            logger.error(
                                inst
                            )  # __str__ allows args to be printed directly
                        count = count + 1
                        if count >= len(newquerylist):
                            break
                queryScheduleIdx = queryScheduleIdx + 1

        wait_iterator = gen.WaitIterator(*line)
        while not wait_iterator.done():
            try:
                result = yield wait_iterator.next()
            except Exception as e:
                logger.error("Error {} from {}".format(
                    e, wait_iterator.current_future))