Example #1
0
def main():
    connection = couchbase.client.Couchbase('192.168.1.154:8091', 'default', '');
    bucket = connection['default']

    now = int(time.time())
    start_time = now - 30 * 60

    def read_traffic(key):
        for row in bucket.view('_design/ray/_view/live_congestion',
            limit=100,
            stale='ok',
            startkey=[key, start_time],
            endkey=[key, now + 1],
            ):
            if row is None or 'value' not in row:
                return 'now found'
            value = row['value']
            return 'key=%d, record_count=%d, average_congestion=%.2f, age=%.2fs' % \
                (key, value['count'], value['congestion'],
                      now - value['age'])

    pool = gevent.pool.Pool(size=200)
    for result in pool.imap_unordered(read_traffic,
                                      itertools.cycle(xrange(0, 10000 * 50, 100))):

        print result
    pool.join()
Example #2
0
    def get_all_vms(self):
        """
        returns all vm information by name
        """
        # TODO prevent one bad VM from tainting all results
        # One timestamp up front so every record shares the same capture time.
        now = utctimestamp()
        fetch_one = functools.partial(self._get_vm, now)

        # 20 concurrent lookups; completion order is not preserved.
        workers = gevent.pool.Pool(20)
        results = workers.imap_unordered(fetch_one, self.ki.list_vms())
        return list(results)
Example #3
0
    def get_all_vms(self):
        """
        returns all vm information by name
        """
        # TODO prevent one bad VM from tainting all results
        # Capture a single timestamp so all VM records share the same moment.
        timestamp = utctimestamp()

        # Bind the timestamp so the pool can call the getter with just a VM name.
        vm_getter = functools.partial(self._get_vm, timestamp)

        # Up to 20 concurrent VM lookups.
        pool = gevent.pool.Pool(20)

        # imap_unordered: results arrive in completion order, not input order.
        return list(pool.imap_unordered(vm_getter, self.ki.list_vms()))
Example #4
0
def main():
    """Fire a batch of echo RPCs through a gevent pool and report total time."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port', type=int, default=12345)
    parser.add_argument('hack',
                        choices=list(request_choices),
                        nargs='?',
                        default='thread')
    args = parser.parse_args()

    # Strategy selected on the command line.
    request_func = request_choices[args.hack]

    channel = grpc.insecure_channel('localhost:%d' % args.port)
    stub = echoserver_grpc.EchoerStub(channel)

    # (id, message, sleep-seconds) triples: four overlapping requests
    # followed by a sequential tail.
    requests = [
        (1, "short 1", 1),
        (2, "short 2", 1),
        (3, "long", 4),
        (4, "middle", 2),
    ]
    for n in range(10):
        requests.append((10 + n, "sequential %d" % n, n))

    def map_func(req):
        return request_func(stub, *req)

    start = datetime.now()
    pool = gevent.pool.Pool()
    mapped = pool.imap_unordered(map_func, requests)

    # Let the greenlets get going before we start draining results.
    gevent.sleep(3)

    try:
        for rsp in mapped:
            log(rsp.id, "Returned")
    except Exception as e:
        print("Exception while running:", e)

    end = datetime.now()
    print("Finished in", end - start)
Example #5
0
    def inner_iterable():
        """ This will be called inside the pool's main greenlet, which ID also needs to be registered """
        if current_job:
            set_current_job(current_job)

        for x in iterable:
            yield x

        # Deregister once the source iterable is exhausted.
        if current_job:
            set_current_job(None)

    start_time = time.time()
    pool = gevent.pool.Pool(size=pool_size)

    # unordered trades result ordering for throughput; maxsize bounds how many
    # results may buffer ahead of the consumer (defaults to the pool size).
    if unordered:
        iterator = pool.imap_unordered(inner_func, inner_iterable(), maxsize=buffer_size or pool_size)
    else:
        iterator = pool.imap(inner_func, inner_iterable())

    for x in iterator:
        if flatten:
            # Each result is itself iterable; yield its elements one at a time.
            for y in x:
                yield y
        else:
            yield x

    # raise_error=True propagates the first greenlet exception to the caller.
    pool.join(raise_error=True)
    total_time = time.time() - start_time

    # NOTE(review): 'counter' appears to be an itertools.count from the
    # enclosing scope; %s renders its repr (e.g. 'count(12)') — confirm intended.
    log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))
Example #6
0
        """ This will be called inside the pool's main greenlet, which ID also needs to be registered """
        if current_job:
            set_current_job(current_job)

        for x in iterable:
            yield x

        if current_job:
            set_current_job(None)

    start_time = time.time()
    pool = gevent.pool.Pool(size=pool_size)

    if unordered:
        iterator = pool.imap_unordered(inner_func,
                                       inner_iterable(),
                                       maxsize=buffer_size or pool_size)
    else:
        iterator = pool.imap(inner_func, inner_iterable())

    for x in iterator:
        if flatten:
            for y in x:
                yield y
        else:
            yield x

    pool.join(raise_error=True)
    total_time = time.time() - start_time

    log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))
Example #7
0
def subpool_imap(pool_size, func, iterable, flatten=False, unordered=False, buffer_size=None):
    """ Generator version of subpool_map. Should be used with unordered=True for optimal performance

    Args:
        pool_size: number of greenlets; falsy means run serially, in-line.
        func: callable applied to each argument tuple from iterable.
        iterable: iterable of argument tuples for func.
        flatten: if True, iterate each result and yield its items.
        unordered: if True, yield results in completion order (faster).
        buffer_size: max buffered results on the unordered path
            (defaults to pool_size).
    """

    if not pool_size:
        for args in iterable:
            yield func(*args)
        # BUGFIX: without this return we fell through and ran the pool path
        # on an already-consumed iterable (with a zero-size pool).
        return

    counter = itertools_count()

    current_job = get_current_job()

    def inner_func(*args):
        """ As each call to 'func' will be done in a random greenlet of the subpool, we need to
        register their IDs with set_current_job() to make get_current_job() calls work properly
        inside 'func'.
        """
        next(counter)
        if current_job:
            set_current_job(current_job)

        try:
            ret = func(*args)
        except Exception as exc:
            trace = traceback.format_exc()
            log.error("Error in subpool: %s \n%s" % (exc, trace))
            raise

        if current_job:
            set_current_job(None)
        return ret

    def inner_iterable():
        """ This will be called inside the pool's main greenlet, which ID also needs to be registered """
        if current_job:
            set_current_job(current_job)

        for x in iterable:
            yield x

        if current_job:
            set_current_job(None)

    start_time = time.time()
    pool = gevent.pool.Pool(size=pool_size)

    if unordered:
        iterator = pool.imap_unordered(inner_func, inner_iterable(), maxsize=buffer_size or pool_size)
    else:
        iterator = pool.imap(inner_func, inner_iterable())

    for x in iterator:
        if flatten:
            # Each result is itself iterable; yield its elements one at a time.
            for y in x:
                yield y
        else:
            yield x

    # raise_error=True propagates the first greenlet exception to the caller.
    pool.join(raise_error=True)
    total_time = time.time() - start_time

    # NOTE: %s on the itertools.count renders its repr, e.g. 'count(12)'.
    log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))
Example #8
0
def lookup_parallel(hosts_rbls):
    """Run lookup over all (host, rbl) pairs concurrently.

    Returns the first truthy lookup result encountered (completion order),
    or the last result / False when nothing matched.
    """
    pool = gevent.pool.Pool(size=PARALELLISM)
    found = False
    # Drain the whole iterator so every lookup finishes, even after a hit.
    for hit in pool.imap_unordered(lookup, hosts_rbls):
        if not found:
            found = hit
    return found
Example #9
0
def lookup_parallel(hosts_rbls):
    # Run lookup over all items concurrently (up to PARALELLISM greenlets).
    pool = gevent.pool.Pool(size=PARALELLISM)
    in_rbl = False
    # Drains every result; keeps the first truthy result value (not coerced
    # to bool), otherwise ends up with the last result or False when empty.
    for result in pool.imap_unordered(lookup, hosts_rbls):
        in_rbl = in_rbl or result
    return in_rbl
Example #10
0
def subpool_imap(pool_size,
                 func,
                 iterable,
                 flatten=False,
                 unordered=False,
                 buffer_size=None):
    """ Generator version of subpool_map. Should be used with unordered=True for optimal performance

    Args:
        pool_size: number of greenlets; falsy means run serially, in-line.
        func: callable applied to each argument tuple from iterable.
        iterable: iterable of argument tuples for func.
        flatten: if True, iterate each result and yield its items.
        unordered: if True, yield results in completion order (faster).
        buffer_size: max buffered results on the unordered path
            (defaults to pool_size).
    """

    if not pool_size:
        for args in iterable:
            yield func(*args)
        # BUGFIX: without this return we fell through and ran the pool path
        # on an already-consumed iterable (with a zero-size pool).
        return

    counter = itertools_count()

    current_job = get_current_job()

    def inner_func(*args):
        """ As each call to 'func' will be done in a random greenlet of the subpool, we need to
        register their IDs with set_current_job() to make get_current_job() calls work properly
        inside 'func'.
        """
        next(counter)
        if current_job:
            set_current_job(current_job)

        try:
            ret = func(*args)
        except Exception as exc:
            trace = traceback.format_exc()
            log.error("Error in subpool: %s \n%s" % (exc, trace))
            raise

        if current_job:
            set_current_job(None)
        return ret

    def inner_iterable():
        """ This will be called inside the pool's main greenlet, which ID also needs to be registered """
        if current_job:
            set_current_job(current_job)

        for x in iterable:
            yield x

        if current_job:
            set_current_job(None)

    start_time = time.time()
    pool = gevent.pool.Pool(size=pool_size)

    if unordered:
        iterator = pool.imap_unordered(inner_func,
                                       inner_iterable(),
                                       maxsize=buffer_size or pool_size)
    else:
        iterator = pool.imap(inner_func, inner_iterable())

    for x in iterator:
        if flatten:
            # Each result is itself iterable; yield its elements one at a time.
            for y in x:
                yield y
        else:
            yield x

    # raise_error=True propagates the first greenlet exception to the caller.
    pool.join(raise_error=True)
    total_time = time.time() - start_time

    # NOTE: %s on the itertools.count renders its repr, e.g. 'count(12)'.
    log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))