Example #1
    def __init__(self, mock_api=False, trust=False, client_kwargs=None):
        # instantiate api
        api_class = MockAPI if mock_api else API
        self.api = api_class(**(client_kwargs or {}))
        self.logger = logging.getLogger('importer.api.eventful.consumer')
        self.event_horizon = None
        self.trust = trust
        self.client_call_limit = conf.API_CALL_LIMIT if self.trust else conf.SAFE_API_CALL_LIMIT

        self.venue_ids = set()
        self.images_by_event_id = {}
        self.images_by_venue_id = {}
        self.venues_by_venue_id = {}

        # a green pile for images w/ fairly high concurrency
        # (webservers are ok with it)
        self.event_image_pile = eventlet.GreenPile(15)
        # a green pile for event details w/ lower concurrency
        # (appservers are not too ok with it)
        self.event_detail_pile = eventlet.GreenPile(10)
        # a green pile for venue images w/ fairly high concurrency
        # (webservers are ok with it)
        self.venue_image_pile = eventlet.GreenPile(15)
        # a green pile for venue details w/ lower concurrency
        # (appservers are not too ok with it)
        self.venue_detail_pile = eventlet.GreenPile(10)
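
The comments above capture the usual sizing rule: give each pile a concurrency cap that the downstream service can tolerate. A minimal sketch of how such a sized pile is typically filled and drained (the fetch_event_detail worker and event_ids input are hypothetical, not part of the code above):

import eventlet

def fetch_event_detail(event_id):
    # hypothetical worker: one slow call against the appserver
    eventlet.sleep(0.01)
    return {'id': event_id}

# cap in-flight calls at 10 so the appserver is not overwhelmed
event_detail_pile = eventlet.GreenPile(10)
event_ids = range(100)  # hypothetical input
for event_id in event_ids:
    event_detail_pile.spawn(fetch_event_detail, event_id)

# iterating the pile yields each worker's return value, in spawn order
details = list(event_detail_pile)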
Example #2
    def test_constructing_from_pool(self):
        pool = eventlet.GreenPool(2)
        pile1 = eventlet.GreenPile(pool)
        pile2 = eventlet.GreenPile(pool)

        def bunch_of_work(pile, unique):
            for i in range(10):
                pile.spawn(passthru, i + unique)

        eventlet.spawn(bunch_of_work, pile1, 0)
        eventlet.spawn(bunch_of_work, pile2, 100)
        eventlet.sleep(0)
        self.assertEqual(list(pile2), list(range(100, 110)))
        self.assertEqual(list(pile1), list(range(10)))
Example #3
def main():
    query_message = ZeroAccessUtil.buildMessage()

    SEPARATOR = '/'
    if sys.platform == 'win32':
        SEPARATOR = "\\"

    message = ZeroAccessUtil.buildMessage()
    print message.encode("hex")

    nonQueryedNodes = mul.Queue(5000)

    zeroaccess_bootstrap_seeds_path = "Data" + SEPARATOR + "zeroaccess_node.dat"
    zeroaccess_nodes = ZeroAccessUtil.read_zeroaccess_data_from_file(
        zeroaccess_bootstrap_seeds_path)

    pile = eventlet.GreenPile()
    for x in zeroaccess_nodes[:10]:
        pile.spawn(query, x, message)

    # note that the pile acts as a collection of return values from the functions
    # if any exceptions are raised by the function they'll get raised here
    key = [ord('2'), ord('p'), ord('t'), ord('f')]
    for node, result in zip(zeroaccess_nodes[:10], pile):
        if (result == ''):
            print 'no response from ' + socket.inet_ntoa(
                struct.pack('I', node.get_ip()))
            continue
        print 'received'
        original_message = ZeroAccessUtil.xorMessage(result[0], key)
        crc32, retL_command, b_flag, ip_count = struct.unpack(
            'IIII', original_message[:16])
        print socket.inet_ntoa(struct.pack(
            'I', node.get_ip())) + ' --> ip count:  ' + str(ip_count)
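
The comment above describes the two properties this loop relies on: iterating a GreenPile yields each spawned function's return value in spawn order, and any exception a worker raised is re-raised at that point of the iteration. A minimal sketch of that behaviour (the flaky_query worker is hypothetical):

import eventlet

def flaky_query(n):
    # hypothetical worker: one input fails to show exception propagation
    if n == 2:
        raise ValueError('query %d failed' % n)
    return n * 10

pile = eventlet.GreenPile(5)
for n in range(4):
    pile.spawn(flaky_query, n)

results = []
try:
    for value in pile:  # yields 0, 10, then re-raises the ValueError
        results.append(value)
except ValueError as exc:
    print('worker error: %s' % exc)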
Example #4
    def _collect_status_data(self):
        try:
            now = datetime.datetime.now()
            children = self.controller.children.values()
            status_data = {
                'active_children_count':
                len([c for c in children if c.active]),
                'killed_children_count':
                len([c for c in children if not c.active]),
                'configured_children_count': self.controller.num_processes,
                'now': now.ctime(),
                'pid': os.getpid(),
                'uptime': format_timedelta(now - self.controller.started_at),
                'started_at': self.controller.started_at.ctime(),
                'config': self.controller.config
            }
            # fire up a few greenthreads to wait on children's responses
            p = eventlet.GreenPile()
            for child in self.controller.children.values():
                p.spawn(self.collect_child_status, child)
            status_data['children'] = dict([pid_cd for pid_cd in p])

            # total concurrent connections
            status_data['concurrent_requests'] = sum([
                child.get('concurrent_requests', 0)
                for child in status_data['children'].values()
            ])
        finally:
            # wipe out the waiter so that subsequent requests create new ones
            self.status_waiter = None
        return status_data
Example #5
def app(environ, start_rsp):
    pool = eventlet.GreenPool(100)
    pile = eventlet.GreenPile(pool)
    for url in environ['wsgi.input'].readlines():
        pile.spawn(fetch_title, url)
    titles = '\n'.join(pile)
    start_rsp('200 OK', [('Content-Type', 'text/plain')])
    return [titles]
Example #6
    def test_wrapper_isolation(self):
        pile = eventlet.GreenPile()
        [pile.spawn(isolation_query, i) for i in range(10)]

        for scope, scope_values in pile:
            for val in scope_values:
                self.assertEqual(scope, val)
Example #7
    def test_create_swift_account_fail(self):
        self.ret_index = {}
        self.pa = 0

        def create_tenant(*args):
            if self.pa == 0:
                self.pa += 1
                raise KSClientException('Fake msg')
            else:
                self.pa += 1
                return FakeKSTenant('foo1')

        def create_swift_user(*args):
            pass

        client = FakeKSClient()

        self.stubs.Set(client.tenants, 'create', create_tenant)
        self.stubs.Set(filler, 'create_swift_user', create_swift_user)

        concurrency = int(
            utils.get_config('concurrency',
                             'filler_keystone_client_concurrency'))
        pile = eventlet.GreenPile(concurrency)
        filler.create_swift_account(client, pile, 3, 1, self.ret_index)

        self.assertEqual(len(self.ret_index.keys()), 2)
Example #8
    def test_a_buncha_stuff(self):
        assert_ = self.assert_

        class Dummy(object):
            def foo(self, when, token=None):
                assert_(token is not None)
                time.sleep(random.random() / 200.0)
                return token

        def sender_loop(loopnum):
            obj = tpool.Proxy(Dummy())
            count = 100
            for n in six.moves.range(count):
                eventlet.sleep(random.random() / 200.0)
                now = time.time()
                token = loopnum * count + n
                rv = obj.foo(now, token=token)
                self.assertEqual(token, rv)
                eventlet.sleep(random.random() / 200.0)

        cnt = 10
        pile = eventlet.GreenPile(cnt)
        for i in six.moves.range(cnt):
            pile.spawn(sender_loop, i)
        results = list(pile)
        self.assertEqual(len(results), cnt)
        tpool.killall()
Example #9
    def main(self):
        if self.args.bucket and self.args.bucket_folder:
            self.conn = S3Connection()
            self.bucket = self.conn.get_bucket(self.args.bucket)
            rs = self.bucket.list(self.args.bucket_folder)

            key_counter = 0
            keys_to_delete = []

            pile = eventlet.GreenPile(self.args.concurrency)

            for key in rs:
                keys_to_delete.append(key.name)
                key_counter += 1

                if key_counter % 1000 == 0:
                    pile.spawn(self.delete, keys_to_delete)
                    keys_to_delete = []

            pile.spawn(self.delete, keys_to_delete)  # Pick up stragglers

            # Wait for all greenlets to finish
            list(pile)

        else:
            print "Can't do it. Not enough specified. You'll nuke the kernel."
Example #10
    def _transform_object(self, zobj):
        tags = set()
        metadata = dict()

        zobj.metadata = Metadata()
        d = zobj.transformed

        pool = eventlet.GreenPool()
        pile = eventlet.GreenPile(pool)

        for tag in self.eligible_tags:
            pile.spawn(match_pro, zobj, tag, self.logger, self.debug)

        if zobj.metadata.empty():
            for tag in self.common_tags:
                # try:
                #     meta = Metadata()
                #     meta = tag.process(d, meta)
                #     if meta is not None:
                #         zobj.metadata.merge(meta)
                # except Exception as e:
                #     if self.logger is not None:
                #         error = "%s: %s %s" % (tag.__class__.__name__,
                #                                type(e).__name__, str(e))
                #         self.logger.debug(error)
                #     if self.debug:
                #         raise e
                pile.spawn(match_pro, zobj, tag, self.logger, self.debug)
        pool.waitall()
        if zobj.metadata.empty() is False:
            return zobj
Example #11
    def delete_container(self, dest_storage_cnx, dest_token, orig_containers,
                         dest_containers):
        set1 = set((x['name']) for x in orig_containers)
        set2 = set((x['name']) for x in dest_containers)
        delete_diff = set2 - set1

        pool = eventlet.GreenPool(size=self.concurrency)
        pile = eventlet.GreenPile(pool)
        for container in delete_diff:
            try:
                dest_container_stats, dest_objects = swiftclient.get_container(
                    None,
                    dest_token,
                    container,
                    http_conn=dest_storage_cnx,
                    full_listing=True,
                )
            except (swiftclient.client.ClientException), e:
                logging.info("error getting container: %s, %s" %
                             (container, e.http_reason))
                continue

            for obj in dest_objects:
                logging.info("deleting obj: %s ts:%s", obj['name'],
                             obj['last_modified'])
                pile.spawn(self.delete_object, dest_storage_cnx, dest_token,
                           container, obj['name'])
            pool.waitall()
            logging.info("deleting container: %s", container)
            pile.spawn(swiftclient.delete_container,
                       '',
                       dest_token,
                       container,
                       http_conn=dest_storage_cnx)
Example #12
    def test_start_many_slow_connections(self):
        """ Test starting many slow connections """

        pile = eventlet.GreenPile()
        [pile.spawn(self.get_connection) for _ in xrange(self.NUM_ITER)]
        for conn in pile:
            self.assertIsInstance(conn, RexProEventletConnection)
Example #13
    def spawn_order_check(self, concurrency):
        # checks that piles are strictly ordered
        p = eventlet.GreenPile(concurrency)

        def makework(count, unique):
            for i in six.moves.range(count):
                token = (unique, i)
                p.spawn(pressure, token)

        iters = 1000
        eventlet.spawn(makework, iters, 1)
        eventlet.spawn(makework, iters, 2)
        eventlet.spawn(makework, iters, 3)
        p.spawn(pressure, (0, 0))
        latest = [-1] * 4
        received = 0
        it = iter(p)
        while True:
            try:
                i = six.next(it)
            except StressException as exc:
                i = exc.args[0]
            except StopIteration:
                break
            received += 1
            if received % 5 == 0:
                eventlet.sleep(0.0001)
            unique, order = i
            assert latest[unique] < order
            latest[unique] = order
        for l in latest[1:]:
            self.assertEqual(l, iters - 1)
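
The ordering property this test stresses can be seen with a much smaller sketch: a GreenPile yields results in spawn order, even when a task spawned later finishes first (the delayed_echo worker is hypothetical):

import eventlet

def delayed_echo(value, delay):
    # hypothetical worker: sleep so later spawns can finish earlier
    eventlet.sleep(delay)
    return value

pile = eventlet.GreenPile(3)
pile.spawn(delayed_echo, 'first', 0.03)   # slowest, spawned first
pile.spawn(delayed_echo, 'second', 0.02)
pile.spawn(delayed_echo, 'third', 0.01)   # fastest, spawned last

print(list(pile))  # ['first', 'second', 'third'] -- still spawn order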
Example #14
def geturls():
    urls = ['www.google.com', 'www.yandex.ru', 'www.python.org']
    pile = eventlet.GreenPile()
    for url in urls:
        pile.spawn(geturl, url)

    for url, rsp in zip(urls, pile):
        print('%s: %s' % (url, rsp))
Example #15
    def test_unique_connections(self):
        pool = self.get_pool()
        pile = eventlet.GreenPile()
        for i in xrange(self.NUM_ITER):
            pile.spawn(self.spawn_slow_network_and_query_slow_response, pool,
                       self.SLOW_NETWORK_QUERY, 1, {
                           'value': i,
                           i: 'value'
                       })
Example #16
def setup_siege_urls(hostnames):
    """Launches a coroutine to write siege urls based on user input."""
    print '  Configuring urls... ',
    pile = eventlet.GreenPile(pool)
    for h in hostnames:
        pile.spawn(_setup_siege_urls, h)
    responses = list(pile)
    print 'Done!'
    return responses
Example #17
def run_cmd_on_nodes(node_list, cmd):
    pile = eventlet.GreenPile()
    results = {}
    for node in node_list:
        pile.spawn(run_cmd_on_node, node, cmd)
    for node, res in zip(node_list, pile):
        results[node] = res
    logging.disable(logging.NOTSET)
    return results
Example #18
    def check_hosts(self, hosts):
        hosts_statics = {}
        pile = eventlet.GreenPile()
        for host in hosts:
            pile.spawn(self._check_host, host)

        for host, host_state in zip(hosts, pile):
            hosts_statics[host] = host_state

        return hosts_statics
Example #19
def run_doctor_on_nodes(node_list, check_cmd):
    pile = eventlet.GreenPile()
    result = []
    for node in node_list:
        pile.spawn(run_doctor_cmd_on_node, node['role'], node['name'],
                   check_cmd)
    for node, res in zip(node_list, pile):
        result.append(res)
    logging.disable(logging.NOTSET)
    return result
Example #20
    def test_contention(self):
        from tests import tpool_test
        prox = tpool.Proxy(tpool_test)

        pile = eventlet.GreenPile(4)
        pile.spawn(lambda: self.assertEqual(prox.one, 1))
        pile.spawn(lambda: self.assertEqual(prox.two, 2))
        pile.spawn(lambda: self.assertEqual(prox.three, 3))
        results = list(pile)
        self.assertEqual(len(results), 3)
Example #21
    def test_nested_values(self):
        pile = eventlet.GreenPile()
        [pile.spawn(nested_wrappers, chr(i)) for i in range(97, 103)]

        for scope, scope_v in pile:
            self.assertEqual(scope, scope_v.name)
            for inner_scope, scope_values in scope_v['nested_values']:
                for val in scope_values:
                    self.assertEqual(inner_scope, val)
            scope_v.delete()
Example #22
def setup_cannons(hostnames):
    """Launches a coroutine to configure each host and waits for them to
    complete before compiling a list of responses
    """
    print '  Loading cannons... ',
    pile = eventlet.GreenPile(pool)
    for h in hostnames:
        pile.spawn(_setup_a_cannon, h)
    responses = list(pile)
    print 'Done!'
    return responses
Example #23
    def mbtiles_grid_tiles(self):
        """
        Get grid tiles and upload.
        """
        tile_count = self.mbtiles.execute(
            'select count(zoom_level) from grids;').fetchone()[0]
        if not tile_count > 0:
            return False

        # Progress bar
        widgets = [
            '- Uploading %s grid tiles: ' % (tile_count),
            progressbar.Percentage(), ' ',
            progressbar.Bar(), ' ',
            progressbar.ETA()
        ]
        progress = progressbar.ProgressBar(widgets=widgets,
                                           maxval=tile_count).start()
        completed = 0

        # Create eventlet pile
        pile = eventlet.GreenPile(self.args.concurrency)

        # Get tiles
        tiles = self.mbtiles.execute(
            'select zoom_level, tile_column, tile_row, grid from grids;')
        t = tiles.fetchone()
        while t:
            key = '%s/%s/%s/%s.grid.json' % (self.tileset, t[0], t[1], t[2])

            # Get actual json data
            grid_data = self.mbtiles.execute(
                'select key_name, key_json FROM grid_data WHERE zoom_level = %s and tile_column = %s and tile_row = %s;'
                % (t[0], t[1], t[2])).fetchall()
            grid_data_parse = {}
            for d in grid_data:
                grid_data_parse[d[0]] = json.loads(d[1])

            # Put together
            grid = json.loads(zlib.decompress(t[3]).decode('utf-8'))
            grid['data'] = grid_data_parse

            # Upload data
            (grid, mime_type) = self.jsonp(grid)
            pile.spawn(self.send_file, key, grid, mime_type=mime_type)

            # Get next and update
            t = tiles.fetchone()
            completed = completed + 1
            progress.update(completed)

        # Wait for pile and stop progress bar
        list(pile)
        progress.finish()
Example #24
def get_historical_data(symbol):
    if symbol:
        companies = ListedCompany.objects.filter(symbol=symbol)
    else:
        companies = ListedCompany.objects.all()[:10]
    print "loading" + str(companies)
    pool = eventlet.GreenPool(5)
    pile = eventlet.GreenPile(pool)
    for company in companies:
        print company
        pile.spawn(get_company_data, company)
Example #25
def run_doctor_on_nodes(node_list, check_cmd):
    pile = eventlet.GreenPile()
    result = []
    for node in node_list:
        LOG.info('%s%s Push check cmd to %-13s (%-10s) %s%s' %
                 ('<', '=' * 2, node['name'], node['role'], '=' * 2, '>'))
        pile.spawn(run_doctor_cmd_on_node, node['role'], node['name'],
                   check_cmd)
    for node, res in zip(node_list, pile):
        result.append(res)
    logging.disable(logging.NOTSET)
    return result
Example #26
    def test_pile_spawn_times_out(self):
        p = eventlet.GreenPile(4)
        for i in range(4):
            p.spawn(passthru, i)
        # now it should be full and this should time out
        eventlet.Timeout(0)
        self.assertRaises(eventlet.Timeout, p.spawn, passthru, "time out")
        # verify that the spawn breakage didn't interrupt the sequence
        # and terminates properly
        for i in range(4, 10):
            p.spawn(passthru, i)
        self.assertEqual(list(p), list(range(10)))
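
The test above relies on spawn blocking while the pile is at capacity, which is why the pending eventlet.Timeout fires inside that call. A minimal sketch of using the same behaviour for bounded backpressure (the do_work worker and the 0.5s limit are hypothetical):

import eventlet

def do_work(item):
    # hypothetical worker: slow enough that the pile can fill up
    eventlet.sleep(0.1)
    return item

pile = eventlet.GreenPile(4)
skipped = []
for item in range(20):
    try:
        with eventlet.Timeout(0.5):
            pile.spawn(do_work, item)  # blocks while all 4 slots are busy
    except eventlet.Timeout:
        # the pile stayed full for 0.5s; record the item instead of waiting
        skipped.append(item)

results = list(pile)  # drain whatever was spawned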
Example #27
def slam_host(cannon_hosts, target):
    """Coordinates `cannon_hosts` to use the specified siege coordinates on
    `target` and report back the performance.
    """
    pile = eventlet.GreenPile(pool)
    for h in cannon_hosts:
        pile.spawn(fire_cannon, h, target)
    responses = list(pile)

    try:
        report = parse_responses(responses)
    except UnparsableData, e:
        return "Unable to parse data properly: %s" % e
Example #28
    def test_many_network_calls(self):
        """ Test known responses on a network that should be slow; we should get them all asynchronously """

        pile = eventlet.GreenPile()

        for i in xrange(self.NUM_ITER):
            pile.spawn(spawn_slow_network_and_query_slow_response, self,
                       self.SLOW_NETWORK_QUERY, 1, {
                           'value': i,
                           i: 'value'
                       })

        for result in pile:
            print_(result)
            self.assertIsInstance(result, dict)
Example #29
def app(environ, start_response):
    if environ["REQUEST_METHOD"] != "POST":
        start_response("403 Forbidden", [])
        return []

    pile = eventlet.GreenPile(pool)
    for line in environ["wsgi.input"].readlines():
        print(line)
        url = line.strip()
        if url:
            pile.spawn(fetch, url)

    titles = "\n".join(pile)
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [titles]
Example #30
def app(environ, start_response):
    if environ['REQUEST_METHOD'] != 'POST':
        start_response('403 Forbidden', [])
        return []

    # the pile collects the result of a concurrent operation -- in this case,
    # the collection of feed titles
    pile = eventlet.GreenPile(pool)
    for line in environ['wsgi.input'].readlines():
        url = line.strip()
        if url:
            pile.spawn(fetch_title, url)
    # since the pile is an iterator over the results,
    # you can use it in all sorts of great Pythonic ways
    titles = '\n'.join(pile)
    start_response('200 OK', [('Content-type', 'text/plain')])
    return [titles]
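
None of these WSGI examples show the fetch_title helper they spawn; a minimal hypothetical sketch of what it might look like (the regex-based title extraction and the monkey_patch call are assumptions, not taken from the originals):

import re
import eventlet
eventlet.monkey_patch()  # make urllib's sockets cooperative
from urllib.request import urlopen

def fetch_title(url):
    # hypothetical helper: fetch a page and pull out its <title> text
    if isinstance(url, bytes):  # wsgi.input yields bytes on Python 3
        url = url.decode('utf-8', 'replace')
    url = url.strip()
    try:
        body = urlopen(url, timeout=10).read().decode('utf-8', 'replace')
    except Exception as exc:
        return '%s: error (%s)' % (url, exc)
    match = re.search(r'<title[^>]*>(.*?)</title>', body, re.I | re.S)
    return match.group(1).strip() if match else url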