Example #1
    def _call_backends(self, func, perm_checker, *args, **kwargs):
        # Fan the call out to every backend the checker permits and
        # stream the merged results back in spawn order.
        pile = GreenPile(self.pool)
        reqid = request_id()
        for backend in (backend for backend in self.backends.values()
                        if backend.permissions_ok(perm_checker)):
            pile.spawn(self._backend_call, func(backend), reqid,
                       *args, **kwargs)
        for result in pile:
            if result:
                for value in result:
                    yield value

    def _run_all(self, executor, data):
        # Performs an operation locally and then mimics it on remote
        # clients. Only the local result is returned; remote results
        # are ignored, but we wait for all remote instances to finish.
        if not self.remote:
            return self._attempt(self.local, executor, data)
        pile = GreenPile(self.pool)
        ret = self.pool.spawn(self._attempt, self.local, executor, data)
        for server in self.remote:
            pile.spawn(self._attempt, server, executor, data)
        try:
            return ret.wait()
        except TooManyRetries as e:
            self.log.error(str(e))
            raise
        finally:
            # Wait for every remote attempt to finish, ignoring its
            # success or failure, as the comment above promises.
            for _ in self.remote:
                try:
                    next(pile)
                except Exception:
                    pass
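
The two methods above show the core GreenPile idiom: spawn one green
thread per target, then iterate the pile to collect results in spawn
order. A minimal, self-contained sketch of the same fan-out/stream-back
pattern; fetch, the backend names, and the returned values are all
hypothetical stand-ins for the real backend calls:

import eventlet
from eventlet import GreenPile, GreenPool


def fetch(backend):
    # Hypothetical backend call; a short sleep simulates I/O.
    eventlet.sleep(0.01)
    return [backend + "-a", backend + "-b"]


def call_backends(backends, pool):
    pile = GreenPile(pool)
    for backend in backends:
        pile.spawn(fetch, backend)
    for result in pile:  # results arrive in spawn order
        if result:
            for value in result:
                yield value


pool = GreenPool(10)
print(list(call_backends(["b1", "b2"], pool)))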
Example #3
def test_concurrency():

    container = Mock()
    container.config = config
    container.service_name = "fooservice"

    entrypoint = DummyProvider()
    service_instance = Mock()

    def inject(worker_ctx):
        orm_session = OrmSession(DeclBase)
        orm_session.container = container
        return orm_session.acquire_injection(worker_ctx)

    # get injections concurrently
    pile = GreenPile()
    for _ in range(CONCURRENT_REQUESTS):
        worker_ctx = WorkerContext(container, service_instance, entrypoint)
        pile.spawn(inject, worker_ctx)
    results = set(pile)

    # injections should all be unique
    assert len(results) == CONCURRENT_REQUESTS
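
Stripped of the nameko fixtures, the test above reduces to a simple
property: every spawned green thread must produce a distinct injection,
so collecting the pile into a set yields exactly one entry per request.
A hypothetical reduction of that check:

from eventlet import GreenPile

CONCURRENT_REQUESTS = 10


def acquire():
    # Stand-in for acquire_injection(); returns a fresh object each call.
    return object()


def test_concurrency_sketch():
    pile = GreenPile()
    for _ in range(CONCURRENT_REQUESTS):
        pile.spawn(acquire)
    results = set(pile)
    assert len(results) == CONCURRENT_REQUESTS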
Example #5
    def setup_security_groups_support(self, reset_state=True):
        """
        We will track security groups -> dvportgroup mapping through the use
        of custom attributes.

        A custom attribute must be defined initially and then queried at
        start for its id, which is then used to map to a particular value.
        """
        service_content = self.connection.vim.retrieve_service_content()
        custom_field_manager = service_content.customFieldsManager

        result = vim_util.get_object_properties(self.connection.vim,
                                                custom_field_manager, "field")
        for field in result[0].propSet[0].val.CustomFieldDef:
            if SECURITY_GROUPS_ATTRIBUTE == field.name:
                LOG.debug("Found custom attribute for security groups with key %s", field.key)
                self.security_groups_attribute_key = field.key
                break
        else:
            LOG.debug("No custom attribute for security groups found, will create one.")
            field = self.connection.invoke_api(self.connection.vim,
                                               "AddCustomFieldDef",
                                               custom_field_manager,
                                               name=SECURITY_GROUPS_ATTRIBUTE,
                                               moType="DistributedVirtualPortgroup")
            LOG.debug("Created custom attribute for security groups with key %s", field.key)
            self.security_groups_attribute_key = field.key

        if reset_state:
            wait_pile = False
            pile = GreenPile(self.pool) if self.pool else GreenPile()
            # Drop all security group rules from matching dvportgroups
            # and remove empty portgroups.
            for uuid, dvs in six.iteritems(self.uuid_dvs_map):
                sg_tagged_pgs = dvs.get_pg_per_sg_attribute(
                    self.security_groups_attribute_key)
                for sg_set, pg in six.iteritems(sg_tagged_pgs):
                    wait_pile = True
                    if len(pg["vm"]) == 0:
                        pile.spawn(dvs._delete_port_group, pg["ref"],
                                   pg["name"], ignore_in_use=True)
                    else:
                        port_config = dvs.builder.port_setting()
                        port_config.blocked = dvs.builder.blocked(False)
                        port_config.filterPolicy = dvs.builder.filter_policy([], None)
                        pile.spawn(dvs.update_dvportgroup,
                                   pg["ref"],
                                   pg["configVersion"],
                                   port_config,
                                   name=dvs.dvportgroup_name(sg_set))
            if wait_pile:
                for result in pile:
                    pass
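
Note the waiting idiom at the end: GreenPile has no join(), so draining
the iterator (for result in pile: pass) is how the method blocks until
every spawned cleanup call has finished. A minimal sketch of that
barrier, with a hypothetical slow task standing in for the vSphere
calls:

import eventlet
from eventlet import GreenPile, GreenPool


def cleanup(n):
    # Hypothetical slow call, e.g. a portgroup update.
    eventlet.sleep(0.01)
    return n


pool = GreenPool(5)
pile = GreenPile(pool)
for n in range(3):
    pile.spawn(cleanup, n)
for _ in pile:  # blocks until every spawned call has completed
    pass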
Example #6
    def _put_stream(self, account, container, obj_name, src, sysmeta, chunks,
                    headers=None):
        global_checksum = hashlib.md5()
        total_bytes_transferred = 0
        content_chunks = []

        def _connect_put(chunk):
            raw_url = chunk["url"]
            parsed = urlparse(raw_url)
            try:
                chunk_path = parsed.path.split('/')[-1]
                hdrs = {}
                hdrs["transfer-encoding"] = "chunked"
                hdrs[chunk_headers["content_id"]] = sysmeta['id']
                hdrs[chunk_headers["content_version"]] = sysmeta['version']
                hdrs[chunk_headers["content_path"]] = utils.quote(obj_name)
                hdrs[chunk_headers["content_size"]] = sysmeta['content_length']
                hdrs[chunk_headers["content_chunkmethod"]] = \
                    sysmeta['chunk_method']
                hdrs[chunk_headers["content_mimetype"]] = sysmeta['mime_type']
                hdrs[chunk_headers["content_policy"]] = sysmeta['policy']
                hdrs[chunk_headers["content_chunksnb"]] = len(chunks)
                hdrs[chunk_headers["container_id"]] = \
                    utils.name2cid(account, container)
                hdrs[chunk_headers["chunk_pos"]] = chunk["pos"]
                hdrs[chunk_headers["chunk_id"]] = chunk_path
                with ConnectionTimeout(CONNECTION_TIMEOUT):
                    conn = http_connect(
                        parsed.netloc, 'PUT', parsed.path, hdrs)
                    conn.chunk = chunk
                return conn
            except (Exception, Timeout):
                # Swallow connection errors; the caller filters out the
                # resulting None entries.
                return None

        def _send_data(conn):
            while True:
                data = conn.queue.get()
                if not conn.failed:
                    try:
                        with ChunkWriteTimeout(CHUNK_TIMEOUT):
                            conn.send(data)
                    except (Exception, ChunkWriteTimeout):
                        conn.failed = True
                conn.queue.task_done()

        for pos in range(len(chunks)):
            current_chunks = chunks[pos]

            pile = GreenPile(len(current_chunks))

            for current_chunk in current_chunks:
                pile.spawn(_connect_put, current_chunk)

            conns = [conn for conn in pile if conn]

            min_conns = 1

            if len(conns) < min_conns:
                raise exc.OioException("RAWX connection failure")

            bytes_transferred = 0
            total_size = current_chunks[0]["size"]
            chunk_checksum = hashlib.md5()
            try:
                with utils.ContextPool(len(current_chunks)) as pool:
                    for conn in conns:
                        conn.failed = False
                        conn.queue = Queue(PUT_QUEUE_DEPTH)
                        pool.spawn(_send_data, conn)

                    while True:
                        remaining_bytes = total_size - bytes_transferred
                        if WRITE_CHUNK_SIZE < remaining_bytes:
                            read_size = WRITE_CHUNK_SIZE
                        else:
                            read_size = remaining_bytes
                        with ClientReadTimeout(CLIENT_TIMEOUT):
                            try:
                                data = src.read(read_size)
                            except (ValueError, IOError) as e:
                                raise SourceReadError(str(e))
                            if len(data) == 0:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                                break
                        chunk_checksum.update(data)
                        global_checksum.update(data)
                        bytes_transferred += len(data)
                        # Iterate over a copy: removing a failed conn from
                        # the list while iterating it would skip entries.
                        for conn in list(conns):
                            if not conn.failed:
                                conn.queue.put('%x\r\n%s\r\n' % (len(data),
                                                                 data))
                            else:
                                conns.remove(conn)

                        if len(conns) < min_conns:
                            raise exc.OioException("RAWX write failure")

                    for conn in conns:
                        if conn.queue.unfinished_tasks:
                            conn.queue.join()

                conns = [conn for conn in conns if not conn.failed]

            except SourceReadError:
                raise
            except ClientReadTimeout:
                raise
            except Timeout as e:
                raise exc.OioTimeout(str(e))
            except Exception as e:
                raise exc.OioException(
                    "Exception during chunk write %s" % str(e))

            final_chunks = []
            for conn in conns:
                resp = conn.getresponse(True)
                if resp.status in (200, 201):
                    conn.chunk["size"] = bytes_transferred
                    final_chunks.append(conn.chunk)
                conn.close()
            if len(final_chunks) < min_conns:
                raise exc.OioException("RAWX write failure")

            checksum = chunk_checksum.hexdigest()
            for chunk in final_chunks:
                chunk["hash"] = checksum
            content_chunks += final_chunks
            total_bytes_transferred += bytes_transferred

        content_checksum = global_checksum.hexdigest()

        return content_chunks, total_bytes_transferred, content_checksum
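
One detail of _put_stream worth isolating: _connect_put swallows its
exceptions and returns None, so filtering the pile with a list
comprehension keeps only the live connections, and the min_conns check
turns too many failures into a hard error. A hypothetical reduction of
that fan-out-and-filter step (try_connect and the target names are
stand-ins):

from eventlet import GreenPile


def try_connect(target):
    try:
        return "conn-to-" + target  # the real code returns an HTTP connection
    except Exception:
        return None  # failures become None and are filtered out below


targets = ["rawx-1", "rawx-2", "rawx-3"]
pile = GreenPile(len(targets))  # pile sized to the fan-out width
for target in targets:
    pile.spawn(try_connect, target)
conns = [conn for conn in pile if conn]
min_conns = 1
if len(conns) < min_conns:
    raise RuntimeError("RAWX connection failure")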
Example #7
def use_eventlet():
    # Assumes do_large_computation is defined elsewhere in the module.
    pool = GreenPool(100)
    pile = GreenPile(pool)
    for i in range(10):
        pile.spawn(do_large_computation)
    print(pile.next())  # first result, in spawn order
Example #8
def _pile(pool=None):
    # Return a GreenPile bound to the given pool when one is supplied,
    # otherwise a GreenPile with its own default pool.
    if pool:
        return GreenPile(pool)
    else:
        return GreenPile()
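
A usage sketch for the helper above (the pool size is arbitrary):
passing a shared GreenPool caps how many spawned tasks run at once,
while calling it with no arguments gives the pile its own default-sized
pool.

from eventlet import GreenPool

shared_pool = GreenPool(50)
bounded = _pile(shared_pool)  # concurrency capped by shared_pool
unbounded = _pile()           # pile with its own default pool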