Example #1
def async_protocol_manager(coro):
    _, protocol = yield From(coro)
    raise Return(protocol_manager(protocol))
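The From/Return pair above is the trollius (Python 2 asyncio backport) spelling of native yield from/return: yield From(...) awaits, and raise Return(...) returns a value from the generator-based coroutine. A minimal sketch of the equivalence, assuming only a stock trollius install (the names here are illustrative):

import trollius
from trollius import From, Return

@trollius.coroutine
def fetch_value():
    # 'yield From(...)' is the trollius form of native 'yield from ...'
    yield From(trollius.sleep(0.1))
    # 'raise Return(x)' is the trollius form of native 'return x'
    raise Return(42)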
Example #2
def do_bind_operation(dua, message_id, dict_req):
    logging.debug('do BIND operation for DUA %s: %s', dua.identity, dict_req)
    # wait until only the bind operation is in the pending dict
    while len(dua.pending) > 1:
        if NATIVE_ASYNCIO:
            yield from asyncio.sleep(0.1)
        else:
            yield From(asyncio.sleep(0.1))

    server_sasl_credentials = None
    # protocol version check (RFC4511 4.2 - line 878)
    if dict_req['version'] != 3:
        result = build_ldap_result(
            RESULT_PROTOCOL_ERROR,
            diagnostic_message='only LDAP version 3 protocol allowed')
        dua.user = dua.dsa.user_backend.anonymous()
    else:
        # anonymous simple authentication (RFC4511 4.2 - line 883)
        if dict_req['authentication']['simple'] == '' and not dict_req['name']:
            dua.user = dua.dsa.user_backend.anonymous()
            result = build_ldap_result(
                RESULT_SUCCESS,
                diagnostic_message='anonymous authentication successful')
        # simple authentication (RFC4511 4.2 - line 888)
        elif dict_req['name'] and dict_req['authentication']['simple']:
            dua.user = dua.dsa.user_backend.find_user(dict_req['name'])
            if dua.user:
                if not dua.dsa.user_backend.check_credentials(
                        dua.user, dict_req['authentication']['simple']):
                    if NATIVE_ASYNCIO:
                        yield from asyncio.sleep(3)  # pause if invalid user
                    else:
                        yield From(asyncio.sleep(3))  # pause if invalid user
                    result = build_ldap_result(
                        RESULT_INVALID_CREDENTIALS,
                        diagnostic_message='invalid credentials')
                    dua.user = dua.dsa.user_backend.anonymous()
                else:  # successful simple authentication
                    result = build_ldap_result(
                        RESULT_SUCCESS,
                        diagnostic_message='user authentication successful')
            else:
                if NATIVE_ASYNCIO:
                    yield from asyncio.sleep(3)  # pause if not existent user
                else:
                    yield From(asyncio.sleep(3))  # pause if not existent user
                result = build_ldap_result(RESULT_INVALID_CREDENTIALS,
                                           diagnostic_message='user not found')
                dua.user = dua.dsa.user_backend.anonymous()
        elif dict_req['authentication']['sasl']:  # sasl authentication
            result = build_ldap_result(RESULT_AUTH_METHOD_NOT_SUPPORTED,
                                       diagnostic_message='SASL not available')
            dua.user = dua.dsa.user_backend.anonymous()
        else:  # undefined
            dua.abort()
            if NATIVE_ASYNCIO:
                return None, None
            else:
                raise Return((None, None))
    response = build_bind_response(result, server_sasl_credentials)
    if NATIVE_ASYNCIO:
        return response, 'bindResponse'
    else:
        raise Return((response, 'bindResponse'))
Example #3
 def c1(result):
     if (yield From(ev.wait())):
         result.append(1)
     raise Return(True)
Example #4
 def getaddrinfo(*args, **kw):
     raise Return([])
Example #5
 def start(self):
     raise Return(self)
Example #6
 def aslist(self):
     """ Return the result as a Python ``list``. """
     data = yield From(self._result._read(count=self._result.count))
     raise Return(data)
Example #7
        def keep_packet(pkt):
            self._packets.append(pkt)

            if packet_count != 0 and len(
                    self._packets) - initial_packet_amount >= packet_count:
                raise Return()
Example #8
 def coro_slow_append(result, value, delay=SHORT_SLEEP):
     yield From(asyncio.sleep(delay))
     result.append(value)
     raise Return(value * 10)
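A coroutine like coro_slow_append above is normally driven from a trollius event loop; a minimal usage sketch, assuming the function is importable and SHORT_SLEEP is defined:

import trollius

result = []
loop = trollius.get_event_loop()
# Run two slow appends concurrently; gather() collects their Return() values.
returned = loop.run_until_complete(
    trollius.gather(coro_slow_append(result, 1),
                    coro_slow_append(result, 2)))
print(returned)  # [10, 20]
print(result)    # [1, 2] (completion order may vary with equal delays)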
Example #9
 def get_backend(self, **kw):
     raise Return(self.client.get_object(**kw))
Example #10
    def send(self,
             request,
             stream=False,
             timeout=None,
             verify=True,
             cert=None,
             proxies=None):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (`connect timeout, read
            timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """

        conn = self.get_connection(request.url, proxies)

        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)

        chunked = not (request.body is None
                       or 'Content-Length' in request.headers)

        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            timeout = TimeoutSauce(connect=timeout, read=timeout)

        try:
            if not chunked:
                resp = yield From(
                    conn.urlopen(method=request.method,
                                 url=url,
                                 body=request.body,
                                 headers=request.headers,
                                 redirect=False,
                                 assert_same_host=False,
                                 preload_content=False,
                                 decode_content=False,
                                 retries=Retry(self.max_retries, read=False),
                                 timeout=timeout))

            # Send the request.
            else:
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool

                low_conn = conn._get_conn(timeout=timeout)

                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)

                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)

                    low_conn.endheaders()

                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    low_conn.send(b'0\r\n\r\n')

                    r = low_conn.getresponse()
                    resp = yield From(
                        HTTPResponse.from_httplib(r,
                                                  pool=conn,
                                                  connection=low_conn,
                                                  preload_content=False,
                                                  decode_content=False))
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)

        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)

        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                raise ConnectTimeout(e, request=request)

            raise ConnectionError(e, request=request)

        except _ProxyError as e:
            raise ProxyError(e)

        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise

        raise Return(self.build_response(request, resp))
Example #11
        def _async_execute_process_pty(protocol_class,
                                       cmd,
                                       cwd,
                                       env,
                                       shell,
                                       stderr_to_stdout=True):
            loop = get_loop()
            # Create the PTYs
            stdout_master, stdout_slave = pty.openpty()
            if stderr_to_stdout:
                stderr_master, stderr_slave = stdout_master, stdout_slave
            else:
                stderr_master, stderr_slave = pty.openpty()

            def protocol_factory():
                return protocol_class(None, stdout_master, stderr_master)

            # Start the subprocess
            if shell is True:
                transport, protocol = yield From(
                    loop.subprocess_shell(protocol_factory,
                                          " ".join(cmd),
                                          cwd=cwd,
                                          env=env,
                                          stdout=stdout_slave,
                                          stderr=stderr_slave,
                                          close_fds=False))
            else:
                transport, protocol = yield From(
                    loop.subprocess_exec(protocol_factory,
                                         *cmd,
                                         cwd=cwd,
                                         env=env,
                                         stdout=stdout_slave,
                                         stderr=stderr_slave,
                                         close_fds=False))

            # Close our copies of the slaves,
            # the child's copy of the slave remains open until it terminates
            os.close(stdout_slave)
            if not stderr_to_stdout:
                os.close(stderr_slave)

            # Create Protocol classes
            class PtyStdoutProtocol(asyncio.Protocol):
                def connection_made(self, transport):
                    if hasattr(protocol, 'on_stdout_open'):
                        protocol.on_stdout_open()

                def data_received(self, data):
                    if hasattr(protocol, 'on_stdout_received'):
                        protocol.on_stdout_received(data)

                def connection_lost(self, exc):
                    if hasattr(protocol, 'on_stdout_close'):
                        protocol.on_stdout_close(exc)

            class PtyStderrProtocol(asyncio.Protocol):
                def connection_made(self, transport):
                    if hasattr(protocol, 'on_stderr_open'):
                        protocol.on_stderr_open()

                def data_received(self, data):
                    if hasattr(protocol, 'on_stderr_received'):
                        protocol.on_stderr_received(data)

                def connection_lost(self, exc):
                    if hasattr(protocol, 'on_stderr_close'):
                        protocol.on_stderr_close(exc)

            # Add the pty's to the read loop
            # Also store the transport, protocol tuple for each call to
            # connect_read_pipe, to prevent the destruction of the protocol
            # class instance, otherwise no data is received.
            protocol.stdout_tuple = yield From(
                loop.connect_read_pipe(PtyStdoutProtocol,
                                       os.fdopen(stdout_master, 'rb', 0)))
            if not stderr_to_stdout:
                protocol.stderr_tuple = yield From(
                    loop.connect_read_pipe(PtyStderrProtocol,
                                           os.fdopen(stderr_master, 'rb', 0)))
            # Return the protocol and transport
            raise Return(transport, protocol)
Example #12
    def start_build(self, build_job):
        """ Starts a build. """
        if self._component_status not in (ComponentStatus.WAITING,
                                          ComponentStatus.RUNNING):
            logger.debug(
                'Could not start build for component %s (build %s, worker version: %s): %s',
                self.builder_realm, build_job.repo_build.uuid,
                self._worker_version, self._component_status)
            raise Return()

        logger.debug(
            'Starting build for component %s (build %s, worker version: %s)',
            self.builder_realm, build_job.repo_build.uuid,
            self._worker_version)

        self._current_job = build_job
        self._build_status = StatusHandler(self.build_logs,
                                           build_job.repo_build.uuid)
        self._image_info = {}

        yield From(self._set_status(ComponentStatus.BUILDING))

        # Send the notification that the build has started.
        build_job.send_notification('build_start')

        # Parse the build configuration.
        try:
            build_config = build_job.build_config
        except BuildJobLoadException as irbe:
            yield From(
                self._build_failure('Could not load build job information',
                                    irbe))
            raise Return()

        base_image_information = {}

        # Add the pull robot information, if any.
        if build_job.pull_credentials:
            base_image_information['username'] = build_job.pull_credentials.get(
                'username', '')
            base_image_information['password'] = build_job.pull_credentials.get(
                'password', '')

        # Retrieve the repository's fully qualified name.
        repo = build_job.repo_build.repository
        repository_name = repo.namespace_user.username + '/' + repo.name

        # Parse the build queue item into build arguments.
        #  build_package: URL to the build package to download and untar/unzip.
        #                 defaults to empty string to avoid requiring a pointer on the builder.
        #  sub_directory: The location within the build package of the Dockerfile and the build context.
        #  repository: The repository for which this build is occurring.
        #  registry: The registry for which this build is occurring (e.g. 'quay.io').
        #  pull_token: The token to use when pulling the cache for building.
        #  push_token: The token to use to push the built image.
        #  tag_names: The name(s) of the tag(s) for the newly built image.
        #  base_image: The image name and credentials to use to conduct the base image pull.
        #   username: The username for pulling the base image (if any).
        #   password: The password for pulling the base image (if any).
        context, dockerfile_path = self.extract_dockerfile_args(build_config)
        build_arguments = {
            'build_package': build_job.get_build_package_url(self.user_files),
            'context': context,
            'dockerfile_path': dockerfile_path,
            'repository': repository_name,
            'registry': self.registry_hostname,
            'pull_token': build_job.repo_build.access_token.get_code(),
            'push_token': build_job.repo_build.access_token.get_code(),
            'tag_names': build_config.get('docker_tags', ['latest']),
            'base_image': base_image_information,
        }

        # If the trigger has a private key, it's using git, thus we should add
        # git data to the build args.
        #  url: url used to clone the git repository
        #  sha: the sha1 identifier of the commit to check out
        #  private_key: the key used to get read access to the git repository

        # TODO(remove-unenc): Remove legacy field.
        private_key = None
        if build_job.repo_build.trigger is not None and \
           build_job.repo_build.trigger.secure_private_key is not None:
            private_key = build_job.repo_build.trigger.secure_private_key.decrypt()

        if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and \
           private_key is None and \
           build_job.repo_build.trigger is not None:
            private_key = build_job.repo_build.trigger.private_key

        if private_key is not None:
            build_arguments['git'] = {
                'url': build_config['trigger_metadata'].get('git_url', ''),
                'sha': BuildComponent._commit_sha(build_config),
                'private_key': private_key or '',
            }

        # If the build args have no buildpack, mark it as a failure before sending
        # it to a builder instance.
        if not build_arguments['build_package'] and not build_arguments.get('git'):
            logger.error('%s: insufficient build args: %s',
                         self._current_job.repo_build.uuid, build_arguments)
            yield From(
                self._build_failure(
                    'Insufficient build arguments. No buildpack available.'))
            raise Return()

        # Invoke the build.
        logger.debug('Invoking build: %s', self.builder_realm)
        logger.debug('With Arguments: %s', build_arguments)

        def build_complete_callback(result):
            """ This function is used to execute a coroutine as the callback. """
            trollius.ensure_future(self._build_complete(result))

        self.call("io.quay.builder.build",
                  **build_arguments).add_done_callback(build_complete_callback)

        # Set the heartbeat for the future. If the builder never receives the build call,
        # then this will cause a timeout after 30 seconds. We know the builder has registered
        # by this point, so it makes sense to have a timeout.
        self._last_heartbeat = datetime.datetime.utcnow() + BUILD_HEARTBEAT_DELAY
Example #13
    def _build_complete(self, result):
        """ Wraps up a completed build. Handles any errors and calls self._build_finished. """
        build_id = self._current_job.repo_build.uuid

        try:
            # Retrieve the result. This will raise an ApplicationError on any error that occurred.
            result_value = result.result()
            kwargs = {}

            # Note: If we are hitting an older builder that didn't return ANY map data, then the result
            # value will be a bool instead of a proper CallResult object.
            # Therefore: we have a try-except guard here to ensure we don't hit this pitfall.
            try:
                kwargs = result_value.kwresults
            except AttributeError:
                pass

            try:
                yield From(self._build_status.set_phase(BUILD_PHASE.COMPLETE))
            except InvalidRepositoryBuildException:
                logger.warning(
                    'Build %s was not found; repo was probably deleted',
                    build_id)
                raise Return()

            yield From(self._build_finished(BuildJobResult.COMPLETE))

            # Label the pushed manifests with the build metadata.
            manifest_digests = kwargs.get('digests') or []
            repository = registry_model.lookup_repository(
                self._current_job.namespace, self._current_job.repo_name)
            if repository is not None:
                for digest in manifest_digests:
                    with UseThenDisconnect(app.config):
                        manifest = registry_model.lookup_manifest_by_digest(
                            repository, digest, require_available=True)
                        if manifest is None:
                            continue

                        registry_model.create_manifest_label(
                            manifest, INTERNAL_LABEL_BUILD_UUID, build_id,
                            'internal', 'text/plain')

            # Send the notification that the build has completed successfully.
            self._current_job.send_notification(
                'build_success',
                image_id=kwargs.get('image_id'),
                manifest_digests=manifest_digests)
        except ApplicationError as aex:
            worker_error = WorkerError(aex.error, aex.kwargs.get('base_error'))

            # Write the error to the log.
            yield From(
                self._build_status.set_error(
                    worker_error.public_message(),
                    worker_error.extra_data(),
                    internal_error=worker_error.is_internal_error(),
                    requeued=self._current_job.has_retries_remaining()))

            # Send the notification that the build has failed.
            self._current_job.send_notification(
                'build_failure', error_message=worker_error.public_message())

            # Mark the build as completed.
            if worker_error.is_internal_error():
                logger.exception(
                    '[BUILD INTERNAL ERROR: Remote] Build ID: %s: %s',
                    build_id, worker_error.public_message())
                yield From(self._build_finished(BuildJobResult.INCOMPLETE))
            else:
                logger.debug('Got remote failure exception for build %s: %s',
                             build_id, aex)
                yield From(self._build_finished(BuildJobResult.ERROR))

        # Remove the current job.
        self._current_job = None
Example #14
    def _on_log_message(self, phase, json_data):
        """ Tails log messages and updates the build status. """
        # Update the heartbeat.
        self._last_heartbeat = datetime.datetime.utcnow()

        # Parse any of the JSON data logged.
        log_data = {}
        if json_data:
            try:
                log_data = json.loads(json_data)
            except ValueError:
                pass

        # Extract the current status message (if any).
        fully_unwrapped = ''
        keys_to_extract = ['error', 'status', 'stream']
        for key in keys_to_extract:
            if key in log_data:
                fully_unwrapped = log_data[key]
                break

        # Determine if this is a step string.
        current_step = None
        current_status_string = str(fully_unwrapped.encode('utf-8'))

        if current_status_string and phase == BUILD_PHASE.BUILDING:
            current_step = extract_current_step(current_status_string)

        # Parse and update the phase and the status_dict. The status dictionary contains
        # the pull/push progress, as well as the current step index.
        with self._build_status as status_dict:
            try:
                changed_phase = yield From(
                    self._build_status.set_phase(phase,
                                                 log_data.get('status_data')))
                if changed_phase:
                    logger.debug('Build %s has entered a new phase: %s',
                                 self.builder_realm, phase)
                elif self._current_job.repo_build.phase == BUILD_PHASE.CANCELLED:
                    build_id = self._current_job.repo_build.uuid
                    logger.debug(
                        'Trying to move cancelled build into phase: %s with id: %s',
                        phase, build_id)
                    raise Return(False)
            except InvalidRepositoryBuildException:
                build_id = self._current_job.repo_build.uuid
                logger.warning(
                    'Build %s was not found; repo was probably deleted',
                    build_id)
                raise Return(False)

            BuildComponent._process_pushpull_status(status_dict, phase,
                                                    log_data, self._image_info)

            # If the current message represents the beginning of a new step, then update the
            # current command index.
            if current_step is not None:
                status_dict['current_command'] = current_step

            # If the json data contains an error, then something went wrong with a push or pull.
            if 'error' in log_data:
                yield From(self._build_status.set_error(log_data['error']))

        if current_step is not None:
            yield From(self._build_status.set_command(current_status_string))
        elif phase == BUILD_PHASE.BUILDING:
            yield From(self._build_status.append_log(current_status_string))
        raise Return(True)
Example #15
    def resolve_redirects(self,
                          resp,
                          req,
                          stream=False,
                          timeout=None,
                          verify=True,
                          cert=None,
                          proxies=None):
        """Receives a Response. Returns a generator of Responses."""

        i = 0
        hist = []  # keep track of history
        redir_responses = []

        while resp.is_redirect:
            prepared_request = req.copy()

            if i > 0:
                # Update history and keep track of redirects.
                hist.append(resp)
                new_hist = list(hist)
                resp.history = new_hist

            try:
                yield From(
                    resp.content)  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                yield From(resp.raw.read(decode_content=False))

            if i >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' %
                                       self.max_redirects)

            # Release the connection back into the pool.
            resp.close()

            url = resp.headers['location']
            method = req.method

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)

            # The scheme should be lower case...
            parsed = urlparse(url)
            url = parsed.geturl()

            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not urlparse(url).netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)

            prepared_request.url = to_native_string(url)
            # Cache the url, unless it redirects to itself.
            if resp.is_permanent_redirect and req.url != prepared_request.url:
                self.redirect_cache[req.url] = prepared_request.url

            # http://tools.ietf.org/html/rfc7231#section-6.4.4
            if resp.status_code == codes.see_other and method != 'HEAD':
                method = 'GET'

            # Do what the browsers do, despite standards...
            # First, turn 302s into GETs.
            if resp.status_code == codes.found and method != 'HEAD':
                method = 'GET'

            # Second, if a POST is responded to with a 301, turn it into a GET.
            # This bizarre behaviour is explained in Issue 1704.
            if resp.status_code == codes.moved and method == 'POST':
                method = 'GET'

            prepared_request.method = method

            # https://github.com/kennethreitz/requests/issues/1084
            if resp.status_code not in (codes.temporary_redirect,
                                        codes.permanent_redirect):
                if 'Content-Length' in prepared_request.headers:
                    del prepared_request.headers['Content-Length']

                prepared_request.body = None

            headers = prepared_request.headers
            try:
                del headers['Cookie']
            except KeyError:
                pass

            extract_cookies_to_jar(prepared_request._cookies, prepared_request,
                                   resp.raw)
            prepared_request._cookies.update(self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)

            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)

            # Override the original request.
            req = prepared_request

            resp = yield From(
                self.send(
                    req,
                    stream=stream,
                    timeout=timeout,
                    verify=verify,
                    cert=cert,
                    proxies=proxies,
                    allow_redirects=False,
                ))

            extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

            i += 1
            redir_responses.append(resp)

        raise Return(iter(redir_responses))
Example #16
 def delete_backend(self, **kw):
     raise Return(self.client.delete_object(**kw))
Example #17
 def recv():
     msgback = (yield From(reader.readline()))
     msgback = msgback.decode("utf-8").rstrip()
     print("< " + msgback)
     raise Return(msgback)
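The reader that recv closes over is a StreamReader; a minimal sketch of where such a reader typically comes from, assuming a line-oriented server at 127.0.0.1:8888 (hypothetical address):

import trollius
from trollius import From, Return

@trollius.coroutine
def ask(message):
    reader, writer = yield From(
        trollius.open_connection('127.0.0.1', 8888))
    writer.write((message + '\r\n').encode('utf-8'))
    # Same readline/decode/strip sequence as recv() above.
    line = yield From(reader.readline())
    writer.close()
    raise Return(line.decode('utf-8').rstrip())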
Example #18
 def envs(self, **kw):
     contents = yield From(
         self.list_backend(prefix=self.prefix, delimiter='/'))
     raise Return(
         list(k['Prefix'].split('/')[1]
              for k in contents.get('CommonPrefixes', [])))
Example #19
 def getter(f):
     """ Coroutine which processes one item. """
     key, value = yield From(f)
     key, value = self._parse(key, value)
     raise Return(key, value)
Example #20
 def create_bucket(self, name=None, **kw):
     # administrator creates bucket for projects
     name = name or self.bucket
     result = self.client.create_bucket(Bucket=name)
     raise Return(result)
Example #21
    def start_builder(self, realm, token, build_uuid):
        region = self.executor_config["EC2_REGION"]
        channel = self.executor_config.get("COREOS_CHANNEL", "stable")

        coreos_ami = self.executor_config.get("COREOS_AMI", None)
        if coreos_ami is None:
            get_ami_callable = partial(self._get_coreos_ami, region, channel)
            coreos_ami = yield From(
                self._loop.run_in_executor(None, get_ami_callable))

        user_data = self.generate_cloud_config(realm, token, build_uuid,
                                               channel, self.manager_hostname)
        logger.debug("Generated cloud config for build %s: %s", build_uuid,
                     user_data)

        ec2_conn = self._get_conn()

        ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType(
            size=int(self.executor_config.get("BLOCK_DEVICE_SIZE", 48)),
            volume_type="gp2",
            delete_on_termination=True,
        )
        block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping()
        block_devices["/dev/xvda"] = ssd_root_ebs

        interfaces = None
        if self.executor_config.get("EC2_VPC_SUBNET_ID", None) is not None:
            interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                subnet_id=self.executor_config["EC2_VPC_SUBNET_ID"],
                groups=self.executor_config["EC2_SECURITY_GROUP_IDS"],
                associate_public_ip_address=True,
            )
            interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(
                interface)

        try:
            reservation = yield From(
                ec2_conn.run_instances(
                    coreos_ami,
                    instance_type=self.executor_config["EC2_INSTANCE_TYPE"],
                    key_name=self.executor_config.get("EC2_KEY_NAME", None),
                    user_data=user_data,
                    instance_initiated_shutdown_behavior="terminate",
                    block_device_map=block_devices,
                    network_interfaces=interfaces,
                ))
        except boto.exception.EC2ResponseError as ec2e:
            logger.exception("Unable to spawn builder instance")
            metric_queue.ephemeral_build_worker_failure.Inc()
            raise ec2e

        if not reservation.instances:
            raise ExecutorException("Unable to spawn builder instance.")
        elif len(reservation.instances) != 1:
            raise ExecutorException("EC2 started wrong number of instances!")

        launched = AsyncWrapper(reservation.instances[0])

        # Sleep a few seconds to wait for AWS to spawn the instance.
        yield From(trollius.sleep(_TAG_RETRY_SLEEP))

        # Tag the instance with its metadata.
        for i in range(0, _TAG_RETRY_COUNT):
            try:
                yield From(
                    launched.add_tags({
                        "Name": "Quay Ephemeral Builder",
                        "Realm": realm,
                        "Token": token,
                        "BuildUUID": build_uuid,
                    }))
            except boto.exception.EC2ResponseError as ec2e:
                if ec2e.error_code == "InvalidInstanceID.NotFound":
                    if i < _TAG_RETRY_COUNT - 1:
                        logger.warning(
                            "Failed to write EC2 tags for instance %s for build %s (attempt #%s)",
                            launched.id,
                            build_uuid,
                            i,
                        )
                        yield From(trollius.sleep(_TAG_RETRY_SLEEP))
                        continue

                    raise ExecutorException("Unable to find builder instance.")

                logger.exception("Failed to write EC2 tags (attempt #%s)", i)

        logger.debug("Machine with ID %s started for build %s", launched.id,
                     build_uuid)
        raise Return(launched.id)
Example #22
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
        if decode_content is None:
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False

        try:
            try:
                if amt is None:
                    # cStringIO doesn't like amt=None
                    data = yield From(self._fp.read())
                    flush_decoder = True
                else:
                    cache_content = False
                    data = yield From(self._fp.read(amt))
                    if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                        # Close the connection when no data is returned
                        #
                        # This is redundant to what httplib/http.client _should_
                        # already do.  However, versions of python released before
                        # December 15, 2012 (http://bugs.python.org/issue16298) do
                        # not properly close the connection in all cases. There is
                        # no harm in redundantly calling close.
                        self._fp.close()
                        flush_decoder = True

            except asyncio.TimeoutError:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise

                raise ReadTimeoutError(self._pool, None, 'Read timed out.')

            except HTTPException as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)

            self._fp_bytes_read += len(data)

            try:
                if decode_content and self._decoder:
                    data = self._decoder.decompress(data)
            except (IOError, zlib.error) as e:
                raise DecodeError(
                    "Received response with content-encoding: %s, but "
                    "failed to decode it." % content_encoding, e)

            if flush_decoder and decode_content and self._decoder:
                buf = self._decoder.decompress(binary_type())
                data += buf + self._decoder.flush()

            if cache_content:
                self._body = data

            raise Return(data)

        finally:
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
Example #23
 def getaddrinfo(*args, **kw):
     yield From(None)
     raise Return([(2, 1, 6, '', ('107.6.106.82', 80))])
Example #24
 def connect_read_pipe_mock(*args, **kw):
     connect = connect_read_pipe(*args, **kw)
     transport, protocol = yield From(connect)
     transport.pause_reading = mock.Mock()
     transport.resume_reading = mock.Mock()
     raise Return(transport, protocol)
Example #25
def connect(address=default_address):
    manager = yield From(pygazebo.connect(address=tuple(address)))
    raise Return(manager)
Example #26
    def request(self,
                method,
                url,
                params=None,
                data=None,
                headers=None,
                cookies=None,
                files=None,
                auth=None,
                timeout=None,
                allow_redirects=True,
                proxies=None,
                hooks=None,
                stream=None,
                verify=None,
                cert=None,
                json=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.

        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary or bytes to send in the body of the
            :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (`connect timeout, read
            timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol to the URL of
            the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) if ``True``, the SSL cert will be verified.
            A CA_BUNDLE path can also be provided.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        """

        method = builtin_str(method)

        # Create the Request.
        req = Request(
            method=method.upper(),
            url=url,
            headers=headers,
            files=files,
            data=data or {},
            json=json,
            params=params or {},
            auth=auth,
            cookies=cookies,
            hooks=hooks,
        )
        prep = self.prepare_request(req)

        proxies = proxies or {}

        settings = self.merge_environment_settings(prep.url, proxies, stream,
                                                   verify, cert)

        # Send the request.
        send_kwargs = {
            'timeout': timeout,
            'allow_redirects': allow_redirects,
        }
        send_kwargs.update(settings)
        resp = yield From(self.send(prep, **send_kwargs))

        raise Return(resp)
Example #27
 def acquire_lock():
     raise Return((yield From(lock)))
Example #28
    def send(self, request, **kwargs):
        """Send a given PreparedRequest."""
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)

        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if not isinstance(request, PreparedRequest):
            raise ValueError('You can only send PreparedRequests.')

        checked_urls = set()
        while request.url in self.redirect_cache:
            checked_urls.add(request.url)
            new_url = self.redirect_cache.get(request.url)
            if new_url in checked_urls:
                break
            request.url = new_url

        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        timeout = kwargs.get('timeout')
        verify = kwargs.get('verify')
        cert = kwargs.get('cert')
        proxies = kwargs.get('proxies')
        hooks = request.hooks

        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)

        # Start time (approximately) of the request
        start = datetime.utcnow()

        # Send the request
        r = yield From(adapter.send(request, **kwargs))

        # Total elapsed time of the request (approximately)
        r.elapsed = datetime.utcnow() - start

        # Response manipulation hooks
        r = yield From(dispatch_hook('response', hooks, r, **kwargs))

        # Persist cookies
        if r.history:

            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)

        extract_cookies_to_jar(self.cookies, request, r.raw)

        # Redirect resolving generator.
        gen = yield From(
            self.resolve_redirects(r,
                                   request,
                                   stream=stream,
                                   timeout=timeout,
                                   verify=verify,
                                   cert=cert,
                                   proxies=proxies))

        # Resolve redirects if allowed.
        history = [resp for resp in gen] if allow_redirects else []

        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history

        if not stream:
            yield From(r.content)

        raise Return(r)
Example #29
 def c1(result):
     yield From(cond.acquire())
     if (yield From(cond.wait())):
         result.append(1)
     raise Return(True)
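For c1 above to ever wake from cond.wait(), another coroutine must acquire the same condition and notify it; a minimal sketch of the matching notifier side, assuming cond is a shared trollius.Condition:

@trollius.coroutine
def notifier():
    yield From(cond.acquire())
    try:
        # Wake every coroutine currently blocked in cond.wait().
        cond.notify_all()
    finally:
        cond.release()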
Example #30
def shoot(cfgs, status_callback):
    """
    Performs multi-tank multi-config test.
    Accepts one or more config dicts.
    Returns list of session ID's.
    Raises TankLocked and TestFailed.
    """

    try:
        sessions = [
            SessionWrapper(
                ft.partial(  # pylint: disable=W0142
                    status_callback, i),
                **cfg) for i, cfg in enumerate(cfgs)
        ]
    except Exception:
        logger.exception("Failed to initialize session objects, config:\n%s",
                         yaml.safe_dump(cfgs))
        raise
    prepares = []
    runs = []
    stops = []
    try:
        try:
            prepares = [async(session.prepare()) for session in sessions]
            yield From(gather(*prepares))  # pylint: disable=W0142
            logger.info("All tanks are prepared. STARTING TO SHOOT.")
            runs = [async(session.run_until_finish()) for session in sessions]
            yield From(gather(*runs))  # pylint: disable=W0142
        except KeyboardInterrupt:
            logger.info("Test interrupted")
            raise
        except CancelledError:
            logger.info("Test cancelled")
            raise
        except TestFailed:
            logger.info("Test failed")
            raise
        except Exception:
            logger.exception("Exception occured in Test.run_until_finish()")
            raise
        except BaseException:
            logger.exception(
                "Something strange caught by Test.run_until_finish()")
            raise
    except BaseException as ex:
        logger.info("Stopping remaining tank sessions...")
        stops = [
            async(session.stop()) for session in sessions
            if not session.finished
        ]
        yield From(gather(*stops, return_exceptions=True))  # pylint: disable=W0142
        raise ex
    finally:
        for task in prepares + runs + stops:
            task.cancel()

    logger.info("All tanks are done.")
    raise Return([
        session.session.s_id if session.session is not None else None
        for session in sessions
    ])