Example #1
    def handle_401(self, r: R, **kwargs):
        """Takes the given response and re-tries auth with a new nonce."""

        if r.status_code == 401 and self.num_401_calls < 2:

            self.num_401_calls += 1

            # Renew nonce
            nonce = self.new_nonce(max(self._nonce(), self.last_nonce + 1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.close()
            prep = r.request.copy()
            cookies = prep._cookies
            requests.auth.extract_cookies_to_jar(cookies, r.request, r.raw)
            prep.prepare_cookies(cookies)

            self.auth_request(prep, nonce)

            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        return r
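A handler like this is typically attached through requests' response-hook mechanism from an AuthBase subclass, the same way requests' own HTTPDigestAuth wires up its handle_401. A minimal sketch; NonceAuth is a hypothetical stand-in for the enclosing class, with handle_401 as defined above:

import requests

class NonceAuth(requests.auth.AuthBase):
    """Hypothetical auth class that retries once with a fresh nonce on 401."""
    # handle_401 would be the method shown above

    def __call__(self, r):
        self.num_401_calls = 0                        # reset the retry counter per request
        r.register_hook('response', self.handle_401)  # requests' response-hook API
        return r

# session = requests.Session()
# resp = session.get('https://api.example.com/data', auth=NonceAuth())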
Example #2
def stops_incrementing(
    counter: Value, response: requests.Response, tolerance: int = 15
) -> bool:
    """Check that a counter stops incrementing after the response is closed.

    tolerance (int):
        Maximum number of events the server is allowed to send after the
        connection has been closed.
    """

    def wait_for_events(expect_many=False):
        nonlocal counter
        num_before = counter.value
        time.sleep(0.1)
        num_after = counter.value
        if expect_many:
            assert num_after - num_before >= 5 * tolerance, (
                num_after,
                num_before,
                tolerance,
            )
        return num_after

    sent_before_closing = wait_for_events(expect_many=True)
    response.close()
    sent_after_closing = wait_for_events() - sent_before_closing
    assert sent_after_closing <= tolerance, (sent_after_closing, tolerance)
    return True
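A sketch of how this check might be driven, assuming counter is a multiprocessing.Value that a background listener increments for every event the server sends, and a hypothetical event-stream endpoint:

from multiprocessing import Value

import requests

counter = Value('i', 0)  # incremented elsewhere on every received event
response = requests.get('http://localhost:8000/events', stream=True)
assert stops_incrementing(counter, response)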
Example #4
    def _decode_response(
        self, response: requests.Response
    ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
        """Decode the response as JSON entry or return a dictionary with the error

        Parameters
        ----------
        response: requests.Response
            Response to decode
        debug : bool
            Debugging flag. In this case failed requests get printed

        Returns
        -------
        dict
        JSON decoded entry or error
        """

        if "latest-darwin-py" in response.headers:
            self._handle_latest_darwin_py(response.headers["latest-darwin-py"])

        try:
            return response.json()
        except ValueError:
            self.log.error(f"[ERROR {response.status_code}] {response.text}")
            response.close()
            return {
                "error": "Response is not JSON encoded",
                "status_code": response.status_code,
                "text": response.text
            }
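Callers can tell the error dictionary apart from a decoded entry by the presence of the "error" key; a hypothetical sketch, with client standing in for the enclosing instance:

result = client._decode_response(response)
if isinstance(result, dict) and 'error' in result:
    raise RuntimeError(f"request failed with status {result['status_code']}")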
Example #5
 def _download(self, 
               response: Response, 
               fobj: Optional[BytesIO] = None,
               chunk_size: int = 1024,
               stream_hook: Optional[Callable[[Response, 
                                               BytesIO, 
                                               int
                                               ], BytesIO
                                              ]] = None,
               hook_kwargs: Optional[Dict] = None
               ) -> BytesIO:
     '''
     File download manager for Nessus
     
     Args:
         response (Response): The Response object to work on
         fobj (BytesIO, optional): 
             The file object to write to. If unspecified, an in-memory
             object will be created and returned
         chunk_size (int, optional):
             The chunk size to use when writing to the file object. The
             default is ``1024`` bytes.
         stream_hook (Callable[Response, BytesIO, int], optional):
             If specified, the response stream will be passed to the hook
             for processing instead of being handled here.
         hook_kwargs (Dict, optional):
             Any additional keyword arguments that should be passed on to
             the stream hook.
     
     Returns:
         BytesIO:
             The file object
     '''
     def base_hook(resp: Response, 
                   fobj: BytesIO, 
                   chunk_size: int, 
                   **kwargs
                   ):
         ''' Default stream hook '''
         for chunk in resp.iter_content(chunk_size=chunk_size):
             if chunk:
                 fobj.write(chunk)
     
     # Set the default attributes values if nothing was passed to them
     if fobj is None:
         fobj = BytesIO()
     if hook_kwargs is None:
         hook_kwargs = {}
     if stream_hook is None:
         stream_hook = base_hook
     
     # Call the stream hook with the Response object passed to us
     stream_hook(response, fobj, chunk_size, **hook_kwargs)
     
     # seek the file back to the beginning, close the response, and return
     # the file object to the caller
     fobj.seek(0)
     response.close()
     return fobj
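A possible custom stream_hook, e.g. to track download progress. The signature follows the hook contract above; nessus and the label keyword are hypothetical:

def progress_hook(resp, fobj, chunk_size, label='download'):
    '''Write chunks to fobj while counting the bytes transferred.'''
    total = 0
    for chunk in resp.iter_content(chunk_size=chunk_size):
        if chunk:
            fobj.write(chunk)
            total += len(chunk)
    print(f'{label}: {total} bytes')

# fobj = nessus._download(response, stream_hook=progress_hook,
#                         hook_kwargs={'label': 'scan report'})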
Example #6
def _process_content(response: requests.Response, max_size: int) -> str:
    """
    Checks content for allowed size and return it
    :param response:
    :param max_size:
    :return: content string
    """
    content = ''
    for chunk in response.iter_content(1024, decode_unicode=True):
        content += chunk
        if len(content) > max_size:
            response.close()
            raise SizeException(f'File size is too large, {response.url}')

    return content
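For the size cap to limit what is downloaded rather than just what is kept, the response should be requested with stream=True; a sketch with a hypothetical URL:

response = requests.get('https://example.com/large.txt', stream=True)
try:
    content = _process_content(response, max_size=1_000_000)  # cap at ~1M characters
except SizeException:
    content = None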
Example #7
 def post(url, data, headers, verify):
     self.assertEqual(
         url,
         'http://localhost:8080/auth/realms/teamplay/protocol/openid-connect/token'
     )
     self.assertTrue(
         ordered(data) == ordered(
             {
                 'code': 'mycode',
                 'scope': 'abc def',
                 'grant_type': 'authorization_code',
                 'client_id': 'thetick',
                 'redirect_uri': 'http://yourwebsite.com/redirect'
             }))
     self.assertTrue(
         ordered(headers) == ordered(
             {
                 'Content-Type':
                 'application/x-www-form-urlencoded;charset=UTF-8',
                 'Accept': 'application/json',
                 'Authorization': 'Basic dGhldGljazphcnRodXI='
             }))
     response = Response()
     response.status_code = 200
     response.close = close
     response.json = lambda: {'access_token': 'I grant thy access'}
     return response
Example #8
File: localfs.py Project: Korijn/conda
    def send(self,
             request,
             stream=None,
             timeout=None,
             verify=None,
             cert=None,
             proxies=None):
        pathname = url_to_path(request.url)

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            stats = stat(pathname)
        except OSError as exc:
            resp.status_code = 404
            resp.raw = exc
        else:
            modified = formatdate(stats.st_mtime, usegmt=True)
            content_type = guess_type(pathname)[0] or "text/plain"
            resp.headers = CaseInsensitiveDict({
                "Content-Type": content_type,
                "Content-Length": stats.st_size,
                "Last-Modified": modified,
            })

            resp.raw = open(pathname, "rb")
            resp.close = resp.raw.close

        return resp
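Transport adapters like this are mounted on a Session per URL scheme, which is how file:// URLs get routed through it; a minimal sketch, assuming LocalFSAdapter is the enclosing class:

import requests

session = requests.Session()
session.mount('file://', LocalFSAdapter())  # route file:// URLs to this adapter
resp = session.get('file:///etc/hostname')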
Example #10
    def handle_401(self, r: Response, **kwargs) -> Response:
        if r.status_code != 401:
            return r

        if r.headers.get('Content-Type', '').lower() == 'application/json':
            res_data = r.json()
            try:
                message = res_data['error']['message']
            except KeyError:
                return r
            if message != 'token invalid':
                return r

        if self._thread_local.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            # noinspection PyUnresolvedReferences
            r.request.body.seek(self._thread_local.pos)

        if self._thread_local.num_401_calls < 2:
            self._thread_local.num_401_calls += 1

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            # noinspection PyStatementEffect
            r.content
            r.close()
            prep = r.request.copy()
            # noinspection PyUnresolvedReferences,PyProtectedMember
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            # noinspection PyUnresolvedReferences,PyProtectedMember
            prep.prepare_cookies(prep._cookies)

            self.invalidate_token()

            self.apply_authorization_header(prep)

            # noinspection PyUnresolvedReferences
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        self._thread_local.num_401_calls = 1
        return r
Example #11
File: s3.py Project: ESSS/conda
    def send(self,
             request,
             stream=None,
             timeout=None,
             verify=None,
             cert=None,
             proxies=None):

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            import boto
        except ImportError:
            stderrlog.info('\nError: boto is required for S3 channels. '
                           'Please install it with `conda install boto`\n'
                           'Make sure to run `source deactivate` if you '
                           'are in a conda environment.\n')
            resp.status_code = 404
            return resp

        conn = boto.connect_s3()

        bucket_name, key_string = url_to_s3_info(request.url)

        # Get the bucket without validation that it exists and that we have
        # permissions to list its contents.
        bucket = conn.get_bucket(bucket_name, validate=False)

        try:
            key = bucket.get_key(key_string)
        except boto.exception.S3ResponseError as exc:
            # This exception will occur if the bucket does not exist or if the
            # user does not have permission to list its contents.
            resp.status_code = 404
            resp.raw = exc
            return resp

        if key and key.exists:
            modified = key.last_modified
            content_type = key.content_type or "text/plain"
            resp.headers = CaseInsensitiveDict({
                "Content-Type": content_type,
                "Content-Length": key.size,
                "Last-Modified": modified,
            })

            _, self._temp_file = mkstemp()
            key.get_contents_to_filename(self._temp_file)
            f = open(self._temp_file, 'rb')
            resp.raw = f
            resp.close = resp.raw.close
        else:
            resp.status_code = 404

        return resp
Example #12
 def get(url, verify):
     response = Response()
     response.status_code = 200
     response.close = close
     if url == 'http://localhost:8080/auth/realms/teamplay/.well-known/openid-configuration':
         response.json = lambda: json.loads(WELL_KNOWN)
     elif url == 'http://localhost:8080/auth/realms/teamplay/protocol/openid-connect/certs':
         response.json = lambda: json.loads(KEYS)
     else:
         self.fail('Unexpected url: {}'.format(url))
     return response
Example #14
    def __init__(self,
                 requests_response: requests.Response,
                 max_size: Optional[int],
                 error_is_client_side: bool = False):
        """Constructor."""

        try:
            # Read the raw data right away without waiting for a call to raw_data() to make sure that the server doesn't
            # time out while returning stuff
            self.__response_data = self.__read_response_data(
                requests_response=requests_response, max_size=max_size)

            # Release the response to return connection back to the pool
            # (http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow)
            requests_response.close()

        except Exception as ex:
            log.warning("Error reading data for URL {}: {}".format(
                requests_response.url, str(ex)))

            error_response = requests.Response()
            error_response.status_code = HTTPStatus.REQUEST_TIMEOUT.value
            error_response.reason = HTTPStatus.REQUEST_TIMEOUT.phrase
            error_response.request = requests_response.request
            error_response.history = []

            requests_response = error_response

            # We treat timeouts as client-side errors too because we can retry on them
            error_is_client_side = True

            self.__response_data = str(ex).encode('utf-8')

        self.__requests_response = requests_response
        self.__error_is_client_side = error_is_client_side

        self.__previous_response = None
        self.__request = None
Example #15
def handle_saf(res: requests.Response, ignore_limited: bool = False):
    try:
        for line in res.iter_lines(decode_unicode=True):
            if not line:
                continue

            try:
                saf_msg = json.loads(line)
            except json.JSONDecodeError as e:
                raise dnsdb2.ProtocolError(
                    f'could not decode json: {line}') from e

            cond = saf_msg.get('cond')
            obj = saf_msg.get('obj')
            msg = saf_msg.get('msg')

            if cond == COND_BEGIN:
                continue
            elif cond == COND_SUCCEEDED:
                return

            if obj:
                yield obj

            if cond == COND_ONGOING or not cond:
                continue
            elif cond == COND_LIMITED:
                if ignore_limited:
                    return
                raise dnsdb2.QueryLimited(msg)
            elif cond == COND_FAILED:
                raise dnsdb2.QueryFailed(msg)
            else:
                raise dnsdb2.ProtocolError(f'invalid cond: {cond}')

        raise dnsdb2.QueryTruncated()
    finally:
        res.close()
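Because handle_saf is a generator, iteration is what drives the stream, and the finally clause guarantees the response is closed even on early exit; a sketch with a hypothetical endpoint:

res = requests.get('https://api.dnsdb.example/lookup/rrset/name/example.com', stream=True)
for record in handle_saf(res, ignore_limited=True):
    print(record)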
Example #17
    def send(self, request, stream=False, timeout=None, **kwargs):

        parsed_url = urllib.parse.urlparse(request.url)
        file_path = parsed_url.path

        # Strip the leading slash, if present.
        if file_path.startswith('/'):
            file_path = file_path[1:]

        try:
            self.conn = self.get_connection(parsed_url.netloc, timeout)
        except ConnectionRefusedError as exc:
            # Wrap this in a requests exception.
            # in requests 2.2.1, ConnectionError does not take keyword args
            raise requests.exceptions.ConnectionError() from exc

        resp = Response()
        resp.url = request.url

        try:
            size = self.conn.size(file_path)
        except error_perm:
            resp.status_code = 404
            return resp

        if stream:
            # We have to do this in a background thread, since ftplib's and requests' approaches are the opposite:
            # ftplib is callback based, and requests needs to expose an iterable. (Push vs pull)

            # When the queue size is reached, puts will block. This provides some backpressure.
            queue = Queue(maxsize=100)
            done_sentinel = object()

            def handle_transfer():
                # Download all the chunks into a queue, then place a sentinel object into it to signal completion.
                self.conn.retrbinary('RETR ' + file_path, queue.put)
                queue.put(done_sentinel)

            Thread(target=handle_transfer).start()

            def stream(amt=8192, decode_content=False):
                """A generator, yielding chunks from the queue."""
                # We maintain a buffer so the consumer gets exactly the number of bytes requested.
                buffer = bytearray()
                while True:
                    data = queue.get()

                    if data is not done_sentinel:
                        buffer.extend(data)
                        if len(buffer) >= amt:
                            result = buffer[:amt]
                            buffer = buffer[amt:]

                            yield result
                    else:
                        if buffer:
                            yield buffer
                        return

            Raw = namedtuple('raw', 'stream')

            raw = Raw(stream)

            resp.status_code = 200
            resp.raw = raw
            resp.headers['content-length'] = size
            resp.close = lambda: self.conn.close()
            return resp

        else:
            # Not relevant for Ubuntu Make.
            raise NotImplementedError
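As with the other transport adapters above, this one would be mounted for the ftp:// scheme; a minimal sketch, assuming FTPAdapter is the name of the enclosing class:

import requests

session = requests.Session()
session.mount('ftp://', FTPAdapter())
resp = session.get('ftp://ftp.example.com/pub/file.iso', stream=True)
for chunk in resp.iter_content(chunk_size=8192):
    ...  # process each chunk as it arrives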
Example #18
    def __read_response_data(self,
                             requests_response: requests.Response) -> str:
        """Read data from Response object. Raises on read errors, callers are expected to catch exceptions."""
        max_size = self.max_size()

        response_data = ""
        read_response_data = True

        if max_size is not None:
            content_length = requests_response.headers.get(
                'Content-Length', None)

            try:
                if content_length is not None:

                    # HTTP spec allows one to combine multiple headers into one so Content-Length might look
                    # like "Content-Length: 123, 456"
                    if ',' in content_length:
                        content_length = content_length.split(',')
                        content_length = list(map(int, content_length))
                        content_length = max(content_length)

                    content_length = int(content_length)

            except Exception as ex:
                log.warning(
                    "Unable to read Content-Length for URL '%(url)s': %(exception)s"
                    % {
                        'url': requests_response.url,
                        'exception': str(ex),
                    })
                content_length = None

            if content_length is not None:
                if content_length > max_size:
                    log.warning("Content-Length exceeds %d for URL %s" % (
                        max_size,
                        requests_response.url,
                    ))

                    # Release the response to return connection back to the pool
                    # (http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow)
                    requests_response.close()

                    read_response_data = False

        if read_response_data:

            # requests's "apparent_encoding" is not used because chardet might OOM on big binary data responses
            encoding = requests_response.encoding

            if encoding is not None:

                # If "Content-Type" HTTP header contains a string "text" and doesn't have "charset" property,
                # "requests" falls back to setting the encoding to ISO-8859-1, which is probably not right
                # (encoding might have been defined in the HTML content itself via <meta> tag), so we use the
                # "apparent encoding" instead
                if encoding.lower() == 'iso-8859-1':
                    # Will try to auto-detect later
                    encoding = None

            # Some pages report a bogus encoding; in that case, fall back to autodetection
            if encoding is not None:
                try:
                    codecs.lookup(encoding)
                except LookupError:
                    log.warning("Invalid encoding %s for URL %s" % (
                        encoding,
                        requests_response.url,
                    ))

                    # Autodetect later
                    encoding = None

            # 100 KB should be enough for chardet to be able to make an informed decision
            chunk_size = 1024 * 100
            decoder = None
            response_data_size = 0

            for chunk in requests_response.raw.stream(chunk_size,
                                                      decode_content=True):

                if encoding is None:
                    # Test the encoding guesser's opinion, just like browsers do
                    try:
                        encoding = chardet.detect(chunk)['encoding']
                    except Exception as ex:
                        log.warning(
                            "Unable to detect encoding for URL %s: %s" % (
                                requests_response.url,
                                str(ex),
                            ))
                        encoding = None

                    # If encoding is not in HTTP headers nor can be determined from content itself, assume that
                    # it's UTF-8
                    if encoding is None:
                        encoding = 'UTF-8'

                if decoder is None:
                    decoder = codecs.getincrementaldecoder(encoding)(
                        errors='replace')

                decoded_chunk = decoder.decode(chunk)

                response_data += decoded_chunk
                response_data_size += len(
                    chunk)  # byte length, not string length

                # Content-Length might be missing / lying, so we measure size while fetching the data too
                if max_size is not None:
                    if response_data_size > max_size:
                        log.warning("Data size exceeds %d for URL %s" % (
                            max_size,
                            requests_response.url,
                        ))

                        # Release the response to return connection back to the pool
                        # (http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow)
                        requests_response.close()

                        break

        return response_data
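The incremental decoder used above is what lets the loop decode a multi-byte character even when it is split across two chunks; a self-contained sketch:

import codecs

decoder = codecs.getincrementaldecoder('utf-8')(errors='replace')
text = ''
for chunk in (b'caf', b'\xc3', b'\xa9'):  # the bytes of 'café', with 'é' split across chunks
    text += decoder.decode(chunk)
text += decoder.decode(b'', final=True)   # flush any buffered bytes
assert text == 'café'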