Example #1
        def range_iter():
            results = {}

            while True:
                next_range = self._next()

                headers, fragment_iters = next_range
                content_range = headers.get('Content-Range')
                if content_range is not None:
                    fragment_start, fragment_end, fragment_length = \
                            parse_content_range(content_range)
                elif self.fragment_length <= 0:
                    fragment_start = None
                    fragment_end = None
                    fragment_length = 0
                else:
                    fragment_start = 0
                    fragment_end = self.fragment_length - 1
                    fragment_length = self.fragment_length

                self._add_ranges_for_fragment(fragment_length, range_infos)

                satisfiable = False

                for range_info in range_infos:
                    satisfiable |= range_info['satisfiable']
                    k = (range_info['resp_fragment_start'],
                         range_info['resp_fragment_end'])
                    results.setdefault(k, []).append(range_info)

                try:
                    range_info = results[(fragment_start, fragment_end)].pop(0)
                except KeyError:
                    self.logger.error(
                        "Invalid range: %s, available: %s (reqid=%s)",
                        repr((fragment_start, fragment_end)), results.keys(),
                        self.reqid)
                    raise
                if self.perfdata is not None:
                    ec_start = monotonic_time()
                segment_iter = self._decode_segments(fragment_iters)
                if self.perfdata is not None:
                    ec_end = monotonic_time()
                    rawx_perfdata = self.perfdata.setdefault('rawx', dict())
                    rawx_perfdata['ec'] = rawx_perfdata.get('ec', 0.0) \
                        + ec_end - ec_start

                if not range_info['satisfiable']:
                    io.consume(segment_iter)
                    continue

                byterange_iter = self._iter_range(range_info, segment_iter)

                result = {
                    'start': range_info['resp_meta_start'],
                    'end': range_info['resp_meta_end'],
                    'iter': byterange_iter
                }

                yield result
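
The timing around _decode_segments above is the recurring pattern of this page: read monotonic_time() before and after a step, then accumulate the difference into the shared perfdata dict. A minimal stand-alone sketch of that pattern, using the standard library's time.monotonic in place of oio's monotonic_time wrapper, with a hypothetical helper name timed_step:

    from time import monotonic

    def timed_step(perfdata, key, func, *args, **kwargs):
        # Hypothetical helper mirroring the pattern above: time one step
        # with the monotonic clock and accumulate the elapsed seconds
        # under perfdata['rawx'][key], creating the entry on first use.
        if perfdata is None:
            return func(*args, **kwargs)
        start = monotonic()
        try:
            return func(*args, **kwargs)
        finally:
            rawx = perfdata.setdefault('rawx', dict())
            rawx[key] = rawx.get(key, 0.0) + monotonic() - start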
Example #2
    def connect(cls, chunk, sysmeta, reqid=None,
                connection_timeout=None, write_timeout=None, **kwargs):
        raw_url = chunk.get("real_url", chunk["url"])
        parsed = urlparse(raw_url)
        chunk_path = parsed.path.split('/')[-1]
        hdrs = headers_from_object_metadata(sysmeta)
        if reqid:
            hdrs[REQID_HEADER] = reqid

        hdrs[CHUNK_HEADERS["chunk_pos"]] = chunk["pos"]
        hdrs[CHUNK_HEADERS["chunk_id"]] = chunk_path

        # in the trailer
        # metachunk_size & metachunk_hash
        trailers = (CHUNK_HEADERS["metachunk_size"],
                    CHUNK_HEADERS["metachunk_hash"])
        if kwargs.get('chunk_checksum_algo'):
            trailers = trailers + (CHUNK_HEADERS["chunk_hash"], )
        hdrs["Trailer"] = ', '.join(trailers)
        with ConnectionTimeout(
                connection_timeout or io.CONNECTION_TIMEOUT):
            perfdata = kwargs.get('perfdata', None)
            if perfdata is not None:
                connect_start = monotonic_time()
            conn = io.http_connect(
                parsed.netloc, 'PUT', parsed.path, hdrs, scheme=parsed.scheme)
            conn.set_cork(True)
            if perfdata is not None:
                connect_end = monotonic_time()
                perfdata_rawx = perfdata.setdefault('rawx', dict())
                perfdata_rawx['connect.' + chunk['url']] = \
                    connect_end - connect_start
            conn.chunk = chunk
        return cls(chunk, conn, write_timeout=write_timeout,
                   reqid=reqid, **kwargs)
Example #3
        def send(data):
            self.checksum.update(data)
            self.global_checksum.update(data)
            # get the encoded fragments
            if self.perfdata is not None:
                ec_start = monotonic_time()
            fragments = ec_stream.send(data)
            if self.perfdata is not None:
                ec_end = monotonic_time()
                rawx_perfdata = self.perfdata.setdefault('rawx', dict())
                rawx_perfdata['ec'] = rawx_perfdata.get('ec', 0.0) \
                    + ec_end - ec_start
            if fragments is None:
                # not enough data given
                return

            current_writers = list(writers)
            failed_chunks = list()
            for writer in current_writers:
                fragment = fragments[chunk_index[writer]]
                if not writer.failed:
                    if writer.checksum:
                        writer.checksum.update(fragment)
                    writer.send(fragment)
                else:
                    current_writers.remove(writer)
                    failed_chunks.append(writer.chunk)
            sleep(0)
            self.quorum_or_fail([w.chunk for w in current_writers],
                                failed_chunks)
Example #4
 def _send_data(self, conn):
     """
     Send data to an open connection, taking data blocks from `conn.queue`.
     """
     conn.upload_start = None
     while True:
         data = conn.queue.get()
         if isinstance(data, text_type):
             data = data.encode('utf-8')
         if not conn.failed:
             try:
                 with green.ChunkWriteTimeout(self.write_timeout):
                     if self.perfdata is not None \
                             and conn.upload_start is None:
                         conn.upload_start = monotonic_time()
                     conn.send(b'%x\r\n' % len(data))
                     conn.send(data)
                     conn.send(b'\r\n')
                 if not data:
                     if self.perfdata is not None:
                         fin_start = monotonic_time()
                     # Last segment sent, disable TCP_CORK to flush buffers
                     conn.set_cork(False)
                     if self.perfdata is not None:
                         fin_end = monotonic_time()
                         rawx_perfdata = self.perfdata.setdefault(
                             'rawx', dict())
                         chunk_url = conn.chunk['url']
                         rawx_perfdata['upload_finish.' + chunk_url] = \
                             fin_end - fin_start
                 green.eventlet_yield()
             except (Exception, green.ChunkWriteTimeout) as err:
                 conn.failed = True
                 conn.chunk['error'] = str(err)
Example #5
 def test_deadline_to_timeout(self):
     self.assertRaises(DeadlineReached, deadline_to_timeout,
                       monotonic_time() - 0.001, True)
     self.assertRaises(DeadlineReached, deadline_to_timeout,
                       monotonic_time(), True)
     deadline = monotonic_time() + 1.0
     to = deadline_to_timeout(deadline, True)
     self.assertLessEqual(to, 1.0)
     self.assertGreater(to, 0.9)
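
The assertions above pin down the contract of deadline_to_timeout: it turns an absolute monotonic-clock deadline into a relative timeout in seconds, and when the check flag is set it raises DeadlineReached for a deadline already in the past. A minimal sketch consistent with this test, assuming nothing beyond what the test shows (the real oio implementation may differ in details):

    from time import monotonic as monotonic_time

    class DeadlineReached(Exception):
        """Stand-in for oio's DeadlineReached exception."""

    def deadline_to_timeout(deadline, check=False):
        # Convert an absolute monotonic deadline into a relative timeout.
        timeout = deadline - monotonic_time()
        if check and timeout <= 0.0:
            raise DeadlineReached()
        return timeout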
Example #6
 def _update_rawx_perfdata(self, *args, **kwargs):
     perfdata = kwargs.get('perfdata') or self.perfdata
     if perfdata is not None:
         req_start = utils.monotonic_time()
     res = func(self, *args, **kwargs)
     if perfdata is not None:
         req_end = utils.monotonic_time()
         val = perfdata.get('rawx', 0.0) + req_end - req_start
         perfdata['rawx'] = val
     return res
Example #7
 def _update_rawx_perfdata(self, *args, **kwargs):
     perfdata = kwargs.get('perfdata') or self.perfdata
     if perfdata is not None:
         req_start = utils.monotonic_time()
     res = func(self, *args, **kwargs)
     if perfdata is not None:
         req_end = utils.monotonic_time()
         perfdata_rawx = perfdata.setdefault('rawx', dict())
         overall = perfdata_rawx.get('overall', 0.0) + req_end - req_start
         perfdata_rawx['overall'] = overall
     return res
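
Examples #6 and #7 are two revisions of the same inner wrapper, and both take `func` from the closure of an enclosing decorator that the snippets do not show. A plausible reconstruction of that decorator around the Example #7 body (the outer name update_rawx_perfdata and the import are assumptions):

    from functools import wraps
    from time import monotonic as monotonic_time  # stand-in for utils.monotonic_time

    def update_rawx_perfdata(func):
        # Assumed enclosing decorator: wrap a method and accumulate its
        # wall-clock duration under perfdata['rawx']['overall'].
        @wraps(func)
        def _update_rawx_perfdata(self, *args, **kwargs):
            perfdata = kwargs.get('perfdata') or self.perfdata
            if perfdata is not None:
                req_start = monotonic_time()
            res = func(self, *args, **kwargs)
            if perfdata is not None:
                req_end = monotonic_time()
                perfdata_rawx = perfdata.setdefault('rawx', dict())
                perfdata_rawx['overall'] = \
                    perfdata_rawx.get('overall', 0.0) + req_end - req_start
            return res
        return _update_rawx_perfdata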
Example #8
 def test_object_list_empty_container(self):
     """
     Ensure object listing of an empty container takes less than 35ms.
     """
     container = self.__class__.__name__ + random_str(8)
     self.api.container_create(self.account, container)
     self.containers.add(container)
     for _ in range(8):
         start = monotonic_time()
         self.api.object_list(self.account, container)
         duration = monotonic_time() - start
         logging.info("Object list took %.6fs", duration)
         self.assertLess(duration, 0.035)
Example #9
    def _get_request(self, chunk):
        """
        Connect to a chunk, fetch headers but don't read data.
        Save the response object in `self.sources` list.
        """
        try:
            with green.ConnectionTimeout(self.connection_timeout):
                raw_url = chunk.get("real_url", chunk["url"])
                parsed = urlparse(raw_url)
                if self.perfdata is not None:
                    connect_start = monotonic_time()
                conn = http_connect(parsed.netloc,
                                    'GET',
                                    parsed.path,
                                    self.request_headers,
                                    scheme=parsed.scheme)
                if self.perfdata is not None:
                    connect_end = monotonic_time()
                    rawx_perfdata = self.perfdata.setdefault('rawx', dict())
                    rawx_perfdata['connect.' + chunk['url']] = \
                        connect_end - connect_start
            with green.OioTimeout(self.read_timeout):
                source = conn.getresponse()
                source.conn = conn
                if self.perfdata is not None:
                    source.download_start = monotonic_time()
        except (SocketError, Timeout) as err:
            self.logger.error('Connection failed to %s (reqid=%s): %s', chunk,
                              self.reqid, err)
            self._resp_by_chunk[chunk["url"]] = (0, text_type(err))
            return False
        except Exception as err:
            self.logger.exception('Connection failed to %s (reqid=%s)', chunk,
                                  self.reqid)
            self._resp_by_chunk[chunk["url"]] = (0, text_type(err))
            return False

        if source.status in (200, 206):
            self.status = source.status
            self._headers = [(k.lower(), v) for k, v in source.getheaders()]
            self.sources.append((source, chunk))
            return True
        else:
            self.logger.warn("Invalid response from %s (reqid=%s): %d %s",
                             chunk, self.reqid, source.status, source.reason)
            self._resp_by_chunk[chunk["url"]] = (source.status,
                                                 text_type(source.reason))
            close_source(source, self.logger)
        return False
Example #10
    def _get_response(self, conn):
        """
        Wait for server response.

        :returns: a tuple with `conn` and the response object or an exception.
        """
        try:
            with green.ChunkWriteTimeout(self.write_timeout):
                resp = conn.getresponse()
                if self.perfdata is not None:
                    upload_end = monotonic_time()
                    perfdata_rawx = self.perfdata.setdefault('rawx', dict())
                    url_chunk = conn.chunk['url']
                    perfdata_rawx[url_chunk] = \
                        perfdata_rawx.get(url_chunk, 0.0) \
                        + upload_end - conn.upload_start
        except Timeout as err:
            resp = err
            logger.error('Failed to read response from %s (reqid=%s): %s',
                         conn.chunk, self.reqid, err)
        except Exception as err:
            resp = err
            logger.exception("Failed to read response from %s (reqid=%s)",
                             conn.chunk, self.reqid)
        return (conn, resp)
Example #11
 def _send_data(self, conn):
     """
     Send data to an open connection, taking data blocks from `conn.queue`.
     """
     conn.upload_start = None
     while True:
         data = conn.queue.get()
         if isinstance(data, text_type):
             data = data.encode('utf-8')
         if not conn.failed:
             try:
                 with green.ChunkWriteTimeout(self.write_timeout):
                     if self.perfdata is not None \
                             and conn.upload_start is None:
                         conn.upload_start = monotonic_time()
                     conn.send(b'%x\r\n' % len(data))
                     conn.send(data)
                     conn.send(b'\r\n')
                 if not data:
                     # Last segment sent, disable TCP_CORK to flush buffers
                     conn.set_cork(False)
                 sleep(0)
             except (Exception, green.ChunkWriteTimeout) as err:
                 conn.failed = True
                 conn.chunk['error'] = str(err)
Example #12
    def _send(self):
        """Send coroutine loop"""
        self.conn.upload_start = None
        while not self.failed:
            # fetch input data from the queue
            data = self.queue.get()
            # use HTTP transfer encoding chunked
            # to write data to RAWX
            try:
                with ChunkWriteTimeout(self.write_timeout):
                    if self.perfdata is not None \
                            and self.conn.upload_start is None:
                        self.conn.upload_start = monotonic_time()
                    self.conn.send("%x\r\n" % len(data))
                    self.conn.send(data)
                    self.conn.send("\r\n")
                    self.bytes_transferred += len(data)
                eventlet_yield()
            except (Exception, ChunkWriteTimeout) as exc:
                self.failed = True
                msg = str(exc)
                self.logger.warn("Failed to write to %s (%s, reqid=%s)",
                                 self.chunk, msg, self.reqid)
                self.chunk['error'] = 'write: %s' % msg

        # Drain the queue before quitting
        while True:
            try:
                self.queue.get_nowait()
            except Empty:
                break
Example #13
    def test_deadline_from_read_timeout(self):
        kwargs = dict()
        set_deadline_from_read_timeout(kwargs)
        # nothing changed
        self.assertFalse(kwargs)

        now = monotonic_time()
        kwargs['read_timeout'] = 10.0
        set_deadline_from_read_timeout(kwargs)
        # deadline is computed from read timeout
        self.assertIn('deadline', kwargs)
        self.assertLessEqual(kwargs['deadline'],
                             now + kwargs['read_timeout'] + 0.1)

        prev_deadline = kwargs['deadline']
        set_deadline_from_read_timeout(kwargs)
        # deadline is not recomputed
        self.assertIn('deadline', kwargs)
        self.assertEqual(prev_deadline, kwargs['deadline'])

        prev_deadline = kwargs['deadline']
        set_deadline_from_read_timeout(kwargs, force=True)
        # deadline is recomputed
        self.assertIn('deadline', kwargs)
        self.assertNotEqual(prev_deadline, kwargs['deadline'])
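
From these assertions one can infer the behaviour of set_deadline_from_read_timeout: derive an absolute deadline from read_timeout when none is set yet, and recompute it only when forced. A minimal sketch matching the test (a reconstruction, not the actual oio source):

    from time import monotonic as monotonic_time

    def set_deadline_from_read_timeout(kwargs, force=False):
        # When a read_timeout is present, derive an absolute monotonic
        # deadline from it, unless one already exists (or force=True).
        read_timeout = kwargs.get('read_timeout')
        if read_timeout is not None and (force or 'deadline' not in kwargs):
            kwargs['deadline'] = monotonic_time() + read_timeout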
Example #14
    def finish(self, metachunk_size, metachunk_hash):
        """
        Send metachunk_size and metachunk_hash as trailers.

        :returns: the chunk object if the upload has failed, else None
        """
        self.wait()
        if self.failed:
            self.logger.debug("NOT sending end marker and trailers to %s, "
                              "because upload has failed", self.chunk['url'])
            return self.chunk
        self.logger.debug("Sending end marker and trailers to %s",
                          self.chunk['url'])
        parts = [
            '0\r\n',
            '%s: %s\r\n' % (CHUNK_HEADERS['metachunk_size'],
                            metachunk_size),
            '%s: %s\r\n' % (CHUNK_HEADERS['metachunk_hash'],
                            metachunk_hash)
        ]
        if self.checksum:
            parts.append('%s: %s\r\n' % (CHUNK_HEADERS['chunk_hash'],
                                         self.checksum.hexdigest()))
        parts.append('\r\n')
        to_send = ''.join(parts).encode('utf-8')
        if self.perfdata is not None:
            fin_start = monotonic_time()
        try:
            with ChunkWriteTimeout(self.write_timeout):
                self.conn.send(to_send)
                # Last segment sent, disable TCP_CORK to flush buffers
                self.conn.set_cork(False)
        except (Exception, ChunkWriteTimeout) as exc:
            self.failed = True
            msg = text_type(exc)
            self.logger.warn("Failed to finish %s (%s, reqid=%s)",
                             self.chunk, msg, self.reqid)
            self.chunk['error'] = 'finish: %s' % msg
            return self.chunk
        finally:
            if self.perfdata is not None:
                fin_end = monotonic_time()
                rawx_perfdata = self.perfdata.setdefault('rawx', dict())
                chunk_url = self.conn.chunk['url']
                rawx_perfdata['upload_finish.' + chunk_url] = \
                    fin_end - fin_start
        return None
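
For illustration, the trailer assembled by finish() pairs with the Trailer announcement from Example #2: a zero-length chunk terminates the chunked body, the announced trailer fields follow, then a blank line. A rough sketch of the resulting bytes, with made-up header names standing in for the real CHUNK_HEADERS values:

    # Illustrative only: header names and values here are stand-ins.
    parts = [
        '0\r\n',
        'metachunk-size: 1048576\r\n',
        'metachunk-hash: d41d8cd98f00b204e9800998ecf8427e\r\n',
        '\r\n',
    ]
    to_send = ''.join(parts).encode('utf-8')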
Example #15
    def _connect_put(self, chunk):
        """
        Create a connection in order to PUT `chunk`.

        :returns: a tuple with the connection object and `chunk`
        """
        raw_url = chunk.get("real_url", chunk["url"])
        parsed = urlparse(raw_url)
        try:
            chunk_path = parsed.path.split('/')[-1]
            hdrs = headers_from_object_metadata(self.sysmeta)
            hdrs[CHUNK_HEADERS["chunk_pos"]] = chunk["pos"]
            hdrs[CHUNK_HEADERS["chunk_id"]] = chunk_path
            hdrs.update(self.headers)
            hdrs = encode(hdrs)

            with green.ConnectionTimeout(self.connection_timeout):
                if self.perfdata is not None:
                    connect_start = monotonic_time()
                conn = io.http_connect(parsed.netloc,
                                       'PUT',
                                       parsed.path,
                                       hdrs,
                                       scheme=parsed.scheme)
                conn.set_cork(True)
                if self.perfdata is not None:
                    connect_end = monotonic_time()
                    perfdata_rawx = self.perfdata.setdefault('rawx', dict())
                    perfdata_rawx['connect.' + chunk['url']] = \
                        connect_end - connect_start
                conn.chunk = chunk
            return conn, chunk
        except (SocketError, Timeout) as err:
            msg = str(err)
            self.logger.warn("Failed to connect to %s (reqid=%s): %s", chunk,
                             self.reqid, err)
        except Exception as err:
            msg = str(err)
            self.logger.exception("Failed to connect to %s (reqid=%s)", chunk,
                                  self.reqid)
        chunk['error'] = msg
        return None, chunk
Example #16
    def encode_and_send(self, ec_stream, data, writers):
        """
        Encode a buffer of data through `ec_stream`,
        and dispatch the encoded data to the chunk writers.

        :returns: the list of writers that are still writing
        """
        current_writers = list(writers)
        self.checksum.update(data)
        self.global_checksum.update(data)
        # get the encoded fragments
        if self.perfdata is not None:
            ec_start = monotonic_time()
        fragments = ec_stream.send(data)
        if self.perfdata is not None:
            ec_end = monotonic_time()
            rawx_perfdata = self.perfdata.setdefault('rawx', dict())
            rawx_perfdata['ec'] = rawx_perfdata.get('ec', 0.0) \
                + ec_end - ec_start
        if fragments is None:
            # not enough data given
            return current_writers

        for writer in writers:
            fragment = fragments[writer.chunk['num']]
            if not writer.failed:
                if writer.checksum:
                    writer.checksum.update(fragment)
                writer.send(fragment)
            else:
                current_writers.remove(writer)
                self.failed_chunks.append(writer.chunk)
        eventlet_yield()
        self.quorum_or_fail([w.chunk for w in current_writers],
                            self.failed_chunks)
        return current_writers
Example #17
 def getresponse(self):
     """Read the HTTP response from the connection"""
     # As the server may buffer data before writing it to non-volatile
     # storage, we don't know if we have to wait while sending data or
     # while reading the response, thus we apply the same timeout to both.
     with Timeout(self.write_timeout):
         resp = self.conn.getresponse()
         if self.perfdata is not None:
             perfdata_rawx = self.perfdata.setdefault('rawx', dict())
             url_chunk = self.conn.chunk['url']
             upload_end = monotonic_time()
             perfdata_rawx[url_chunk] = \
                 perfdata_rawx.get(url_chunk, 0.0) \
                 + upload_end - self.conn.upload_start
         return resp
Example #18
 def _should_notify(self, account, container):
     if not self.check_account:
         return True
     now = monotonic_time()
     enabled, last_update = self.cache.get((account, container), (None, 0))
     if now - last_update > self.cache_duration:
         ctinfo = self.account.container_show(
             account,
             container,
             connection_timeout=self.connection_timeout,
             read_timeout=self.read_timeout,
             reqid=request_id('ev-repl-'))
         enabled = ctinfo.get(BUCKET_PROP_REPLI_ENABLED, False)
         self.cache[(account, container)] = (enabled, now)
     return enabled
Example #19
 def _should_notify(self, account, container):
     if not self.check_account:
         return True
     now = monotonic_time()
     enabled, last_update = self.cache.get((account, container), (None, 0))
     if now - last_update > self.cache_duration:
         try:
             ctinfo = self.account.container_show(
                 account,
                 container,
                 force_master=self.force_master,
                 connection_timeout=self.connection_timeout,
                 read_timeout=self.read_timeout,
                 reqid=request_id('ev-repl-'))
             enabled = ctinfo.get(BUCKET_PROP_REPLI_ENABLED, False)
             self.cache[(account, container)] = (enabled, now)
         except Exception:
             self.logger.exception(
                 "Not updating the cached value %s=%s for %s/%s",
                 BUCKET_PROP_REPLI_ENABLED, enabled, account, container)
     return enabled
Example #20
    def _decode_segments(self, fragment_iterators):
        """
        Read from the fragment iterators and yield full segments.
        """
        # we use eventlet Queue to read fragments
        queues = []
        # each iterator has its own queue
        for _j in range(len(fragment_iterators)):
            queues.append(LightQueue(1))

        def put_in_queue(fragment_iterator, queue):
            """
            Coroutine to read the fragments from the iterator
            """
            try:
                for fragment in fragment_iterator:
                    # put the read fragment in the queue
                    queue.put(fragment)
                    # the queues are of size 1 so this coroutine blocks
                    # until we decode a full segment
            except GreenletExit:
                # ignore
                pass
            except ChunkReadTimeout as err:
                self.logger.error('%s (reqid=%s)', err, self.reqid)
            except Exception:
                self.logger.exception("Exception on reading (reqid=%s)",
                                      self.reqid)
            finally:
                # The queue has size 1; grow it so the sentinel below can
                # be enqueued even if the decoding loop never drains it.
                queue.resize(2)
                # put None to tell the decoding loop
                # that this is over
                queue.put(None)
                # close the iterator
                fragment_iterator.close()

        # we use eventlet GreenPool to manage the read of fragments
        with ContextPool(len(fragment_iterators)) as pool:
            # spawn coroutines to read the fragments
            for fragment_iterator, queue in zip(fragment_iterators, queues):
                pool.spawn(put_in_queue, fragment_iterator, queue)

            # main decoding loop
            while True:
                data = []
                # get the fragments from the queues
                for queue in queues:
                    fragment = queue.get()
                    data.append(fragment)

                if not all(data):
                    # one of the readers returned None
                    # impossible to read segment
                    break
                # actually decode the fragments into a segment
                if self.perfdata is not None:
                    ec_start = monotonic_time()
                try:
                    segment = self.storage_method.driver.decode(data)
                except exceptions.ECError:
                    # something terrible happened
                    self.logger.exception(
                        "ERROR decoding fragments (reqid=%s)", self.reqid)
                    raise
                finally:
                    if self.perfdata is not None:
                        ec_end = monotonic_time()
                        rawx_pdata = self.perfdata.setdefault('rawx', dict())
                        rawx_pdata['ec'] = rawx_pdata.get('ec', 0.0) \
                            + ec_end - ec_start

                yield segment
Example #21
    def _direct_request(self, method, url, headers=None, data=None, json=None,
                        params=None, admin_mode=False, pool_manager=None,
                        force_master=False, **kwargs):
        """
        Make an HTTP request.

        :param method: HTTP method to use (e.g. "GET")
        :type method: `str`
        :param url: URL to request
        :type url: `str`
        :keyword admin_mode: allow operations on slave or worm namespaces
        :type admin_mode: `bool`
        :keyword deadline: deadline for the request, in monotonic time.
            Supersedes `read_timeout`.
        :type deadline: `float` seconds
        :keyword timeout: optional timeout for the request (in seconds).
            May be a `urllib3.Timeout(connect=connection_timeout,
            read=read_timeout)`.
            This method also accepts `connection_timeout` and `read_timeout`
            as separate arguments.
        :type timeout: `float` or `urllib3.Timeout`
        :keyword headers: optional headers to add to the request
        :type headers: `dict`
        :keyword force_master: request will run on master service only.
        :type force_master: `bool`

        :raise oio.common.exceptions.OioTimeout: in case of read, write
            or connection timeout
        :raise oio.common.exceptions.OioNetworkException: in case of
            connection error
        :raise oio.common.exceptions.OioException: in case of any other
            HTTP error
        :raise oio.common.exceptions.ClientException: in case of HTTP
            status code >= 400
        """
        out_kwargs = dict(kwargs)

        # Ensure headers are all strings
        if headers:
            out_headers = {k: str(v) for k, v in headers.items()}
        else:
            out_headers = dict()
        if self.admin_mode or admin_mode:
            out_headers[ADMIN_HEADER] = '1'
        if self.force_master or force_master:
            out_headers[FORCEMASTER_HEADER] = '1'

        # Look for a request deadline, deduce the timeout from it.
        if kwargs.get('deadline', None) is not None:
            to = deadline_to_timeout(kwargs['deadline'], True)
            to = min(to, kwargs.get('read_timeout', to))
            out_kwargs['timeout'] = urllib3.Timeout(
                connect=kwargs.get('connection_timeout', CONNECTION_TIMEOUT),
                read=to)
            # Shorten the deadline by 1% to compensate for the time spent
            # connecting and reading the response.
            out_headers[TIMEOUT_HEADER] = int(to * 990000.0)

        # Ensure there is a timeout
        if 'timeout' not in out_kwargs:
            out_kwargs['timeout'] = urllib3.Timeout(
                connect=kwargs.get('connection_timeout', CONNECTION_TIMEOUT),
                read=kwargs.get('read_timeout', READ_TIMEOUT))
        if TIMEOUT_HEADER not in out_headers:
            to = out_kwargs['timeout']
            if isinstance(to, urllib3.Timeout):
                to = to.read_timeout
            else:
                to = float(to)
            out_headers[TIMEOUT_HEADER] = int(to * 1000000.0)

        # Look for a request ID
        if 'reqid' in kwargs:
            out_headers[REQID_HEADER] = str(kwargs['reqid'])

        if len(out_headers.get(REQID_HEADER, '')) > STRLEN_REQID:
            out_headers[REQID_HEADER] = \
                out_headers[REQID_HEADER][:STRLEN_REQID]
            self.__logger().warn('Request ID truncated to %d characters',
                                 STRLEN_REQID)

        # Convert json and add Content-Type
        if json:
            out_headers["Content-Type"] = "application/json"
            data = jsonlib.dumps(json)

        # Trigger performance measurements
        perfdata = kwargs.get('perfdata', None)
        if perfdata is not None:
            out_headers[PERFDATA_HEADER] = 'enabled'

        # Explicitly keep or close the connection
        if 'Connection' not in out_headers:
            out_headers['Connection'] = self.connection

        out_kwargs['headers'] = out_headers
        out_kwargs['body'] = data

        # Add query string
        if params:
            out_param = []
            for k, v in params.items():
                if v is not None:
                    if isinstance(v, unicode):
                        v = unicode(v).encode('utf-8')
                    out_param.append((k, v))
            encoded_args = urlencode(out_param)
            url += '?' + encoded_args

        if not pool_manager:
            pool_manager = self.pool_manager

        try:
            if perfdata is not None:
                request_start = monotonic_time()
            resp = pool_manager.request(method, url, **out_kwargs)
            if perfdata is not None:
                request_end = monotonic_time()
                service_perfdata = perfdata.setdefault(
                    self.service_type, dict())
                service_perfdata['total'] = service_perfdata.get(
                    'total', 0.0) + request_end - request_start
            body = resp.data
            if body:
                try:
                    body = jsonlib.loads(body)
                except ValueError:
                    pass
            if perfdata is not None and PERFDATA_HEADER in resp.headers:
                service_perfdata = perfdata[self.service_type]
                for header_val in resp.headers[PERFDATA_HEADER].split(','):
                    kv = header_val.split('=', 1)
                    service_perfdata[kv[0]] = service_perfdata.get(
                        kv[0], 0.0) + float(kv[1]) / 1000000.0
        except urllib3.exceptions.HTTPError as exc:
            oio_exception_from_httperror(exc,
                                         reqid=out_headers.get(REQID_HEADER),
                                         url=url)
        if resp.status >= 400:
            raise exceptions.from_response(resp, body)
        return resp, body
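
A short worked example of the deadline handling above: with a deadline two seconds away and read_timeout=10.0, the effective timeout is the smaller of the two, and the timeout header carries that value in microseconds, shortened by 1% (all values illustrative):

    from time import monotonic

    deadline = monotonic() + 2.0
    to = deadline - monotonic()        # roughly 2.0 s, as deadline_to_timeout would return
    to = min(to, 10.0)                 # capped by read_timeout
    header_value = int(to * 990000.0)  # about 1980000 us: the 1% safety margin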
Example #22
    def iter_from_resp(self, source, parts_iter, part, chunk):
        bytes_consumed = 0
        count = 0
        buf = b''
        if self.perfdata is not None:
            rawx_perfdata = self.perfdata.setdefault('rawx', dict())
            chunk_url = chunk['url']
        while True:
            try:
                with green.ChunkReadTimeout(self.read_timeout):
                    data = part.read(READ_CHUNK_SIZE)
                    count += 1
                    buf += data
            except (green.ChunkReadTimeout, IOError) as crto:
                try:
                    self.recover(bytes_consumed)
                except (exc.UnsatisfiableRange, ValueError):
                    raise
                except exc.EmptyByteRange:
                    # we are done already
                    break
                buf = b''
                # find a new source to perform recovery
                new_source, new_chunk = self._get_source()
                if new_source:
                    self.logger.warn(
                        "Failed to read from %s (%s), "
                        "retrying from %s (reqid=%s)", chunk, crto, new_chunk,
                        self.reqid)
                    close_source(source[0], self.logger)
                    # switch source
                    source[0] = new_source
                    chunk = new_chunk
                    parts_iter[0] = make_iter_from_resp(source[0])
                    try:
                        _j, _j, _j, _j, part = \
                            self.get_next_part(parts_iter)
                    except StopIteration:
                        # failed to recover
                        # we did our best
                        return

                else:
                    self.logger.warn("Failed to read from %s (%s, reqid=%s)",
                                     chunk, crto, self.reqid)
                    # no valid source found to recover
                    raise
            else:
                # discard bytes
                if buf and self.discard_bytes:
                    if self.discard_bytes < len(buf):
                        buf = buf[self.discard_bytes:]
                        bytes_consumed += self.discard_bytes
                        self.discard_bytes = 0
                    else:
                        self.discard_bytes -= len(buf)
                        bytes_consumed += len(buf)
                        buf = b''

                # no data returned
                # flush out buffer
                if not data:
                    if buf:
                        bytes_consumed += len(buf)
                        yield buf
                    buf = b''
                    break

                # If buf_size is defined, yield bounded data buffers
                if self.buf_size is not None:
                    while len(buf) >= self.buf_size:
                        read_d = buf[:self.buf_size]
                        buf = buf[self.buf_size:]
                        yield read_d
                        bytes_consumed += len(read_d)
                else:
                    yield buf
                    bytes_consumed += len(buf)
                    buf = b''

                # avoid starvation by yielding
                # every once in a while
                if count % 10 == 0:
                    eventlet_yield()

        if self.perfdata is not None:
            download_end = monotonic_time()
            key = 'download.' + chunk_url
            rawx_perfdata[key] = rawx_perfdata.get(key, 0.0) \
                + download_end - source[0].download_start