Example #1
    def chunk_delete_many(self, chunks, cid=None, **kwargs):
        """
        :rtype: `list` of either `urllib3.response.HTTPResponse`
            or `urllib3.exceptions.HTTPError`, with an extra "chunk"
            attribute.
        """
        headers = kwargs['headers'].copy()
        if cid is not None:
            # This is only to get a nice access log
            headers['X-oio-chunk-meta-container-id'] = cid
        timeout = kwargs.get('timeout')
        if not timeout:
            timeout = urllib3.Timeout(CHUNK_TIMEOUT)

        def __delete_chunk(chunk_):
            try:
                resp = self.http_pool.request("DELETE",
                                              chunk_['url'],
                                              headers=headers,
                                              timeout=timeout)
                resp.chunk = chunk_
                return resp
            except urllib3.exceptions.HTTPError as ex:
                ex.chunk = chunk_
                return ex

        pile = GreenPile(PARALLEL_CHUNKS_DELETE)
        for chunk in chunks:
            pile.spawn(__delete_chunk, chunk)
        resps = [resp for resp in pile if resp]
        return resps
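
Every example on this page follows the same fan-out/collect shape: spawn one greenthread per item into a GreenPile, then iterate the pile to gather the results. A minimal, self-contained sketch (assuming only eventlet is installed; delete_one and the item names are hypothetical stand-ins):

import eventlet
from eventlet import GreenPile

def delete_one(item):
    eventlet.sleep(0)              # stand-in for the HTTP DELETE
    return 'deleted %s' % item

pile = GreenPile(10)               # at most 10 concurrent greenthreads
for item in ('chunk-1', 'chunk-2', 'chunk-3'):
    pile.spawn(delete_one, item)   # schedule one worker per item
results = [r for r in pile if r]   # yields results in spawn order
print(results)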
Example #2
File: base.py Project: parkys1/swift
    def make_requests(self, req, ring, part, method, path, headers,
                      query_string=''):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param req: a request sent by the client
        :param ring: the ring used for finding backend servers
        :param part: the partition number
        :param method: the method to send to the backend
        :param path: the path to send to the backend
        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :param query_string: optional query string to send to the backend
        :returns: a swob.Response object
        """
        start_nodes = ring.get_part_nodes(part)
        nodes = GreenthreadSafeIterator(self.iter_nodes(ring, part))
        pile = GreenPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)
        response = [resp for resp in pile if resp]
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
        statuses, reasons, resp_headers, bodies = zip(*response)
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (self.server_type, req.method),
                                  headers=resp_headers)
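
A note on the unpacking at the end: each spawned _make_request is expected to return a (status, reason, headers, body) tuple, matching the (HTTP_SERVICE_UNAVAILABLE, '', '', '') placeholder appended for missing responses, and zip(*response) transposes the list of tuples into four parallel sequences:

# a worked miniature of the zip(*...) transpose above
response = [(200, 'OK', {}, b''), (503, '', {}, b'')]
statuses, reasons, resp_headers, bodies = zip(*response)
assert statuses == (200, 503)
assert reasons == ('OK', '')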
Example #3
File: reconciler.py Project: 701/swift
def direct_get_container_policy_index(container_ring, account_name,
                                      container_name):
    """
    Talk directly to the primary container servers to figure out the storage
    policy index for a given container.

    :param container_ring: ring in which to look up the container locations
    :param account_name: name of the container's account
    :param container_name: name of the container
    :returns: storage policy index, or None if it couldn't get a quorum
    """
    def _eat_client_exception(*args):
        try:
            return direct_head_container(*args)
        except ClientException as err:
            if err.http_status == 404:
                return err.http_headers
        except (Timeout, socket.error):
            pass

    pile = GreenPile()
    part, nodes = container_ring.get_nodes(account_name, container_name)
    for node in nodes:
        pile.spawn(_eat_client_exception, node, part, account_name,
                   container_name)

    headers = [x for x in pile if x is not None]
    if len(headers) < quorum_size(len(nodes)):
        return
    return best_policy_index(headers)
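
For reference, the quorum_size helper used above is, in Swift, a simple majority, roughly:

def quorum_size(n):
    # smallest number of nodes forming a majority of n
    return (n // 2) + 1

assert quorum_size(3) == 2
assert quorum_size(5) == 3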
Example #4
    def make_requests(self, req, ring, part, method, path, headers,
                      query_string=''):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param req: a request sent by the client
        :param ring: the ring used for finding backend servers
        :param part: the partition number
        :param method: the method to send to the backend
        :param path: the path to send to the backend
        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :param query_string: optional query string to send to the backend
        :returns: a swob.Response object
        """
        start_nodes = ring.get_part_nodes(part)
        nodes = GreenthreadSafeIterator(self.iter_nodes(ring, part))
        pile = GreenPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)
        response = [resp for resp in pile if resp]
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
        statuses, reasons, resp_headers, bodies = zip(*response)
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (self.server_type, req.method),
                                  headers=resp_headers)
Example #5
    def process(self, env, cb):
        event = Event(env)
        if event.event_type == EventTypes.CONTENT_DELETED:
            pile = GreenPile(PARALLEL_CHUNKS_DELETE)

            chunks = []

            for item in event.data:
                if item.get('type') == 'chunks':
                    chunks.append(item)
            if len(chunks):

                def delete_chunk(chunk):
                    resp = None
                    p = urlparse(chunk['id'])
                    try:
                        with Timeout(CHUNK_TIMEOUT):
                            conn = http_connect(p.netloc, 'DELETE', p.path)
                            resp = conn.getresponse()
                            resp.chunk = chunk
                    except (Exception, Timeout) as e:
                        self.logger.warn('error while deleting chunk %s "%s"',
                                         chunk['id'], str(e.message))
                    return resp

                for chunk in chunks:
                    pile.spawn(delete_chunk, chunk)

                resps = [resp for resp in pile if resp]

                for resp in resps:
                    if resp.status != 204:
                        self.logger.warn('failed to delete chunk %s (HTTP %s)',
                                         resp.chunk['id'], resp.status)
        return self.app(env, cb)
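
The with Timeout(CHUNK_TIMEOUT) guard inside delete_chunk is eventlet's timeout context manager: it raises Timeout inside the block if the block runs too long, which is why the except clause catches Timeout alongside Exception. A minimal sketch:

import eventlet
from eventlet import Timeout

try:
    with Timeout(0.1):             # raises Timeout after 0.1s
        eventlet.sleep(1)          # simulated slow I/O
except Timeout:
    print('timed out')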
Example #6
File: ec.py Project: fvennetier/oio-sds
    def get_stream(self):
        range_infos = self._get_range_infos()
        chunk_iter = iter(self.chunks)

        # we use eventlet GreenPool to manage readers
        with utils.ContextPool(self.storage_method.ec_nb_data) as pool:
            pile = GreenPile(pool)
            # we use eventlet GreenPile to spawn readers
            for _j in range(self.storage_method.ec_nb_data):
                pile.spawn(self._get_fragment, chunk_iter, self.storage_method)

            readers = []
            for reader, parts_iter in pile:
                if reader.status in (200, 206):
                    readers.append((reader, parts_iter))
                # TODO log failures?

        # with EC we need at least ec_nb_data valid readers
        if len(readers) >= self.storage_method.ec_nb_data:
            # all readers should return the same Content-Length
            # so just take the headers from one of them
            resp_headers = HeadersDict(readers[0][0].headers)
            fragment_length = int(resp_headers.get('Content-Length'))
            r = [it for reader, it in readers]
            stream = ECStream(self.storage_method, r, range_infos,
                              self.meta_length, fragment_length)
            # start the stream
            stream.start()
            return stream
        else:
            raise exc.OioException("Not enough valid sources to read")
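
utils.ContextPool above is, in oio-sds as in Swift where it originates, essentially a GreenPool usable as a context manager that kills any greenthreads still running on exit, so no reader outlives the with block. A sketch of the helper and its use:

import eventlet
from eventlet import GreenPile, GreenPool

class ContextPool(GreenPool):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, value, traceback):
        # reap whatever is still running when the block exits
        for coro in list(self.coroutines_running):
            coro.kill()

with ContextPool(4) as pool:
    pile = GreenPile(pool)
    pile.spawn(eventlet.sleep, 60)   # killed at block exit, not awaited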
Example #7
    def make_requests(self,
                      req,
                      ring,
                      part,
                      method,
                      path,
                      headers,
                      query_string=''):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :returns: a webob Response object
        """
        start_nodes = ring.get_part_nodes(part)
        nodes = self.iter_nodes(part, start_nodes, ring)
        pile = GreenPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path, head,
                       query_string, self.app.logger.thread_locals)
        response = [resp for resp in pile if resp]
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', ''))
        statuses, reasons, bodies = zip(*response)
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (self.server_type, req.method))
Example #8
File: ec.py Project: uneidel/oio-sds
    def get_stream(self):
        range_infos = self._get_range_infos()
        chunk_iter = iter(self.chunks)

        # we use eventlet GreenPool to manage readers
        with green.ContextPool(self.storage_method.ec_nb_data) as pool:
            pile = GreenPile(pool)
            # we use eventlet GreenPile to spawn readers
            for _j in range(self.storage_method.ec_nb_data):
                pile.spawn(self._get_fragment, chunk_iter, range_infos,
                           self.storage_method)

            readers = []
            for reader, parts_iter in pile:
                if reader.status in (200, 206):
                    readers.append((reader, parts_iter))
                # TODO log failures?

        # with EC we need at least ec_nb_data valid readers
        if len(readers) >= self.storage_method.ec_nb_data:
            # all readers should return the same Content-Length
            # so just take the headers from one of them
            resp_headers = HeadersDict(readers[0][0].headers)
            fragment_length = int(resp_headers.get('Content-Length'))
            read_iterators = [it for _, it in readers]
            stream = ECStream(self.storage_method, read_iterators, range_infos,
                              self.meta_length, fragment_length)
            # start the stream
            stream.start()
            return stream
        else:
            raise exceptions.OioException("Not enough valid sources to read")
Example #9
File: ec.py Project: lanweichang/oio-sds
    def _get_results(self, writers):
        # get the results from writers
        success_chunks = []
        failed_chunks = []

        # we use eventlet GreenPile to read the responses from the writers
        pile = GreenPile(len(writers))

        for writer in writers:
            if writer.failed:
                failed_chunks.append(writer.chunk)
                continue
            pile.spawn(self._get_response, writer)

        def _handle_resp(writer, resp):
            if resp:
                if resp.status == 201:
                    # TODO check checksum in response
                    success_chunks.append(writer.chunk)
                else:
                    writer.failed = True
                    logger.error("Wrong status code from %s (%s)",
                                 writer.chunk, resp.status)
                    writer.chunk['error'] = 'HTTP %s' % resp.status
                    failed_chunks.append(writer.chunk)

        for (writer, resp) in pile:
            _handle_resp(writer, resp)

        quorum = self._check_quorum(success_chunks)

        return success_chunks + failed_chunks, quorum
Example #10
File: ec.py Project: fvennetier/oio-sds
    def _get_results(self, writers):
        # get the results from writers
        success_chunks = []
        failed_chunks = []

        # we use eventlet GreenPile to read the responses from the writers
        pile = GreenPile(len(writers))

        for writer in writers:
            if writer.failed:
                failed_chunks.append(writer.chunk)
                continue
            pile.spawn(self._get_response, writer)

        def _handle_resp(writer, resp):
            if resp:
                if resp.status == 201:
                    # TODO check checksum in response
                    success_chunks.append(writer.chunk)
                else:
                    writer.failed = True
                    logger.error("Wrong status code from %s (%s)",
                                 writer.chunk, resp.status)
                    writer.chunk['error'] = 'HTTP %s' % resp.status
                    failed_chunks.append(writer.chunk)

        for (writer, resp) in pile:
            _handle_resp(writer, resp)

        quorum = self._check_quorum(success_chunks)

        return success_chunks + failed_chunks, quorum
Example #11
def direct_get_container_policy_index(container_ring, account_name,
                                      container_name):
    """
    Talk directly to the primary container servers to figure out the storage
    policy index for a given container.

    :param container_ring: ring in which to look up the container locations
    :param account_name: name of the container's account
    :param container_name: name of the container
    :returns: storage policy index, or None if it couldn't get a majority
    """
    def _eat_client_exception(*args):
        try:
            return direct_head_container(*args)
        except ClientException as err:
            if err.http_status == 404:
                return err.http_headers
        except (Timeout, socket.error):
            pass

    pile = GreenPile()
    part, nodes = container_ring.get_nodes(account_name, container_name)
    for node in nodes:
        pile.spawn(_eat_client_exception, node, part, account_name,
                   container_name)

    headers = [x for x in pile if x is not None]
    if len(headers) < majority_size(len(nodes)):
        return
    return best_policy_index(headers)
Example #12
    def process(self, env, cb):
        event = Event(env)
        if event.event_type == EventTypes.CONTENT_DELETED:
            pile = GreenPile(PARALLEL_CHUNKS_DELETE)
            url = event.env.get('url')
            chunks = []
            content_headers = None
            for item in event.data:
                if item.get('type') == 'chunks':
                    chunks.append(item)
                if item.get("type") == 'contents_headers':
                    content_headers = item
            if len(chunks):

                def delete_chunk(chunk):
                    resp = None
                    p = urlparse(chunk['id'])
                    try:
                        with Timeout(CHUNK_TIMEOUT):
                            conn = http_connect(p.netloc, 'DELETE', p.path)
                            resp = conn.getresponse()
                            resp.chunk = chunk
                    except (Exception, Timeout) as e:
                        self.logger.warn('error while deleting chunk %s "%s"',
                                         chunk['id'], str(e.message))
                    return resp

                def delete_chunk_backblaze(chunks, url, storage_method):
                    meta = {}
                    meta['container_id'] = url['id']
                    chunk_list = []
                    for chunk in chunks:
                        chunk['url'] = chunk['id']
                        chunk_list.append(chunk)
                    key_file = self.conf.get('key_file')
                    backblaze_info = BackblazeUtils.get_credentials(
                        storage_method, key_file)
                    try:
                        BackblazeDeleteHandler(meta, chunk_list,
                                               backblaze_info).delete()
                    except OioException as e:
                        self.logger.warn('delete failed: %s' % str(e))

                chunk_method = content_headers['chunk-method']
                # don't load storage method other than backblaze
                if chunk_method.startswith('backblaze'):
                    storage_method = STORAGE_METHODS.load(chunk_method)
                    delete_chunk_backblaze(chunks, url, storage_method)
                    return self.app(env, cb)
                for chunk in chunks:
                    pile.spawn(delete_chunk, chunk)

                resps = [resp for resp in pile if resp]

                for resp in resps:
                    if resp.status != 204:
                        self.logger.warn('failed to delete chunk %s (HTTP %s)',
                                         resp.chunk['id'], resp.status)
        return self.app(env, cb)
Example #13
 def __init__(self, host='10.28.141.171', port=9410):
     """
     :param host: zipkin collector IP address (default '10.28.141.171')
     :param port: zipkin collector port (default 9410)
     """
     self.host = host
     self.port = port
     self.pile = GreenPile(1)
     self._connect()
Example #14
 def _handle_rawx(self, url, chunks, headers, storage_method, reqid):
     pile = GreenPile(PARALLEL_CHUNKS_DELETE)
     cid = url.get('id')
     for chunk in chunks:
         pile.spawn(self.delete_chunk, chunk, cid, reqid)
     resps = [resp for resp in pile if resp]
     for resp in resps:
         if resp.status != 204:
             self.logger.warn('failed to delete chunk %s (HTTP %s)',
                              resp.chunk['id'], resp.status)
Example #15
File: ec.py Project: fvennetier/oio-sds
    def _get_writers(self):
        # init writers to the chunks
        pile = GreenPile(len(self.meta_chunk))

        # we use eventlet GreenPile to spawn the writers
        for pos, chunk in enumerate(self.meta_chunk):
            pile.spawn(self._get_writer, chunk)

        writers = [w for w in pile]
        return writers
Example #16
File: ec.py Project: lanweichang/oio-sds
    def _get_writers(self):
        # init writers to the chunks
        pile = GreenPile(len(self.meta_chunk))

        # we use eventlet GreenPile to spawn the writers
        for pos, chunk in enumerate(self.meta_chunk):
            pile.spawn(self._get_writer, chunk)

        writers = [w for w in pile]
        return writers
Example #17
 def _handle_rawx(self, url, chunks, headers, storage_method, reqid):
     pile = GreenPile(PARALLEL_CHUNKS_DELETE)
     cid = url.get('id')
     for chunk in chunks:
         pile.spawn(self.delete_chunk, chunk, cid, reqid)
     resps = [resp for resp in pile if resp]
     for resp in resps:
         if resp.status != 204:
             self.logger.warn(
                 'failed to delete chunk %s (HTTP %s)',
                 resp.chunk['id'], resp.status)
Example #18
File: ec.py Project: uneidel/oio-sds
    def _get_writers(self):
        """
        Initialize writers for all chunks of the metachunk and connect them
        """
        pile = GreenPile(len(self.meta_chunk))

        # we use eventlet GreenPile to spawn the writers
        for _pos, chunk in enumerate(self.meta_chunk):
            pile.spawn(self._get_writer, chunk)

        writers = [w for w in pile]
        return writers
Example #19
 def send_to_peers(self, peers, key):
     pile = GreenPile(len(peers))
     # Have the first peer sync to the local cluster
     sync_to_peer = self.my_cluster
     for peer in peers:
         # create thread per peer and send a request
         pile.spawn(self.send_to_peer, peer, sync_to_peer, key)
         # Have the next peer sync to the present peer
         sync_to_peer = peer
     # Collect the results, padding with 503s for any peer that failed
     response = [resp for resp in pile if resp]
     while len(response) < len(peers):
         response.append((HTTP_SERVICE_UNAVAILABLE, None, None))
     return response
Example #20
File: ec.py Project: uneidel/oio-sds
 def frag_iter():
     pile = GreenPile(len(resps))
     while True:
         for resp in resps:
             pile.spawn(_get_frag, resp)
         try:
             with Timeout(self.read_timeout):
                 frag = [frag for frag in pile]
         except (Exception, Timeout):
             # TODO complete error message
             logger.exception('ERROR rebuilding')
             break
         if not all(frag):
             break
         rebuilt_frag = self._reconstruct(frag)
         yield rebuilt_frag
Example #21
File: ec.py Project: fvennetier/oio-sds
 def frag_iter():
     pile = GreenPile(len(resps))
     while True:
         for resp in resps:
             pile.spawn(_get_frag, resp)
         try:
             with Timeout(self.read_timeout):
                 frag = [frag for frag in pile]
         except (Exception, Timeout):
             # TODO complete error message
             self.logger.exception('ERROR rebuilding')
             break
         if not all(frag):
             break
         rebuilt_frag = self._reconstruct(frag)
         yield rebuilt_frag
Example #22
 def __init__(self, host='127.0.0.1', port=9410):
     """
     :param host: zipkin collector IP address (default '127.0.0.1')
     :param port: zipkin collector port (default 9410)
     """
     self.host = host
     self.port = port
     self.pile = GreenPile(1)
     self._connect()
Example #23
class ZipkinClient(object):

    def __init__(self, host='127.0.0.1', port=9410):
        """
        :param host: zipkin collector IP address (default '127.0.0.1')
        :param port: zipkin collector port (default 9410)
        """
        self.host = host
        self.port = port
        self.pile = GreenPile(1)
        self._connect()

    def _connect(self):
        socket = TSocket.TSocket(self.host, self.port)
        self.transport = TTransport.TFramedTransport(socket)
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport,
                                                   False, False)
        self.scribe_client = scribe.Client(protocol)
        try:
            self.transport.open()
        except TTransport.TTransportException as e:
            warnings.warn(e.message)

    def _build_message(self, thrift_obj):
        trans = TTransport.TMemoryBuffer()
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(trans=trans)
        thrift_obj.write(protocol)
        return base64.b64encode(trans.getvalue())

    def send_to_collector(self, span):
        self.pile.spawn(self._send, span)

    def _send(self, span):
        log_entry = scribe.LogEntry(CATEGORY, self._build_message(span))
        try:
            self.scribe_client.Log([log_entry])
        except Exception as e:
            msg = 'ZipkinClient send error %s' % str(e)
            warnings.warn(msg)
            self._connect()

    def close(self):
        self.transport.close()
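
GreenPile(1) gives this client a single-slot background sender: the pile's internal pool has one slot, so at most one _send runs at a time and a second spawn cooperatively waits for the first to finish, keeping spans ordered. A small sketch of that behavior (assuming eventlet):

import eventlet
from eventlet import GreenPile

def send(i):
    eventlet.sleep(0.01)           # simulated network I/O
    return i

pile = GreenPile(1)                # one sender at a time
for i in range(3):
    pile.spawn(send, i)            # later spawns wait for a free slot
assert list(pile) == [0, 1, 2]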
Example #24
class ZipkinClient(object):
    def __init__(self, host='10.28.141.171', port=9410):
        """
        :param host: zipkin collector IP address (default '10.28.141.171')
        :param port: zipkin collector port (default 9410)
        """
        self.host = host
        self.port = port
        self.pile = GreenPile(1)
        self._connect()

    def _connect(self):
        socket = TSocket.TSocket(self.host, self.port)
        self.transport = TTransport.TFramedTransport(socket)
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport, False,
                                                   False)
        self.scribe_client = scribe.Client(protocol)
        try:
            self.transport.open()
        except TTransport.TTransportException as e:
            warnings.warn(e.message)

    def _build_message(self, thrift_obj):
        trans = TTransport.TMemoryBuffer()
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(trans=trans)
        thrift_obj.write(protocol)
        return base64.b64encode(trans.getvalue())

    def send_to_collector(self, span):
        self.pile.spawn(self._send, span)

    def _send(self, span):
        log_entry = scribe.LogEntry(CATEGORY, self._build_message(span))
        try:
            self.scribe_client.Log([log_entry])
        except Exception as e:
            msg = 'ZipkinClient send error %s' % str(e)
            warnings.warn(msg)
            self._connect()

    def close(self):
        self.transport.close()
Example #25
    def _get_put_connections(self, req, nodes, partition, outgoing_headers,
                             policy, expect):
        """
        Establish connections to storage nodes for PUT request
        """
        obj_ring = policy.object_ring
        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))

        for nheaders in outgoing_headers:
            if expect:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]

        return conns
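
GreenthreadSafeIterator matters here because all the spawned _connect_put_node calls pull nodes from one shared iterator. Swift's wrapper is essentially a lock around next(); a sketch:

import eventlet

class GreenthreadSafeIterator(object):
    def __init__(self, unsafe_iterable):
        self.unsafe_iter = iter(unsafe_iterable)
        self.semaphore = eventlet.semaphore.Semaphore(1)

    def __iter__(self):
        return self

    def __next__(self):
        # serialize access so two greenthreads never race on next()
        with self.semaphore:
            return next(self.unsafe_iter)

    next = __next__                # Python 2 compatibility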
Example #26
File: base.py Project: WIZARD-CXY/swift
    def make_requests(self, req, ring, part, method, path, headers, query_string=""):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :returns: a swob.Response object
        """
        start_nodes = ring.get_part_nodes(part)
        nodes = self.iter_nodes(part, start_nodes, ring)
        pile = GreenPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path, head, query_string, self.app.logger.thread_locals)
        response = [resp for resp in pile if resp]
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, "", ""))
        statuses, reasons, bodies = zip(*response)
        return self.best_response(req, statuses, reasons, bodies, "%s %s" % (self.server_type, req.method))
Example #27
File: obj.py Project: igusher/swift
    def _get_put_connections(self, req, nodes, partition, outgoing_headers,
                             policy, expect):
        """
        Establish connections to storage nodes for PUT request
        """
        obj_ring = policy.object_ring
        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))

        for nheaders in outgoing_headers:
            if expect:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]

        return conns
Example #28
 def fragment_payload_iter():
     # We need a fragment from each connection, so best to
     # use a GreenPile to keep them ordered and in sync
     pile = GreenPile(len(responses))
     while True:
         for resp in responses:
             pile.spawn(_get_one_fragment, resp)
         try:
             with Timeout(self.node_timeout):
                 fragment_payload = [fragment for fragment in pile]
         except (Exception, Timeout):
             self.logger.exception(
                 _("Error trying to rebuild %(path)s " "policy#%(policy)d frag#%(frag_index)s"),
                 {"path": path, "policy": policy, "frag_index": frag_index},
             )
             break
         if not all(fragment_payload):
             break
         rebuilt_fragment = self._reconstruct(policy, fragment_payload, frag_index)
         yield rebuilt_fragment
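
The ordering guarantee the comment relies on is real: a GreenPile yields results in spawn order, not completion order, so fragment i of every response lines up. A demonstration (assuming eventlet):

import eventlet
from eventlet import GreenPile

def work(delay, label):
    eventlet.sleep(delay)
    return label

pile = GreenPile(3)
pile.spawn(work, 0.3, 'first')     # slowest, but spawned first
pile.spawn(work, 0.1, 'second')
pile.spawn(work, 0.0, 'third')
assert list(pile) == ['first', 'second', 'third']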
Example #29
    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)', parsed_args)
        digits = self.app.client_manager.get_meta1_digits()
        workers_count = parsed_args.workers

        conf = {'namespace': self.app.client_manager.namespace}
        if parsed_args.proxy:
            conf.update({'proxyd_url': parsed_args.proxy})
        else:
            ns_conf = load_namespace_conf(conf['namespace'])
            proxy = ns_conf.get('proxy')
            conf.update({'proxyd_url': proxy})

        workers = list()
        with green.ContextPool(workers_count) as pool:
            pile = GreenPile(pool)
            prefix_queue = Queue(16)

            # Prepare some workers
            for i in range(workers_count):
                w = WarmupWorker(conf, self.log)
                workers.append(w)
                pile.spawn(w.run, prefix_queue)

            # Feed the queue
            trace_increment = 0.01
            trace_next = trace_increment
            sent, total = 0, float(count_prefixes(digits))
            for prefix in generate_prefixes(digits):
                sent += 1
                prefix_queue.put(prefix)
                # Display the progress
                ratio = float(sent) / total
                if ratio >= trace_next:
                    self.log.info("... %d%%", int(ratio * 100.0))
                    trace_next += trace_increment

            self.log.debug("Send the termination marker")
            prefix_queue.join()

        self.log.info("All the workers are done")
Example #30
def main():
    args = parser.parse_args()
    policy = get_policy(args)
    try:
        account, container, obj = utils.split_path(args.path,
                                                   minsegs=3,
                                                   maxsegs=3,
                                                   rest_with_last=True)
    except ValueError:
        return 'ERROR: invalid path %r' % args.path
    headers = {
        'X-Backend-Storage-Policy-Index': int(policy),
    }
    part, nodes = policy.object_ring.get_nodes(account, container, obj)

    pile = GreenPile(len(nodes))
    for node in nodes:
        pile.spawn(_get_response, node, part, args.path, headers, policy)

    responses = [resp for resp in pile]
    # TODO: if we don't have enough responses to rebuild bail

    h = hashlib.md5()
    while True:
        for resp in responses:
            pile.spawn(_get_one_fragment, resp, resp.fragment_size)
        fragment_payload = [fragment for fragment in pile if fragment]
        if not fragment_payload:
            break
        segment = policy.pyeclib_driver.decode(fragment_payload)
        h.update(segment)
        if args.output:
            sys.stdout.write(segment)
    sys.stderr.write('%s\n' % h.hexdigest())
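
The decode step relies on the erasure-coding property that any k of the k+m fragments recover the payload. A sketch of the same step with pyeclib used directly (assuming pyeclib is installed; the parameters are illustrative, not the script's actual policy):

from pyeclib.ec_iface import ECDriver

driver = ECDriver(k=4, m=2, ec_type='liberasurecode_rs_vand')
fragments = driver.encode(b'some object data')   # returns k + m fragments
# any k fragments suffice to reconstruct the original payload
assert driver.decode(fragments[:4]) == b'some object data'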
Example #31
File: ec.py Project: uneidel/oio-sds
    def _get_results(self, writers):
        # get the results from writers
        success_chunks = []
        failed_chunks = []

        # we use eventlet GreenPile to read the responses from the writers
        pile = GreenPile(len(writers))

        for writer in writers:
            if writer.failed:
                failed_chunks.append(writer.chunk)
                continue
            pile.spawn(self._get_response, writer)

        def _handle_resp(writer, resp):
            if resp:
                if resp.status == 201:
                    checksum = resp.getheader(chunk_headers['chunk_hash'])
                    if checksum and \
                            checksum.lower() != writer.checksum.hexdigest():
                        writer.chunk['error'] = \
                            "checksum mismatch: %s (local), %s (rawx)" % \
                            (checksum.lower(), writer.checksum.hexdigest())
                        failed_chunks.append(writer.chunk)
                    else:
                        success_chunks.append(writer.chunk)
                else:
                    logger.error("Wrong status code from %s (%s)",
                                 writer.chunk, resp.status)
                    writer.chunk['error'] = 'resp: HTTP %s' % resp.status
                    failed_chunks.append(writer.chunk)
            else:
                failed_chunks.append(writer.chunk)

        for (writer, resp) in pile:
            _handle_resp(writer, resp)

        self.quorum_or_fail(success_chunks, failed_chunks)

        return success_chunks + failed_chunks
Example #32
File: ec.py Project: fvennetier/oio-sds
    def rebuild(self):
        pile = GreenPile(len(self.meta_chunk))

        nb_data = self.storage_method.ec_nb_data

        headers = {}
        for chunk in self.meta_chunk:
            pile.spawn(self._get_response, chunk, headers)

        resps = []
        for resp in pile:
            if not resp:
                continue
            resps.append(resp)
            if len(resps) >= self.storage_method.ec_nb_data:
                break
        else:
            logger.error('Unable to read enough valid sources to rebuild')
            raise exc.UnrecoverableContent('Unable to rebuild chunk')

        rebuild_iter = self._make_rebuild_iter(resps[:nb_data])
        return rebuild_iter
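
The for/else on the response loop is doing real work: the else branch runs only if the loop completes without break, i.e. when fewer than ec_nb_data valid responses arrived. In miniature:

resps = []
for resp in (None, 'frag-1', 'frag-2'):
    if not resp:
        continue
    resps.append(resp)
    if len(resps) >= 2:            # pretend ec_nb_data == 2
        break                      # enough sources; skip the else
else:
    raise RuntimeError('not enough valid sources')
print(resps)                       # ['frag-1', 'frag-2']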
Example #33
File: ec.py Project: lanweichang/oio-sds
    def rebuild(self):
        pile = GreenPile(len(self.meta_chunk))

        nb_data = self.storage_method.ec_nb_data

        headers = {}
        for chunk in self.meta_chunk:
            pile.spawn(self._get_response, chunk, headers)

        resps = []
        for resp in pile:
            if not resp:
                continue
            resps.append(resp)
            if len(resps) >= self.storage_method.ec_nb_data:
                break
        else:
            logger.error('Unable to read enough valid sources to rebuild')
            raise exc.UnrecoverableContent('Unable to rebuild chunk')

        rebuild_iter = self._make_rebuild_iter(resps[:nb_data])
        return rebuild_iter
Example #34
    def handle_object_delete(self, event):
        """
        Handle object deletion.
        Delete the chunks of the object.
        :param event:
        """
        self.logger.debug('worker handle object delete')
        pile = GreenPile(PARALLEL_CHUNKS_DELETE)

        chunks = []

        for item in event.get('data'):
            if item.get('type') == 'chunks':
                chunks.append(item)
        if not len(chunks):
            self.logger.warn('No chunks found in event data')
            return

        def delete_chunk(chunk):
            resp = None
            try:
                with Timeout(CHUNK_TIMEOUT):
                    resp = self.session.delete(chunk['id'])
            except (Exception, Timeout) as e:
                self.logger.warn('error while deleting chunk %s "%s"',
                                 chunk['id'], str(e.message))
            return resp

        for chunk in chunks:
            pile.spawn(delete_chunk, chunk)

        resps = [resp for resp in pile if resp]

        for resp in resps:
            if resp.status_code == 204:
                self.logger.debug('deleted chunk %s' % resp.url)
            else:
                self.logger.warn('failed to delete chunk %s' % resp.url)
Example #35
    def handle_object_delete(self, event):
        """
        Handle object deletion.
        Delete the chunks of the object.
        :param event:
        """
        self.logger.debug('worker "%s" handle object delete', self.name)
        pile = GreenPile(PARALLEL_CHUNKS_DELETE)

        chunks = []

        for item in event.get('data'):
            if item.get('type') == 'chunks':
                chunks.append(item)
        if not len(chunks):
            self.logger.warn('No chunks found in event data')
            return

        def delete_chunk(chunk):
            resp = None
            try:
                with Timeout(CHUNK_TIMEOUT):
                    resp = self.session.delete(chunk['id'])
            except (Exception, Timeout) as e:
                self.logger.warn('error while deleting chunk %s "%s"',
                                 chunk['id'], str(e.message))
            return resp

        for chunk in chunks:
            pile.spawn(delete_chunk, chunk)

        resps = [resp for resp in pile if resp]

        for resp in resps:
            if resp.status_code == 204:
                self.logger.info('deleted chunk %s' % resp.url)
            else:
                self.logger.warn('failed to delete chunk %s' % resp.url)
Example #36
 def __init__(self, rate, auth, total_ops=None, data_size=1024*1024, container='nacho', base='item-', queue_size=None):
     self.rate = rate
     self.dist = NegExp(self.rate)
     self.data_size = data_size
     self.ops = 0
     self.errors = 0
     self.container = container
     self.base = base
     self.outstanding = 0
     self.total_ops = total_ops
     self.running = False
     if queue_size:
         self.pile = GreenPile(queue_size)
     else:
         self.pile = GreenPile()
     self._done = Event()
     # self.client = Connection(authurl='http://localhost:8080/auth/v1.0', user='******', key='testing')
     self.storage_url, self.token = get_auth(auth, 'test:tester', 'testing')
     self.data = "x" * data_size
     LOG.info("Object-size=%s" % (len(self.data)))
     LOG.info("Object-base=%s" % base)
     if total_ops:
         LOG.info("This benchmark will take aprox %.0f seconds" % (total_ops / rate))
Example #37
    def copy_make_requests(self, account, req, ring, part, method, path,
                           headers, query_string=''):

        start_nodes = ring.get_part_nodes(part)
        nodes = self.iter_nodes(part, start_nodes, ring)
        pile = GreenPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, account, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)

        if req.GET.get('async') == 'true':
            statuses = (201,)
            reasons = ('Created',)
            tx_id = req.environ.get('HTTP_X_TRANS_ID')
            bodies = ('{"status": "0", "msg": "%s"}' % (tx_id),)

        else:
            response = [resp for resp in pile if resp] 
            statuses, reasons, bodies = zip(*response)

        return self.best_response(req, statuses, reasons, bodies,
                  '%s %s' % (self.server_type, req.method))
Example #38
    def process(self, env, cb):
        event = Event(env)
        if event.event_type == EventTypes.CONTENT_DELETED:
            pile = GreenPile(PARALLEL_CHUNKS_DELETE)

            chunks = []

            for item in event.data:
                if item.get('type') == 'chunks':
                    chunks.append(item)
            if len(chunks):
                def delete_chunk(chunk):
                    resp = None
                    p = urlparse(chunk['id'])
                    try:
                        with Timeout(CHUNK_TIMEOUT):
                            conn = http_connect(
                                p.hostname, p.port, 'DELETE', p.path)
                            resp = conn.getresponse()
                            resp.chunk = chunk
                    except (Exception, Timeout) as e:
                        self.logger.warn(
                            'error while deleting chunk %s "%s"',
                            chunk['id'], str(e.message))
                    return resp

                for chunk in chunks:
                    pile.spawn(delete_chunk, chunk)

                resps = [resp for resp in pile if resp]

                for resp in resps:
                    if resp.status != 204:
                        self.logger.warn(
                            'failed to delete chunk %s (HTTP %s)',
                            resp.chunk['id'], resp.status)
        return self.app(env, cb)
Example #39
File: agent.py Project: lzmths/oio-sds
    def handle_object_delete(self, event):
        """
        Handle object deletion.
        Delete the chunks of the object.
        :param event:
        """
        self.logger.debug('worker "%s" handle object delete', self.name)
        pile = GreenPile(PARALLEL_CHUNKS_DELETE)

        chunks = []

        for item in event.get("data"):
            if item.get("type") == "chunks":
                chunks.append(item)
        if not len(chunks):
            self.logger.warn("No chunks found in event data")
            return

        def delete_chunk(chunk):
            resp = None
            try:
                with Timeout(CHUNK_TIMEOUT):
                    resp = self.session.delete(chunk["id"])
            except (Exception, Timeout) as e:
                self.logger.exception(e)
            return resp

        for chunk in chunks:
            pile.spawn(delete_chunk, chunk)

        resps = [resp for resp in pile if resp]

        for resp in resps:
            if resp.status_code == 204:
                self.logger.info("deleted chunk %s" % resp.url)
            else:
                self.logger.warn("failed to delete chunk %s" % resp.url)
Example #40
    def make_requests(self, req, ring, part, method, path, headers,
                      query_string=''):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.
        
        If a primary node fails, other nodes holding replicas are tried.

        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :returns: a swob.Response object
        """
        # Get the nodes again from the partition passed in by controller/account.py
        start_nodes = ring.get_part_nodes(part)
        
        nodes = self.iter_nodes(part, start_nodes, ring)
        
        # Use eventlet to run the _make_request calls concurrently
        pile = GreenPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)
        
        # Collect the request results
        response = [resp for resp in pile if resp]
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', ''))
        statuses, reasons, bodies = zip(*response)
        
        # Pick the best response to return
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (self.server_type, req.method))
Example #41
 def fragment_payload_iter():
     # We need a fragment from each connection, so best to
     # use a GreenPile to keep them ordered and in sync
     pile = GreenPile(len(responses))
     while True:
         for resp in responses:
             pile.spawn(_get_one_fragment, resp)
         try:
             with Timeout(self.node_timeout):
                 fragment_payload = [fragment for fragment in pile]
         except (Exception, Timeout):
             self.logger.exception(
                 _("Error trying to rebuild %(path)s "
                   "policy#%(policy)d frag#%(frag_index)s"), {
                       'path': path,
                       'policy': policy,
                       'frag_index': frag_index,
                   })
             break
         if not all(fragment_payload):
             break
         rebuilt_fragment = self._reconstruct(policy, fragment_payload,
                                              frag_index)
         yield rebuilt_fragment
Example #42
def add_to_reconciler_queue(container_ring,
                            account,
                            container,
                            obj,
                            obj_policy_index,
                            obj_timestamp,
                            op,
                            force=False,
                            conn_timeout=5,
                            response_timeout=15):
    """
    Add an object to the container reconciler's queue. This will cause the
    container reconciler to move it from its current storage policy index to
    the correct storage policy index.

    :param container_ring: container ring
    :param account: the misplaced object's account
    :param container: the misplaced object's container
    :param obj: the misplaced object
    :param obj_policy_index: the policy index where the misplaced object
                             currently is
    :param obj_timestamp: the misplaced object's X-Timestamp. We need this to
                          ensure that the reconciler doesn't overwrite a newer
                          object with an older one.
    :param op: the method of the operation (DELETE or PUT)
    :param force: over-write queue entries newer than obj_timestamp
    :param conn_timeout: max time to wait for connection to container server
    :param response_timeout: max time to wait for response from container
                             server

    :returns: .misplaced_object container name, False on failure. "Success"
              means a majority of containers got the update.
    """
    container_name = get_reconciler_container_name(obj_timestamp)
    object_name = get_reconciler_obj_name(obj_policy_index, account, container,
                                          obj)
    if force:
        # this allows an operator to re-enqueue an object that has
        # already been popped from the queue to be reprocessed, but
        # could potentially prevent out of order updates from making it
        # into the queue
        x_timestamp = Timestamp.now().internal
    else:
        x_timestamp = obj_timestamp
    q_op_type = get_reconciler_content_type(op)
    headers = {
        'X-Size': 0,
        'X-Etag': obj_timestamp,
        'X-Timestamp': x_timestamp,
        'X-Content-Type': q_op_type,
    }

    def _check_success(*args, **kwargs):
        try:
            direct_put_container_object(*args, **kwargs)
            return 1
        except (ClientException, Timeout, socket.error):
            return 0

    pile = GreenPile()
    part, nodes = container_ring.get_nodes(MISPLACED_OBJECTS_ACCOUNT,
                                           container_name)
    for node in nodes:
        pile.spawn(_check_success,
                   node,
                   part,
                   MISPLACED_OBJECTS_ACCOUNT,
                   container_name,
                   object_name,
                   headers=headers,
                   conn_timeout=conn_timeout,
                   response_timeout=response_timeout)

    successes = sum(pile)
    if successes >= majority_size(len(nodes)):
        return container_name
    else:
        return False
Example #43
    def recovery_container(self, method, send_intermediate_resp=False):
        """Make connections and get response"""
        self.logger.info("running recovery_container")
        global finish_recovery_flag
        try:
            client_timeout = int(self.conf.get('CLIENT_TIMEOUT', 20))
        except Exception as err:
            self.logger.info("Error is %s" % err)

        # Check destinations to send
        if not self.container_node_list:
            self.logger.info("No Container Node List Found %s" % \
                self.container_node_list)
            raise NodeList("No node list found")

        self.logger.info("self.container_node_list: not empty")
        node_iter = GreenthreadSafeIterator(self.container_node_list)
        self.logger.info("pile size:%s" % self.pile_size)

        # Make threads corresponding to destinations
        pile = GreenPile(self.pile_size)
        for node in node_iter:
            nheaders = {}
            #self.pile.spawn(self._connect_put_node, node, \
            pile.spawn(self._connect_put_node, node, \
                nheaders, self.logger.thread_locals, method)

        eventlet.sleep(3)
        # for context switches

        # Get alive connections
        conns = [conn for conn in pile if conn]
        self.logger.info("%s/%s connections made " % (conns, self.pile_size))

        statuses = [conn.resp.status for conn in conns if conn.resp]
        try:
            with ContextPool(self.pile_size) as pool:
                for conn in conns:
                    conn.failed = False
                    key = "%s:%s" % (conn.node['ip'], conn.node['port'])

                    self.logger.info("Reading data")
                    conn.reader = pickle.dumps(self.dictionary_new[key])

                    self.logger.debug("Sending Data List: %s" % \
                        self.dictionary_new[key])
                    pool.spawn(self._send_file, conn, '/recovery_process')
                pool.waitall()

            # Keep only the alive connections
            conns = [conn for conn in conns if not conn.failed]

            self.logger.info("Alive Connections: %s" % conns)
        except ChunkReadTimeout as err:
            self.logger.warn(_('ERROR Client read timeout (%ss)'), err.seconds)
            self.logger.increment('client_timeout')
            conn.close()
        except ChunkWriteTimeout as err:
            self.logger.warn(_('ERROR  write timeout (%ss)'), err.seconds)
            self.logger.increment('client_timeout')
            conn.close()
        except Exception as ex:
            self.logger.error("Exception raised: %s" % ex)
            conn.close()
        except Timeout as ex:
            self.logger.info("Timeout occured : %s" % ex)
            self.logger.error(_('ERROR Exception causing client disconnect'))
            conn.close()

        eventlet.sleep(3)
        # for context switches

        statuses, reasons, bodies, comp_list_sent = \
            self._get_put_responses(conns, self.container_node_list, send_intermediate_resp)
        self.logger.info("Returned status:%s,reason:%s,bodies:%s" % \
            (statuses,reasons,bodies))

        eventlet.sleep(3)
        # for context switches

        if send_intermediate_resp:
            self.logger.info("send_intermediate_resp")
            #finish_recovery_flag = True
            # set flag for finish recovery to send end_strm

        return (statuses, reasons, bodies, comp_list_sent)
Example #44
File: reconciler.py Project: 701/swift
def add_to_reconciler_queue(container_ring, account, container, obj,
                            obj_policy_index, obj_timestamp, op,
                            force=False, conn_timeout=5, response_timeout=15):
    """
    Add an object to the container reconciler's queue. This will cause the
    container reconciler to move it from its current storage policy index to
    the correct storage policy index.

    :param container_ring: container ring
    :param account: the misplaced object's account
    :param container: the misplaced object's container
    :param obj: the misplaced object
    :param obj_policy_index: the policy index where the misplaced object
                             currently is
    :param obj_timestamp: the misplaced object's X-Timestamp. We need this to
                          ensure that the reconciler doesn't overwrite a newer
                          object with an older one.
    :param op: the method of the operation (DELETE or PUT)
    :param force: over-write queue entries newer than obj_timestamp
    :param conn_timeout: max time to wait for connection to container server
    :param response_timeout: max time to wait for response from container
                             server

    :returns: .misplaced_object container name, False on failure. "Success"
              means a quorum of containers got the update.
    """
    container_name = get_reconciler_container_name(obj_timestamp)
    object_name = get_reconciler_obj_name(obj_policy_index, account,
                                          container, obj)
    if force:
        # this allows an operator to re-enqueue an object that has
        # already been popped from the queue to be reprocessed, but
        # could potentially prevent out of order updates from making it
        # into the queue
        x_timestamp = Timestamp(time.time()).internal
    else:
        x_timestamp = obj_timestamp
    q_op_type = get_reconciler_content_type(op)
    headers = {
        'X-Size': 0,
        'X-Etag': obj_timestamp,
        'X-Timestamp': x_timestamp,
        'X-Content-Type': q_op_type,
    }

    def _check_success(*args, **kwargs):
        try:
            direct_put_container_object(*args, **kwargs)
            return 1
        except (ClientException, Timeout, socket.error):
            return 0

    pile = GreenPile()
    part, nodes = container_ring.get_nodes(MISPLACED_OBJECTS_ACCOUNT,
                                           container_name)
    for node in nodes:
        pile.spawn(_check_success, node, part, MISPLACED_OBJECTS_ACCOUNT,
                   container_name, object_name, headers=headers,
                   conn_timeout=conn_timeout,
                   response_timeout=response_timeout)

    successes = sum(pile)
    if successes >= quorum_size(len(nodes)):
        return container_name
    else:
        return False
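
The sum(pile) idiom at the end works because each _check_success worker returns 1 on success and 0 on failure, so summing the pile's results counts successful updates directly:

from eventlet import GreenPile

pile = GreenPile(3)
for ok in (True, False, True):
    pile.spawn(int, ok)            # int(True) == 1, int(False) == 0
assert sum(pile) == 2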
Example #45
File: obj.py Project: absolutarin/swift
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)

        # pass the policy index to storage nodes via req header
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        if not containers:
            return HTTPNotFound(request=req)

        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        partition, nodes = obj_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)

        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            # make sure proxy-server uses the right policy index
            _headers = {'X-Backend-Storage-Policy-Index': policy_index,
                        'X-Newest': 'True'}
            hreq = Request.blank(req.path_info, headers=_headers,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(
                hreq, _('Object'), obj_ring, partition,
                hreq.swift_entity_path)

        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req_timestamp = Timestamp(req.headers['X-Timestamp'])
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                        hresp.environ['swift_x_timestamp'] >= req_timestamp:
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
            req.headers['X-Timestamp'] = req_timestamp.internal
        else:
            req.headers['X-Timestamp'] = Timestamp(time.time()).internal

        if object_versions and not req.environ.get('swift_versioned_copy'):
            if hresp.status_int != HTTP_NOT_FOUND:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            ver, acct, _rest = req.split_path(2, 3, True)
            src_account_name = req.headers.get('X-Copy-From-Account', None)
            if src_account_name:
                src_account_name = check_account_format(req, src_account_name)
            else:
                src_account_name = acct
            src_container_name, src_obj_name = check_copy_from_header(req)
            source_header = '/%s/%s/%s/%s' % (ver, src_account_name,
                            src_container_name, src_obj_name)
            source_req = req.copy_get()

            # make sure the source request uses its container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            orig_account_name = self.account_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            self.account_name = src_account_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)

            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            self.account_name = orig_account_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag

            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if 'X-Copy-From-Account' in sink_req.headers:
                del sink_req.headers['X-Copy-From-Account']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                # post-as-copy: ignore new sysmeta, copy existing sysmeta
                condition = lambda k: is_sys_meta('object', k)
                remove_items(sink_req.headers, condition)
                copy_header_subset(source_resp, sink_req, condition)
            else:
                # copy/update existing sysmeta and user meta
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)

            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        req, delete_at_container, delete_at_part, \
            delete_at_nodes = self._config_obj_expiration(req)

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get('transfer-encoding', '')
        chunked = ('chunked' in te)

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and '*' in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(
                    _('Object PUT returning 412, %(statuses)r'),
                    {'statuses': statuses})
                return HTTPPreconditionFailed(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': min_conns})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': min_conns})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(
                _('Client disconnected without sending enough data'))
            self.app.logger.increment('client_disconnects')
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
                                                                   nodes)

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies,
                                  _('Object PUT'), etag=etag)
        if source_header:
            acct, path = source_header.split('/', 3)[2:4]
            resp.headers['X-Copied-From-Account'] = quote(acct)
            resp.headers['X-Copied-From'] = quote(path)
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
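Each PUT path in these examples frames body chunks for HTTP/1.1 chunked
transfer-encoding by hand. A self-contained sketch of the framing used by
conn.queue.put() above:

def frame_chunk(chunk):
    # hex-encoded length, CRLF, payload, CRLF; the empty frame
    # '0\r\n\r\n' put after StopIteration terminates the body
    return '%x\r\n%s\r\n' % (len(chunk), chunk)

assert frame_chunk('abc') == '3\r\nabc\r\n'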
Example #46
0
    def stream(self, source, size=None):
        bytes_transferred = 0
        meta_chunk = self.meta_chunk
        meta_checksum = hashlib.md5()
        pile = GreenPile(len(meta_chunk))
        failed_chunks = []
        current_conns = []

        for chunk in meta_chunk:
            pile.spawn(self._connect_put, chunk)

        for conn, chunk in pile:
            if not conn:
                failed_chunks.append(chunk)
            else:
                current_conns.append(conn)

        self.quorum_or_fail([co.chunk for co in current_conns], failed_chunks)

        bytes_transferred = 0
        try:
            with green.ContextPool(len(meta_chunk)) as pool:
                for conn in current_conns:
                    conn.failed = False
                    conn.queue = Queue(io.PUT_QUEUE_DEPTH)
                    pool.spawn(self._send_data, conn)

                while True:
                    if size is not None:
                        remaining_bytes = size - bytes_transferred
                        if io.WRITE_CHUNK_SIZE < remaining_bytes:
                            read_size = io.WRITE_CHUNK_SIZE
                        else:
                            read_size = remaining_bytes
                    else:
                        read_size = io.WRITE_CHUNK_SIZE
                    with green.SourceReadTimeout(self.read_timeout):
                        try:
                            data = source.read(read_size)
                        except (ValueError, IOError) as e:
                            raise SourceReadError(str(e))
                        if len(data) == 0:
                            for conn in current_conns:
                                if not conn.failed:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    self.checksum.update(data)
                    meta_checksum.update(data)
                    bytes_transferred += len(data)
                    # copy current_conns to be able to remove a failed conn
                    for conn in current_conns[:]:
                        if not conn.failed:
                            conn.queue.put('%x\r\n%s\r\n' % (len(data), data))
                        else:
                            current_conns.remove(conn)
                            failed_chunks.append(conn.chunk)

                    self.quorum_or_fail([co.chunk for co in current_conns],
                                        failed_chunks)

                for conn in current_conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()

        except green.SourceReadTimeout:
            logger.warn('Source read timeout')
            raise
        except SourceReadError:
            logger.warn('Source read error')
            raise
        except Timeout as to:
            logger.exception('Timeout writing data')
            raise exc.OioTimeout(to)
        except Exception:
            logger.exception('Exception writing data')
            raise

        success_chunks = []

        for conn in current_conns:
            if conn.failed:
                failed_chunks.append(conn.chunk)
                continue
            pile.spawn(self._get_response, conn)

        meta_checksum_hex = meta_checksum.hexdigest()
        for (conn, resp) in pile:
            if resp:
                self._handle_resp(conn, resp, meta_checksum_hex,
                                  success_chunks, failed_chunks)
        self.quorum_or_fail(success_chunks, failed_chunks)

        for chunk in success_chunks:
            chunk["size"] = bytes_transferred
            chunk["hash"] = meta_checksum_hex

        return bytes_transferred, meta_checksum_hex, success_chunks
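quorum_or_fail() is called above but not shown in these snippets; a
plausible sketch, assuming it raises once the successfully written chunks
can no longer reach the write quorum (the exception type and the quorum
source are assumptions, not taken from the original code):

def quorum_or_fail(successes, failures, quorum):
    """Raise when fewer than 'quorum' chunks have been written."""
    if len(successes) < quorum:
        raise Exception(
            'RAWX write failure: %d/%d chunks written, quorum is %d' % (
                len(successes), len(successes) + len(failures), quorum))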
Example #47
0
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        try:
            ml = req.message_length()
        except ValueError as e:
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body=str(e))
        except AttributeError as e:
            return HTTPNotImplemented(request=req, content_type='text/plain',
                                      body=str(e))
        if ml is not None and ml > MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)
        if 'x-delete-after' in req.headers:
            try:
                x_delete_after = int(req.headers['x-delete-after'])
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-After')
            req.headers['x-delete-at'] = normalize_delete_at_timestamp(
                time.time() + x_delete_after)
        partition, nodes = self.app.object_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)

        #### CHANGED CODE ####
        d = dict()
        logging.info("===Original nodes===%s", str(nodes))
        temp_nodes = []
        f = open("/home/swift/spindowndevices", "r")
        sdlist = f.read().strip().split("\n")
        logging.info("===Spun down devices===:%s", str(sdlist))
        f.close()

        # Map each spun-down node's ip to the list of its spun-down devices.
        sddict = dict()
        for entry in sdlist[1:]:
            ip, device = entry.split(":")[0], entry.split(":")[1]
            sddict.setdefault(ip, []).append(device)

        upnodes = []
        for item in nodes:
            if item['ip'] not in sddict or \
                    item['device'] not in sddict[item['ip']]:
                upnodes.append(item)
        logging.info("===SDICT==%s", str(sddict))
        logging.info("====UPNODES===%s", str(upnodes))
        downnodes = [item for item in nodes
                     if item['ip'] in sddict
                     and item['device'] in sddict[item['ip']]]
        temp_nodes = upnodes
        if len(downnodes) > 0:
            d = ast.literal_eval(open("/home/swift/nodes.txt", "r").read())
            for item in downnodes:
                if partition not in d:
                    d[partition] = [item]
                elif item not in d[partition]:
                    # record each spun-down node at most once per partition
                    d[partition].append(item)
            # Persist the mapping only when it changed; writing
            # unconditionally would clobber the file with the empty dict
            # created above whenever no node is spun down.
            fo = open("/home/swift/nodes.txt", "w")
            fo.write(str(d) + "\n")
            fo.close()

        # Code to spin up a device if none are running already.
        # if(len(upnodes) == 0):
        #    dev = nodes[0]['device']
        #    f = open("/home/swift/spindowndevices","r")
        #    s = f.read()
        #    os.system("mount /dev/"+str(dev))
        logging.info('===In controller PUT===:')
        logging.info("===Partition===%s", str(partition))
        nodes = temp_nodes

        logging.info("===Final Nodes===:%s", str(nodes))

        check_ssd()
        #### CHANGED CODE ####

        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(
                hreq, _('Object'), self.app.object_ring, partition,
                hreq.swift_entity_path)
        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req.headers['X-Timestamp'] = \
                    normalize_timestamp(req.headers['x-timestamp'])
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    float(hresp.environ['swift_x_timestamp']) >= \
                        float(req.headers['x-timestamp']):
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
        else:
            req.headers['X-Timestamp'] = normalize_timestamp(time.time())
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get('swift_versioned_copy'):
            if hresp.status_int != HTTP_NOT_FOUND:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = normalize_timestamp(ts_source)
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            src_container_name, src_obj_name = check_copy_from_header(req)
            ver, acct, _rest = req.split_path(2, 3, True)
            if isinstance(acct, unicode):
                acct = acct.encode('utf-8')
            source_header = '/%s/%s/%s/%s' % (ver, acct,
                                              src_container_name, src_obj_name)
            source_req = req.copy_get()
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)
            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if not config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = normalize_delete_at_timestamp(
                    int(req.headers['x-delete-at']))
                if int(x_delete_at) < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req, content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            req.environ.setdefault('swift.log_info', []).append(
                'x-delete-at:%s' % x_delete_at)
            delete_at_container = normalize_delete_at_timestamp(
                int(x_delete_at) /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_container = delete_at_part = delete_at_nodes = None

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(self.app.object_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get('transfer-encoding', '')
        chunked = ('chunked' in te)

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'

        #################################  CHANGED_CODE  ###################################################################
            # Replaced node_iter with nodes in the call below so that the
            # proxy keeps writing to the same devices: without this it gets
            # a new list of nodes from the ring in a different order and
            # connects to the first one.
            pile.spawn(self._connect_put_node, nodes, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)
        #################################  CHANGED_CODE ###################################################################

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and '*' in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(
                    _('Object PUT returning 412, %(statuses)r'),
                    {'statuses': statuses})
                return HTTPPreconditionFailed(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': min_conns})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0

        #### CHANGED CODE ####
        # Stage a local copy of the incoming object on SSD, keyed by the
        # hash of its path.
        key = hash_path(self.account_name, self.container_name,
                        self.object_name)
        ssd_dir = "/SSD/%s/%s/%s" % (partition, key[-3:], key)
        os.system("mkdir -p " + ssd_dir)
        f = open(ssd_dir + "/" + str(self.object_name), "w")
        ####
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                    
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                            #### CHANGED CODE ####
                            f.write(chunk)
                            #######
                        except StopIteration:
                            #### CHANGED CODE ####
                            # Close the SSD copy only after the whole body
                            # has been read; closing it inside the loop made
                            # every write after the first chunk fail.
                            f.close()
                            #######
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': min_conns})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(
                _('Client disconnected without sending enough data'))
            self.app.logger.increment('client_disconnects')
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
                                                                   nodes)

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies,
                                  _('Object PUT'), etag=etag)
        if source_header:
            resp.headers['X-Copied-From'] = quote(
                source_header.split('/', 3)[3])
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(float(req.headers['X-Timestamp']))
        return resp
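The spin-down filter in the example above assumes a newline-separated
"ip:device" file whose first line is skipped. The same parsing as a
compact standalone sketch (the file path and format come from the code;
the helper name is hypothetical):

def load_spun_down(path="/home/swift/spindowndevices"):
    """Map each storage node ip to the list of its spun-down devices."""
    spun_down = {}
    with open(path) as f:
        for line in f.read().strip().split("\n")[1:]:
            parts = line.split(":")
            spun_down.setdefault(parts[0], []).append(parts[1])
    return spun_down

# A node stays writable while its ip/device pair is absent from this map.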
Example #48
0
File: obj.py Project: saebyuk/swift
    def PUT(self, req):
        """HTTP PUT request handler."""
        container_info = self.container_info(
            self.account_name, self.container_name,
            account_autocreate=self.app.account_autocreate)
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        if 'x-delete-after' in req.headers:
            try:
                x_delete_after = int(req.headers['x-delete-after'])
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-After')
            req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = int(req.headers['x-delete-at'])
                if x_delete_at < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req, content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            delete_at_container = str(
                x_delete_at /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_part = delete_at_nodes = None
        partition, nodes = self.app.object_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
                                        hreq.path_info, len(nodes))
        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req.headers['X-Timestamp'] = \
                    normalize_timestamp(float(req.headers['x-timestamp']))
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    float(hresp.environ['swift_x_timestamp']) >= \
                        float(req.headers['x-timestamp']):
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
        else:
            req.headers['X-Timestamp'] = normalize_timestamp(time.time())
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        if not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            content_type_manually_set = False
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get('swift_versioned_copy'):
            is_manifest = 'x-object-manifest' in req.headers or \
                          'x-object-manifest' in hresp.headers
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = normalize_timestamp(ts_source)
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            source_header = unquote(source_header)
            acct = req.path_info.split('/', 2)[1]
            if isinstance(acct, unicode):
                acct = acct.encode('utf-8')
            if not source_header.startswith('/'):
                source_header = '/' + source_header
            source_header = '/' + acct + source_header
            try:
                src_container_name, src_obj_name = \
                    source_header.split('/', 3)[2:]
            except ValueError:
                return HTTPPreconditionFailed(
                    request=req,
                    body='X-Copy-From header must be of the form '
                         '<container name>/<object name>')
            source_req = req.copy_get()
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            source_resp = self.GET(source_req)
            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            new_req = Request.blank(req.path_info,
                                    environ=req.environ, headers=req.headers)
            data_source = source_resp.app_iter
            new_req.content_length = source_resp.content_length
            if new_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if new_req.content_length > MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            new_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del new_req.headers['X-Copy-From']
            if not content_type_manually_set:
                new_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if not config_true_value(
                    new_req.headers.get('x-fresh-metadata', 'false')):
                copy_headers_into(source_resp, new_req)
                copy_headers_into(req, new_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                new_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = new_req
        node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
        pile = GreenPile(len(nodes))
        chunked = req.headers.get('transfer-encoding')

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.path_info, nheaders, self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        if len(conns) <= len(nodes) / 2:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) <= len(nodes) / 2:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
Example #49
0
File: obj.py Project: sun7shines/Cloudfs
    def PUT(self, req):
        account_partition, accounts = self.account_info(
            self.account_name, autocreate=False)
        account = accounts[0]
        (container_partition, containers, object_versions) = \
            self.container_info(self.account_name, self.container_name,
                                account_autocreate=self.app.account_autocreate)

        if not containers:
            return jresponse('-1', 'not found', req, 404)

        delete_at_part = delete_at_nodes = None

        partition, nodes = self.app.object_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        req.headers['X-Timestamp'] = normalize_timestamp(time.time())
        
        error_response = check_object_creation(req, self.object_name)
        if error_response:
            return error_response
        
        overwrite = req.GET.get('overwrite')

        if overwrite == 'true' and object_versions:
            
            hreq = Request.blank(req.path_info,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
                                        hreq.path_info, len(nodes))
            
            is_manifest = 'x-static-large-object' in req.headers or \
                          'x-static-large-object' in hresp.headers
                          
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                
                lcontainer = object_versions.split('/')[0]
                lprefix = self.object_name + '/'
                
                new_ts = normalize_timestamp(float(time.time()))
                vers_obj_name = lprefix + new_ts
                
                move_headers = {
                    'Destination': '/%s/%s' % (lcontainer, vers_obj_name)}
                move_req = Request.blank(req.path_info, headers=move_headers)
                move_resp = self.MOVE_VERSION(move_req)
                if is_client_error(move_resp.status_int):
                    # missing container or bad permissions
                    return jresponse('-1', 'bad permissions', req, 412)
                elif not is_success(move_resp.status_int):
                    # could not copy the data, bail
                    return jresponse('-1', 'ServiceUnavailable', req, 503)
                
        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        
            
        node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
        pile = GreenPile(len(nodes))
        for container in containers:
            nheaders = dict(req.headers.iteritems())
            nheaders['Connection'] = 'close'
            nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
            nheaders['X-Container-Partition'] = container_partition
            nheaders['X-Container-Device'] = container['device']
            
            nheaders['X-Account-Host'] = '%(ip)s:%(port)s' % account
            nheaders['X-Account-Partition'] = account_partition
            nheaders['X-Account-Device'] = self.account_name
                        
            nheaders['Expect'] = '100-continue'
            if delete_at_nodes:
                node = delete_at_nodes.pop(0)
                nheaders['X-Delete-At-Host'] = '%(ip)s:%(port)s' % node
                nheaders['X-Delete-At-Partition'] = delete_at_part
                nheaders['X-Delete-At-Device'] = node['device']
                
            if overwrite:
                nheaders['x-overwrite'] = overwrite
                
            pile.spawn(self._connect_put_node, self.account_name, node_iter,
                       partition, req.path_info, nheaders,
                       self.app.logger.thread_locals, req.query_string)
        conns = [conn for conn in pile if conn]
        if len(conns) <= len(nodes) / 2:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
            return jresponse('-1', 'ServiceUnavailable', req, 503)
        
        bytes_transferred = 0
        start_time = time.time()

        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            break
                    bytes_transferred += len(chunk)

                    # Throttle the client to roughly 1 MB/s: once the
                    # average transfer rate exceeds the cap, sleep in
                    # 100 ms steps until it falls back under it.
                    dural_time = float(time.time()) - float(start_time)
                    if dural_time > 0:
                        speed = (float(bytes_transferred) /
                                 dural_time / (1000 * 1000))
                        while speed > 1:
                            sleep(0.1)
                            dural_time = float(time.time()) - float(start_time)
                            speed = (float(bytes_transferred) /
                                     dural_time / (1000 * 1000))

                    if bytes_transferred > MAX_FILE_SIZE:
                        return jresponse('-1', 'RequestEntityTooLarge', req, 413)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) <= len(nodes) / 2:
                        self.app.logger.error(
                            _('Object PUT exceptions during'
                              ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
                        return jresponse('-1', 'ServiceUnavailable', req, 503)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            return jresponse('-1', 'RequestTimeout', req, 408)
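The send loop in the example above throttles the client to roughly 1 MB/s
by sleeping whenever the observed average rate exceeds the cap. The same
throttle in isolation (the helper name and parameter are illustrative,
not from the original code):

import time

def throttle(bytes_transferred, start_time, limit_mb_per_s=1.0):
    """Sleep in 100 ms steps until the average rate is under the cap."""
    elapsed = time.time() - start_time
    while elapsed > 0 and (float(bytes_transferred) / elapsed /
                           (1000 * 1000)) > limit_mb_per_s:
        time.sleep(0.1)
        elapsed = time.time() - start_time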
Example #50
0
    def stream(self, source, size=None):
        bytes_transferred = 0

        def _connect_put(chunk):
            raw_url = chunk["url"]
            parsed = urlparse(raw_url)
            try:
                chunk_path = parsed.path.split('/')[-1]
                h = {}
                h["transfer-encoding"] = "chunked"
                # FIXME: remove key incoherencies
                # TODO: automatize key conversions
                h[chunk_headers["content_id"]] = self.sysmeta['id']
                h[chunk_headers["content_version"]] = self.sysmeta['version']
                h[chunk_headers["content_path"]] = \
                    utils.quote(self.sysmeta['content_path'])
                h[chunk_headers["content_chunkmethod"]] = \
                    self.sysmeta['chunk_method']
                h[chunk_headers["content_policy"]] = self.sysmeta['policy']
                h[chunk_headers["container_id"]] = self.sysmeta['container_id']
                h[chunk_headers["chunk_pos"]] = chunk["pos"]
                h[chunk_headers["chunk_id"]] = chunk_path

                # Used during reconstruction of EC chunks
                if self.sysmeta['chunk_method'].startswith('ec'):
                    h[chunk_headers["metachunk_size"]] = \
                        self.sysmeta["metachunk_size"]
                    h[chunk_headers["metachunk_hash"]] = \
                        self.sysmeta["metachunk_hash"]

                with ConnectionTimeout(io.CONNECTION_TIMEOUT):
                    conn = io.http_connect(
                        parsed.netloc, 'PUT', parsed.path, h)
                    conn.chunk = chunk
                return conn, chunk
            except (Exception, Timeout) as e:
                msg = str(e)
                logger.exception("Failed to connect to %s (%s)", chunk, msg)
                chunk['error'] = msg
                return None, chunk

        meta_chunk = self.meta_chunk

        pile = GreenPile(len(meta_chunk))

        failed_chunks = []

        current_conns = []

        for chunk in meta_chunk:
            pile.spawn(_connect_put, chunk)

        for conn, chunk in pile:
            if not conn:
                failed_chunks.append(chunk)
            else:
                current_conns.append(conn)

        quorum = self._check_quorum(current_conns)
        if not quorum:
            raise exc.OioException("RAWX write failure, quorum not satisfied")

        bytes_transferred = 0
        try:
            with utils.ContextPool(len(meta_chunk)) as pool:
                for conn in current_conns:
                    conn.failed = False
                    conn.queue = Queue(io.PUT_QUEUE_DEPTH)
                    pool.spawn(self._send_data, conn)

                while True:
                    if size is not None:
                        remaining_bytes = size - bytes_transferred
                        if io.WRITE_CHUNK_SIZE < remaining_bytes:
                            read_size = io.WRITE_CHUNK_SIZE
                        else:
                            read_size = remaining_bytes
                    else:
                        read_size = io.WRITE_CHUNK_SIZE
                    with SourceReadTimeout(io.CLIENT_TIMEOUT):
                        try:
                            data = source.read(read_size)
                        except (ValueError, IOError) as e:
                            raise SourceReadError(str(e))
                        if len(data) == 0:
                            for conn in current_conns:
                                conn.queue.put('0\r\n\r\n')
                            break
                    self.checksum.update(data)
                    bytes_transferred += len(data)
                    # Iterate over a copy: removing from the list we are
                    # iterating would silently skip the next connection.
                    for conn in list(current_conns):
                        if not conn.failed:
                            conn.queue.put('%x\r\n%s\r\n' % (len(data),
                                                             data))
                        else:
                            current_conns.remove(conn)

                    quorum = self._check_quorum(current_conns)
                    if not quorum:
                        raise exc.OioException("RAWX write failure")

                for conn in current_conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()

        except SourceReadTimeout:
            logger.warn('Source read timeout')
            raise
        except SourceReadError:
            logger.warn('Source read error')
            raise
        except Timeout:
            logger.exception('Timeout writing data')
            raise
        except Exception:
            logger.exception('Exception writing data')
            raise

        success_chunks = []

        for conn in current_conns:
            if conn.failed:
                failed_chunks.append(conn.chunk)
                continue
            pile.spawn(self._get_response, conn)

        def _handle_resp(conn, resp):
            if resp:
                if resp.status == 201:
                    success_chunks.append(conn.chunk)
                else:
                    conn.failed = True
                    conn.chunk['error'] = 'HTTP %s' % resp.status
                    failed_chunks.append(conn.chunk)
                    logger.error("Wrong status code from %s (%s)",
                                 conn.chunk, resp.status)
            conn.close()

        for (conn, resp) in pile:
            if resp:
                _handle_resp(conn, resp)
        quorum = self._check_quorum(success_chunks)
        if not quorum:
            raise exc.OioException("RAWX write failure")

        meta_checksum = self.checksum.hexdigest()
        for chunk in success_chunks:
            chunk["size"] = bytes_transferred
            chunk["hash"] = meta_checksum

        return bytes_transferred, meta_checksum, success_chunks + failed_chunks
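Example #50 pushes each buffer to the RAWX connections as a raw HTTP/1.1 chunked transfer-encoding frame: a hexadecimal length line, the payload, a CRLF, and finally a zero-length chunk as terminator. A self-contained sketch of that framing (the helper name is invented for illustration):

def frame_chunked(data):
    """Wrap one buffer as an HTTP/1.1 chunked transfer-encoding frame."""
    return '%x\r\n%s\r\n' % (len(data), data)

# A five-byte payload becomes '5\r\nhello\r\n'; '0\r\n\r\n' ends the stream.
assert frame_chunked('hello') == '5\r\nhello\r\n'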
Example #51
0
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(
            self.account_name, self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)
        # pass the policy index to storage nodes via req header
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        try:
            ml = req.message_length()
        except ValueError as e:
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body=str(e))
        except AttributeError as e:
            return HTTPNotImplemented(request=req, content_type='text/plain',
                                      body=str(e))
        if ml is not None and ml > constraints.MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)
        if 'x-delete-after' in req.headers:
            try:
                x_delete_after = int(req.headers['x-delete-after'])
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-After')
            req.headers['x-delete-at'] = normalize_delete_at_timestamp(
                time.time() + x_delete_after)
        partition, nodes = obj_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            # make sure proxy-server uses the right policy index
            _headers = {'X-Backend-Storage-Policy-Index': policy_index,
                        'X-Newest': 'True'}
            hreq = Request.blank(req.path_info, headers=_headers,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(
                hreq, _('Object'), obj_ring, partition,
                hreq.swift_entity_path)
        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req_timestamp = Timestamp(req.headers['X-Timestamp'])
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                        hresp.environ['swift_x_timestamp'] >= req_timestamp:
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
            req.headers['X-Timestamp'] = req_timestamp.internal
        else:
            req.headers['X-Timestamp'] = Timestamp(time.time()).internal
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get('swift_versioned_copy'):
            if hresp.status_int != HTTP_NOT_FOUND:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            src_container_name, src_obj_name = check_copy_from_header(req)
            ver, acct, _rest = req.split_path(2, 3, True)
            if isinstance(acct, unicode):
                acct = acct.encode('utf-8')
            source_header = '/%s/%s/%s/%s' % (ver, acct,
                                              src_container_name, src_obj_name)
            source_req = req.copy_get()
            # make sure the source request uses its container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)
            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if not config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = normalize_delete_at_timestamp(
                    int(req.headers['x-delete-at']))
                if int(x_delete_at) < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req, content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            req.environ.setdefault('swift.log_info', []).append(
                'x-delete-at:%s' % x_delete_at)
            delete_at_container = normalize_delete_at_timestamp(
                int(x_delete_at) /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_container = delete_at_part = delete_at_nodes = None

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get('transfer-encoding', '')
        chunked = ('chunked' in te)

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and '*' in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(
                    _('Object PUT returning 412, %(statuses)r'),
                    {'statuses': statuses})
                return HTTPPreconditionFailed(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': min_conns})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': min_conns})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(
                _('Client disconnected without sending enough data'))
            self.app.logger.increment('client_disconnects')
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(req, conns,
                                                                   nodes)

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies,
                                  _('Object PUT'), etag=etag)
        if source_header:
            resp.headers['X-Copied-From'] = quote(
                source_header.split('/', 3)[3])
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
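Example #51 derives the expiring-objects container name by rounding the X-Delete-At timestamp down to a fixed-size bucket, so that many expirations land in the same container. The arithmetic in isolation, assuming Swift's default divisor of 86400 seconds (one day):

def delete_at_container(x_delete_at, divisor=86400):
    """Round a delete-at timestamp down to its container bucket."""
    # Integer division floors the timestamp to a multiple of the divisor.
    return str(int(x_delete_at) // divisor * divisor)

# Timestamps inside the same day-long bucket map to one container:
assert delete_at_container(1400000123) == delete_at_container(1400000999)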
Example #52
0
    def PUT(self, req):
        """HTTP PUT request handler."""
        container_info = self.container_info(
            self.account_name, self.container_name)
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        if 'x-delete-after' in req.headers:
            try:
                x_delete_after = int(req.headers['x-delete-after'])
            except ValueError:
                return HTTPBadRequest(request=req,
                                      content_type='text/plain',
                                      body='Non-integer X-Delete-After')
            req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
        partition, nodes = self.app.object_ring.get_nodes(
            self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if 'x-timestamp' in req.headers or \
                (object_versions and not
                 req.environ.get('swift_versioned_copy')):
            hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(
                hreq, _('Object'), self.app.object_ring, partition,
                hreq.path_info)
        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req.headers['X-Timestamp'] = \
                    normalize_timestamp(float(req.headers['x-timestamp']))
                if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    float(hresp.environ['swift_x_timestamp']) >= \
                        float(req.headers['x-timestamp']):
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req, content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                         'was %r' % req.headers['x-timestamp'])
        else:
            req.headers['X-Timestamp'] = normalize_timestamp(time.time())
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        if not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            content_type_manually_set = False
        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get('swift_versioned_copy'):
            is_manifest = 'x-object-manifest' in req.headers or \
                          'x-object-manifest' in hresp.headers
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(
                                            hresp.headers['last-modified'],
                                            '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = normalize_timestamp(ts_source)
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
                copy_environ = {'REQUEST_METHOD': 'COPY',
                                'swift_versioned_copy': True
                                }
                copy_req = Request.blank(req.path_info, headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            source_header = unquote(source_header)
            acct = req.path_info.split('/', 2)[1]
            if isinstance(acct, unicode):
                acct = acct.encode('utf-8')
            if not source_header.startswith('/'):
                source_header = '/' + source_header
            source_header = '/' + acct + source_header
            try:
                src_container_name, src_obj_name = \
                    source_header.split('/', 3)[2:]
            except ValueError:
                return HTTPPreconditionFailed(
                    request=req,
                    body='X-Copy-From header must be of the form '
                         '<container name>/<object name>')
            source_req = req.copy_get()
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            source_resp = self.GET(source_req)
            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            new_req = Request.blank(req.path_info,
                                    environ=req.environ, headers=req.headers)
            data_source = source_resp.app_iter
            new_req.content_length = source_resp.content_length
            if new_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if new_req.content_length > MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            new_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del new_req.headers['X-Copy-From']
            if not content_type_manually_set:
                new_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if not config_true_value(
                    new_req.headers.get('x-fresh-metadata', 'false')):
                copy_headers_into(source_resp, new_req)
                copy_headers_into(req, new_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                new_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = new_req

        if 'x-delete-at' in req.headers:
            try:
                x_delete_at = int(req.headers['x-delete-at'])
                if x_delete_at < time.time():
                    return HTTPBadRequest(
                        body='X-Delete-At in past', request=req,
                        content_type='text/plain')
            except ValueError:
                return HTTPBadRequest(request=req, content_type='text/plain',
                                      body='Non-integer X-Delete-At')
            delete_at_container = str(
                x_delete_at /
                self.app.expiring_objects_container_divisor *
                self.app.expiring_objects_container_divisor)
            delete_at_part, delete_at_nodes = \
                self.app.container_ring.get_nodes(
                    self.app.expiring_objects_account, delete_at_container)
        else:
            delete_at_part = delete_at_nodes = None

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes(self.app.object_ring, partition))
        pile = GreenPile(len(nodes))
        chunked = req.headers.get('transfer-encoding')

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req.path_info, nheaders, self.app.logger.thread_locals)

        conns = [conn for conn in pile if conn]
        if len(conns) <= len(nodes) / 2:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'),
                {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                [conn.queue.put('0\r\n\r\n') for conn in conns]
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put(
                                '%x\r\n%s\r\n' % (len(chunk), chunk)
                                if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) <= len(nodes) / 2:
                        self.app.logger.error(_(
                            'Object PUT exceptions during'
                            ' send, %(conns)s/%(nodes)s required connections'),
                            {'conns': len(conns), 'nodes': len(nodes) / 2 + 1})
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
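When versioning is enabled, examples #51 and #52 archive the previous object under a name built from a three-hex-digit length prefix, the object name, a slash, and the source timestamp, which keeps each object's versions contiguous and sortable in listings. A sketch of that naming (the function name is invented for illustration):

def version_object_name(object_name, timestamp):
    """Build the archive name used by the versioned-copy path."""
    # '%03x' encodes the name length so that versions of 'a' can never
    # be confused with versions of a longer name that starts with 'a'.
    prefix = '%03x' % len(object_name)
    return prefix + object_name + '/' + timestamp

assert version_object_name('photo.jpg', '0000001400.00000') == \
    '009photo.jpg/0000001400.00000'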
Example #53
0
File: obj.py Project: missall/swift
    def PUT(self, req, start_time=None, stats_type="PUT"):
        """HTTP PUT request handler."""
        if not start_time:
            start_time = time.time()
        (
            container_partition,
            containers,
            _junk,
            req.acl,
            req.environ["swift_sync_key"],
            object_versions,
        ) = self.container_info(self.account_name, self.container_name, account_autocreate=self.app.account_autocreate)
        if "swift.authorize" in req.environ:
            aresp = req.environ["swift.authorize"](req)
            if aresp:
                self.app.logger.increment("auth_short_circuits")
                return aresp
        if not containers:
            self.app.logger.timing_since("%s.timing" % (stats_type,), start_time)
            return HTTPNotFound(request=req)
        if "x-delete-after" in req.headers:
            try:
                x_delete_after = int(req.headers["x-delete-after"])
            except ValueError:
                self.app.logger.increment("errors")
                return HTTPBadRequest(request=req, content_type="text/plain", body="Non-integer X-Delete-After")
            req.headers["x-delete-at"] = "%d" % (time.time() + x_delete_after)
        if "x-delete-at" in req.headers:
            try:
                x_delete_at = int(req.headers["x-delete-at"])
                if x_delete_at < time.time():
                    self.app.logger.increment("errors")
                    return HTTPBadRequest(body="X-Delete-At in past", request=req, content_type="text/plain")
            except ValueError:
                self.app.logger.increment("errors")
                return HTTPBadRequest(request=req, content_type="text/plain", body="Non-integer X-Delete-At")
            delete_at_container = str(
                x_delete_at / self.app.expiring_objects_container_divisor * self.app.expiring_objects_container_divisor
            )
            delete_at_part, delete_at_nodes = self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container
            )
        else:
            delete_at_part = delete_at_nodes = None
        partition, nodes = self.app.object_ring.get_nodes(self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if "x-timestamp" in req.headers or (object_versions and not req.environ.get("swift_versioned_copy")):
            hreq = Request.blank(req.path_info, headers={"X-Newest": "True"}, environ={"REQUEST_METHOD": "HEAD"})
            hresp = self.GETorHEAD_base(hreq, _("Object"), partition, nodes, hreq.path_info, len(nodes))
        # Used by container sync feature
        if "x-timestamp" in req.headers:
            try:
                req.headers["X-Timestamp"] = normalize_timestamp(float(req.headers["x-timestamp"]))
                if (
                    hresp.environ
                    and "swift_x_timestamp" in hresp.environ
                    and float(hresp.environ["swift_x_timestamp"]) >= float(req.headers["x-timestamp"])
                ):
                    self.app.logger.timing_since("%.timing" % (stats_type,), start_time)
                    return HTTPAccepted(request=req)
            except ValueError:
                self.app.logger.increment("errors")
                return HTTPBadRequest(
                    request=req,
                    content_type="text/plain",
                    body="X-Timestamp should be a UNIX timestamp float value; " "was %r" % req.headers["x-timestamp"],
                )
        else:
            req.headers["X-Timestamp"] = normalize_timestamp(time.time())
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        if not req.headers.get("content-type"):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers["Content-Type"] = guessed_type or "application/octet-stream"
            content_type_manually_set = False
        error_response = check_object_creation(req, self.object_name)
        if error_response:
            self.app.logger.increment("errors")
            return error_response
        if object_versions and not req.environ.get("swift_versioned_copy"):
            is_manifest = "x-object-manifest" in req.headers or "x-object-manifest" in hresp.headers
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split("/")[0]
                prefix_len = "%03x" % len(self.object_name)
                lprefix = prefix_len + self.object_name + "/"
                ts_source = hresp.environ.get("swift_x_timestamp")
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(hresp.headers["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
                new_ts = normalize_timestamp(ts_source)
                vers_obj_name = lprefix + new_ts
                copy_headers = {"Destination": "%s/%s" % (lcontainer, vers_obj_name)}
                copy_environ = {"REQUEST_METHOD": "COPY", "swift_versioned_copy": True}
                copy_req = Request.blank(req.path_info, headers=copy_headers, environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ["wsgi.input"].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), "")
        source_header = req.headers.get("X-Copy-From")
        source_resp = None
        if source_header:
            source_header = unquote(source_header)
            acct = req.path_info.split("/", 2)[1]
            if isinstance(acct, unicode):
                acct = acct.encode("utf-8")
            if not source_header.startswith("/"):
                source_header = "/" + source_header
            source_header = "/" + acct + source_header
            try:
                src_container_name, src_obj_name = source_header.split("/", 3)[2:]
            except ValueError:
                self.app.logger.increment("errors")
                return HTTPPreconditionFailed(
                    request=req, body="X-Copy-From header must be of the form" "<container name>/<object name>"
                )
            source_req = req.copy_get()
            source_req.path_info = source_header
            source_req.headers["X-Newest"] = "true"
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            source_resp = self.GET(source_req)
            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                self.app.logger.timing_since("%s.timing" % (stats_type,), start_time)
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            new_req = Request.blank(req.path_info, environ=req.environ, headers=req.headers)
            data_source = source_resp.app_iter
            new_req.content_length = source_resp.content_length
            if new_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                self.app.logger.increment("errors")
                return HTTPRequestEntityTooLarge(request=req)
            new_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del new_req.headers["X-Copy-From"]
            if not content_type_manually_set:
                new_req.headers["Content-Type"] = source_resp.headers["Content-Type"]
            if new_req.headers.get("x-fresh-metadata", "false").lower() not in TRUE_VALUES:
                for k, v in source_resp.headers.items():
                    if k.lower().startswith("x-object-meta-"):
                        new_req.headers[k] = v
                for k, v in req.headers.items():
                    if k.lower().startswith("x-object-meta-"):
                        new_req.headers[k] = v
            req = new_req
        node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
        pile = GreenPile(len(nodes))
        for container in containers:
            nheaders = dict(req.headers.iteritems())
            nheaders["Connection"] = "close"
            nheaders["X-Container-Host"] = "%(ip)s:%(port)s" % container
            nheaders["X-Container-Partition"] = container_partition
            nheaders["X-Container-Device"] = container["device"]
            nheaders["Expect"] = "100-continue"
            if delete_at_nodes:
                node = delete_at_nodes.pop(0)
                nheaders["X-Delete-At-Host"] = "%(ip)s:%(port)s" % node
                nheaders["X-Delete-At-Partition"] = delete_at_part
                nheaders["X-Delete-At-Device"] = node["device"]
            pile.spawn(
                self._connect_put_node, node_iter, partition, req.path_info, nheaders, self.app.logger.thread_locals
            )
        conns = [conn for conn in pile if conn]
        if len(conns) <= len(nodes) / 2:
            self.app.logger.error(
                _("Object PUT returning 503, %(conns)s/%(nodes)s " "required connections"),
                {"conns": len(conns), "nodes": len(nodes) // 2 + 1},
            )
            self.app.logger.increment("errors")
            return HTTPServiceUnavailable(request=req)
        chunked = req.headers.get("transfer-encoding")
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                [conn.queue.put("0\r\n\r\n") for conn in conns]
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > MAX_FILE_SIZE:
                        self.app.logger.increment("errors")
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put("%x\r\n%s\r\n" % (len(chunk), chunk) if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) <= len(nodes) / 2:
                        self.app.logger.error(
                            _("Object PUT exceptions during" " send, %(conns)s/%(nodes)s required connections"),
                            {"conns": len(conns), "nodes": len(nodes) / 2 + 1},
                        )
                        self.app.logger.increment("errors")
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(_("ERROR Client read timeout (%ss)"), err.seconds)
            self.app.logger.increment("client_timeouts")
            return HTTPRequestTimeout(request=req)
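All of these handlers share one eventlet idiom: spawn a greenthread per backend through a GreenPile, then iterate the pile to collect results in spawn order, dropping falsy ones. A minimal, self-contained sketch of that fan-out (the worker is a stand-in, not code from the examples):

from eventlet import GreenPile

def fan_out(worker, items, concurrency=10):
    """Run worker(item) concurrently and keep the truthy results."""
    pile = GreenPile(concurrency)
    for item in items:
        pile.spawn(worker, item)
    # Iterating the pile yields each greenthread's return value.
    return [result for result in pile if result]

# e.g. fan_out(lambda n: n * n or None, range(5)) -> [1, 4, 9, 16]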
Example #54
0
    def process(self, env, cb):
        event = Event(env)
        if event.event_type == EventTypes.CONTENT_DELETED:
            pile = GreenPile(PARALLEL_CHUNKS_DELETE)
            url = event.env.get('url')
            chunks = []
            content_headers = None
            for item in event.data:
                if item.get('type') == 'chunks':
                    chunks.append(item)
                if item.get("type") == 'contents_headers':
                    content_headers = item
            if len(chunks):
                def delete_chunk(chunk):
                    resp = None
                    p = urlparse(chunk['id'])
                    try:
                        with Timeout(CHUNK_TIMEOUT):
                            conn = http_connect(p.netloc, 'DELETE', p.path)
                            resp = conn.getresponse()
                            resp.chunk = chunk
                    except (Exception, Timeout) as e:
                        self.logger.warn(
                            'error while deleting chunk %s "%s"',
                            chunk['id'], str(e.message))
                    return resp

                def delete_chunk_backblaze(chunks, url,
                                           content_headers, storage_method):
                    meta = {}
                    meta['container_id'] = url['id']
                    chunk_list = []
                    for chunk in chunks:
                        chunk['url'] = chunk['id']
                        chunk_list.append([chunk])
                    key_file = self.conf.get('key_file')
                    backblaze_info = BackblazeUtils.put_meta_backblaze(
                        storage_method, key_file)
                    try:
                        BackblazeDeleteHandler(meta, chunk_list,
                                               backblaze_info).delete()
                    except OioException as e:
                        self.logger.warn('delete failed: %s' % str(e))
                chunk_method = content_headers['chunk-method']
                # only load the storage method when the chunks use backblaze (b2)
                if chunk_method.find('backblaze') != -1:
                    storage_method = STORAGE_METHODS.load(chunk_method)
                    delete_chunk_backblaze(chunks, url,
                                           content_headers, storage_method)
                    return self.app(env, cb)
                for chunk in chunks:
                    pile.spawn(delete_chunk, chunk)

                resps = [resp for resp in pile if resp]

                for resp in resps:
                    if resp.status != 204:
                        self.logger.warn(
                            'failed to delete chunk %s (HTTP %s)',
                            resp.chunk['id'], resp.status)
        return self.app(env, cb)
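Example #54 wraps each chunk DELETE in an eventlet Timeout so that one unresponsive server cannot stall the whole event. A reduced sketch of that guard pattern (the helper and the ten-second budget are illustrative assumptions):

from eventlet import Timeout

CHUNK_TIMEOUT = 10  # seconds; illustrative value

def guarded_call(fn, *args):
    """Run fn under a time budget, returning None instead of hanging."""
    try:
        with Timeout(CHUNK_TIMEOUT):
            return fn(*args)
    except Timeout:
        # The operation exceeded its budget; report it as a failure.
        return None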
Example #55
0
    def stream(self, source, size):
        bytes_transferred = 0

        def _connect_put(chunk):
            raw_url = chunk["url"]
            parsed = urlparse(raw_url)
            try:
                chunk_path = parsed.path.split('/')[-1]
                h = {}
                h["transfer-encoding"] = "chunked"
                h[chunk_headers["content_id"]] = self.sysmeta['id']
                h[chunk_headers["content_version"]] = self.sysmeta['version']
                h[chunk_headers["content_path"]] = \
                    utils.quote(self.sysmeta['content_path'])
                h[chunk_headers["content_chunkmethod"]] = \
                    self.sysmeta['chunk_method']
                h[chunk_headers["content_policy"]] = self.sysmeta['policy']
                h[chunk_headers["container_id"]] = self.sysmeta['container_id']
                h[chunk_headers["chunk_pos"]] = chunk["pos"]
                h[chunk_headers["chunk_id"]] = chunk_path
                with ConnectionTimeout(io.CONNECTION_TIMEOUT):
                    conn = io.http_connect(parsed.netloc, 'PUT', parsed.path,
                                           h)
                    conn.chunk = chunk
                return conn, chunk
            except (Exception, Timeout) as e:
                msg = str(e)
                logger.error("Failed to connect to %s (%s)", chunk, msg)
                chunk['error'] = msg
                return None, chunk

        meta_chunk = self.meta_chunk

        pile = GreenPile(len(meta_chunk))

        failed_chunks = []

        current_conns = []

        for chunk in meta_chunk:
            pile.spawn(_connect_put, chunk)

        results = [d for d in pile]

        for conn, chunk in results:
            if not conn:
                failed_chunks.append(chunk)
            else:
                current_conns.append(conn)

        quorum = self._check_quorum(current_conns)
        if not quorum:
            raise exc.OioException("RAWX write failure")

        bytes_transferred = 0
        try:
            with utils.ContextPool(len(meta_chunk)) as pool:
                for conn in current_conns:
                    conn.failed = False
                    conn.queue = Queue(io.PUT_QUEUE_DEPTH)
                    pool.spawn(self._send_data, conn)

                while True:
                    remaining_bytes = size - bytes_transferred
                    if io.WRITE_CHUNK_SIZE < remaining_bytes:
                        read_size = io.WRITE_CHUNK_SIZE
                    else:
                        read_size = remaining_bytes
                    with SourceReadTimeout(io.CLIENT_TIMEOUT):
                        try:
                            data = source.read(read_size)
                        except (ValueError, IOError) as e:
                            raise SourceReadError(str(e))
                        if len(data) == 0:
                            for conn in current_conns:
                                conn.queue.put('0\r\n\r\n')
                            break
                    self.checksum.update(data)
                    bytes_transferred += len(data)
                    # Iterate over a copy: removing from the list we are
                    # iterating would silently skip the next connection.
                    for conn in list(current_conns):
                        if not conn.failed:
                            conn.queue.put('%x\r\n%s\r\n' % (len(data), data))
                        else:
                            current_conns.remove(conn)

                    quorum = self._check_quorum(current_conns)
                    if not quorum:
                        raise exc.OioException("RAWX write failure")

                for conn in current_conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()

        except SourceReadTimeout:
            logger.warn('Source read timeout')
            raise
        except SourceReadError:
            logger.warn('Source read error')
            raise
        except Timeout:
            logger.exception('Timeout writing data')
            raise
        except Exception:
            logger.exception('Exception writing data')
            raise

        success_chunks = []

        for conn in current_conns:
            if conn.failed:
                failed_chunks.append(conn.chunk)
                continue
            pile.spawn(self._get_response, conn)

        def _handle_resp(conn, resp):
            if resp:
                if resp.status == 201:
                    success_chunks.append(conn.chunk)
                else:
                    conn.failed = True
                    conn.chunk['error'] = 'HTTP %s' % resp.status
                    failed_chunks.append(conn.chunk)
                    logger.error("Wrong status code from %s (%s)", conn.chunk,
                                 resp.status)
            conn.close()

        for (conn, resp) in pile:
            if resp:
                _handle_resp(conn, resp)
        quorum = self._check_quorum(success_chunks)
        if not quorum:
            raise exc.OioException("RAWX write failure")

        meta_checksum = self.checksum.hexdigest()
        for chunk in success_chunks:
            chunk["size"] = bytes_transferred
            chunk["hash"] = meta_checksum

        return bytes_transferred, meta_checksum, success_chunks + failed_chunks
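Example #55, like #50, keeps a single running checksum over everything read from the source and stamps every successful chunk with that one digest plus the total byte count. A sketch of the accumulation, assuming an MD5 checksum object (the examples only show a generic self.checksum):

import hashlib

def checksum_stream(buffers):
    """Accumulate total size and digest over an iterable of byte buffers."""
    checksum = hashlib.md5()
    bytes_transferred = 0
    for data in buffers:
        checksum.update(data)
        bytes_transferred += len(data)
    return bytes_transferred, checksum.hexdigest()

# checksum_stream([b'ab', b'c']) == (3, hashlib.md5(b'abc').hexdigest())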
Example #56
0
File: obj.py Project: anishnarang/gswift
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and '*' not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='If-None-Match only supports *')
        container_info = self.container_info(self.account_name,
                                             self.container_name, req)
        policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
                                       container_info['storage_policy'])
        obj_ring = self.app.get_object_ring(policy_index)

        # pass the policy index to storage nodes via req header
        req.headers['X-Backend-Storage-Policy-Index'] = policy_index
        container_partition = container_info['partition']
        containers = container_info['nodes']
        req.acl = container_info['write_acl']
        req.environ['swift_sync_key'] = container_info['sync_key']
        object_versions = container_info['versions']
        if 'swift.authorize' in req.environ:
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        if not containers:
            return HTTPNotFound(request=req)

        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = \
            config_true_value(req.headers.get('x-detect-content-type'))
        if detect_content_type or not req.headers.get('content-type'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers['Content-Type'] = guessed_type or \
                'application/octet-stream'
            if detect_content_type:
                req.headers.pop('x-detect-content-type')
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or \
            check_content_type(req)
        if error_response:
            return error_response

        partition, nodes = obj_ring.get_nodes(self.account_name,
                                              self.container_name,
                                              self.object_name)
        ####################################  CHANGED_CODE  ############################################################
        # Restrict this PUT to devices that are currently spun up. Replicas
        # that fall on spun-down devices are recorded per partition in
        # nodes.txt (a dict literal) so they can be replayed later.
        print("===Original Nodes===")
        print(nodes)
        with open("/home/hduser/swift/swift/proxy/controllers/spindowndevices",
                  "r") as f:
            sdlist = f.read().split("\n")
        print("===Spun down devices===:", sdlist)

        upnodes = [item for item in nodes if item['device'] not in sdlist]
        downnodes = [item for item in nodes if item['device'] in sdlist]
        temp_nodes = upnodes
        if downnodes:
            # Only rewrite nodes.txt when there is something to add, so a
            # fully-up write does not clobber the accumulated map.
            with open("/home/hduser/swift/swift/proxy/controllers/nodes.txt",
                      "r") as f:
                d = ast.literal_eval(f.read())
            for item in downnodes:
                if partition in d:
                    d[partition].append(item)
                else:
                    d[partition] = [item]
            with open("/home/hduser/swift/swift/proxy/controllers/nodes.txt",
                      "w") as fo:
                fo.write(str(d) + "\n")

        # Spin up a device if none are running already.
        if len(upnodes) == 0:
            dev = nodes[0]['device']
            print("===ALL NODES DOWN===")
            print("===Mounting device===", dev)
            os.system("mount /dev/" + str(dev))

        print('===In controller PUT===:')
        print("===Partition===", partition)
        nodes = temp_nodes
        print("===Nodes===:", nodes)

        check_ssd()
        ############################################  CHANGED_CODE  ########################################################

        # do a HEAD request for checking object versions
        if object_versions and not req.environ.get('swift_versioned_copy'):
            # make sure proxy-server uses the right policy index
            _headers = {
                'X-Backend-Storage-Policy-Index': policy_index,
                'X-Newest': 'True'
            }
            hreq = Request.blank(req.path_info,
                                 headers=_headers,
                                 environ={'REQUEST_METHOD': 'HEAD'})
            hresp = self.GETorHEAD_base(hreq, _('Object'), obj_ring, partition,
                                        hreq.swift_entity_path)

        # Used by container sync feature
        if 'x-timestamp' in req.headers:
            try:
                req_timestamp = Timestamp(req.headers['X-Timestamp'])
            except ValueError:
                return HTTPBadRequest(
                    request=req,
                    content_type='text/plain',
                    body='X-Timestamp should be a UNIX timestamp float value; '
                    'was %r' % req.headers['x-timestamp'])
            req.headers['X-Timestamp'] = req_timestamp.internal
        else:
            req.headers['X-Timestamp'] = Timestamp(time.time()).internal

        if object_versions and not req.environ.get('swift_versioned_copy'):
            is_manifest = 'X-Object-Manifest' in req.headers or \
                          'X-Object-Manifest' in hresp.headers
            if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split('/')[0]
                prefix_len = '%03x' % len(self.object_name)
                lprefix = prefix_len + self.object_name + '/'
                ts_source = hresp.environ.get('swift_x_timestamp')
                if ts_source is None:
                    ts_source = time.mktime(
                        time.strptime(hresp.headers['last-modified'],
                                      '%a, %d %b %Y %H:%M:%S GMT'))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {
                    'Destination': '%s/%s' % (lcontainer, vers_obj_name)
                }
                copy_environ = {
                    'REQUEST_METHOD': 'COPY',
                    'swift_versioned_copy': True
                }
                copy_req = Request.blank(req.path_info,
                                         headers=copy_headers,
                                         environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ['wsgi.input'].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), '')
        source_header = req.headers.get('X-Copy-From')
        source_resp = None
        if source_header:
            if req.environ.get('swift.orig_req_method', req.method) != 'POST':
                req.environ.setdefault('swift.log_info', []).append(
                    'x-copy-from:%s' % source_header)
            ver, acct, _rest = req.split_path(2, 3, True)
            src_account_name = req.headers.get('X-Copy-From-Account', None)
            if src_account_name:
                src_account_name = check_account_format(req, src_account_name)
            else:
                src_account_name = acct
            src_container_name, src_obj_name = check_copy_from_header(req)
            source_header = '/%s/%s/%s/%s' % (ver, src_account_name,
                                              src_container_name, src_obj_name)
            source_req = req.copy_get()

            # make sure the source request uses its container_info
            source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
            source_req.path_info = source_header
            source_req.headers['X-Newest'] = 'true'
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            orig_account_name = self.account_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            self.account_name = src_account_name
            sink_req = Request.blank(req.path_info,
                                     environ=req.environ,
                                     headers=req.headers)
            source_resp = self.GET(source_req)

            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get(
                'swift.copy_hook',
                (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            self.account_name = orig_account_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag

            # we no longer need the X-Copy-From header
            del sink_req.headers['X-Copy-From']
            if 'X-Copy-From-Account' in sink_req.headers:
                del sink_req.headers['X-Copy-From-Account']
            if not content_type_manually_set:
                sink_req.headers['Content-Type'] = \
                    source_resp.headers['Content-Type']
            if config_true_value(
                    sink_req.headers.get('x-fresh-metadata', 'false')):
                # post-as-copy: ignore new sysmeta, copy existing sysmeta
                condition = lambda k: is_sys_meta('object', k)
                remove_items(sink_req.headers, condition)
                copy_header_subset(source_resp, sink_req, condition)
            else:
                # copy/update existing sysmeta and user meta
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)

            # copy over x-static-large-object for POSTs and manifest copies
            if 'X-Static-Large-Object' in source_resp.headers and \
                    req.params.get('multipart-manifest') == 'get':
                sink_req.headers['X-Static-Large-Object'] = \
                    source_resp.headers['X-Static-Large-Object']

            req = sink_req

        req, delete_at_container, delete_at_part, \
            delete_at_nodes = self._config_obj_expiration(req)

        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get('transfer-encoding', '')
        chunked = ('chunked' in te)

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers,
            delete_at_container, delete_at_part, delete_at_nodes)

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders['Expect'] = '100-continue'

#################################  CHANGED_CODE  ###################################################################
# node_iter was replaced with nodes in the spawn below so that the PUT
# connects to the filtered node list in its original order. Without this,
# iter_nodes would hand back the ring's nodes in a different order and the
# write could land on a different (possibly spun-down) device.

            pile.spawn(self._connect_put_node, nodes, partition,
                       req.swift_entity_path, nheaders,
                       self.app.logger.thread_locals)

#################################  CHANGED_CODE ###################################################################

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and '*' in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(
                    _('Object PUT returning 412, %(statuses)r'),
                    {'statuses': statuses})
                return HTTPPreconditionFailed(request=req)

        if any(conn for conn in conns
               if conn.resp and conn.resp.status == HTTP_CONFLICT):
            timestamps = [
                HeaderKeyDict(
                    conn.resp.getheaders()).get('X-Backend-Timestamp')
                for conn in conns if conn.resp
            ]
            self.app.logger.debug(
                _('Object PUT returning 202 for 409: '
                  '%(req_timestamp)s <= %(timestamps)r'), {
                      'req_timestamp': req.timestamp.internal,
                      'timestamps': ', '.join(timestamps)
                  })
            return HTTPAccepted(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _('Object PUT returning 503, %(conns)s/%(nodes)s '
                  'required connections'), {
                      'conns': len(conns),
                      'nodes': min_conns
                  })
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put('0\r\n\r\n')
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put('%x\r\n%s\r\n' %
                                           (len(chunk),
                                            chunk) if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(
                            _('Object PUT exceptions during'
                              ' send, %(conns)s/%(nodes)s required connections'
                              ), {
                                  'conns': len(conns),
                                  'nodes': min_conns
                              })
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(_('ERROR Client read timeout (%ss)'),
                                 err.seconds)
            self.app.logger.increment('client_timeouts')
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(
                _('Client disconnected without sending enough data'))
            self.app.logger.increment('client_disconnects')
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(
            req, conns, nodes)

        if len(etags) > 1:
            self.app.logger.error(
                _('Object servers returned %s mismatched etags'), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req,
                                  statuses,
                                  reasons,
                                  bodies,
                                  _('Object PUT'),
                                  etag=etag)
        if source_header:
            acct, path = source_header.split('/', 3)[2:4]
            resp.headers['X-Copied-From-Account'] = quote(acct)
            resp.headers['X-Copied-From'] = quote(path)
            if 'last-modified' in source_resp.headers:
                resp.headers['X-Copied-From-Last-Modified'] = \
                    source_resp.headers['last-modified']
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(
            float(Timestamp(req.headers['X-Timestamp'])))
        return resp
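
The CHANGED_CODE block above persists its partition-to-deferred-nodes map as a plain dict literal parsed back with ast.literal_eval. A minimal standalone sketch of that record/replay bookkeeping follows; the record_deferred helper and NODES_FILE path are hypothetical and not part of the Swift source:

import ast
import os

NODES_FILE = 'nodes.txt'  # hypothetical path; the example above hardcodes one

def load_deferred(path=NODES_FILE):
    # The file stores a dict literal: {partition: [node_dict, ...]}
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        return ast.literal_eval(f.read())

def record_deferred(partition, down_nodes, path=NODES_FILE):
    # Remember the replicas that could not be written so a later pass can
    # replay them once their devices are spun back up.
    d = load_deferred(path)
    d.setdefault(partition, []).extend(down_nodes)
    with open(path, 'w') as f:
        f.write(str(d) + '\n')

# e.g. record_deferred(1234, [{'device': 'sdb1', 'ip': '10.0.0.2'}])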
Example #57
0
class Benchmarker():

    def __init__(self, rate, auth, total_ops=None, data_size=1024*1024, container='nacho', base='item-', queue_size=None):
        self.rate = rate
        self.dist = NegExp(self.rate)
        self.data_size = data_size
        self.ops = 0
        self.errors = 0
        self.container = container
        self.base = base
        self.outstanding = 0
        self.total_ops = total_ops
        self.running = False
        if queue_size:
            self.pile = GreenPile(queue_size)
        else:
            self.pile = GreenPile()
        self._done = Event()
        # self.client = Connection(authurl='http://localhost:8080/auth/v1.0', user='******', key='testing')
        self.storage_url, self.token = get_auth(auth, 'test:tester', 'testing')
        self.data = "x" * data_size
        LOG.info("Object-size=%s" % (len(self.data)))
        LOG.info("Object-base=%s" % base)
        if total_ops:
            LOG.info("This benchmark will take aprox %.0f seconds" % (total_ops / rate))

    def __iter__(self):
        return self.pile

    def next(self):
        return self.pile.next()

    def _work(self, op):
        running_ops = self.outstanding
        self.outstanding += 1
        t1 = time()
        try:
            # client = Connection(authurl='http://192.168.16.12:8080/auth/v1.0', user='******', key='testing')
            # client.put_object(self.container,'%s-%s' % (self.base, op), self.data)
            get_object(self.storage_url, token=self.token, container=self.container, name='%s-%s' % (self.base, op))
            t2 = time()
            elapsed = (t2-t1) * 1000
            self.outstanding -= 1
            LOG.info("Operation #%d took %.2f ms (%.2f MB/s, %d ops outstanding on arrival)" % (op, elapsed, (self.data_size / (1024.*1024)) / (t2 - t1) , running_ops))
            entry = { "Operation": op,
                      "Arrival-time": t1,
                      "Completion-time": t2,
                      "Elapsed": elapsed,
                      "Outstanding-on-arrival": running_ops,
                      "Outstanding-on-completion": self.outstanding }
            print '%d, %f, %f, %.2f, %d, %d' % (op, t1, t2, elapsed, running_ops, self.outstanding)
            return entry
        except KeyboardInterrupt:
            self.outstanding -= 1
            self.running = False
            return None
        except Exception:
            self.errors += 1
            raise

    def _run(self):
        print '# date-time = %s' % datetime.now()
        print '# object-size = %s' % self.data_size
        print '# rate = %s' % self.rate
        print "Operation, Arrival-time, Completion-time, Elapsed, Outstanding-on-arrival, Outstanding-on-completion"
        self.running = True
        while self.running:
            sleep(self.dist.next())
            if self.running:
                self.ops += 1
                self.pile.spawn(self._work, self.ops)
                # stop looping if we have an operations limit
            if self.running and self.total_ops:
                self.running = self.ops < self.total_ops
        self._done.send()

    def start(self):
        spawn_n(self._run)

    def wait(self):
        # wait for pending jobs
        #while self.outstanding > 0: sleep(.1)
        self._done.wait()
        self.pile.pool.waitall()

    def stop(self):
        self.running = False
        self.wait()
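
A minimal driver for the Benchmarker above might look like this; the auth URL is an assumption matching a default Swift all-in-one setup (the test:tester/testing credentials are baked into the constructor), and rate is in operations per second:

# Hypothetical driver script for the class above.
bench = Benchmarker(rate=10.0,
                    auth='http://localhost:8080/auth/v1.0',
                    total_ops=100,
                    data_size=4096)
bench.start()  # spawns _run() in a greenthread
bench.wait()   # blocks until the operation limit is reached
LOG.info("done: %d ops, %d errors" % (bench.ops, bench.errors))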
Example #58
0
    def PUT(self, req):
        """HTTP PUT request handler."""
        if req.if_none_match is not None and "*" not in req.if_none_match:
            # Sending an etag with if-none-match isn't currently supported
            return HTTPBadRequest(request=req, content_type="text/plain", body="If-None-Match only supports *")
        container_info = self.container_info(self.account_name, self.container_name, req)
        policy_index = req.headers.get("X-Backend-Storage-Policy-Index", container_info["storage_policy"])
        obj_ring = self.app.get_object_ring(policy_index)
        # pass the policy index to storage nodes via req header
        req.headers["X-Backend-Storage-Policy-Index"] = policy_index
        container_partition = container_info["partition"]
        containers = container_info["nodes"]
        req.acl = container_info["write_acl"]
        req.environ["swift_sync_key"] = container_info["sync_key"]
        object_versions = container_info["versions"]
        if "swift.authorize" in req.environ:
            aresp = req.environ["swift.authorize"](req)
            if aresp:
                return aresp
        if not containers:
            return HTTPNotFound(request=req)
        try:
            ml = req.message_length()
        except ValueError as e:
            return HTTPBadRequest(request=req, content_type="text/plain", body=str(e))
        except AttributeError as e:
            return HTTPNotImplemented(request=req, content_type="text/plain", body=str(e))
        if ml is not None and ml > constraints.MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=req)
        if "x-delete-after" in req.headers:
            try:
                x_delete_after = int(req.headers["x-delete-after"])
            except ValueError:
                return HTTPBadRequest(request=req, content_type="text/plain", body="Non-integer X-Delete-After")
            req.headers["x-delete-at"] = normalize_delete_at_timestamp(time.time() + x_delete_after)
        partition, nodes = obj_ring.get_nodes(self.account_name, self.container_name, self.object_name)
        # do a HEAD request for container sync and checking object versions
        if "x-timestamp" in req.headers or (object_versions and not req.environ.get("swift_versioned_copy")):
            # make sure proxy-server uses the right policy index
            _headers = {
                "X-Backend-Storage-Policy-Index": req.headers["X-Backend-Storage-Policy-Index"],
                "X-Newest": "True",
            }
            hreq = Request.blank(req.path_info, headers=_headers, environ={"REQUEST_METHOD": "HEAD"})
            hresp = self.GETorHEAD_base(hreq, _("Object"), obj_ring, partition, hreq.swift_entity_path)
        # Used by container sync feature
        if "x-timestamp" in req.headers:
            try:
                req_timestamp = Timestamp(req.headers["X-Timestamp"])
                if (
                    hresp.environ
                    and "swift_x_timestamp" in hresp.environ
                    and hresp.environ["swift_x_timestamp"] >= req_timestamp
                ):
                    return HTTPAccepted(request=req)
            except ValueError:
                return HTTPBadRequest(
                    request=req,
                    content_type="text/plain",
                    body="X-Timestamp should be a UNIX timestamp float value; " "was %r" % req.headers["x-timestamp"],
                )
            req.headers["X-Timestamp"] = req_timestamp.internal
        else:
            req.headers["X-Timestamp"] = Timestamp(time.time()).internal
        # Sometimes the 'content-type' header exists, but is set to None.
        content_type_manually_set = True
        detect_content_type = config_true_value(req.headers.get("x-detect-content-type"))
        if detect_content_type or not req.headers.get("content-type"):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            req.headers["Content-Type"] = guessed_type or "application/octet-stream"
            if detect_content_type:
                req.headers.pop("x-detect-content-type")
            else:
                content_type_manually_set = False

        error_response = check_object_creation(req, self.object_name) or check_content_type(req)
        if error_response:
            return error_response
        if object_versions and not req.environ.get("swift_versioned_copy"):
            if hresp.status_int != HTTP_NOT_FOUND:
                # This is a version manifest and needs to be handled
                # differently. First copy the existing data to a new object,
                # then write the data from this request to the version manifest
                # object.
                lcontainer = object_versions.split("/")[0]
                prefix_len = "%03x" % len(self.object_name)
                lprefix = prefix_len + self.object_name + "/"
                ts_source = hresp.environ.get("swift_x_timestamp")
                if ts_source is None:
                    ts_source = time.mktime(time.strptime(hresp.headers["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
                new_ts = Timestamp(ts_source).internal
                vers_obj_name = lprefix + new_ts
                copy_headers = {"Destination": "%s/%s" % (lcontainer, vers_obj_name)}
                copy_environ = {"REQUEST_METHOD": "COPY", "swift_versioned_copy": True}
                copy_req = Request.blank(req.path_info, headers=copy_headers, environ=copy_environ)
                copy_resp = self.COPY(copy_req)
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    return HTTPPreconditionFailed(request=req)
                elif not is_success(copy_resp.status_int):
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

        reader = req.environ["wsgi.input"].read
        data_source = iter(lambda: reader(self.app.client_chunk_size), "")
        source_header = req.headers.get("X-Copy-From")
        source_resp = None
        if source_header:
            if req.environ.get("swift.orig_req_method", req.method) != "POST":
                req.environ.setdefault("swift.log_info", []).append("x-copy-from:%s" % source_header)
            src_container_name, src_obj_name = check_copy_from_header(req)
            ver, acct, _rest = req.split_path(2, 3, True)
            if isinstance(acct, unicode):
                acct = acct.encode("utf-8")
            source_header = "/%s/%s/%s/%s" % (ver, acct, src_container_name, src_obj_name)
            source_req = req.copy_get()
            # make sure the source request uses its container_info
            source_req.headers.pop("X-Backend-Storage-Policy-Index", None)
            source_req.path_info = source_header
            source_req.headers["X-Newest"] = "true"
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
            self.container_name = src_container_name
            sink_req = Request.blank(req.path_info, environ=req.environ, headers=req.headers)
            source_resp = self.GET(source_req)
            # This gives middlewares a way to change the source; for example,
            # this lets you COPY a SLO manifest and have the new object be the
            # concatenation of the segments (like what a GET request gives
            # the client), not a copy of the manifest file.
            hook = req.environ.get("swift.copy_hook", (lambda source_req, source_resp, sink_req: source_resp))
            source_resp = hook(source_req, source_resp, sink_req)

            if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
                return source_resp
            self.object_name = orig_obj_name
            self.container_name = orig_container_name
            data_source = iter(source_resp.app_iter)
            sink_req.content_length = source_resp.content_length
            if sink_req.content_length is None:
                # This indicates a transfer-encoding: chunked source object,
                # which currently only happens because there are more than
                # CONTAINER_LISTING_LIMIT segments in a segmented object. In
                # this case, we're going to refuse to do the server-side copy.
                return HTTPRequestEntityTooLarge(request=req)
            if sink_req.content_length > constraints.MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=req)
            sink_req.etag = source_resp.etag
            # we no longer need the X-Copy-From header
            del sink_req.headers["X-Copy-From"]
            if not content_type_manually_set:
                sink_req.headers["Content-Type"] = source_resp.headers["Content-Type"]
            if not config_true_value(sink_req.headers.get("x-fresh-metadata", "false")):
                copy_headers_into(source_resp, sink_req)
                copy_headers_into(req, sink_req)
            # copy over x-static-large-object for POSTs and manifest copies
            if "X-Static-Large-Object" in source_resp.headers and req.params.get("multipart-manifest") == "get":
                sink_req.headers["X-Static-Large-Object"] = source_resp.headers["X-Static-Large-Object"]

            req = sink_req

        if "x-delete-at" in req.headers:
            try:
                x_delete_at = normalize_delete_at_timestamp(int(req.headers["x-delete-at"]))
                if int(x_delete_at) < time.time():
                    return HTTPBadRequest(body="X-Delete-At in past", request=req, content_type="text/plain")
            except ValueError:
                return HTTPBadRequest(request=req, content_type="text/plain", body="Non-integer X-Delete-At")
            req.environ.setdefault("swift.log_info", []).append("x-delete-at:%s" % x_delete_at)
            delete_at_container = normalize_delete_at_timestamp(
                int(x_delete_at)
                / self.app.expiring_objects_container_divisor
                * self.app.expiring_objects_container_divisor
            )
            delete_at_part, delete_at_nodes = self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container
            )
        else:
            delete_at_container = delete_at_part = delete_at_nodes = None

        node_iter = GreenthreadSafeIterator(self.iter_nodes_local_first(obj_ring, partition))
        pile = GreenPile(len(nodes))
        te = req.headers.get("transfer-encoding", "")
        chunked = "chunked" in te

        outgoing_headers = self._backend_requests(
            req, len(nodes), container_partition, containers, delete_at_container, delete_at_part, delete_at_nodes
        )

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or chunked:
                nheaders["Expect"] = "100-continue"
            pile.spawn(
                self._connect_put_node,
                node_iter,
                partition,
                req.swift_entity_path,
                nheaders,
                self.app.logger.thread_locals,
            )

        conns = [conn for conn in pile if conn]
        min_conns = quorum_size(len(nodes))

        if req.if_none_match is not None and "*" in req.if_none_match:
            statuses = [conn.resp.status for conn in conns if conn.resp]
            if HTTP_PRECONDITION_FAILED in statuses:
                # If we find any copy of the file, it shouldn't be uploaded
                self.app.logger.debug(_("Object PUT returning 412, %(statuses)r"), {"statuses": statuses})
                return HTTPPreconditionFailed(request=req)

        if len(conns) < min_conns:
            self.app.logger.error(
                _("Object PUT returning 503, %(conns)s/%(nodes)s " "required connections"),
                {"conns": len(conns), "nodes": min_conns},
            )
            return HTTPServiceUnavailable(request=req)
        bytes_transferred = 0
        try:
            with ContextPool(len(nodes)) as pool:
                for conn in conns:
                    conn.failed = False
                    conn.queue = Queue(self.app.put_queue_depth)
                    pool.spawn(self._send_file, conn, req.path)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                for conn in conns:
                                    conn.queue.put("0\r\n\r\n")
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=req)
                    for conn in list(conns):
                        if not conn.failed:
                            conn.queue.put("%x\r\n%s\r\n" % (len(chunk), chunk) if chunked else chunk)
                        else:
                            conns.remove(conn)
                    if len(conns) < min_conns:
                        self.app.logger.error(
                            _("Object PUT exceptions during" " send, %(conns)s/%(nodes)s required connections"),
                            {"conns": len(conns), "nodes": min_conns},
                        )
                        return HTTPServiceUnavailable(request=req)
                for conn in conns:
                    if conn.queue.unfinished_tasks:
                        conn.queue.join()
            conns = [conn for conn in conns if not conn.failed]
        except ChunkReadTimeout as err:
            self.app.logger.warn(_("ERROR Client read timeout (%ss)"), err.seconds)
            self.app.logger.increment("client_timeouts")
            return HTTPRequestTimeout(request=req)
        except (Exception, Timeout):
            self.app.logger.exception(_("ERROR Exception causing client disconnect"))
            return HTTPClientDisconnect(request=req)
        if req.content_length and bytes_transferred < req.content_length:
            req.client_disconnect = True
            self.app.logger.warn(_("Client disconnected without sending enough data"))
            self.app.logger.increment("client_disconnects")
            return HTTPClientDisconnect(request=req)

        statuses, reasons, bodies, etags = self._get_put_responses(req, conns, nodes)

        if len(etags) > 1:
            self.app.logger.error(_("Object servers returned %s mismatched etags"), len(etags))
            return HTTPServerError(request=req)
        etag = etags.pop() if len(etags) else None
        resp = self.best_response(req, statuses, reasons, bodies, _("Object PUT"), etag=etag)
        if source_header:
            resp.headers["X-Copied-From"] = quote(source_header.split("/", 3)[3])
            if "last-modified" in source_resp.headers:
                resp.headers["X-Copied-From-Last-Modified"] = source_resp.headers["last-modified"]
            copy_headers_into(req, resp)
        resp.last_modified = math.ceil(float(Timestamp(req.headers["X-Timestamp"])))
        return resp
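
For reference, the chunked branch of the upload loop above frames each chunk in the RFC 2616 chunked transfer-encoding wire format: the chunk size in hex, CRLF, the data, CRLF, with a zero-length chunk terminating the body. A tiny standalone sketch of that framing:

def frame_chunk(chunk):
    # RFC 2616 section 3.6.1: hex size, CRLF, chunk data, CRLF.
    return '%x\r\n%s\r\n' % (len(chunk), chunk)

assert frame_chunk('hello') == '5\r\nhello\r\n'
# A zero-length chunk yields the terminator queued when the data
# source is exhausted:
assert frame_chunk('') == '0\r\n\r\n'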