Example #1
 def __init__(self, app, conf, jit_conf):
     self.app = app
     self.conf = jit_conf
     self.logger = get_logger(self.conf, log_route=name)
     self.chain = dict()
     #self.chain = Chain(self.logger, self.conf['chainsave'], self.conf['totalseconds'], self.conf['probthreshold'], self.conf['twolevels'])
     self.pool = GreenAsyncPile(self.conf['nthreads'])
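
The snippets on this page all revolve around GreenAsyncPile (in Swift it lives in swift.common.utils): construct it with a concurrency limit, spawn work into it, and iterate over results as they complete. A minimal standalone sketch of that interface, with a made-up worker function standing in for the HTTP calls used in the proxy examples below:

    from swift.common.utils import GreenAsyncPile

    def fetch(node):
        # stand-in worker; in the examples below this is an HTTP request
        return ('ok', node)

    pile = GreenAsyncPile(3)            # at most 3 concurrent green threads
    for node in ('a', 'b', 'c', 'd'):
        pile.spawn(fetch, node)         # queue work, returns immediately
    for result in pile:                 # yields results as workers finish
        print(result)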
Example #2
File: obj.py  Project: anishnarang/gswift
    def _get_put_responses(self, req, conns, nodes):
        statuses = []
        reasons = []
        bodies = []
        etags = set()

        def get_conn_response(conn):
            try:
                with Timeout(self.app.node_timeout):
                    if conn.resp:
                        return (conn, conn.resp)
                    else:
                        return (conn, conn.getresponse())
            except (Exception, Timeout):
                self.app.exception_occurred(
                    conn.node, _('Object'),
                    _('Trying to get final status of PUT to %s') % req.path)
            return (None, None)

        pile = GreenAsyncPile(len(conns))
        for conn in conns:
            pile.spawn(get_conn_response, conn)

        def _handle_response(conn, response):
            statuses.append(response.status)
            reasons.append(response.reason)
            bodies.append(response.read())
            if response.status == HTTP_INSUFFICIENT_STORAGE:
                self.app.error_limit(conn.node,
                                     _('ERROR Insufficient Storage'))
            elif response.status >= HTTP_INTERNAL_SERVER_ERROR:
                self.app.error_occurred(
                    conn.node,
                    _('ERROR %(status)d %(body)s From Object Server '
                      're: %(path)s') % {
                          'status': response.status,
                          'body': bodies[-1][:1024],
                          'path': req.path
                      })
            elif is_success(response.status):
                etags.add(response.getheader('etag').strip('"'))

        for (conn, response) in pile:
            if response:
                _handle_response(conn, response)
                if self.have_quorum(statuses, len(nodes)):
                    break

        # give any pending requests *some* chance to finish
        finished_quickly = pile.waitall(self.app.post_quorum_timeout)
        for (conn, response) in finished_quickly:
            if response:
                _handle_response(conn, response)

        while len(statuses) < len(nodes):
            statuses.append(HTTP_SERVICE_UNAVAILABLE)
            reasons.append('')
            bodies.append('')
        return statuses, reasons, bodies, etags
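
Examples #2 and #3 stop collecting as soon as have_quorum is satisfied, give stragglers a short grace period via waitall, and then pad the result lists with 503s so they always match the node count. As a rough illustration of a quorum condition only (a hypothetical majority rule; the real have_quorum in Swift's proxy base controller is more involved):

    def simple_majority_quorum(statuses, node_count):
        # hypothetical stand-in: quorum once more than half the nodes answered
        return len(statuses) > node_count // 2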
Example #3
File: obj.py  Project: dpgoetz/swift
    def _get_put_responses(self, req, conns, nodes):
        statuses = []
        reasons = []
        bodies = []
        etags = set()

        def get_conn_response(conn):
            try:
                with Timeout(self.app.node_timeout):
                    if conn.resp:
                        return (conn, conn.resp)
                    else:
                        return (conn, conn.getresponse())
            except (Exception, Timeout):
                self.app.exception_occurred(
                    conn.node, _('Object'),
                    _('Trying to get final status of PUT to %s') % req.path)
            return (None, None)

        pile = GreenAsyncPile(len(conns))
        for conn in conns:
            pile.spawn(get_conn_response, conn)

        def _handle_response(conn, response):
            statuses.append(response.status)
            reasons.append(response.reason)
            bodies.append(response.read())
            if response.status == HTTP_INSUFFICIENT_STORAGE:
                self.app.error_limit(conn.node,
                                     _('ERROR Insufficient Storage'))
            elif response.status >= HTTP_INTERNAL_SERVER_ERROR:
                self.app.error_occurred(
                    conn.node,
                    _('ERROR %(status)d %(body)s From Object Server '
                      're: %(path)s') %
                    {'status': response.status,
                     'body': bodies[-1][:1024], 'path': req.path})
            elif is_success(response.status):
                etags.add(response.getheader('etag').strip('"'))

        for (conn, response) in pile:
            if response:
                _handle_response(conn, response)
                if self.have_quorum(statuses, len(nodes)):
                    break

        # give any pending requests *some* chance to finish
        finished_quickly = pile.waitall(self.app.post_quorum_timeout)
        for (conn, response) in finished_quickly:
            if response:
                _handle_response(conn, response)

        while len(statuses) < len(nodes):
            statuses.append(HTTP_SERVICE_UNAVAILABLE)
            reasons.append('')
            bodies.append('')
        return statuses, reasons, bodies, etags
Example #4
    def _get_put_responses(self, req, conns, nodes):
        statuses = []
        reasons = []
        bodies = []
        etags = set()

        def get_conn_response(conn):
            try:
                with Timeout(self.app.node_timeout):
                    if conn.resp:
                        return conn.resp
                    else:
                        return conn.getresponse()
            except (Exception, Timeout):
                self.app.exception_occurred(
                    conn.node, _("Object"), _("Trying to get final status of PUT to %s") % req.path
                )

        pile = GreenAsyncPile(len(conns))
        for conn in conns:
            pile.spawn(get_conn_response, conn)
        for response in pile:
            if response:
                statuses.append(response.status)
                reasons.append(response.reason)
                bodies.append(response.read())
                if response.status >= HTTP_INTERNAL_SERVER_ERROR:
                    self.app.error_occurred(
                        conn.node,
                        _("ERROR %(status)d %(body)s From Object Server " "re: %(path)s")
                        % {"status": response.status, "body": bodies[-1][:1024], "path": req.path},
                    )
                elif is_success(response.status):
                    etags.add(response.getheader("etag").strip('"'))
                if self.have_quorum(statuses, len(nodes)):
                    break
        # give any pending requests *some* chance to finish
        pile.waitall(self.app.post_quorum_timeout)
        while len(statuses) < len(nodes):
            statuses.append(HTTP_SERVICE_UNAVAILABLE)
            reasons.append("")
            bodies.append("")
        return statuses, reasons, bodies, etags
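
In every _get_put_responses variant, get_conn_response wraps the final status read in an eventlet Timeout so one slow object server cannot stall the whole PUT. A minimal sketch of that timeout pattern, assuming eventlet is available (the helper name is made up):

    from eventlet import Timeout

    def read_final_status(conn, seconds):
        # returns None instead of hanging if the server is too slow
        try:
            with Timeout(seconds):
                return conn.getresponse()
        except Timeout:
            return None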
Example #5
File: base.py  Project: 10389030/swift
    def make_requests(self, req, ring, part, method, path, headers,
                      query_string=''):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param req: a request sent by the client
        :param ring: the ring used for finding backend servers
        :param part: the partition number
        :param method: the method to send to the backend
        :param path: the path to send to the backend
                     (full path ends up being  /<$device>/<$part>/<$path>)
        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :param query_string: optional query string to send to the backend
        :returns: a swob.Response object
        """
        start_nodes = ring.get_part_nodes(part)
        nodes = GreenthreadSafeIterator(self.app.iter_nodes(ring, part))
        pile = GreenAsyncPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)
        response = []
        statuses = []
        for resp in pile:
            if not resp:
                continue
            response.append(resp)
            statuses.append(resp[0])
            if self.have_quorum(statuses, len(start_nodes)):
                break
        # give any pending requests *some* chance to finish
        pile.waitall(self.app.post_quorum_timeout)
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
        statuses, reasons, resp_headers, bodies = zip(*response)
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (self.server_type, req.method),
                                  headers=resp_headers)
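
make_requests collects one (status, reason, headers, body) tuple per backend request and then transposes them with zip(*response) so best_response receives parallel sequences. The transpose itself is plain Python:

    response = [(201, 'Created', {}, ''),
                (201, 'Created', {}, ''),
                (503, '', '', '')]
    statuses, reasons, resp_headers, bodies = zip(*response)
    # statuses == (201, 201, 503); bodies == ('', '', '')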
Example #6
File: base.py  Project: afliu/swift
    def make_requests(self,
                      req,
                      ring,
                      part,
                      method,
                      path,
                      headers,
                      query_string=''):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param req: a request sent by the client
        :param ring: the ring used for finding backend servers
        :param part: the partition number
        :param method: the method to send to the backend
        :param path: the path to send to the backend
                     (full path ends up being  /<$device>/<$part>/<$path>)
        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :param query_string: optional query string to send to the backend
        :returns: a swob.Response object
        """
        start_nodes = ring.get_part_nodes(part)
        nodes = GreenthreadSafeIterator(self.app.iter_nodes(ring, part))
        pile = GreenAsyncPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path, head,
                       query_string, self.app.logger.thread_locals)
        response = []
        statuses = []
        for resp in pile:
            if not resp:
                continue
            response.append(resp)
            statuses.append(resp[0])
            if self.have_quorum(statuses, len(start_nodes)):
                break
        # give any pending requests *some* chance to finish
        pile.waitall(self.app.post_quorum_timeout)
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
        statuses, reasons, resp_headers, bodies = zip(*response)
        return self.best_response(req,
                                  statuses,
                                  reasons,
                                  bodies,
                                  '%s %s' % (self.server_type, req.method),
                                  headers=resp_headers)
Example #7
    def reconstruct_fa(self, job, node, datafile_metadata):
        """
        Reconstructs a fragment archive - this method is called from ssync
        after a remote node responds that is missing this object - the local
        diskfile is opened to provide metadata - but to reconstruct the
        missing fragment archive we must connect to multiple object servers.

        :param job: job from ssync_sender
        :param node: node that we're rebuilding to
        :param datafile_metadata:  the datafile metadata to attach to
                                   the rebuilt fragment archive
        :returns: a DiskFile like class for use by ssync
        :raises DiskFileError: if the fragment archive cannot be reconstructed
        """

        part_nodes = job['policy'].object_ring.get_part_nodes(job['partition'])
        part_nodes.remove(node)

        # the fragment index we need to reconstruct is the position index
        # of the node we're rebuilding to within the primary part list
        fi_to_rebuild = node['index']

        # KISS send out connection requests to all nodes, see what sticks
        headers = self.headers.copy()
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        pile = GreenAsyncPile(len(part_nodes))
        path = datafile_metadata['name']
        for node in part_nodes:
            pile.spawn(self._get_response, node, job['partition'], path,
                       headers, job['policy'])
        responses = []
        etag = None
        for resp in pile:
            if not resp:
                continue
            resp.headers = HeaderKeyDict(resp.getheaders())
            if str(fi_to_rebuild) == \
                    resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index'):
                continue
            if resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index') in set(
                    r.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
                    for r in responses):
                continue
            responses.append(resp)
            etag = sorted(
                responses,
                reverse=True,
                key=lambda r: Timestamp(r.headers.get('X-Backend-Timestamp'))
            )[0].headers.get('X-Object-Sysmeta-Ec-Etag')
            responses = [
                r for r in responses
                if r.headers.get('X-Object-Sysmeta-Ec-Etag') == etag
            ]

            if len(responses) >= job['policy'].ec_ndata:
                break
        else:
            self.logger.error('Unable to get enough responses (%s/%s) '
                              'to reconstruct %s with ETag %s' %
                              (len(responses), job['policy'].ec_ndata,
                               self._full_path(node, job['partition'],
                                               datafile_metadata['name'],
                                               job['policy']), etag))
            raise DiskFileError('Unable to reconstruct EC archive')

        rebuilt_fragment_iter = self.make_rebuilt_fragment_iter(
            responses[:job['policy'].ec_ndata], path, job['policy'],
            fi_to_rebuild)
        return RebuildingECDiskFileStream(datafile_metadata, fi_to_rebuild,
                                          rebuilt_fragment_iter)
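
The reconstruct_fa variants lean on Python's for/else: the else clause runs only when the loop over responses finishes without a break, i.e. without ever collecting ec_ndata usable fragments, and that is the failure path that raises DiskFileError. A tiny standalone illustration of that control flow, with made-up data:

    needed = 2
    collected = []
    for frag in ['frag-0', None, 'frag-3']:
        if frag is not None:
            collected.append(frag)
        if len(collected) >= needed:
            break                       # enough fragments, else clause skipped
    else:
        raise RuntimeError('not enough fragments')   # only if no break happened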
Example #8
    def reconstruct_fa(self, job, node, datafile_metadata):
        """
        Reconstructs a fragment archive - this method is called from ssync
        after a remote node responds that is missing this object - the local
        diskfile is opened to provide metadata - but to reconstruct the
        missing fragment archive we must connect to multiple object servers.

        :param job: job from ssync_sender
        :param node: node that we're rebuilding to
        :param datafile_metadata:  the datafile metadata to attach to
                                   the rebuilt fragment archive
        :returns: a DiskFile like class for use by ssync
        :raises DiskFileError: if the fragment archive cannot be reconstructed
        """

        part_nodes = job['policy'].object_ring.get_part_nodes(job['partition'])
        part_nodes.remove(node)

        # the fragment index we need to reconstruct is the position index
        # of the node we're rebuilding to within the primary part list
        fi_to_rebuild = job['policy'].get_backend_index(node['index'])

        # KISS send out connection requests to all nodes, see what sticks.
        # Use fragment preferences header to tell other nodes that we want
        # fragments at the same timestamp as our fragment, and that they don't
        # need to be durable.
        headers = self.headers.copy()
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        frag_prefs = [{
            'timestamp': datafile_metadata['X-Timestamp'],
            'exclude': []
        }]
        headers['X-Backend-Fragment-Preferences'] = json.dumps(frag_prefs)
        pile = GreenAsyncPile(len(part_nodes))
        path = datafile_metadata['name']
        for _node in part_nodes:
            pile.spawn(self._get_response, _node, job['partition'], path,
                       headers, job['policy'])

        buckets = defaultdict(dict)
        etag_buckets = {}
        error_resp_count = 0
        for resp in pile:
            if not resp:
                error_resp_count += 1
                continue
            resp.headers = HeaderKeyDict(resp.getheaders())
            frag_index = resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
            try:
                resp_frag_index = int(frag_index)
            except (TypeError, ValueError):
                # The successful response should include valid X-Object-
                # Sysmeta-Ec-Frag-Index but for safety, catching the case
                # either missing X-Object-Sysmeta-Ec-Frag-Index or invalid
                # frag index to reconstruct and dump warning log for that
                self.logger.warning(
                    'Invalid resp from %s '
                    '(invalid X-Object-Sysmeta-Ec-Frag-Index: %r)',
                    resp.full_path, frag_index)
                continue

            if fi_to_rebuild == resp_frag_index:
                # TODO: With duplicated EC frags it's not unreasonable to find
                # the very fragment we're trying to rebuild exists on another
                # primary node.  In this case we should stream it directly from
                # the remote node to our target instead of rebuild.  But
                # instead we ignore it.
                self.logger.debug(
                    'Found existing frag #%s at %s while rebuilding to %s',
                    fi_to_rebuild, resp.full_path,
                    _full_path(node, job['partition'],
                               datafile_metadata['name'], job['policy']))
                continue

            timestamp = resp.headers.get('X-Backend-Timestamp')
            if not timestamp:
                self.logger.warning(
                    'Invalid resp from %s, frag index %s '
                    '(missing X-Backend-Timestamp)', resp.full_path,
                    resp_frag_index)
                continue
            timestamp = Timestamp(timestamp)

            etag = resp.headers.get('X-Object-Sysmeta-Ec-Etag')
            if not etag:
                self.logger.warning(
                    'Invalid resp from %s, frag index %s '
                    '(missing Etag)', resp.full_path, resp_frag_index)
                continue

            if etag != etag_buckets.setdefault(timestamp, etag):
                self.logger.error(
                    'Mixed Etag (%s, %s) for %s frag#%s', etag,
                    etag_buckets[timestamp],
                    _full_path(node, job['partition'],
                               datafile_metadata['name'], job['policy']),
                    fi_to_rebuild)
                continue

            if resp_frag_index not in buckets[timestamp]:
                buckets[timestamp][resp_frag_index] = resp
                if len(buckets[timestamp]) >= job['policy'].ec_ndata:
                    responses = buckets[timestamp].values()
                    self.logger.debug(
                        'Reconstruct frag #%s with frag indexes %s' %
                        (fi_to_rebuild, list(buckets[timestamp])))
                    break
        else:
            for timestamp, resp in sorted(buckets.items()):
                etag = etag_buckets[timestamp]
                self.logger.error(
                    'Unable to get enough responses (%s/%s) '
                    'to reconstruct %s frag#%s with ETag %s' %
                    (len(resp), job['policy'].ec_ndata,
                     _full_path(node, job['partition'],
                                datafile_metadata['name'],
                                job['policy']), fi_to_rebuild, etag))

            if error_resp_count:
                self.logger.error(
                    'Unable to get enough responses (%s error responses) '
                    'to reconstruct %s frag#%s' %
                    (error_resp_count,
                     _full_path(node, job['partition'],
                                datafile_metadata['name'],
                                job['policy']), fi_to_rebuild))

            raise DiskFileError('Unable to reconstruct EC archive')

        rebuilt_fragment_iter = self.make_rebuilt_fragment_iter(
            responses[:job['policy'].ec_ndata], path, job['policy'],
            fi_to_rebuild)
        return RebuildingECDiskFileStream(datafile_metadata, fi_to_rebuild,
                                          rebuilt_fragment_iter)
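
Example #8 replaces the flat responses list of Examples #7 and #9 with buckets keyed by timestamp, each mapping a fragment index to a response, so fragments from different generations of the object are never mixed. The bucketing idiom in isolation, with made-up data:

    from collections import defaultdict

    buckets = defaultdict(dict)
    arrivals = [('t1', 0, 'resp-A'), ('t1', 1, 'resp-B'), ('t2', 0, 'resp-C')]
    for timestamp, frag_index, resp in arrivals:
        if frag_index not in buckets[timestamp]:
            buckets[timestamp][frag_index] = resp
    # buckets == {'t1': {0: 'resp-A', 1: 'resp-B'}, 't2': {0: 'resp-C'}}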
Example #9
    def reconstruct_fa(self, job, node, datafile_metadata):
        """
        Reconstructs a fragment archive - this method is called from ssync
        after a remote node responds that is missing this object - the local
        diskfile is opened to provide metadata - but to reconstruct the
        missing fragment archive we must connect to multiple object servers.

        :param job: job from ssync_sender
        :param node: node that we're rebuilding to
        :param datafile_metadata:  the datafile metadata to attach to
                                   the rebuilt fragment archive
        :returns: a DiskFile like class for use by ssync
        :raises DiskFileError: if the fragment archive cannot be reconstructed
        """

        part_nodes = job['policy'].object_ring.get_part_nodes(
            job['partition'])
        part_nodes.remove(node)

        # the fragment index we need to reconstruct is the position index
        # of the node we're rebuilding to within the primary part list
        fi_to_rebuild = node['index']

        # KISS send out connection requests to all nodes, see what sticks
        headers = self.headers.copy()
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        pile = GreenAsyncPile(len(part_nodes))
        path = datafile_metadata['name']
        for node in part_nodes:
            pile.spawn(self._get_response, node, job['partition'],
                       path, headers, job['policy'])
        responses = []
        etag = None
        for resp in pile:
            if not resp:
                continue
            resp.headers = HeaderKeyDict(resp.getheaders())
            if str(fi_to_rebuild) == \
                    resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index'):
                continue
            if resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index') in set(
                    r.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
                    for r in responses):
                continue
            responses.append(resp)
            etag = sorted(responses, reverse=True,
                          key=lambda r: Timestamp(
                              r.headers.get('X-Backend-Timestamp')
                          ))[0].headers.get('X-Object-Sysmeta-Ec-Etag')
            responses = [r for r in responses if
                         r.headers.get('X-Object-Sysmeta-Ec-Etag') == etag]

            if len(responses) >= job['policy'].ec_ndata:
                break
        else:
            self.logger.error(
                'Unable to get enough responses (%s/%s) '
                'to reconstruct %s with ETag %s' % (
                    len(responses), job['policy'].ec_ndata,
                    self._full_path(node, job['partition'],
                                    datafile_metadata['name'], job['policy']),
                    etag))
            raise DiskFileError('Unable to reconstruct EC archive')

        rebuilt_fragment_iter = self.make_rebuilt_fragment_iter(
            responses[:job['policy'].ec_ndata], path, job['policy'],
            fi_to_rebuild)
        return RebuildingECDiskFileStream(datafile_metadata, fi_to_rebuild,
                                          rebuilt_fragment_iter)
Example #10
class JITPrefetchMiddleware(object):

    __metaclass__ = Singleton

    def __init__(self, app, conf, jit_conf):
        self.app = app
        self.conf = jit_conf
        self.logger = get_logger(self.conf, log_route=name)
        self.chain = dict()
        #self.chain = Chain(self.logger, self.conf['chainsave'], self.conf['totalseconds'], self.conf['probthreshold'], self.conf['twolevels'])
        self.pool = GreenAsyncPile(self.conf['nthreads'])

    def __del__(self):
        self.chain.save_chain()

    def __call__(self, env, start_response):
        request = Request(env)
        resp = request.get_response(self.app)
        try:
            (version, account, container,
             objname) = split_path(request.path_info, 4, 4, True)
        except ValueError:
            return self.app
        if 'HTTP_X_NO_PREFETCH' not in request.environ:
            if request.method == 'GET':
                (site, objid, size, ext) = objname.split('_')
                if site not in self.chain:
                    self.chain[site] = Chain(self.logger,
                                             self.conf['chainsave'],
                                             self.conf['totalseconds'],
                                             self.conf['probthreshold'],
                                             self.conf['twolevels'])
                oid = (hashlib.md5(request.path_info).hexdigest())
                self.add_object_to_chain(site, oid, container, objname)
                if PREFETCH:
                    data = self.get_prefetched(site, oid, objname)
                    self.prefetch_objects(site, oid, account, request)
                    if data:
                        resp.headers['X-object-prefetched'] = 'True'
                        resp.body = data

        return resp(env, start_response)

    def add_object_to_chain(self, site, oid, container, object_name):
        self.chain[site].add(oid, object_name, container)

    def get_prefetched(self, site, oid, name):
        global multiplier
        if oid in prefetched_objects:
            data, diff, ts = prefetched_objects[oid]
            multiplier = multiplier + 0.05
            self.chain[site].add_down_time(oid, diff)
            if multiplier > 1:
                multiplier = 1
            if DELETE_WHEN_SERVED:
                del prefetched_objects[oid]
                self.logger.debug('Object ' + name + ' served and deleted')
            return data
        return False

    def prefetch_objects(self, site, oid, account, req_resp):
        objs = self.chain[site].get_probabilities(oid)
        for oid, o in objs:
            self.logger.debug(o.object_to_string())
        token = req_resp.environ['HTTP_X_AUTH_TOKEN']
        user_agent = req_resp.environ['HTTP_USER_AGENT']

        for oid, obj in objs:
            if oid not in prefetched_objects:
                self.pool.spawn(
                    Downloader(self.logger, oid, account, obj.container,
                               obj.name, user_agent, token,
                               obj.time_stamp * multiplier).run)
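
Unlike the proxy examples, the middleware in Example #10 keeps a single long-lived pile sized by the nthreads setting and spawns the bound run method of a downloader object rather than a plain function. A generic sketch of that pattern (Worker here is a hypothetical stand-in for Downloader):

    from swift.common.utils import GreenAsyncPile

    class Worker(object):
        def __init__(self, name):
            self.name = name

        def run(self):
            # executed inside a green thread when the pile schedules it
            return 'done: %s' % self.name

    pool = GreenAsyncPile(2)
    for n in ('a', 'b', 'c'):
        pool.spawn(Worker(n).run)
    for result in pool:
        print(result)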